aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--arch/powerpc/kernel/.gitignore1
-rw-r--r--arch/powerpc/kernel/Makefile9
-rw-r--r--arch/powerpc/kernel/asm-offsets.c3
-rw-r--r--arch/powerpc/kernel/btext.c34
-rw-r--r--arch/powerpc/kernel/cpu_setup_ppc970.S4
-rw-r--r--arch/powerpc/kernel/cputable.c24
-rw-r--r--arch/powerpc/kernel/dma-iommu.c (renamed from arch/powerpc/kernel/dma_64.c)101
-rw-r--r--arch/powerpc/kernel/dma.c131
-rw-r--r--arch/powerpc/kernel/entry_64.S69
-rw-r--r--arch/powerpc/kernel/head_32.S10
-rw-r--r--arch/powerpc/kernel/head_64.S473
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S26
-rw-r--r--arch/powerpc/kernel/irq.c169
-rw-r--r--arch/powerpc/kernel/lparcfg.c8
-rw-r--r--arch/powerpc/kernel/misc.S10
-rw-r--r--arch/powerpc/kernel/misc_32.S62
-rw-r--r--arch/powerpc/kernel/misc_64.S8
-rw-r--r--arch/powerpc/kernel/of_device.c2
-rw-r--r--arch/powerpc/kernel/paca.c3
-rw-r--r--arch/powerpc/kernel/pci-common.c216
-rw-r--r--arch/powerpc/kernel/pci_32.c18
-rw-r--r--arch/powerpc/kernel/pci_64.c49
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c3
-rw-r--r--arch/powerpc/kernel/prom.c43
-rw-r--r--arch/powerpc/kernel/prom_init.c29
-rw-r--r--arch/powerpc/kernel/reloc_64.S87
-rw-r--r--arch/powerpc/kernel/setup-common.c17
-rw-r--r--arch/powerpc/kernel/setup_32.c13
-rw-r--r--arch/powerpc/kernel/setup_64.c9
-rw-r--r--arch/powerpc/kernel/smp.c3
-rw-r--r--arch/powerpc/kernel/swsusp_asm64.S2
-rw-r--r--arch/powerpc/kernel/sys_ppc32.c8
-rw-r--r--arch/powerpc/kernel/sysfs.c119
-rw-r--r--arch/powerpc/kernel/vio.c2
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S18
35 files changed, 1130 insertions, 653 deletions
diff --git a/arch/powerpc/kernel/.gitignore b/arch/powerpc/kernel/.gitignore
new file mode 100644
index 000000000000..c5f676c3c224
--- /dev/null
+++ b/arch/powerpc/kernel/.gitignore
@@ -0,0 +1 @@
vmlinux.lds
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 946daea780f1..fdb58253fa5b 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -28,13 +28,14 @@ endif
28obj-y := cputable.o ptrace.o syscalls.o \ 28obj-y := cputable.o ptrace.o syscalls.o \
29 irq.o align.o signal_32.o pmc.o vdso.o \ 29 irq.o align.o signal_32.o pmc.o vdso.o \
30 init_task.o process.o systbl.o idle.o \ 30 init_task.o process.o systbl.o idle.o \
31 signal.o 31 signal.o sysfs.o
32obj-y += vdso32/ 32obj-y += vdso32/
33obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \ 33obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
34 signal_64.o ptrace32.o \ 34 signal_64.o ptrace32.o \
35 paca.o cpu_setup_ppc970.o \ 35 paca.o cpu_setup_ppc970.o \
36 cpu_setup_pa6t.o \ 36 cpu_setup_pa6t.o \
37 firmware.o sysfs.o nvram_64.o 37 firmware.o nvram_64.o
38obj64-$(CONFIG_RELOCATABLE) += reloc_64.o
38obj-$(CONFIG_PPC64) += vdso64/ 39obj-$(CONFIG_PPC64) += vdso64/
39obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o 40obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
40obj-$(CONFIG_PPC_970_NAP) += idle_power4.o 41obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
@@ -69,10 +70,10 @@ extra-$(CONFIG_8xx) := head_8xx.o
69extra-y += vmlinux.lds 70extra-y += vmlinux.lds
70 71
71obj-y += time.o prom.o traps.o setup-common.o \ 72obj-y += time.o prom.o traps.o setup-common.o \
72 udbg.o misc.o io.o \ 73 udbg.o misc.o io.o dma.o \
73 misc_$(CONFIG_WORD_SIZE).o 74 misc_$(CONFIG_WORD_SIZE).o
74obj-$(CONFIG_PPC32) += entry_32.o setup_32.o 75obj-$(CONFIG_PPC32) += entry_32.o setup_32.o
75obj-$(CONFIG_PPC64) += dma_64.o iommu.o 76obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o
76obj-$(CONFIG_KGDB) += kgdb.o 77obj-$(CONFIG_KGDB) += kgdb.o
77obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o 78obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o
78obj-$(CONFIG_MODULES) += ppc_ksyms.o 79obj-$(CONFIG_MODULES) += ppc_ksyms.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 92768d3006f7..09febc582584 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -122,6 +122,8 @@ int main(void)
122 DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr)); 122 DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
123 DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1)); 123 DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
124 DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc)); 124 DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
125 DEFINE(PACAKBASE, offsetof(struct paca_struct, kernelbase));
126 DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
125 DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled)); 127 DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
126 DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled)); 128 DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
127 DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache)); 129 DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
@@ -350,6 +352,7 @@ int main(void)
350#endif 352#endif
351 353
352 DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE); 354 DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
355 DEFINE(PTE_SIZE, sizeof(pte_t));
353 356
354#ifdef CONFIG_KVM 357#ifdef CONFIG_KVM
355 DEFINE(TLBE_BYTES, sizeof(struct tlbe)); 358 DEFINE(TLBE_BYTES, sizeof(struct tlbe));
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index d8f0329b1344..26e58630ed7b 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -442,28 +442,26 @@ void btext_drawtext(const char *c, unsigned int len)
442 442
443void btext_drawhex(unsigned long v) 443void btext_drawhex(unsigned long v)
444{ 444{
445 char *hex_table = "0123456789abcdef";
446
447 if (!boot_text_mapped) 445 if (!boot_text_mapped)
448 return; 446 return;
449#ifdef CONFIG_PPC64 447#ifdef CONFIG_PPC64
450 btext_drawchar(hex_table[(v >> 60) & 0x0000000FUL]); 448 btext_drawchar(hex_asc_hi(v >> 56));
451 btext_drawchar(hex_table[(v >> 56) & 0x0000000FUL]); 449 btext_drawchar(hex_asc_lo(v >> 56));
452 btext_drawchar(hex_table[(v >> 52) & 0x0000000FUL]); 450 btext_drawchar(hex_asc_hi(v >> 48));
453 btext_drawchar(hex_table[(v >> 48) & 0x0000000FUL]); 451 btext_drawchar(hex_asc_lo(v >> 48));
454 btext_drawchar(hex_table[(v >> 44) & 0x0000000FUL]); 452 btext_drawchar(hex_asc_hi(v >> 40));
455 btext_drawchar(hex_table[(v >> 40) & 0x0000000FUL]); 453 btext_drawchar(hex_asc_lo(v >> 40));
456 btext_drawchar(hex_table[(v >> 36) & 0x0000000FUL]); 454 btext_drawchar(hex_asc_hi(v >> 32));
457 btext_drawchar(hex_table[(v >> 32) & 0x0000000FUL]); 455 btext_drawchar(hex_asc_lo(v >> 32));
458#endif 456#endif
459 btext_drawchar(hex_table[(v >> 28) & 0x0000000FUL]); 457 btext_drawchar(hex_asc_hi(v >> 24));
460 btext_drawchar(hex_table[(v >> 24) & 0x0000000FUL]); 458 btext_drawchar(hex_asc_lo(v >> 24));
461 btext_drawchar(hex_table[(v >> 20) & 0x0000000FUL]); 459 btext_drawchar(hex_asc_hi(v >> 16));
462 btext_drawchar(hex_table[(v >> 16) & 0x0000000FUL]); 460 btext_drawchar(hex_asc_lo(v >> 16));
463 btext_drawchar(hex_table[(v >> 12) & 0x0000000FUL]); 461 btext_drawchar(hex_asc_hi(v >> 8));
464 btext_drawchar(hex_table[(v >> 8) & 0x0000000FUL]); 462 btext_drawchar(hex_asc_lo(v >> 8));
465 btext_drawchar(hex_table[(v >> 4) & 0x0000000FUL]); 463 btext_drawchar(hex_asc_hi(v));
466 btext_drawchar(hex_table[(v >> 0) & 0x0000000FUL]); 464 btext_drawchar(hex_asc_lo(v));
467 btext_drawchar(' '); 465 btext_drawchar(' ');
468} 466}
469 467
diff --git a/arch/powerpc/kernel/cpu_setup_ppc970.S b/arch/powerpc/kernel/cpu_setup_ppc970.S
index bf118c385752..27f2507279d8 100644
--- a/arch/powerpc/kernel/cpu_setup_ppc970.S
+++ b/arch/powerpc/kernel/cpu_setup_ppc970.S
@@ -110,7 +110,7 @@ load_hids:
110 isync 110 isync
111 111
112 /* Save away cpu state */ 112 /* Save away cpu state */
113 LOAD_REG_IMMEDIATE(r5,cpu_state_storage) 113 LOAD_REG_ADDR(r5,cpu_state_storage)
114 114
115 /* Save HID0,1,4 and 5 */ 115 /* Save HID0,1,4 and 5 */
116 mfspr r3,SPRN_HID0 116 mfspr r3,SPRN_HID0
@@ -134,7 +134,7 @@ _GLOBAL(__restore_cpu_ppc970)
134 rldicl. r0,r0,4,63 134 rldicl. r0,r0,4,63
135 beqlr 135 beqlr
136 136
137 LOAD_REG_IMMEDIATE(r5,cpu_state_storage) 137 LOAD_REG_ADDR(r5,cpu_state_storage)
138 /* Before accessing memory, we make sure rm_ci is clear */ 138 /* Before accessing memory, we make sure rm_ci is clear */
139 li r0,0 139 li r0,0
140 mfspr r3,SPRN_HID4 140 mfspr r3,SPRN_HID4
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 25c273c761d1..e70d0483fb4e 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -610,6 +610,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
610 .icache_bsize = 32, 610 .icache_bsize = 32,
611 .dcache_bsize = 32, 611 .dcache_bsize = 32,
612 .num_pmcs = 4, 612 .num_pmcs = 4,
613 .pmc_type = PPC_PMC_IBM,
613 .cpu_setup = __setup_cpu_750cx, 614 .cpu_setup = __setup_cpu_750cx,
614 .machine_check = machine_check_generic, 615 .machine_check = machine_check_generic,
615 .platform = "ppc750", 616 .platform = "ppc750",
@@ -623,6 +624,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
623 .icache_bsize = 32, 624 .icache_bsize = 32,
624 .dcache_bsize = 32, 625 .dcache_bsize = 32,
625 .num_pmcs = 4, 626 .num_pmcs = 4,
627 .pmc_type = PPC_PMC_IBM,
626 .cpu_setup = __setup_cpu_750cx, 628 .cpu_setup = __setup_cpu_750cx,
627 .machine_check = machine_check_generic, 629 .machine_check = machine_check_generic,
628 .platform = "ppc750", 630 .platform = "ppc750",
@@ -636,6 +638,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
636 .icache_bsize = 32, 638 .icache_bsize = 32,
637 .dcache_bsize = 32, 639 .dcache_bsize = 32,
638 .num_pmcs = 4, 640 .num_pmcs = 4,
641 .pmc_type = PPC_PMC_IBM,
639 .cpu_setup = __setup_cpu_750cx, 642 .cpu_setup = __setup_cpu_750cx,
640 .machine_check = machine_check_generic, 643 .machine_check = machine_check_generic,
641 .platform = "ppc750", 644 .platform = "ppc750",
@@ -649,6 +652,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
649 .icache_bsize = 32, 652 .icache_bsize = 32,
650 .dcache_bsize = 32, 653 .dcache_bsize = 32,
651 .num_pmcs = 4, 654 .num_pmcs = 4,
655 .pmc_type = PPC_PMC_IBM,
652 .cpu_setup = __setup_cpu_750, 656 .cpu_setup = __setup_cpu_750,
653 .machine_check = machine_check_generic, 657 .machine_check = machine_check_generic,
654 .platform = "ppc750", 658 .platform = "ppc750",
@@ -662,6 +666,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
662 .icache_bsize = 32, 666 .icache_bsize = 32,
663 .dcache_bsize = 32, 667 .dcache_bsize = 32,
664 .num_pmcs = 4, 668 .num_pmcs = 4,
669 .pmc_type = PPC_PMC_IBM,
665 .cpu_setup = __setup_cpu_750, 670 .cpu_setup = __setup_cpu_750,
666 .machine_check = machine_check_generic, 671 .machine_check = machine_check_generic,
667 .platform = "ppc750", 672 .platform = "ppc750",
@@ -675,6 +680,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
675 .icache_bsize = 32, 680 .icache_bsize = 32,
676 .dcache_bsize = 32, 681 .dcache_bsize = 32,
677 .num_pmcs = 4, 682 .num_pmcs = 4,
683 .pmc_type = PPC_PMC_IBM,
678 .cpu_setup = __setup_cpu_750, 684 .cpu_setup = __setup_cpu_750,
679 .machine_check = machine_check_generic, 685 .machine_check = machine_check_generic,
680 .platform = "ppc750", 686 .platform = "ppc750",
@@ -688,6 +694,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
688 .icache_bsize = 32, 694 .icache_bsize = 32,
689 .dcache_bsize = 32, 695 .dcache_bsize = 32,
690 .num_pmcs = 4, 696 .num_pmcs = 4,
697 .pmc_type = PPC_PMC_IBM,
691 .cpu_setup = __setup_cpu_750, 698 .cpu_setup = __setup_cpu_750,
692 .machine_check = machine_check_generic, 699 .machine_check = machine_check_generic,
693 .platform = "ppc750", 700 .platform = "ppc750",
@@ -701,6 +708,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
701 .icache_bsize = 32, 708 .icache_bsize = 32,
702 .dcache_bsize = 32, 709 .dcache_bsize = 32,
703 .num_pmcs = 4, 710 .num_pmcs = 4,
711 .pmc_type = PPC_PMC_IBM,
704 .cpu_setup = __setup_cpu_750fx, 712 .cpu_setup = __setup_cpu_750fx,
705 .machine_check = machine_check_generic, 713 .machine_check = machine_check_generic,
706 .platform = "ppc750", 714 .platform = "ppc750",
@@ -714,6 +722,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
714 .icache_bsize = 32, 722 .icache_bsize = 32,
715 .dcache_bsize = 32, 723 .dcache_bsize = 32,
716 .num_pmcs = 4, 724 .num_pmcs = 4,
725 .pmc_type = PPC_PMC_IBM,
717 .cpu_setup = __setup_cpu_750fx, 726 .cpu_setup = __setup_cpu_750fx,
718 .machine_check = machine_check_generic, 727 .machine_check = machine_check_generic,
719 .platform = "ppc750", 728 .platform = "ppc750",
@@ -727,6 +736,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
727 .icache_bsize = 32, 736 .icache_bsize = 32,
728 .dcache_bsize = 32, 737 .dcache_bsize = 32,
729 .num_pmcs = 4, 738 .num_pmcs = 4,
739 .pmc_type = PPC_PMC_IBM,
730 .cpu_setup = __setup_cpu_750, 740 .cpu_setup = __setup_cpu_750,
731 .machine_check = machine_check_generic, 741 .machine_check = machine_check_generic,
732 .platform = "ppc750", 742 .platform = "ppc750",
@@ -741,6 +751,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
741 .icache_bsize = 32, 751 .icache_bsize = 32,
742 .dcache_bsize = 32, 752 .dcache_bsize = 32,
743 .num_pmcs = 4, 753 .num_pmcs = 4,
754 .pmc_type = PPC_PMC_G4,
744 .cpu_setup = __setup_cpu_7400, 755 .cpu_setup = __setup_cpu_7400,
745 .machine_check = machine_check_generic, 756 .machine_check = machine_check_generic,
746 .platform = "ppc7400", 757 .platform = "ppc7400",
@@ -755,6 +766,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
755 .icache_bsize = 32, 766 .icache_bsize = 32,
756 .dcache_bsize = 32, 767 .dcache_bsize = 32,
757 .num_pmcs = 4, 768 .num_pmcs = 4,
769 .pmc_type = PPC_PMC_G4,
758 .cpu_setup = __setup_cpu_7400, 770 .cpu_setup = __setup_cpu_7400,
759 .machine_check = machine_check_generic, 771 .machine_check = machine_check_generic,
760 .platform = "ppc7400", 772 .platform = "ppc7400",
@@ -769,6 +781,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
769 .icache_bsize = 32, 781 .icache_bsize = 32,
770 .dcache_bsize = 32, 782 .dcache_bsize = 32,
771 .num_pmcs = 4, 783 .num_pmcs = 4,
784 .pmc_type = PPC_PMC_G4,
772 .cpu_setup = __setup_cpu_7410, 785 .cpu_setup = __setup_cpu_7410,
773 .machine_check = machine_check_generic, 786 .machine_check = machine_check_generic,
774 .platform = "ppc7400", 787 .platform = "ppc7400",
@@ -783,6 +796,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
783 .icache_bsize = 32, 796 .icache_bsize = 32,
784 .dcache_bsize = 32, 797 .dcache_bsize = 32,
785 .num_pmcs = 6, 798 .num_pmcs = 6,
799 .pmc_type = PPC_PMC_G4,
786 .cpu_setup = __setup_cpu_745x, 800 .cpu_setup = __setup_cpu_745x,
787 .oprofile_cpu_type = "ppc/7450", 801 .oprofile_cpu_type = "ppc/7450",
788 .oprofile_type = PPC_OPROFILE_G4, 802 .oprofile_type = PPC_OPROFILE_G4,
@@ -799,6 +813,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
799 .icache_bsize = 32, 813 .icache_bsize = 32,
800 .dcache_bsize = 32, 814 .dcache_bsize = 32,
801 .num_pmcs = 6, 815 .num_pmcs = 6,
816 .pmc_type = PPC_PMC_G4,
802 .cpu_setup = __setup_cpu_745x, 817 .cpu_setup = __setup_cpu_745x,
803 .oprofile_cpu_type = "ppc/7450", 818 .oprofile_cpu_type = "ppc/7450",
804 .oprofile_type = PPC_OPROFILE_G4, 819 .oprofile_type = PPC_OPROFILE_G4,
@@ -815,6 +830,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
815 .icache_bsize = 32, 830 .icache_bsize = 32,
816 .dcache_bsize = 32, 831 .dcache_bsize = 32,
817 .num_pmcs = 6, 832 .num_pmcs = 6,
833 .pmc_type = PPC_PMC_G4,
818 .cpu_setup = __setup_cpu_745x, 834 .cpu_setup = __setup_cpu_745x,
819 .oprofile_cpu_type = "ppc/7450", 835 .oprofile_cpu_type = "ppc/7450",
820 .oprofile_type = PPC_OPROFILE_G4, 836 .oprofile_type = PPC_OPROFILE_G4,
@@ -831,6 +847,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
831 .icache_bsize = 32, 847 .icache_bsize = 32,
832 .dcache_bsize = 32, 848 .dcache_bsize = 32,
833 .num_pmcs = 6, 849 .num_pmcs = 6,
850 .pmc_type = PPC_PMC_G4,
834 .cpu_setup = __setup_cpu_745x, 851 .cpu_setup = __setup_cpu_745x,
835 .oprofile_cpu_type = "ppc/7450", 852 .oprofile_cpu_type = "ppc/7450",
836 .oprofile_type = PPC_OPROFILE_G4, 853 .oprofile_type = PPC_OPROFILE_G4,
@@ -847,6 +864,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
847 .icache_bsize = 32, 864 .icache_bsize = 32,
848 .dcache_bsize = 32, 865 .dcache_bsize = 32,
849 .num_pmcs = 6, 866 .num_pmcs = 6,
867 .pmc_type = PPC_PMC_G4,
850 .cpu_setup = __setup_cpu_745x, 868 .cpu_setup = __setup_cpu_745x,
851 .oprofile_cpu_type = "ppc/7450", 869 .oprofile_cpu_type = "ppc/7450",
852 .oprofile_type = PPC_OPROFILE_G4, 870 .oprofile_type = PPC_OPROFILE_G4,
@@ -863,6 +881,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
863 .icache_bsize = 32, 881 .icache_bsize = 32,
864 .dcache_bsize = 32, 882 .dcache_bsize = 32,
865 .num_pmcs = 6, 883 .num_pmcs = 6,
884 .pmc_type = PPC_PMC_G4,
866 .cpu_setup = __setup_cpu_745x, 885 .cpu_setup = __setup_cpu_745x,
867 .oprofile_cpu_type = "ppc/7450", 886 .oprofile_cpu_type = "ppc/7450",
868 .oprofile_type = PPC_OPROFILE_G4, 887 .oprofile_type = PPC_OPROFILE_G4,
@@ -879,6 +898,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
879 .icache_bsize = 32, 898 .icache_bsize = 32,
880 .dcache_bsize = 32, 899 .dcache_bsize = 32,
881 .num_pmcs = 6, 900 .num_pmcs = 6,
901 .pmc_type = PPC_PMC_G4,
882 .cpu_setup = __setup_cpu_745x, 902 .cpu_setup = __setup_cpu_745x,
883 .oprofile_cpu_type = "ppc/7450", 903 .oprofile_cpu_type = "ppc/7450",
884 .oprofile_type = PPC_OPROFILE_G4, 904 .oprofile_type = PPC_OPROFILE_G4,
@@ -895,6 +915,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
895 .icache_bsize = 32, 915 .icache_bsize = 32,
896 .dcache_bsize = 32, 916 .dcache_bsize = 32,
897 .num_pmcs = 6, 917 .num_pmcs = 6,
918 .pmc_type = PPC_PMC_G4,
898 .cpu_setup = __setup_cpu_745x, 919 .cpu_setup = __setup_cpu_745x,
899 .oprofile_cpu_type = "ppc/7450", 920 .oprofile_cpu_type = "ppc/7450",
900 .oprofile_type = PPC_OPROFILE_G4, 921 .oprofile_type = PPC_OPROFILE_G4,
@@ -910,6 +931,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
910 .icache_bsize = 32, 931 .icache_bsize = 32,
911 .dcache_bsize = 32, 932 .dcache_bsize = 32,
912 .num_pmcs = 6, 933 .num_pmcs = 6,
934 .pmc_type = PPC_PMC_G4,
913 .cpu_setup = __setup_cpu_745x, 935 .cpu_setup = __setup_cpu_745x,
914 .oprofile_cpu_type = "ppc/7450", 936 .oprofile_cpu_type = "ppc/7450",
915 .oprofile_type = PPC_OPROFILE_G4, 937 .oprofile_type = PPC_OPROFILE_G4,
@@ -926,6 +948,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
926 .icache_bsize = 32, 948 .icache_bsize = 32,
927 .dcache_bsize = 32, 949 .dcache_bsize = 32,
928 .num_pmcs = 6, 950 .num_pmcs = 6,
951 .pmc_type = PPC_PMC_G4,
929 .cpu_setup = __setup_cpu_745x, 952 .cpu_setup = __setup_cpu_745x,
930 .oprofile_cpu_type = "ppc/7450", 953 .oprofile_cpu_type = "ppc/7450",
931 .oprofile_type = PPC_OPROFILE_G4, 954 .oprofile_type = PPC_OPROFILE_G4,
@@ -942,6 +965,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
942 .icache_bsize = 32, 965 .icache_bsize = 32,
943 .dcache_bsize = 32, 966 .dcache_bsize = 32,
944 .num_pmcs = 6, 967 .num_pmcs = 6,
968 .pmc_type = PPC_PMC_G4,
945 .cpu_setup = __setup_cpu_745x, 969 .cpu_setup = __setup_cpu_745x,
946 .oprofile_cpu_type = "ppc/7450", 970 .oprofile_cpu_type = "ppc/7450",
947 .oprofile_type = PPC_OPROFILE_G4, 971 .oprofile_type = PPC_OPROFILE_G4,
diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma-iommu.c
index ae5708e3a312..49248f89ce23 100644
--- a/arch/powerpc/kernel/dma_64.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -2,14 +2,10 @@
2 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation 2 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
3 * 3 *
4 * Provide default implementations of the DMA mapping callbacks for 4 * Provide default implementations of the DMA mapping callbacks for
5 * directly mapped busses and busses using the iommu infrastructure 5 * busses using the iommu infrastructure
6 */ 6 */
7 7
8#include <linux/device.h>
9#include <linux/dma-mapping.h>
10#include <asm/bug.h>
11#include <asm/iommu.h> 8#include <asm/iommu.h>
12#include <asm/abs_addr.h>
13 9
14/* 10/*
15 * Generic iommu implementation 11 * Generic iommu implementation
@@ -24,7 +20,7 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
24{ 20{
25 return iommu_alloc_coherent(dev, dev->archdata.dma_data, size, 21 return iommu_alloc_coherent(dev, dev->archdata.dma_data, size,
26 dma_handle, device_to_mask(dev), flag, 22 dma_handle, device_to_mask(dev), flag,
27 dev->archdata.numa_node); 23 dev_to_node(dev));
28} 24}
29 25
30static void dma_iommu_free_coherent(struct device *dev, size_t size, 26static void dma_iommu_free_coherent(struct device *dev, size_t size,
@@ -105,96 +101,3 @@ struct dma_mapping_ops dma_iommu_ops = {
105 .dma_supported = dma_iommu_dma_supported, 101 .dma_supported = dma_iommu_dma_supported,
106}; 102};
107EXPORT_SYMBOL(dma_iommu_ops); 103EXPORT_SYMBOL(dma_iommu_ops);
108
109/*
110 * Generic direct DMA implementation
111 *
112 * This implementation supports a per-device offset that can be applied if
113 * the address at which memory is visible to devices is not 0. Platform code
114 * can set archdata.dma_data to an unsigned long holding the offset. By
115 * default the offset is zero.
116 */
117
118static unsigned long get_dma_direct_offset(struct device *dev)
119{
120 return (unsigned long)dev->archdata.dma_data;
121}
122
123static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
124 dma_addr_t *dma_handle, gfp_t flag)
125{
126 struct page *page;
127 void *ret;
128 int node = dev->archdata.numa_node;
129
130 page = alloc_pages_node(node, flag, get_order(size));
131 if (page == NULL)
132 return NULL;
133 ret = page_address(page);
134 memset(ret, 0, size);
135 *dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);
136
137 return ret;
138}
139
140static void dma_direct_free_coherent(struct device *dev, size_t size,
141 void *vaddr, dma_addr_t dma_handle)
142{
143 free_pages((unsigned long)vaddr, get_order(size));
144}
145
146static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr,
147 size_t size,
148 enum dma_data_direction direction,
149 struct dma_attrs *attrs)
150{
151 return virt_to_abs(ptr) + get_dma_direct_offset(dev);
152}
153
154static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
155 size_t size,
156 enum dma_data_direction direction,
157 struct dma_attrs *attrs)
158{
159}
160
161static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
162 int nents, enum dma_data_direction direction,
163 struct dma_attrs *attrs)
164{
165 struct scatterlist *sg;
166 int i;
167
168 for_each_sg(sgl, sg, nents, i) {
169 sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
170 sg->dma_length = sg->length;
171 }
172
173 return nents;
174}
175
176static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
177 int nents, enum dma_data_direction direction,
178 struct dma_attrs *attrs)
179{
180}
181
182static int dma_direct_dma_supported(struct device *dev, u64 mask)
183{
184 /* Could be improved to check for memory though it better be
185 * done via some global so platforms can set the limit in case
186 * they have limited DMA windows
187 */
188 return mask >= DMA_32BIT_MASK;
189}
190
191struct dma_mapping_ops dma_direct_ops = {
192 .alloc_coherent = dma_direct_alloc_coherent,
193 .free_coherent = dma_direct_free_coherent,
194 .map_single = dma_direct_map_single,
195 .unmap_single = dma_direct_unmap_single,
196 .map_sg = dma_direct_map_sg,
197 .unmap_sg = dma_direct_unmap_sg,
198 .dma_supported = dma_direct_dma_supported,
199};
200EXPORT_SYMBOL(dma_direct_ops);
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
new file mode 100644
index 000000000000..1562daf8839a
--- /dev/null
+++ b/arch/powerpc/kernel/dma.c
@@ -0,0 +1,131 @@
1/*
2 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
3 *
4 * Provide default implementations of the DMA mapping callbacks for
5 * directly mapped busses.
6 */
7
8#include <linux/device.h>
9#include <linux/dma-mapping.h>
10#include <asm/bug.h>
11#include <asm/abs_addr.h>
12
13/*
14 * Generic direct DMA implementation
15 *
16 * This implementation supports a per-device offset that can be applied if
17 * the address at which memory is visible to devices is not 0. Platform code
18 * can set archdata.dma_data to an unsigned long holding the offset. By
19 * default the offset is PCI_DRAM_OFFSET.
20 */
21
22static unsigned long get_dma_direct_offset(struct device *dev)
23{
24 if (dev)
25 return (unsigned long)dev->archdata.dma_data;
26
27 return PCI_DRAM_OFFSET;
28}
29
30void *dma_direct_alloc_coherent(struct device *dev, size_t size,
31 dma_addr_t *dma_handle, gfp_t flag)
32{
33 void *ret;
34#ifdef CONFIG_NOT_COHERENT_CACHE
35 ret = __dma_alloc_coherent(size, dma_handle, flag);
36 if (ret == NULL)
37 return NULL;
38 *dma_handle += get_dma_direct_offset(dev);
39 return ret;
40#else
41 struct page *page;
42 int node = dev_to_node(dev);
43
44 /* ignore region specifiers */
45 flag &= ~(__GFP_HIGHMEM);
46
47 page = alloc_pages_node(node, flag, get_order(size));
48 if (page == NULL)
49 return NULL;
50 ret = page_address(page);
51 memset(ret, 0, size);
52 *dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);
53
54 return ret;
55#endif
56}
57
58void dma_direct_free_coherent(struct device *dev, size_t size,
59 void *vaddr, dma_addr_t dma_handle)
60{
61#ifdef CONFIG_NOT_COHERENT_CACHE
62 __dma_free_coherent(size, vaddr);
63#else
64 free_pages((unsigned long)vaddr, get_order(size));
65#endif
66}
67
68static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
69 int nents, enum dma_data_direction direction,
70 struct dma_attrs *attrs)
71{
72 struct scatterlist *sg;
73 int i;
74
75 for_each_sg(sgl, sg, nents, i) {
76 sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
77 sg->dma_length = sg->length;
78 }
79
80 return nents;
81}
82
83static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
84 int nents, enum dma_data_direction direction,
85 struct dma_attrs *attrs)
86{
87}
88
89static int dma_direct_dma_supported(struct device *dev, u64 mask)
90{
91#ifdef CONFIG_PPC64
92 /* Could be improved to check for memory though it better be
93 * done via some global so platforms can set the limit in case
94 * they have limited DMA windows
95 */
96 return mask >= DMA_32BIT_MASK;
97#else
98 return 1;
99#endif
100}
101
102static inline dma_addr_t dma_direct_map_page(struct device *dev,
103 struct page *page,
104 unsigned long offset,
105 size_t size,
106 enum dma_data_direction dir,
107 struct dma_attrs *attrs)
108{
109 BUG_ON(dir == DMA_NONE);
110 __dma_sync_page(page, offset, size, dir);
111 return page_to_phys(page) + offset + get_dma_direct_offset(dev);
112}
113
114static inline void dma_direct_unmap_page(struct device *dev,
115 dma_addr_t dma_address,
116 size_t size,
117 enum dma_data_direction direction,
118 struct dma_attrs *attrs)
119{
120}
121
122struct dma_mapping_ops dma_direct_ops = {
123 .alloc_coherent = dma_direct_alloc_coherent,
124 .free_coherent = dma_direct_free_coherent,
125 .map_sg = dma_direct_map_sg,
126 .unmap_sg = dma_direct_unmap_sg,
127 .dma_supported = dma_direct_dma_supported,
128 .map_page = dma_direct_map_page,
129 .unmap_page = dma_direct_unmap_page,
130};
131EXPORT_SYMBOL(dma_direct_ops);
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 2d802e97097c..fd8b4bae9b04 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -512,31 +512,12 @@ _GLOBAL(ret_from_except_lite)
512#endif 512#endif
513 513
514restore: 514restore:
515 ld r5,SOFTE(r1)
516#ifdef CONFIG_PPC_ISERIES
517BEGIN_FW_FTR_SECTION 515BEGIN_FW_FTR_SECTION
518 cmpdi 0,r5,0 516 ld r5,SOFTE(r1)
519 beq 4f 517FW_FTR_SECTION_ELSE
520 /* Check for pending interrupts (iSeries) */ 518 b iseries_check_pending_irqs
521 ld r3,PACALPPACAPTR(r13) 519ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
522 ld r3,LPPACAANYINT(r3) 5202:
523 cmpdi r3,0
524 beq+ 4f /* skip do_IRQ if no interrupts */
525
526 li r3,0
527 stb r3,PACASOFTIRQEN(r13) /* ensure we are soft-disabled */
528#ifdef CONFIG_TRACE_IRQFLAGS
529 bl .trace_hardirqs_off
530 mfmsr r10
531#endif
532 ori r10,r10,MSR_EE
533 mtmsrd r10 /* hard-enable again */
534 addi r3,r1,STACK_FRAME_OVERHEAD
535 bl .do_IRQ
536 b .ret_from_except_lite /* loop back and handle more */
5374:
538END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
539#endif
540 TRACE_AND_RESTORE_IRQ(r5); 521 TRACE_AND_RESTORE_IRQ(r5);
541 522
542 /* extract EE bit and use it to restore paca->hard_enabled */ 523 /* extract EE bit and use it to restore paca->hard_enabled */
@@ -592,6 +573,30 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
592 rfid 573 rfid
593 b . /* prevent speculative execution */ 574 b . /* prevent speculative execution */
594 575
576iseries_check_pending_irqs:
577#ifdef CONFIG_PPC_ISERIES
578 ld r5,SOFTE(r1)
579 cmpdi 0,r5,0
580 beq 2b
581 /* Check for pending interrupts (iSeries) */
582 ld r3,PACALPPACAPTR(r13)
583 ld r3,LPPACAANYINT(r3)
584 cmpdi r3,0
585 beq+ 2b /* skip do_IRQ if no interrupts */
586
587 li r3,0
588 stb r3,PACASOFTIRQEN(r13) /* ensure we are soft-disabled */
589#ifdef CONFIG_TRACE_IRQFLAGS
590 bl .trace_hardirqs_off
591 mfmsr r10
592#endif
593 ori r10,r10,MSR_EE
594 mtmsrd r10 /* hard-enable again */
595 addi r3,r1,STACK_FRAME_OVERHEAD
596 bl .do_IRQ
597 b .ret_from_except_lite /* loop back and handle more */
598#endif
599
595do_work: 600do_work:
596#ifdef CONFIG_PREEMPT 601#ifdef CONFIG_PREEMPT
597 andi. r0,r3,MSR_PR /* Returning to user mode? */ 602 andi. r0,r3,MSR_PR /* Returning to user mode? */
@@ -685,10 +690,6 @@ _GLOBAL(enter_rtas)
685 std r7,_DAR(r1) 690 std r7,_DAR(r1)
686 mfdsisr r8 691 mfdsisr r8
687 std r8,_DSISR(r1) 692 std r8,_DSISR(r1)
688 mfsrr0 r9
689 std r9,_SRR0(r1)
690 mfsrr1 r10
691 std r10,_SRR1(r1)
692 693
693 /* Temporary workaround to clear CR until RTAS can be modified to 694 /* Temporary workaround to clear CR until RTAS can be modified to
694 * ignore all bits. 695 * ignore all bits.
@@ -749,6 +750,10 @@ _STATIC(rtas_return_loc)
749 mfspr r4,SPRN_SPRG3 /* Get PACA */ 750 mfspr r4,SPRN_SPRG3 /* Get PACA */
750 clrldi r4,r4,2 /* convert to realmode address */ 751 clrldi r4,r4,2 /* convert to realmode address */
751 752
753 bcl 20,31,$+4
7540: mflr r3
755 ld r3,(1f-0b)(r3) /* get &.rtas_restore_regs */
756
752 mfmsr r6 757 mfmsr r6
753 li r0,MSR_RI 758 li r0,MSR_RI
754 andc r6,r6,r0 759 andc r6,r6,r0
@@ -756,7 +761,6 @@ _STATIC(rtas_return_loc)
756 mtmsrd r6 761 mtmsrd r6
757 762
758 ld r1,PACAR1(r4) /* Restore our SP */ 763 ld r1,PACAR1(r4) /* Restore our SP */
759 LOAD_REG_IMMEDIATE(r3,.rtas_restore_regs)
760 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */ 764 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
761 765
762 mtspr SPRN_SRR0,r3 766 mtspr SPRN_SRR0,r3
@@ -764,6 +768,9 @@ _STATIC(rtas_return_loc)
764 rfid 768 rfid
765 b . /* prevent speculative execution */ 769 b . /* prevent speculative execution */
766 770
771 .align 3
7721: .llong .rtas_restore_regs
773
767_STATIC(rtas_restore_regs) 774_STATIC(rtas_restore_regs)
768 /* relocation is on at this point */ 775 /* relocation is on at this point */
769 REST_GPR(2, r1) /* Restore the TOC */ 776 REST_GPR(2, r1) /* Restore the TOC */
@@ -783,10 +790,6 @@ _STATIC(rtas_restore_regs)
783 mtdar r7 790 mtdar r7
784 ld r8,_DSISR(r1) 791 ld r8,_DSISR(r1)
785 mtdsisr r8 792 mtdsisr r8
786 ld r9,_SRR0(r1)
787 mtsrr0 r9
788 ld r10,_SRR1(r1)
789 mtsrr1 r10
790 793
791 addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */ 794 addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */
792 ld r0,16(r1) /* get return address */ 795 ld r0,16(r1) /* get return address */
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 8bb657519299..0c326823c6d4 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -110,6 +110,12 @@ __start:
110#ifdef CONFIG_PPC_MULTIPLATFORM 110#ifdef CONFIG_PPC_MULTIPLATFORM
111 cmpwi 0,r5,0 111 cmpwi 0,r5,0
112 beq 1f 112 beq 1f
113
114 /* find out where we are now */
115 bcl 20,31,$+4
1160: mflr r8 /* r8 = runtime addr here */
117 addis r8,r8,(_stext - 0b)@ha
118 addi r8,r8,(_stext - 0b)@l /* current runtime base addr */
113 bl prom_init 119 bl prom_init
114 trap 120 trap
115#endif 121#endif
@@ -369,13 +375,13 @@ i##n: \
369DataAccess: 375DataAccess:
370 EXCEPTION_PROLOG 376 EXCEPTION_PROLOG
371 mfspr r10,SPRN_DSISR 377 mfspr r10,SPRN_DSISR
378 stw r10,_DSISR(r11)
372 andis. r0,r10,0xa470 /* weird error? */ 379 andis. r0,r10,0xa470 /* weird error? */
373 bne 1f /* if not, try to put a PTE */ 380 bne 1f /* if not, try to put a PTE */
374 mfspr r4,SPRN_DAR /* into the hash table */ 381 mfspr r4,SPRN_DAR /* into the hash table */
375 rlwinm r3,r10,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */ 382 rlwinm r3,r10,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */
376 bl hash_page 383 bl hash_page
3771: stw r10,_DSISR(r11) 3841: lwz r5,_DSISR(r11) /* get DSISR value */
378 mr r5,r10
379 mfspr r4,SPRN_DAR 385 mfspr r4,SPRN_DAR
380 EXC_XFER_EE_LITE(0x300, handle_page_fault) 386 EXC_XFER_EE_LITE(0x300, handle_page_fault)
381 387
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index cc8fb474d520..84856bee33a5 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -82,7 +82,11 @@ END_FTR_SECTION(0, 1)
82 /* Catch branch to 0 in real mode */ 82 /* Catch branch to 0 in real mode */
83 trap 83 trap
84 84
85 /* Secondary processors spin on this value until it goes to 1. */ 85 /* Secondary processors spin on this value until it becomes nonzero.
86 * When it does it contains the real address of the descriptor
87 * of the function that the cpu should jump to to continue
88 * initialization.
89 */
86 .globl __secondary_hold_spinloop 90 .globl __secondary_hold_spinloop
87__secondary_hold_spinloop: 91__secondary_hold_spinloop:
88 .llong 0x0 92 .llong 0x0
@@ -109,8 +113,11 @@ __secondary_hold_acknowledge:
109 * before the bulk of the kernel has been relocated. This code 113 * before the bulk of the kernel has been relocated. This code
110 * is relocated to physical address 0x60 before prom_init is run. 114 * is relocated to physical address 0x60 before prom_init is run.
111 * All of it must fit below the first exception vector at 0x100. 115 * All of it must fit below the first exception vector at 0x100.
116 * Use .globl here not _GLOBAL because we want __secondary_hold
117 * to be the actual text address, not a descriptor.
112 */ 118 */
113_GLOBAL(__secondary_hold) 119 .globl __secondary_hold
120__secondary_hold:
114 mfmsr r24 121 mfmsr r24
115 ori r24,r24,MSR_RI 122 ori r24,r24,MSR_RI
116 mtmsrd r24 /* RI on */ 123 mtmsrd r24 /* RI on */
@@ -121,16 +128,16 @@ _GLOBAL(__secondary_hold)
121 /* Tell the master cpu we're here */ 128 /* Tell the master cpu we're here */
122 /* Relocation is off & we are located at an address less */ 129 /* Relocation is off & we are located at an address less */
123 /* than 0x100, so only need to grab low order offset. */ 130 /* than 0x100, so only need to grab low order offset. */
124 std r24,__secondary_hold_acknowledge@l(0) 131 std r24,__secondary_hold_acknowledge-_stext(0)
125 sync 132 sync
126 133
127 /* All secondary cpus wait here until told to start. */ 134 /* All secondary cpus wait here until told to start. */
128100: ld r4,__secondary_hold_spinloop@l(0) 135100: ld r4,__secondary_hold_spinloop-_stext(0)
129 cmpdi 0,r4,1 136 cmpdi 0,r4,0
130 bne 100b 137 beq 100b
131 138
132#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC) 139#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
133 LOAD_REG_IMMEDIATE(r4, .generic_secondary_smp_init) 140 ld r4,0(r4) /* deref function descriptor */
134 mtctr r4 141 mtctr r4
135 mr r3,r24 142 mr r3,r24
136 bctr 143 bctr
@@ -147,6 +154,10 @@ exception_marker:
147/* 154/*
148 * This is the start of the interrupt handlers for pSeries 155 * This is the start of the interrupt handlers for pSeries
149 * This code runs with relocation off. 156 * This code runs with relocation off.
157 * Code from here to __end_interrupts gets copied down to real
158 * address 0x100 when we are running a relocatable kernel.
159 * Therefore any relative branches in this section must only
160 * branch to labels in this section.
150 */ 161 */
151 . = 0x100 162 . = 0x100
152 .globl __start_interrupts 163 .globl __start_interrupts
@@ -200,7 +211,20 @@ data_access_slb_pSeries:
200 mfspr r10,SPRN_SPRG1 211 mfspr r10,SPRN_SPRG1
201 std r10,PACA_EXSLB+EX_R13(r13) 212 std r10,PACA_EXSLB+EX_R13(r13)
202 mfspr r12,SPRN_SRR1 /* and SRR1 */ 213 mfspr r12,SPRN_SRR1 /* and SRR1 */
203 b .slb_miss_realmode /* Rel. branch works in real mode */ 214#ifndef CONFIG_RELOCATABLE
215 b .slb_miss_realmode
216#else
217 /*
218 * We can't just use a direct branch to .slb_miss_realmode
219 * because the distance from here to there depends on where
220 * the kernel ends up being put.
221 */
222 mfctr r11
223 ld r10,PACAKBASE(r13)
224 LOAD_HANDLER(r10, .slb_miss_realmode)
225 mtctr r10
226 bctr
227#endif
204 228
205 STD_EXCEPTION_PSERIES(0x400, instruction_access) 229 STD_EXCEPTION_PSERIES(0x400, instruction_access)
206 230
@@ -225,7 +249,15 @@ instruction_access_slb_pSeries:
225 mfspr r10,SPRN_SPRG1 249 mfspr r10,SPRN_SPRG1
226 std r10,PACA_EXSLB+EX_R13(r13) 250 std r10,PACA_EXSLB+EX_R13(r13)
227 mfspr r12,SPRN_SRR1 /* and SRR1 */ 251 mfspr r12,SPRN_SRR1 /* and SRR1 */
228 b .slb_miss_realmode /* Rel. branch works in real mode */ 252#ifndef CONFIG_RELOCATABLE
253 b .slb_miss_realmode
254#else
255 mfctr r11
256 ld r10,PACAKBASE(r13)
257 LOAD_HANDLER(r10, .slb_miss_realmode)
258 mtctr r10
259 bctr
260#endif
229 261
230 MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt) 262 MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
231 STD_EXCEPTION_PSERIES(0x600, alignment) 263 STD_EXCEPTION_PSERIES(0x600, alignment)
@@ -244,14 +276,12 @@ BEGIN_FTR_SECTION
244 beq- 1f 276 beq- 1f
245END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) 277END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
246 mr r9,r13 278 mr r9,r13
247 mfmsr r10
248 mfspr r13,SPRN_SPRG3 279 mfspr r13,SPRN_SPRG3
249 mfspr r11,SPRN_SRR0 280 mfspr r11,SPRN_SRR0
250 clrrdi r12,r13,32 281 ld r12,PACAKBASE(r13)
251 oris r12,r12,system_call_common@h 282 ld r10,PACAKMSR(r13)
252 ori r12,r12,system_call_common@l 283 LOAD_HANDLER(r12, system_call_entry)
253 mtspr SPRN_SRR0,r12 284 mtspr SPRN_SRR0,r12
254 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
255 mfspr r12,SPRN_SRR1 285 mfspr r12,SPRN_SRR1
256 mtspr SPRN_SRR1,r10 286 mtspr SPRN_SRR1,r10
257 rfid 287 rfid
@@ -325,16 +355,32 @@ do_stab_bolted_pSeries:
325 mfspr r12,SPRN_SPRG2 355 mfspr r12,SPRN_SPRG2
326 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted) 356 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
327 357
358#ifdef CONFIG_PPC_PSERIES
359/*
360 * Vectors for the FWNMI option. Share common code.
361 */
362 .globl system_reset_fwnmi
363 .align 7
364system_reset_fwnmi:
365 HMT_MEDIUM
366 mtspr SPRN_SPRG1,r13 /* save r13 */
367 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
368
369 .globl machine_check_fwnmi
370 .align 7
371machine_check_fwnmi:
372 HMT_MEDIUM
373 mtspr SPRN_SPRG1,r13 /* save r13 */
374 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
375
376#endif /* CONFIG_PPC_PSERIES */
377
378#ifdef __DISABLED__
328/* 379/*
329 * We have some room here we use that to put
330 * the peries slb miss user trampoline code so it's reasonably
331 * away from slb_miss_user_common to avoid problems with rfid
332 *
333 * This is used for when the SLB miss handler has to go virtual, 380 * This is used for when the SLB miss handler has to go virtual,
334 * which doesn't happen for now anymore but will once we re-implement 381 * which doesn't happen for now anymore but will once we re-implement
335 * dynamic VSIDs for shared page tables 382 * dynamic VSIDs for shared page tables
336 */ 383 */
337#ifdef __DISABLED__
338slb_miss_user_pseries: 384slb_miss_user_pseries:
339 std r10,PACA_EXGEN+EX_R10(r13) 385 std r10,PACA_EXGEN+EX_R10(r13)
340 std r11,PACA_EXGEN+EX_R11(r13) 386 std r11,PACA_EXGEN+EX_R11(r13)
@@ -357,25 +403,17 @@ slb_miss_user_pseries:
357 b . /* prevent spec. execution */ 403 b . /* prevent spec. execution */
358#endif /* __DISABLED__ */ 404#endif /* __DISABLED__ */
359 405
360#ifdef CONFIG_PPC_PSERIES 406 .align 7
407 .globl __end_interrupts
408__end_interrupts:
409
361/* 410/*
362 * Vectors for the FWNMI option. Share common code. 411 * Code from here down to __end_handlers is invoked from the
412 * exception prologs above. Because the prologs assemble the
413 * addresses of these handlers using the LOAD_HANDLER macro,
414 * which uses an addi instruction, these handlers must be in
415 * the first 32k of the kernel image.
363 */ 416 */
364 .globl system_reset_fwnmi
365 .align 7
366system_reset_fwnmi:
367 HMT_MEDIUM
368 mtspr SPRN_SPRG1,r13 /* save r13 */
369 EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXGEN, system_reset_common)
370
371 .globl machine_check_fwnmi
372 .align 7
373machine_check_fwnmi:
374 HMT_MEDIUM
375 mtspr SPRN_SPRG1,r13 /* save r13 */
376 EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXMC, machine_check_common)
377
378#endif /* CONFIG_PPC_PSERIES */
379 417
380/*** Common interrupt handlers ***/ 418/*** Common interrupt handlers ***/
381 419
@@ -414,6 +452,10 @@ machine_check_common:
414 STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception) 452 STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
415#endif /* CONFIG_CBE_RAS */ 453#endif /* CONFIG_CBE_RAS */
416 454
455 .align 7
456system_call_entry:
457 b system_call_common
458
417/* 459/*
418 * Here we have detected that the kernel stack pointer is bad. 460 * Here we have detected that the kernel stack pointer is bad.
419 * R9 contains the saved CR, r13 points to the paca, 461 * R9 contains the saved CR, r13 points to the paca,
@@ -457,65 +499,6 @@ bad_stack:
457 b 1b 499 b 1b
458 500
459/* 501/*
460 * Return from an exception with minimal checks.
461 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
462 * If interrupts have been enabled, or anything has been
463 * done that might have changed the scheduling status of
464 * any task or sent any task a signal, you should use
465 * ret_from_except or ret_from_except_lite instead of this.
466 */
467fast_exc_return_irq: /* restores irq state too */
468 ld r3,SOFTE(r1)
469 TRACE_AND_RESTORE_IRQ(r3);
470 ld r12,_MSR(r1)
471 rldicl r4,r12,49,63 /* get MSR_EE to LSB */
472 stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */
473 b 1f
474
475 .globl fast_exception_return
476fast_exception_return:
477 ld r12,_MSR(r1)
4781: ld r11,_NIP(r1)
479 andi. r3,r12,MSR_RI /* check if RI is set */
480 beq- unrecov_fer
481
482#ifdef CONFIG_VIRT_CPU_ACCOUNTING
483 andi. r3,r12,MSR_PR
484 beq 2f
485 ACCOUNT_CPU_USER_EXIT(r3, r4)
4862:
487#endif
488
489 ld r3,_CCR(r1)
490 ld r4,_LINK(r1)
491 ld r5,_CTR(r1)
492 ld r6,_XER(r1)
493 mtcr r3
494 mtlr r4
495 mtctr r5
496 mtxer r6
497 REST_GPR(0, r1)
498 REST_8GPRS(2, r1)
499
500 mfmsr r10
501 rldicl r10,r10,48,1 /* clear EE */
502 rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */
503 mtmsrd r10,1
504
505 mtspr SPRN_SRR1,r12
506 mtspr SPRN_SRR0,r11
507 REST_4GPRS(10, r1)
508 ld r1,GPR1(r1)
509 rfid
510 b . /* prevent speculative execution */
511
512unrecov_fer:
513 bl .save_nvgprs
5141: addi r3,r1,STACK_FRAME_OVERHEAD
515 bl .unrecoverable_exception
516 b 1b
517
518/*
519 * Here r13 points to the paca, r9 contains the saved CR, 502 * Here r13 points to the paca, r9 contains the saved CR,
520 * SRR0 and SRR1 are saved in r11 and r12, 503 * SRR0 and SRR1 are saved in r11 and r12,
521 * r9 - r13 are saved in paca->exgen. 504 * r9 - r13 are saved in paca->exgen.
@@ -616,6 +599,9 @@ unrecov_user_slb:
616 */ 599 */
617_GLOBAL(slb_miss_realmode) 600_GLOBAL(slb_miss_realmode)
618 mflr r10 601 mflr r10
602#ifdef CONFIG_RELOCATABLE
603 mtctr r11
604#endif
619 605
620 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ 606 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
621 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ 607 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
@@ -666,11 +652,10 @@ BEGIN_FW_FTR_SECTION
666END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 652END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
667#endif /* CONFIG_PPC_ISERIES */ 653#endif /* CONFIG_PPC_ISERIES */
668 mfspr r11,SPRN_SRR0 654 mfspr r11,SPRN_SRR0
669 clrrdi r10,r13,32 655 ld r10,PACAKBASE(r13)
670 LOAD_HANDLER(r10,unrecov_slb) 656 LOAD_HANDLER(r10,unrecov_slb)
671 mtspr SPRN_SRR0,r10 657 mtspr SPRN_SRR0,r10
672 mfmsr r10 658 ld r10,PACAKMSR(r13)
673 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
674 mtspr SPRN_SRR1,r10 659 mtspr SPRN_SRR1,r10
675 rfid 660 rfid
676 b . 661 b .
@@ -766,6 +751,85 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
766 bl .altivec_unavailable_exception 751 bl .altivec_unavailable_exception
767 b .ret_from_except 752 b .ret_from_except
768 753
754 .align 7
755 .globl vsx_unavailable_common
756vsx_unavailable_common:
757 EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
758#ifdef CONFIG_VSX
759BEGIN_FTR_SECTION
760 bne .load_up_vsx
7611:
762END_FTR_SECTION_IFSET(CPU_FTR_VSX)
763#endif
764 bl .save_nvgprs
765 addi r3,r1,STACK_FRAME_OVERHEAD
766 ENABLE_INTS
767 bl .vsx_unavailable_exception
768 b .ret_from_except
769
770 .align 7
771 .globl __end_handlers
772__end_handlers:
773
774/*
775 * Return from an exception with minimal checks.
776 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
777 * If interrupts have been enabled, or anything has been
778 * done that might have changed the scheduling status of
779 * any task or sent any task a signal, you should use
780 * ret_from_except or ret_from_except_lite instead of this.
781 */
782fast_exc_return_irq: /* restores irq state too */
783 ld r3,SOFTE(r1)
784 TRACE_AND_RESTORE_IRQ(r3);
785 ld r12,_MSR(r1)
786 rldicl r4,r12,49,63 /* get MSR_EE to LSB */
787 stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */
788 b 1f
789
790 .globl fast_exception_return
791fast_exception_return:
792 ld r12,_MSR(r1)
7931: ld r11,_NIP(r1)
794 andi. r3,r12,MSR_RI /* check if RI is set */
795 beq- unrecov_fer
796
797#ifdef CONFIG_VIRT_CPU_ACCOUNTING
798 andi. r3,r12,MSR_PR
799 beq 2f
800 ACCOUNT_CPU_USER_EXIT(r3, r4)
8012:
802#endif
803
804 ld r3,_CCR(r1)
805 ld r4,_LINK(r1)
806 ld r5,_CTR(r1)
807 ld r6,_XER(r1)
808 mtcr r3
809 mtlr r4
810 mtctr r5
811 mtxer r6
812 REST_GPR(0, r1)
813 REST_8GPRS(2, r1)
814
815 mfmsr r10
816 rldicl r10,r10,48,1 /* clear EE */
817 rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */
818 mtmsrd r10,1
819
820 mtspr SPRN_SRR1,r12
821 mtspr SPRN_SRR0,r11
822 REST_4GPRS(10, r1)
823 ld r1,GPR1(r1)
824 rfid
825 b . /* prevent speculative execution */
826
827unrecov_fer:
828 bl .save_nvgprs
8291: addi r3,r1,STACK_FRAME_OVERHEAD
830 bl .unrecoverable_exception
831 b 1b
832
769#ifdef CONFIG_ALTIVEC 833#ifdef CONFIG_ALTIVEC
770/* 834/*
771 * load_up_altivec(unused, unused, tsk) 835 * load_up_altivec(unused, unused, tsk)
@@ -840,22 +904,6 @@ _STATIC(load_up_altivec)
840 blr 904 blr
841#endif /* CONFIG_ALTIVEC */ 905#endif /* CONFIG_ALTIVEC */
842 906
843 .align 7
844 .globl vsx_unavailable_common
845vsx_unavailable_common:
846 EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
847#ifdef CONFIG_VSX
848BEGIN_FTR_SECTION
849 bne .load_up_vsx
8501:
851END_FTR_SECTION_IFSET(CPU_FTR_VSX)
852#endif
853 bl .save_nvgprs
854 addi r3,r1,STACK_FRAME_OVERHEAD
855 ENABLE_INTS
856 bl .vsx_unavailable_exception
857 b .ret_from_except
858
859#ifdef CONFIG_VSX 907#ifdef CONFIG_VSX
860/* 908/*
861 * load_up_vsx(unused, unused, tsk) 909 * load_up_vsx(unused, unused, tsk)
@@ -1175,11 +1223,14 @@ _GLOBAL(generic_secondary_smp_init)
1175 /* turn on 64-bit mode */ 1223 /* turn on 64-bit mode */
1176 bl .enable_64b_mode 1224 bl .enable_64b_mode
1177 1225
1226 /* get the TOC pointer (real address) */
1227 bl .relative_toc
1228
1178 /* Set up a paca value for this processor. Since we have the 1229 /* Set up a paca value for this processor. Since we have the
1179 * physical cpu id in r24, we need to search the pacas to find 1230 * physical cpu id in r24, we need to search the pacas to find
1180 * which logical id maps to our physical one. 1231 * which logical id maps to our physical one.
1181 */ 1232 */
1182 LOAD_REG_IMMEDIATE(r13, paca) /* Get base vaddr of paca array */ 1233 LOAD_REG_ADDR(r13, paca) /* Get base vaddr of paca array */
1183 li r5,0 /* logical cpu id */ 1234 li r5,0 /* logical cpu id */
11841: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */ 12351: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */
1185 cmpw r6,r24 /* Compare to our id */ 1236 cmpw r6,r24 /* Compare to our id */
@@ -1208,7 +1259,7 @@ _GLOBAL(generic_secondary_smp_init)
1208 sync /* order paca.run and cur_cpu_spec */ 1259 sync /* order paca.run and cur_cpu_spec */
1209 1260
1210 /* See if we need to call a cpu state restore handler */ 1261 /* See if we need to call a cpu state restore handler */
1211 LOAD_REG_IMMEDIATE(r23, cur_cpu_spec) 1262 LOAD_REG_ADDR(r23, cur_cpu_spec)
1212 ld r23,0(r23) 1263 ld r23,0(r23)
1213 ld r23,CPU_SPEC_RESTORE(r23) 1264 ld r23,CPU_SPEC_RESTORE(r23)
1214 cmpdi 0,r23,0 1265 cmpdi 0,r23,0
@@ -1224,10 +1275,15 @@ _GLOBAL(generic_secondary_smp_init)
1224 b __secondary_start 1275 b __secondary_start
1225#endif 1276#endif
1226 1277
1278/*
1279 * Turn the MMU off.
1280 * Assumes we're mapped EA == RA if the MMU is on.
1281 */
1227_STATIC(__mmu_off) 1282_STATIC(__mmu_off)
1228 mfmsr r3 1283 mfmsr r3
1229 andi. r0,r3,MSR_IR|MSR_DR 1284 andi. r0,r3,MSR_IR|MSR_DR
1230 beqlr 1285 beqlr
1286 mflr r4
1231 andc r3,r3,r0 1287 andc r3,r3,r0
1232 mtspr SPRN_SRR0,r4 1288 mtspr SPRN_SRR0,r4
1233 mtspr SPRN_SRR1,r3 1289 mtspr SPRN_SRR1,r3
@@ -1248,6 +1304,18 @@ _STATIC(__mmu_off)
1248 * 1304 *
1249 */ 1305 */
1250_GLOBAL(__start_initialization_multiplatform) 1306_GLOBAL(__start_initialization_multiplatform)
1307 /* Make sure we are running in 64 bits mode */
1308 bl .enable_64b_mode
1309
1310 /* Get TOC pointer (current runtime address) */
1311 bl .relative_toc
1312
1313 /* find out where we are now */
1314 bcl 20,31,$+4
13150: mflr r26 /* r26 = runtime addr here */
1316 addis r26,r26,(_stext - 0b)@ha
1317 addi r26,r26,(_stext - 0b)@l /* current runtime base addr */
1318
1251 /* 1319 /*
1252 * Are we booted from a PROM Of-type client-interface ? 1320 * Are we booted from a PROM Of-type client-interface ?
1253 */ 1321 */
@@ -1259,9 +1327,6 @@ _GLOBAL(__start_initialization_multiplatform)
1259 mr r31,r3 1327 mr r31,r3
1260 mr r30,r4 1328 mr r30,r4
1261 1329
1262 /* Make sure we are running in 64 bits mode */
1263 bl .enable_64b_mode
1264
1265 /* Setup some critical 970 SPRs before switching MMU off */ 1330 /* Setup some critical 970 SPRs before switching MMU off */
1266 mfspr r0,SPRN_PVR 1331 mfspr r0,SPRN_PVR
1267 srwi r0,r0,16 1332 srwi r0,r0,16
@@ -1276,9 +1341,7 @@ _GLOBAL(__start_initialization_multiplatform)
12761: bl .__cpu_preinit_ppc970 13411: bl .__cpu_preinit_ppc970
12772: 13422:
1278 1343
1279 /* Switch off MMU if not already */ 1344 /* Switch off MMU if not already off */
1280 LOAD_REG_IMMEDIATE(r4, .__after_prom_start - KERNELBASE)
1281 add r4,r4,r30
1282 bl .__mmu_off 1345 bl .__mmu_off
1283 b .__after_prom_start 1346 b .__after_prom_start
1284 1347
@@ -1293,22 +1356,15 @@ _INIT_STATIC(__boot_from_prom)
1293 /* 1356 /*
1294 * Align the stack to 16-byte boundary 1357 * Align the stack to 16-byte boundary
1295 * Depending on the size and layout of the ELF sections in the initial 1358 * Depending on the size and layout of the ELF sections in the initial
1296 * boot binary, the stack pointer will be unalignet on PowerMac 1359 * boot binary, the stack pointer may be unaligned on PowerMac
1297 */ 1360 */
1298 rldicr r1,r1,0,59 1361 rldicr r1,r1,0,59
1299 1362
1300 /* Make sure we are running in 64 bits mode */ 1363#ifdef CONFIG_RELOCATABLE
1301 bl .enable_64b_mode 1364 /* Relocate code for where we are now */
1302 1365 mr r3,r26
1303 /* put a relocation offset into r3 */ 1366 bl .relocate
1304 bl .reloc_offset 1367#endif
1305
1306 LOAD_REG_IMMEDIATE(r2,__toc_start)
1307 addi r2,r2,0x4000
1308 addi r2,r2,0x4000
1309
1310 /* Relocate the TOC from a virt addr to a real addr */
1311 add r2,r2,r3
1312 1368
1313 /* Restore parameters */ 1369 /* Restore parameters */
1314 mr r3,r31 1370 mr r3,r31
@@ -1318,60 +1374,51 @@ _INIT_STATIC(__boot_from_prom)
1318 mr r7,r27 1374 mr r7,r27
1319 1375
1320 /* Do all of the interaction with OF client interface */ 1376 /* Do all of the interaction with OF client interface */
1377 mr r8,r26
1321 bl .prom_init 1378 bl .prom_init
1322 /* We never return */ 1379 /* We never return */
1323 trap 1380 trap
1324 1381
1325_STATIC(__after_prom_start) 1382_STATIC(__after_prom_start)
1383#ifdef CONFIG_RELOCATABLE
1384 /* process relocations for the final address of the kernel */
1385 lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */
1386 sldi r25,r25,32
1387 mr r3,r25
1388 bl .relocate
1389#endif
1326 1390
1327/* 1391/*
1328 * We need to run with __start at physical address PHYSICAL_START. 1392 * We need to run with _stext at physical address PHYSICAL_START.
1329 * This will leave some code in the first 256B of 1393 * This will leave some code in the first 256B of
1330 * real memory, which are reserved for software use. 1394 * real memory, which are reserved for software use.
1331 * The remainder of the first page is loaded with the fixed
1332 * interrupt vectors. The next two pages are filled with
1333 * unknown exception placeholders.
1334 * 1395 *
1335 * Note: This process overwrites the OF exception vectors. 1396 * Note: This process overwrites the OF exception vectors.
1336 * r26 == relocation offset
1337 * r27 == KERNELBASE
1338 */ 1397 */
1339 bl .reloc_offset 1398 li r3,0 /* target addr */
1340 mr r26,r3 1399 mr. r4,r26 /* In some cases the loader may */
1341 LOAD_REG_IMMEDIATE(r27, KERNELBASE) 1400 beq 9f /* have already put us at zero */
1342 1401 lis r5,(copy_to_here - _stext)@ha
1343 LOAD_REG_IMMEDIATE(r3, PHYSICAL_START) /* target addr */ 1402 addi r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */
1344
1345 // XXX FIXME: Use phys returned by OF (r30)
1346 add r4,r27,r26 /* source addr */
1347 /* current address of _start */
1348 /* i.e. where we are running */
1349 /* the source addr */
1350
1351 cmpdi r4,0 /* In some cases the loader may */
1352 bne 1f
1353 b .start_here_multiplatform /* have already put us at zero */
1354 /* so we can skip the copy. */
13551: LOAD_REG_IMMEDIATE(r5,copy_to_here) /* # bytes of memory to copy */
1356 sub r5,r5,r27
1357
1358 li r6,0x100 /* Start offset, the first 0x100 */ 1403 li r6,0x100 /* Start offset, the first 0x100 */
1359 /* bytes were copied earlier. */ 1404 /* bytes were copied earlier. */
1360 1405
1361 bl .copy_and_flush /* copy the first n bytes */ 1406 bl .copy_and_flush /* copy the first n bytes */
1362 /* this includes the code being */ 1407 /* this includes the code being */
1363 /* executed here. */ 1408 /* executed here. */
1364 1409 addis r8,r3,(4f - _stext)@ha /* Jump to the copy of this code */
1365 LOAD_REG_IMMEDIATE(r0, 4f) /* Jump to the copy of this code */ 1410 addi r8,r8,(4f - _stext)@l /* that we just made */
1366 mtctr r0 /* that we just made/relocated */ 1411 mtctr r8
1367 bctr 1412 bctr
1368 1413
13694: LOAD_REG_IMMEDIATE(r5,klimit) 14144: /* Now copy the rest of the kernel up to _end */
1370 add r5,r5,r26 1415 addis r5,r26,(p_end - _stext)@ha
1371 ld r5,0(r5) /* get the value of klimit */ 1416 ld r5,(p_end - _stext)@l(r5) /* get _end */
1372 sub r5,r5,r27
1373 bl .copy_and_flush /* copy the rest */ 1417 bl .copy_and_flush /* copy the rest */
1374 b .start_here_multiplatform 1418
14199: b .start_here_multiplatform
1420
1421p_end: .llong _end - _stext
1375 1422
1376/* 1423/*
1377 * Copy routine used to copy the kernel to start at physical address 0 1424 * Copy routine used to copy the kernel to start at physical address 0
@@ -1436,6 +1483,9 @@ _GLOBAL(pmac_secondary_start)
1436 /* turn on 64-bit mode */ 1483 /* turn on 64-bit mode */
1437 bl .enable_64b_mode 1484 bl .enable_64b_mode
1438 1485
1486 /* get TOC pointer (real address) */
1487 bl .relative_toc
1488
1439 /* Copy some CPU settings from CPU 0 */ 1489 /* Copy some CPU settings from CPU 0 */
1440 bl .__restore_cpu_ppc970 1490 bl .__restore_cpu_ppc970
1441 1491
@@ -1445,10 +1495,10 @@ _GLOBAL(pmac_secondary_start)
1445 mtmsrd r3 /* RI on */ 1495 mtmsrd r3 /* RI on */
1446 1496
1447 /* Set up a paca value for this processor. */ 1497 /* Set up a paca value for this processor. */
1448 LOAD_REG_IMMEDIATE(r4, paca) /* Get base vaddr of paca array */ 1498 LOAD_REG_ADDR(r4,paca) /* Get base vaddr of paca array */
1449 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */ 1499 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */
1450 add r13,r13,r4 /* for this processor. */ 1500 add r13,r13,r4 /* for this processor. */
1451 mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */ 1501 mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1452 1502
1453 /* Create a temp kernel stack for use before relocation is on. */ 1503 /* Create a temp kernel stack for use before relocation is on. */
1454 ld r1,PACAEMERGSP(r13) 1504 ld r1,PACAEMERGSP(r13)
@@ -1476,9 +1526,6 @@ __secondary_start:
1476 /* Set thread priority to MEDIUM */ 1526 /* Set thread priority to MEDIUM */
1477 HMT_MEDIUM 1527 HMT_MEDIUM
1478 1528
1479 /* Load TOC */
1480 ld r2,PACATOC(r13)
1481
1482 /* Do early setup for that CPU (stab, slb, hash table pointer) */ 1529 /* Do early setup for that CPU (stab, slb, hash table pointer) */
1483 bl .early_setup_secondary 1530 bl .early_setup_secondary
1484 1531
@@ -1515,9 +1562,11 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
1515 1562
1516/* 1563/*
1517 * Running with relocation on at this point. All we want to do is 1564 * Running with relocation on at this point. All we want to do is
1518 * zero the stack back-chain pointer before going into C code. 1565 * zero the stack back-chain pointer and get the TOC virtual address
1566 * before going into C code.
1519 */ 1567 */
1520_GLOBAL(start_secondary_prolog) 1568_GLOBAL(start_secondary_prolog)
1569 ld r2,PACATOC(r13)
1521 li r3,0 1570 li r3,0
1522 std r3,0(r1) /* Zero the stack frame pointer */ 1571 std r3,0(r1) /* Zero the stack frame pointer */
1523 bl .start_secondary 1572 bl .start_secondary
@@ -1529,34 +1578,46 @@ _GLOBAL(start_secondary_prolog)
1529 */ 1578 */
1530_GLOBAL(enable_64b_mode) 1579_GLOBAL(enable_64b_mode)
1531 mfmsr r11 /* grab the current MSR */ 1580 mfmsr r11 /* grab the current MSR */
1532 li r12,1 1581 li r12,(MSR_SF | MSR_ISF)@highest
1533 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG) 1582 sldi r12,r12,48
1534 or r11,r11,r12
1535 li r12,1
1536 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
1537 or r11,r11,r12 1583 or r11,r11,r12
1538 mtmsrd r11 1584 mtmsrd r11
1539 isync 1585 isync
1540 blr 1586 blr
1541 1587
1542/* 1588/*
1589 * This puts the TOC pointer into r2, offset by 0x8000 (as expected
1590 * by the toolchain). It computes the correct value for wherever we
1591 * are running at the moment, using position-independent code.
1592 */
1593_GLOBAL(relative_toc)
1594 mflr r0
1595 bcl 20,31,$+4
15960: mflr r9
1597 ld r2,(p_toc - 0b)(r9)
1598 add r2,r2,r9
1599 mtlr r0
1600 blr
1601
1602p_toc: .llong __toc_start + 0x8000 - 0b
1603
1604/*
1543 * This is where the main kernel code starts. 1605 * This is where the main kernel code starts.
1544 */ 1606 */
1545_INIT_STATIC(start_here_multiplatform) 1607_INIT_STATIC(start_here_multiplatform)
1546 /* get a new offset, now that the kernel has moved. */ 1608 /* set up the TOC (real address) */
1547 bl .reloc_offset 1609 bl .relative_toc
1548 mr r26,r3
1549 1610
1550 /* Clear out the BSS. It may have been done in prom_init, 1611 /* Clear out the BSS. It may have been done in prom_init,
1551 * already but that's irrelevant since prom_init will soon 1612 * already but that's irrelevant since prom_init will soon
1552 * be detached from the kernel completely. Besides, we need 1613 * be detached from the kernel completely. Besides, we need
1553 * to clear it now for kexec-style entry. 1614 * to clear it now for kexec-style entry.
1554 */ 1615 */
1555 LOAD_REG_IMMEDIATE(r11,__bss_stop) 1616 LOAD_REG_ADDR(r11,__bss_stop)
1556 LOAD_REG_IMMEDIATE(r8,__bss_start) 1617 LOAD_REG_ADDR(r8,__bss_start)
1557 sub r11,r11,r8 /* bss size */ 1618 sub r11,r11,r8 /* bss size */
1558 addi r11,r11,7 /* round up to an even double word */ 1619 addi r11,r11,7 /* round up to an even double word */
1559 rldicl. r11,r11,61,3 /* shift right by 3 */ 1620 srdi. r11,r11,3 /* shift right by 3 */
1560 beq 4f 1621 beq 4f
1561 addi r8,r8,-8 1622 addi r8,r8,-8
1562 li r0,0 1623 li r0,0
@@ -1569,35 +1630,35 @@ _INIT_STATIC(start_here_multiplatform)
1569 ori r6,r6,MSR_RI 1630 ori r6,r6,MSR_RI
1570 mtmsrd r6 /* RI on */ 1631 mtmsrd r6 /* RI on */
1571 1632
1572 /* The following gets the stack and TOC set up with the regs */ 1633#ifdef CONFIG_RELOCATABLE
1634 /* Save the physical address we're running at in kernstart_addr */
1635 LOAD_REG_ADDR(r4, kernstart_addr)
1636 clrldi r0,r25,2
1637 std r0,0(r4)
1638#endif
1639
1640 /* The following gets the stack set up with the regs */
1573 /* pointing to the real addr of the kernel stack. This is */ 1641 /* pointing to the real addr of the kernel stack. This is */
1574 /* all done to support the C function call below which sets */ 1642 /* all done to support the C function call below which sets */
1575 /* up the htab. This is done because we have relocated the */ 1643 /* up the htab. This is done because we have relocated the */
1576 /* kernel but are still running in real mode. */ 1644 /* kernel but are still running in real mode. */
1577 1645
1578 LOAD_REG_IMMEDIATE(r3,init_thread_union) 1646 LOAD_REG_ADDR(r3,init_thread_union)
1579 add r3,r3,r26
1580 1647
1581 /* set up a stack pointer (physical address) */ 1648 /* set up a stack pointer */
1582 addi r1,r3,THREAD_SIZE 1649 addi r1,r3,THREAD_SIZE
1583 li r0,0 1650 li r0,0
1584 stdu r0,-STACK_FRAME_OVERHEAD(r1) 1651 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1585 1652
1586 /* set up the TOC (physical address) */
1587 LOAD_REG_IMMEDIATE(r2,__toc_start)
1588 addi r2,r2,0x4000
1589 addi r2,r2,0x4000
1590 add r2,r2,r26
1591
1592 /* Do very early kernel initializations, including initial hash table, 1653 /* Do very early kernel initializations, including initial hash table,
1593 * stab and slb setup before we turn on relocation. */ 1654 * stab and slb setup before we turn on relocation. */
1594 1655
1595 /* Restore parameters passed from prom_init/kexec */ 1656 /* Restore parameters passed from prom_init/kexec */
1596 mr r3,r31 1657 mr r3,r31
1597 bl .early_setup 1658 bl .early_setup /* also sets r13 and SPRG3 */
1598 1659
1599 LOAD_REG_IMMEDIATE(r3, .start_here_common) 1660 LOAD_REG_ADDR(r3, .start_here_common)
1600 LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) 1661 ld r4,PACAKMSR(r13)
1601 mtspr SPRN_SRR0,r3 1662 mtspr SPRN_SRR0,r3
1602 mtspr SPRN_SRR1,r4 1663 mtspr SPRN_SRR1,r4
1603 rfid 1664 rfid
@@ -1606,20 +1667,10 @@ _INIT_STATIC(start_here_multiplatform)
1606 /* This is where all platforms converge execution */ 1667 /* This is where all platforms converge execution */
1607_INIT_GLOBAL(start_here_common) 1668_INIT_GLOBAL(start_here_common)
1608 /* relocation is on at this point */ 1669 /* relocation is on at this point */
1670 std r1,PACAKSAVE(r13)
1609 1671
1610 /* The following code sets up the SP and TOC now that we are */ 1672 /* Load the TOC (virtual address) */
1611 /* running with translation enabled. */
1612
1613 LOAD_REG_IMMEDIATE(r3,init_thread_union)
1614
1615 /* set up the stack */
1616 addi r1,r3,THREAD_SIZE
1617 li r0,0
1618 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1619
1620 /* Load the TOC */
1621 ld r2,PACATOC(r13) 1673 ld r2,PACATOC(r13)
1622 std r1,PACAKSAVE(r13)
1623 1674
1624 bl .setup_system 1675 bl .setup_system
1625 1676
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 3cb52fa0eda3..590304c24dad 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -422,7 +422,6 @@ skpinv: addi r6,r6,1 /* Increment */
422 * r12 is pointer to the pte 422 * r12 is pointer to the pte
423 */ 423 */
424#ifdef CONFIG_PTE_64BIT 424#ifdef CONFIG_PTE_64BIT
425#define PTE_FLAGS_OFFSET 4
426#define FIND_PTE \ 425#define FIND_PTE \
427 rlwinm r12, r10, 13, 19, 29; /* Compute pgdir/pmd offset */ \ 426 rlwinm r12, r10, 13, 19, 29; /* Compute pgdir/pmd offset */ \
428 lwzx r11, r12, r11; /* Get pgd/pmd entry */ \ 427 lwzx r11, r12, r11; /* Get pgd/pmd entry */ \
@@ -431,7 +430,6 @@ skpinv: addi r6,r6,1 /* Increment */
431 rlwimi r12, r10, 23, 20, 28; /* Compute pte address */ \ 430 rlwimi r12, r10, 23, 20, 28; /* Compute pte address */ \
432 lwz r11, 4(r12); /* Get pte entry */ 431 lwz r11, 4(r12); /* Get pte entry */
433#else 432#else
434#define PTE_FLAGS_OFFSET 0
435#define FIND_PTE \ 433#define FIND_PTE \
436 rlwimi r11, r10, 12, 20, 29; /* Create L1 (pgdir/pmd) address */ \ 434 rlwimi r11, r10, 12, 20, 29; /* Create L1 (pgdir/pmd) address */ \
437 lwz r11, 0(r11); /* Get L1 entry */ \ 435 lwz r11, 0(r11); /* Get L1 entry */ \
@@ -579,13 +577,19 @@ interrupt_base:
579 577
580 FIND_PTE 578 FIND_PTE
581 andc. r13,r13,r11 /* Check permission */ 579 andc. r13,r13,r11 /* Check permission */
582 bne 2f /* Bail if permission mismach */
583 580
584#ifdef CONFIG_PTE_64BIT 581#ifdef CONFIG_PTE_64BIT
585 lwz r13, 0(r12) 582#ifdef CONFIG_SMP
583 subf r10,r11,r12 /* create false data dep */
584 lwzx r13,r11,r10 /* Get upper pte bits */
585#else
586 lwz r13,0(r12) /* Get upper pte bits */
587#endif
586#endif 588#endif
587 589
588 /* Jump to common tlb load */ 590 bne 2f /* Bail if permission/valid mismach */
591
592 /* Jump to common tlb load */
589 b finish_tlb_load 593 b finish_tlb_load
5902: 5942:
591 /* The bailout. Restore registers to pre-exception conditions 595 /* The bailout. Restore registers to pre-exception conditions
@@ -640,12 +644,18 @@ interrupt_base:
640 644
641 FIND_PTE 645 FIND_PTE
642 andc. r13,r13,r11 /* Check permission */ 646 andc. r13,r13,r11 /* Check permission */
643 bne 2f /* Bail if permission mismach */
644 647
645#ifdef CONFIG_PTE_64BIT 648#ifdef CONFIG_PTE_64BIT
646 lwz r13, 0(r12) 649#ifdef CONFIG_SMP
650 subf r10,r11,r12 /* create false data dep */
651 lwzx r13,r11,r10 /* Get upper pte bits */
652#else
653 lwz r13,0(r12) /* Get upper pte bits */
654#endif
647#endif 655#endif
648 656
657 bne 2f /* Bail if permission mismach */
658
649 /* Jump to common TLB load point */ 659 /* Jump to common TLB load point */
650 b finish_tlb_load 660 b finish_tlb_load
651 661
@@ -702,7 +712,7 @@ interrupt_base:
702/* 712/*
703 * Both the instruction and data TLB miss get to this 713 * Both the instruction and data TLB miss get to this
704 * point to load the TLB. 714 * point to load the TLB.
705 * r10 - EA of fault 715 * r10 - available to use
706 * r11 - TLB (info from Linux PTE) 716 * r11 - TLB (info from Linux PTE)
707 * r12 - available to use 717 * r12 - available to use
708 * r13 - upper bits of PTE (if PTE_64BIT) or available to use 718 * r13 - upper bits of PTE (if PTE_64BIT) or available to use
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index d972decf0324..ac222d0ab12e 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -439,8 +439,8 @@ void do_softirq(void)
439 439
440static LIST_HEAD(irq_hosts); 440static LIST_HEAD(irq_hosts);
441static DEFINE_SPINLOCK(irq_big_lock); 441static DEFINE_SPINLOCK(irq_big_lock);
442static DEFINE_PER_CPU(unsigned int, irq_radix_reader); 442static unsigned int revmap_trees_allocated;
443static unsigned int irq_radix_writer; 443static DEFINE_MUTEX(revmap_trees_mutex);
444struct irq_map_entry irq_map[NR_IRQS]; 444struct irq_map_entry irq_map[NR_IRQS];
445static unsigned int irq_virq_count = NR_IRQS; 445static unsigned int irq_virq_count = NR_IRQS;
446static struct irq_host *irq_default_host; 446static struct irq_host *irq_default_host;
@@ -583,57 +583,6 @@ void irq_set_virq_count(unsigned int count)
583 irq_virq_count = count; 583 irq_virq_count = count;
584} 584}
585 585
586/* radix tree not lockless safe ! we use a brlock-type mecanism
587 * for now, until we can use a lockless radix tree
588 */
589static void irq_radix_wrlock(unsigned long *flags)
590{
591 unsigned int cpu, ok;
592
593 spin_lock_irqsave(&irq_big_lock, *flags);
594 irq_radix_writer = 1;
595 smp_mb();
596 do {
597 barrier();
598 ok = 1;
599 for_each_possible_cpu(cpu) {
600 if (per_cpu(irq_radix_reader, cpu)) {
601 ok = 0;
602 break;
603 }
604 }
605 if (!ok)
606 cpu_relax();
607 } while(!ok);
608}
609
610static void irq_radix_wrunlock(unsigned long flags)
611{
612 smp_wmb();
613 irq_radix_writer = 0;
614 spin_unlock_irqrestore(&irq_big_lock, flags);
615}
616
617static void irq_radix_rdlock(unsigned long *flags)
618{
619 local_irq_save(*flags);
620 __get_cpu_var(irq_radix_reader) = 1;
621 smp_mb();
622 if (likely(irq_radix_writer == 0))
623 return;
624 __get_cpu_var(irq_radix_reader) = 0;
625 smp_wmb();
626 spin_lock(&irq_big_lock);
627 __get_cpu_var(irq_radix_reader) = 1;
628 spin_unlock(&irq_big_lock);
629}
630
631static void irq_radix_rdunlock(unsigned long flags)
632{
633 __get_cpu_var(irq_radix_reader) = 0;
634 local_irq_restore(flags);
635}
636
637static int irq_setup_virq(struct irq_host *host, unsigned int virq, 586static int irq_setup_virq(struct irq_host *host, unsigned int virq,
638 irq_hw_number_t hwirq) 587 irq_hw_number_t hwirq)
639{ 588{
@@ -788,7 +737,6 @@ void irq_dispose_mapping(unsigned int virq)
788{ 737{
789 struct irq_host *host; 738 struct irq_host *host;
790 irq_hw_number_t hwirq; 739 irq_hw_number_t hwirq;
791 unsigned long flags;
792 740
793 if (virq == NO_IRQ) 741 if (virq == NO_IRQ)
794 return; 742 return;
@@ -821,12 +769,16 @@ void irq_dispose_mapping(unsigned int virq)
821 host->revmap_data.linear.revmap[hwirq] = NO_IRQ; 769 host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
822 break; 770 break;
823 case IRQ_HOST_MAP_TREE: 771 case IRQ_HOST_MAP_TREE:
824 /* Check if radix tree allocated yet */ 772 /*
825 if (host->revmap_data.tree.gfp_mask == 0) 773 * Check if radix tree allocated yet, if not then nothing to
774 * remove.
775 */
776 smp_rmb();
777 if (revmap_trees_allocated < 1)
826 break; 778 break;
827 irq_radix_wrlock(&flags); 779 mutex_lock(&revmap_trees_mutex);
828 radix_tree_delete(&host->revmap_data.tree, hwirq); 780 radix_tree_delete(&host->revmap_data.tree, hwirq);
829 irq_radix_wrunlock(flags); 781 mutex_unlock(&revmap_trees_mutex);
830 break; 782 break;
831 } 783 }
832 784
@@ -875,43 +827,62 @@ unsigned int irq_find_mapping(struct irq_host *host,
875EXPORT_SYMBOL_GPL(irq_find_mapping); 827EXPORT_SYMBOL_GPL(irq_find_mapping);
876 828
877 829
878unsigned int irq_radix_revmap(struct irq_host *host, 830unsigned int irq_radix_revmap_lookup(struct irq_host *host,
879 irq_hw_number_t hwirq) 831 irq_hw_number_t hwirq)
880{ 832{
881 struct radix_tree_root *tree;
882 struct irq_map_entry *ptr; 833 struct irq_map_entry *ptr;
883 unsigned int virq; 834 unsigned int virq;
884 unsigned long flags;
885 835
886 WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); 836 WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
887 837
888 /* Check if the radix tree exist yet. We test the value of 838 /*
889 * the gfp_mask for that. Sneaky but saves another int in the 839 * Check if the radix tree exists and has bee initialized.
890 * structure. If not, we fallback to slow mode 840 * If not, we fallback to slow mode
891 */ 841 */
892 tree = &host->revmap_data.tree; 842 if (revmap_trees_allocated < 2)
893 if (tree->gfp_mask == 0)
894 return irq_find_mapping(host, hwirq); 843 return irq_find_mapping(host, hwirq);
895 844
896 /* Now try to resolve */ 845 /* Now try to resolve */
897 irq_radix_rdlock(&flags); 846 /*
898 ptr = radix_tree_lookup(tree, hwirq); 847 * No rcu_read_lock(ing) needed, the ptr returned can't go under us
899 irq_radix_rdunlock(flags); 848 * as it's referencing an entry in the static irq_map table.
849 */
850 ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
900 851
901 /* Found it, return */ 852 /*
902 if (ptr) { 853 * If found in radix tree, then fine.
854 * Else fallback to linear lookup - this should not happen in practice
855 * as it means that we failed to insert the node in the radix tree.
856 */
857 if (ptr)
903 virq = ptr - irq_map; 858 virq = ptr - irq_map;
904 return virq; 859 else
905 } 860 virq = irq_find_mapping(host, hwirq);
861
862 return virq;
863}
864
865void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
866 irq_hw_number_t hwirq)
867{
868
869 WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
870
871 /*
872 * Check if the radix tree exists yet.
873 * If not, then the irq will be inserted into the tree when it gets
874 * initialized.
875 */
876 smp_rmb();
877 if (revmap_trees_allocated < 1)
878 return;
906 879
907 /* If not there, try to insert it */
908 virq = irq_find_mapping(host, hwirq);
909 if (virq != NO_IRQ) { 880 if (virq != NO_IRQ) {
910 irq_radix_wrlock(&flags); 881 mutex_lock(&revmap_trees_mutex);
911 radix_tree_insert(tree, hwirq, &irq_map[virq]); 882 radix_tree_insert(&host->revmap_data.tree, hwirq,
912 irq_radix_wrunlock(flags); 883 &irq_map[virq]);
884 mutex_unlock(&revmap_trees_mutex);
913 } 885 }
914 return virq;
915} 886}
916 887
917unsigned int irq_linear_revmap(struct irq_host *host, 888unsigned int irq_linear_revmap(struct irq_host *host,
@@ -1020,14 +991,44 @@ void irq_early_init(void)
1020static int irq_late_init(void) 991static int irq_late_init(void)
1021{ 992{
1022 struct irq_host *h; 993 struct irq_host *h;
1023 unsigned long flags; 994 unsigned int i;
1024 995
1025 irq_radix_wrlock(&flags); 996 /*
997 * No mutual exclusion with respect to accessors of the tree is needed
998 * here as the synchronization is done via the state variable
999 * revmap_trees_allocated.
1000 */
1026 list_for_each_entry(h, &irq_hosts, link) { 1001 list_for_each_entry(h, &irq_hosts, link) {
1027 if (h->revmap_type == IRQ_HOST_MAP_TREE) 1002 if (h->revmap_type == IRQ_HOST_MAP_TREE)
1028 INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC); 1003 INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
1004 }
1005
1006 /*
1007 * Make sure the radix trees inits are visible before setting
1008 * the flag
1009 */
1010 smp_wmb();
1011 revmap_trees_allocated = 1;
1012
1013 /*
1014 * Insert the reverse mapping for those interrupts already present
1015 * in irq_map[].
1016 */
1017 mutex_lock(&revmap_trees_mutex);
1018 for (i = 0; i < irq_virq_count; i++) {
1019 if (irq_map[i].host &&
1020 (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
1021 radix_tree_insert(&irq_map[i].host->revmap_data.tree,
1022 irq_map[i].hwirq, &irq_map[i]);
1029 } 1023 }
1030 irq_radix_wrunlock(flags); 1024 mutex_unlock(&revmap_trees_mutex);
1025
1026 /*
1027 * Make sure the radix trees insertions are visible before setting
1028 * the flag
1029 */
1030 smp_wmb();
1031 revmap_trees_allocated = 2;
1031 1032
1032 return 0; 1033 return 0;
1033} 1034}
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index b3eef30b5131..d051e8cbcd03 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -510,10 +510,10 @@ static ssize_t update_ppp(u64 *entitlement, u8 *weight)
510 return -EINVAL; 510 return -EINVAL;
511 511
512 pr_debug("%s: current_entitled = %lu, current_weight = %u\n", 512 pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
513 __FUNCTION__, ppp_data.entitlement, ppp_data.weight); 513 __func__, ppp_data.entitlement, ppp_data.weight);
514 514
515 pr_debug("%s: new_entitled = %lu, new_weight = %u\n", 515 pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
516 __FUNCTION__, new_entitled, new_weight); 516 __func__, new_entitled, new_weight);
517 517
518 retval = plpar_hcall_norets(H_SET_PPP, new_entitled, new_weight); 518 retval = plpar_hcall_norets(H_SET_PPP, new_entitled, new_weight);
519 return retval; 519 return retval;
@@ -556,10 +556,10 @@ static ssize_t update_mpp(u64 *entitlement, u8 *weight)
556 return -EINVAL; 556 return -EINVAL;
557 557
558 pr_debug("%s: current_entitled = %lu, current_weight = %u\n", 558 pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
559 __FUNCTION__, mpp_data.entitled_mem, mpp_data.mem_weight); 559 __func__, mpp_data.entitled_mem, mpp_data.mem_weight);
560 560
561 pr_debug("%s: new_entitled = %lu, new_weight = %u\n", 561 pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
562 __FUNCTION__, new_entitled, new_weight); 562 __func__, new_entitled, new_weight);
563 563
564 rc = plpar_hcall_norets(H_SET_MPP, new_entitled, new_weight); 564 rc = plpar_hcall_norets(H_SET_MPP, new_entitled, new_weight);
565 return rc; 565 return rc;
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index 85cb6f340846..2d29752cbe16 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -31,11 +31,14 @@ _GLOBAL(reloc_offset)
31 mflr r0 31 mflr r0
32 bl 1f 32 bl 1f
331: mflr r3 331: mflr r3
34 LOAD_REG_IMMEDIATE(r4,1b) 34 PPC_LL r4,(2f-1b)(r3)
35 subf r3,r4,r3 35 subf r3,r4,r3
36 mtlr r0 36 mtlr r0
37 blr 37 blr
38 38
39 .align 3
402: PPC_LONG 1b
41
39/* 42/*
40 * add_reloc_offset(x) returns x + reloc_offset(). 43 * add_reloc_offset(x) returns x + reloc_offset().
41 */ 44 */
@@ -43,12 +46,15 @@ _GLOBAL(add_reloc_offset)
43 mflr r0 46 mflr r0
44 bl 1f 47 bl 1f
451: mflr r5 481: mflr r5
46 LOAD_REG_IMMEDIATE(r4,1b) 49 PPC_LL r4,(2f-1b)(r5)
47 subf r5,r4,r5 50 subf r5,r4,r5
48 add r3,r3,r5 51 add r3,r3,r5
49 mtlr r0 52 mtlr r0
50 blr 53 blr
51 54
55 .align 3
562: PPC_LONG 1b
57
52_GLOBAL(kernel_execve) 58_GLOBAL(kernel_execve)
53 li r0,__NR_execve 59 li r0,__NR_execve
54 sc 60 sc
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 7a6dfbca7682..6a9b4bf0d173 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -274,6 +274,10 @@ _GLOBAL(real_writeb)
274/* 274/*
275 * Flush MMU TLB 275 * Flush MMU TLB
276 */ 276 */
277#ifndef CONFIG_FSL_BOOKE
278_GLOBAL(_tlbil_all)
279_GLOBAL(_tlbil_pid)
280#endif
277_GLOBAL(_tlbia) 281_GLOBAL(_tlbia)
278#if defined(CONFIG_40x) 282#if defined(CONFIG_40x)
279 sync /* Flush to memory before changing mapping */ 283 sync /* Flush to memory before changing mapping */
@@ -344,6 +348,9 @@ _GLOBAL(_tlbia)
344/* 348/*
345 * Flush MMU TLB for a particular address 349 * Flush MMU TLB for a particular address
346 */ 350 */
351#ifndef CONFIG_FSL_BOOKE
352_GLOBAL(_tlbil_va)
353#endif
347_GLOBAL(_tlbie) 354_GLOBAL(_tlbie)
348#if defined(CONFIG_40x) 355#if defined(CONFIG_40x)
349 /* We run the search with interrupts disabled because we have to change 356 /* We run the search with interrupts disabled because we have to change
@@ -436,6 +443,53 @@ _GLOBAL(_tlbie)
436#endif /* ! CONFIG_40x */ 443#endif /* ! CONFIG_40x */
437 blr 444 blr
438 445
446#if defined(CONFIG_FSL_BOOKE)
447/*
448 * Flush MMU TLB, but only on the local processor (no broadcast)
449 */
450_GLOBAL(_tlbil_all)
451#define MMUCSR0_TLBFI (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
452 MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
453 li r3,(MMUCSR0_TLBFI)@l
454 mtspr SPRN_MMUCSR0, r3
4551:
456 mfspr r3,SPRN_MMUCSR0
457 andi. r3,r3,MMUCSR0_TLBFI@l
458 bne 1b
459 blr
460
461/*
462 * Flush MMU TLB for a particular process id, but only on the local processor
463 * (no broadcast)
464 */
465_GLOBAL(_tlbil_pid)
466/* we currently do an invalidate all since we don't have per pid invalidate */
467 li r3,(MMUCSR0_TLBFI)@l
468 mtspr SPRN_MMUCSR0, r3
4691:
470 mfspr r3,SPRN_MMUCSR0
471 andi. r3,r3,MMUCSR0_TLBFI@l
472 bne 1b
473 blr
474
475/*
476 * Flush MMU TLB for a particular address, but only on the local processor
477 * (no broadcast)
478 */
479_GLOBAL(_tlbil_va)
480 slwi r4,r4,16
481 mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
482 tlbsx 0,r3
483 mfspr r4,SPRN_MAS1 /* check valid */
484 andis. r3,r4,MAS1_VALID@h
485 beqlr
486 rlwinm r4,r4,0,1,31
487 mtspr SPRN_MAS1,r4
488 tlbwe
489 blr
490#endif /* CONFIG_FSL_BOOKE */
491
492
439/* 493/*
440 * Flush instruction cache. 494 * Flush instruction cache.
441 * This is a no-op on the 601. 495 * This is a no-op on the 601.
@@ -846,8 +900,10 @@ _GLOBAL(kernel_thread)
846 li r4,0 /* new sp (unused) */ 900 li r4,0 /* new sp (unused) */
847 li r0,__NR_clone 901 li r0,__NR_clone
848 sc 902 sc
849 cmpwi 0,r3,0 /* parent or child? */ 903 bns+ 1f /* did system call indicate error? */
850 bne 1f /* return if parent */ 904 neg r3,r3 /* if so, make return code negative */
9051: cmpwi 0,r3,0 /* parent or child? */
906 bne 2f /* return if parent */
851 li r0,0 /* make top-level stack frame */ 907 li r0,0 /* make top-level stack frame */
852 stwu r0,-16(r1) 908 stwu r0,-16(r1)
853 mtlr r30 /* fn addr in lr */ 909 mtlr r30 /* fn addr in lr */
@@ -857,7 +913,7 @@ _GLOBAL(kernel_thread)
857 li r0,__NR_exit /* exit if function returns */ 913 li r0,__NR_exit /* exit if function returns */
858 li r3,0 914 li r3,0
859 sc 915 sc
8601: lwz r30,8(r1) 9162: lwz r30,8(r1)
861 lwz r31,12(r1) 917 lwz r31,12(r1)
862 addi r1,r1,16 918 addi r1,r1,16
863 blr 919 blr
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 4dd70cf7bb4e..3053fe5c62f2 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -426,8 +426,10 @@ _GLOBAL(kernel_thread)
426 li r4,0 /* new sp (unused) */ 426 li r4,0 /* new sp (unused) */
427 li r0,__NR_clone 427 li r0,__NR_clone
428 sc 428 sc
429 cmpdi 0,r3,0 /* parent or child? */ 429 bns+ 1f /* did system call indicate error? */
430 bne 1f /* return if parent */ 430 neg r3,r3 /* if so, make return code negative */
4311: cmpdi 0,r3,0 /* parent or child? */
432 bne 2f /* return if parent */
431 li r0,0 433 li r0,0
432 stdu r0,-STACK_FRAME_OVERHEAD(r1) 434 stdu r0,-STACK_FRAME_OVERHEAD(r1)
433 ld r2,8(r29) 435 ld r2,8(r29)
@@ -438,7 +440,7 @@ _GLOBAL(kernel_thread)
438 li r0,__NR_exit /* exit after child exits */ 440 li r0,__NR_exit /* exit after child exits */
439 li r3,0 441 li r3,0
440 sc 442 sc
4411: addi r1,r1,STACK_FRAME_OVERHEAD 4432: addi r1,r1,STACK_FRAME_OVERHEAD
442 ld r29,-24(r1) 444 ld r29,-24(r1)
443 ld r30,-16(r1) 445 ld r30,-16(r1)
444 blr 446 blr
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index e9be908f199b..93ae5b169f41 100644
--- a/arch/powerpc/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -78,7 +78,7 @@ struct of_device *of_device_alloc(struct device_node *np,
78 dev->dev.parent = parent; 78 dev->dev.parent = parent;
79 dev->dev.release = of_release_dev; 79 dev->dev.release = of_release_dev;
80 dev->dev.archdata.of_node = np; 80 dev->dev.archdata.of_node = np;
81 dev->dev.archdata.numa_node = of_node_to_nid(np); 81 set_dev_node(&dev->dev, of_node_to_nid(np));
82 82
83 if (bus_id) 83 if (bus_id)
84 strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE); 84 strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index c9bf17eec31b..48a347133f41 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -12,6 +12,7 @@
12 12
13#include <asm/lppaca.h> 13#include <asm/lppaca.h>
14#include <asm/paca.h> 14#include <asm/paca.h>
15#include <asm/sections.h>
15 16
16/* This symbol is provided by the linker - let it fill in the paca 17/* This symbol is provided by the linker - let it fill in the paca
17 * field correctly */ 18 * field correctly */
@@ -79,6 +80,8 @@ void __init initialise_pacas(void)
79 new_paca->lock_token = 0x8000; 80 new_paca->lock_token = 0x8000;
80 new_paca->paca_index = cpu; 81 new_paca->paca_index = cpu;
81 new_paca->kernel_toc = kernel_toc; 82 new_paca->kernel_toc = kernel_toc;
83 new_paca->kernelbase = (unsigned long) _stext;
84 new_paca->kernel_msr = MSR_KERNEL;
82 new_paca->hw_cpu_id = 0xffff; 85 new_paca->hw_cpu_id = 0xffff;
83 new_paca->slb_shadow_ptr = &slb_shadow[cpu]; 86 new_paca->slb_shadow_ptr = &slb_shadow[cpu];
84 new_paca->__current = &init_task; 87 new_paca->__current = &init_task;
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index ea0c61e09b76..01ce8c38bae6 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -56,6 +56,34 @@ resource_size_t isa_mem_base;
56/* Default PCI flags is 0 */ 56/* Default PCI flags is 0 */
57unsigned int ppc_pci_flags; 57unsigned int ppc_pci_flags;
58 58
59static struct dma_mapping_ops *pci_dma_ops;
60
61void set_pci_dma_ops(struct dma_mapping_ops *dma_ops)
62{
63 pci_dma_ops = dma_ops;
64}
65
66struct dma_mapping_ops *get_pci_dma_ops(void)
67{
68 return pci_dma_ops;
69}
70EXPORT_SYMBOL(get_pci_dma_ops);
71
72int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
73{
74 return dma_set_mask(&dev->dev, mask);
75}
76
77int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
78{
79 int rc;
80
81 rc = dma_set_mask(&dev->dev, mask);
82 dev->dev.coherent_dma_mask = dev->dma_mask;
83
84 return rc;
85}
86
59struct pci_controller *pcibios_alloc_controller(struct device_node *dev) 87struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
60{ 88{
61 struct pci_controller *phb; 89 struct pci_controller *phb;
@@ -180,6 +208,26 @@ char __devinit *pcibios_setup(char *str)
180 return str; 208 return str;
181} 209}
182 210
211void __devinit pcibios_setup_new_device(struct pci_dev *dev)
212{
213 struct dev_archdata *sd = &dev->dev.archdata;
214
215 sd->of_node = pci_device_to_OF_node(dev);
216
217 DBG("PCI: device %s OF node: %s\n", pci_name(dev),
218 sd->of_node ? sd->of_node->full_name : "<none>");
219
220 sd->dma_ops = pci_dma_ops;
221#ifdef CONFIG_PPC32
222 sd->dma_data = (void *)PCI_DRAM_OFFSET;
223#endif
224 set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
225
226 if (ppc_md.pci_dma_dev_setup)
227 ppc_md.pci_dma_dev_setup(dev);
228}
229EXPORT_SYMBOL(pcibios_setup_new_device);
230
183/* 231/*
184 * Reads the interrupt pin to determine if interrupt is use by card. 232 * Reads the interrupt pin to determine if interrupt is use by card.
185 * If the interrupt is used, then gets the interrupt line from the 233 * If the interrupt is used, then gets the interrupt line from the
@@ -371,7 +419,7 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
371 struct pci_dev *pdev = NULL; 419 struct pci_dev *pdev = NULL;
372 struct resource *found = NULL; 420 struct resource *found = NULL;
373 unsigned long prot = pgprot_val(protection); 421 unsigned long prot = pgprot_val(protection);
374 unsigned long offset = pfn << PAGE_SHIFT; 422 resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
375 int i; 423 int i;
376 424
377 if (page_is_ram(pfn)) 425 if (page_is_ram(pfn))
@@ -422,7 +470,8 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
422int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, 470int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
423 enum pci_mmap_state mmap_state, int write_combine) 471 enum pci_mmap_state mmap_state, int write_combine)
424{ 472{
425 resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT; 473 resource_size_t offset =
474 ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
426 struct resource *rp; 475 struct resource *rp;
427 int ret; 476 int ret;
428 477
@@ -731,11 +780,6 @@ static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
731 780
732 res->start = (res->start + offset) & mask; 781 res->start = (res->start + offset) & mask;
733 res->end = (res->end + offset) & mask; 782 res->end = (res->end + offset) & mask;
734
735 pr_debug("PCI:%s %016llx-%016llx\n",
736 pci_name(dev),
737 (unsigned long long)res->start,
738 (unsigned long long)res->end);
739} 783}
740 784
741 785
@@ -781,6 +825,11 @@ static void __devinit pcibios_fixup_resources(struct pci_dev *dev)
781 (unsigned int)res->flags); 825 (unsigned int)res->flags);
782 826
783 fixup_resource(res, dev); 827 fixup_resource(res, dev);
828
829 pr_debug("PCI:%s %016llx-%016llx\n",
830 pci_name(dev),
831 (unsigned long long)res->start,
832 (unsigned long long)res->end);
784 } 833 }
785 834
786 /* Call machine specific resource fixup */ 835 /* Call machine specific resource fixup */
@@ -789,58 +838,127 @@ static void __devinit pcibios_fixup_resources(struct pci_dev *dev)
789} 838}
790DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources); 839DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
791 840
792static void __devinit __pcibios_fixup_bus(struct pci_bus *bus) 841/* This function tries to figure out if a bridge resource has been initialized
842 * by the firmware or not. It doesn't have to be absolutely bullet proof, but
843 * things go more smoothly when it gets it right. It should covers cases such
844 * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges
845 */
846static int __devinit pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
847 struct resource *res)
793{ 848{
794 struct pci_controller *hose = pci_bus_to_host(bus); 849 struct pci_controller *hose = pci_bus_to_host(bus);
795 struct pci_dev *dev = bus->self; 850 struct pci_dev *dev = bus->self;
851 resource_size_t offset;
852 u16 command;
853 int i;
796 854
797 pr_debug("PCI: Fixup bus %d (%s)\n", bus->number, dev ? pci_name(dev) : "PHB"); 855 /* We don't do anything if PCI_PROBE_ONLY is set */
856 if (ppc_pci_flags & PPC_PCI_PROBE_ONLY)
857 return 0;
798 858
799 /* Fixup PCI<->PCI bridges. Host bridges are handled separately, for 859 /* Job is a bit different between memory and IO */
800 * now differently between 32 and 64 bits. 860 if (res->flags & IORESOURCE_MEM) {
801 */ 861 /* If the BAR is non-0 (res != pci_mem_offset) then it's probably been
802 if (dev != NULL) { 862 * initialized by somebody
803 struct resource *res; 863 */
804 int i; 864 if (res->start != hose->pci_mem_offset)
865 return 0;
805 866
806 for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) { 867 /* The BAR is 0, let's check if memory decoding is enabled on
807 if ((res = bus->resource[i]) == NULL) 868 * the bridge. If not, we consider it unassigned
808 continue; 869 */
809 if (!res->flags) 870 pci_read_config_word(dev, PCI_COMMAND, &command);
810 continue; 871 if ((command & PCI_COMMAND_MEMORY) == 0)
811 if (i >= 3 && bus->self->transparent) 872 return 1;
812 continue;
813 /* On PowerMac, Apple leaves bridge windows open over
814 * an inaccessible region of memory space (0...fffff)
815 * which is somewhat bogus, but that's what they think
816 * means disabled...
817 *
818 * We clear those to force them to be reallocated later
819 *
820 * We detect such regions by the fact that the base is
821 * equal to the pci_mem_offset of the host bridge and
822 * their size is smaller than 1M.
823 */
824 if (res->flags & IORESOURCE_MEM &&
825 res->start == hose->pci_mem_offset &&
826 res->end < 0x100000) {
827 printk(KERN_INFO
828 "PCI: Closing bogus Apple Firmware"
829 " region %d on bus 0x%02x\n",
830 i, bus->number);
831 res->flags = 0;
832 continue;
833 }
834 873
835 pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n", 874 /* Memory decoding is enabled and the BAR is 0. If any of the bridge
836 pci_name(dev), i, 875 * resources covers that starting address (0 then it's good enough for
837 (unsigned long long)res->start,\ 876 * us for memory
838 (unsigned long long)res->end, 877 */
839 (unsigned int)res->flags); 878 for (i = 0; i < 3; i++) {
879 if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
880 hose->mem_resources[i].start == hose->pci_mem_offset)
881 return 0;
882 }
840 883
841 fixup_resource(res, dev); 884 /* Well, it starts at 0 and we know it will collide so we may as
885 * well consider it as unassigned. That covers the Apple case.
886 */
887 return 1;
888 } else {
889 /* If the BAR is non-0, then we consider it assigned */
890 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
891 if (((res->start - offset) & 0xfffffffful) != 0)
892 return 0;
893
894 /* Here, we are a bit different than memory as typically IO space
895 * starting at low addresses -is- valid. What we do instead if that
896 * we consider as unassigned anything that doesn't have IO enabled
897 * in the PCI command register, and that's it.
898 */
899 pci_read_config_word(dev, PCI_COMMAND, &command);
900 if (command & PCI_COMMAND_IO)
901 return 0;
902
903 /* It's starting at 0 and IO is disabled in the bridge, consider
904 * it unassigned
905 */
906 return 1;
907 }
908}
909
910/* Fixup resources of a PCI<->PCI bridge */
911static void __devinit pcibios_fixup_bridge(struct pci_bus *bus)
912{
913 struct resource *res;
914 int i;
915
916 struct pci_dev *dev = bus->self;
917
918 for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) {
919 if ((res = bus->resource[i]) == NULL)
920 continue;
921 if (!res->flags)
922 continue;
923 if (i >= 3 && bus->self->transparent)
924 continue;
925
926 pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n",
927 pci_name(dev), i,
928 (unsigned long long)res->start,\
929 (unsigned long long)res->end,
930 (unsigned int)res->flags);
931
932 /* Perform fixup */
933 fixup_resource(res, dev);
934
935 /* Try to detect uninitialized P2P bridge resources,
936 * and clear them out so they get re-assigned later
937 */
938 if (pcibios_uninitialized_bridge_resource(bus, res)) {
939 res->flags = 0;
940 pr_debug("PCI:%s (unassigned)\n", pci_name(dev));
941 } else {
942
943 pr_debug("PCI:%s %016llx-%016llx\n",
944 pci_name(dev),
945 (unsigned long long)res->start,
946 (unsigned long long)res->end);
842 } 947 }
843 } 948 }
949}
950
951static void __devinit __pcibios_fixup_bus(struct pci_bus *bus)
952{
953 struct pci_dev *dev = bus->self;
954
955 pr_debug("PCI: Fixup bus %d (%s)\n", bus->number, dev ? pci_name(dev) : "PHB");
956
957 /* Fixup PCI<->PCI bridges. Host bridges are handled separately, for
958 * now differently between 32 and 64 bits.
959 */
960 if (dev != NULL)
961 pcibios_fixup_bridge(bus);
844 962
845 /* Additional setup that is different between 32 and 64 bits for now */ 963 /* Additional setup that is different between 32 and 64 bits for now */
846 pcibios_do_bus_setup(bus); 964 pcibios_do_bus_setup(bus);
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 88db4ffaf11c..131b1dfa68c6 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -53,12 +53,19 @@ LIST_HEAD(hose_list);
53 53
54static int pci_bus_count; 54static int pci_bus_count;
55 55
56/* This will remain NULL for now, until isa-bridge.c is made common
57 * to both 32-bit and 64-bit.
58 */
59struct pci_dev *isa_bridge_pcidev;
60EXPORT_SYMBOL_GPL(isa_bridge_pcidev);
61
56static void 62static void
57fixup_hide_host_resource_fsl(struct pci_dev* dev) 63fixup_hide_host_resource_fsl(struct pci_dev *dev)
58{ 64{
59 int i, class = dev->class >> 8; 65 int i, class = dev->class >> 8;
60 66
61 if ((class == PCI_CLASS_PROCESSOR_POWERPC) && 67 if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
68 class == PCI_CLASS_BRIDGE_OTHER) &&
62 (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) && 69 (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
63 (dev->bus->parent == NULL)) { 70 (dev->bus->parent == NULL)) {
64 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 71 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -424,6 +431,7 @@ void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
424 unsigned long io_offset; 431 unsigned long io_offset;
425 struct resource *res; 432 struct resource *res;
426 int i; 433 int i;
434 struct pci_dev *dev;
427 435
428 /* Hookup PHB resources */ 436 /* Hookup PHB resources */
429 io_offset = (unsigned long)hose->io_base_virt - isa_io_base; 437 io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
@@ -457,6 +465,12 @@ void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
457 bus->resource[i+1] = res; 465 bus->resource[i+1] = res;
458 } 466 }
459 } 467 }
468
469 if (ppc_md.pci_dma_bus_setup)
470 ppc_md.pci_dma_bus_setup(bus);
471
472 list_for_each_entry(dev, &bus->devices, bus_list)
473 pcibios_setup_new_device(dev);
460} 474}
461 475
462/* the next one is stolen from the alpha port... */ 476/* the next one is stolen from the alpha port... */
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 30eedfc5a566..8247cff1cb3e 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -52,35 +52,6 @@ EXPORT_SYMBOL(pci_io_base);
52 52
53LIST_HEAD(hose_list); 53LIST_HEAD(hose_list);
54 54
55static struct dma_mapping_ops *pci_dma_ops;
56
57void set_pci_dma_ops(struct dma_mapping_ops *dma_ops)
58{
59 pci_dma_ops = dma_ops;
60}
61
62struct dma_mapping_ops *get_pci_dma_ops(void)
63{
64 return pci_dma_ops;
65}
66EXPORT_SYMBOL(get_pci_dma_ops);
67
68
69int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
70{
71 return dma_set_mask(&dev->dev, mask);
72}
73
74int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
75{
76 int rc;
77
78 rc = dma_set_mask(&dev->dev, mask);
79 dev->dev.coherent_dma_mask = dev->dma_mask;
80
81 return rc;
82}
83
84static void fixup_broken_pcnet32(struct pci_dev* dev) 55static void fixup_broken_pcnet32(struct pci_dev* dev)
85{ 56{
86 if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) { 57 if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
@@ -548,26 +519,6 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
548} 519}
549EXPORT_SYMBOL_GPL(pcibios_map_io_space); 520EXPORT_SYMBOL_GPL(pcibios_map_io_space);
550 521
551void __devinit pcibios_setup_new_device(struct pci_dev *dev)
552{
553 struct dev_archdata *sd = &dev->dev.archdata;
554
555 sd->of_node = pci_device_to_OF_node(dev);
556
557 DBG("PCI: device %s OF node: %s\n", pci_name(dev),
558 sd->of_node ? sd->of_node->full_name : "<none>");
559
560 sd->dma_ops = pci_dma_ops;
561#ifdef CONFIG_NUMA
562 sd->numa_node = pcibus_to_node(dev->bus);
563#else
564 sd->numa_node = -1;
565#endif
566 if (ppc_md.pci_dma_dev_setup)
567 ppc_md.pci_dma_dev_setup(dev);
568}
569EXPORT_SYMBOL(pcibios_setup_new_device);
570
571void __devinit pcibios_do_bus_setup(struct pci_bus *bus) 522void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
572{ 523{
573 struct pci_dev *dev; 524 struct pci_dev *dev;
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index e1ea4fe5cfbd..8edc2359c419 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -119,6 +119,9 @@ EXPORT_SYMBOL(flush_instruction_cache);
119EXPORT_SYMBOL(flush_tlb_kernel_range); 119EXPORT_SYMBOL(flush_tlb_kernel_range);
120EXPORT_SYMBOL(flush_tlb_page); 120EXPORT_SYMBOL(flush_tlb_page);
121EXPORT_SYMBOL(_tlbie); 121EXPORT_SYMBOL(_tlbie);
122#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
123EXPORT_SYMBOL(_tlbil_va);
124#endif
122#endif 125#endif
123EXPORT_SYMBOL(__flush_icache_range); 126EXPORT_SYMBOL(__flush_icache_range);
124EXPORT_SYMBOL(flush_dcache_range); 127EXPORT_SYMBOL(flush_dcache_range);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 87d83c56b31e..3a2dc7e6586a 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -888,9 +888,10 @@ static u64 __init dt_mem_next_cell(int s, cell_t **cellp)
888 */ 888 */
889static int __init early_init_dt_scan_drconf_memory(unsigned long node) 889static int __init early_init_dt_scan_drconf_memory(unsigned long node)
890{ 890{
891 cell_t *dm, *ls; 891 cell_t *dm, *ls, *usm;
892 unsigned long l, n, flags; 892 unsigned long l, n, flags;
893 u64 base, size, lmb_size; 893 u64 base, size, lmb_size;
894 unsigned int is_kexec_kdump = 0, rngs;
894 895
895 ls = (cell_t *)of_get_flat_dt_prop(node, "ibm,lmb-size", &l); 896 ls = (cell_t *)of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
896 if (ls == NULL || l < dt_root_size_cells * sizeof(cell_t)) 897 if (ls == NULL || l < dt_root_size_cells * sizeof(cell_t))
@@ -905,6 +906,12 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
905 if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(cell_t)) 906 if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(cell_t))
906 return 0; 907 return 0;
907 908
909 /* check if this is a kexec/kdump kernel. */
910 usm = (cell_t *)of_get_flat_dt_prop(node, "linux,drconf-usable-memory",
911 &l);
912 if (usm != NULL)
913 is_kexec_kdump = 1;
914
908 for (; n != 0; --n) { 915 for (; n != 0; --n) {
909 base = dt_mem_next_cell(dt_root_addr_cells, &dm); 916 base = dt_mem_next_cell(dt_root_addr_cells, &dm);
910 flags = dm[3]; 917 flags = dm[3];
@@ -915,13 +922,34 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
915 if ((flags & 0x80) || !(flags & 0x8)) 922 if ((flags & 0x80) || !(flags & 0x8))
916 continue; 923 continue;
917 size = lmb_size; 924 size = lmb_size;
918 if (iommu_is_off) { 925 rngs = 1;
919 if (base >= 0x80000000ul) 926 if (is_kexec_kdump) {
927 /*
928 * For each lmb in ibm,dynamic-memory, a corresponding
929 * entry in linux,drconf-usable-memory property contains
 930 * entry in linux,drconf-usable-memory property contains
931 * Now read the counter from
932 * linux,drconf-usable-memory property
933 */
934 rngs = dt_mem_next_cell(dt_root_size_cells, &usm);
 935 if (!rngs) /* there are no (base, size) duples */
920 continue; 936 continue;
921 if ((base + size) > 0x80000000ul)
922 size = 0x80000000ul - base;
923 } 937 }
924 lmb_add(base, size); 938 do {
939 if (is_kexec_kdump) {
940 base = dt_mem_next_cell(dt_root_addr_cells,
941 &usm);
942 size = dt_mem_next_cell(dt_root_size_cells,
943 &usm);
944 }
945 if (iommu_is_off) {
946 if (base >= 0x80000000ul)
947 continue;
948 if ((base + size) > 0x80000000ul)
949 size = 0x80000000ul - base;
950 }
951 lmb_add(base, size);
952 } while (--rngs);
925 } 953 }
926 lmb_dump_all(); 954 lmb_dump_all();
927 return 0; 955 return 0;
@@ -1164,6 +1192,9 @@ void __init early_init_devtree(void *params)
1164 1192
1165 /* Reserve LMB regions used by kernel, initrd, dt, etc... */ 1193 /* Reserve LMB regions used by kernel, initrd, dt, etc... */
1166 lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START); 1194 lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
1195 /* If relocatable, reserve first 32k for interrupt vectors etc. */
1196 if (PHYSICAL_START > MEMORY_START)
1197 lmb_reserve(MEMORY_START, 0x8000);
1167 reserve_kdump_trampoline(); 1198 reserve_kdump_trampoline();
1168 reserve_crashkernel(); 1199 reserve_crashkernel();
1169 early_reserve_mem(); 1200 early_reserve_mem();
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index b72849ac7db3..2fdbc18ae94a 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -732,7 +732,7 @@ static struct fake_elf {
732 u32 ignore_me; 732 u32 ignore_me;
733 } rpadesc; 733 } rpadesc;
734 } rpanote; 734 } rpanote;
735} fake_elf = { 735} fake_elf __section(.fakeelf) = {
736 .elfhdr = { 736 .elfhdr = {
737 .e_ident = { 0x7f, 'E', 'L', 'F', 737 .e_ident = { 0x7f, 'E', 'L', 'F',
738 ELFCLASS32, ELFDATA2MSB, EV_CURRENT }, 738 ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
@@ -774,13 +774,13 @@ static struct fake_elf {
774 .type = 0x12759999, 774 .type = 0x12759999,
775 .name = "IBM,RPA-Client-Config", 775 .name = "IBM,RPA-Client-Config",
776 .rpadesc = { 776 .rpadesc = {
777 .lpar_affinity = 0, 777 .lpar_affinity = 1,
778 .min_rmo_size = 64, /* in megabytes */ 778 .min_rmo_size = 128, /* in megabytes */
779 .min_rmo_percent = 0, 779 .min_rmo_percent = 0,
780 .max_pft_size = 48, /* 2^48 bytes max PFT size */ 780 .max_pft_size = 46, /* 2^46 bytes max PFT size */
781 .splpar = 1, 781 .splpar = 1,
782 .min_load = ~0U, 782 .min_load = ~0U,
783 .new_mem_def = 0 783 .new_mem_def = 1
784 } 784 }
785 } 785 }
786}; 786};
@@ -1321,7 +1321,7 @@ static void __init prom_initialize_tce_table(void)
1321 * 1321 *
1322 * -- Cort 1322 * -- Cort
1323 */ 1323 */
1324extern void __secondary_hold(void); 1324extern char __secondary_hold;
1325extern unsigned long __secondary_hold_spinloop; 1325extern unsigned long __secondary_hold_spinloop;
1326extern unsigned long __secondary_hold_acknowledge; 1326extern unsigned long __secondary_hold_acknowledge;
1327 1327
@@ -1342,13 +1342,7 @@ static void __init prom_hold_cpus(void)
1342 = (void *) LOW_ADDR(__secondary_hold_spinloop); 1342 = (void *) LOW_ADDR(__secondary_hold_spinloop);
1343 unsigned long *acknowledge 1343 unsigned long *acknowledge
1344 = (void *) LOW_ADDR(__secondary_hold_acknowledge); 1344 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
1345#ifdef CONFIG_PPC64
1346 /* __secondary_hold is actually a descriptor, not the text address */
1347 unsigned long secondary_hold
1348 = __pa(*PTRRELOC((unsigned long *)__secondary_hold));
1349#else
1350 unsigned long secondary_hold = LOW_ADDR(__secondary_hold); 1345 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
1351#endif
1352 1346
1353 prom_debug("prom_hold_cpus: start...\n"); 1347 prom_debug("prom_hold_cpus: start...\n");
1354 prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop); 1348 prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
@@ -2315,13 +2309,14 @@ static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
2315 2309
2316unsigned long __init prom_init(unsigned long r3, unsigned long r4, 2310unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2317 unsigned long pp, 2311 unsigned long pp,
2318 unsigned long r6, unsigned long r7) 2312 unsigned long r6, unsigned long r7,
2313 unsigned long kbase)
2319{ 2314{
2320 struct prom_t *_prom; 2315 struct prom_t *_prom;
2321 unsigned long hdr; 2316 unsigned long hdr;
2322 unsigned long offset = reloc_offset();
2323 2317
2324#ifdef CONFIG_PPC32 2318#ifdef CONFIG_PPC32
2319 unsigned long offset = reloc_offset();
2325 reloc_got2(offset); 2320 reloc_got2(offset);
2326#endif 2321#endif
2327 2322
@@ -2355,9 +2350,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2355 */ 2350 */
2356 RELOC(of_platform) = prom_find_machine_type(); 2351 RELOC(of_platform) = prom_find_machine_type();
2357 2352
2353#ifndef CONFIG_RELOCATABLE
2358 /* Bail if this is a kdump kernel. */ 2354 /* Bail if this is a kdump kernel. */
2359 if (PHYSICAL_START > 0) 2355 if (PHYSICAL_START > 0)
2360 prom_panic("Error: You can't boot a kdump kernel from OF!\n"); 2356 prom_panic("Error: You can't boot a kdump kernel from OF!\n");
2357#endif
2361 2358
2362 /* 2359 /*
2363 * Check for an initrd 2360 * Check for an initrd
@@ -2377,7 +2374,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2377 * Copy the CPU hold code 2374 * Copy the CPU hold code
2378 */ 2375 */
2379 if (RELOC(of_platform) != PLATFORM_POWERMAC) 2376 if (RELOC(of_platform) != PLATFORM_POWERMAC)
2380 copy_and_flush(0, KERNELBASE + offset, 0x100, 0); 2377 copy_and_flush(0, kbase, 0x100, 0);
2381 2378
2382 /* 2379 /*
2383 * Do early parsing of command line 2380 * Do early parsing of command line
@@ -2480,7 +2477,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2480 reloc_got2(-offset); 2477 reloc_got2(-offset);
2481#endif 2478#endif
2482 2479
2483 __start(hdr, KERNELBASE + offset, 0); 2480 __start(hdr, kbase, 0);
2484 2481
2485 return 0; 2482 return 0;
2486} 2483}
diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
new file mode 100644
index 000000000000..b47a0e1ab001
--- /dev/null
+++ b/arch/powerpc/kernel/reloc_64.S
@@ -0,0 +1,87 @@
1/*
2 * Code to process dynamic relocations in the kernel.
3 *
4 * Copyright 2008 Paul Mackerras, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <asm/ppc_asm.h>
13
14RELA = 7
15RELACOUNT = 0x6ffffff9
16R_PPC64_RELATIVE = 22
17
18/*
19 * r3 = desired final address of kernel
20 */
21_GLOBAL(relocate)
22 mflr r0
23 bcl 20,31,$+4
240: mflr r12 /* r12 has runtime addr of label 0 */
25 mtlr r0
26 ld r11,(p_dyn - 0b)(r12)
27 add r11,r11,r12 /* r11 has runtime addr of .dynamic section */
28 ld r9,(p_rela - 0b)(r12)
29 add r9,r9,r12 /* r9 has runtime addr of .rela.dyn section */
30 ld r10,(p_st - 0b)(r12)
31 add r10,r10,r12 /* r10 has runtime addr of _stext */
32
33 /*
34 * Scan the dynamic section for the RELA and RELACOUNT entries.
35 */
36 li r7,0
37 li r8,0
381: ld r6,0(r11) /* get tag */
39 cmpdi r6,0
40 beq 4f /* end of list */
41 cmpdi r6,RELA
42 bne 2f
43 ld r7,8(r11) /* get RELA pointer in r7 */
44 b 3f
452: addis r6,r6,(-RELACOUNT)@ha
46 cmpdi r6,RELACOUNT@l
47 bne 3f
48 ld r8,8(r11) /* get RELACOUNT value in r8 */
493: addi r11,r11,16
50 b 1b
514: cmpdi r7,0 /* check we have both RELA and RELACOUNT */
52 cmpdi cr1,r8,0
53 beq 6f
54 beq cr1,6f
55
56 /*
57 * Work out linktime address of _stext and hence the
58 * relocation offset to be applied.
59 * cur_offset [r7] = rela.run [r9] - rela.link [r7]
60 * _stext.link [r10] = _stext.run [r10] - cur_offset [r7]
61 * final_offset [r3] = _stext.final [r3] - _stext.link [r10]
62 */
63 subf r7,r7,r9 /* cur_offset */
64 subf r10,r7,r10
65 subf r3,r10,r3 /* final_offset */
66
67 /*
68 * Run through the list of relocations and process the
69 * R_PPC64_RELATIVE ones.
70 */
71 mtctr r8
 725: lwz r0,12(r9) /* ELF64_R_TYPE(reloc->r_info) */
73 cmpwi r0,R_PPC64_RELATIVE
74 bne 6f
75 ld r6,0(r9) /* reloc->r_offset */
76 ld r0,16(r9) /* reloc->r_addend */
77 add r0,r0,r3
78 stdx r0,r7,r6
79 addi r9,r9,24
80 bdnz 5b
81
826: blr
83
84p_dyn: .llong __dynamic_start - 0b
85p_rela: .llong __rela_dyn_start - 0b
86p_st: .llong _stext - 0b
87
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 9cc5a52711e5..5ec56ff03e86 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -254,8 +254,21 @@ static int show_cpuinfo(struct seq_file *m, void *v)
254 /* If we are a Freescale core do a simple check so 254 /* If we are a Freescale core do a simple check so
 255 * we don't have to keep adding cases in the future */ 255 * we don't have to keep adding cases in the future */
256 if (PVR_VER(pvr) & 0x8000) { 256 if (PVR_VER(pvr) & 0x8000) {
257 maj = PVR_MAJ(pvr); 257 switch (PVR_VER(pvr)) {
258 min = PVR_MIN(pvr); 258 case 0x8000: /* 7441/7450/7451, Voyager */
259 case 0x8001: /* 7445/7455, Apollo 6 */
260 case 0x8002: /* 7447/7457, Apollo 7 */
261 case 0x8003: /* 7447A, Apollo 7 PM */
262 case 0x8004: /* 7448, Apollo 8 */
263 case 0x800c: /* 7410, Nitro */
264 maj = ((pvr >> 8) & 0xF);
265 min = PVR_MIN(pvr);
266 break;
267 default: /* e500/book-e */
268 maj = PVR_MAJ(pvr);
269 min = PVR_MIN(pvr);
270 break;
271 }
259 } else { 272 } else {
260 switch (PVR_VER(pvr)) { 273 switch (PVR_VER(pvr)) {
261 case 0x0020: /* 403 family */ 274 case 0x0020: /* 403 family */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 066e65c59b58..c1a27626a940 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -111,7 +111,7 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
111 * This is called very early on the boot process, after a minimal 111 * This is called very early on the boot process, after a minimal
112 * MMU environment has been set up but before MMU_init is called. 112 * MMU environment has been set up but before MMU_init is called.
113 */ 113 */
114notrace void __init machine_init(unsigned long dt_ptr, unsigned long phys) 114notrace void __init machine_init(unsigned long dt_ptr)
115{ 115{
116 /* Enable early debugging if any specified (see udbg.h) */ 116 /* Enable early debugging if any specified (see udbg.h) */
117 udbg_early_init(); 117 udbg_early_init();
@@ -209,23 +209,12 @@ EXPORT_SYMBOL(nvram_sync);
209 209
210#endif /* CONFIG_NVRAM */ 210#endif /* CONFIG_NVRAM */
211 211
212static DEFINE_PER_CPU(struct cpu, cpu_devices);
213
214int __init ppc_init(void) 212int __init ppc_init(void)
215{ 213{
216 int cpu;
217
218 /* clear the progress line */ 214 /* clear the progress line */
219 if (ppc_md.progress) 215 if (ppc_md.progress)
220 ppc_md.progress(" ", 0xffff); 216 ppc_md.progress(" ", 0xffff);
221 217
222 /* register CPU devices */
223 for_each_possible_cpu(cpu) {
224 struct cpu *c = &per_cpu(cpu_devices, cpu);
225 c->hotpluggable = 1;
226 register_cpu(c, cpu);
227 }
228
229 /* call platform init */ 218 /* call platform init */
230 if (ppc_md.init != NULL) { 219 if (ppc_md.init != NULL) {
231 ppc_md.init(); 220 ppc_md.init();
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 8b25f51f03bf..843c0af210d0 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -255,9 +255,11 @@ void early_setup_secondary(void)
255#endif /* CONFIG_SMP */ 255#endif /* CONFIG_SMP */
256 256
257#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC) 257#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
258extern unsigned long __secondary_hold_spinloop;
259extern void generic_secondary_smp_init(void);
260
258void smp_release_cpus(void) 261void smp_release_cpus(void)
259{ 262{
260 extern unsigned long __secondary_hold_spinloop;
261 unsigned long *ptr; 263 unsigned long *ptr;
262 264
263 DBG(" -> smp_release_cpus()\n"); 265 DBG(" -> smp_release_cpus()\n");
@@ -266,12 +268,11 @@ void smp_release_cpus(void)
266 * all now so they can start to spin on their individual paca 268 * all now so they can start to spin on their individual paca
267 * spinloops. For non SMP kernels, the secondary cpus never get out 269 * spinloops. For non SMP kernels, the secondary cpus never get out
268 * of the common spinloop. 270 * of the common spinloop.
269 * This is useless but harmless on iSeries, secondaries are already 271 */
270 * waiting on their paca spinloops. */
271 272
272 ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop 273 ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
273 - PHYSICAL_START); 274 - PHYSICAL_START);
274 *ptr = 1; 275 *ptr = __pa(generic_secondary_smp_init);
275 mb(); 276 mb();
276 277
277 DBG(" <- smp_release_cpus()\n"); 278 DBG(" <- smp_release_cpus()\n");
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index c27b10a1bd79..ff9f7010097d 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -101,8 +101,7 @@ void smp_message_recv(int msg)
101 generic_smp_call_function_interrupt(); 101 generic_smp_call_function_interrupt();
102 break; 102 break;
103 case PPC_MSG_RESCHEDULE: 103 case PPC_MSG_RESCHEDULE:
104 /* XXX Do we have to do this? */ 104 /* we notice need_resched on exit */
105 set_need_resched();
106 break; 105 break;
107 case PPC_MSG_CALL_FUNC_SINGLE: 106 case PPC_MSG_CALL_FUNC_SINGLE:
108 generic_smp_call_function_single_interrupt(); 107 generic_smp_call_function_single_interrupt();
diff --git a/arch/powerpc/kernel/swsusp_asm64.S b/arch/powerpc/kernel/swsusp_asm64.S
index e092c3cbdb9b..86ac1d90d02b 100644
--- a/arch/powerpc/kernel/swsusp_asm64.S
+++ b/arch/powerpc/kernel/swsusp_asm64.S
@@ -133,7 +133,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
133 133
134 cmpdi r12,0 134 cmpdi r12,0
135 beq- nothing_to_copy 135 beq- nothing_to_copy
136 li r15,512 136 li r15,PAGE_SIZE>>3
137copyloop: 137copyloop:
138 ld r13,pbe_address(r12) 138 ld r13,pbe_address(r12)
139 ld r14,pbe_orig_address(r12) 139 ld r14,pbe_orig_address(r12)
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index d98634c76060..ff7de7b0797e 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -107,14 +107,6 @@ asmlinkage long compat_sys_sysfs(u32 option, u32 arg1, u32 arg2)
107 return sys_sysfs((int)option, arg1, arg2); 107 return sys_sysfs((int)option, arg1, arg2);
108} 108}
109 109
110asmlinkage long compat_sys_pause(void)
111{
112 current->state = TASK_INTERRUPTIBLE;
113 schedule();
114
115 return -ERESTARTNOHAND;
116}
117
118static inline long get_ts32(struct timespec *o, struct compat_timeval __user *i) 110static inline long get_ts32(struct timespec *o, struct compat_timeval __user *i)
119{ 111{
120 long usec; 112 long usec;
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 56d172d16e56..86a2ffccef25 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -15,18 +15,24 @@
15#include <asm/firmware.h> 15#include <asm/firmware.h>
16#include <asm/hvcall.h> 16#include <asm/hvcall.h>
17#include <asm/prom.h> 17#include <asm/prom.h>
18#include <asm/paca.h>
19#include <asm/lppaca.h>
20#include <asm/machdep.h> 18#include <asm/machdep.h>
21#include <asm/smp.h> 19#include <asm/smp.h>
22 20
21#ifdef CONFIG_PPC64
22#include <asm/paca.h>
23#include <asm/lppaca.h>
24#endif
25
23static DEFINE_PER_CPU(struct cpu, cpu_devices); 26static DEFINE_PER_CPU(struct cpu, cpu_devices);
24 27
25static DEFINE_PER_CPU(struct kobject *, cache_toplevel); 28static DEFINE_PER_CPU(struct kobject *, cache_toplevel);
26 29
27/* SMT stuff */ 30/*
31 * SMT snooze delay stuff, 64-bit only for now
32 */
33
34#ifdef CONFIG_PPC64
28 35
29#ifdef CONFIG_PPC_MULTIPLATFORM
30/* Time in microseconds we delay before sleeping in the idle loop */ 36/* Time in microseconds we delay before sleeping in the idle loop */
31DEFINE_PER_CPU(unsigned long, smt_snooze_delay) = { 100 }; 37DEFINE_PER_CPU(unsigned long, smt_snooze_delay) = { 100 };
32 38
@@ -106,7 +112,7 @@ static int __init setup_smt_snooze_delay(char *str)
106} 112}
107__setup("smt-snooze-delay=", setup_smt_snooze_delay); 113__setup("smt-snooze-delay=", setup_smt_snooze_delay);
108 114
109#endif /* CONFIG_PPC_MULTIPLATFORM */ 115#endif /* CONFIG_PPC64 */
110 116
111/* 117/*
112 * Enabling PMCs will slow partition context switch times so we only do 118 * Enabling PMCs will slow partition context switch times so we only do
@@ -115,7 +121,7 @@ __setup("smt-snooze-delay=", setup_smt_snooze_delay);
115 121
116static DEFINE_PER_CPU(char, pmcs_enabled); 122static DEFINE_PER_CPU(char, pmcs_enabled);
117 123
118void ppc64_enable_pmcs(void) 124void ppc_enable_pmcs(void)
119{ 125{
120 /* Only need to enable them once */ 126 /* Only need to enable them once */
121 if (__get_cpu_var(pmcs_enabled)) 127 if (__get_cpu_var(pmcs_enabled))
@@ -126,8 +132,9 @@ void ppc64_enable_pmcs(void)
126 if (ppc_md.enable_pmcs) 132 if (ppc_md.enable_pmcs)
127 ppc_md.enable_pmcs(); 133 ppc_md.enable_pmcs();
128} 134}
129EXPORT_SYMBOL(ppc64_enable_pmcs); 135EXPORT_SYMBOL(ppc_enable_pmcs);
130 136
137#if defined(CONFIG_6xx) || defined(CONFIG_PPC64)
131/* XXX convert to rusty's on_one_cpu */ 138/* XXX convert to rusty's on_one_cpu */
132static unsigned long run_on_cpu(unsigned long cpu, 139static unsigned long run_on_cpu(unsigned long cpu,
133 unsigned long (*func)(unsigned long), 140 unsigned long (*func)(unsigned long),
@@ -146,6 +153,7 @@ static unsigned long run_on_cpu(unsigned long cpu,
146 153
147 return ret; 154 return ret;
148} 155}
156#endif
149 157
150#define SYSFS_PMCSETUP(NAME, ADDRESS) \ 158#define SYSFS_PMCSETUP(NAME, ADDRESS) \
151static unsigned long read_##NAME(unsigned long junk) \ 159static unsigned long read_##NAME(unsigned long junk) \
@@ -154,7 +162,7 @@ static unsigned long read_##NAME(unsigned long junk) \
154} \ 162} \
155static unsigned long write_##NAME(unsigned long val) \ 163static unsigned long write_##NAME(unsigned long val) \
156{ \ 164{ \
157 ppc64_enable_pmcs(); \ 165 ppc_enable_pmcs(); \
158 mtspr(ADDRESS, val); \ 166 mtspr(ADDRESS, val); \
159 return 0; \ 167 return 0; \
160} \ 168} \
@@ -184,28 +192,53 @@ static ssize_t __used \
184 * that are implemented on the current processor 192 * that are implemented on the current processor
185 */ 193 */
186 194
195#if defined(CONFIG_PPC64)
196#define HAS_PPC_PMC_CLASSIC 1
197#define HAS_PPC_PMC_IBM 1
198#define HAS_PPC_PMC_PA6T 1
199#elif defined(CONFIG_6xx)
200#define HAS_PPC_PMC_CLASSIC 1
201#define HAS_PPC_PMC_IBM 1
202#define HAS_PPC_PMC_G4 1
203#endif
204
205
206#ifdef HAS_PPC_PMC_CLASSIC
187SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0); 207SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0);
188SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1); 208SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1);
189SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
190SYSFS_PMCSETUP(pmc1, SPRN_PMC1); 209SYSFS_PMCSETUP(pmc1, SPRN_PMC1);
191SYSFS_PMCSETUP(pmc2, SPRN_PMC2); 210SYSFS_PMCSETUP(pmc2, SPRN_PMC2);
192SYSFS_PMCSETUP(pmc3, SPRN_PMC3); 211SYSFS_PMCSETUP(pmc3, SPRN_PMC3);
193SYSFS_PMCSETUP(pmc4, SPRN_PMC4); 212SYSFS_PMCSETUP(pmc4, SPRN_PMC4);
194SYSFS_PMCSETUP(pmc5, SPRN_PMC5); 213SYSFS_PMCSETUP(pmc5, SPRN_PMC5);
195SYSFS_PMCSETUP(pmc6, SPRN_PMC6); 214SYSFS_PMCSETUP(pmc6, SPRN_PMC6);
215
216#ifdef HAS_PPC_PMC_G4
217SYSFS_PMCSETUP(mmcr2, SPRN_MMCR2);
218#endif
219
220#ifdef CONFIG_PPC64
196SYSFS_PMCSETUP(pmc7, SPRN_PMC7); 221SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
197SYSFS_PMCSETUP(pmc8, SPRN_PMC8); 222SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
223
224SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
198SYSFS_PMCSETUP(purr, SPRN_PURR); 225SYSFS_PMCSETUP(purr, SPRN_PURR);
199SYSFS_PMCSETUP(spurr, SPRN_SPURR); 226SYSFS_PMCSETUP(spurr, SPRN_SPURR);
200SYSFS_PMCSETUP(dscr, SPRN_DSCR); 227SYSFS_PMCSETUP(dscr, SPRN_DSCR);
201 228
229static SYSDEV_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
230static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL);
231static SYSDEV_ATTR(dscr, 0600, show_dscr, store_dscr);
232static SYSDEV_ATTR(purr, 0600, show_purr, store_purr);
233#endif /* CONFIG_PPC64 */
234
235#ifdef HAS_PPC_PMC_PA6T
202SYSFS_PMCSETUP(pa6t_pmc0, SPRN_PA6T_PMC0); 236SYSFS_PMCSETUP(pa6t_pmc0, SPRN_PA6T_PMC0);
203SYSFS_PMCSETUP(pa6t_pmc1, SPRN_PA6T_PMC1); 237SYSFS_PMCSETUP(pa6t_pmc1, SPRN_PA6T_PMC1);
204SYSFS_PMCSETUP(pa6t_pmc2, SPRN_PA6T_PMC2); 238SYSFS_PMCSETUP(pa6t_pmc2, SPRN_PA6T_PMC2);
205SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3); 239SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3);
206SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4); 240SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4);
207SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5); 241SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5);
208
209#ifdef CONFIG_DEBUG_KERNEL 242#ifdef CONFIG_DEBUG_KERNEL
210SYSFS_PMCSETUP(hid0, SPRN_HID0); 243SYSFS_PMCSETUP(hid0, SPRN_HID0);
211SYSFS_PMCSETUP(hid1, SPRN_HID1); 244SYSFS_PMCSETUP(hid1, SPRN_HID1);
@@ -236,28 +269,37 @@ SYSFS_PMCSETUP(tsr1, SPRN_PA6T_TSR1);
236SYSFS_PMCSETUP(tsr2, SPRN_PA6T_TSR2); 269SYSFS_PMCSETUP(tsr2, SPRN_PA6T_TSR2);
237SYSFS_PMCSETUP(tsr3, SPRN_PA6T_TSR3); 270SYSFS_PMCSETUP(tsr3, SPRN_PA6T_TSR3);
238#endif /* CONFIG_DEBUG_KERNEL */ 271#endif /* CONFIG_DEBUG_KERNEL */
272#endif /* HAS_PPC_PMC_PA6T */
239 273
240static SYSDEV_ATTR(mmcra, 0600, show_mmcra, store_mmcra); 274#ifdef HAS_PPC_PMC_IBM
241static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL);
242static SYSDEV_ATTR(dscr, 0600, show_dscr, store_dscr);
243static SYSDEV_ATTR(purr, 0600, show_purr, store_purr);
244
245static struct sysdev_attribute ibm_common_attrs[] = { 275static struct sysdev_attribute ibm_common_attrs[] = {
246 _SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0), 276 _SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
247 _SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1), 277 _SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
248}; 278};
 279#endif /* HAS_PPC_PMC_IBM */
280
281#ifdef HAS_PPC_PMC_G4
282static struct sysdev_attribute g4_common_attrs[] = {
283 _SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
284 _SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
285 _SYSDEV_ATTR(mmcr2, 0600, show_mmcr2, store_mmcr2),
286};
287#endif /* HAS_PPC_PMC_G4 */
249 288
250static struct sysdev_attribute ibm_pmc_attrs[] = { 289static struct sysdev_attribute classic_pmc_attrs[] = {
251 _SYSDEV_ATTR(pmc1, 0600, show_pmc1, store_pmc1), 290 _SYSDEV_ATTR(pmc1, 0600, show_pmc1, store_pmc1),
252 _SYSDEV_ATTR(pmc2, 0600, show_pmc2, store_pmc2), 291 _SYSDEV_ATTR(pmc2, 0600, show_pmc2, store_pmc2),
253 _SYSDEV_ATTR(pmc3, 0600, show_pmc3, store_pmc3), 292 _SYSDEV_ATTR(pmc3, 0600, show_pmc3, store_pmc3),
254 _SYSDEV_ATTR(pmc4, 0600, show_pmc4, store_pmc4), 293 _SYSDEV_ATTR(pmc4, 0600, show_pmc4, store_pmc4),
255 _SYSDEV_ATTR(pmc5, 0600, show_pmc5, store_pmc5), 294 _SYSDEV_ATTR(pmc5, 0600, show_pmc5, store_pmc5),
256 _SYSDEV_ATTR(pmc6, 0600, show_pmc6, store_pmc6), 295 _SYSDEV_ATTR(pmc6, 0600, show_pmc6, store_pmc6),
296#ifdef CONFIG_PPC64
257 _SYSDEV_ATTR(pmc7, 0600, show_pmc7, store_pmc7), 297 _SYSDEV_ATTR(pmc7, 0600, show_pmc7, store_pmc7),
258 _SYSDEV_ATTR(pmc8, 0600, show_pmc8, store_pmc8), 298 _SYSDEV_ATTR(pmc8, 0600, show_pmc8, store_pmc8),
299#endif
259}; 300};
260 301
302#ifdef HAS_PPC_PMC_PA6T
261static struct sysdev_attribute pa6t_attrs[] = { 303static struct sysdev_attribute pa6t_attrs[] = {
262 _SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0), 304 _SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
263 _SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1), 305 _SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
@@ -298,6 +340,8 @@ static struct sysdev_attribute pa6t_attrs[] = {
298 _SYSDEV_ATTR(tsr3, 0600, show_tsr3, store_tsr3), 340 _SYSDEV_ATTR(tsr3, 0600, show_tsr3, store_tsr3),
299#endif /* CONFIG_DEBUG_KERNEL */ 341#endif /* CONFIG_DEBUG_KERNEL */
300}; 342};
343#endif /* HAS_PPC_PMC_PA6T */
344#endif /* HAS_PPC_PMC_CLASSIC */
301 345
302struct cache_desc { 346struct cache_desc {
303 struct kobject kobj; 347 struct kobject kobj;
@@ -588,23 +632,36 @@ static void __cpuinit register_cpu_online(unsigned int cpu)
588 struct sysdev_attribute *attrs, *pmc_attrs; 632 struct sysdev_attribute *attrs, *pmc_attrs;
589 int i, nattrs; 633 int i, nattrs;
590 634
635#ifdef CONFIG_PPC64
591 if (!firmware_has_feature(FW_FEATURE_ISERIES) && 636 if (!firmware_has_feature(FW_FEATURE_ISERIES) &&
592 cpu_has_feature(CPU_FTR_SMT)) 637 cpu_has_feature(CPU_FTR_SMT))
593 sysdev_create_file(s, &attr_smt_snooze_delay); 638 sysdev_create_file(s, &attr_smt_snooze_delay);
639#endif
594 640
595 /* PMC stuff */ 641 /* PMC stuff */
596 switch (cur_cpu_spec->pmc_type) { 642 switch (cur_cpu_spec->pmc_type) {
643#ifdef HAS_PPC_PMC_IBM
597 case PPC_PMC_IBM: 644 case PPC_PMC_IBM:
598 attrs = ibm_common_attrs; 645 attrs = ibm_common_attrs;
599 nattrs = sizeof(ibm_common_attrs) / sizeof(struct sysdev_attribute); 646 nattrs = sizeof(ibm_common_attrs) / sizeof(struct sysdev_attribute);
600 pmc_attrs = ibm_pmc_attrs; 647 pmc_attrs = classic_pmc_attrs;
601 break; 648 break;
649#endif /* HAS_PPC_PMC_IBM */
650#ifdef HAS_PPC_PMC_G4
651 case PPC_PMC_G4:
652 attrs = g4_common_attrs;
653 nattrs = sizeof(g4_common_attrs) / sizeof(struct sysdev_attribute);
654 pmc_attrs = classic_pmc_attrs;
655 break;
656#endif /* HAS_PPC_PMC_G4 */
657#ifdef HAS_PPC_PMC_PA6T
602 case PPC_PMC_PA6T: 658 case PPC_PMC_PA6T:
603 /* PA Semi starts counting at PMC0 */ 659 /* PA Semi starts counting at PMC0 */
604 attrs = pa6t_attrs; 660 attrs = pa6t_attrs;
605 nattrs = sizeof(pa6t_attrs) / sizeof(struct sysdev_attribute); 661 nattrs = sizeof(pa6t_attrs) / sizeof(struct sysdev_attribute);
606 pmc_attrs = NULL; 662 pmc_attrs = NULL;
607 break; 663 break;
664#endif /* HAS_PPC_PMC_PA6T */
608 default: 665 default:
609 attrs = NULL; 666 attrs = NULL;
610 nattrs = 0; 667 nattrs = 0;
@@ -618,6 +675,7 @@ static void __cpuinit register_cpu_online(unsigned int cpu)
618 for (i = 0; i < cur_cpu_spec->num_pmcs; i++) 675 for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
619 sysdev_create_file(s, &pmc_attrs[i]); 676 sysdev_create_file(s, &pmc_attrs[i]);
620 677
678#ifdef CONFIG_PPC64
621 if (cpu_has_feature(CPU_FTR_MMCRA)) 679 if (cpu_has_feature(CPU_FTR_MMCRA))
622 sysdev_create_file(s, &attr_mmcra); 680 sysdev_create_file(s, &attr_mmcra);
623 681
@@ -629,6 +687,7 @@ static void __cpuinit register_cpu_online(unsigned int cpu)
629 687
630 if (cpu_has_feature(CPU_FTR_DSCR)) 688 if (cpu_has_feature(CPU_FTR_DSCR))
631 sysdev_create_file(s, &attr_dscr); 689 sysdev_create_file(s, &attr_dscr);
690#endif /* CONFIG_PPC64 */
632 691
633 create_cache_info(s); 692 create_cache_info(s);
634} 693}
@@ -641,16 +700,9 @@ static void remove_cache_info(struct sys_device *sysdev)
641 int cpu = sysdev->id; 700 int cpu = sysdev->id;
642 701
643 cache_desc = per_cpu(cache_desc, cpu); 702 cache_desc = per_cpu(cache_desc, cpu);
644 if (cache_desc != NULL) { 703 if (cache_desc != NULL)
645 sysfs_remove_file(&cache_desc->kobj, &cache_size_attr.attr);
646 sysfs_remove_file(&cache_desc->kobj, &cache_line_size_attr.attr);
647 sysfs_remove_file(&cache_desc->kobj, &cache_type_attr.attr);
648 sysfs_remove_file(&cache_desc->kobj, &cache_level_attr.attr);
649 sysfs_remove_file(&cache_desc->kobj, &cache_nr_sets_attr.attr);
650 sysfs_remove_file(&cache_desc->kobj, &cache_assoc_attr.attr);
651
652 kobject_put(&cache_desc->kobj); 704 kobject_put(&cache_desc->kobj);
653 } 705
654 cache_toplevel = per_cpu(cache_toplevel, cpu); 706 cache_toplevel = per_cpu(cache_toplevel, cpu);
655 if (cache_toplevel != NULL) 707 if (cache_toplevel != NULL)
656 kobject_put(cache_toplevel); 708 kobject_put(cache_toplevel);
@@ -671,17 +723,28 @@ static void unregister_cpu_online(unsigned int cpu)
671 723
672 /* PMC stuff */ 724 /* PMC stuff */
673 switch (cur_cpu_spec->pmc_type) { 725 switch (cur_cpu_spec->pmc_type) {
726#ifdef HAS_PPC_PMC_IBM
674 case PPC_PMC_IBM: 727 case PPC_PMC_IBM:
675 attrs = ibm_common_attrs; 728 attrs = ibm_common_attrs;
676 nattrs = sizeof(ibm_common_attrs) / sizeof(struct sysdev_attribute); 729 nattrs = sizeof(ibm_common_attrs) / sizeof(struct sysdev_attribute);
677 pmc_attrs = ibm_pmc_attrs; 730 pmc_attrs = classic_pmc_attrs;
731 break;
732#endif /* HAS_PPC_PMC_IBM */
733#ifdef HAS_PPC_PMC_G4
734 case PPC_PMC_G4:
735 attrs = g4_common_attrs;
736 nattrs = sizeof(g4_common_attrs) / sizeof(struct sysdev_attribute);
737 pmc_attrs = classic_pmc_attrs;
678 break; 738 break;
739#endif /* HAS_PPC_PMC_G4 */
740#ifdef HAS_PPC_PMC_PA6T
679 case PPC_PMC_PA6T: 741 case PPC_PMC_PA6T:
680 /* PA Semi starts counting at PMC0 */ 742 /* PA Semi starts counting at PMC0 */
681 attrs = pa6t_attrs; 743 attrs = pa6t_attrs;
682 nattrs = sizeof(pa6t_attrs) / sizeof(struct sysdev_attribute); 744 nattrs = sizeof(pa6t_attrs) / sizeof(struct sysdev_attribute);
683 pmc_attrs = NULL; 745 pmc_attrs = NULL;
684 break; 746 break;
747#endif /* HAS_PPC_PMC_PA6T */
685 default: 748 default:
686 attrs = NULL; 749 attrs = NULL;
687 nattrs = 0; 750 nattrs = 0;
@@ -695,6 +758,7 @@ static void unregister_cpu_online(unsigned int cpu)
695 for (i = 0; i < cur_cpu_spec->num_pmcs; i++) 758 for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
696 sysdev_remove_file(s, &pmc_attrs[i]); 759 sysdev_remove_file(s, &pmc_attrs[i]);
697 760
761#ifdef CONFIG_PPC64
698 if (cpu_has_feature(CPU_FTR_MMCRA)) 762 if (cpu_has_feature(CPU_FTR_MMCRA))
699 sysdev_remove_file(s, &attr_mmcra); 763 sysdev_remove_file(s, &attr_mmcra);
700 764
@@ -706,6 +770,7 @@ static void unregister_cpu_online(unsigned int cpu)
706 770
707 if (cpu_has_feature(CPU_FTR_DSCR)) 771 if (cpu_has_feature(CPU_FTR_DSCR))
708 sysdev_remove_file(s, &attr_dscr); 772 sysdev_remove_file(s, &attr_dscr);
773#endif /* CONFIG_PPC64 */
709 774
710 remove_cache_info(s); 775 remove_cache_info(s);
711} 776}
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 2750fbab1975..434c92a85c03 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1232,7 +1232,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
1232 else 1232 else
1233 viodev->dev.archdata.dma_ops = &dma_iommu_ops; 1233 viodev->dev.archdata.dma_ops = &dma_iommu_ops;
1234 viodev->dev.archdata.dma_data = vio_build_iommu_table(viodev); 1234 viodev->dev.archdata.dma_data = vio_build_iommu_table(viodev);
1235 viodev->dev.archdata.numa_node = of_node_to_nid(of_node); 1235 set_dev_node(&viodev->dev, of_node_to_nid(of_node));
1236 1236
1237 /* init generic 'struct device' fields: */ 1237 /* init generic 'struct device' fields: */
1238 viodev->dev.parent = &vio_bus_device.dev; 1238 viodev->dev.parent = &vio_bus_device.dev;
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 9f6c1ca1739e..b39c27ed7919 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -187,6 +187,24 @@ SECTIONS
187 *(.machine.desc) 187 *(.machine.desc)
188 __machine_desc_end = . ; 188 __machine_desc_end = . ;
189 } 189 }
190 . = ALIGN(8);
191 .dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET) { *(.dynsym) }
192 .dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) }
193 .dynamic : AT(ADDR(.dynamic) - LOAD_OFFSET)
194 {
195 __dynamic_start = .;
196 *(.dynamic)
197 }
198 .hash : AT(ADDR(.hash) - LOAD_OFFSET) { *(.hash) }
199 .interp : AT(ADDR(.interp) - LOAD_OFFSET) { *(.interp) }
200 .rela.dyn : AT(ADDR(.rela.dyn) - LOAD_OFFSET)
201 {
202 __rela_dyn_start = .;
203 *(.rela*)
204 }
205
206 /* Fake ELF header containing RPA note; for addnote */
207 .fakeelf : AT(ADDR(.fakeelf) - LOAD_OFFSET) { *(.fakeelf) }
190 208
191 /* freed after init ends here */ 209 /* freed after init ends here */
192 . = ALIGN(PAGE_SIZE); 210 . = ALIGN(PAGE_SIZE);