Diffstat (limited to 'arch/powerpc/kernel')
47 files changed, 1088 insertions, 536 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index b23664a0b86c..c002b0410219 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -42,10 +42,11 @@ obj-$(CONFIG_ALTIVEC) += vecemu.o
 obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
 obj-$(CONFIG_PPC_OF) += of_device.o of_platform.o prom_parse.o
 obj-$(CONFIG_PPC_CLOCK) += clock.o
-procfs-$(CONFIG_PPC64) := proc_ppc64.o
+procfs-y := proc_powerpc.o
 obj-$(CONFIG_PROC_FS) += $(procfs-y)
 rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI) := rtas_pci.o
 obj-$(CONFIG_PPC_RTAS) += rtas.o rtas-rtc.o $(rtaspci-y-y)
+obj-$(CONFIG_PPC_RTAS_DAEMON) += rtasd.o
 obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
 obj-$(CONFIG_RTAS_PROC) += rtas-proc.o
 obj-$(CONFIG_LPARCFG) += lparcfg.o
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index a5b632e52fae..3839839f83c7 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -732,7 +732,7 @@ int fix_alignment(struct pt_regs *regs)

 #ifdef CONFIG_SPE
 if ((instr >> 26) == 0x4) {
-PPC_WARN_EMULATED(spe);
+PPC_WARN_ALIGNMENT(spe, regs);
 return emulate_spe(regs, reg, instr);
 }
 #endif
@@ -786,7 +786,7 @@ int fix_alignment(struct pt_regs *regs)
 flags |= SPLT;
 nb = 8;
 }
-PPC_WARN_EMULATED(vsx);
+PPC_WARN_ALIGNMENT(vsx, regs);
 return emulate_vsx(addr, reg, areg, regs, flags, nb);
 }
 #endif
@@ -794,7 +794,7 @@ int fix_alignment(struct pt_regs *regs)
 * the exception of DCBZ which is handled as a special case here
 */
 if (instr == DCBZ) {
-PPC_WARN_EMULATED(dcbz);
+PPC_WARN_ALIGNMENT(dcbz, regs);
 return emulate_dcbz(regs, addr);
 }
 if (unlikely(nb == 0))
@@ -804,7 +804,7 @@ int fix_alignment(struct pt_regs *regs)
 * function
 */
 if (flags & M) {
-PPC_WARN_EMULATED(multiple);
+PPC_WARN_ALIGNMENT(multiple, regs);
 return emulate_multiple(regs, addr, reg, nb,
 flags, instr, swiz);
 }
@@ -825,11 +825,11 @@ int fix_alignment(struct pt_regs *regs)

 /* Special case for 16-byte FP loads and stores */
 if (nb == 16) {
-PPC_WARN_EMULATED(fp_pair);
+PPC_WARN_ALIGNMENT(fp_pair, regs);
 return emulate_fp_pair(addr, reg, flags);
 }

-PPC_WARN_EMULATED(unaligned);
+PPC_WARN_ALIGNMENT(unaligned, regs);

 /* If we are loading, get the data from user space, else
 * get it from register values
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 0812b0f414bb..a6c2b63227b3 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -190,6 +190,11 @@ int main(void)
 DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
 DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset));
 DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+DEFINE(PACA_KVM_IN_GUEST, offsetof(struct paca_struct, kvm_in_guest));
+DEFINE(PACA_KVM_SLB, offsetof(struct paca_struct, kvm_slb));
+DEFINE(PACA_KVM_SLB_MAX, offsetof(struct paca_struct, kvm_slb_max));
+#endif
 #endif /* CONFIG_PPC64 */

 /* RTAS */
@@ -398,14 +403,24 @@ int main(void)
 DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
 DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
 DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
+
+/* book3s_64 */
+#ifdef CONFIG_PPC64
+DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
+DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
+DEFINE(VCPU_HOST_R2, offsetof(struct kvm_vcpu, arch.host_r2));
+DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
+DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
+DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
+DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
+DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
+DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
+#endif
 #endif
 #ifdef CONFIG_44x
 DEFINE(PGD_T_LOG2, PGD_T_LOG2);
 DEFINE(PTE_T_LOG2, PTE_T_LOG2);
 #endif
-#ifdef CONFIG_FSL_BOOKE
-DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
-#endif

 #ifdef CONFIG_KVM_EXIT_TIMING
 DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 0b9c9135922e..03c862b6a9c4 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -711,6 +711,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
 .cpu_setup = __setup_cpu_750,
 .machine_check = machine_check_generic,
 .platform = "ppc750",
+.oprofile_cpu_type = "ppc/750",
+.oprofile_type = PPC_OPROFILE_G4,
 },
 { /* 745/755 */
 .pvr_mask = 0xfffff000,
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 0a8439aafdd1..6f4613dd05ef 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -373,7 +373,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 hard_irq_disable();

 for_each_irq(i) {
-struct irq_desc *desc = irq_desc + i;
+struct irq_desc *desc = irq_to_desc(i);

 if (desc->status & IRQ_INPROGRESS)
 desc->chip->eoi(i);
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index e96cbbd9b449..59c928564a03 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -21,7 +21,6 @@
 #include <asm/dma.h>
 #include <asm/abs_addr.h>

-int swiotlb __read_mostly;
 unsigned int ppc_swiotlb_enable;

 /*
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 900e0eea0099..bdcb557d470a 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -551,7 +551,7 @@ restore:
 BEGIN_FW_FTR_SECTION
 ld r5,SOFTE(r1)
 FW_FTR_SECTION_ELSE
-b iseries_check_pending_irqs
+b .Liseries_check_pending_irqs
 ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
 2:
 TRACE_AND_RESTORE_IRQ(r5);
@@ -623,7 +623,7 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)

 #endif /* CONFIG_PPC_BOOK3E */

-iseries_check_pending_irqs:
+.Liseries_check_pending_irqs:
 #ifdef CONFIG_PPC_ISERIES
 ld r5,SOFTE(r1)
 cmpdi 0,r5,0
@@ -658,42 +658,43 @@ do_work:
 cmpdi r0,0
 crandc eq,cr1*4+eq,eq
 bne restore
-/* here we are preempting the current task */
-1:
-#ifdef CONFIG_TRACE_IRQFLAGS
-bl .trace_hardirqs_on
-/* Note: we just clobbered r10 which used to contain the previous
- * MSR before the hard-disabling done by the caller of do_work.
- * We don't have that value anymore, but it doesn't matter as
- * we will hard-enable unconditionally, we can just reload the
- * current MSR into r10
+
+/* Here we are preempting the current task.
+ *
+ * Ensure interrupts are soft-disabled. We also properly mark
+ * the PACA to reflect the fact that they are hard-disabled
+ * and trace the change
 */
-mfmsr r10
-#endif /* CONFIG_TRACE_IRQFLAGS */
-li r0,1
+li r0,0
 stb r0,PACASOFTIRQEN(r13)
 stb r0,PACAHARDIRQEN(r13)
+TRACE_DISABLE_INTS
+
+/* Call the scheduler with soft IRQs off */
+1: bl .preempt_schedule_irq
+
+/* Hard-disable interrupts again (and update PACA) */
 #ifdef CONFIG_PPC_BOOK3E
-wrteei 1
-bl .preempt_schedule
 wrteei 0
 #else
-ori r10,r10,MSR_EE
-mtmsrd r10,1 /* reenable interrupts */
-bl .preempt_schedule
 mfmsr r10
-clrrdi r9,r1,THREAD_SHIFT
-rldicl r10,r10,48,1 /* disable interrupts again */
+rldicl r10,r10,48,1
 rotldi r10,r10,16
 mtmsrd r10,1
 #endif /* CONFIG_PPC_BOOK3E */
+li r0,0
+stb r0,PACAHARDIRQEN(r13)
+
+/* Re-test flags and eventually loop */
+clrrdi r9,r1,THREAD_SHIFT
 ld r4,TI_FLAGS(r9)
 andi. r0,r4,_TIF_NEED_RESCHED
 bne 1b
 b restore

 user_work:
-#endif
+#endif /* CONFIG_PREEMPT */
+
 /* Enable interrupts */
 #ifdef CONFIG_PPC_BOOK3E
 wrteei 1
@@ -1038,8 +1039,7 @@ _GLOBAL(mod_return_to_handler)
 * We are in a module using the module's TOC.
 * Switch to our TOC to run inside the core kernel.
 */
-LOAD_REG_IMMEDIATE(r4,ftrace_return_to_handler)
-ld r2, 8(r4)
+ld r2, PACATOC(r13)

 bl .ftrace_return_to_handler
 nop
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 1808876edcc9..e3be98ffe2a7 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -41,6 +41,7 @@ __start_interrupts:
 . = 0x200
 _machine_check_pSeries:
 HMT_MEDIUM
+DO_KVM 0x200
 mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */
 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

@@ -48,6 +49,7 @@ _machine_check_pSeries:
 .globl data_access_pSeries
 data_access_pSeries:
 HMT_MEDIUM
+DO_KVM 0x300
 mtspr SPRN_SPRG_SCRATCH0,r13
 BEGIN_FTR_SECTION
 mfspr r13,SPRN_SPRG_PACA
@@ -77,6 +79,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_SLB)
 .globl data_access_slb_pSeries
 data_access_slb_pSeries:
 HMT_MEDIUM
+DO_KVM 0x380
 mtspr SPRN_SPRG_SCRATCH0,r13
 mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */
 std r3,PACA_EXSLB+EX_R3(r13)
@@ -115,6 +118,7 @@ data_access_slb_pSeries:
 .globl instruction_access_slb_pSeries
 instruction_access_slb_pSeries:
 HMT_MEDIUM
+DO_KVM 0x480
 mtspr SPRN_SPRG_SCRATCH0,r13
 mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */
 std r3,PACA_EXSLB+EX_R3(r13)
@@ -154,6 +158,7 @@ instruction_access_slb_pSeries:
 .globl system_call_pSeries
 system_call_pSeries:
 HMT_MEDIUM
+DO_KVM 0xc00
 BEGIN_FTR_SECTION
 cmpdi r0,0x1ebe
 beq- 1f
@@ -185,13 +190,19 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
 * prolog code of the PerformanceMonitor one. A little
 * trickery is thus necessary
 */
+performance_monitor_pSeries_1:
 . = 0xf00
+DO_KVM 0xf00
 b performance_monitor_pSeries

+altivec_unavailable_pSeries_1:
 . = 0xf20
+DO_KVM 0xf20
 b altivec_unavailable_pSeries

+vsx_unavailable_pSeries_1:
 . = 0xf40
+DO_KVM 0xf40
 b vsx_unavailable_pSeries

 #ifdef CONFIG_CBE_RAS
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index c38afdb45d7b..925807488022 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -37,6 +37,7 @@
 #include <asm/firmware.h>
 #include <asm/page_64.h>
 #include <asm/irqflags.h>
+#include <asm/kvm_book3s_64_asm.h>

 /* The physical memory is layed out such that the secondary processor
 * spin code sits at 0x0000...0x00ff. On server, the vectors follow
@@ -165,6 +166,12 @@ exception_marker:
 #include "exceptions-64s.S"
 #endif

+/* KVM trampoline code needs to be close to the interrupt handlers */
+
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include "../kvm/book3s_64_rmhandlers.S"
+#endif
+
 _GLOBAL(generic_secondary_thread_init)
 mr r24,r3

diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 6ded19d01891..678f98cd5e64 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -206,6 +206,8 @@ MachineCheck:
 EXCEPTION_PROLOG
 mfspr r4,SPRN_DAR
 stw r4,_DAR(r11)
+li r5,0x00f0
+mtspr SPRN_DAR,r5 /* Tag DAR, to be used in DTLB Error */
 mfspr r5,SPRN_DSISR
 stw r5,_DSISR(r11)
 addi r3,r1,STACK_FRAME_OVERHEAD
@@ -222,6 +224,8 @@ DataAccess:
 stw r10,_DSISR(r11)
 mr r5,r10
 mfspr r4,SPRN_DAR
+li r10,0x00f0
+mtspr SPRN_DAR,r10 /* Tag DAR, to be used in DTLB Error */
 EXC_XFER_EE_LITE(0x300, handle_page_fault)

 /* Instruction access exception.
@@ -244,6 +248,8 @@ Alignment:
 EXCEPTION_PROLOG
 mfspr r4,SPRN_DAR
 stw r4,_DAR(r11)
+li r5,0x00f0
+mtspr SPRN_DAR,r5 /* Tag DAR, to be used in DTLB Error */
 mfspr r5,SPRN_DSISR
 stw r5,_DSISR(r11)
 addi r3,r1,STACK_FRAME_OVERHEAD
@@ -333,26 +339,20 @@ InstructionTLBMiss:
 mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
 lwz r10, 0(r11) /* Get the pte */

-#ifdef CONFIG_SWAP
-/* do not set the _PAGE_ACCESSED bit of a non-present page */
-andi. r11, r10, _PAGE_PRESENT
-beq 4f
-ori r10, r10, _PAGE_ACCESSED
-mfspr r11, SPRN_MD_TWC /* get the pte address again */
-stw r10, 0(r11)
-4:
-#else
-ori r10, r10, _PAGE_ACCESSED
-stw r10, 0(r11)
-#endif
+andi. r11, r10, _PAGE_ACCESSED | _PAGE_PRESENT
+cmpwi cr0, r11, _PAGE_ACCESSED | _PAGE_PRESENT
+bne- cr0, 2f
+
+/* Clear PP lsb, 0x400 */
+rlwinm r10, r10, 0, 22, 20

 /* The Linux PTE won't go exactly into the MMU TLB.
- * Software indicator bits 21, 22 and 28 must be clear.
+ * Software indicator bits 22 and 28 must be clear.
 * Software indicator bits 24, 25, 26, and 27 must be
 * set. All other Linux PTE bits control the behavior
 * of the MMU.
 */
-2: li r11, 0x00f0
+li r11, 0x00f0
 rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
 DO_8xx_CPU6(0x2d80, r3)
 mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
@@ -365,6 +365,22 @@ InstructionTLBMiss:
 lwz r3, 8(r0)
 #endif
 rfi
+2:
+mfspr r11, SPRN_SRR1
+/* clear all error bits as TLB Miss
+ * sets a few unconditionally
+ */
+rlwinm r11, r11, 0, 0xffff
+mtspr SPRN_SRR1, r11
+
+mfspr r10, SPRN_M_TW /* Restore registers */
+lwz r11, 0(r0)
+mtcr r11
+lwz r11, 4(r0)
+#ifdef CONFIG_8xx_CPU6
+lwz r3, 8(r0)
+#endif
+b InstructionAccess

 . = 0x1200
 DataStoreTLBMiss:
@@ -406,29 +422,45 @@ DataStoreTLBMiss:
 * above.
 */
 rlwimi r11, r10, 0, 27, 27
+/* Insert the WriteThru flag into the TWC from the Linux PTE.
+ * It is bit 25 in the Linux PTE and bit 30 in the TWC
+ */
+rlwimi r11, r10, 32-5, 30, 30
 DO_8xx_CPU6(0x3b80, r3)
 mtspr SPRN_MD_TWC, r11

-#ifdef CONFIG_SWAP
-/* do not set the _PAGE_ACCESSED bit of a non-present page */
-andi. r11, r10, _PAGE_PRESENT
-beq 4f
-ori r10, r10, _PAGE_ACCESSED
-4:
-/* and update pte in table */
-#else
-ori r10, r10, _PAGE_ACCESSED
-#endif
-mfspr r11, SPRN_MD_TWC /* get the pte address again */
-stw r10, 0(r11)
+/* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set.
+ * We also need to know if the insn is a load/store, so:
+ * Clear _PAGE_PRESENT and load that which will
+ * trap into DTLB Error with store bit set accordinly.
+ */
+/* PRESENT=0x1, ACCESSED=0x20
+ * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
+ * r10 = (r10 & ~PRESENT) | r11;
+ */
+rlwinm r11, r10, 32-5, _PAGE_PRESENT
+and r11, r11, r10
+rlwimi r10, r11, 0, _PAGE_PRESENT
+
+/* Honour kernel RO, User NA */
+/* 0x200 == Extended encoding, bit 22 */
+/* r11 = (r10 & _PAGE_USER) >> 2 */
+rlwinm r11, r10, 32-2, 0x200
+or r10, r11, r10
+/* r11 = (r10 & _PAGE_RW) >> 1 */
+rlwinm r11, r10, 32-1, 0x200
+or r10, r11, r10
+/* invert RW and 0x200 bits */
+xori r10, r10, _PAGE_RW | 0x200

 /* The Linux PTE won't go exactly into the MMU TLB.
- * Software indicator bits 21, 22 and 28 must be clear.
+ * Software indicator bits 22 and 28 must be clear.
 * Software indicator bits 24, 25, 26, and 27 must be
 * set. All other Linux PTE bits control the behavior
 * of the MMU.
 */
 2: li r11, 0x00f0
+mtspr SPRN_DAR,r11 /* Tag DAR */
 rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
 DO_8xx_CPU6(0x3d80, r3)
 mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
@@ -469,97 +501,10 @@ DataTLBError:
 stw r10, 0(r0)
 stw r11, 4(r0)

-/* First, make sure this was a store operation.
- */
-mfspr r10, SPRN_DSISR
-andis. r11, r10, 0x0200 /* If set, indicates store op */
-beq 2f
-
-/* The EA of a data TLB miss is automatically stored in the MD_EPN
- * register. The EA of a data TLB error is automatically stored in
- * the DAR, but not the MD_EPN register. We must copy the 20 most
- * significant bits of the EA from the DAR to MD_EPN before we
- * start walking the page tables. We also need to copy the CASID
- * value from the M_CASID register.
- * Addendum: The EA of a data TLB error is _supposed_ to be stored
- * in DAR, but it seems that this doesn't happen in some cases, such
- * as when the error is due to a dcbi instruction to a page with a
- * TLB that doesn't have the changed bit set. In such cases, there
- * does not appear to be any way to recover the EA of the error
- * since it is neither in DAR nor MD_EPN. As a workaround, the
- * _PAGE_HWWRITE bit is set for all kernel data pages when the PTEs
- * are initialized in mapin_ram(). This will avoid the problem,
- * assuming we only use the dcbi instruction on kernel addresses.
- */
 mfspr r10, SPRN_DAR
-rlwinm r11, r10, 0, 0, 19
-ori r11, r11, MD_EVALID
-mfspr r10, SPRN_M_CASID
-rlwimi r11, r10, 0, 28, 31
-DO_8xx_CPU6(0x3780, r3)
-mtspr SPRN_MD_EPN, r11
-
-mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
-
-/* If we are faulting a kernel address, we have to use the
- * kernel page tables.
- */
-andi. r11, r10, 0x0800
-beq 3f
-lis r11, swapper_pg_dir@h
-ori r11, r11, swapper_pg_dir@l
-rlwimi r10, r11, 0, 2, 19
-3:
-lwz r11, 0(r10) /* Get the level 1 entry */
-rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
-beq 2f /* If zero, bail */
-
-/* We have a pte table, so fetch the pte from the table.
- */
-ori r11, r11, 1 /* Set valid bit in physical L2 page */
-DO_8xx_CPU6(0x3b80, r3)
-mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
-mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
-lwz r10, 0(r11) /* Get the pte */
-
-andi. r11, r10, _PAGE_RW /* Is it writeable? */
-beq 2f /* Bail out if not */
-
-/* Update 'changed', among others.
- */
-#ifdef CONFIG_SWAP
-ori r10, r10, _PAGE_DIRTY|_PAGE_HWWRITE
-/* do not set the _PAGE_ACCESSED bit of a non-present page */
-andi. r11, r10, _PAGE_PRESENT
-beq 4f
-ori r10, r10, _PAGE_ACCESSED
-4:
-#else
-ori r10, r10, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
-#endif
-mfspr r11, SPRN_MD_TWC /* Get pte address again */
-stw r10, 0(r11) /* and update pte in table */
-
-/* The Linux PTE won't go exactly into the MMU TLB.
- * Software indicator bits 21, 22 and 28 must be clear.
- * Software indicator bits 24, 25, 26, and 27 must be
- * set. All other Linux PTE bits control the behavior
- * of the MMU.
- */
-li r11, 0x00f0
-rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
-DO_8xx_CPU6(0x3d80, r3)
-mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
-
-mfspr r10, SPRN_M_TW /* Restore registers */
-lwz r11, 0(r0)
-mtcr r11
-lwz r11, 4(r0)
-#ifdef CONFIG_8xx_CPU6
-lwz r3, 8(r0)
-#endif
-rfi
-2:
+cmpwi cr0, r10, 0x00f0
+beq- FixupDAR /* must be a buggy dcbX, icbi insn. */
+DARFixed:/* Return from dcbx instruction bug workaround, r10 holds value of DAR */
 mfspr r10, SPRN_M_TW /* Restore registers */
 lwz r11, 0(r0)
 mtcr r11
@@ -588,6 +533,140 @@ DataTLBError:

 . = 0x2000

+/* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions
+ * by decoding the registers used by the dcbx instruction and adding them.
+ * DAR is set to the calculated address and r10 also holds the EA on exit.
+ */
+/* define if you don't want to use self modifying code */
+#define NO_SELF_MODIFYING_CODE
+FixupDAR:/* Entry point for dcbx workaround. */
+/* fetch instruction from memory. */
+mfspr r10, SPRN_SRR0
+DO_8xx_CPU6(0x3780, r3)
+mtspr SPRN_MD_EPN, r10
+mfspr r11, SPRN_M_TWB /* Get level 1 table entry address */
+cmplwi cr0, r11, 0x0800
+blt- 3f /* Branch if user space */
+lis r11, (swapper_pg_dir-PAGE_OFFSET)@h
+ori r11, r11, (swapper_pg_dir-PAGE_OFFSET)@l
+rlwimi r11, r10, 32-20, 0xffc /* r11 = r11&~0xffc|(r10>>20)&0xffc */
+3: lwz r11, 0(r11) /* Get the level 1 entry */
+DO_8xx_CPU6(0x3b80, r3)
+mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
+mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
+lwz r11, 0(r11) /* Get the pte */
+/* concat physical page address(r11) and page offset(r10) */
+rlwimi r11, r10, 0, 20, 31
+lwz r11,0(r11)
+/* Check if it really is a dcbx instruction. */
+/* dcbt and dcbtst does not generate DTLB Misses/Errors,
+ * no need to include them here */
+srwi r10, r11, 26 /* check if major OP code is 31 */
+cmpwi cr0, r10, 31
+bne- 141f
+rlwinm r10, r11, 0, 21, 30
+cmpwi cr0, r10, 2028 /* Is dcbz? */
+beq+ 142f
+cmpwi cr0, r10, 940 /* Is dcbi? */
+beq+ 142f
+cmpwi cr0, r10, 108 /* Is dcbst? */
+beq+ 144f /* Fix up store bit! */
+cmpwi cr0, r10, 172 /* Is dcbf? */
+beq+ 142f
+cmpwi cr0, r10, 1964 /* Is icbi? */
+beq+ 142f
+141: mfspr r10, SPRN_DAR /* r10 must hold DAR at exit */
+b DARFixed /* Nope, go back to normal TLB processing */
+
+144: mfspr r10, SPRN_DSISR
+rlwinm r10, r10,0,7,5 /* Clear store bit for buggy dcbst insn */
+mtspr SPRN_DSISR, r10
+142: /* continue, it was a dcbx, dcbi instruction. */
+#ifdef CONFIG_8xx_CPU6
+lwz r3, 8(r0) /* restore r3 from memory */
+#endif
+#ifndef NO_SELF_MODIFYING_CODE
+andis. r10,r11,0x1f /* test if reg RA is r0 */
+li r10,modified_instr@l
+dcbtst r0,r10 /* touch for store */
+rlwinm r11,r11,0,0,20 /* Zero lower 10 bits */
+oris r11,r11,640 /* Transform instr. to a "add r10,RA,RB" */
+ori r11,r11,532
+stw r11,0(r10) /* store add/and instruction */
+dcbf 0,r10 /* flush new instr. to memory. */
+icbi 0,r10 /* invalidate instr. cache line */
+lwz r11, 4(r0) /* restore r11 from memory */
+mfspr r10, SPRN_M_TW /* restore r10 from M_TW */
+isync /* Wait until new instr is loaded from memory */
+modified_instr:
+.space 4 /* this is where the add instr. is stored */
+bne+ 143f
+subf r10,r0,r10 /* r10=r10-r0, only if reg RA is r0 */
+143: mtdar r10 /* store faulting EA in DAR */
+b DARFixed /* Go back to normal TLB handling */
+#else
+mfctr r10
+mtdar r10 /* save ctr reg in DAR */
+rlwinm r10, r11, 24, 24, 28 /* offset into jump table for reg RB */
+addi r10, r10, 150f@l /* add start of table */
+mtctr r10 /* load ctr with jump address */
+xor r10, r10, r10 /* sum starts at zero */
+bctr /* jump into table */
+150:
+add r10, r10, r0 ;b 151f
+add r10, r10, r1 ;b 151f
+add r10, r10, r2 ;b 151f
+add r10, r10, r3 ;b 151f
+add r10, r10, r4 ;b 151f
+add r10, r10, r5 ;b 151f
+add r10, r10, r6 ;b 151f
+add r10, r10, r7 ;b 151f
+add r10, r10, r8 ;b 151f
+add r10, r10, r9 ;b 151f
+mtctr r11 ;b 154f /* r10 needs special handling */
+mtctr r11 ;b 153f /* r11 needs special handling */
+add r10, r10, r12 ;b 151f
+add r10, r10, r13 ;b 151f
+add r10, r10, r14 ;b 151f
+add r10, r10, r15 ;b 151f
+add r10, r10, r16 ;b 151f
+add r10, r10, r17 ;b 151f
+add r10, r10, r18 ;b 151f
+add r10, r10, r19 ;b 151f
+add r10, r10, r20 ;b 151f
+add r10, r10, r21 ;b 151f
+add r10, r10, r22 ;b 151f
+add r10, r10, r23 ;b 151f
+add r10, r10, r24 ;b 151f
+add r10, r10, r25 ;b 151f
+add r10, r10, r26 ;b 151f
+add r10, r10, r27 ;b 151f
+add r10, r10, r28 ;b 151f
+add r10, r10, r29 ;b 151f
+add r10, r10, r30 ;b 151f
+add r10, r10, r31
+151:
+rlwinm. r11,r11,19,24,28 /* offset into jump table for reg RA */
+beq 152f /* if reg RA is zero, don't add it */
+addi r11, r11, 150b@l /* add start of table */
+mtctr r11 /* load ctr with jump address */
+rlwinm r11,r11,0,16,10 /* make sure we don't execute this more than once */
+bctr /* jump into table */
+152:
+mfdar r11
+mtctr r11 /* restore ctr reg from DAR */
+mtdar r10 /* save fault EA to DAR */
+b DARFixed /* Go back to normal TLB handling */
+
+/* special handling for r10,r11 since these are modified already */
+153: lwz r11, 4(r0) /* load r11 from memory */
+b 155f
+154: mfspr r11, SPRN_M_TW /* load r10 from M_TW */
+155: add r10, r10, r11 /* add it */
+mfctr r11 /* restore r11 */
+b 151b
+#endif
+
 .globl giveup_fpu
 giveup_fpu:
 blr
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 975788ca05d2..7f4bd7f3b6af 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -944,28 +944,6 @@ _GLOBAL(__setup_e500mc_ivors)
 blr

 /*
- * extern void loadcam_entry(unsigned int index)
- *
- * Load TLBCAM[index] entry in to the L2 CAM MMU
- */
-_GLOBAL(loadcam_entry)
-lis r4,TLBCAM@ha
-addi r4,r4,TLBCAM@l
-mulli r5,r3,TLBCAM_SIZE
-add r3,r5,r4
-lwz r4,0(r3)
-mtspr SPRN_MAS0,r4
-lwz r4,4(r3)
-mtspr SPRN_MAS1,r4
-lwz r4,8(r3)
-mtspr SPRN_MAS2,r4
-lwz r4,12(r3)
-mtspr SPRN_MAS3,r4
-tlbwe
-isync
-blr
-
-/*
 * extern void giveup_altivec(struct task_struct *prev)
 *
 * The e500 core does not have an AltiVec unit.
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 88d9c1d5e5fb..049dda60e475 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -110,18 +110,16 @@ int powersave_nap;
 */
 static ctl_table powersave_nap_ctl_table[]={
 {
-.ctl_name = KERN_PPC_POWERSAVE_NAP,
 .procname = "powersave-nap",
 .data = &powersave_nap,
 .maxlen = sizeof(int),
 .mode = 0644,
-.proc_handler = &proc_dointvec,
+.proc_handler = proc_dointvec,
 },
 {}
 };
 static ctl_table powersave_nap_sysctl_root[] = {
 {
-.ctl_name = CTL_KERN,
 .procname = "kernel",
 .mode = 0555,
 .child = powersave_nap_ctl_table,
diff --git a/arch/powerpc/kernel/io.c b/arch/powerpc/kernel/io.c
index 1882bf419fa6..8dc7547c2377 100644
--- a/arch/powerpc/kernel/io.c
+++ b/arch/powerpc/kernel/io.c
@@ -161,7 +161,7 @@ void _memcpy_fromio(void *dest, const volatile void __iomem *src,
 dest++;
 n--;
 }
-while(n > 4) {
+while(n >= 4) {
 *((u32 *)dest) = *((volatile u32 *)vsrc);
 eieio();
 vsrc += 4;
@@ -190,7 +190,7 @@ void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
 vdest++;
 n--;
 }
-while(n > 4) {
+while(n >= 4) {
 *((volatile u32 *)vdest) = *((volatile u32 *)src);
 src += 4;
 vdest += 4;
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index e5d121177984..f6dca4f4b295 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -70,6 +70,8 @@
 #include <asm/firmware.h>
 #include <asm/lv1call.h>
 #endif
+#define CREATE_TRACE_POINTS
+#include <asm/trace.h>

 int __irq_offset_value;
 static int ppc_spurious_interrupts;
@@ -85,7 +87,10 @@ extern int tau_interrupts(int);
 #endif /* CONFIG_PPC32 */

 #ifdef CONFIG_PPC64
+
+#ifndef CONFIG_SPARSE_IRQ
 EXPORT_SYMBOL(irq_desc);
+#endif

 int distribute_irqs = 1;

@@ -187,33 +192,7 @@ int show_interrupts(struct seq_file *p, void *v)
 for_each_online_cpu(j)
 seq_printf(p, "CPU%d ", j);
 seq_putc(p, '\n');
-}
-
-if (i < NR_IRQS) {
-desc = get_irq_desc(i);
-spin_lock_irqsave(&desc->lock, flags);
-action = desc->action;
-if (!action || !action->handler)
-goto skip;
-seq_printf(p, "%3d: ", i);
-#ifdef CONFIG_SMP
-for_each_online_cpu(j)
-seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-#else
-seq_printf(p, "%10u ", kstat_irqs(i));
-#endif /* CONFIG_SMP */
-if (desc->chip)
-seq_printf(p, " %s ", desc->chip->typename);
-else
-seq_puts(p, " None ");
-seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge ");
-seq_printf(p, " %s", action->name);
-for (action = action->next; action; action = action->next)
-seq_printf(p, ", %s", action->name);
-seq_putc(p, '\n');
-skip:
-spin_unlock_irqrestore(&desc->lock, flags);
-} else if (i == NR_IRQS) {
+} else if (i == nr_irqs) {
 #if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
 if (tau_initialized){
 seq_puts(p, "TAU: ");
@@ -223,30 +202,68 @@ skip:
 }
 #endif /* CONFIG_PPC32 && CONFIG_TAU_INT*/
 seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
+
+return 0;
 }
+
+desc = irq_to_desc(i);
+if (!desc)
+return 0;
+
+spin_lock_irqsave(&desc->lock, flags);
+
+action = desc->action;
+if (!action || !action->handler)
+goto skip;
+
+seq_printf(p, "%3d: ", i);
+#ifdef CONFIG_SMP
+for_each_online_cpu(j)
+seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
+#else
+seq_printf(p, "%10u ", kstat_irqs(i));
+#endif /* CONFIG_SMP */
+
+if (desc->chip)
+seq_printf(p, " %s ", desc->chip->name);
+else
+seq_puts(p, " None ");
+
+seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge ");
+seq_printf(p, " %s", action->name);
+
+for (action = action->next; action; action = action->next)
+seq_printf(p, ", %s", action->name);
+seq_putc(p, '\n');
+
+skip:
+spin_unlock_irqrestore(&desc->lock, flags);
+
 return 0;
 }

 #ifdef CONFIG_HOTPLUG_CPU
 void fixup_irqs(cpumask_t map)
 {
+struct irq_desc *desc;
 unsigned int irq;
 static int warned;

 for_each_irq(irq) {
 cpumask_t mask;

-if (irq_desc[irq].status & IRQ_PER_CPU)
+desc = irq_to_desc(irq);
+if (desc && desc->status & IRQ_PER_CPU)
 continue;

-cpumask_and(&mask, irq_desc[irq].affinity, &map);
+cpumask_and(&mask, desc->affinity, &map);
 if (any_online_cpu(mask) == NR_CPUS) {
 printk("Breaking affinity for irq %i\n", irq);
 mask = map;
 }
-if (irq_desc[irq].chip->set_affinity)
-irq_desc[irq].chip->set_affinity(irq, &mask);
-else if (irq_desc[irq].action && !(warned++))
+if (desc->chip->set_affinity)
+desc->chip->set_affinity(irq, &mask);
+else if (desc->action && !(warned++))
 printk("Cannot set affinity for irq %i\n", irq);
 }

@@ -273,7 +290,7 @@ static inline void handle_one_irq(unsigned int irq)
 return;
 }

-desc = irq_desc + irq;
+desc = irq_to_desc(irq);
 saved_sp_limit = current->thread.ksp_limit;

 irqtp->task = curtp->task;
@@ -325,6 +342,8 @@ void do_IRQ(struct pt_regs *regs)
 struct pt_regs *old_regs = set_irq_regs(regs);
 unsigned int irq;

+trace_irq_entry(regs);
+
 irq_enter();

 check_stack_overflow();
@@ -348,6 +367,8 @@ void do_IRQ(struct pt_regs *regs)
 timer_interrupt(regs);
 }
 #endif
+
+trace_irq_exit(regs);
 }

 void __init init_IRQ(void)
@@ -535,7 +556,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
 smp_wmb();

 /* Clear norequest flags */
-get_irq_desc(i)->status &= ~IRQ_NOREQUEST;
+irq_to_desc(i)->status &= ~IRQ_NOREQUEST;

 /* Legacy flags are left to default at this point,
 * one can then use irq_create_mapping() to
@@ -601,8 +622,16 @@ void irq_set_virq_count(unsigned int count)
 static int irq_setup_virq(struct irq_host *host, unsigned int virq,
 irq_hw_number_t hwirq)
 {
+struct irq_desc *desc;
+
+desc = irq_to_desc_alloc_node(virq, 0);
+if (!desc) {
+pr_debug("irq: -> allocating desc failed\n");
+goto error;
+}
+
 /* Clear IRQ_NOREQUEST flag */
-get_irq_desc(virq)->status &= ~IRQ_NOREQUEST;
+desc->status &= ~IRQ_NOREQUEST;

 /* map it */
 smp_wmb();
@@ -611,11 +640,14 @@ static int irq_setup_virq(struct irq_host *host, unsigned int virq,

 if (host->ops->map(host, virq, hwirq)) {
 pr_debug("irq: -> mapping failed, freeing\n");
-irq_free_virt(virq, 1);
-return -1;
+goto error;
 }

 return 0;
+
+error:
+irq_free_virt(virq, 1);
+return -1;
 }

 unsigned int irq_create_direct_mapping(struct irq_host *host)
@@ -699,7 +731,7 @@ unsigned int irq_create_mapping(struct irq_host *host,
 EXPORT_SYMBOL_GPL(irq_create_mapping);

 unsigned int irq_create_of_mapping(struct device_node *controller,
-u32 *intspec, unsigned int intsize)
+const u32 *intspec, unsigned int intsize)
 {
 struct irq_host *host;
 irq_hw_number_t hwirq;
@@ -732,7 +764,7 @@ unsigned int irq_create_of_mapping(struct device_node *controller,

 /* Set type if specified and different than the current one */
 if (type != IRQ_TYPE_NONE &&
-type != (get_irq_desc(virq)->status & IRQF_TRIGGER_MASK))
+type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK))
 set_irq_type(virq, type);
 return virq;
 }
@@ -804,7 +836,7 @@ void irq_dispose_mapping(unsigned int virq)
 irq_map[virq].hwirq = host->inval_irq;

 /* Set some flags */
-get_irq_desc(virq)->status |= IRQ_NOREQUEST;
+irq_to_desc(virq)->status |= IRQ_NOREQUEST;

 /* Free it */
 irq_free_virt(virq, 1);
@@ -996,12 +1028,24 @@ void irq_free_virt(unsigned int virq, unsigned int count)
 spin_unlock_irqrestore(&irq_big_lock, flags);
 }

-void irq_early_init(void)
+int arch_early_irq_init(void)
 {
-unsigned int i;
+struct irq_desc *desc;
+int i;
+
+for (i = 0; i < NR_IRQS; i++) {
+desc = irq_to_desc(i);
+if (desc)
+desc->status |= IRQ_NOREQUEST;
+}
+
+return 0;
+}

-for (i = 0; i < NR_IRQS; i++)
-get_irq_desc(i)->status |= IRQ_NOREQUEST;
+int arch_init_chip_data(struct irq_desc *desc, int node)
+{
+desc->status |= IRQ_NOREQUEST;
+return 0;
 }

 /* We need to create the radix trees late */
@@ -1063,16 +1107,19 @@ static int virq_debug_show(struct seq_file *m, void *private)
 seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq",
 "chip name", "host name");

-for (i = 1; i < NR_IRQS; i++) {
-desc = get_irq_desc(i);
+for (i = 1; i < nr_irqs; i++) {
+desc = irq_to_desc(i);
+if (!desc)
+continue;
+
 spin_lock_irqsave(&desc->lock, flags);

 if (desc->action && desc->action->handler) {
 seq_printf(m, "%5d ", i);
 seq_printf(m, "0x%05lx ", virq_to_hw(i));

-if (desc->chip && desc->chip->typename)
-p = desc->chip->typename;
+if (desc->chip && desc->chip->name)
+p = desc->chip->name;
 else
 p = none;
 seq_printf(m, "%-15s ", p);
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index fe8f71dd0b3f..b6bd1eaa1c24 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -52,7 +52,7 @@ static struct hard_trap_info
 { 0x2030, 0x08 /* SIGFPE */ }, /* spe fp data */
 { 0x2040, 0x08 /* SIGFPE */ }, /* spe fp data */
 { 0x2050, 0x08 /* SIGFPE */ }, /* spe fp round */
-{ 0x2060, 0x0e /* SIGILL */ }, /* performace monitor */
+{ 0x2060, 0x0e /* SIGILL */ }, /* performance monitor */
 { 0x2900, 0x08 /* SIGFPE */ }, /* apu unavailable */
 { 0x3100, 0x0e /* SIGALRM */ }, /* fixed interval timer */
 { 0x3200, 0x02 /* SIGINT */ }, /* watchdog */
@@ -282,12 +282,6 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 {
 unsigned long *ptr = gdb_regs;
 int reg;
-#ifdef CONFIG_SPE
-union {
-u32 v32[2];
-u64 v64;
-} acc;
-#endif

 for (reg = 0; reg < 32; reg++)
 UNPACK64(regs->gpr[reg], ptr);
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index ed0ac4e4b8d8..79a00bb9c64c 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -781,9 +781,9 @@ static int __init lparcfg_init(void)
 !firmware_has_feature(FW_FEATURE_ISERIES))
 mode |= S_IWUSR;

-ent = proc_create("ppc64/lparcfg", mode, NULL, &lparcfg_fops);
+ent = proc_create("powerpc/lparcfg", mode, NULL, &lparcfg_fops);
 if (!ent) {
-printk(KERN_ERR "Failed to create ppc64/lparcfg\n");
+printk(KERN_ERR "Failed to create powerpc/lparcfg\n");
 return -EIO;
 }

diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index da9c0c4c10f3..8649f536f8df 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -502,15 +502,7 @@ _GLOBAL(clear_pages)
 li r0,PAGE_SIZE/L1_CACHE_BYTES
 slw r0,r0,r4
 mtctr r0
-#ifdef CONFIG_8xx
-li r4, 0
-1: stw r4, 0(r3)
-stw r4, 4(r3)
-stw r4, 8(r3)
-stw r4, 12(r3)
-#else
 1: dcbz 0,r3
-#endif
 addi r3,r3,L1_CACHE_BYTES
 bdnz 1b
 blr
@@ -535,15 +527,6 @@ _GLOBAL(copy_page)
 addi r3,r3,-4
 addi r4,r4,-4

-#ifdef CONFIG_8xx
-/* don't use prefetch on 8xx */
-li r0,4096/L1_CACHE_BYTES
-mtctr r0
-1: COPY_16_BYTES
-bdnz 1b
-blr
-
-#else /* not 8xx, we can prefetch */
 li r5,4

 #if MAX_COPY_PREFETCH > 1
@@ -584,7 +567,6 @@ _GLOBAL(copy_page)
 li r0,MAX_COPY_PREFETCH
 li r11,4
 b 2b
-#endif /* CONFIG_8xx */

 /*
 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index 0ed31f220482..ad461e735aec 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c | |||
@@ -139,8 +139,8 @@ out: | |||
139 | 139 | ||
140 | } | 140 | } |
141 | 141 | ||
142 | static int dev_nvram_ioctl(struct inode *inode, struct file *file, | 142 | static long dev_nvram_ioctl(struct file *file, unsigned int cmd, |
143 | unsigned int cmd, unsigned long arg) | 143 | unsigned long arg) |
144 | { | 144 | { |
145 | switch(cmd) { | 145 | switch(cmd) { |
146 | #ifdef CONFIG_PPC_PMAC | 146 | #ifdef CONFIG_PPC_PMAC |
@@ -169,11 +169,11 @@ static int dev_nvram_ioctl(struct inode *inode, struct file *file, | |||
169 | } | 169 | } |
170 | 170 | ||
171 | const struct file_operations nvram_fops = { | 171 | const struct file_operations nvram_fops = { |
172 | .owner = THIS_MODULE, | 172 | .owner = THIS_MODULE, |
173 | .llseek = dev_nvram_llseek, | 173 | .llseek = dev_nvram_llseek, |
174 | .read = dev_nvram_read, | 174 | .read = dev_nvram_read, |
175 | .write = dev_nvram_write, | 175 | .write = dev_nvram_write, |
176 | .ioctl = dev_nvram_ioctl, | 176 | .unlocked_ioctl = dev_nvram_ioctl, |
177 | }; | 177 | }; |
178 | 178 | ||
179 | static struct miscdevice nvram_dev = { | 179 | static struct miscdevice nvram_dev = { |
@@ -184,7 +184,7 @@ static struct miscdevice nvram_dev = { | |||
184 | 184 | ||
185 | 185 | ||
186 | #ifdef DEBUG_NVRAM | 186 | #ifdef DEBUG_NVRAM |
187 | static void nvram_print_partitions(char * label) | 187 | static void __init nvram_print_partitions(char * label) |
188 | { | 188 | { |
189 | struct list_head * p; | 189 | struct list_head * p; |
190 | struct nvram_partition * tmp_part; | 190 | struct nvram_partition * tmp_part; |
@@ -202,7 +202,7 @@ static void nvram_print_partitions(char * label) | |||
202 | #endif | 202 | #endif |
203 | 203 | ||
204 | 204 | ||
205 | static int nvram_write_header(struct nvram_partition * part) | 205 | static int __init nvram_write_header(struct nvram_partition * part) |
206 | { | 206 | { |
207 | loff_t tmp_index; | 207 | loff_t tmp_index; |
208 | int rc; | 208 | int rc; |
@@ -214,7 +214,7 @@ static int nvram_write_header(struct nvram_partition * part) | |||
214 | } | 214 | } |
215 | 215 | ||
216 | 216 | ||
217 | static unsigned char nvram_checksum(struct nvram_header *p) | 217 | static unsigned char __init nvram_checksum(struct nvram_header *p) |
218 | { | 218 | { |
219 | unsigned int c_sum, c_sum2; | 219 | unsigned int c_sum, c_sum2; |
220 | unsigned short *sp = (unsigned short *)p->name; /* assume 6 shorts */ | 220 | unsigned short *sp = (unsigned short *)p->name; /* assume 6 shorts */ |
@@ -228,32 +228,7 @@ static unsigned char nvram_checksum(struct nvram_header *p) | |||
228 | return c_sum; | 228 | return c_sum; |
229 | } | 229 | } |
230 | 230 | ||
231 | 231 | static int __init nvram_remove_os_partition(void) | |
232 | /* | ||
233 | * Find an nvram partition, sig can be 0 for any | ||
234 | * partition or name can be NULL for any name, else | ||
235 | * tries to match both | ||
236 | */ | ||
237 | struct nvram_partition *nvram_find_partition(int sig, const char *name) | ||
238 | { | ||
239 | struct nvram_partition * part; | ||
240 | struct list_head * p; | ||
241 | |||
242 | list_for_each(p, &nvram_part->partition) { | ||
243 | part = list_entry(p, struct nvram_partition, partition); | ||
244 | |||
245 | if (sig && part->header.signature != sig) | ||
246 | continue; | ||
247 | if (name && 0 != strncmp(name, part->header.name, 12)) | ||
248 | continue; | ||
249 | return part; | ||
250 | } | ||
251 | return NULL; | ||
252 | } | ||
253 | EXPORT_SYMBOL(nvram_find_partition); | ||
254 | |||
255 | |||
256 | static int nvram_remove_os_partition(void) | ||
257 | { | 232 | { |
258 | struct list_head *i; | 233 | struct list_head *i; |
259 | struct list_head *j; | 234 | struct list_head *j; |
@@ -319,7 +294,7 @@ static int nvram_remove_os_partition(void) | |||
319 | * Will create a partition starting at the first free | 294 | * Will create a partition starting at the first free |
320 | * space found if space has enough room. | 295 | * space found if space has enough room. |
321 | */ | 296 | */ |
322 | static int nvram_create_os_partition(void) | 297 | static int __init nvram_create_os_partition(void) |
323 | { | 298 | { |
324 | struct nvram_partition *part; | 299 | struct nvram_partition *part; |
325 | struct nvram_partition *new_part; | 300 | struct nvram_partition *new_part; |
@@ -422,7 +397,7 @@ static int nvram_create_os_partition(void) | |||
422 | * 5.) If the max chunk cannot be allocated then try finding a chunk | 397 | * 5.) If the max chunk cannot be allocated then try finding a chunk |
423 | * that will satisfy the minimum needed (NVRAM_MIN_REQ). | 398 | * that will satisfy the minimum needed (NVRAM_MIN_REQ). |
424 | */ | 399 | */ |
425 | static int nvram_setup_partition(void) | 400 | static int __init nvram_setup_partition(void) |
426 | { | 401 | { |
427 | struct list_head * p; | 402 | struct list_head * p; |
428 | struct nvram_partition * part; | 403 | struct nvram_partition * part; |
@@ -480,7 +455,7 @@ static int nvram_setup_partition(void) | |||
480 | } | 455 | } |
481 | 456 | ||
482 | 457 | ||
483 | static int nvram_scan_partitions(void) | 458 | static int __init nvram_scan_partitions(void) |
484 | { | 459 | { |
485 | loff_t cur_index = 0; | 460 | loff_t cur_index = 0; |
486 | struct nvram_header phead; | 461 | struct nvram_header phead; |
@@ -706,6 +681,9 @@ int nvram_clear_error_log(void) | |||
706 | int clear_word = ERR_FLAG_ALREADY_LOGGED; | 681 | int clear_word = ERR_FLAG_ALREADY_LOGGED; |
707 | int rc; | 682 | int rc; |
708 | 683 | ||
684 | if (nvram_error_log_index == -1) | ||
685 | return -1; | ||
686 | |||
709 | tmp_index = nvram_error_log_index; | 687 | tmp_index = nvram_error_log_index; |
710 | 688 | ||
711 | rc = ppc_md.nvram_write((char *)&clear_word, sizeof(int), &tmp_index); | 689 | rc = ppc_md.nvram_write((char *)&clear_word, sizeof(int), &tmp_index); |
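The dev_nvram_ioctl() hunk near the top of this file is the usual .ioctl -> .unlocked_ioctl conversion: the handler drops the inode argument, returns long, and no longer runs under the big kernel lock, so any serialization it needs must be taken explicitly. A minimal sketch of the pattern follows; the example_* names and the mutex are illustrative only and are not taken from this driver, which happens not to need its own lock.

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_ioctl_lock);	/* hypothetical per-driver lock */

static long example_unlocked_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	long ret = -EINVAL;

	mutex_lock(&example_ioctl_lock);	/* BKL is gone; lock explicitly */
	switch (cmd) {
	/* driver-specific commands go here */
	default:
		break;
	}
	mutex_unlock(&example_ioctl_lock);
	return ret;
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= example_unlocked_ioctl,
};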
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index bb8209e34931..e8dfdbd9327a 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c | |||
@@ -1190,7 +1190,7 @@ EXPORT_SYMBOL(pcibios_align_resource); | |||
1190 | * Reparent resource children of pr that conflict with res | 1190 | * Reparent resource children of pr that conflict with res |
1191 | * under res, and make res replace those children. | 1191 | * under res, and make res replace those children. |
1192 | */ | 1192 | */ |
1193 | static int __init reparent_resources(struct resource *parent, | 1193 | static int reparent_resources(struct resource *parent, |
1194 | struct resource *res) | 1194 | struct resource *res) |
1195 | { | 1195 | { |
1196 | struct resource *p, **pp; | 1196 | struct resource *p, **pp; |
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index ba949a2c93ac..ccf56ac92de5 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c | |||
@@ -97,7 +97,9 @@ int pcibios_unmap_io_space(struct pci_bus *bus) | |||
97 | * to do an appropriate TLB flush here too | 97 | * to do an appropriate TLB flush here too |
98 | */ | 98 | */ |
99 | if (bus->self) { | 99 | if (bus->self) { |
100 | #ifdef CONFIG_PPC_STD_MMU_64 | ||
100 | struct resource *res = bus->resource[0]; | 101 | struct resource *res = bus->resource[0]; |
102 | #endif | ||
101 | 103 | ||
102 | pr_debug("IO unmapping for PCI-PCI bridge %s\n", | 104 | pr_debug("IO unmapping for PCI-PCI bridge %s\n", |
103 | pci_name(bus->self)); | 105 | pci_name(bus->self)); |
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c index fe59c44f9b5b..a3c11cac3d71 100644 --- a/arch/powerpc/kernel/perf_callchain.c +++ b/arch/powerpc/kernel/perf_callchain.c | |||
@@ -119,13 +119,6 @@ static void perf_callchain_kernel(struct pt_regs *regs, | |||
119 | } | 119 | } |
120 | 120 | ||
121 | #ifdef CONFIG_PPC64 | 121 | #ifdef CONFIG_PPC64 |
122 | |||
123 | #ifdef CONFIG_HUGETLB_PAGE | ||
124 | #define is_huge_psize(pagesize) (HPAGE_SHIFT && mmu_huge_psizes[pagesize]) | ||
125 | #else | ||
126 | #define is_huge_psize(pagesize) 0 | ||
127 | #endif | ||
128 | |||
129 | /* | 122 | /* |
130 | * On 64-bit we don't want to invoke hash_page on user addresses from | 123 | * On 64-bit we don't want to invoke hash_page on user addresses from |
131 | * interrupt context, so if the access faults, we read the page tables | 124 | * interrupt context, so if the access faults, we read the page tables |
@@ -135,7 +128,7 @@ static int read_user_stack_slow(void __user *ptr, void *ret, int nb) | |||
135 | { | 128 | { |
136 | pgd_t *pgdir; | 129 | pgd_t *pgdir; |
137 | pte_t *ptep, pte; | 130 | pte_t *ptep, pte; |
138 | int pagesize; | 131 | unsigned shift; |
139 | unsigned long addr = (unsigned long) ptr; | 132 | unsigned long addr = (unsigned long) ptr; |
140 | unsigned long offset; | 133 | unsigned long offset; |
141 | unsigned long pfn; | 134 | unsigned long pfn; |
@@ -145,17 +138,14 @@ static int read_user_stack_slow(void __user *ptr, void *ret, int nb) | |||
145 | if (!pgdir) | 138 | if (!pgdir) |
146 | return -EFAULT; | 139 | return -EFAULT; |
147 | 140 | ||
148 | pagesize = get_slice_psize(current->mm, addr); | 141 | ptep = find_linux_pte_or_hugepte(pgdir, addr, &shift); |
142 | if (!shift) | ||
143 | shift = PAGE_SHIFT; | ||
149 | 144 | ||
150 | /* align address to page boundary */ | 145 | /* align address to page boundary */ |
151 | offset = addr & ((1ul << mmu_psize_defs[pagesize].shift) - 1); | 146 | offset = addr & ((1UL << shift) - 1); |
152 | addr -= offset; | 147 | addr -= offset; |
153 | 148 | ||
154 | if (is_huge_psize(pagesize)) | ||
155 | ptep = huge_pte_offset(current->mm, addr); | ||
156 | else | ||
157 | ptep = find_linux_pte(pgdir, addr); | ||
158 | |||
159 | if (ptep == NULL) | 149 | if (ptep == NULL) |
160 | return -EFAULT; | 150 | return -EFAULT; |
161 | pte = *ptep; | 151 | pte = *ptep; |
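The rework above lets find_linux_pte_or_hugepte() report the page-size shift for whatever mapping backs the address, so the in-page offset falls out of a single mask instead of a huge-page special case. A standalone, userspace-compilable illustration of that masking (the addresses and shifts below are made up):

#include <stdio.h>

static void split(unsigned long long addr, unsigned int shift)
{
	unsigned long long offset = addr & ((1ULL << shift) - 1);	/* in-page offset */
	unsigned long long base   = addr - offset;			/* page-aligned base */

	printf("addr=%#llx shift=%u -> base=%#llx offset=%#llx\n",
	       addr, shift, base, offset);
}

int main(void)
{
	split(0x10007a31f8ULL, 12);	/* 4K page */
	split(0x10007a31f8ULL, 24);	/* 16M huge page */
	return 0;
}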
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index bbcbae183e92..1eb85fbf53a5 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c | |||
@@ -116,20 +116,23 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) | |||
116 | static inline u32 perf_get_misc_flags(struct pt_regs *regs) | 116 | static inline u32 perf_get_misc_flags(struct pt_regs *regs) |
117 | { | 117 | { |
118 | unsigned long mmcra = regs->dsisr; | 118 | unsigned long mmcra = regs->dsisr; |
119 | unsigned long sihv = MMCRA_SIHV; | ||
120 | unsigned long sipr = MMCRA_SIPR; | ||
119 | 121 | ||
120 | if (TRAP(regs) != 0xf00) | 122 | if (TRAP(regs) != 0xf00) |
121 | return 0; /* not a PMU interrupt */ | 123 | return 0; /* not a PMU interrupt */ |
122 | 124 | ||
123 | if (ppmu->flags & PPMU_ALT_SIPR) { | 125 | if (ppmu->flags & PPMU_ALT_SIPR) { |
124 | if (mmcra & POWER6_MMCRA_SIHV) | 126 | sihv = POWER6_MMCRA_SIHV; |
125 | return PERF_RECORD_MISC_HYPERVISOR; | 127 | sipr = POWER6_MMCRA_SIPR; |
126 | return (mmcra & POWER6_MMCRA_SIPR) ? | ||
127 | PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL; | ||
128 | } | 128 | } |
129 | if (mmcra & MMCRA_SIHV) | 129 | |
130 | /* PR has priority over HV, so order below is important */ | ||
131 | if (mmcra & sipr) | ||
132 | return PERF_RECORD_MISC_USER; | ||
133 | if ((mmcra & sihv) && (freeze_events_kernel != MMCR0_FCHV)) | ||
130 | return PERF_RECORD_MISC_HYPERVISOR; | 134 | return PERF_RECORD_MISC_HYPERVISOR; |
131 | return (mmcra & MMCRA_SIPR) ? PERF_RECORD_MISC_USER : | 135 | return PERF_RECORD_MISC_KERNEL; |
132 | PERF_RECORD_MISC_KERNEL; | ||
133 | } | 136 | } |
134 | 137 | ||
135 | /* | 138 | /* |
@@ -1162,7 +1165,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
1162 | */ | 1165 | */ |
1163 | if (record) { | 1166 | if (record) { |
1164 | struct perf_sample_data data = { | 1167 | struct perf_sample_data data = { |
1165 | .addr = 0, | 1168 | .addr = ~0ULL, |
1166 | .period = event->hw.last_period, | 1169 | .period = event->hw.last_period, |
1167 | }; | 1170 | }; |
1168 | 1171 | ||
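The perf_get_misc_flags() rework above folds the PPMU_ALT_SIPR case into one path by choosing the right SIPR/SIHV bits up front, and the new comment pins down the ordering: a sample taken in problem state is reported as user even if the hypervisor bit is also set. A toy model of just that decision; the bit values and the boolean parameter are invented for illustration (the real code checks freeze_events_kernel against MMCR0_FCHV):

#include <stdio.h>

#define SIPR 0x1u	/* invented stand-in bits, not the real MMCRA layout */
#define SIHV 0x2u

static const char *classify(unsigned int mmcra, int hv_counted_as_kernel)
{
	if (mmcra & SIPR)			/* problem state wins ... */
		return "user";
	if ((mmcra & SIHV) && !hv_counted_as_kernel)
		return "hypervisor";		/* ... then hypervisor ... */
	return "kernel";			/* ... otherwise kernel */
}

int main(void)
{
	printf("%s\n", classify(SIPR | SIHV, 0));	/* user: PR beats HV */
	printf("%s\n", classify(SIHV, 0));		/* hypervisor */
	printf("%s\n", classify(0, 0));			/* kernel */
	return 0;
}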
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c index 0f4c1c73a6ad..199de527d411 100644 --- a/arch/powerpc/kernel/power5+-pmu.c +++ b/arch/powerpc/kernel/power5+-pmu.c | |||
@@ -73,10 +73,6 @@ | |||
73 | #define MMCR1_PMCSEL_MSK 0x7f | 73 | #define MMCR1_PMCSEL_MSK 0x7f |
74 | 74 | ||
75 | /* | 75 | /* |
76 | * Bits in MMCRA | ||
77 | */ | ||
78 | |||
79 | /* | ||
80 | * Layout of constraint bits: | 76 | * Layout of constraint bits: |
81 | * 6666555555555544444444443333333333222222222211111111110000000000 | 77 | * 6666555555555544444444443333333333222222222211111111110000000000 |
82 | * 3210987654321098765432109876543210987654321098765432109876543210 | 78 | * 3210987654321098765432109876543210987654321098765432109876543210 |
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c index c351b3a57fbb..98b6a729a9dd 100644 --- a/arch/powerpc/kernel/power5-pmu.c +++ b/arch/powerpc/kernel/power5-pmu.c | |||
@@ -73,10 +73,6 @@ | |||
73 | #define MMCR1_PMCSEL_MSK 0x7f | 73 | #define MMCR1_PMCSEL_MSK 0x7f |
74 | 74 | ||
75 | /* | 75 | /* |
76 | * Bits in MMCRA | ||
77 | */ | ||
78 | |||
79 | /* | ||
80 | * Layout of constraint bits: | 76 | * Layout of constraint bits: |
81 | * 6666555555555544444444443333333333222222222211111111110000000000 | 77 | * 6666555555555544444444443333333333222222222211111111110000000000 |
82 | * 3210987654321098765432109876543210987654321098765432109876543210 | 78 | * 3210987654321098765432109876543210987654321098765432109876543210 |
@@ -390,7 +386,7 @@ static int power5_compute_mmcr(u64 event[], int n_ev, | |||
390 | unsigned int hwc[], unsigned long mmcr[]) | 386 | unsigned int hwc[], unsigned long mmcr[]) |
391 | { | 387 | { |
392 | unsigned long mmcr1 = 0; | 388 | unsigned long mmcr1 = 0; |
393 | unsigned long mmcra = 0; | 389 | unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS; |
394 | unsigned int pmc, unit, byte, psel; | 390 | unsigned int pmc, unit, byte, psel; |
395 | unsigned int ttm, grp; | 391 | unsigned int ttm, grp; |
396 | int i, isbus, bit, grsel; | 392 | int i, isbus, bit, grsel; |
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c index ca399ba5034c..84a607bda8fb 100644 --- a/arch/powerpc/kernel/power6-pmu.c +++ b/arch/powerpc/kernel/power6-pmu.c | |||
@@ -178,7 +178,7 @@ static int p6_compute_mmcr(u64 event[], int n_ev, | |||
178 | unsigned int hwc[], unsigned long mmcr[]) | 178 | unsigned int hwc[], unsigned long mmcr[]) |
179 | { | 179 | { |
180 | unsigned long mmcr1 = 0; | 180 | unsigned long mmcr1 = 0; |
181 | unsigned long mmcra = 0; | 181 | unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS; |
182 | int i; | 182 | int i; |
183 | unsigned int pmc, ev, b, u, s, psel; | 183 | unsigned int pmc, ev, b, u, s, psel; |
184 | unsigned int ttmset = 0; | 184 | unsigned int ttmset = 0; |
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c index 28a4daacdc02..852f7b7f6b40 100644 --- a/arch/powerpc/kernel/power7-pmu.c +++ b/arch/powerpc/kernel/power7-pmu.c | |||
@@ -51,10 +51,6 @@ | |||
51 | #define MMCR1_PMCSEL_MSK 0xff | 51 | #define MMCR1_PMCSEL_MSK 0xff |
52 | 52 | ||
53 | /* | 53 | /* |
54 | * Bits in MMCRA | ||
55 | */ | ||
56 | |||
57 | /* | ||
58 | * Layout of constraint bits: | 54 | * Layout of constraint bits: |
59 | * 6666555555555544444444443333333333222222222211111111110000000000 | 55 | * 6666555555555544444444443333333333222222222211111111110000000000 |
60 | * 3210987654321098765432109876543210987654321098765432109876543210 | 56 | * 3210987654321098765432109876543210987654321098765432109876543210 |
@@ -230,7 +226,7 @@ static int power7_compute_mmcr(u64 event[], int n_ev, | |||
230 | unsigned int hwc[], unsigned long mmcr[]) | 226 | unsigned int hwc[], unsigned long mmcr[]) |
231 | { | 227 | { |
232 | unsigned long mmcr1 = 0; | 228 | unsigned long mmcr1 = 0; |
233 | unsigned long mmcra = 0; | 229 | unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS; |
234 | unsigned int pmc, unit, combine, l2sel, psel; | 230 | unsigned int pmc, unit, combine, l2sel, psel; |
235 | unsigned int pmc_inuse = 0; | 231 | unsigned int pmc_inuse = 0; |
236 | int i; | 232 | int i; |
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c index 479574413a93..8eff48e20dba 100644 --- a/arch/powerpc/kernel/ppc970-pmu.c +++ b/arch/powerpc/kernel/ppc970-pmu.c | |||
@@ -84,10 +84,6 @@ static short mmcr1_adder_bits[8] = { | |||
84 | }; | 84 | }; |
85 | 85 | ||
86 | /* | 86 | /* |
87 | * Bits in MMCRA | ||
88 | */ | ||
89 | |||
90 | /* | ||
91 | * Layout of constraint bits: | 87 | * Layout of constraint bits: |
92 | * 6666555555555544444444443333333333222222222211111111110000000000 | 88 | * 6666555555555544444444443333333333222222222211111111110000000000 |
93 | * 3210987654321098765432109876543210987654321098765432109876543210 | 89 | * 3210987654321098765432109876543210987654321098765432109876543210 |
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index c8b27bb4dbde..425451453e96 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c | |||
@@ -96,8 +96,6 @@ EXPORT_SYMBOL(copy_4K_page); | |||
96 | EXPORT_SYMBOL(isa_io_base); | 96 | EXPORT_SYMBOL(isa_io_base); |
97 | EXPORT_SYMBOL(isa_mem_base); | 97 | EXPORT_SYMBOL(isa_mem_base); |
98 | EXPORT_SYMBOL(pci_dram_offset); | 98 | EXPORT_SYMBOL(pci_dram_offset); |
99 | EXPORT_SYMBOL(pci_alloc_consistent); | ||
100 | EXPORT_SYMBOL(pci_free_consistent); | ||
101 | #endif /* CONFIG_PCI */ | 99 | #endif /* CONFIG_PCI */ |
102 | 100 | ||
103 | EXPORT_SYMBOL(start_thread); | 101 | EXPORT_SYMBOL(start_thread); |
@@ -162,7 +160,6 @@ EXPORT_SYMBOL(screen_info); | |||
162 | 160 | ||
163 | #ifdef CONFIG_PPC32 | 161 | #ifdef CONFIG_PPC32 |
164 | EXPORT_SYMBOL(timer_interrupt); | 162 | EXPORT_SYMBOL(timer_interrupt); |
165 | EXPORT_SYMBOL(irq_desc); | ||
166 | EXPORT_SYMBOL(tb_ticks_per_jiffy); | 163 | EXPORT_SYMBOL(tb_ticks_per_jiffy); |
167 | EXPORT_SYMBOL(cacheable_memcpy); | 164 | EXPORT_SYMBOL(cacheable_memcpy); |
168 | EXPORT_SYMBOL(cacheable_memzero); | 165 | EXPORT_SYMBOL(cacheable_memzero); |
diff --git a/arch/powerpc/kernel/proc_ppc64.c b/arch/powerpc/kernel/proc_powerpc.c index c647ddef40dc..1ed3b8d7981e 100644 --- a/arch/powerpc/kernel/proc_ppc64.c +++ b/arch/powerpc/kernel/proc_powerpc.c | |||
@@ -28,55 +28,7 @@ | |||
28 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
29 | #include <asm/prom.h> | 29 | #include <asm/prom.h> |
30 | 30 | ||
31 | static loff_t page_map_seek( struct file *file, loff_t off, int whence); | 31 | #ifdef CONFIG_PPC64 |
32 | static ssize_t page_map_read( struct file *file, char __user *buf, size_t nbytes, | ||
33 | loff_t *ppos); | ||
34 | static int page_map_mmap( struct file *file, struct vm_area_struct *vma ); | ||
35 | |||
36 | static const struct file_operations page_map_fops = { | ||
37 | .llseek = page_map_seek, | ||
38 | .read = page_map_read, | ||
39 | .mmap = page_map_mmap | ||
40 | }; | ||
41 | |||
42 | /* | ||
43 | * Create the ppc64 and ppc64/rtas directories early. This allows us to | ||
44 | * assume that they have been previously created in drivers. | ||
45 | */ | ||
46 | static int __init proc_ppc64_create(void) | ||
47 | { | ||
48 | struct proc_dir_entry *root; | ||
49 | |||
50 | root = proc_mkdir("ppc64", NULL); | ||
51 | if (!root) | ||
52 | return 1; | ||
53 | |||
54 | if (!of_find_node_by_path("/rtas")) | ||
55 | return 0; | ||
56 | |||
57 | if (!proc_mkdir("rtas", root)) | ||
58 | return 1; | ||
59 | |||
60 | if (!proc_symlink("rtas", NULL, "ppc64/rtas")) | ||
61 | return 1; | ||
62 | |||
63 | return 0; | ||
64 | } | ||
65 | core_initcall(proc_ppc64_create); | ||
66 | |||
67 | static int __init proc_ppc64_init(void) | ||
68 | { | ||
69 | struct proc_dir_entry *pde; | ||
70 | |||
71 | pde = proc_create_data("ppc64/systemcfg", S_IFREG|S_IRUGO, NULL, | ||
72 | &page_map_fops, vdso_data); | ||
73 | if (!pde) | ||
74 | return 1; | ||
75 | pde->size = PAGE_SIZE; | ||
76 | |||
77 | return 0; | ||
78 | } | ||
79 | __initcall(proc_ppc64_init); | ||
80 | 32 | ||
81 | static loff_t page_map_seek( struct file *file, loff_t off, int whence) | 33 | static loff_t page_map_seek( struct file *file, loff_t off, int whence) |
82 | { | 34 | { |
@@ -120,3 +72,55 @@ static int page_map_mmap( struct file *file, struct vm_area_struct *vma ) | |||
120 | return 0; | 72 | return 0; |
121 | } | 73 | } |
122 | 74 | ||
75 | static const struct file_operations page_map_fops = { | ||
76 | .llseek = page_map_seek, | ||
77 | .read = page_map_read, | ||
78 | .mmap = page_map_mmap | ||
79 | }; | ||
80 | |||
81 | |||
82 | static int __init proc_ppc64_init(void) | ||
83 | { | ||
84 | struct proc_dir_entry *pde; | ||
85 | |||
86 | pde = proc_create_data("powerpc/systemcfg", S_IFREG|S_IRUGO, NULL, | ||
87 | &page_map_fops, vdso_data); | ||
88 | if (!pde) | ||
89 | return 1; | ||
90 | pde->size = PAGE_SIZE; | ||
91 | |||
92 | return 0; | ||
93 | } | ||
94 | __initcall(proc_ppc64_init); | ||
95 | |||
96 | #endif /* CONFIG_PPC64 */ | ||
97 | |||
98 | /* | ||
99 | * Create the powerpc and powerpc/rtas directories early. This allows us to | ||
100 | * assume that they have been previously created in drivers. | ||
101 | */ | ||
102 | static int __init proc_ppc64_create(void) | ||
103 | { | ||
104 | struct proc_dir_entry *root; | ||
105 | |||
106 | root = proc_mkdir("powerpc", NULL); | ||
107 | if (!root) | ||
108 | return 1; | ||
109 | |||
110 | #ifdef CONFIG_PPC64 | ||
111 | if (!proc_symlink("ppc64", NULL, "powerpc")) | ||
112 | pr_err("Failed to create link /proc/ppc64 -> /proc/powerpc\n"); | ||
113 | #endif | ||
114 | |||
115 | if (!of_find_node_by_path("/rtas")) | ||
116 | return 0; | ||
117 | |||
118 | if (!proc_mkdir("rtas", root)) | ||
119 | return 1; | ||
120 | |||
121 | if (!proc_symlink("rtas", NULL, "powerpc/rtas")) | ||
122 | return 1; | ||
123 | |||
124 | return 0; | ||
125 | } | ||
126 | core_initcall(proc_ppc64_create); | ||
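With the rename above, the entries move under /proc/powerpc, and on 64-bit a compatibility symlink /proc/ppc64 -> powerpc is created so existing tools keep working. A small userspace check, assuming a kernel with this change applied:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char target[64];
	ssize_t n = readlink("/proc/ppc64", target, sizeof(target) - 1);

	if (n < 0) {
		perror("readlink /proc/ppc64");
		return 1;
	}
	target[n] = '\0';
	printf("/proc/ppc64 -> %s\n", target);	/* expected: powerpc */
	return 0;
}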
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 1168c5f440ab..c930ac38e59f 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -1016,9 +1016,13 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) | |||
1016 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 1016 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1017 | int curr_frame = current->curr_ret_stack; | 1017 | int curr_frame = current->curr_ret_stack; |
1018 | extern void return_to_handler(void); | 1018 | extern void return_to_handler(void); |
1019 | unsigned long addr = (unsigned long)return_to_handler; | 1019 | unsigned long rth = (unsigned long)return_to_handler; |
1020 | unsigned long mrth = -1; | ||
1020 | #ifdef CONFIG_PPC64 | 1021 | #ifdef CONFIG_PPC64 |
1021 | addr = *(unsigned long*)addr; | 1022 | extern void mod_return_to_handler(void); |
1023 | rth = *(unsigned long *)rth; | ||
1024 | mrth = (unsigned long)mod_return_to_handler; | ||
1025 | mrth = *(unsigned long *)mrth; | ||
1022 | #endif | 1026 | #endif |
1023 | #endif | 1027 | #endif |
1024 | 1028 | ||
@@ -1044,7 +1048,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) | |||
1044 | if (!firstframe || ip != lr) { | 1048 | if (!firstframe || ip != lr) { |
1045 | printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); | 1049 | printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); |
1046 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 1050 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1047 | if (ip == addr && curr_frame >= 0) { | 1051 | if ((ip == rth || ip == mrth) && curr_frame >= 0) { |
1048 | printk(" (%pS)", | 1052 | printk(" (%pS)", |
1049 | (void *)current->ret_stack[curr_frame].ret); | 1053 | (void *)current->ret_stack[curr_frame].ret); |
1050 | curr_frame--; | 1054 | curr_frame--; |
@@ -1168,7 +1172,7 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) | |||
1168 | unsigned long base = mm->brk; | 1172 | unsigned long base = mm->brk; |
1169 | unsigned long ret; | 1173 | unsigned long ret; |
1170 | 1174 | ||
1171 | #ifdef CONFIG_PPC64 | 1175 | #ifdef CONFIG_PPC_STD_MMU_64 |
1172 | /* | 1176 | /* |
1173 | * If we are using 1TB segments and we are allowed to randomise | 1177 | * If we are using 1TB segments and we are allowed to randomise |
1174 | * the heap, we can put it above 1TB so it is backed by a 1TB | 1178 | * the heap, we can put it above 1TB so it is backed by a 1TB |
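In the show_stack() hunk above, both return_to_handler and mod_return_to_handler are dereferenced once on 64-bit before being compared with stack entries. The reason, under the ELFv1 ABI in use here, is that a 64-bit function symbol resolves to a function descriptor whose first word holds the real text address. A conceptual sketch of that indirection; the struct name and helper are illustrative, not kernel definitions:

/* Conceptual only: layout of a ppc64 (ELFv1) function descriptor. */
struct func_desc {
	unsigned long entry;	/* address of the first instruction */
	unsigned long toc;	/* TOC (r2) value for the function */
	unsigned long env;	/* environment pointer (unused by C) */
};

/* The real text address is the first word, which is what
 * rth = *(unsigned long *)rth extracts in the hunk above. */
static inline unsigned long text_address(const struct func_desc *fd)
{
	return fd->entry;
}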
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index d4405b95bfaa..4ec300862466 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c | |||
@@ -1317,29 +1317,6 @@ struct device_node *of_find_next_cache_node(struct device_node *np) | |||
1317 | } | 1317 | } |
1318 | 1318 | ||
1319 | /** | 1319 | /** |
1320 | * of_find_all_nodes - Get next node in global list | ||
1321 | * @prev: Previous node or NULL to start iteration | ||
1322 | * of_node_put() will be called on it | ||
1323 | * | ||
1324 | * Returns a node pointer with refcount incremented, use | ||
1325 | * of_node_put() on it when done. | ||
1326 | */ | ||
1327 | struct device_node *of_find_all_nodes(struct device_node *prev) | ||
1328 | { | ||
1329 | struct device_node *np; | ||
1330 | |||
1331 | read_lock(&devtree_lock); | ||
1332 | np = prev ? prev->allnext : allnodes; | ||
1333 | for (; np != 0; np = np->allnext) | ||
1334 | if (of_node_get(np)) | ||
1335 | break; | ||
1336 | of_node_put(prev); | ||
1337 | read_unlock(&devtree_lock); | ||
1338 | return np; | ||
1339 | } | ||
1340 | EXPORT_SYMBOL(of_find_all_nodes); | ||
1341 | |||
1342 | /** | ||
1343 | * of_node_get - Increment refcount of a node | 1320 | * of_node_get - Increment refcount of a node |
1344 | * @node: Node to inc refcount, NULL is supported to | 1321 | * @node: Node to inc refcount, NULL is supported to |
1345 | * simplify writing of callers | 1322 | * simplify writing of callers |
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c index 13011a96a977..a85117d5c9a4 100644 --- a/arch/powerpc/kernel/rtas_flash.c +++ b/arch/powerpc/kernel/rtas_flash.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * as published by the Free Software Foundation; either version | 6 | * as published by the Free Software Foundation; either version |
7 | * 2 of the License, or (at your option) any later version. | 7 | * 2 of the License, or (at your option) any later version. |
8 | * | 8 | * |
9 | * /proc/ppc64/rtas/firmware_flash interface | 9 | * /proc/powerpc/rtas/firmware_flash interface |
10 | * | 10 | * |
11 | * This file implements a firmware_flash interface to pump a firmware | 11 | * This file implements a firmware_flash interface to pump a firmware |
12 | * image into the kernel. At reboot time rtas_restart() will see the | 12 | * image into the kernel. At reboot time rtas_restart() will see the |
@@ -740,7 +740,7 @@ static int __init rtas_flash_init(void) | |||
740 | return 1; | 740 | return 1; |
741 | } | 741 | } |
742 | 742 | ||
743 | firmware_flash_pde = create_flash_pde("ppc64/rtas/" | 743 | firmware_flash_pde = create_flash_pde("powerpc/rtas/" |
744 | FIRMWARE_FLASH_NAME, | 744 | FIRMWARE_FLASH_NAME, |
745 | &rtas_flash_operations); | 745 | &rtas_flash_operations); |
746 | if (firmware_flash_pde == NULL) { | 746 | if (firmware_flash_pde == NULL) { |
@@ -754,7 +754,7 @@ static int __init rtas_flash_init(void) | |||
754 | if (rc != 0) | 754 | if (rc != 0) |
755 | goto cleanup; | 755 | goto cleanup; |
756 | 756 | ||
757 | firmware_update_pde = create_flash_pde("ppc64/rtas/" | 757 | firmware_update_pde = create_flash_pde("powerpc/rtas/" |
758 | FIRMWARE_UPDATE_NAME, | 758 | FIRMWARE_UPDATE_NAME, |
759 | &rtas_flash_operations); | 759 | &rtas_flash_operations); |
760 | if (firmware_update_pde == NULL) { | 760 | if (firmware_update_pde == NULL) { |
@@ -768,7 +768,7 @@ static int __init rtas_flash_init(void) | |||
768 | if (rc != 0) | 768 | if (rc != 0) |
769 | goto cleanup; | 769 | goto cleanup; |
770 | 770 | ||
771 | validate_pde = create_flash_pde("ppc64/rtas/" VALIDATE_FLASH_NAME, | 771 | validate_pde = create_flash_pde("powerpc/rtas/" VALIDATE_FLASH_NAME, |
772 | &validate_flash_operations); | 772 | &validate_flash_operations); |
773 | if (validate_pde == NULL) { | 773 | if (validate_pde == NULL) { |
774 | rc = -ENOMEM; | 774 | rc = -ENOMEM; |
@@ -781,7 +781,7 @@ static int __init rtas_flash_init(void) | |||
781 | if (rc != 0) | 781 | if (rc != 0) |
782 | goto cleanup; | 782 | goto cleanup; |
783 | 783 | ||
784 | manage_pde = create_flash_pde("ppc64/rtas/" MANAGE_FLASH_NAME, | 784 | manage_pde = create_flash_pde("powerpc/rtas/" MANAGE_FLASH_NAME, |
785 | &manage_flash_operations); | 785 | &manage_flash_operations); |
786 | if (manage_pde == NULL) { | 786 | if (manage_pde == NULL) { |
787 | rc = -ENOMEM; | 787 | rc = -ENOMEM; |
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c new file mode 100644 index 000000000000..2e4832ab2108 --- /dev/null +++ b/arch/powerpc/kernel/rtasd.c | |||
@@ -0,0 +1,539 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | * | ||
9 | * Communication to userspace based on kernel/printk.c | ||
10 | */ | ||
11 | |||
12 | #include <linux/types.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/poll.h> | ||
17 | #include <linux/proc_fs.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/vmalloc.h> | ||
20 | #include <linux/spinlock.h> | ||
21 | #include <linux/cpu.h> | ||
22 | #include <linux/workqueue.h> | ||
23 | |||
24 | #include <asm/uaccess.h> | ||
25 | #include <asm/io.h> | ||
26 | #include <asm/rtas.h> | ||
27 | #include <asm/prom.h> | ||
28 | #include <asm/nvram.h> | ||
29 | #include <asm/atomic.h> | ||
30 | #include <asm/machdep.h> | ||
31 | |||
32 | |||
33 | static DEFINE_SPINLOCK(rtasd_log_lock); | ||
34 | |||
35 | static DECLARE_WAIT_QUEUE_HEAD(rtas_log_wait); | ||
36 | |||
37 | static char *rtas_log_buf; | ||
38 | static unsigned long rtas_log_start; | ||
39 | static unsigned long rtas_log_size; | ||
40 | |||
41 | static int surveillance_timeout = -1; | ||
42 | |||
43 | static unsigned int rtas_error_log_max; | ||
44 | static unsigned int rtas_error_log_buffer_max; | ||
45 | |||
46 | /* RTAS service tokens */ | ||
47 | static unsigned int event_scan; | ||
48 | static unsigned int rtas_event_scan_rate; | ||
49 | |||
50 | static int full_rtas_msgs = 0; | ||
51 | |||
52 | /* Stop logging to nvram after first fatal error */ | ||
53 | static int logging_enabled; /* Until we initialize everything, | ||
54 | * make sure we don't try logging | ||
55 | * anything */ | ||
56 | static int error_log_cnt; | ||
57 | |||
58 | /* | ||
59 | * Since we use 32 bit RTAS, the physical address of this must be below | ||
60 | * 4G or else bad things happen. Allocate this in the kernel data and | ||
61 | * make it big enough. | ||
62 | */ | ||
63 | static unsigned char logdata[RTAS_ERROR_LOG_MAX]; | ||
64 | |||
65 | static char *rtas_type[] = { | ||
66 | "Unknown", "Retry", "TCE Error", "Internal Device Failure", | ||
67 | "Timeout", "Data Parity", "Address Parity", "Cache Parity", | ||
68 | "Address Invalid", "ECC Uncorrected", "ECC Corrupted", | ||
69 | }; | ||
70 | |||
71 | static char *rtas_event_type(int type) | ||
72 | { | ||
73 | if ((type > 0) && (type < 11)) | ||
74 | return rtas_type[type]; | ||
75 | |||
76 | switch (type) { | ||
77 | case RTAS_TYPE_EPOW: | ||
78 | return "EPOW"; | ||
79 | case RTAS_TYPE_PLATFORM: | ||
80 | return "Platform Error"; | ||
81 | case RTAS_TYPE_IO: | ||
82 | return "I/O Event"; | ||
83 | case RTAS_TYPE_INFO: | ||
84 | return "Platform Information Event"; | ||
85 | case RTAS_TYPE_DEALLOC: | ||
86 | return "Resource Deallocation Event"; | ||
87 | case RTAS_TYPE_DUMP: | ||
88 | return "Dump Notification Event"; | ||
89 | } | ||
90 | |||
91 | return rtas_type[0]; | ||
92 | } | ||
93 | |||
94 | /* To see this info, grep RTAS /var/log/messages and each entry | ||
95 | * will be collected together with obvious begin/end. | ||
96 | * There will be a unique identifier on the begin and end lines. | ||
97 | * This will persist across reboots. | ||
98 | * | ||
99 | * format of error logs returned from RTAS: | ||
100 | * bytes (size) : contents | ||
101 | * -------------------------------------------------------- | ||
102 | * 0-7 (8) : rtas_error_log | ||
103 | * 8-47 (40) : extended info | ||
104 | * 48-51 (4) : vendor id | ||
105 | * 52-1023 (vendor specific) : location code and debug data | ||
106 | */ | ||
107 | static void printk_log_rtas(char *buf, int len) | ||
108 | { | ||
109 | |||
110 | int i,j,n = 0; | ||
111 | int perline = 16; | ||
112 | char buffer[64]; | ||
113 | char * str = "RTAS event"; | ||
114 | |||
115 | if (full_rtas_msgs) { | ||
116 | printk(RTAS_DEBUG "%d -------- %s begin --------\n", | ||
117 | error_log_cnt, str); | ||
118 | |||
119 | /* | ||
120 | * Print perline bytes on each line, each line will start | ||
121 | * with RTAS and a changing number, so syslogd will | ||
122 | * print lines that are otherwise the same. Separate every | ||
123 | * 4 bytes with a space. | ||
124 | */ | ||
125 | for (i = 0; i < len; i++) { | ||
126 | j = i % perline; | ||
127 | if (j == 0) { | ||
128 | memset(buffer, 0, sizeof(buffer)); | ||
129 | n = sprintf(buffer, "RTAS %d:", i/perline); | ||
130 | } | ||
131 | |||
132 | if ((i % 4) == 0) | ||
133 | n += sprintf(buffer+n, " "); | ||
134 | |||
135 | n += sprintf(buffer+n, "%02x", (unsigned char)buf[i]); | ||
136 | |||
137 | if (j == (perline-1)) | ||
138 | printk(KERN_DEBUG "%s\n", buffer); | ||
139 | } | ||
140 | if ((i % perline) != 0) | ||
141 | printk(KERN_DEBUG "%s\n", buffer); | ||
142 | |||
143 | printk(RTAS_DEBUG "%d -------- %s end ----------\n", | ||
144 | error_log_cnt, str); | ||
145 | } else { | ||
146 | struct rtas_error_log *errlog = (struct rtas_error_log *)buf; | ||
147 | |||
148 | printk(RTAS_DEBUG "event: %d, Type: %s, Severity: %d\n", | ||
149 | error_log_cnt, rtas_event_type(errlog->type), | ||
150 | errlog->severity); | ||
151 | } | ||
152 | } | ||
153 | |||
154 | static int log_rtas_len(char * buf) | ||
155 | { | ||
156 | int len; | ||
157 | struct rtas_error_log *err; | ||
158 | |||
159 | /* rtas fixed header */ | ||
160 | len = 8; | ||
161 | err = (struct rtas_error_log *)buf; | ||
162 | if (err->extended_log_length) { | ||
163 | |||
164 | /* extended header */ | ||
165 | len += err->extended_log_length; | ||
166 | } | ||
167 | |||
168 | if (rtas_error_log_max == 0) | ||
169 | rtas_error_log_max = rtas_get_error_log_max(); | ||
170 | |||
171 | if (len > rtas_error_log_max) | ||
172 | len = rtas_error_log_max; | ||
173 | |||
174 | return len; | ||
175 | } | ||
176 | |||
177 | /* | ||
178 | * First write to nvram, if fatal error, that is the only | ||
179 | * place we log the info. The error will be picked up | ||
180 | * on the next reboot by rtasd. If not fatal, run the | ||
181 | * method for the type of error. Currently, only RTAS | ||
182 | * errors have methods implemented, but in the future | ||
183 | * there might be a need to store data in nvram before a | ||
184 | * call to panic(). | ||
185 | * | ||
186 | * XXX We write to nvram periodically, to indicate error has | ||
187 | * been written and sync'd, but there is a possibility | ||
188 | * that if we don't shutdown correctly, a duplicate error | ||
189 | * record will be created on next reboot. | ||
190 | */ | ||
191 | void pSeries_log_error(char *buf, unsigned int err_type, int fatal) | ||
192 | { | ||
193 | unsigned long offset; | ||
194 | unsigned long s; | ||
195 | int len = 0; | ||
196 | |||
197 | pr_debug("rtasd: logging event\n"); | ||
198 | if (buf == NULL) | ||
199 | return; | ||
200 | |||
201 | spin_lock_irqsave(&rtasd_log_lock, s); | ||
202 | |||
203 | /* get length and increase count */ | ||
204 | switch (err_type & ERR_TYPE_MASK) { | ||
205 | case ERR_TYPE_RTAS_LOG: | ||
206 | len = log_rtas_len(buf); | ||
207 | if (!(err_type & ERR_FLAG_BOOT)) | ||
208 | error_log_cnt++; | ||
209 | break; | ||
210 | case ERR_TYPE_KERNEL_PANIC: | ||
211 | default: | ||
212 | WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */ | ||
213 | spin_unlock_irqrestore(&rtasd_log_lock, s); | ||
214 | return; | ||
215 | } | ||
216 | |||
217 | #ifdef CONFIG_PPC64 | ||
218 | /* Write error to NVRAM */ | ||
219 | if (logging_enabled && !(err_type & ERR_FLAG_BOOT)) | ||
220 | nvram_write_error_log(buf, len, err_type, error_log_cnt); | ||
221 | #endif /* CONFIG_PPC64 */ | ||
222 | |||
223 | /* | ||
224 | * rtas errors can occur during boot, and we do want to capture | ||
225 | * those somewhere, even if nvram isn't ready (why not?), and even | ||
226 | * if rtasd isn't ready. Put them into the boot log, at least. | ||
227 | */ | ||
228 | if ((err_type & ERR_TYPE_MASK) == ERR_TYPE_RTAS_LOG) | ||
229 | printk_log_rtas(buf, len); | ||
230 | |||
231 | /* Check to see if we need to or have stopped logging */ | ||
232 | if (fatal || !logging_enabled) { | ||
233 | logging_enabled = 0; | ||
234 | WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */ | ||
235 | spin_unlock_irqrestore(&rtasd_log_lock, s); | ||
236 | return; | ||
237 | } | ||
238 | |||
239 | /* call type specific method for error */ | ||
240 | switch (err_type & ERR_TYPE_MASK) { | ||
241 | case ERR_TYPE_RTAS_LOG: | ||
242 | offset = rtas_error_log_buffer_max * | ||
243 | ((rtas_log_start+rtas_log_size) & LOG_NUMBER_MASK); | ||
244 | |||
245 | /* First copy over sequence number */ | ||
246 | memcpy(&rtas_log_buf[offset], (void *) &error_log_cnt, sizeof(int)); | ||
247 | |||
248 | /* Second copy over error log data */ | ||
249 | offset += sizeof(int); | ||
250 | memcpy(&rtas_log_buf[offset], buf, len); | ||
251 | |||
252 | if (rtas_log_size < LOG_NUMBER) | ||
253 | rtas_log_size += 1; | ||
254 | else | ||
255 | rtas_log_start += 1; | ||
256 | |||
257 | WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */ | ||
258 | spin_unlock_irqrestore(&rtasd_log_lock, s); | ||
259 | wake_up_interruptible(&rtas_log_wait); | ||
260 | break; | ||
261 | case ERR_TYPE_KERNEL_PANIC: | ||
262 | default: | ||
263 | WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */ | ||
264 | spin_unlock_irqrestore(&rtasd_log_lock, s); | ||
265 | return; | ||
266 | } | ||
267 | |||
268 | } | ||
269 | |||
270 | static int rtas_log_open(struct inode * inode, struct file * file) | ||
271 | { | ||
272 | return 0; | ||
273 | } | ||
274 | |||
275 | static int rtas_log_release(struct inode * inode, struct file * file) | ||
276 | { | ||
277 | return 0; | ||
278 | } | ||
279 | |||
280 | /* This will check if all events are logged; if they are, then we | ||
281 | * know that we can safely clear the events in NVRAM. | ||
282 | * Next we'll sit and wait for something else to log. | ||
283 | */ | ||
284 | static ssize_t rtas_log_read(struct file * file, char __user * buf, | ||
285 | size_t count, loff_t *ppos) | ||
286 | { | ||
287 | int error; | ||
288 | char *tmp; | ||
289 | unsigned long s; | ||
290 | unsigned long offset; | ||
291 | |||
292 | if (!buf || count < rtas_error_log_buffer_max) | ||
293 | return -EINVAL; | ||
294 | |||
295 | count = rtas_error_log_buffer_max; | ||
296 | |||
297 | if (!access_ok(VERIFY_WRITE, buf, count)) | ||
298 | return -EFAULT; | ||
299 | |||
300 | tmp = kmalloc(count, GFP_KERNEL); | ||
301 | if (!tmp) | ||
302 | return -ENOMEM; | ||
303 | |||
304 | spin_lock_irqsave(&rtasd_log_lock, s); | ||
305 | |||
306 | /* if it's 0, then we know we got the last one (the one in NVRAM) */ | ||
307 | while (rtas_log_size == 0) { | ||
308 | if (file->f_flags & O_NONBLOCK) { | ||
309 | spin_unlock_irqrestore(&rtasd_log_lock, s); | ||
310 | error = -EAGAIN; | ||
311 | goto out; | ||
312 | } | ||
313 | |||
314 | if (!logging_enabled) { | ||
315 | spin_unlock_irqrestore(&rtasd_log_lock, s); | ||
316 | error = -ENODATA; | ||
317 | goto out; | ||
318 | } | ||
319 | #ifdef CONFIG_PPC64 | ||
320 | nvram_clear_error_log(); | ||
321 | #endif /* CONFIG_PPC64 */ | ||
322 | |||
323 | spin_unlock_irqrestore(&rtasd_log_lock, s); | ||
324 | error = wait_event_interruptible(rtas_log_wait, rtas_log_size); | ||
325 | if (error) | ||
326 | goto out; | ||
327 | spin_lock_irqsave(&rtasd_log_lock, s); | ||
328 | } | ||
329 | |||
330 | offset = rtas_error_log_buffer_max * (rtas_log_start & LOG_NUMBER_MASK); | ||
331 | memcpy(tmp, &rtas_log_buf[offset], count); | ||
332 | |||
333 | rtas_log_start += 1; | ||
334 | rtas_log_size -= 1; | ||
335 | spin_unlock_irqrestore(&rtasd_log_lock, s); | ||
336 | |||
337 | error = copy_to_user(buf, tmp, count) ? -EFAULT : count; | ||
338 | out: | ||
339 | kfree(tmp); | ||
340 | return error; | ||
341 | } | ||
342 | |||
343 | static unsigned int rtas_log_poll(struct file *file, poll_table * wait) | ||
344 | { | ||
345 | poll_wait(file, &rtas_log_wait, wait); | ||
346 | if (rtas_log_size) | ||
347 | return POLLIN | POLLRDNORM; | ||
348 | return 0; | ||
349 | } | ||
350 | |||
351 | static const struct file_operations proc_rtas_log_operations = { | ||
352 | .read = rtas_log_read, | ||
353 | .poll = rtas_log_poll, | ||
354 | .open = rtas_log_open, | ||
355 | .release = rtas_log_release, | ||
356 | }; | ||
357 | |||
358 | static int enable_surveillance(int timeout) | ||
359 | { | ||
360 | int error; | ||
361 | |||
362 | error = rtas_set_indicator(SURVEILLANCE_TOKEN, 0, timeout); | ||
363 | |||
364 | if (error == 0) | ||
365 | return 0; | ||
366 | |||
367 | if (error == -EINVAL) { | ||
368 | printk(KERN_DEBUG "rtasd: surveillance not supported\n"); | ||
369 | return 0; | ||
370 | } | ||
371 | |||
372 | printk(KERN_ERR "rtasd: could not update surveillance\n"); | ||
373 | return -1; | ||
374 | } | ||
375 | |||
376 | static void do_event_scan(void) | ||
377 | { | ||
378 | int error; | ||
379 | do { | ||
380 | memset(logdata, 0, rtas_error_log_max); | ||
381 | error = rtas_call(event_scan, 4, 1, NULL, | ||
382 | RTAS_EVENT_SCAN_ALL_EVENTS, 0, | ||
383 | __pa(logdata), rtas_error_log_max); | ||
384 | if (error == -1) { | ||
385 | printk(KERN_ERR "event-scan failed\n"); | ||
386 | break; | ||
387 | } | ||
388 | |||
389 | if (error == 0) | ||
390 | pSeries_log_error(logdata, ERR_TYPE_RTAS_LOG, 0); | ||
391 | |||
392 | } while(error == 0); | ||
393 | } | ||
394 | |||
395 | static void rtas_event_scan(struct work_struct *w); | ||
396 | DECLARE_DELAYED_WORK(event_scan_work, rtas_event_scan); | ||
397 | |||
398 | /* | ||
399 | * Delay should be at least one second since some machines have problems if | ||
400 | * we call event-scan too quickly. | ||
401 | */ | ||
402 | static unsigned long event_scan_delay = 1*HZ; | ||
403 | static int first_pass = 1; | ||
404 | |||
405 | static void rtas_event_scan(struct work_struct *w) | ||
406 | { | ||
407 | unsigned int cpu; | ||
408 | |||
409 | do_event_scan(); | ||
410 | |||
411 | get_online_cpus(); | ||
412 | |||
413 | cpu = next_cpu(smp_processor_id(), cpu_online_map); | ||
414 | if (cpu == NR_CPUS) { | ||
415 | cpu = first_cpu(cpu_online_map); | ||
416 | |||
417 | if (first_pass) { | ||
418 | first_pass = 0; | ||
419 | event_scan_delay = 30*HZ/rtas_event_scan_rate; | ||
420 | |||
421 | if (surveillance_timeout != -1) { | ||
422 | pr_debug("rtasd: enabling surveillance\n"); | ||
423 | enable_surveillance(surveillance_timeout); | ||
424 | pr_debug("rtasd: surveillance enabled\n"); | ||
425 | } | ||
426 | } | ||
427 | } | ||
428 | |||
429 | schedule_delayed_work_on(cpu, &event_scan_work, | ||
430 | __round_jiffies_relative(event_scan_delay, cpu)); | ||
431 | |||
432 | put_online_cpus(); | ||
433 | } | ||
434 | |||
435 | #ifdef CONFIG_PPC64 | ||
436 | static void retreive_nvram_error_log(void) | ||
437 | { | ||
438 | unsigned int err_type ; | ||
439 | int rc ; | ||
440 | |||
441 | /* See if we have any error stored in NVRAM */ | ||
442 | memset(logdata, 0, rtas_error_log_max); | ||
443 | rc = nvram_read_error_log(logdata, rtas_error_log_max, | ||
444 | &err_type, &error_log_cnt); | ||
445 | /* We can use rtas_log_buf now */ | ||
446 | logging_enabled = 1; | ||
447 | if (!rc) { | ||
448 | if (err_type != ERR_FLAG_ALREADY_LOGGED) { | ||
449 | pSeries_log_error(logdata, err_type | ERR_FLAG_BOOT, 0); | ||
450 | } | ||
451 | } | ||
452 | } | ||
453 | #else /* CONFIG_PPC64 */ | ||
454 | static void retreive_nvram_error_log(void) | ||
455 | { | ||
456 | } | ||
457 | #endif /* CONFIG_PPC64 */ | ||
458 | |||
459 | static void start_event_scan(void) | ||
460 | { | ||
461 | printk(KERN_DEBUG "RTAS daemon started\n"); | ||
462 | pr_debug("rtasd: will sleep for %d milliseconds\n", | ||
463 | (30000 / rtas_event_scan_rate)); | ||
464 | |||
465 | /* Retrieve errors from nvram if any */ | ||
466 | retreive_nvram_error_log(); | ||
467 | |||
468 | schedule_delayed_work_on(first_cpu(cpu_online_map), &event_scan_work, | ||
469 | event_scan_delay); | ||
470 | } | ||
471 | |||
472 | static int __init rtas_init(void) | ||
473 | { | ||
474 | struct proc_dir_entry *entry; | ||
475 | |||
476 | if (!machine_is(pseries) && !machine_is(chrp)) | ||
477 | return 0; | ||
478 | |||
479 | /* No RTAS */ | ||
480 | event_scan = rtas_token("event-scan"); | ||
481 | if (event_scan == RTAS_UNKNOWN_SERVICE) { | ||
482 | printk(KERN_INFO "rtasd: No event-scan on system\n"); | ||
483 | return -ENODEV; | ||
484 | } | ||
485 | |||
486 | rtas_event_scan_rate = rtas_token("rtas-event-scan-rate"); | ||
487 | if (rtas_event_scan_rate == RTAS_UNKNOWN_SERVICE) { | ||
488 | printk(KERN_ERR "rtasd: no rtas-event-scan-rate on system\n"); | ||
489 | return -ENODEV; | ||
490 | } | ||
491 | |||
492 | /* Make room for the sequence number */ | ||
493 | rtas_error_log_max = rtas_get_error_log_max(); | ||
494 | rtas_error_log_buffer_max = rtas_error_log_max + sizeof(int); | ||
495 | |||
496 | rtas_log_buf = vmalloc(rtas_error_log_buffer_max*LOG_NUMBER); | ||
497 | if (!rtas_log_buf) { | ||
498 | printk(KERN_ERR "rtasd: no memory\n"); | ||
499 | return -ENOMEM; | ||
500 | } | ||
501 | |||
502 | entry = proc_create("powerpc/rtas/error_log", S_IRUSR, NULL, | ||
503 | &proc_rtas_log_operations); | ||
504 | if (!entry) | ||
505 | printk(KERN_ERR "Failed to create error_log proc entry\n"); | ||
506 | |||
507 | start_event_scan(); | ||
508 | |||
509 | return 0; | ||
510 | } | ||
511 | __initcall(rtas_init); | ||
512 | |||
513 | static int __init surveillance_setup(char *str) | ||
514 | { | ||
515 | int i; | ||
516 | |||
517 | /* We only do surveillance on pseries */ | ||
518 | if (!machine_is(pseries)) | ||
519 | return 0; | ||
520 | |||
521 | if (get_option(&str,&i)) { | ||
522 | if (i >= 0 && i <= 255) | ||
523 | surveillance_timeout = i; | ||
524 | } | ||
525 | |||
526 | return 1; | ||
527 | } | ||
528 | __setup("surveillance=", surveillance_setup); | ||
529 | |||
530 | static int __init rtasmsgs_setup(char *str) | ||
531 | { | ||
532 | if (strcmp(str, "on") == 0) | ||
533 | full_rtas_msgs = 1; | ||
534 | else if (strcmp(str, "off") == 0) | ||
535 | full_rtas_msgs = 0; | ||
536 | |||
537 | return 1; | ||
538 | } | ||
539 | __setup("rtasmsgs=", rtasmsgs_setup); | ||
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index aa5aeb947bc5..03dd6a248198 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c | |||
@@ -660,6 +660,7 @@ late_initcall(check_cache_coherency); | |||
660 | 660 | ||
661 | #ifdef CONFIG_DEBUG_FS | 661 | #ifdef CONFIG_DEBUG_FS |
662 | struct dentry *powerpc_debugfs_root; | 662 | struct dentry *powerpc_debugfs_root; |
663 | EXPORT_SYMBOL(powerpc_debugfs_root); | ||
663 | 664 | ||
664 | static int powerpc_debugfs_init(void) | 665 | static int powerpc_debugfs_init(void) |
665 | { | 666 | { |
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 53bcf3d792db..b152de3e64d4 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c | |||
@@ -345,7 +345,7 @@ void __init setup_arch(char **cmdline_p) | |||
345 | 345 | ||
346 | #ifdef CONFIG_SWIOTLB | 346 | #ifdef CONFIG_SWIOTLB |
347 | if (ppc_swiotlb_enable) | 347 | if (ppc_swiotlb_enable) |
348 | swiotlb_init(); | 348 | swiotlb_init(1); |
349 | #endif | 349 | #endif |
350 | 350 | ||
351 | paging_init(); | 351 | paging_init(); |
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 797ea95aae2e..6568406b2a30 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c | |||
@@ -57,7 +57,6 @@ | |||
57 | #include <asm/cache.h> | 57 | #include <asm/cache.h> |
58 | #include <asm/page.h> | 58 | #include <asm/page.h> |
59 | #include <asm/mmu.h> | 59 | #include <asm/mmu.h> |
60 | #include <asm/mmu-hash64.h> | ||
61 | #include <asm/firmware.h> | 60 | #include <asm/firmware.h> |
62 | #include <asm/xmon.h> | 61 | #include <asm/xmon.h> |
63 | #include <asm/udbg.h> | 62 | #include <asm/udbg.h> |
@@ -357,11 +356,6 @@ void __init setup_system(void) | |||
357 | */ | 356 | */ |
358 | initialize_cache_info(); | 357 | initialize_cache_info(); |
359 | 358 | ||
360 | /* | ||
361 | * Initialize irq remapping subsystem | ||
362 | */ | ||
363 | irq_early_init(); | ||
364 | |||
365 | #ifdef CONFIG_PPC_RTAS | 359 | #ifdef CONFIG_PPC_RTAS |
366 | /* | 360 | /* |
367 | * Initialize RTAS if available | 361 | * Initialize RTAS if available |
@@ -551,7 +545,7 @@ void __init setup_arch(char **cmdline_p) | |||
551 | 545 | ||
552 | #ifdef CONFIG_SWIOTLB | 546 | #ifdef CONFIG_SWIOTLB |
553 | if (ppc_swiotlb_enable) | 547 | if (ppc_swiotlb_enable) |
554 | swiotlb_init(); | 548 | swiotlb_init(1); |
555 | #endif | 549 | #endif |
556 | 550 | ||
557 | paging_init(); | 551 | paging_init(); |
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 2ebb48410976..a521fb8a40ee 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c | |||
@@ -218,6 +218,9 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) | |||
218 | 218 | ||
219 | static void stop_this_cpu(void *dummy) | 219 | static void stop_this_cpu(void *dummy) |
220 | { | 220 | { |
221 | /* Remove this CPU */ | ||
222 | set_cpu_online(smp_processor_id(), false); | ||
223 | |||
221 | local_irq_disable(); | 224 | local_irq_disable(); |
222 | while (1) | 225 | while (1) |
223 | ; | 226 | ; |
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index b97c2d67f4ac..c5a4732bcc48 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c | |||
@@ -520,58 +520,6 @@ asmlinkage long compat_sys_umask(u32 mask) | |||
520 | return sys_umask((int)mask); | 520 | return sys_umask((int)mask); |
521 | } | 521 | } |
522 | 522 | ||
523 | #ifdef CONFIG_SYSCTL_SYSCALL | ||
524 | struct __sysctl_args32 { | ||
525 | u32 name; | ||
526 | int nlen; | ||
527 | u32 oldval; | ||
528 | u32 oldlenp; | ||
529 | u32 newval; | ||
530 | u32 newlen; | ||
531 | u32 __unused[4]; | ||
532 | }; | ||
533 | |||
534 | asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args) | ||
535 | { | ||
536 | struct __sysctl_args32 tmp; | ||
537 | int error; | ||
538 | size_t oldlen; | ||
539 | size_t __user *oldlenp = NULL; | ||
540 | unsigned long addr = (((unsigned long)&args->__unused[0]) + 7) & ~7; | ||
541 | |||
542 | if (copy_from_user(&tmp, args, sizeof(tmp))) | ||
543 | return -EFAULT; | ||
544 | |||
545 | if (tmp.oldval && tmp.oldlenp) { | ||
546 | /* Duh, this is ugly and might not work if sysctl_args | ||
547 | is in read-only memory, but do_sysctl does indirectly | ||
548 | a lot of uaccess in both directions and we'd have to | ||
549 | basically copy the whole sysctl.c here, and | ||
550 | glibc's __sysctl uses rw memory for the structure | ||
551 | anyway. */ | ||
552 | oldlenp = (size_t __user *)addr; | ||
553 | if (get_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) || | ||
554 | put_user(oldlen, oldlenp)) | ||
555 | return -EFAULT; | ||
556 | } | ||
557 | |||
558 | lock_kernel(); | ||
559 | error = do_sysctl(compat_ptr(tmp.name), tmp.nlen, | ||
560 | compat_ptr(tmp.oldval), oldlenp, | ||
561 | compat_ptr(tmp.newval), tmp.newlen); | ||
562 | unlock_kernel(); | ||
563 | if (oldlenp) { | ||
564 | if (!error) { | ||
565 | if (get_user(oldlen, oldlenp) || | ||
566 | put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp))) | ||
567 | error = -EFAULT; | ||
568 | } | ||
569 | copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)); | ||
570 | } | ||
571 | return error; | ||
572 | } | ||
573 | #endif | ||
574 | |||
575 | unsigned long compat_sys_mmap2(unsigned long addr, size_t len, | 523 | unsigned long compat_sys_mmap2(unsigned long addr, size_t len, |
576 | unsigned long prot, unsigned long flags, | 524 | unsigned long prot, unsigned long flags, |
577 | unsigned long fd, unsigned long pgoff) | 525 | unsigned long fd, unsigned long pgoff) |
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c index c04832c4a02e..3370e62e43d4 100644 --- a/arch/powerpc/kernel/syscalls.c +++ b/arch/powerpc/kernel/syscalls.c | |||
@@ -140,7 +140,6 @@ static inline unsigned long do_mmap2(unsigned long addr, size_t len, | |||
140 | unsigned long prot, unsigned long flags, | 140 | unsigned long prot, unsigned long flags, |
141 | unsigned long fd, unsigned long off, int shift) | 141 | unsigned long fd, unsigned long off, int shift) |
142 | { | 142 | { |
143 | struct file * file = NULL; | ||
144 | unsigned long ret = -EINVAL; | 143 | unsigned long ret = -EINVAL; |
145 | 144 | ||
146 | if (!arch_validate_prot(prot)) | 145 | if (!arch_validate_prot(prot)) |
@@ -151,20 +150,8 @@ static inline unsigned long do_mmap2(unsigned long addr, size_t len, | |||
151 | goto out; | 150 | goto out; |
152 | off >>= shift; | 151 | off >>= shift; |
153 | } | 152 | } |
154 | |||
155 | ret = -EBADF; | ||
156 | if (!(flags & MAP_ANONYMOUS)) { | ||
157 | if (!(file = fget(fd))) | ||
158 | goto out; | ||
159 | } | ||
160 | |||
161 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
162 | 153 | ||
163 | down_write(¤t->mm->mmap_sem); | 154 | ret = sys_mmap_pgoff(addr, len, prot, flags, fd, off); |
164 | ret = do_mmap_pgoff(file, addr, len, prot, flags, off); | ||
165 | up_write(¤t->mm->mmap_sem); | ||
166 | if (file) | ||
167 | fput(file); | ||
168 | out: | 155 | out: |
169 | return ret; | 156 | return ret; |
170 | } | 157 | } |
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 956ab33fd73f..e235e52dc4fe 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c | |||
@@ -461,6 +461,25 @@ static void unregister_cpu_online(unsigned int cpu) | |||
461 | 461 | ||
462 | cacheinfo_cpu_offline(cpu); | 462 | cacheinfo_cpu_offline(cpu); |
463 | } | 463 | } |
464 | |||
465 | #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE | ||
466 | ssize_t arch_cpu_probe(const char *buf, size_t count) | ||
467 | { | ||
468 | if (ppc_md.cpu_probe) | ||
469 | return ppc_md.cpu_probe(buf, count); | ||
470 | |||
471 | return -EINVAL; | ||
472 | } | ||
473 | |||
474 | ssize_t arch_cpu_release(const char *buf, size_t count) | ||
475 | { | ||
476 | if (ppc_md.cpu_release) | ||
477 | return ppc_md.cpu_release(buf, count); | ||
478 | |||
479 | return -EINVAL; | ||
480 | } | ||
481 | #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ | ||
482 | |||
464 | #endif /* CONFIG_HOTPLUG_CPU */ | 483 | #endif /* CONFIG_HOTPLUG_CPU */ |
465 | 484 | ||
466 | static int __cpuinit sysfs_cpu_notify(struct notifier_block *self, | 485 | static int __cpuinit sysfs_cpu_notify(struct notifier_block *self, |
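The new arch_cpu_probe()/arch_cpu_release() above simply forward the sysfs probe/release writes to whatever the machine description provides, so a platform opts in by filling two ppc_md hooks. A hedged sketch of what a platform-side implementation might look like; the pseries_* names and the trivial bodies are placeholders, not code from this series:

#include <linux/types.h>

static ssize_t pseries_cpu_probe(const char *buf, size_t count)
{
	/* parse the request in buf, obtain a CPU from firmware, bring it up */
	return count;
}

static ssize_t pseries_cpu_release(const char *buf, size_t count)
{
	/* offline the CPU named in buf and hand it back to firmware */
	return count;
}

/* wired up during platform setup, e.g.:
 *	ppc_md.cpu_probe   = pseries_cpu_probe;
 *	ppc_md.cpu_release = pseries_cpu_release;
 */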
diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c index c3a56d65c5a9..a753b72efbc0 100644 --- a/arch/powerpc/kernel/tau_6xx.c +++ b/arch/powerpc/kernel/tau_6xx.c | |||
@@ -59,7 +59,7 @@ void set_thresholds(unsigned long cpu) | |||
59 | mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID); | 59 | mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID); |
60 | 60 | ||
61 | /* setup THRM2, | 61 | /* setup THRM2, |
62 | * threshold, valid bit, enable interrupts, interrupt when above threshhold | 62 | * threshold, valid bit, enable interrupts, interrupt when above threshold |
63 | */ | 63 | */ |
64 | mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE); | 64 | mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE); |
65 | #else | 65 | #else |
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 92dc844299b6..9ba2cc88591d 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
@@ -54,6 +54,7 @@ | |||
54 | #include <linux/irq.h> | 54 | #include <linux/irq.h> |
55 | #include <linux/delay.h> | 55 | #include <linux/delay.h> |
56 | #include <linux/perf_event.h> | 56 | #include <linux/perf_event.h> |
57 | #include <asm/trace.h> | ||
57 | 58 | ||
58 | #include <asm/io.h> | 59 | #include <asm/io.h> |
59 | #include <asm/processor.h> | 60 | #include <asm/processor.h> |
@@ -268,6 +269,7 @@ void account_system_vtime(struct task_struct *tsk) | |||
268 | per_cpu(cputime_scaled_last_delta, smp_processor_id()) = deltascaled; | 269 | per_cpu(cputime_scaled_last_delta, smp_processor_id()) = deltascaled; |
269 | local_irq_restore(flags); | 270 | local_irq_restore(flags); |
270 | } | 271 | } |
272 | EXPORT_SYMBOL_GPL(account_system_vtime); | ||
271 | 273 | ||
272 | /* | 274 | /* |
273 | * Transfer the user and system times accumulated in the paca | 275 | * Transfer the user and system times accumulated in the paca |
@@ -571,6 +573,8 @@ void timer_interrupt(struct pt_regs * regs) | |||
571 | struct clock_event_device *evt = &decrementer->event; | 573 | struct clock_event_device *evt = &decrementer->event; |
572 | u64 now; | 574 | u64 now; |
573 | 575 | ||
576 | trace_timer_interrupt_entry(regs); | ||
577 | |||
574 | /* Ensure a positive value is written to the decrementer, or else | 578 | /* Ensure a positive value is written to the decrementer, or else |
575 | * some CPUs will continuue to take decrementer exceptions */ | 579 | * some CPUs will continuue to take decrementer exceptions */ |
576 | set_dec(DECREMENTER_MAX); | 580 | set_dec(DECREMENTER_MAX); |
@@ -590,6 +594,7 @@ void timer_interrupt(struct pt_regs * regs) | |||
590 | now = decrementer->next_tb - now; | 594 | now = decrementer->next_tb - now; |
591 | if (now <= DECREMENTER_MAX) | 595 | if (now <= DECREMENTER_MAX) |
592 | set_dec((int)now); | 596 | set_dec((int)now); |
597 | trace_timer_interrupt_exit(regs); | ||
593 | return; | 598 | return; |
594 | } | 599 | } |
595 | old_regs = set_irq_regs(regs); | 600 | old_regs = set_irq_regs(regs); |
@@ -620,6 +625,8 @@ void timer_interrupt(struct pt_regs * regs) | |||
620 | 625 | ||
621 | irq_exit(); | 626 | irq_exit(); |
622 | set_irq_regs(old_regs); | 627 | set_irq_regs(old_regs); |
628 | |||
629 | trace_timer_interrupt_exit(regs); | ||
623 | } | 630 | } |
624 | 631 | ||
625 | void wakeup_decrementer(void) | 632 | void wakeup_decrementer(void) |
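The trace_timer_interrupt_entry()/trace_timer_interrupt_exit() calls bracket the whole decrementer handler, including the early-return path a few lines up, and both sites pass the interrupted pt_regs. The tracepoints themselves come from the newly included asm/trace.h; the sketch below shows how such a pair is typically declared with TRACE_EVENT, with the recorded field (regs->nip) being an assumption for illustration rather than the real header's choice, and the usual TRACE_SYSTEM/define_trace boilerplate trimmed:

#undef TRACE_SYSTEM
#define TRACE_SYSTEM powerpc

#include <linux/tracepoint.h>

/* Sketch only: the real definitions live in asm/trace.h. */
TRACE_EVENT(timer_interrupt_entry,

	TP_PROTO(struct pt_regs *regs),

	TP_ARGS(regs),

	TP_STRUCT__entry(
		__field(unsigned long, nip)
	),

	TP_fast_assign(
		__entry->nip = regs->nip;
	),

	TP_printk("nip=%lx", __entry->nip)
);

/* timer_interrupt_exit would be declared the same way. */
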
@@ -777,7 +784,7 @@ int update_persistent_clock(struct timespec now) | |||
777 | return ppc_md.set_rtc_time(&tm); | 784 | return ppc_md.set_rtc_time(&tm); |
778 | } | 785 | } |
779 | 786 | ||
780 | void read_persistent_clock(struct timespec *ts) | 787 | static void __read_persistent_clock(struct timespec *ts) |
781 | { | 788 | { |
782 | struct rtc_time tm; | 789 | struct rtc_time tm; |
783 | static int first = 1; | 790 | static int first = 1; |
@@ -800,10 +807,23 @@ void read_persistent_clock(struct timespec *ts) | |||
800 | return; | 807 | return; |
801 | } | 808 | } |
802 | ppc_md.get_rtc_time(&tm); | 809 | ppc_md.get_rtc_time(&tm); |
810 | |||
803 | ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, | 811 | ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, |
804 | tm.tm_hour, tm.tm_min, tm.tm_sec); | 812 | tm.tm_hour, tm.tm_min, tm.tm_sec); |
805 | } | 813 | } |
806 | 814 | ||
815 | void read_persistent_clock(struct timespec *ts) | ||
816 | { | ||
817 | __read_persistent_clock(ts); | ||
818 | |||
819 | /* Sanitize it in case real time clock is set below EPOCH */ | ||
820 | if (ts->tv_sec < 0) { | ||
821 | ts->tv_sec = 0; | ||
822 | ts->tv_nsec = 0; | ||
823 | } | ||
824 | |||
825 | } | ||
826 | |||
807 | /* clocksource code */ | 827 | /* clocksource code */ |
808 | static cycle_t rtc_read(struct clocksource *cs) | 828 | static cycle_t rtc_read(struct clocksource *cs) |
809 | { | 829 | { |
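Splitting read_persistent_clock() this way keeps the RTC-to-seconds conversion in __read_persistent_clock() and confines the new check to the exported wrapper, so an RTC that was set to a date before the 1970 epoch can no longer hand a negative tv_sec to the generic timekeeping code. The clamp itself is trivial; a self-contained illustration in plain C, outside the kernel, purely to show the behaviour:

#include <stdio.h>
#include <time.h>

/* Mirror of the wrapper's clamp: a pre-1970 RTC reading becomes the epoch. */
static void sanitize(struct timespec *ts)
{
	if (ts->tv_sec < 0) {
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
	}
}

int main(void)
{
	struct timespec ts = { .tv_sec = -86400, .tv_nsec = 500 }; /* Dec 31 1969 */

	sanitize(&ts);
	printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);	/* 0.000000000 */
	return 0;
}
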
@@ -815,7 +835,8 @@ static cycle_t timebase_read(struct clocksource *cs) | |||
815 | return (cycle_t)get_tb(); | 835 | return (cycle_t)get_tb(); |
816 | } | 836 | } |
817 | 837 | ||
818 | void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) | 838 | void update_vsyscall(struct timespec *wall_time, struct clocksource *clock, |
839 | u32 mult) | ||
819 | { | 840 | { |
820 | u64 t2x, stamp_xsec; | 841 | u64 t2x, stamp_xsec; |
821 | 842 | ||
@@ -828,7 +849,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) | |||
828 | 849 | ||
829 | /* XXX this assumes clock->shift == 22 */ | 850 | /* XXX this assumes clock->shift == 22 */ |
830 | /* 4611686018 ~= 2^(20+64-22) / 1e9 */ | 851 | /* 4611686018 ~= 2^(20+64-22) / 1e9 */ |
831 | t2x = (u64) clock->mult * 4611686018ULL; | 852 | t2x = (u64) mult * 4611686018ULL; |
832 | stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC; | 853 | stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC; |
833 | do_div(stamp_xsec, 1000000000); | 854 | do_div(stamp_xsec, 1000000000); |
834 | stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC; | 855 | stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC; |
@@ -905,7 +926,7 @@ static void register_decrementer_clockevent(int cpu) | |||
905 | *dec = decrementer_clockevent; | 926 | *dec = decrementer_clockevent; |
906 | dec->cpumask = cpumask_of(cpu); | 927 | dec->cpumask = cpumask_of(cpu); |
907 | 928 | ||
908 | printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n", | 929 | printk(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n", |
909 | dec->name, dec->mult, dec->shift, cpu); | 930 | dec->name, dec->mult, dec->shift, cpu); |
910 | 931 | ||
911 | clockevents_register_device(dec); | 932 | clockevents_register_device(dec); |
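Two independent fixes sit in the tail of the file: update_vsyscall() now receives the multiplier from the timekeeping core as an explicit argument instead of reading clock->mult itself, so the vDSO sees the same (possibly NTP-adjusted) value the kernel is using, and the clockevent debug printk switches from %lx to %x, consistent with dec->mult now being a 32-bit quantity. The 4611686018ULL constant used in the conversion can be sanity-checked with a few lines of plain C:

#include <stdio.h>

int main(void)
{
	/* 2^(20+64-22) = 2^62; dividing by 1e9 reproduces the constant in
	 * update_vsyscall(), which is only valid while clock->shift == 22. */
	unsigned long long c = (1ULL << 62) / 1000000000ULL;

	printf("%llu\n", c);	/* prints 4611686018 */
	return 0;
}
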
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 6f0ae1a9bfae..804f0f30f227 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c | |||
@@ -198,28 +198,6 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) | |||
198 | info.si_code = code; | 198 | info.si_code = code; |
199 | info.si_addr = (void __user *) addr; | 199 | info.si_addr = (void __user *) addr; |
200 | force_sig_info(signr, &info, current); | 200 | force_sig_info(signr, &info, current); |
201 | |||
202 | /* | ||
203 | * Init gets no signals that it doesn't have a handler for. | ||
204 | * That's all very well, but if it has caused a synchronous | ||
205 | * exception and we ignore the resulting signal, it will just | ||
206 | * generate the same exception over and over again and we get | ||
207 | * nowhere. Better to kill it and let the kernel panic. | ||
208 | */ | ||
209 | if (is_global_init(current)) { | ||
210 | __sighandler_t handler; | ||
211 | |||
212 | spin_lock_irq(&current->sighand->siglock); | ||
213 | handler = current->sighand->action[signr-1].sa.sa_handler; | ||
214 | spin_unlock_irq(¤t->sighand->siglock); | ||
215 | if (handler == SIG_DFL) { | ||
216 | /* init has generated a synchronous exception | ||
217 | and it doesn't have a handler for the signal */ | ||
218 | printk(KERN_CRIT "init has generated signal %d " | ||
219 | "but has no handler for it\n", signr); | ||
220 | do_exit(signr); | ||
221 | } | ||
222 | } | ||
223 | } | 201 | } |
224 | 202 | ||
225 | #ifdef CONFIG_PPC64 | 203 | #ifdef CONFIG_PPC64 |
@@ -759,7 +737,7 @@ static int emulate_instruction(struct pt_regs *regs) | |||
759 | 737 | ||
760 | /* Emulate the mfspr rD, PVR. */ | 738 | /* Emulate the mfspr rD, PVR. */ |
761 | if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) { | 739 | if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) { |
762 | PPC_WARN_EMULATED(mfpvr); | 740 | PPC_WARN_EMULATED(mfpvr, regs); |
763 | rd = (instword >> 21) & 0x1f; | 741 | rd = (instword >> 21) & 0x1f; |
764 | regs->gpr[rd] = mfspr(SPRN_PVR); | 742 | regs->gpr[rd] = mfspr(SPRN_PVR); |
765 | return 0; | 743 | return 0; |
@@ -767,7 +745,7 @@ static int emulate_instruction(struct pt_regs *regs) | |||
767 | 745 | ||
768 | /* Emulating the dcba insn is just a no-op. */ | 746 | /* Emulating the dcba insn is just a no-op. */ |
769 | if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) { | 747 | if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) { |
770 | PPC_WARN_EMULATED(dcba); | 748 | PPC_WARN_EMULATED(dcba, regs); |
771 | return 0; | 749 | return 0; |
772 | } | 750 | } |
773 | 751 | ||
@@ -776,7 +754,7 @@ static int emulate_instruction(struct pt_regs *regs) | |||
776 | int shift = (instword >> 21) & 0x1c; | 754 | int shift = (instword >> 21) & 0x1c; |
777 | unsigned long msk = 0xf0000000UL >> shift; | 755 | unsigned long msk = 0xf0000000UL >> shift; |
778 | 756 | ||
779 | PPC_WARN_EMULATED(mcrxr); | 757 | PPC_WARN_EMULATED(mcrxr, regs); |
780 | regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk); | 758 | regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk); |
781 | regs->xer &= ~0xf0000000UL; | 759 | regs->xer &= ~0xf0000000UL; |
782 | return 0; | 760 | return 0; |
@@ -784,19 +762,19 @@ static int emulate_instruction(struct pt_regs *regs) | |||
784 | 762 | ||
785 | /* Emulate load/store string insn. */ | 763 | /* Emulate load/store string insn. */ |
786 | if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) { | 764 | if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) { |
787 | PPC_WARN_EMULATED(string); | 765 | PPC_WARN_EMULATED(string, regs); |
788 | return emulate_string_inst(regs, instword); | 766 | return emulate_string_inst(regs, instword); |
789 | } | 767 | } |
790 | 768 | ||
791 | /* Emulate the popcntb (Population Count Bytes) instruction. */ | 769 | /* Emulate the popcntb (Population Count Bytes) instruction. */ |
792 | if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) { | 770 | if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) { |
793 | PPC_WARN_EMULATED(popcntb); | 771 | PPC_WARN_EMULATED(popcntb, regs); |
794 | return emulate_popcntb_inst(regs, instword); | 772 | return emulate_popcntb_inst(regs, instword); |
795 | } | 773 | } |
796 | 774 | ||
797 | /* Emulate isel (Integer Select) instruction */ | 775 | /* Emulate isel (Integer Select) instruction */ |
798 | if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) { | 776 | if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) { |
799 | PPC_WARN_EMULATED(isel); | 777 | PPC_WARN_EMULATED(isel, regs); |
800 | return emulate_isel(regs, instword); | 778 | return emulate_isel(regs, instword); |
801 | } | 779 | } |
802 | 780 | ||
@@ -995,7 +973,7 @@ void SoftwareEmulation(struct pt_regs *regs) | |||
995 | #ifdef CONFIG_MATH_EMULATION | 973 | #ifdef CONFIG_MATH_EMULATION |
996 | errcode = do_mathemu(regs); | 974 | errcode = do_mathemu(regs); |
997 | if (errcode >= 0) | 975 | if (errcode >= 0) |
998 | PPC_WARN_EMULATED(math); | 976 | PPC_WARN_EMULATED(math, regs); |
999 | 977 | ||
1000 | switch (errcode) { | 978 | switch (errcode) { |
1001 | case 0: | 979 | case 0: |
@@ -1018,7 +996,7 @@ void SoftwareEmulation(struct pt_regs *regs) | |||
1018 | #elif defined(CONFIG_8XX_MINIMAL_FPEMU) | 996 | #elif defined(CONFIG_8XX_MINIMAL_FPEMU) |
1019 | errcode = Soft_emulate_8xx(regs); | 997 | errcode = Soft_emulate_8xx(regs); |
1020 | if (errcode >= 0) | 998 | if (errcode >= 0) |
1021 | PPC_WARN_EMULATED(8xx); | 999 | PPC_WARN_EMULATED(8xx, regs); |
1022 | 1000 | ||
1023 | switch (errcode) { | 1001 | switch (errcode) { |
1024 | case 0: | 1002 | case 0: |
@@ -1129,7 +1107,7 @@ void altivec_assist_exception(struct pt_regs *regs) | |||
1129 | 1107 | ||
1130 | flush_altivec_to_thread(current); | 1108 | flush_altivec_to_thread(current); |
1131 | 1109 | ||
1132 | PPC_WARN_EMULATED(altivec); | 1110 | PPC_WARN_EMULATED(altivec, regs); |
1133 | err = emulate_altivec(regs); | 1111 | err = emulate_altivec(regs); |
1134 | if (err == 0) { | 1112 | if (err == 0) { |
1135 | regs->nip += 4; /* skip emulated instruction */ | 1113 | regs->nip += 4; /* skip emulated instruction */ |
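Every PPC_WARN_EMULATED() site in traps.c now passes the interrupted pt_regs, and the special case that killed init when it took a synchronous signal with no handler is dropped from _exception(), presumably because equivalent handling now exists in generic code. The extra regs argument suggests the macro now feeds something that wants the faulting context, for example a perf software event; the wrapper below is a hedged sketch of that shape, an assumption rather than the actual asm/emulated_ops.h definition:

#include <linux/perf_event.h>

/* Assumed shape only: raise a perf software event attributed to the
 * faulting context in addition to the existing per-instruction
 * emulation counters. */
#define PPC_WARN_EMULATED_SKETCH(type, regs)				\
	do {								\
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,		\
			      1, 0, (regs), 0);				\
		/* ... bump the existing "type" emulation counter ... */\
	} while (0)
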
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 94e2df3cae07..d84d19224a95 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c | |||
@@ -50,6 +50,9 @@ | |||
50 | /* Max supported size for symbol names */ | 50 | /* Max supported size for symbol names */ |
51 | #define MAX_SYMNAME 64 | 51 | #define MAX_SYMNAME 64 |
52 | 52 | ||
53 | /* The alignment of the vDSO */ | ||
54 | #define VDSO_ALIGNMENT (1 << 16) | ||
55 | |||
53 | extern char vdso32_start, vdso32_end; | 56 | extern char vdso32_start, vdso32_end; |
54 | static void *vdso32_kbase = &vdso32_start; | 57 | static void *vdso32_kbase = &vdso32_start; |
55 | static unsigned int vdso32_pages; | 58 | static unsigned int vdso32_pages; |
@@ -231,15 +234,21 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | |||
231 | * pick a base address for the vDSO in process space. We try to put it | 234 | * pick a base address for the vDSO in process space. We try to put it |
232 | * at vdso_base which is the "natural" base for it, but we might fail | 235 | * at vdso_base which is the "natural" base for it, but we might fail |
233 | * and end up putting it elsewhere. | 236 | * and end up putting it elsewhere. |
237 | * Add enough to the size so that the result can be aligned. | ||
234 | */ | 238 | */ |
235 | down_write(&mm->mmap_sem); | 239 | down_write(&mm->mmap_sem); |
236 | vdso_base = get_unmapped_area(NULL, vdso_base, | 240 | vdso_base = get_unmapped_area(NULL, vdso_base, |
237 | vdso_pages << PAGE_SHIFT, 0, 0); | 241 | (vdso_pages << PAGE_SHIFT) + |
242 | ((VDSO_ALIGNMENT - 1) & PAGE_MASK), | ||
243 | 0, 0); | ||
238 | if (IS_ERR_VALUE(vdso_base)) { | 244 | if (IS_ERR_VALUE(vdso_base)) { |
239 | rc = vdso_base; | 245 | rc = vdso_base; |
240 | goto fail_mmapsem; | 246 | goto fail_mmapsem; |
241 | } | 247 | } |
242 | 248 | ||
249 | /* Add required alignment. */ | ||
250 | vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT); | ||
251 | |||
243 | /* | 252 | /* |
244 | * Put vDSO base into mm struct. We need to do this before calling | 253 | * Put vDSO base into mm struct. We need to do this before calling |
245 | * install_special_mapping or the perf counter mmap tracking code | 254 | * install_special_mapping or the perf counter mmap tracking code |
@@ -712,7 +721,7 @@ static int __init vdso_init(void) | |||
712 | 721 | ||
713 | #ifdef CONFIG_PPC64 | 722 | #ifdef CONFIG_PPC64 |
714 | /* | 723 | /* |
715 | * Fill up the "systemcfg" stuff for backward compatiblity | 724 | * Fill up the "systemcfg" stuff for backward compatibility |
716 | */ | 725 | */ |
717 | strcpy((char *)vdso_data->eye_catcher, "SYSTEMCFG:PPC64"); | 726 | strcpy((char *)vdso_data->eye_catcher, "SYSTEMCFG:PPC64"); |
718 | vdso_data->version.major = SYSTEMCFG_MAJOR; | 727 | vdso_data->version.major = SYSTEMCFG_MAJOR; |
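The vDSO is now placed at VDSO_ALIGNMENT (64 KiB) granularity: the code reserves the image size plus almost a full alignment unit of slack and then rounds the returned address up with ALIGN(), so the rounded base is guaranteed to still lie inside the reservation. The idiom is easy to check in isolation; the values below are arbitrary examples in plain C:

#include <stdio.h>

#define VDSO_ALIGNMENT	(1UL << 16)
#define ALIGN_UP(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
	/* Pretend this is what get_unmapped_area() handed back for the
	 * over-sized request; rounding up stays inside that reservation. */
	unsigned long base = 0x0f5a1000UL;
	unsigned long aligned = ALIGN_UP(base, VDSO_ALIGNMENT);

	printf("%#lx -> %#lx\n", base, aligned);	/* 0xf5a1000 -> 0xf5b0000 */
	return 0;
}
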
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S index 904ef1360dd7..0546bcd49cd0 100644 --- a/arch/powerpc/kernel/vdso32/vdso32.lds.S +++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S | |||
@@ -25,7 +25,7 @@ SECTIONS | |||
25 | . = ALIGN(16); | 25 | . = ALIGN(16); |
26 | .text : { | 26 | .text : { |
27 | *(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*) | 27 | *(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*) |
28 | } | 28 | } :text |
29 | PROVIDE(__etext = .); | 29 | PROVIDE(__etext = .); |
30 | PROVIDE(_etext = .); | 30 | PROVIDE(_etext = .); |
31 | PROVIDE(etext = .); | 31 | PROVIDE(etext = .); |
@@ -56,7 +56,7 @@ SECTIONS | |||
56 | .fixup : { *(.fixup) } | 56 | .fixup : { *(.fixup) } |
57 | 57 | ||
58 | .dynamic : { *(.dynamic) } :text :dynamic | 58 | .dynamic : { *(.dynamic) } :text :dynamic |
59 | .got : { *(.got) } | 59 | .got : { *(.got) } :text |
60 | .plt : { *(.plt) } | 60 | .plt : { *(.plt) } |
61 | 61 | ||
62 | _end = .; | 62 | _end = .; |
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S index 67b6916f0e94..fe460482fa68 100644 --- a/arch/powerpc/kernel/vector.S +++ b/arch/powerpc/kernel/vector.S | |||
@@ -58,7 +58,7 @@ _GLOBAL(load_up_altivec) | |||
58 | * all 1's | 58 | * all 1's |
59 | */ | 59 | */ |
60 | mfspr r4,SPRN_VRSAVE | 60 | mfspr r4,SPRN_VRSAVE |
61 | cmpdi 0,r4,0 | 61 | cmpwi 0,r4,0 |
62 | bne+ 1f | 62 | bne+ 1f |
63 | li r4,-1 | 63 | li r4,-1 |
64 | mtspr SPRN_VRSAVE,r4 | 64 | mtspr SPRN_VRSAVE,r4 |
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index f56429362a12..27735a7ac12b 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S | |||
@@ -236,6 +236,7 @@ SECTIONS | |||
236 | READ_MOSTLY_DATA(L1_CACHE_BYTES) | 236 | READ_MOSTLY_DATA(L1_CACHE_BYTES) |
237 | } | 237 | } |
238 | 238 | ||
239 | . = ALIGN(PAGE_SIZE); | ||
239 | .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { | 240 | .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { |
240 | NOSAVE_DATA | 241 | NOSAVE_DATA |
241 | } | 242 | } |