author		Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit		c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree		ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/powerpc/kernel
parent		ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent		6a00f206debf8a5c8899055726ad127dbeeed098 (diff)

Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c

Diffstat (limited to 'arch/powerpc/kernel'):
 97 files changed, 4570 insertions(+), 2080 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 1dda70129141..e8b981897d44 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -29,18 +29,23 @@ endif
 obj-y				:= cputable.o ptrace.o syscalls.o \
 				   irq.o align.o signal_32.o pmc.o vdso.o \
 				   init_task.o process.o systbl.o idle.o \
-				   signal.o sysfs.o cacheinfo.o
-obj-y				+= vdso32/
+				   signal.o sysfs.o cacheinfo.o time.o \
+				   prom.o traps.o setup-common.o \
+				   udbg.o misc.o io.o dma.o \
+				   misc_$(CONFIG_WORD_SIZE).o vdso32/
 obj-$(CONFIG_PPC64)		+= setup_64.o sys_ppc32.o \
 				   signal_64.o ptrace32.o \
 				   paca.o nvram_64.o firmware.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 obj-$(CONFIG_PPC_BOOK3S_64)	+= cpu_setup_ppc970.o cpu_setup_pa6t.o
+obj-$(CONFIG_PPC_BOOK3S_64)	+= cpu_setup_power7.o
 obj64-$(CONFIG_RELOCATABLE)	+= reloc_64.o
 obj-$(CONFIG_PPC_BOOK3E_64)	+= exceptions-64e.o idle_book3e.o
+obj-$(CONFIG_PPC_A2)		+= cpu_setup_a2.o
 obj-$(CONFIG_PPC64)		+= vdso64/
 obj-$(CONFIG_ALTIVEC)		+= vecemu.o
 obj-$(CONFIG_PPC_970_NAP)	+= idle_power4.o
+obj-$(CONFIG_PPC_P7_NAP)	+= idle_power7.o
 obj-$(CONFIG_PPC_OF)		+= of_platform.o prom_parse.o
 obj-$(CONFIG_PPC_CLOCK)		+= clock.o
 procfs-y			:= proc_powerpc.o
@@ -55,7 +60,9 @@ obj-$(CONFIG_IBMVIO)	+= vio.o
 obj-$(CONFIG_IBMEBUS)		+= ibmebus.o
 obj-$(CONFIG_GENERIC_TBSYNC)	+= smp-tbsync.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
+ifeq ($(CONFIG_PPC32),y)
 obj-$(CONFIG_E500)		+= idle_e500.o
+endif
 obj-$(CONFIG_6xx)		+= idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
 obj-$(CONFIG_TAU)		+= tau_6xx.o
 obj-$(CONFIG_HIBERNATION)	+= swsusp.o suspend.o
@@ -67,20 +74,16 @@ endif
 obj64-$(CONFIG_HIBERNATION)	+= swsusp_asm64.o
 obj-$(CONFIG_MODULES)		+= module.o module_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_44x)		+= cpu_setup_44x.o
-obj-$(CONFIG_FSL_BOOKE)		+= cpu_setup_fsl_booke.o dbell.o
+obj-$(CONFIG_PPC_FSL_BOOK3E)	+= cpu_setup_fsl_booke.o dbell.o
 obj-$(CONFIG_PPC_BOOK3E_64)	+= dbell.o
 
 extra-y				:= head_$(CONFIG_WORD_SIZE).o
-extra-$(CONFIG_PPC_BOOK3E_32)	:= head_new_booke.o
 extra-$(CONFIG_40x)		:= head_40x.o
 extra-$(CONFIG_44x)		:= head_44x.o
 extra-$(CONFIG_FSL_BOOKE)	:= head_fsl_booke.o
 extra-$(CONFIG_8xx)		:= head_8xx.o
 extra-y				+= vmlinux.lds
 
-obj-y				+= time.o prom.o traps.o setup-common.o \
-				   udbg.o misc.o io.o dma.o \
-				   misc_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_PPC32)		+= entry_32.o setup_32.o
 obj-$(CONFIG_PPC64)		+= dma-iommu.o iommu.o
 obj-$(CONFIG_KGDB)		+= kgdb.o
@@ -102,8 +105,11 @@ obj-$(CONFIG_KEXEC)	+= machine_kexec.o crash.o \
 obj-$(CONFIG_AUDIT)		+= audit.o
 obj64-$(CONFIG_AUDIT)		+= compat_audit.o
 
+obj-$(CONFIG_PPC_IO_WORKAROUNDS)	+= io-workarounds.o
+
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
+obj-$(CONFIG_FTRACE_SYSCALLS)	+= ftrace.o
 obj-$(CONFIG_PERF_EVENTS)	+= perf_callchain.o
 
 obj-$(CONFIG_PPC_PERF_CTRS)	+= perf_event.o
@@ -127,6 +133,8 @@ ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC),)
 obj-y				+= ppc_save_regs.o
 endif
 
+obj-$(CONFIG_KVM_GUEST)		+= kvm.o kvm_emul.o
+
 # Disable GCOV in odd or sensitive code
 GCOV_PROFILE_prom_init.o := n
 GCOV_PROFILE_ftrace.o := n
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index b876e989220b..8184ee97e484 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -889,7 +889,7 @@ int fix_alignment(struct pt_regs *regs)
 #ifdef CONFIG_PPC_FPU
 		preempt_disable();
 		enable_kernel_fp();
-		cvt_df(&data.dd, (float *)&data.v[4], &current->thread);
+		cvt_df(&data.dd, (float *)&data.v[4]);
 		preempt_enable();
 #else
 		return 0;
@@ -933,7 +933,7 @@ int fix_alignment(struct pt_regs *regs)
 #ifdef CONFIG_PPC_FPU
 		preempt_disable();
 		enable_kernel_fp();
-		cvt_fd((float *)&data.v[4], &data.dd, &current->thread);
+		cvt_fd((float *)&data.v[4], &data.dd);
 		preempt_enable();
 #else
 		return 0;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 1c0607ddccc0..36e1c8a29be8 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -48,11 +48,11 @@
 #ifdef CONFIG_PPC_ISERIES
 #include <asm/iseries/alpaca.h>
 #endif
-#ifdef CONFIG_KVM
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_GUEST)
 #include <linux/kvm_host.h>
-#ifndef CONFIG_BOOKE
-#include <asm/kvm_book3s.h>
 #endif
+#if defined(CONFIG_KVM) && defined(CONFIG_PPC_BOOK3S)
+#include <asm/kvm_book3s.h>
 #endif
 
 #ifdef CONFIG_PPC32
@@ -61,7 +61,7 @@
 #endif
 #endif
 
-#if defined(CONFIG_FSL_BOOKE)
+#if defined(CONFIG_PPC_FSL_BOOK3E)
 #include "../mm/mmu_decl.h"
 #endif
 
@@ -74,6 +74,7 @@ int main(void)
 	DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
 	DEFINE(SIGSEGV, SIGSEGV);
 	DEFINE(NMI_MASK, NMI_MASK);
+	DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr));
 #else
 	DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
 #endif /* CONFIG_PPC64 */
@@ -181,17 +182,19 @@ int main(void)
 	       offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid));
 	DEFINE(SLBSHADOW_STACKESID,
 	       offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid));
+	DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area));
 	DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
 	DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
 	DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
 	DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
-	DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area));
+	DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx));
+	DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx));
 #endif /* CONFIG_PPC_STD_MMU_64 */
 	DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
 	DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
 	DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state));
-	DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
-	DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
+	DEFINE(PACA_STARTTIME, offsetof(struct paca_struct, starttime));
+	DEFINE(PACA_STARTTIME_USER, offsetof(struct paca_struct, starttime_user));
 	DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
 	DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
 	DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
@@ -207,7 +210,6 @@ int main(void)
 	DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));
 
 	/* Interrupt register frame */
-	DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
 	DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
 	DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
 #ifdef CONFIG_PPC64
@@ -394,12 +396,14 @@ int main(void)
 	DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
 	DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
 	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
-	DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
+	DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
 	DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
 	DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
 	DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
 	DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
 	DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
+	DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
+	DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
 
 	/* book3s */
 #ifdef CONFIG_PPC_BOOK3S
@@ -464,11 +468,27 @@ int main(void)
 	DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
 #endif /* CONFIG_PPC_BOOK3S */
 #endif
+
+#ifdef CONFIG_KVM_GUEST
+	DEFINE(KVM_MAGIC_SCRATCH1, offsetof(struct kvm_vcpu_arch_shared,
+					    scratch1));
+	DEFINE(KVM_MAGIC_SCRATCH2, offsetof(struct kvm_vcpu_arch_shared,
+					    scratch2));
+	DEFINE(KVM_MAGIC_SCRATCH3, offsetof(struct kvm_vcpu_arch_shared,
+					    scratch3));
+	DEFINE(KVM_MAGIC_INT, offsetof(struct kvm_vcpu_arch_shared,
+				       int_pending));
+	DEFINE(KVM_MAGIC_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
+	DEFINE(KVM_MAGIC_CRITICAL, offsetof(struct kvm_vcpu_arch_shared,
+					    critical));
+	DEFINE(KVM_MAGIC_SR, offsetof(struct kvm_vcpu_arch_shared, sr));
+#endif
+
 #ifdef CONFIG_44x
 	DEFINE(PGD_T_LOG2, PGD_T_LOG2);
 	DEFINE(PTE_T_LOG2, PTE_T_LOG2);
 #endif
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_FSL_BOOK3E
 	DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
 	DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0));
 	DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1));
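
Note: the DEFINE() entries touched above never execute at runtime. asm-offsets.c exists only to be compiled to assembly, from which Kbuild extracts each marker into a generated header so hand-written assembly can refer to C struct layouts by name. A minimal sketch of the mechanism follows; the stand-in struct is illustrative, and the DEFINE macro shown mirrors the form used by the kernel's include/linux/kbuild.h:

	#include <stddef.h>	/* offsetof */

	/* Plant a "->NAME value" marker in the compiler's assembly output;
	 * a sed script in the Kbuild rules rewrites each marker into
	 * "#define NAME value" in the generated asm-offsets.h. */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	struct thread_struct_sketch {	/* illustrative stand-in struct */
		unsigned long dscr;
	};

	int main(void)
	{
		/* Assembly can then use THREAD_DSCR as a plain immediate,
		 * e.g.  ld r10,THREAD_DSCR(r3)  */
		DEFINE(THREAD_DSCR, offsetof(struct thread_struct_sketch, dscr));
		return 0;
	}
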
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 625942ae5585..60b3e377b1e4 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -99,7 +99,7 @@ void __init btext_prepare_BAT(void)
 
 /* This function can be used to enable the early boot text when doing
  * OF booting or within bootx init. It must be followed by a btext_unmap()
- * call before the logical address becomes unuseable
+ * call before the logical address becomes unusable
  */
 void __init btext_setup_display(int width, int height, int depth, int pitch,
 				unsigned long address)
diff --git a/arch/powerpc/kernel/cpu_setup_44x.S b/arch/powerpc/kernel/cpu_setup_44x.S
index 7d606f89a839..e32b4a9a2c22 100644
--- a/arch/powerpc/kernel/cpu_setup_44x.S
+++ b/arch/powerpc/kernel/cpu_setup_44x.S
@@ -35,6 +35,7 @@ _GLOBAL(__setup_cpu_440grx)
 _GLOBAL(__setup_cpu_460ex)
 _GLOBAL(__setup_cpu_460gt)
 _GLOBAL(__setup_cpu_460sx)
+_GLOBAL(__setup_cpu_apm821xx)
 	mflr	r4
 	bl	__init_fpu_44x
 	bl	__fixup_440A_mcheck
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index 55cba4a8a959..f8cd9fba4d35 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -18,7 +18,7 @@
 #include <asm/mmu.h>
 
 _GLOBAL(__setup_cpu_603)
-	mflr	r4
+	mflr	r5
 BEGIN_MMU_FTR_SECTION
 	li	r10,0
 	mtspr	SPRN_SPRG_603_LRU,r10	/* init SW LRU tracking */
@@ -27,60 +27,60 @@ BEGIN_FTR_SECTION
 	bl	__init_fpu_registers
 END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
 	bl	setup_common_caches
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_604)
-	mflr	r4
+	mflr	r5
 	bl	setup_common_caches
 	bl	setup_604_hid0
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_750)
-	mflr	r4
+	mflr	r5
 	bl	__init_fpu_registers
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_750cx)
-	mflr	r4
+	mflr	r5
 	bl	__init_fpu_registers
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
 	bl	setup_750cx
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_750fx)
-	mflr	r4
+	mflr	r5
 	bl	__init_fpu_registers
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
 	bl	setup_750fx
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_7400)
-	mflr	r4
+	mflr	r5
 	bl	__init_fpu_registers
 	bl	setup_7400_workarounds
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_7410)
-	mflr	r4
+	mflr	r5
 	bl	__init_fpu_registers
 	bl	setup_7410_workarounds
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
 	li	r3,0
 	mtspr	SPRN_L2CR2,r3
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_745x)
-	mflr	r4
+	mflr	r5
 	bl	setup_common_caches
 	bl	setup_745x_specifics
-	mtlr	r4
+	mtlr	r5
 	blr
 
 /* Enable caches for 603's, 604, 750 & 7400 */
@@ -194,10 +194,10 @@ setup_750cx:
 	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
 	cror	4*cr0+eq,4*cr0+eq,4*cr2+eq
 	bnelr
-	lwz	r6,CPU_SPEC_FEATURES(r5)
+	lwz	r6,CPU_SPEC_FEATURES(r4)
 	li	r7,CPU_FTR_CAN_NAP
 	andc	r6,r6,r7
-	stw	r6,CPU_SPEC_FEATURES(r5)
+	stw	r6,CPU_SPEC_FEATURES(r4)
 	blr
 
 /* 750fx specific
@@ -225,12 +225,12 @@ BEGIN_FTR_SECTION
 	andis.	r11,r11,L3CR_L3E@h
 	beq	1f
 END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
-	lwz	r6,CPU_SPEC_FEATURES(r5)
+	lwz	r6,CPU_SPEC_FEATURES(r4)
 	andi.	r0,r6,CPU_FTR_L3_DISABLE_NAP
 	beq	1f
 	li	r7,CPU_FTR_CAN_NAP
 	andc	r6,r6,r7
-	stw	r6,CPU_SPEC_FEATURES(r5)
+	stw	r6,CPU_SPEC_FEATURES(r4)
 1:
 	mfspr	r11,SPRN_HID0
 
diff --git a/arch/powerpc/kernel/cpu_setup_a2.S b/arch/powerpc/kernel/cpu_setup_a2.S
new file mode 100644
index 000000000000..7f818feaa7a5
--- /dev/null
+++ b/arch/powerpc/kernel/cpu_setup_a2.S
@@ -0,0 +1,114 @@
+/*
+ * A2 specific assembly support code
+ *
+ * Copyright 2009 Ben Herrenschmidt, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/asm-offsets.h>
+#include <asm/ppc_asm.h>
+#include <asm/ppc-opcode.h>
+#include <asm/processor.h>
+#include <asm/reg_a2.h>
+#include <asm/reg.h>
+#include <asm/thread_info.h>
+
+/*
+ * Disable thdid and class fields in ERATs to bump PID to full 14 bits capacity.
+ * This also prevents external LPID accesses but that isn't a problem when not a
+ * guest. Under PV, this setting will be ignored and MMUCR will return the right
+ * number of PID bits we can use.
+ */
+#define MMUCR1_EXTEND_PID \
+	(MMUCR1_ICTID | MMUCR1_ITTID | MMUCR1_DCTID | \
+	 MMUCR1_DTTID | MMUCR1_DCCD)
+
+/*
+ * Use extended PIDs if enabled.
+ * Don't clear the ERATs on context sync events and enable I & D LRU.
+ * Enable ERAT back invalidate when tlbwe overwrites an entry.
+ */
+#define INITIAL_MMUCR1 \
+	(MMUCR1_EXTEND_PID | MMUCR1_CSINV_NEVER | MMUCR1_IRRE | \
+	 MMUCR1_DRRE | MMUCR1_TLBWE_BINV)
+
+_GLOBAL(__setup_cpu_a2)
+	/* Some of these are actually thread local and some are
+	 * core local but doing it always won't hurt
+	 */
+
+#ifdef CONFIG_PPC_WSP_COPRO
+	/* Make sure ACOP starts out as zero */
+	li	r3,0
+	mtspr	SPRN_ACOP,r3
+
+	/* Enable icswx instruction */
+	mfspr	r3,SPRN_A2_CCR2
+	ori	r3,r3,A2_CCR2_ENABLE_ICSWX
+	mtspr	SPRN_A2_CCR2,r3
+
+	/* Unmask all CTs in HACOP */
+	li	r3,-1
+	mtspr	SPRN_HACOP,r3
+#endif /* CONFIG_PPC_WSP_COPRO */
+
+	/* Enable doorbell */
+	mfspr	r3,SPRN_A2_CCR2
+	oris	r3,r3,A2_CCR2_ENABLE_PC@h
+	mtspr	SPRN_A2_CCR2,r3
+	isync
+
+	/* Setup CCR0 to disable power saving for now as it's busted
+	 * in the current implementations. Setup CCR1 to wake on
+	 * interrupts normally (we write the default value but who
+	 * knows what FW may have clobbered...)
+	 */
+	li	r3,0
+	mtspr	SPRN_A2_CCR0, r3
+	LOAD_REG_IMMEDIATE(r3,0x0f0f0f0f)
+	mtspr	SPRN_A2_CCR1, r3
+
+	/* Initialise MMUCR1 */
+	lis	r3,INITIAL_MMUCR1@h
+	ori	r3,r3,INITIAL_MMUCR1@l
+	mtspr	SPRN_MMUCR1,r3
+
+	/* Set MMUCR2 to enable 4K, 64K, 1M, 16M and 1G pages */
+	LOAD_REG_IMMEDIATE(r3, 0x000a7531)
+	mtspr	SPRN_MMUCR2,r3
+
+	/* Set MMUCR3 to write all thids bit to the TLB */
+	LOAD_REG_IMMEDIATE(r3, 0x0000000f)
+	mtspr	SPRN_MMUCR3,r3
+
+	/* Don't do ERAT stuff if running guest mode */
+	mfmsr	r3
+	andis.	r0,r3,MSR_GS@h
+	bne	1f
+
+	/* Now set the I-ERAT watermark to 15 */
+	lis	r4,(MMUCR0_TLBSEL_I|MMUCR0_ECL)@h
+	mtspr	SPRN_MMUCR0, r4
+	li	r4,A2_IERAT_SIZE-1
+	PPC_ERATWE(r4,r4,3)
+
+	/* Now set the D-ERAT watermark to 31 */
+	lis	r4,(MMUCR0_TLBSEL_D|MMUCR0_ECL)@h
+	mtspr	SPRN_MMUCR0, r4
+	li	r4,A2_DERAT_SIZE-1
+	PPC_ERATWE(r4,r4,3)
+
+	/* And invalidate the beast just in case. That won't get rid of
+	 * a bolted entry though it will be in LRU and so will go away eventually
+	 * but let's not bother for now
+	 */
+	PPC_ERATILX(0,0,0)
+1:
+	blr
+
+_GLOBAL(__restore_cpu_a2)
+	b	__setup_cpu_a2
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index 0adb50ad8031..8053db02b85e 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -51,6 +51,7 @@ _GLOBAL(__e500_dcache_setup)
 	isync
 	blr
 
+#ifdef CONFIG_PPC32
 _GLOBAL(__setup_cpu_e200)
 	/* enable dedicated debug exception handling resources (Debug APU) */
 	mfspr	r3,SPRN_HID0
@@ -63,6 +64,12 @@ _GLOBAL(__setup_cpu_e500v2)
 	bl	__e500_icache_setup
 	bl	__e500_dcache_setup
 	bl	__setup_e500_ivors
+#ifdef CONFIG_FSL_RIO
+	/* Ensure that RFXE is set */
+	mfspr	r3,SPRN_HID1
+	oris	r3,r3,HID1_RFXE@h
+	mtspr	SPRN_HID1,r3
+#endif
 	mtlr	r4
 	blr
 _GLOBAL(__setup_cpu_e500mc)
@@ -72,3 +79,20 @@ _GLOBAL(__setup_cpu_e500mc)
 	bl	__setup_e500mc_ivors
 	mtlr	r4
 	blr
+#endif
+/* Right now, restore and setup are the same thing */
+_GLOBAL(__restore_cpu_e5500)
+_GLOBAL(__setup_cpu_e5500)
+	mflr	r4
+	bl	__e500_icache_setup
+	bl	__e500_dcache_setup
+#ifdef CONFIG_PPC_BOOK3E_64
+	bl	.__setup_base_ivors
+	bl	.setup_perfmon_ivor
+	bl	.setup_doorbell_ivors
+	bl	.setup_ehv_ivors
+#else
+	bl	__setup_e500mc_ivors
+#endif
+	mtlr	r4
+	blr
diff --git a/arch/powerpc/kernel/cpu_setup_power7.S b/arch/powerpc/kernel/cpu_setup_power7.S
new file mode 100644
index 000000000000..4f9a93fcfe07
--- /dev/null
+++ b/arch/powerpc/kernel/cpu_setup_power7.S
@@ -0,0 +1,91 @@
+/*
+ * This file contains low level CPU setup functions.
+ * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/cputable.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/cache.h>
+
+/* Entry: r3 = crap, r4 = ptr to cputable entry
+ *
+ * Note that we can be called twice for pseudo-PVRs
+ */
+_GLOBAL(__setup_cpu_power7)
+	mflr	r11
+	bl	__init_hvmode_206
+	mtlr	r11
+	beqlr
+	li	r0,0
+	mtspr	SPRN_LPID,r0
+	bl	__init_LPCR
+	bl	__init_TLB
+	mtlr	r11
+	blr
+
+_GLOBAL(__restore_cpu_power7)
+	mflr	r11
+	mfmsr	r3
+	rldicl.	r0,r3,4,63
+	beqlr
+	li	r0,0
+	mtspr	SPRN_LPID,r0
+	bl	__init_LPCR
+	bl	__init_TLB
+	mtlr	r11
+	blr
+
+__init_hvmode_206:
+	/* Disable CPU_FTR_HVMODE_206 and exit if MSR:HV is not set */
+	mfmsr	r3
+	rldicl.	r0,r3,4,63
+	bnelr
+	ld	r5,CPU_SPEC_FEATURES(r4)
+	LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE_206)
+	xor	r5,r5,r6
+	std	r5,CPU_SPEC_FEATURES(r4)
+	blr
+
+__init_LPCR:
+	/* Setup a sane LPCR:
+	 *
+	 *   LPES = 0b01 (HSRR0/1 used for 0x500)
+	 *   PECE = 0b111
+	 *   DPFD = 4
+	 *
+	 * Other bits untouched for now
+	 */
+	mfspr	r3,SPRN_LPCR
+	ori	r3,r3,(LPCR_LPES0|LPCR_LPES1)
+	xori	r3,r3, LPCR_LPES0
+	ori	r3,r3,(LPCR_PECE0|LPCR_PECE1|LPCR_PECE2)
+	li	r5,7
+	sldi	r5,r5,LPCR_DPFD_SH
+	andc	r3,r3,r5
+	li	r5,4
+	sldi	r5,r5,LPCR_DPFD_SH
+	or	r3,r3,r5
+	mtspr	SPRN_LPCR,r3
+	isync
+	blr
+
+__init_TLB:
+	/* Clear the TLB */
+	li	r6,128
+	mtctr	r6
+	li	r7,0xc00	/* IS field = 0b11 */
+	ptesync
+2:	tlbiel	r7
+	addi	r7,r7,0x1000
+	bdnz	2b
+	ptesync
+1:	blr
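
Note: the li/sldi/andc then li/sldi/or pair in __init_LPCR above is the usual read-modify-write idiom for replacing one multi-bit register field, here the 3-bit DPFD field, without disturbing the rest of LPCR. An equivalent C sketch; the shift value below is a placeholder, the real constant lives in asm/reg.h:

	#define LPCR_DPFD_SH	52UL	/* placeholder shift; see asm/reg.h */

	/* Mirrors the assembly sequence:
	 *   li r5,7 ; sldi r5,r5,LPCR_DPFD_SH ; andc r3,r3,r5   -- clear field
	 *   li r5,4 ; sldi r5,r5,LPCR_DPFD_SH ; or   r3,r3,r5   -- set DPFD=4 */
	static unsigned long lpcr_set_dpfd(unsigned long lpcr, unsigned long dpfd)
	{
		lpcr &= ~(7UL << LPCR_DPFD_SH);		/* andc: clear 3-bit field */
		lpcr |= (dpfd & 7UL) << LPCR_DPFD_SH;	/* or: insert new value */
		return lpcr;
	}
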
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 1f9123f412ec..9fb933248ab6 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c | |||
@@ -48,6 +48,7 @@ extern void __setup_cpu_440x5(unsigned long offset, struct cpu_spec* spec); | |||
48 | extern void __setup_cpu_460ex(unsigned long offset, struct cpu_spec* spec); | 48 | extern void __setup_cpu_460ex(unsigned long offset, struct cpu_spec* spec); |
49 | extern void __setup_cpu_460gt(unsigned long offset, struct cpu_spec* spec); | 49 | extern void __setup_cpu_460gt(unsigned long offset, struct cpu_spec* spec); |
50 | extern void __setup_cpu_460sx(unsigned long offset, struct cpu_spec *spec); | 50 | extern void __setup_cpu_460sx(unsigned long offset, struct cpu_spec *spec); |
51 | extern void __setup_cpu_apm821xx(unsigned long offset, struct cpu_spec *spec); | ||
51 | extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec); | 52 | extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec); |
52 | extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec); | 53 | extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec); |
53 | extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec); | 54 | extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec); |
@@ -61,11 +62,17 @@ extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec); | |||
61 | extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec); | 62 | extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec); |
62 | extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec); | 63 | extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec); |
63 | extern void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec* spec); | 64 | extern void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec* spec); |
65 | extern void __setup_cpu_a2(unsigned long offset, struct cpu_spec* spec); | ||
64 | extern void __restore_cpu_pa6t(void); | 66 | extern void __restore_cpu_pa6t(void); |
65 | extern void __restore_cpu_ppc970(void); | 67 | extern void __restore_cpu_ppc970(void); |
66 | extern void __setup_cpu_power7(unsigned long offset, struct cpu_spec* spec); | 68 | extern void __setup_cpu_power7(unsigned long offset, struct cpu_spec* spec); |
67 | extern void __restore_cpu_power7(void); | 69 | extern void __restore_cpu_power7(void); |
70 | extern void __restore_cpu_a2(void); | ||
68 | #endif /* CONFIG_PPC64 */ | 71 | #endif /* CONFIG_PPC64 */ |
72 | #if defined(CONFIG_E500) | ||
73 | extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec); | ||
74 | extern void __restore_cpu_e5500(void); | ||
75 | #endif /* CONFIG_E500 */ | ||
69 | 76 | ||
70 | /* This table only contains "desktop" CPUs, it need to be filled with embedded | 77 | /* This table only contains "desktop" CPUs, it need to be filled with embedded |
71 | * ones as well... | 78 | * ones as well... |
@@ -111,7 +118,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
111 | .pmc_type = PPC_PMC_IBM, | 118 | .pmc_type = PPC_PMC_IBM, |
112 | .oprofile_cpu_type = "ppc64/power3", | 119 | .oprofile_cpu_type = "ppc64/power3", |
113 | .oprofile_type = PPC_OPROFILE_RS64, | 120 | .oprofile_type = PPC_OPROFILE_RS64, |
114 | .machine_check = machine_check_generic, | ||
115 | .platform = "power3", | 121 | .platform = "power3", |
116 | }, | 122 | }, |
117 | { /* Power3+ */ | 123 | { /* Power3+ */ |
@@ -127,7 +133,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
127 | .pmc_type = PPC_PMC_IBM, | 133 | .pmc_type = PPC_PMC_IBM, |
128 | .oprofile_cpu_type = "ppc64/power3", | 134 | .oprofile_cpu_type = "ppc64/power3", |
129 | .oprofile_type = PPC_OPROFILE_RS64, | 135 | .oprofile_type = PPC_OPROFILE_RS64, |
130 | .machine_check = machine_check_generic, | ||
131 | .platform = "power3", | 136 | .platform = "power3", |
132 | }, | 137 | }, |
133 | { /* Northstar */ | 138 | { /* Northstar */ |
@@ -143,7 +148,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
143 | .pmc_type = PPC_PMC_IBM, | 148 | .pmc_type = PPC_PMC_IBM, |
144 | .oprofile_cpu_type = "ppc64/rs64", | 149 | .oprofile_cpu_type = "ppc64/rs64", |
145 | .oprofile_type = PPC_OPROFILE_RS64, | 150 | .oprofile_type = PPC_OPROFILE_RS64, |
146 | .machine_check = machine_check_generic, | ||
147 | .platform = "rs64", | 151 | .platform = "rs64", |
148 | }, | 152 | }, |
149 | { /* Pulsar */ | 153 | { /* Pulsar */ |
@@ -159,7 +163,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
159 | .pmc_type = PPC_PMC_IBM, | 163 | .pmc_type = PPC_PMC_IBM, |
160 | .oprofile_cpu_type = "ppc64/rs64", | 164 | .oprofile_cpu_type = "ppc64/rs64", |
161 | .oprofile_type = PPC_OPROFILE_RS64, | 165 | .oprofile_type = PPC_OPROFILE_RS64, |
162 | .machine_check = machine_check_generic, | ||
163 | .platform = "rs64", | 166 | .platform = "rs64", |
164 | }, | 167 | }, |
165 | { /* I-star */ | 168 | { /* I-star */ |
@@ -175,7 +178,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
175 | .pmc_type = PPC_PMC_IBM, | 178 | .pmc_type = PPC_PMC_IBM, |
176 | .oprofile_cpu_type = "ppc64/rs64", | 179 | .oprofile_cpu_type = "ppc64/rs64", |
177 | .oprofile_type = PPC_OPROFILE_RS64, | 180 | .oprofile_type = PPC_OPROFILE_RS64, |
178 | .machine_check = machine_check_generic, | ||
179 | .platform = "rs64", | 181 | .platform = "rs64", |
180 | }, | 182 | }, |
181 | { /* S-star */ | 183 | { /* S-star */ |
@@ -191,7 +193,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
191 | .pmc_type = PPC_PMC_IBM, | 193 | .pmc_type = PPC_PMC_IBM, |
192 | .oprofile_cpu_type = "ppc64/rs64", | 194 | .oprofile_cpu_type = "ppc64/rs64", |
193 | .oprofile_type = PPC_OPROFILE_RS64, | 195 | .oprofile_type = PPC_OPROFILE_RS64, |
194 | .machine_check = machine_check_generic, | ||
195 | .platform = "rs64", | 196 | .platform = "rs64", |
196 | }, | 197 | }, |
197 | { /* Power4 */ | 198 | { /* Power4 */ |
@@ -200,14 +201,13 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
200 | .cpu_name = "POWER4 (gp)", | 201 | .cpu_name = "POWER4 (gp)", |
201 | .cpu_features = CPU_FTRS_POWER4, | 202 | .cpu_features = CPU_FTRS_POWER4, |
202 | .cpu_user_features = COMMON_USER_POWER4, | 203 | .cpu_user_features = COMMON_USER_POWER4, |
203 | .mmu_features = MMU_FTR_HPTE_TABLE, | 204 | .mmu_features = MMU_FTRS_POWER4, |
204 | .icache_bsize = 128, | 205 | .icache_bsize = 128, |
205 | .dcache_bsize = 128, | 206 | .dcache_bsize = 128, |
206 | .num_pmcs = 8, | 207 | .num_pmcs = 8, |
207 | .pmc_type = PPC_PMC_IBM, | 208 | .pmc_type = PPC_PMC_IBM, |
208 | .oprofile_cpu_type = "ppc64/power4", | 209 | .oprofile_cpu_type = "ppc64/power4", |
209 | .oprofile_type = PPC_OPROFILE_POWER4, | 210 | .oprofile_type = PPC_OPROFILE_POWER4, |
210 | .machine_check = machine_check_generic, | ||
211 | .platform = "power4", | 211 | .platform = "power4", |
212 | }, | 212 | }, |
213 | { /* Power4+ */ | 213 | { /* Power4+ */ |
@@ -216,14 +216,13 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
216 | .cpu_name = "POWER4+ (gq)", | 216 | .cpu_name = "POWER4+ (gq)", |
217 | .cpu_features = CPU_FTRS_POWER4, | 217 | .cpu_features = CPU_FTRS_POWER4, |
218 | .cpu_user_features = COMMON_USER_POWER4, | 218 | .cpu_user_features = COMMON_USER_POWER4, |
219 | .mmu_features = MMU_FTR_HPTE_TABLE, | 219 | .mmu_features = MMU_FTRS_POWER4, |
220 | .icache_bsize = 128, | 220 | .icache_bsize = 128, |
221 | .dcache_bsize = 128, | 221 | .dcache_bsize = 128, |
222 | .num_pmcs = 8, | 222 | .num_pmcs = 8, |
223 | .pmc_type = PPC_PMC_IBM, | 223 | .pmc_type = PPC_PMC_IBM, |
224 | .oprofile_cpu_type = "ppc64/power4", | 224 | .oprofile_cpu_type = "ppc64/power4", |
225 | .oprofile_type = PPC_OPROFILE_POWER4, | 225 | .oprofile_type = PPC_OPROFILE_POWER4, |
226 | .machine_check = machine_check_generic, | ||
227 | .platform = "power4", | 226 | .platform = "power4", |
228 | }, | 227 | }, |
229 | { /* PPC970 */ | 228 | { /* PPC970 */ |
@@ -233,7 +232,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
233 | .cpu_features = CPU_FTRS_PPC970, | 232 | .cpu_features = CPU_FTRS_PPC970, |
234 | .cpu_user_features = COMMON_USER_POWER4 | | 233 | .cpu_user_features = COMMON_USER_POWER4 | |
235 | PPC_FEATURE_HAS_ALTIVEC_COMP, | 234 | PPC_FEATURE_HAS_ALTIVEC_COMP, |
236 | .mmu_features = MMU_FTR_HPTE_TABLE, | 235 | .mmu_features = MMU_FTRS_PPC970, |
237 | .icache_bsize = 128, | 236 | .icache_bsize = 128, |
238 | .dcache_bsize = 128, | 237 | .dcache_bsize = 128, |
239 | .num_pmcs = 8, | 238 | .num_pmcs = 8, |
@@ -242,7 +241,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
242 | .cpu_restore = __restore_cpu_ppc970, | 241 | .cpu_restore = __restore_cpu_ppc970, |
243 | .oprofile_cpu_type = "ppc64/970", | 242 | .oprofile_cpu_type = "ppc64/970", |
244 | .oprofile_type = PPC_OPROFILE_POWER4, | 243 | .oprofile_type = PPC_OPROFILE_POWER4, |
245 | .machine_check = machine_check_generic, | ||
246 | .platform = "ppc970", | 244 | .platform = "ppc970", |
247 | }, | 245 | }, |
248 | { /* PPC970FX */ | 246 | { /* PPC970FX */ |
@@ -252,7 +250,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
252 | .cpu_features = CPU_FTRS_PPC970, | 250 | .cpu_features = CPU_FTRS_PPC970, |
253 | .cpu_user_features = COMMON_USER_POWER4 | | 251 | .cpu_user_features = COMMON_USER_POWER4 | |
254 | PPC_FEATURE_HAS_ALTIVEC_COMP, | 252 | PPC_FEATURE_HAS_ALTIVEC_COMP, |
255 | .mmu_features = MMU_FTR_HPTE_TABLE, | 253 | .mmu_features = MMU_FTRS_PPC970, |
256 | .icache_bsize = 128, | 254 | .icache_bsize = 128, |
257 | .dcache_bsize = 128, | 255 | .dcache_bsize = 128, |
258 | .num_pmcs = 8, | 256 | .num_pmcs = 8, |
@@ -261,7 +259,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
261 | .cpu_restore = __restore_cpu_ppc970, | 259 | .cpu_restore = __restore_cpu_ppc970, |
262 | .oprofile_cpu_type = "ppc64/970", | 260 | .oprofile_cpu_type = "ppc64/970", |
263 | .oprofile_type = PPC_OPROFILE_POWER4, | 261 | .oprofile_type = PPC_OPROFILE_POWER4, |
264 | .machine_check = machine_check_generic, | ||
265 | .platform = "ppc970", | 262 | .platform = "ppc970", |
266 | }, | 263 | }, |
267 | { /* PPC970MP DD1.0 - no DEEPNAP, use regular 970 init */ | 264 | { /* PPC970MP DD1.0 - no DEEPNAP, use regular 970 init */ |
@@ -280,7 +277,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
280 | .cpu_restore = __restore_cpu_ppc970, | 277 | .cpu_restore = __restore_cpu_ppc970, |
281 | .oprofile_cpu_type = "ppc64/970MP", | 278 | .oprofile_cpu_type = "ppc64/970MP", |
282 | .oprofile_type = PPC_OPROFILE_POWER4, | 279 | .oprofile_type = PPC_OPROFILE_POWER4, |
283 | .machine_check = machine_check_generic, | ||
284 | .platform = "ppc970", | 280 | .platform = "ppc970", |
285 | }, | 281 | }, |
286 | { /* PPC970MP */ | 282 | { /* PPC970MP */ |
@@ -290,7 +286,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
290 | .cpu_features = CPU_FTRS_PPC970, | 286 | .cpu_features = CPU_FTRS_PPC970, |
291 | .cpu_user_features = COMMON_USER_POWER4 | | 287 | .cpu_user_features = COMMON_USER_POWER4 | |
292 | PPC_FEATURE_HAS_ALTIVEC_COMP, | 288 | PPC_FEATURE_HAS_ALTIVEC_COMP, |
293 | .mmu_features = MMU_FTR_HPTE_TABLE, | 289 | .mmu_features = MMU_FTRS_PPC970, |
294 | .icache_bsize = 128, | 290 | .icache_bsize = 128, |
295 | .dcache_bsize = 128, | 291 | .dcache_bsize = 128, |
296 | .num_pmcs = 8, | 292 | .num_pmcs = 8, |
@@ -299,7 +295,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
299 | .cpu_restore = __restore_cpu_ppc970, | 295 | .cpu_restore = __restore_cpu_ppc970, |
300 | .oprofile_cpu_type = "ppc64/970MP", | 296 | .oprofile_cpu_type = "ppc64/970MP", |
301 | .oprofile_type = PPC_OPROFILE_POWER4, | 297 | .oprofile_type = PPC_OPROFILE_POWER4, |
302 | .machine_check = machine_check_generic, | ||
303 | .platform = "ppc970", | 298 | .platform = "ppc970", |
304 | }, | 299 | }, |
305 | { /* PPC970GX */ | 300 | { /* PPC970GX */ |
@@ -309,7 +304,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
309 | .cpu_features = CPU_FTRS_PPC970, | 304 | .cpu_features = CPU_FTRS_PPC970, |
310 | .cpu_user_features = COMMON_USER_POWER4 | | 305 | .cpu_user_features = COMMON_USER_POWER4 | |
311 | PPC_FEATURE_HAS_ALTIVEC_COMP, | 306 | PPC_FEATURE_HAS_ALTIVEC_COMP, |
312 | .mmu_features = MMU_FTR_HPTE_TABLE, | 307 | .mmu_features = MMU_FTRS_PPC970, |
313 | .icache_bsize = 128, | 308 | .icache_bsize = 128, |
314 | .dcache_bsize = 128, | 309 | .dcache_bsize = 128, |
315 | .num_pmcs = 8, | 310 | .num_pmcs = 8, |
@@ -317,7 +312,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
317 | .cpu_setup = __setup_cpu_ppc970, | 312 | .cpu_setup = __setup_cpu_ppc970, |
318 | .oprofile_cpu_type = "ppc64/970", | 313 | .oprofile_cpu_type = "ppc64/970", |
319 | .oprofile_type = PPC_OPROFILE_POWER4, | 314 | .oprofile_type = PPC_OPROFILE_POWER4, |
320 | .machine_check = machine_check_generic, | ||
321 | .platform = "ppc970", | 315 | .platform = "ppc970", |
322 | }, | 316 | }, |
323 | { /* Power5 GR */ | 317 | { /* Power5 GR */ |
@@ -326,7 +320,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
326 | .cpu_name = "POWER5 (gr)", | 320 | .cpu_name = "POWER5 (gr)", |
327 | .cpu_features = CPU_FTRS_POWER5, | 321 | .cpu_features = CPU_FTRS_POWER5, |
328 | .cpu_user_features = COMMON_USER_POWER5, | 322 | .cpu_user_features = COMMON_USER_POWER5, |
329 | .mmu_features = MMU_FTR_HPTE_TABLE, | 323 | .mmu_features = MMU_FTRS_POWER5, |
330 | .icache_bsize = 128, | 324 | .icache_bsize = 128, |
331 | .dcache_bsize = 128, | 325 | .dcache_bsize = 128, |
332 | .num_pmcs = 6, | 326 | .num_pmcs = 6, |
@@ -338,7 +332,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
338 | */ | 332 | */ |
339 | .oprofile_mmcra_sihv = MMCRA_SIHV, | 333 | .oprofile_mmcra_sihv = MMCRA_SIHV, |
340 | .oprofile_mmcra_sipr = MMCRA_SIPR, | 334 | .oprofile_mmcra_sipr = MMCRA_SIPR, |
341 | .machine_check = machine_check_generic, | ||
342 | .platform = "power5", | 335 | .platform = "power5", |
343 | }, | 336 | }, |
344 | { /* Power5++ */ | 337 | { /* Power5++ */ |
@@ -347,7 +340,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
347 | .cpu_name = "POWER5+ (gs)", | 340 | .cpu_name = "POWER5+ (gs)", |
348 | .cpu_features = CPU_FTRS_POWER5, | 341 | .cpu_features = CPU_FTRS_POWER5, |
349 | .cpu_user_features = COMMON_USER_POWER5_PLUS, | 342 | .cpu_user_features = COMMON_USER_POWER5_PLUS, |
350 | .mmu_features = MMU_FTR_HPTE_TABLE, | 343 | .mmu_features = MMU_FTRS_POWER5, |
351 | .icache_bsize = 128, | 344 | .icache_bsize = 128, |
352 | .dcache_bsize = 128, | 345 | .dcache_bsize = 128, |
353 | .num_pmcs = 6, | 346 | .num_pmcs = 6, |
@@ -355,7 +348,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
355 | .oprofile_type = PPC_OPROFILE_POWER4, | 348 | .oprofile_type = PPC_OPROFILE_POWER4, |
356 | .oprofile_mmcra_sihv = MMCRA_SIHV, | 349 | .oprofile_mmcra_sihv = MMCRA_SIHV, |
357 | .oprofile_mmcra_sipr = MMCRA_SIPR, | 350 | .oprofile_mmcra_sipr = MMCRA_SIPR, |
358 | .machine_check = machine_check_generic, | ||
359 | .platform = "power5+", | 351 | .platform = "power5+", |
360 | }, | 352 | }, |
361 | { /* Power5 GS */ | 353 | { /* Power5 GS */ |
@@ -364,7 +356,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
364 | .cpu_name = "POWER5+ (gs)", | 356 | .cpu_name = "POWER5+ (gs)", |
365 | .cpu_features = CPU_FTRS_POWER5, | 357 | .cpu_features = CPU_FTRS_POWER5, |
366 | .cpu_user_features = COMMON_USER_POWER5_PLUS, | 358 | .cpu_user_features = COMMON_USER_POWER5_PLUS, |
367 | .mmu_features = MMU_FTR_HPTE_TABLE, | 359 | .mmu_features = MMU_FTRS_POWER5, |
368 | .icache_bsize = 128, | 360 | .icache_bsize = 128, |
369 | .dcache_bsize = 128, | 361 | .dcache_bsize = 128, |
370 | .num_pmcs = 6, | 362 | .num_pmcs = 6, |
@@ -373,7 +365,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
373 | .oprofile_type = PPC_OPROFILE_POWER4, | 365 | .oprofile_type = PPC_OPROFILE_POWER4, |
374 | .oprofile_mmcra_sihv = MMCRA_SIHV, | 366 | .oprofile_mmcra_sihv = MMCRA_SIHV, |
375 | .oprofile_mmcra_sipr = MMCRA_SIPR, | 367 | .oprofile_mmcra_sipr = MMCRA_SIPR, |
376 | .machine_check = machine_check_generic, | ||
377 | .platform = "power5+", | 368 | .platform = "power5+", |
378 | }, | 369 | }, |
379 | { /* POWER6 in P5+ mode; 2.04-compliant processor */ | 370 | { /* POWER6 in P5+ mode; 2.04-compliant processor */ |
@@ -382,10 +373,9 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
382 | .cpu_name = "POWER5+", | 373 | .cpu_name = "POWER5+", |
383 | .cpu_features = CPU_FTRS_POWER5, | 374 | .cpu_features = CPU_FTRS_POWER5, |
384 | .cpu_user_features = COMMON_USER_POWER5_PLUS, | 375 | .cpu_user_features = COMMON_USER_POWER5_PLUS, |
385 | .mmu_features = MMU_FTR_HPTE_TABLE, | 376 | .mmu_features = MMU_FTRS_POWER5, |
386 | .icache_bsize = 128, | 377 | .icache_bsize = 128, |
387 | .dcache_bsize = 128, | 378 | .dcache_bsize = 128, |
388 | .machine_check = machine_check_generic, | ||
389 | .oprofile_cpu_type = "ppc64/ibm-compat-v1", | 379 | .oprofile_cpu_type = "ppc64/ibm-compat-v1", |
390 | .oprofile_type = PPC_OPROFILE_POWER4, | 380 | .oprofile_type = PPC_OPROFILE_POWER4, |
391 | .platform = "power5+", | 381 | .platform = "power5+", |
@@ -397,7 +387,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
397 | .cpu_features = CPU_FTRS_POWER6, | 387 | .cpu_features = CPU_FTRS_POWER6, |
398 | .cpu_user_features = COMMON_USER_POWER6 | | 388 | .cpu_user_features = COMMON_USER_POWER6 | |
399 | PPC_FEATURE_POWER6_EXT, | 389 | PPC_FEATURE_POWER6_EXT, |
400 | .mmu_features = MMU_FTR_HPTE_TABLE, | 390 | .mmu_features = MMU_FTRS_POWER6, |
401 | .icache_bsize = 128, | 391 | .icache_bsize = 128, |
402 | .dcache_bsize = 128, | 392 | .dcache_bsize = 128, |
403 | .num_pmcs = 6, | 393 | .num_pmcs = 6, |
@@ -408,7 +398,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
408 | .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR, | 398 | .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR, |
409 | .oprofile_mmcra_clear = POWER6_MMCRA_THRM | | 399 | .oprofile_mmcra_clear = POWER6_MMCRA_THRM | |
410 | POWER6_MMCRA_OTHER, | 400 | POWER6_MMCRA_OTHER, |
411 | .machine_check = machine_check_generic, | ||
412 | .platform = "power6x", | 401 | .platform = "power6x", |
413 | }, | 402 | }, |
414 | { /* 2.05-compliant processor, i.e. Power6 "architected" mode */ | 403 | { /* 2.05-compliant processor, i.e. Power6 "architected" mode */ |
@@ -417,10 +406,9 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
417 | .cpu_name = "POWER6 (architected)", | 406 | .cpu_name = "POWER6 (architected)", |
418 | .cpu_features = CPU_FTRS_POWER6, | 407 | .cpu_features = CPU_FTRS_POWER6, |
419 | .cpu_user_features = COMMON_USER_POWER6, | 408 | .cpu_user_features = COMMON_USER_POWER6, |
420 | .mmu_features = MMU_FTR_HPTE_TABLE, | 409 | .mmu_features = MMU_FTRS_POWER6, |
421 | .icache_bsize = 128, | 410 | .icache_bsize = 128, |
422 | .dcache_bsize = 128, | 411 | .dcache_bsize = 128, |
423 | .machine_check = machine_check_generic, | ||
424 | .oprofile_cpu_type = "ppc64/ibm-compat-v1", | 412 | .oprofile_cpu_type = "ppc64/ibm-compat-v1", |
425 | .oprofile_type = PPC_OPROFILE_POWER4, | 413 | .oprofile_type = PPC_OPROFILE_POWER4, |
426 | .platform = "power6", | 414 | .platform = "power6", |
@@ -431,13 +419,13 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
431 | .cpu_name = "POWER7 (architected)", | 419 | .cpu_name = "POWER7 (architected)", |
432 | .cpu_features = CPU_FTRS_POWER7, | 420 | .cpu_features = CPU_FTRS_POWER7, |
433 | .cpu_user_features = COMMON_USER_POWER7, | 421 | .cpu_user_features = COMMON_USER_POWER7, |
434 | .mmu_features = MMU_FTR_HPTE_TABLE | | 422 | .mmu_features = MMU_FTRS_POWER7, |
435 | MMU_FTR_TLBIE_206, | ||
436 | .icache_bsize = 128, | 423 | .icache_bsize = 128, |
437 | .dcache_bsize = 128, | 424 | .dcache_bsize = 128, |
438 | .machine_check = machine_check_generic, | ||
439 | .oprofile_type = PPC_OPROFILE_POWER4, | 425 | .oprofile_type = PPC_OPROFILE_POWER4, |
440 | .oprofile_cpu_type = "ppc64/ibm-compat-v1", | 426 | .oprofile_cpu_type = "ppc64/ibm-compat-v1", |
427 | .cpu_setup = __setup_cpu_power7, | ||
428 | .cpu_restore = __restore_cpu_power7, | ||
441 | .platform = "power7", | 429 | .platform = "power7", |
442 | }, | 430 | }, |
443 | { /* Power7 */ | 431 | { /* Power7 */ |
@@ -446,21 +434,33 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
446 | .cpu_name = "POWER7 (raw)", | 434 | .cpu_name = "POWER7 (raw)", |
447 | .cpu_features = CPU_FTRS_POWER7, | 435 | .cpu_features = CPU_FTRS_POWER7, |
448 | .cpu_user_features = COMMON_USER_POWER7, | 436 | .cpu_user_features = COMMON_USER_POWER7, |
449 | .mmu_features = MMU_FTR_HPTE_TABLE | | 437 | .mmu_features = MMU_FTRS_POWER7, |
450 | MMU_FTR_TLBIE_206, | ||
451 | .icache_bsize = 128, | 438 | .icache_bsize = 128, |
452 | .dcache_bsize = 128, | 439 | .dcache_bsize = 128, |
453 | .num_pmcs = 6, | 440 | .num_pmcs = 6, |
454 | .pmc_type = PPC_PMC_IBM, | 441 | .pmc_type = PPC_PMC_IBM, |
442 | .oprofile_cpu_type = "ppc64/power7", | ||
443 | .oprofile_type = PPC_OPROFILE_POWER4, | ||
455 | .cpu_setup = __setup_cpu_power7, | 444 | .cpu_setup = __setup_cpu_power7, |
456 | .cpu_restore = __restore_cpu_power7, | 445 | .cpu_restore = __restore_cpu_power7, |
446 | .platform = "power7", | ||
447 | }, | ||
448 | { /* Power7+ */ | ||
449 | .pvr_mask = 0xffff0000, | ||
450 | .pvr_value = 0x004A0000, | ||
451 | .cpu_name = "POWER7+ (raw)", | ||
452 | .cpu_features = CPU_FTRS_POWER7, | ||
453 | .cpu_user_features = COMMON_USER_POWER7, | ||
454 | .mmu_features = MMU_FTRS_POWER7, | ||
455 | .icache_bsize = 128, | ||
456 | .dcache_bsize = 128, | ||
457 | .num_pmcs = 6, | ||
458 | .pmc_type = PPC_PMC_IBM, | ||
457 | .oprofile_cpu_type = "ppc64/power7", | 459 | .oprofile_cpu_type = "ppc64/power7", |
458 | .oprofile_type = PPC_OPROFILE_POWER4, | 460 | .oprofile_type = PPC_OPROFILE_POWER4, |
459 | .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV, | 461 | .cpu_setup = __setup_cpu_power7, |
460 | .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR, | 462 | .cpu_restore = __restore_cpu_power7, |
461 | .oprofile_mmcra_clear = POWER6_MMCRA_THRM | | 463 | .platform = "power7+", |
462 | POWER6_MMCRA_OTHER, | ||
463 | .platform = "power7", | ||
464 | }, | 464 | }, |
465 | { /* Cell Broadband Engine */ | 465 | { /* Cell Broadband Engine */ |
466 | .pvr_mask = 0xffff0000, | 466 | .pvr_mask = 0xffff0000, |
@@ -470,14 +470,13 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
470 | .cpu_user_features = COMMON_USER_PPC64 | | 470 | .cpu_user_features = COMMON_USER_PPC64 | |
471 | PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP | | 471 | PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP | |
472 | PPC_FEATURE_SMT, | 472 | PPC_FEATURE_SMT, |
473 | .mmu_features = MMU_FTR_HPTE_TABLE, | 473 | .mmu_features = MMU_FTRS_CELL, |
474 | .icache_bsize = 128, | 474 | .icache_bsize = 128, |
475 | .dcache_bsize = 128, | 475 | .dcache_bsize = 128, |
476 | .num_pmcs = 4, | 476 | .num_pmcs = 4, |
477 | .pmc_type = PPC_PMC_IBM, | 477 | .pmc_type = PPC_PMC_IBM, |
478 | .oprofile_cpu_type = "ppc64/cell-be", | 478 | .oprofile_cpu_type = "ppc64/cell-be", |
479 | .oprofile_type = PPC_OPROFILE_CELL, | 479 | .oprofile_type = PPC_OPROFILE_CELL, |
480 | .machine_check = machine_check_generic, | ||
481 | .platform = "ppc-cell-be", | 480 | .platform = "ppc-cell-be", |
482 | }, | 481 | }, |
483 | { /* PA Semi PA6T */ | 482 | { /* PA Semi PA6T */ |
@@ -486,7 +485,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
486 | .cpu_name = "PA6T", | 485 | .cpu_name = "PA6T", |
487 | .cpu_features = CPU_FTRS_PA6T, | 486 | .cpu_features = CPU_FTRS_PA6T, |
488 | .cpu_user_features = COMMON_USER_PA6T, | 487 | .cpu_user_features = COMMON_USER_PA6T, |
489 | .mmu_features = MMU_FTR_HPTE_TABLE, | 488 | .mmu_features = MMU_FTRS_PA6T, |
490 | .icache_bsize = 64, | 489 | .icache_bsize = 64, |
491 | .dcache_bsize = 64, | 490 | .dcache_bsize = 64, |
492 | .num_pmcs = 6, | 491 | .num_pmcs = 6, |
@@ -495,7 +494,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
495 | .cpu_restore = __restore_cpu_pa6t, | 494 | .cpu_restore = __restore_cpu_pa6t, |
496 | .oprofile_cpu_type = "ppc64/pa6t", | 495 | .oprofile_cpu_type = "ppc64/pa6t", |
497 | .oprofile_type = PPC_OPROFILE_PA6T, | 496 | .oprofile_type = PPC_OPROFILE_PA6T, |
498 | .machine_check = machine_check_generic, | ||
499 | .platform = "pa6t", | 497 | .platform = "pa6t", |
500 | }, | 498 | }, |
501 | { /* default match */ | 499 | { /* default match */ |
@@ -504,12 +502,11 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
504 | .cpu_name = "POWER4 (compatible)", | 502 | .cpu_name = "POWER4 (compatible)", |
505 | .cpu_features = CPU_FTRS_COMPATIBLE, | 503 | .cpu_features = CPU_FTRS_COMPATIBLE, |
506 | .cpu_user_features = COMMON_USER_PPC64, | 504 | .cpu_user_features = COMMON_USER_PPC64, |
507 | .mmu_features = MMU_FTR_HPTE_TABLE, | 505 | .mmu_features = MMU_FTRS_DEFAULT_HPTE_ARCH_V2, |
508 | .icache_bsize = 128, | 506 | .icache_bsize = 128, |
509 | .dcache_bsize = 128, | 507 | .dcache_bsize = 128, |
510 | .num_pmcs = 6, | 508 | .num_pmcs = 6, |
511 | .pmc_type = PPC_PMC_IBM, | 509 | .pmc_type = PPC_PMC_IBM, |
512 | .machine_check = machine_check_generic, | ||
513 | .platform = "power4", | 510 | .platform = "power4", |
514 | } | 511 | } |
515 | #endif /* CONFIG_PPC_BOOK3S_64 */ | 512 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
@@ -1805,11 +1802,25 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
1805 | .machine_check = machine_check_440A, | 1802 | .machine_check = machine_check_440A, |
1806 | .platform = "ppc440", | 1803 | .platform = "ppc440", |
1807 | }, | 1804 | }, |
1808 | { /* 476 core */ | 1805 | { /* 464 in APM821xx */ |
1809 | .pvr_mask = 0xffff0000, | 1806 | .pvr_mask = 0xffffff00, |
1810 | .pvr_value = 0x11a50000, | 1807 | .pvr_value = 0x12C41C80, |
1808 | .cpu_name = "APM821XX", | ||
1809 | .cpu_features = CPU_FTRS_44X, | ||
1810 | .cpu_user_features = COMMON_USER_BOOKE | | ||
1811 | PPC_FEATURE_HAS_FPU, | ||
1812 | .mmu_features = MMU_FTR_TYPE_44x, | ||
1813 | .icache_bsize = 32, | ||
1814 | .dcache_bsize = 32, | ||
1815 | .cpu_setup = __setup_cpu_apm821xx, | ||
1816 | .machine_check = machine_check_440A, | ||
1817 | .platform = "ppc440", | ||
1818 | }, | ||
1819 | { /* 476 DD2 core */ | ||
1820 | .pvr_mask = 0xffffffff, | ||
1821 | .pvr_value = 0x11a52080, | ||
1811 | .cpu_name = "476", | 1822 | .cpu_name = "476", |
1812 | .cpu_features = CPU_FTRS_47X, | 1823 | .cpu_features = CPU_FTRS_47X | CPU_FTR_476_DD2, |
1813 | .cpu_user_features = COMMON_USER_BOOKE | | 1824 | .cpu_user_features = COMMON_USER_BOOKE | |
1814 | PPC_FEATURE_HAS_FPU, | 1825 | PPC_FEATURE_HAS_FPU, |
1815 | .mmu_features = MMU_FTR_TYPE_47x | | 1826 | .mmu_features = MMU_FTR_TYPE_47x | |
@@ -1833,6 +1844,20 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
1833 | .machine_check = machine_check_47x, | 1844 | .machine_check = machine_check_47x, |
1834 | .platform = "ppc470", | 1845 | .platform = "ppc470", |
1835 | }, | 1846 | }, |
1847 | { /* 476 others */ | ||
1848 | .pvr_mask = 0xffff0000, | ||
1849 | .pvr_value = 0x11a50000, | ||
1850 | .cpu_name = "476", | ||
1851 | .cpu_features = CPU_FTRS_47X, | ||
1852 | .cpu_user_features = COMMON_USER_BOOKE | | ||
1853 | PPC_FEATURE_HAS_FPU, | ||
1854 | .mmu_features = MMU_FTR_TYPE_47x | | ||
1855 | MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL, | ||
1856 | .icache_bsize = 32, | ||
1857 | .dcache_bsize = 128, | ||
1858 | .machine_check = machine_check_47x, | ||
1859 | .platform = "ppc470", | ||
1860 | }, | ||
1836 | { /* default match */ | 1861 | { /* default match */ |
1837 | .pvr_mask = 0x00000000, | 1862 | .pvr_mask = 0x00000000, |
1838 | .pvr_value = 0x00000000, | 1863 | .pvr_value = 0x00000000, |
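
The ordering of the two 476 entries added above matters: identify_cpu() scans cpu_specs[] top to bottom and the first entry whose (PVR & pvr_mask) == pvr_value wins, so the exact-match DD2 entry (mask 0xffffffff) must sit before the broad catch-all (mask 0xffff0000), or every DD2 part would lose CPU_FTR_476_DD2. A standalone C sketch of that first-match rule, reusing the PVR values from the hunk:

#include <stdio.h>

struct entry { unsigned int pvr_mask, pvr_value; const char *cpu_name; };

/* Same order as the cpu_specs[] additions above: exact match first. */
static const struct entry table[] = {
        { 0xffffffff, 0x11a52080, "476 DD2" },
        { 0xffff0000, 0x11a50000, "476 (others)" },
};

int main(void)
{
        unsigned int pvr = 0x11a52080;  /* a DD2 part, for illustration */
        unsigned int i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
                if ((pvr & table[i].pvr_mask) == table[i].pvr_value) {
                        printf("matched %s\n", table[i].cpu_name); /* 476 DD2 */
                        break;
                }
        }
        return 0;
}
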
@@ -1891,7 +1916,9 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
1891 | .platform = "ppc5554", | 1916 | .platform = "ppc5554", |
1892 | } | 1917 | } |
1893 | #endif /* CONFIG_E200 */ | 1918 | #endif /* CONFIG_E200 */ |
1919 | #endif /* CONFIG_PPC32 */ | ||
1894 | #ifdef CONFIG_E500 | 1920 | #ifdef CONFIG_E500 |
1921 | #ifdef CONFIG_PPC32 | ||
1895 | { /* e500 */ | 1922 | { /* e500 */ |
1896 | .pvr_mask = 0xffff0000, | 1923 | .pvr_mask = 0xffff0000, |
1897 | .pvr_value = 0x80200000, | 1924 | .pvr_value = 0x80200000, |
@@ -1946,6 +1973,26 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
1946 | .machine_check = machine_check_e500mc, | 1973 | .machine_check = machine_check_e500mc, |
1947 | .platform = "ppce500mc", | 1974 | .platform = "ppce500mc", |
1948 | }, | 1975 | }, |
1976 | #endif /* CONFIG_PPC32 */ | ||
1977 | { /* e5500 */ | ||
1978 | .pvr_mask = 0xffff0000, | ||
1979 | .pvr_value = 0x80240000, | ||
1980 | .cpu_name = "e5500", | ||
1981 | .cpu_features = CPU_FTRS_E5500, | ||
1982 | .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, | ||
1983 | .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | | ||
1984 | MMU_FTR_USE_TLBILX, | ||
1985 | .icache_bsize = 64, | ||
1986 | .dcache_bsize = 64, | ||
1987 | .num_pmcs = 4, | ||
1988 | .oprofile_cpu_type = "ppc/e500mc", | ||
1989 | .oprofile_type = PPC_OPROFILE_FSL_EMB, | ||
1990 | .cpu_setup = __setup_cpu_e5500, | ||
1991 | .cpu_restore = __restore_cpu_e5500, | ||
1992 | .machine_check = machine_check_e500mc, | ||
1993 | .platform = "ppce5500", | ||
1994 | }, | ||
1995 | #ifdef CONFIG_PPC32 | ||
1949 | { /* default match */ | 1996 | { /* default match */ |
1950 | .pvr_mask = 0x00000000, | 1997 | .pvr_mask = 0x00000000, |
1951 | .pvr_value = 0x00000000, | 1998 | .pvr_value = 0x00000000, |
@@ -1960,10 +2007,25 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
1960 | .machine_check = machine_check_e500, | 2007 | .machine_check = machine_check_e500, |
1961 | .platform = "powerpc", | 2008 | .platform = "powerpc", |
1962 | } | 2009 | } |
1963 | #endif /* CONFIG_E500 */ | ||
1964 | #endif /* CONFIG_PPC32 */ | 2010 | #endif /* CONFIG_PPC32 */ |
2011 | #endif /* CONFIG_E500 */ | ||
1965 | 2012 | ||
1966 | #ifdef CONFIG_PPC_BOOK3E_64 | 2013 | #ifdef CONFIG_PPC_A2 |
2014 | { /* Standard A2 (>= DD2) + FPU core */ | ||
2015 | .pvr_mask = 0xffff0000, | ||
2016 | .pvr_value = 0x00480000, | ||
2017 | .cpu_name = "A2 (>= DD2)", | ||
2018 | .cpu_features = CPU_FTRS_A2, | ||
2019 | .cpu_user_features = COMMON_USER_PPC64, | ||
2020 | .mmu_features = MMU_FTRS_A2, | ||
2021 | .icache_bsize = 64, | ||
2022 | .dcache_bsize = 64, | ||
2023 | .num_pmcs = 0, | ||
2024 | .cpu_setup = __setup_cpu_a2, | ||
2025 | .cpu_restore = __restore_cpu_a2, | ||
2026 | .machine_check = machine_check_generic, | ||
2027 | .platform = "ppca2", | ||
2028 | }, | ||
1967 | { /* This is a default entry to get going, to be replaced by | 2029 | { /* This is a default entry to get going, to be replaced by |
1968 | * a real one at some stage | 2030 | * a real one at some stage |
1969 | */ | 2031 | */ |
@@ -1984,7 +2046,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
1984 | .machine_check = machine_check_generic, | 2046 | .machine_check = machine_check_generic, |
1985 | .platform = "power6", | 2047 | .platform = "power6", |
1986 | }, | 2048 | }, |
1987 | #endif | 2049 | #endif /* CONFIG_PPC_A2 */ |
1988 | }; | 2050 | }; |
1989 | 2051 | ||
1990 | static struct cpu_spec the_cpu_spec; | 2052 | static struct cpu_spec the_cpu_spec; |
@@ -2048,8 +2110,8 @@ static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s) | |||
2048 | * pointer on ppc64 and booke as we are running at 0 in real mode | 2110 | * pointer on ppc64 and booke as we are running at 0 in real mode |
2049 | * on ppc64 and reloc_offset is always 0 on booke. | 2111 | * on ppc64 and reloc_offset is always 0 on booke. |
2050 | */ | 2112 | */ |
2051 | if (s->cpu_setup) { | 2113 | if (t->cpu_setup) { |
2052 | s->cpu_setup(offset, s); | 2114 | t->cpu_setup(offset, t); |
2053 | } | 2115 | } |
2054 | #endif /* CONFIG_PPC64 || CONFIG_BOOKE */ | 2116 | #endif /* CONFIG_PPC64 || CONFIG_BOOKE */ |
2055 | } | 2117 | } |
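
The s-to-t change in this hunk is a subtle bug fix: setup_cpu_spec() copies the matched table entry into the static the_cpu_spec (t above), and cpu_setup callbacks may themselves adjust the spec (feature bits and the like), so the callback has to be invoked on, and be handed, the live copy rather than the __initdata table entry, where its changes would be lost. A minimal sketch of the shape (field names simplified; not the kernel's exact code):

struct spec {
        const char *cpu_name;
        void (*cpu_setup)(unsigned long offset, struct spec *spec);
};

static struct spec the_spec;            /* the copy used at runtime */

static void setup_spec(unsigned long offset, struct spec *s)
{
        struct spec *t = &the_spec;

        *t = *s;                        /* copy the matched table entry */
        /* ... *t may be patched further here ... */
        if (t->cpu_setup)               /* was: s->cpu_setup(offset, s) */
                t->cpu_setup(offset, t);
}
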
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c index 4457382f8667..4e6ee944495a 100644 --- a/arch/powerpc/kernel/crash.c +++ b/arch/powerpc/kernel/crash.c | |||
@@ -48,7 +48,7 @@ int crashing_cpu = -1; | |||
48 | static cpumask_t cpus_in_crash = CPU_MASK_NONE; | 48 | static cpumask_t cpus_in_crash = CPU_MASK_NONE; |
49 | cpumask_t cpus_in_sr = CPU_MASK_NONE; | 49 | cpumask_t cpus_in_sr = CPU_MASK_NONE; |
50 | 50 | ||
51 | #define CRASH_HANDLER_MAX 2 | 51 | #define CRASH_HANDLER_MAX 3 |
52 | /* NULL terminated list of shutdown handles */ | 52 | /* NULL terminated list of shutdown handles */ |
53 | static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX+1]; | 53 | static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX+1]; |
54 | static DEFINE_SPINLOCK(crash_handlers_lock); | 54 | static DEFINE_SPINLOCK(crash_handlers_lock); |
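
Raising CRASH_HANDLER_MAX from 2 to 3 simply widens the fixed handler table; the +1 in the array size keeps the NULL terminator. For context, registration takes crash_handlers_lock and claims the first empty slot, as crash_shutdown_register() later in this file does; a sketch of that pattern:

static int register_crash_handler(crash_shutdown_t handler)
{
        unsigned int i, rc = 0;

        spin_lock(&crash_handlers_lock);
        for (i = 0; i < CRASH_HANDLER_MAX; i++)
                if (!crash_shutdown_handles[i]) {
                        crash_shutdown_handles[i] = handler; /* first free slot */
                        break;
                }
        if (i == CRASH_HANDLER_MAX) {
                printk(KERN_ERR "Crash shutdown handles full, not registered.\n");
                rc = 1;
        }
        spin_unlock(&crash_handlers_lock);
        return rc;
}
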
@@ -64,9 +64,9 @@ void crash_ipi_callback(struct pt_regs *regs) | |||
64 | return; | 64 | return; |
65 | 65 | ||
66 | hard_irq_disable(); | 66 | hard_irq_disable(); |
67 | if (!cpu_isset(cpu, cpus_in_crash)) | 67 | if (!cpumask_test_cpu(cpu, &cpus_in_crash)) |
68 | crash_save_cpu(regs, cpu); | 68 | crash_save_cpu(regs, cpu); |
69 | cpu_set(cpu, cpus_in_crash); | 69 | cpumask_set_cpu(cpu, &cpus_in_crash); |
70 | 70 | ||
71 | /* | 71 | /* |
72 | * Entered via soft-reset - could be the kdump | 72 | * Entered via soft-reset - could be the kdump |
@@ -77,8 +77,8 @@ void crash_ipi_callback(struct pt_regs *regs) | |||
77 | * Tell the kexec CPU that entered via soft-reset and ready | 77 | * Tell the kexec CPU that entered via soft-reset and ready |
78 | * to go down. | 78 | * to go down. |
79 | */ | 79 | */ |
80 | if (cpu_isset(cpu, cpus_in_sr)) { | 80 | if (cpumask_test_cpu(cpu, &cpus_in_sr)) { |
81 | cpu_clear(cpu, cpus_in_sr); | 81 | cpumask_clear_cpu(cpu, &cpus_in_sr); |
82 | atomic_inc(&enter_on_soft_reset); | 82 | atomic_inc(&enter_on_soft_reset); |
83 | } | 83 | } |
84 | 84 | ||
@@ -87,7 +87,7 @@ void crash_ipi_callback(struct pt_regs *regs) | |||
87 | * This barrier is needed to make sure that all CPUs are stopped. | 87 | * This barrier is needed to make sure that all CPUs are stopped. |
88 | * If not, soft-reset will be invoked to bring other CPUs. | 88 | * If not, soft-reset will be invoked to bring other CPUs. |
89 | */ | 89 | */ |
90 | while (!cpu_isset(crashing_cpu, cpus_in_crash)) | 90 | while (!cpumask_test_cpu(crashing_cpu, &cpus_in_crash)) |
91 | cpu_relax(); | 91 | cpu_relax(); |
92 | 92 | ||
93 | if (ppc_md.kexec_cpu_down) | 93 | if (ppc_md.kexec_cpu_down) |
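
This and the following crash.c hunks are a mechanical migration from the old by-value cpumask operations (cpu_set, cpu_isset, cpus_weight, direct assignment) to the pointer-based cpumask_* API, which keeps working when NR_CPUS is too large for cpumask_t to be passed around by value. The conversions side by side, as a sketch assuming <linux/cpumask.h>:

static cpumask_t mask = CPU_MASK_NONE;

static void cpumask_api_demo(int cpu)
{
        /* old: cpu_set(cpu, mask);           */
        cpumask_set_cpu(cpu, &mask);

        /* old: if (cpu_isset(cpu, mask)) ... */
        if (cpumask_test_cpu(cpu, &mask))
                printk(KERN_INFO "cpu %d recorded\n", cpu);

        /* old: cpus_weight(mask)             */
        printk(KERN_INFO "%u cpus set\n", cpumask_weight(&mask));

        /* old: mask = CPU_MASK_NONE;         */
        cpumask_clear(&mask);
}
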
@@ -109,7 +109,7 @@ static void crash_soft_reset_check(int cpu) | |||
109 | { | 109 | { |
110 | unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */ | 110 | unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */ |
111 | 111 | ||
112 | cpu_clear(cpu, cpus_in_sr); | 112 | cpumask_clear_cpu(cpu, &cpus_in_sr); |
113 | while (atomic_read(&enter_on_soft_reset) != ncpus) | 113 | while (atomic_read(&enter_on_soft_reset) != ncpus) |
114 | cpu_relax(); | 114 | cpu_relax(); |
115 | } | 115 | } |
@@ -125,14 +125,14 @@ static void crash_kexec_prepare_cpus(int cpu) | |||
125 | smp_wmb(); | 125 | smp_wmb(); |
126 | 126 | ||
127 | /* | 127 | /* |
128 | * FIXME: Until we will have the way to stop other CPUSs reliabally, | 128 | * FIXME: Until we have a way to stop other CPUs reliably, |
129 | * the crash CPU will send an IPI and wait for other CPUs to | 129 | * the crash CPU will send an IPI and wait for other CPUs to |
130 | * respond. | 130 | * respond. |
131 | * Delay of at least 10 seconds. | 131 | * Delay of at least 10 seconds. |
132 | */ | 132 | */ |
133 | printk(KERN_EMERG "Sending IPI to other cpus...\n"); | 133 | printk(KERN_EMERG "Sending IPI to other cpus...\n"); |
134 | msecs = 10000; | 134 | msecs = 10000; |
135 | while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) { | 135 | while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) { |
136 | cpu_relax(); | 136 | cpu_relax(); |
137 | mdelay(1); | 137 | mdelay(1); |
138 | } | 138 | } |
@@ -144,52 +144,24 @@ static void crash_kexec_prepare_cpus(int cpu) | |||
144 | * user to do a soft reset so that we get them all. | 144 |
145 | * Soft-reset will be used until a better mechanism is implemented. | 145 |
146 | */ | 146 | */ |
147 | if (cpus_weight(cpus_in_crash) < ncpus) { | 147 | if (cpumask_weight(&cpus_in_crash) < ncpus) { |
148 | printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n", | 148 | printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n", |
149 | ncpus - cpus_weight(cpus_in_crash)); | 149 | ncpus - cpumask_weight(&cpus_in_crash)); |
150 | printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n"); | 150 | printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n"); |
151 | cpus_in_sr = CPU_MASK_NONE; | 151 | cpumask_clear(&cpus_in_sr); |
152 | atomic_set(&enter_on_soft_reset, 0); | 152 | atomic_set(&enter_on_soft_reset, 0); |
153 | while (cpus_weight(cpus_in_crash) < ncpus) | 153 | while (cpumask_weight(&cpus_in_crash) < ncpus) |
154 | cpu_relax(); | 154 | cpu_relax(); |
155 | } | 155 | } |
156 | /* | 156 | /* |
157 | * Make sure all CPUs have entered via soft-reset if the kdump is | 157 |
158 | * invoked using soft-reset. | 158 | * invoked using soft-reset. |
159 | */ | 159 | */ |
160 | if (cpu_isset(cpu, cpus_in_sr)) | 160 | if (cpumask_test_cpu(cpu, &cpus_in_sr)) |
161 | crash_soft_reset_check(cpu); | 161 | crash_soft_reset_check(cpu); |
162 | /* Leave the IPI callback set */ | 162 | /* Leave the IPI callback set */ |
163 | } | 163 | } |
164 | 164 | ||
165 | /* wait for all the CPUs to hit real mode but timeout if they don't come in */ | ||
166 | #ifdef CONFIG_PPC_STD_MMU_64 | ||
167 | static void crash_kexec_wait_realmode(int cpu) | ||
168 | { | ||
169 | unsigned int msecs; | ||
170 | int i; | ||
171 | |||
172 | msecs = 10000; | ||
173 | for (i=0; i < NR_CPUS && msecs > 0; i++) { | ||
174 | if (i == cpu) | ||
175 | continue; | ||
176 | |||
177 | while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) { | ||
178 | barrier(); | ||
179 | if (!cpu_possible(i)) { | ||
180 | break; | ||
181 | } | ||
182 | if (!cpu_online(i)) { | ||
183 | break; | ||
184 | } | ||
185 | msecs--; | ||
186 | mdelay(1); | ||
187 | } | ||
188 | } | ||
189 | mb(); | ||
190 | } | ||
191 | #endif | ||
192 | |||
193 | /* | 165 | /* |
194 | * This function will be called by secondary cpus or by kexec cpu | 166 | * This function will be called by secondary cpus or by kexec cpu |
195 | * if soft-reset is activated to stop some CPUs. | 167 | * if soft-reset is activated to stop some CPUs. |
@@ -210,7 +182,7 @@ void crash_kexec_secondary(struct pt_regs *regs) | |||
210 | * exited using 'x'(exit and recover) or | 182 | * exited using 'x'(exit and recover) or |
211 | * kexec_should_crash() failed for all running tasks. | 183 | * kexec_should_crash() failed for all running tasks. |
212 | */ | 184 | */ |
213 | cpu_clear(cpu, cpus_in_sr); | 185 | cpumask_clear_cpu(cpu, &cpus_in_sr); |
214 | local_irq_restore(flags); | 186 | local_irq_restore(flags); |
215 | return; | 187 | return; |
216 | } | 188 | } |
@@ -224,7 +196,7 @@ void crash_kexec_secondary(struct pt_regs *regs) | |||
224 | * then start kexec boot. | 196 | * then start kexec boot. |
225 | */ | 197 | */ |
226 | crash_soft_reset_check(cpu); | 198 | crash_soft_reset_check(cpu); |
227 | cpu_set(crashing_cpu, cpus_in_crash); | 199 | cpumask_set_cpu(crashing_cpu, &cpus_in_crash); |
228 | if (ppc_md.kexec_cpu_down) | 200 | if (ppc_md.kexec_cpu_down) |
229 | ppc_md.kexec_cpu_down(1, 0); | 201 | ppc_md.kexec_cpu_down(1, 0); |
230 | machine_kexec(kexec_crash_image); | 202 | machine_kexec(kexec_crash_image); |
@@ -233,7 +205,8 @@ void crash_kexec_secondary(struct pt_regs *regs) | |||
233 | crash_ipi_callback(regs); | 205 | crash_ipi_callback(regs); |
234 | } | 206 | } |
235 | 207 | ||
236 | #else | 208 | #else /* ! CONFIG_SMP */ |
209 | |||
237 | static void crash_kexec_prepare_cpus(int cpu) | 210 | static void crash_kexec_prepare_cpus(int cpu) |
238 | { | 211 | { |
239 | /* | 212 | /* |
@@ -251,75 +224,39 @@ static void crash_kexec_prepare_cpus(int cpu) | |||
251 | 224 | ||
252 | void crash_kexec_secondary(struct pt_regs *regs) | 225 | void crash_kexec_secondary(struct pt_regs *regs) |
253 | { | 226 | { |
254 | cpus_in_sr = CPU_MASK_NONE; | 227 | cpumask_clear(&cpus_in_sr); |
255 | } | 228 | } |
256 | #endif | 229 | #endif /* CONFIG_SMP */ |
257 | #ifdef CONFIG_SPU_BASE | ||
258 | |||
259 | #include <asm/spu.h> | ||
260 | #include <asm/spu_priv1.h> | ||
261 | |||
262 | struct crash_spu_info { | ||
263 | struct spu *spu; | ||
264 | u32 saved_spu_runcntl_RW; | ||
265 | u32 saved_spu_status_R; | ||
266 | u32 saved_spu_npc_RW; | ||
267 | u64 saved_mfc_sr1_RW; | ||
268 | u64 saved_mfc_dar; | ||
269 | u64 saved_mfc_dsisr; | ||
270 | }; | ||
271 | 230 | ||
272 | #define CRASH_NUM_SPUS 16 /* Enough for current hardware */ | 231 | /* wait for all the CPUs to hit real mode but timeout if they don't come in */ |
273 | static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS]; | 232 | #if defined(CONFIG_SMP) && defined(CONFIG_PPC_STD_MMU_64) |
274 | 233 | static void crash_kexec_wait_realmode(int cpu) | |
275 | static void crash_kexec_stop_spus(void) | ||
276 | { | 234 | { |
277 | struct spu *spu; | 235 | unsigned int msecs; |
278 | int i; | 236 | int i; |
279 | u64 tmp; | ||
280 | 237 | ||
281 | for (i = 0; i < CRASH_NUM_SPUS; i++) { | 238 | msecs = 10000; |
282 | if (!crash_spu_info[i].spu) | 239 | for (i=0; i < nr_cpu_ids && msecs > 0; i++) { |
283 | continue; | 240 | if (i == cpu) |
284 | |||
285 | spu = crash_spu_info[i].spu; | ||
286 | |||
287 | crash_spu_info[i].saved_spu_runcntl_RW = | ||
288 | in_be32(&spu->problem->spu_runcntl_RW); | ||
289 | crash_spu_info[i].saved_spu_status_R = | ||
290 | in_be32(&spu->problem->spu_status_R); | ||
291 | crash_spu_info[i].saved_spu_npc_RW = | ||
292 | in_be32(&spu->problem->spu_npc_RW); | ||
293 | |||
294 | crash_spu_info[i].saved_mfc_dar = spu_mfc_dar_get(spu); | ||
295 | crash_spu_info[i].saved_mfc_dsisr = spu_mfc_dsisr_get(spu); | ||
296 | tmp = spu_mfc_sr1_get(spu); | ||
297 | crash_spu_info[i].saved_mfc_sr1_RW = tmp; | ||
298 | |||
299 | tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK; | ||
300 | spu_mfc_sr1_set(spu, tmp); | ||
301 | |||
302 | __delay(200); | ||
303 | } | ||
304 | } | ||
305 | |||
306 | void crash_register_spus(struct list_head *list) | ||
307 | { | ||
308 | struct spu *spu; | ||
309 | |||
310 | list_for_each_entry(spu, list, full_list) { | ||
311 | if (WARN_ON(spu->number >= CRASH_NUM_SPUS)) | ||
312 | continue; | 241 | continue; |
313 | 242 | ||
314 | crash_spu_info[spu->number].spu = spu; | 243 | while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) { |
244 | barrier(); | ||
245 | if (!cpu_possible(i)) { | ||
246 | break; | ||
247 | } | ||
248 | if (!cpu_online(i)) { | ||
249 | break; | ||
250 | } | ||
251 | msecs--; | ||
252 | mdelay(1); | ||
253 | } | ||
315 | } | 254 | } |
255 | mb(); | ||
316 | } | 256 | } |
317 | |||
318 | #else | 257 | #else |
319 | static inline void crash_kexec_stop_spus(void) | 258 | static inline void crash_kexec_wait_realmode(int cpu) {} |
320 | { | 259 | #endif /* CONFIG_SMP && CONFIG_PPC_STD_MMU_64 */ |
321 | } | ||
322 | #endif /* CONFIG_SPU_BASE */ | ||
323 | 260 | ||
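
The relocated crash_kexec_wait_realmode() also gains an empty static-inline fallback (shown just above), which is what lets the #if/#endif around its call site in default_machine_crash_shutdown() disappear in a later hunk. The idiom in miniature:

#if defined(CONFIG_SMP) && defined(CONFIG_PPC_STD_MMU_64)
static void crash_kexec_wait_realmode(int cpu) { /* real wait loop */ }
#else
static inline void crash_kexec_wait_realmode(int cpu) { } /* compiles away */
#endif

        /* caller needs no #ifdef: */
        crash_kexec_wait_realmode(crashing_cpu);
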
324 | /* | 261 | /* |
325 | * Register a function to be called on shutdown. Only use this if you | 262 | * Register a function to be called on shutdown. Only use this if you |
@@ -409,23 +346,10 @@ void default_machine_crash_shutdown(struct pt_regs *regs) | |||
409 | crashing_cpu = smp_processor_id(); | 346 | crashing_cpu = smp_processor_id(); |
410 | crash_save_cpu(regs, crashing_cpu); | 347 | crash_save_cpu(regs, crashing_cpu); |
411 | crash_kexec_prepare_cpus(crashing_cpu); | 348 | crash_kexec_prepare_cpus(crashing_cpu); |
412 | cpu_set(crashing_cpu, cpus_in_crash); | 349 | cpumask_set_cpu(crashing_cpu, &cpus_in_crash); |
413 | #if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP) | ||
414 | crash_kexec_wait_realmode(crashing_cpu); | 350 | crash_kexec_wait_realmode(crashing_cpu); |
415 | #endif | ||
416 | |||
417 | for_each_irq(i) { | ||
418 | struct irq_desc *desc = irq_to_desc(i); | ||
419 | |||
420 | if (!desc || !desc->chip || !desc->chip->eoi) | ||
421 | continue; | ||
422 | |||
423 | if (desc->status & IRQ_INPROGRESS) | ||
424 | desc->chip->eoi(i); | ||
425 | 351 | ||
426 | if (!(desc->status & IRQ_DISABLED)) | 352 | machine_kexec_mask_interrupts(); |
427 | desc->chip->shutdown(i); | ||
428 | } | ||
429 | 353 | ||
430 | /* | 354 | /* |
431 | * Call registered shutdown routines savely. Swap out | 355 | * Call registered shutdown routines savely. Swap out |
@@ -450,8 +374,6 @@ void default_machine_crash_shutdown(struct pt_regs *regs) | |||
450 | crash_shutdown_cpu = -1; | 374 | crash_shutdown_cpu = -1; |
451 | __debugger_fault_handler = old_handler; | 375 | __debugger_fault_handler = old_handler; |
452 | 376 | ||
453 | crash_kexec_stop_spus(); | ||
454 | |||
455 | if (ppc_md.kexec_cpu_down) | 377 | if (ppc_md.kexec_cpu_down) |
456 | ppc_md.kexec_cpu_down(1, 0); | 378 | ppc_md.kexec_cpu_down(1, 0); |
457 | } | 379 | } |
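
The open-coded for_each_irq() loop deleted above, which EOIed any in-progress interrupt and shut down enabled ones, moves into the shared machine_kexec_mask_interrupts() helper; its body lives in machine_kexec.c and is not part of this diff. A hedged reconstruction of what the helper centralizes, based only on the removed lines:

void machine_kexec_mask_interrupts(void)
{
        unsigned int i;

        for_each_irq(i) {
                struct irq_desc *desc = irq_to_desc(i);

                if (!desc || !desc->chip)
                        continue;

                if (desc->chip->eoi && (desc->status & IRQ_INPROGRESS))
                        desc->chip->eoi(i);        /* ack anything mid-flight */

                if (desc->chip->shutdown && !(desc->status & IRQ_DISABLED))
                        desc->chip->shutdown(i);   /* quiesce before kexec */
        }
}
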
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 8e05c16344e4..424afb6b8fba 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <asm/prom.h> | 19 | #include <asm/prom.h> |
20 | #include <asm/firmware.h> | 20 | #include <asm/firmware.h> |
21 | #include <asm/uaccess.h> | 21 | #include <asm/uaccess.h> |
22 | #include <asm/rtas.h> | ||
22 | 23 | ||
23 | #ifdef DEBUG | 24 | #ifdef DEBUG |
24 | #include <asm/udbg.h> | 25 | #include <asm/udbg.h> |
@@ -27,9 +28,6 @@ | |||
27 | #define DBG(fmt...) | 28 | #define DBG(fmt...) |
28 | #endif | 29 | #endif |
29 | 30 | ||
30 | /* Stores the physical address of elf header of crash image. */ | ||
31 | unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX; | ||
32 | |||
33 | #ifndef CONFIG_RELOCATABLE | 31 | #ifndef CONFIG_RELOCATABLE |
34 | void __init reserve_kdump_trampoline(void) | 32 | void __init reserve_kdump_trampoline(void) |
35 | { | 33 | { |
@@ -71,20 +69,6 @@ void __init setup_kdump_trampoline(void) | |||
71 | } | 69 | } |
72 | #endif /* CONFIG_RELOCATABLE */ | 70 | #endif /* CONFIG_RELOCATABLE */ |
73 | 71 | ||
74 | /* | ||
75 | * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by | ||
76 | * is_kdump_kernel() to determine if we are booting after a panic. Hence | ||
77 | * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE. | ||
78 | */ | ||
79 | static int __init parse_elfcorehdr(char *p) | ||
80 | { | ||
81 | if (p) | ||
82 | elfcorehdr_addr = memparse(p, &p); | ||
83 | |||
84 | return 1; | ||
85 | } | ||
86 | __setup("elfcorehdr=", parse_elfcorehdr); | ||
87 | |||
88 | static int __init parse_savemaxmem(char *p) | 72 | static int __init parse_savemaxmem(char *p) |
89 | { | 73 | { |
90 | if (p) | 74 | if (p) |
@@ -141,3 +125,35 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, | |||
141 | 125 | ||
142 | return csize; | 126 | return csize; |
143 | } | 127 | } |
128 | |||
129 | #ifdef CONFIG_PPC_RTAS | ||
130 | /* | ||
131 | * The crashkernel region will almost always overlap the RTAS region, so | ||
132 | * we have to be careful when shrinking the crashkernel region. | ||
133 | */ | ||
134 | void crash_free_reserved_phys_range(unsigned long begin, unsigned long end) | ||
135 | { | ||
136 | unsigned long addr; | ||
137 | const u32 *basep, *sizep; | ||
138 | unsigned int rtas_start = 0, rtas_end = 0; | ||
139 | |||
140 | basep = of_get_property(rtas.dev, "linux,rtas-base", NULL); | ||
141 | sizep = of_get_property(rtas.dev, "rtas-size", NULL); | ||
142 | |||
143 | if (basep && sizep) { | ||
144 | rtas_start = *basep; | ||
145 | rtas_end = *basep + *sizep; | ||
146 | } | ||
147 | |||
148 | for (addr = begin; addr < end; addr += PAGE_SIZE) { | ||
149 | /* Does this page overlap with the RTAS region? */ | ||
150 | if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start)) | ||
151 | continue; | ||
152 | |||
153 | ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT)); | ||
154 | init_page_count(pfn_to_page(addr >> PAGE_SHIFT)); | ||
155 | free_page((unsigned long)__va(addr)); | ||
156 | totalram_pages++; | ||
157 | } | ||
158 | } | ||
159 | #endif | ||
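
The skip test in crash_free_reserved_phys_range() keeps every page that touches [rtas_start, rtas_end); the <= makes it conservative by one page at the top end, which is harmless here. A standalone worked example of the condition with made-up addresses:

#include <stdio.h>
#define PAGE_SIZE 4096UL

int main(void)
{
        unsigned long rtas_start = 0x2000, rtas_end = 0x5000; /* illustrative */
        unsigned long addr;

        for (addr = 0; addr < 0x8000; addr += PAGE_SIZE) {
                /* same condition as the kernel code above */
                int keep = (addr <= rtas_end) && ((addr + PAGE_SIZE) > rtas_start);
                printf("page 0x%05lx: %s\n", addr, keep ? "keep (RTAS)" : "free");
        }
        return 0;
}

Pages 0x2000 through 0x5000 stay reserved; everything else in the shrunk crashkernel region is handed back to the page allocator.
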
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c index 3307a52d797f..2cc451aaaca7 100644 --- a/arch/powerpc/kernel/dbell.c +++ b/arch/powerpc/kernel/dbell.c | |||
@@ -13,84 +13,35 @@ | |||
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/smp.h> | 14 | #include <linux/smp.h> |
15 | #include <linux/threads.h> | 15 | #include <linux/threads.h> |
16 | #include <linux/percpu.h> | 16 | #include <linux/hardirq.h> |
17 | 17 | ||
18 | #include <asm/dbell.h> | 18 | #include <asm/dbell.h> |
19 | #include <asm/irq_regs.h> | 19 | #include <asm/irq_regs.h> |
20 | 20 | ||
21 | #ifdef CONFIG_SMP | 21 | #ifdef CONFIG_SMP |
22 | struct doorbell_cpu_info { | ||
23 | unsigned long messages; /* current messages bits */ | ||
24 | unsigned int tag; /* tag value */ | ||
25 | }; | ||
26 | |||
27 | static DEFINE_PER_CPU(struct doorbell_cpu_info, doorbell_cpu_info); | ||
28 | |||
29 | void doorbell_setup_this_cpu(void) | 22 | void doorbell_setup_this_cpu(void) |
30 | { | 23 | { |
31 | struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info); | 24 | unsigned long tag = mfspr(SPRN_PIR) & 0x3fff; |
32 | 25 | ||
33 | info->messages = 0; | 26 | smp_muxed_ipi_set_data(smp_processor_id(), tag); |
34 | info->tag = mfspr(SPRN_PIR) & 0x3fff; | ||
35 | } | 27 | } |
36 | 28 | ||
37 | void doorbell_message_pass(int target, int msg) | 29 | void doorbell_cause_ipi(int cpu, unsigned long data) |
38 | { | 30 | { |
39 | struct doorbell_cpu_info *info; | 31 | ppc_msgsnd(PPC_DBELL, 0, data); |
40 | int i; | ||
41 | |||
42 | if (target < NR_CPUS) { | ||
43 | info = &per_cpu(doorbell_cpu_info, target); | ||
44 | set_bit(msg, &info->messages); | ||
45 | ppc_msgsnd(PPC_DBELL, 0, info->tag); | ||
46 | } | ||
47 | else if (target == MSG_ALL_BUT_SELF) { | ||
48 | for_each_online_cpu(i) { | ||
49 | if (i == smp_processor_id()) | ||
50 | continue; | ||
51 | info = &per_cpu(doorbell_cpu_info, i); | ||
52 | set_bit(msg, &info->messages); | ||
53 | ppc_msgsnd(PPC_DBELL, 0, info->tag); | ||
54 | } | ||
55 | } | ||
56 | else { /* target == MSG_ALL */ | ||
57 | for_each_online_cpu(i) { | ||
58 | info = &per_cpu(doorbell_cpu_info, i); | ||
59 | set_bit(msg, &info->messages); | ||
60 | } | ||
61 | ppc_msgsnd(PPC_DBELL, PPC_DBELL_MSG_BRDCAST, 0); | ||
62 | } | ||
63 | } | 32 | } |
64 | 33 | ||
65 | void doorbell_exception(struct pt_regs *regs) | 34 | void doorbell_exception(struct pt_regs *regs) |
66 | { | 35 | { |
67 | struct pt_regs *old_regs = set_irq_regs(regs); | 36 | struct pt_regs *old_regs = set_irq_regs(regs); |
68 | struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info); | ||
69 | int msg; | ||
70 | 37 | ||
71 | /* Warning: regs can be NULL when called from irq enable */ | 38 | irq_enter(); |
72 | 39 | ||
73 | if (!info->messages || (num_online_cpus() < 2)) | 40 | smp_ipi_demux(); |
74 | goto out; | ||
75 | 41 | ||
76 | for (msg = 0; msg < 4; msg++) | 42 | irq_exit(); |
77 | if (test_and_clear_bit(msg, &info->messages)) | ||
78 | smp_message_recv(msg); | ||
79 | |||
80 | out: | ||
81 | set_irq_regs(old_regs); | 43 | set_irq_regs(old_regs); |
82 | } | 44 | } |
83 | |||
84 | void doorbell_check_self(void) | ||
85 | { | ||
86 | struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info); | ||
87 | |||
88 | if (!info->messages) | ||
89 | return; | ||
90 | |||
91 | ppc_msgsnd(PPC_DBELL, 0, info->tag); | ||
92 | } | ||
93 | |||
94 | #else /* CONFIG_SMP */ | 45 | #else /* CONFIG_SMP */ |
95 | void doorbell_exception(struct pt_regs *regs) | 46 | void doorbell_exception(struct pt_regs *regs) |
96 | { | 47 | { |
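
The rewrite strips dbell.c down to hardware delivery: the per-CPU message-bit bookkeeping that used to live here moves into the generic muxed-IPI layer (smp_muxed_ipi_set_data() and smp_ipi_demux() in arch/powerpc/kernel/smp.c, not shown in this diff), and doorbell_cause_ipi() becomes the callback that layer uses to ring the doorbell. A hedged sketch of the sender half of that layer, with approximate structure and names:

struct cpu_messages {
        unsigned long messages;         /* pending message-type bits */
        unsigned long data;             /* doorbell tag set at CPU bringup */
};

static DEFINE_PER_CPU(struct cpu_messages, ipi_message);

static void muxed_ipi_message_pass(int cpu, int msg,
                                   void (*cause_ipi)(int, unsigned long))
{
        struct cpu_messages *info = &per_cpu(ipi_message, cpu);

        set_bit(msg, &info->messages);  /* publish the message first... */
        cause_ipi(cpu, info->data);     /* ...then ring the doorbell once */
}

On the receiving side, doorbell_exception() above just brackets smp_ipi_demux() with irq_enter()/irq_exit(); the demux clears and dispatches all pending bits in one exception.
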
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index 37771a518119..e7554154a6de 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c | |||
@@ -19,7 +19,7 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size, | |||
19 | dma_addr_t *dma_handle, gfp_t flag) | 19 | dma_addr_t *dma_handle, gfp_t flag) |
20 | { | 20 | { |
21 | return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size, | 21 | return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size, |
22 | dma_handle, device_to_mask(dev), flag, | 22 | dma_handle, dev->coherent_dma_mask, flag, |
23 | dev_to_node(dev)); | 23 | dev_to_node(dev)); |
24 | } | 24 | } |
25 | 25 | ||
@@ -74,16 +74,17 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask) | |||
74 | { | 74 | { |
75 | struct iommu_table *tbl = get_iommu_table_base(dev); | 75 | struct iommu_table *tbl = get_iommu_table_base(dev); |
76 | 76 | ||
77 | if (!tbl || tbl->it_offset > mask) { | 77 | if (!tbl) { |
78 | printk(KERN_INFO | 78 | dev_info(dev, "Warning: IOMMU dma not supported: mask 0x%08llx" |
79 | "Warning: IOMMU offset too big for device mask\n"); | 79 | ", table unavailable\n", mask); |
80 | if (tbl) | 80 | return 0; |
81 | printk(KERN_INFO | 81 | } |
82 | "mask: 0x%08llx, table offset: 0x%08lx\n", | 82 | |
83 | mask, tbl->it_offset); | 83 | if ((tbl->it_offset + tbl->it_size) > (mask >> IOMMU_PAGE_SHIFT)) { |
84 | else | 84 | dev_info(dev, "Warning: IOMMU window too big for device mask\n"); |
85 | printk(KERN_INFO "mask: 0x%08llx, table unavailable\n", | 85 | dev_info(dev, "mask: 0x%08llx, table end: 0x%08lx\n", |
86 | mask); | 86 | mask, (tbl->it_offset + tbl->it_size) << |
87 | IOMMU_PAGE_SHIFT); | ||
87 | return 0; | 88 | return 0; |
88 | } else | 89 | } else |
89 | return 1; | 90 | return 1; |
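
The rewritten dma_iommu_dma_supported() now rejects a device when the end of the IOMMU window, not merely its offset, lies beyond what the DMA mask can address, comparing both sides in units of IOMMU pages. A standalone worked example of the arithmetic (IOMMU_PAGE_SHIFT is 12, i.e. 4K IOMMU pages, on these platforms; the window numbers are illustrative):

#include <stdio.h>
#define IOMMU_PAGE_SHIFT 12

int main(void)
{
        unsigned long long mask = 0xffffffffULL; /* 32-bit-capable device */
        unsigned long it_offset = 0x80000;   /* window start, in IOMMU pages */
        unsigned long it_size   = 0x100000;  /* window length, in IOMMU pages */

        /* mask >> IOMMU_PAGE_SHIFT == 0xfffff, the last addressable page */
        if ((it_offset + it_size) > (mask >> IOMMU_PAGE_SHIFT))
                printf("IOMMU window too big for device mask\n"); /* this case */
        else
                printf("dma supported\n");
        return 0;
}
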
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 84d6367ec003..d238c082c3c5 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/memblock.h> | 12 | #include <linux/memblock.h> |
13 | #include <asm/bug.h> | 13 | #include <asm/bug.h> |
14 | #include <asm/abs_addr.h> | 14 | #include <asm/abs_addr.h> |
15 | #include <asm/machdep.h> | ||
15 | 16 | ||
16 | /* | 17 | /* |
17 | * Generic direct DMA implementation | 18 | * Generic direct DMA implementation |
@@ -89,7 +90,7 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask) | |||
89 | /* Could be improved so platforms can set the limit in case | 90 | /* Could be improved so platforms can set the limit in case |
90 | * they have limited DMA windows | 91 | * they have limited DMA windows |
91 | */ | 92 | */ |
92 | return mask >= (memblock_end_of_DRAM() - 1); | 93 | return mask >= get_dma_offset(dev) + (memblock_end_of_DRAM() - 1); |
93 | #else | 94 | #else |
94 | return 1; | 95 | return 1; |
95 | #endif | 96 | #endif |
@@ -154,6 +155,23 @@ EXPORT_SYMBOL(dma_direct_ops); | |||
154 | 155 | ||
155 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) | 156 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) |
156 | 157 | ||
158 | int dma_set_mask(struct device *dev, u64 dma_mask) | ||
159 | { | ||
160 | struct dma_map_ops *dma_ops = get_dma_ops(dev); | ||
161 | |||
162 | if (ppc_md.dma_set_mask) | ||
163 | return ppc_md.dma_set_mask(dev, dma_mask); | ||
164 | if (unlikely(dma_ops == NULL)) | ||
165 | return -EIO; | ||
166 | if (dma_ops->set_dma_mask != NULL) | ||
167 | return dma_ops->set_dma_mask(dev, dma_mask); | ||
168 | if (!dev->dma_mask || !dma_supported(dev, dma_mask)) | ||
169 | return -EIO; | ||
170 | *dev->dma_mask = dma_mask; | ||
171 | return 0; | ||
172 | } | ||
173 | EXPORT_SYMBOL(dma_set_mask); | ||
174 | |||
157 | static int __init dma_init(void) | 175 | static int __init dma_init(void) |
158 | { | 176 | { |
159 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | 177 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); |
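
The new arch-level dma_set_mask() establishes a clear precedence: a platform override (ppc_md.dma_set_mask) wins, then a set_dma_mask hook on the device's dma_ops, then the generic dma_supported() check before the mask is stored. Driver-side usage is unchanged; the usual pattern asks for the widest mask and falls back (a sketch; mydev_probe is hypothetical):

static int mydev_probe(struct device *dev)
{
        /* prefer 64-bit DMA, fall back to 32-bit, else give up */
        if (dma_set_mask(dev, DMA_BIT_MASK(64)) &&
            dma_set_mask(dev, DMA_BIT_MASK(32)))
                return -EIO;
        return 0;
}
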
@@ -161,3 +179,21 @@ static int __init dma_init(void) | |||
161 | return 0; | 179 | return 0; |
162 | } | 180 | } |
163 | fs_initcall(dma_init); | 181 | fs_initcall(dma_init); |
182 | |||
183 | int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma, | ||
184 | void *cpu_addr, dma_addr_t handle, size_t size) | ||
185 | { | ||
186 | unsigned long pfn; | ||
187 | |||
188 | #ifdef CONFIG_NOT_COHERENT_CACHE | ||
189 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
190 | pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr); | ||
191 | #else | ||
192 | pfn = page_to_pfn(virt_to_page(cpu_addr)); | ||
193 | #endif | ||
194 | return remap_pfn_range(vma, vma->vm_start, | ||
195 | pfn + vma->vm_pgoff, | ||
196 | vma->vm_end - vma->vm_start, | ||
197 | vma->vm_page_prot); | ||
198 | } | ||
199 | EXPORT_SYMBOL_GPL(dma_mmap_coherent); | ||
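
dma_mmap_coherent() gives drivers one portable way to expose a coherent buffer to user space: on non-cache-coherent parts it forces pgprot_noncached() and looks the pfn up in the coherent pool, otherwise it takes the ordinary virt_to_page() route. A hypothetical driver mmap file operation built on it (the my_device structure and its fields are assumptions; buf and buf_handle would come from an earlier dma_alloc_coherent() call):

struct my_device {
        struct device *dev;
        void *buf;                      /* from dma_alloc_coherent() */
        dma_addr_t buf_handle;
};
static struct my_device *mydev;         /* hypothetical driver state */

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
        return dma_mmap_coherent(mydev->dev, vma, mydev->buf,
                                 mydev->buf_handle,
                                 vma->vm_end - vma->vm_start);
}
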
diff --git a/arch/powerpc/kernel/e500-pmu.c b/arch/powerpc/kernel/e500-pmu.c index 7c07de0d8943..b150b510510f 100644 --- a/arch/powerpc/kernel/e500-pmu.c +++ b/arch/powerpc/kernel/e500-pmu.c | |||
@@ -126,4 +126,4 @@ static int init_e500_pmu(void) | |||
126 | return register_fsl_emb_pmu(&e500_pmu); | 126 | return register_fsl_emb_pmu(&e500_pmu); |
127 | } | 127 | } |
128 | 128 | ||
129 | arch_initcall(init_e500_pmu); | 129 | early_initcall(init_e500_pmu); |
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index ed4aeb96398b..56212bc0ab08 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <asm/asm-offsets.h> | 31 | #include <asm/asm-offsets.h> |
32 | #include <asm/unistd.h> | 32 | #include <asm/unistd.h> |
33 | #include <asm/ftrace.h> | 33 | #include <asm/ftrace.h> |
34 | #include <asm/ptrace.h> | ||
34 | 35 | ||
35 | #undef SHOW_SYSCALLS | 36 | #undef SHOW_SYSCALLS |
36 | #undef SHOW_SYSCALLS_TASK | 37 | #undef SHOW_SYSCALLS_TASK |
@@ -879,7 +880,18 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x) | |||
879 | */ | 880 | */ |
880 | andi. r10,r9,MSR_EE | 881 | andi. r10,r9,MSR_EE |
881 | beq 1f | 882 | beq 1f |
883 | /* | ||
884 | * Since the ftrace irqsoff latency trace checks CALLER_ADDR1, | ||
885 | * which is the stack frame here, we need to force a stack frame | ||
886 | * in case we came from user space. | ||
887 | */ | ||
888 | stwu r1,-32(r1) | ||
889 | mflr r0 | ||
890 | stw r0,4(r1) | ||
891 | stwu r1,-32(r1) | ||
882 | bl trace_hardirqs_on | 892 | bl trace_hardirqs_on |
893 | lwz r1,0(r1) | ||
894 | lwz r1,0(r1) | ||
883 | lwz r9,_MSR(r1) | 895 | lwz r9,_MSR(r1) |
884 | 1: | 896 | 1: |
885 | #endif /* CONFIG_TRACE_IRQFLAGS */ | 897 | #endif /* CONFIG_TRACE_IRQFLAGS */ |
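
The stwu/mflr/stw sequence added above exists because the ftrace irqsoff tracer samples CALLER_ADDR1, the return address one frame above trace_hardirqs_on()'s immediate caller; when the interrupted context was user space there is no such kernel frame to walk, so one is fabricated and the two lwz r1,0(r1) afterwards pop it again. In C terms (a sketch; CALLER_ADDRn comes from linux/ftrace.h):

void trace_hardirqs_on_sketch(void)
{
        unsigned long ip     = CALLER_ADDR0; /* our direct caller */
        unsigned long parent = CALLER_ADDR1; /* caller's caller */

        /* parent is only meaningful if the caller has a real stack frame;
         * the forced frame in entry_32.S guarantees that even when the
         * exception came straight from user space. */
        printk(KERN_DEBUG "irqs on at %lx from %lx\n", ip, parent);
}
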
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 42e9d908914a..d834425186ae 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S | |||
@@ -97,6 +97,24 @@ system_call_common: | |||
97 | addi r9,r1,STACK_FRAME_OVERHEAD | 97 | addi r9,r1,STACK_FRAME_OVERHEAD |
98 | ld r11,exception_marker@toc(r2) | 98 | ld r11,exception_marker@toc(r2) |
99 | std r11,-16(r9) /* "regshere" marker */ | 99 | std r11,-16(r9) /* "regshere" marker */ |
100 | #if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR) | ||
101 | BEGIN_FW_FTR_SECTION | ||
102 | beq 33f | ||
103 | /* if from user, see if there are any DTL entries to process */ | ||
104 | ld r10,PACALPPACAPTR(r13) /* get ptr to VPA */ | ||
105 | ld r11,PACA_DTL_RIDX(r13) /* get log read index */ | ||
106 | ld r10,LPPACA_DTLIDX(r10) /* get log write index */ | ||
107 | cmpd cr1,r11,r10 | ||
108 | beq+ cr1,33f | ||
109 | bl .accumulate_stolen_time | ||
110 | REST_GPR(0,r1) | ||
111 | REST_4GPRS(3,r1) | ||
112 | REST_2GPRS(7,r1) | ||
113 | addi r9,r1,STACK_FRAME_OVERHEAD | ||
114 | 33: | ||
115 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) | ||
116 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */ | ||
117 | |||
100 | #ifdef CONFIG_TRACE_IRQFLAGS | 118 | #ifdef CONFIG_TRACE_IRQFLAGS |
101 | bl .trace_hardirqs_on | 119 | bl .trace_hardirqs_on |
102 | REST_GPR(0,r1) | 120 | REST_GPR(0,r1) |
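
The new system-call entry hook compares the PACA's private dispatch-trace-log read index against the write index the hypervisor publishes in the VPA, and only drops into the slow .accumulate_stolen_time path when fresh entries exist; the REST_GPR/REST_4GPRS lines then undo the C call's clobbering of the syscall arguments. The same check at C level (a sketch mirroring the asm comments; the field names follow the PACA_DTL_RIDX and LPPACA_DTLIDX offsets used above):

static inline void check_stolen_time(void)
{
        u64 read_idx  = get_paca()->dtl_ridx;            /* our progress */
        u64 write_idx = get_paca()->lppaca_ptr->dtl_idx; /* hypervisor's */

        if (read_idx != write_idx)
                accumulate_stolen_time();  /* fold new DTL entries in */
}
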
@@ -202,7 +220,9 @@ syscall_exit: | |||
202 | bge- syscall_error | 220 | bge- syscall_error |
203 | syscall_error_cont: | 221 | syscall_error_cont: |
204 | ld r7,_NIP(r1) | 222 | ld r7,_NIP(r1) |
223 | BEGIN_FTR_SECTION | ||
205 | stdcx. r0,0,r1 /* to clear the reservation */ | 224 | stdcx. r0,0,r1 /* to clear the reservation */ |
225 | END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) | ||
206 | andi. r6,r8,MSR_PR | 226 | andi. r6,r8,MSR_PR |
207 | ld r4,_LINK(r1) | 227 | ld r4,_LINK(r1) |
208 | /* | 228 | /* |
@@ -401,6 +421,12 @@ BEGIN_FTR_SECTION | |||
401 | std r24,THREAD_VRSAVE(r3) | 421 | std r24,THREAD_VRSAVE(r3) |
402 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | 422 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
403 | #endif /* CONFIG_ALTIVEC */ | 423 | #endif /* CONFIG_ALTIVEC */ |
424 | #ifdef CONFIG_PPC64 | ||
425 | BEGIN_FTR_SECTION | ||
426 | mfspr r25,SPRN_DSCR | ||
427 | std r25,THREAD_DSCR(r3) | ||
428 | END_FTR_SECTION_IFSET(CPU_FTR_DSCR) | ||
429 | #endif | ||
404 | and. r0,r0,r22 | 430 | and. r0,r0,r22 |
405 | beq+ 1f | 431 | beq+ 1f |
406 | andc r22,r22,r0 | 432 | andc r22,r22,r0 |
@@ -419,6 +445,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
419 | sync | 445 | sync |
420 | #endif /* CONFIG_SMP */ | 446 | #endif /* CONFIG_SMP */ |
421 | 447 | ||
448 | /* | ||
449 | * If we optimise away the clear of the reservation in system | ||
450 | * calls because we know the CPU tracks the address of the | ||
451 | * reservation, then we need to clear it here to cover the | ||
452 | * case that the kernel context switch path has no larx | ||
453 | * instructions. | ||
454 | */ | ||
455 | BEGIN_FTR_SECTION | ||
456 | ldarx r6,0,r1 | ||
457 | END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS) | ||
458 | |||
422 | addi r6,r4,-THREAD /* Convert THREAD to 'current' */ | 459 | addi r6,r4,-THREAD /* Convert THREAD to 'current' */ |
423 | std r6,PACACURRENT(r13) /* Set new 'current' */ | 460 | std r6,PACACURRENT(r13) /* Set new 'current' */ |
424 | 461 | ||
@@ -431,10 +468,10 @@ BEGIN_FTR_SECTION | |||
431 | FTR_SECTION_ELSE_NESTED(95) | 468 | FTR_SECTION_ELSE_NESTED(95) |
432 | clrrdi r6,r8,40 /* get its 1T ESID */ | 469 | clrrdi r6,r8,40 /* get its 1T ESID */ |
433 | clrrdi r9,r1,40 /* get current sp 1T ESID */ | 470 | clrrdi r9,r1,40 /* get current sp 1T ESID */ |
434 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_1T_SEGMENT, 95) | 471 | ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_1T_SEGMENT, 95) |
435 | FTR_SECTION_ELSE | 472 | FTR_SECTION_ELSE |
436 | b 2f | 473 | b 2f |
437 | ALT_FTR_SECTION_END_IFSET(CPU_FTR_SLB) | 474 | ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_SLB) |
438 | clrldi. r0,r6,2 /* is new ESID c00000000? */ | 475 | clrldi. r0,r6,2 /* is new ESID c00000000? */ |
439 | cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */ | 476 | cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */ |
440 | cror eq,4*cr1+eq,eq | 477 | cror eq,4*cr1+eq,eq |
@@ -448,7 +485,7 @@ BEGIN_FTR_SECTION | |||
448 | li r9,MMU_SEGSIZE_1T /* insert B field */ | 485 | li r9,MMU_SEGSIZE_1T /* insert B field */ |
449 | oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h | 486 | oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h |
450 | rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0 | 487 | rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0 |
451 | END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) | 488 | END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) |
452 | 489 | ||
453 | /* Update the last bolted SLB. No write barriers are needed | 490 | /* Update the last bolted SLB. No write barriers are needed |
454 | * here, provided we only update the current CPU's SLB shadow | 491 | * here, provided we only update the current CPU's SLB shadow |
@@ -460,7 +497,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) | |||
460 | std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */ | 497 | std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */ |
461 | std r0,SLBSHADOW_STACKESID(r9) /* Save ESID */ | 498 | std r0,SLBSHADOW_STACKESID(r9) /* Save ESID */ |
462 | 499 | ||
463 | /* No need to check for CPU_FTR_NO_SLBIE_B here, since when | 500 | /* No need to check for MMU_FTR_NO_SLBIE_B here, since when |
464 | * we have 1TB segments, the only CPUs known to have the errata | 501 | * we have 1TB segments, the only CPUs known to have the errata |
465 | * only support less than 1TB of system memory and we'll never | 502 | * only support less than 1TB of system memory and we'll never |
466 | * actually hit this code path. | 503 | * actually hit this code path. |
@@ -491,6 +528,15 @@ BEGIN_FTR_SECTION | |||
491 | mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */ | 528 | mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */ |
492 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | 529 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
493 | #endif /* CONFIG_ALTIVEC */ | 530 | #endif /* CONFIG_ALTIVEC */ |
531 | #ifdef CONFIG_PPC64 | ||
532 | BEGIN_FTR_SECTION | ||
533 | ld r0,THREAD_DSCR(r4) | ||
534 | cmpd r0,r25 | ||
535 | beq 1f | ||
536 | mtspr SPRN_DSCR,r0 | ||
537 | 1: | ||
538 | END_FTR_SECTION_IFSET(CPU_FTR_DSCR) | ||
539 | #endif | ||
494 | 540 | ||
495 | /* r3-r13 are destroyed -- Cort */ | 541 | /* r3-r13 are destroyed -- Cort */ |
496 | REST_8GPRS(14, r1) | 542 | REST_8GPRS(14, r1) |
@@ -576,7 +622,16 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES) | |||
576 | andi. r0,r3,MSR_RI | 622 | andi. r0,r3,MSR_RI |
577 | beq- unrecov_restore | 623 | beq- unrecov_restore |
578 | 624 | ||
625 | /* | ||
626 | * Clear the reservation. If we know the CPU tracks the address of | ||
627 | * the reservation then we can potentially save some cycles and use | ||
628 | * a larx. On POWER6 and POWER7 this is significantly faster. | ||
629 | */ | ||
630 | BEGIN_FTR_SECTION | ||
579 | stdcx. r0,0,r1 /* to clear the reservation */ | 631 | stdcx. r0,0,r1 /* to clear the reservation */ |
632 | FTR_SECTION_ELSE | ||
633 | ldarx r4,0,r1 | ||
634 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) | ||
580 | 635 | ||
581 | /* | 636 | /* |
582 | * Clear RI before restoring r13. If we are returning to | 637 | * Clear RI before restoring r13. If we are returning to |
@@ -798,7 +853,7 @@ _GLOBAL(enter_rtas) | |||
798 | 853 | ||
799 | _STATIC(rtas_return_loc) | 854 | _STATIC(rtas_return_loc) |
800 | /* relocation is off at this point */ | 855 | /* relocation is off at this point */ |
801 | mfspr r4,SPRN_SPRG_PACA /* Get PACA */ | 856 | GET_PACA(r4) |
802 | clrldi r4,r4,2 /* convert to realmode address */ | 857 | clrldi r4,r4,2 /* convert to realmode address */ |
803 | 858 | ||
804 | bcl 20,31,$+4 | 859 | bcl 20,31,$+4 |
@@ -829,7 +884,7 @@ _STATIC(rtas_restore_regs) | |||
829 | REST_8GPRS(14, r1) /* Restore the non-volatiles */ | 884 | REST_8GPRS(14, r1) /* Restore the non-volatiles */ |
830 | REST_10GPRS(22, r1) /* ditto */ | 885 | REST_10GPRS(22, r1) /* ditto */ |
831 | 886 | ||
832 | mfspr r13,SPRN_SPRG_PACA | 887 | GET_PACA(r13) |
833 | 888 | ||
834 | ld r4,_CCR(r1) | 889 | ld r4,_CCR(r1) |
835 | mtcr r4 | 890 | mtcr r4 |
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 5c43063d2506..d24d4400cc79 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <asm/cputable.h> | 17 | #include <asm/cputable.h> |
18 | #include <asm/setup.h> | 18 | #include <asm/setup.h> |
19 | #include <asm/thread_info.h> | 19 | #include <asm/thread_info.h> |
20 | #include <asm/reg_a2.h> | ||
20 | #include <asm/exception-64e.h> | 21 | #include <asm/exception-64e.h> |
21 | #include <asm/bug.h> | 22 | #include <asm/bug.h> |
22 | #include <asm/irqflags.h> | 23 | #include <asm/irqflags.h> |
@@ -252,9 +253,6 @@ exception_marker: | |||
252 | .balign 0x1000 | 253 | .balign 0x1000 |
253 | .globl interrupt_base_book3e | 254 | .globl interrupt_base_book3e |
254 | interrupt_base_book3e: /* fake trap */ | 255 | interrupt_base_book3e: /* fake trap */ |
255 | /* Note: If real debug exceptions are supported by the HW, the vector | ||
256 | * below will have to be patched up to point to an appropriate handler | ||
257 | */ | ||
258 | EXCEPTION_STUB(0x000, machine_check) /* 0x0200 */ | 256 | EXCEPTION_STUB(0x000, machine_check) /* 0x0200 */ |
259 | EXCEPTION_STUB(0x020, critical_input) /* 0x0580 */ | 257 | EXCEPTION_STUB(0x020, critical_input) /* 0x0580 */ |
260 | EXCEPTION_STUB(0x040, debug_crit) /* 0x0d00 */ | 258 | EXCEPTION_STUB(0x040, debug_crit) /* 0x0d00 */ |
@@ -271,8 +269,13 @@ interrupt_base_book3e: /* fake trap */ | |||
271 | EXCEPTION_STUB(0x1a0, watchdog) /* 0x09f0 */ | 269 | EXCEPTION_STUB(0x1a0, watchdog) /* 0x09f0 */ |
272 | EXCEPTION_STUB(0x1c0, data_tlb_miss) | 270 | EXCEPTION_STUB(0x1c0, data_tlb_miss) |
273 | EXCEPTION_STUB(0x1e0, instruction_tlb_miss) | 271 | EXCEPTION_STUB(0x1e0, instruction_tlb_miss) |
272 | EXCEPTION_STUB(0x260, perfmon) | ||
274 | EXCEPTION_STUB(0x280, doorbell) | 273 | EXCEPTION_STUB(0x280, doorbell) |
275 | EXCEPTION_STUB(0x2a0, doorbell_crit) | 274 | EXCEPTION_STUB(0x2a0, doorbell_crit) |
275 | EXCEPTION_STUB(0x2c0, guest_doorbell) | ||
276 | EXCEPTION_STUB(0x2e0, guest_doorbell_crit) | ||
277 | EXCEPTION_STUB(0x300, hypercall) | ||
278 | EXCEPTION_STUB(0x320, ehpriv) | ||
276 | 279 | ||
277 | .globl interrupt_end_book3e | 280 | .globl interrupt_end_book3e |
278 | interrupt_end_book3e: | 281 | interrupt_end_book3e: |
@@ -379,7 +382,7 @@ interrupt_end_book3e: | |||
379 | mfspr r13,SPRN_SPRG_PACA /* get our PACA */ | 382 | mfspr r13,SPRN_SPRG_PACA /* get our PACA */ |
380 | b system_call_common | 383 | b system_call_common |
381 | 384 | ||
382 | /* Auxillary Processor Unavailable Interrupt */ | 385 | /* Auxiliary Processor Unavailable Interrupt */ |
383 | START_EXCEPTION(ap_unavailable); | 386 | START_EXCEPTION(ap_unavailable); |
384 | NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE) | 387 | NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE) |
385 | EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_KEEP) | 388 | EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_KEEP) |
@@ -454,6 +457,70 @@ interrupt_end_book3e: | |||
454 | kernel_dbg_exc: | 457 | kernel_dbg_exc: |
455 | b . /* NYI */ | 458 | b . /* NYI */ |
456 | 459 | ||
460 | /* Debug exception as a debug interrupt */ ||
461 | START_EXCEPTION(debug_debug); | ||
462 | DBG_EXCEPTION_PROLOG(0xd00, PROLOG_ADDITION_2REGS) | ||
463 | |||
464 | /* | ||
465 | * If there is a single step or branch-taken exception in an | ||
466 | * exception entry sequence, it was probably meant to apply to | ||
467 | * the code where the exception occurred (since exception entry | ||
468 | * doesn't turn off DE automatically). We simulate the effect | ||
469 | * of turning off DE on entry to an exception handler by turning | ||
470 | * off DE in the DSRR1 value and clearing the debug status. | ||
471 | */ | ||
472 | |||
473 | mfspr r14,SPRN_DBSR /* check single-step/branch taken */ | ||
474 | andis. r15,r14,DBSR_IC@h | ||
475 | beq+ 1f | ||
476 | |||
477 | LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) | ||
478 | LOAD_REG_IMMEDIATE(r15,interrupt_end_book3e) | ||
479 | cmpld cr0,r10,r14 | ||
480 | cmpld cr1,r10,r15 | ||
481 | blt+ cr0,1f | ||
482 | bge+ cr1,1f | ||
483 | |||
484 | /* here it looks like we got an inappropriate debug exception. */ | ||
485 | lis r14,DBSR_IC@h /* clear the IC event */ | ||
486 | rlwinm r11,r11,0,~MSR_DE /* clear DE in the DSRR1 value */ | ||
487 | mtspr SPRN_DBSR,r14 | ||
488 | mtspr SPRN_DSRR1,r11 | ||
489 | lwz r10,PACA_EXDBG+EX_CR(r13) /* restore registers */ | ||
490 | ld r1,PACA_EXDBG+EX_R1(r13) | ||
491 | ld r14,PACA_EXDBG+EX_R14(r13) | ||
492 | ld r15,PACA_EXDBG+EX_R15(r13) | ||
493 | mtcr r10 | ||
494 | ld r10,PACA_EXDBG+EX_R10(r13) /* restore registers */ | ||
495 | ld r11,PACA_EXDBG+EX_R11(r13) | ||
496 | mfspr r13,SPRN_SPRG_DBG_SCRATCH | ||
497 | rfdi | ||
498 | |||
499 | /* Normal debug exception */ | ||
500 | /* XXX We only handle coming from userspace for now since we can't | ||
501 | * quite save an interrupted kernel state properly yet ||
502 | */ | ||
503 | 1: andi. r14,r11,MSR_PR; /* check for userspace again */ | ||
504 | beq kernel_dbg_exc; /* if from kernel mode */ | ||
505 | |||
506 | /* Now we mash up things to make it look like we are coming on a | ||
507 | * normal exception | ||
508 | */ | ||
509 | mfspr r15,SPRN_SPRG_DBG_SCRATCH | ||
510 | mtspr SPRN_SPRG_GEN_SCRATCH,r15 | ||
511 | mfspr r14,SPRN_DBSR | ||
512 | EXCEPTION_COMMON(0xd00, PACA_EXDBG, INTS_DISABLE_ALL) | ||
513 | std r14,_DSISR(r1) | ||
514 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
515 | mr r4,r14 | ||
516 | ld r14,PACA_EXDBG+EX_R14(r13) | ||
517 | ld r15,PACA_EXDBG+EX_R15(r13) | ||
518 | bl .save_nvgprs | ||
519 | bl .DebugException | ||
520 | b .ret_from_except | ||
521 | |||
522 | MASKABLE_EXCEPTION(0x260, perfmon, .performance_monitor_exception, ACK_NONE) | ||
523 | |||
457 | /* Doorbell interrupt */ | 524 | /* Doorbell interrupt */ |
458 | MASKABLE_EXCEPTION(0x2070, doorbell, .doorbell_exception, ACK_NONE) | 525 | MASKABLE_EXCEPTION(0x2070, doorbell, .doorbell_exception, ACK_NONE) |
459 | 526 | ||
@@ -468,6 +535,11 @@ kernel_dbg_exc: | |||
468 | // b ret_from_crit_except | 535 | // b ret_from_crit_except |
469 | b . | 536 | b . |
470 | 537 | ||
538 | MASKABLE_EXCEPTION(0x2c0, guest_doorbell, .unknown_exception, ACK_NONE) | ||
539 | MASKABLE_EXCEPTION(0x2e0, guest_doorbell_crit, .unknown_exception, ACK_NONE) | ||
540 | MASKABLE_EXCEPTION(0x310, hypercall, .unknown_exception, ACK_NONE) | ||
541 | MASKABLE_EXCEPTION(0x320, ehpriv, .unknown_exception, ACK_NONE) | ||
542 | |||
471 | 543 | ||
472 | /* | 544 | /* |
473 | * An interrupt came in while soft-disabled; clear EE in SRR1, | 545 | * An interrupt came in while soft-disabled; clear EE in SRR1, |
@@ -587,7 +659,12 @@ fast_exception_return: | |||
587 | BAD_STACK_TRAMPOLINE(0x000) | 659 | BAD_STACK_TRAMPOLINE(0x000) |
588 | BAD_STACK_TRAMPOLINE(0x100) | 660 | BAD_STACK_TRAMPOLINE(0x100) |
589 | BAD_STACK_TRAMPOLINE(0x200) | 661 | BAD_STACK_TRAMPOLINE(0x200) |
662 | BAD_STACK_TRAMPOLINE(0x260) | ||
663 | BAD_STACK_TRAMPOLINE(0x2c0) | ||
664 | BAD_STACK_TRAMPOLINE(0x2e0) | ||
590 | BAD_STACK_TRAMPOLINE(0x300) | 665 | BAD_STACK_TRAMPOLINE(0x300) |
666 | BAD_STACK_TRAMPOLINE(0x310) | ||
667 | BAD_STACK_TRAMPOLINE(0x320) | ||
591 | BAD_STACK_TRAMPOLINE(0x400) | 668 | BAD_STACK_TRAMPOLINE(0x400) |
592 | BAD_STACK_TRAMPOLINE(0x500) | 669 | BAD_STACK_TRAMPOLINE(0x500) |
593 | BAD_STACK_TRAMPOLINE(0x600) | 670 | BAD_STACK_TRAMPOLINE(0x600) |
@@ -864,8 +941,23 @@ have_hes: | |||
864 | * that will have to be made dependent on whether we are running under | 941 | * that will have to be made dependent on whether we are running under |
865 | * a hypervisor I suppose. | 942 | * a hypervisor I suppose. |
866 | */ | 943 | */ |
867 | ori r3,r3,MAS0_HES | MAS0_WQ_ALLWAYS | 944 | |
868 | mtspr SPRN_MAS0,r3 | 945 | /* BEWARE, MAGIC |
946 | * This code is called as an ordinary function on the boot CPU. But to | ||
947 | * avoid duplication, this code is also used in SCOM bringup of | ||
948 | * secondary CPUs. We read the code between the initial_tlb_code_start | ||
949 | * and initial_tlb_code_end labels one instruction at a time and RAM it | ||
950 | * into the new core via SCOM. That doesn't process branches, so there | ||
951 | * must be none between those two labels. It also means if this code | ||
952 | * ever takes any parameters, the SCOM code must also be updated to | ||
953 | * provide them. | ||
954 | */ | ||
955 | .globl a2_tlbinit_code_start | ||
956 | a2_tlbinit_code_start: | ||
957 | |||
958 | ori r11,r3,MAS0_WQ_ALLWAYS | ||
959 | oris r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */ | ||
960 | mtspr SPRN_MAS0,r11 | ||
869 | lis r3,(MAS1_VALID | MAS1_IPROT)@h | 961 | lis r3,(MAS1_VALID | MAS1_IPROT)@h |
870 | ori r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT | 962 | ori r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT |
871 | mtspr SPRN_MAS1,r3 | 963 | mtspr SPRN_MAS1,r3 |
@@ -879,18 +971,86 @@ have_hes: | |||
879 | /* Write the TLB entry */ | 971 | /* Write the TLB entry */ |
880 | tlbwe | 972 | tlbwe |
881 | 973 | ||
974 | .globl a2_tlbinit_after_linear_map | ||
975 | a2_tlbinit_after_linear_map: | ||
976 | |||
882 | /* Now we branch the new virtual address mapped by this entry */ | 977 | /* Now we branch the new virtual address mapped by this entry */ |
883 | LOAD_REG_IMMEDIATE(r3,1f) | 978 | LOAD_REG_IMMEDIATE(r3,1f) |
884 | mtctr r3 | 979 | mtctr r3 |
885 | bctr | 980 | bctr |
886 | 981 | ||
887 | 1: /* We are now running at PAGE_OFFSET, clean the TLB of everything | 982 | 1: /* We are now running at PAGE_OFFSET, clean the TLB of everything |
888 | * else (XXX we should scan for bolted crap from the firmware too) | 983 | * else (including IPROTed things left by firmware) |
984 | * r4 = TLBnCFG | ||
985 | * r3 = current address (more or less) | ||
889 | */ | 986 | */ |
987 | |||
988 | li r5,0 | ||
989 | mtspr SPRN_MAS6,r5 | ||
990 | tlbsx 0,r3 | ||
991 | |||
992 | rlwinm r9,r4,0,TLBnCFG_N_ENTRY | ||
993 | rlwinm r10,r4,8,0xff | ||
994 | addi r10,r10,-1 /* Get inner loop mask */ | ||
995 | |||
996 | li r3,1 | ||
997 | |||
998 | mfspr r5,SPRN_MAS1 | ||
999 | rlwinm r5,r5,0,(~(MAS1_VALID|MAS1_IPROT)) | ||
1000 | |||
1001 | mfspr r6,SPRN_MAS2 | ||
1002 | rldicr r6,r6,0,51 /* Extract EPN */ | ||
1003 | |||
1004 | mfspr r7,SPRN_MAS0 | ||
1005 | rlwinm r7,r7,0,0xffff0fff /* Clear HES and WQ */ | ||
1006 | |||
1007 | rlwinm r8,r7,16,0xfff /* Extract ESEL */ | ||
1008 | |||
1009 | 2: add r4,r3,r8 | ||
1010 | and r4,r4,r10 | ||
1011 | |||
1012 | rlwimi r7,r4,16,MAS0_ESEL_MASK | ||
1013 | |||
1014 | mtspr SPRN_MAS0,r7 | ||
1015 | mtspr SPRN_MAS1,r5 | ||
1016 | mtspr SPRN_MAS2,r6 | ||
1017 | tlbwe | ||
1018 | |||
1019 | addi r3,r3,1 | ||
1020 | and. r4,r3,r10 | ||
1021 | |||
1022 | bne 3f | ||
1023 | addis r6,r6,(1<<30)@h | ||
1024 | 3: | ||
1025 | cmpw r3,r9 | ||
1026 | blt 2b | ||
1027 | |||
1028 | .globl a2_tlbinit_after_iprot_flush | ||
1029 | a2_tlbinit_after_iprot_flush: | ||
1030 | |||
1031 | #ifdef CONFIG_PPC_EARLY_DEBUG_WSP | ||
1032 | /* Now establish early debug mappings if applicable */ | ||
1033 | /* Restore the MAS0 we used for linear mapping load */ | ||
1034 | mtspr SPRN_MAS0,r11 | ||
1035 | |||
1036 | lis r3,(MAS1_VALID | MAS1_IPROT)@h | ||
1037 | ori r3,r3,(BOOK3E_PAGESZ_4K << MAS1_TSIZE_SHIFT) | ||
1038 | mtspr SPRN_MAS1,r3 | ||
1039 | LOAD_REG_IMMEDIATE(r3, WSP_UART_VIRT | MAS2_I | MAS2_G) | ||
1040 | mtspr SPRN_MAS2,r3 | ||
1041 | LOAD_REG_IMMEDIATE(r3, WSP_UART_PHYS | MAS3_SR | MAS3_SW) | ||
1042 | mtspr SPRN_MAS7_MAS3,r3 | ||
1043 | /* re-use the MAS8 value from the linear mapping */ | ||
1044 | tlbwe | ||
1045 | #endif /* CONFIG_PPC_EARLY_DEBUG_WSP */ | ||
1046 | |||
890 | PPC_TLBILX(0,0,0) | 1047 | PPC_TLBILX(0,0,0) |
891 | sync | 1048 | sync |
892 | isync | 1049 | isync |
893 | 1050 | ||
1051 | .globl a2_tlbinit_code_end | ||
1052 | a2_tlbinit_code_end: | ||
1053 | |||
894 | /* We translate LR and return */ | 1054 | /* We translate LR and return */ |
895 | mflr r3 | 1055 | mflr r3 |
896 | tovirt(r3,r3) | 1056 | tovirt(r3,r3) |
@@ -1040,3 +1200,33 @@ _GLOBAL(__setup_base_ivors) | |||
1040 | sync | 1200 | sync |
1041 | 1201 | ||
1042 | blr | 1202 | blr |
1203 | |||
1204 | _GLOBAL(setup_perfmon_ivor) | ||
1205 | SET_IVOR(35, 0x260) /* Performance Monitor */ | ||
1206 | blr | ||
1207 | |||
1208 | _GLOBAL(setup_doorbell_ivors) | ||
1209 | SET_IVOR(36, 0x280) /* Processor Doorbell */ | ||
1210 | SET_IVOR(37, 0x2a0) /* Processor Doorbell Crit */ | ||
1211 | |||
1212 | /* Check MMUCFG[LPIDSIZE] to determine if we have category E.HV */ | ||
1213 | mfspr r10,SPRN_MMUCFG | ||
1214 | rlwinm. r10,r10,0,MMUCFG_LPIDSIZE | ||
1215 | beqlr | ||
1216 | |||
1217 | SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */ | ||
1218 | SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */ | ||
1219 | blr | ||
1220 | |||
1221 | _GLOBAL(setup_ehv_ivors) | ||
1222 | /* | ||
1223 | * We may be running as a guest and lack E.HV even on a chip | ||
1224 | * that normally has it. | ||
1225 | */ | ||
1226 | mfspr r10,SPRN_MMUCFG | ||
1227 | rlwinm. r10,r10,0,MMUCFG_LPIDSIZE | ||
1228 | beqlr | ||
1229 | |||
1230 | SET_IVOR(40, 0x300) /* Embedded Hypervisor System Call */ | ||
1231 | SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */ | ||
1232 | blr | ||
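
Both setup_doorbell_ivors and setup_ehv_ivors gate the hypervisor-category vectors on MMUCFG[LPIDSIZE]: a zero LPID-size field means category E.HV is absent (or hidden from us because we run as a guest), so the guest-doorbell, hypercall and privilege IVORs must not be written. The same probe as it might look in C (a sketch; mfspr and the constants are the usual asm/reg*.h definitions):

static inline int have_ehv_category(void)
{
        /* nonzero LPIDSIZE <=> embedded-hypervisor category present */
        return (mfspr(SPRN_MMUCFG) & MMUCFG_LPIDSIZE) != 0;
}
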
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index f53029a01554..a85f4874cba7 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
@@ -5,7 +5,7 @@ | |||
5 | * handling and other fixed offset specific things. | 5 | * handling and other fixed offset specific things. |
6 | * | 6 | * |
7 | * This file is meant to be #included from head_64.S due to | 7 | * This file is meant to be #included from head_64.S due to |
8 | * position dependant assembly. | 8 | * position dependent assembly. |
9 | * | 9 | * |
10 | * Most of this originates from head_64.S and thus has the same | 10 | * Most of this originates from head_64.S and thus has the same |
11 | * copyright history. | 11 | * copyright history. |
@@ -13,6 +13,7 @@ | |||
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <asm/exception-64s.h> | 15 | #include <asm/exception-64s.h> |
16 | #include <asm/ptrace.h> | ||
16 | 17 | ||
17 | /* | 18 | /* |
18 | * We layout physical memory as follows: | 19 | * We layout physical memory as follows: |
@@ -36,23 +37,51 @@ | |||
36 | .globl __start_interrupts | 37 | .globl __start_interrupts |
37 | __start_interrupts: | 38 | __start_interrupts: |
38 | 39 | ||
39 | STD_EXCEPTION_PSERIES(0x100, system_reset) | 40 | .globl system_reset_pSeries; |
41 | system_reset_pSeries: | ||
42 | HMT_MEDIUM; | ||
43 | DO_KVM 0x100; | ||
44 | SET_SCRATCH0(r13) | ||
45 | #ifdef CONFIG_PPC_P7_NAP | ||
46 | BEGIN_FTR_SECTION | ||
47 | /* Running native on arch 2.06 or later, check if we are | ||
48 | * waking up from nap. We only handle no state loss and | ||
49 | * supervisor state loss. We do -not- handle hypervisor | ||
50 | * state loss at this time. | ||
51 | */ | ||
52 | mfspr r13,SPRN_SRR1 | ||
53 | rlwinm r13,r13,47-31,30,31 | ||
54 | cmpwi cr0,r13,1 | ||
55 | bne 1f | ||
56 | b .power7_wakeup_noloss | ||
57 | 1: cmpwi cr0,r13,2 | ||
58 | bne 1f | ||
59 | b .power7_wakeup_loss | ||
60 | /* Total loss of HV state is fatal, we could try to use the | ||
61 | * PIR to locate a PACA, then use an emergency stack etc... | ||
62 | * but for now, let's just stay stuck here | ||
63 | */ | ||
64 | 1: cmpwi cr0,r13,3 | ||
65 | beq . | ||
66 | END_FTR_SECTION_IFSET(CPU_FTR_HVMODE_206) | ||
67 | #endif /* CONFIG_PPC_P7_NAP */ | ||
68 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD) | ||
40 | 69 | ||
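The rlwinm in the nap-check block extracts SRR1[46:47], the architected wakeup-state field on arch 2.06: 1 means no state loss, 2 means supervisor state loss, 3 means hypervisor state loss. A C rendering of the decode, as a sketch (the shift follows from rotating the low word left 16 bits and masking two bits, exactly what the rlwinm does):

    /* Sketch: classify a POWER7 nap wakeup from SRR1 */
    unsigned long srr1 = mfspr(SPRN_SRR1);
    unsigned int wake = (srr1 >> 16) & 3;   /* SRR1[46:47] */
    /* wake == 1: no state lost        -> power7_wakeup_noloss
     * wake == 2: supervisor state lost -> power7_wakeup_loss
     * wake == 3: hypervisor state lost -> treated as fatal here */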
41 | . = 0x200 | 70 | . = 0x200 |
42 | _machine_check_pSeries: | 71 | _machine_check_pSeries: |
43 | HMT_MEDIUM | 72 | HMT_MEDIUM |
44 | DO_KVM 0x200 | 73 | DO_KVM 0x200 |
45 | mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */ | 74 | SET_SCRATCH0(r13) |
46 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) | 75 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD) |
47 | 76 | ||
48 | . = 0x300 | 77 | . = 0x300 |
49 | .globl data_access_pSeries | 78 | .globl data_access_pSeries |
50 | data_access_pSeries: | 79 | data_access_pSeries: |
51 | HMT_MEDIUM | 80 | HMT_MEDIUM |
52 | DO_KVM 0x300 | 81 | DO_KVM 0x300 |
53 | mtspr SPRN_SPRG_SCRATCH0,r13 | 82 | SET_SCRATCH0(r13) |
54 | BEGIN_FTR_SECTION | 83 | BEGIN_FTR_SECTION |
55 | mfspr r13,SPRN_SPRG_PACA | 84 | GET_PACA(r13) |
56 | std r9,PACA_EXSLB+EX_R9(r13) | 85 | std r9,PACA_EXSLB+EX_R9(r13) |
57 | std r10,PACA_EXSLB+EX_R10(r13) | 86 | std r10,PACA_EXSLB+EX_R10(r13) |
58 | mfspr r10,SPRN_DAR | 87 | mfspr r10,SPRN_DAR |
@@ -66,22 +95,22 @@ BEGIN_FTR_SECTION | |||
66 | std r11,PACA_EXGEN+EX_R11(r13) | 95 | std r11,PACA_EXGEN+EX_R11(r13) |
67 | ld r11,PACA_EXSLB+EX_R9(r13) | 96 | ld r11,PACA_EXSLB+EX_R9(r13) |
68 | std r12,PACA_EXGEN+EX_R12(r13) | 97 | std r12,PACA_EXGEN+EX_R12(r13) |
69 | mfspr r12,SPRN_SPRG_SCRATCH0 | 98 | GET_SCRATCH0(r12) |
70 | std r10,PACA_EXGEN+EX_R10(r13) | 99 | std r10,PACA_EXGEN+EX_R10(r13) |
71 | std r11,PACA_EXGEN+EX_R9(r13) | 100 | std r11,PACA_EXGEN+EX_R9(r13) |
72 | std r12,PACA_EXGEN+EX_R13(r13) | 101 | std r12,PACA_EXGEN+EX_R13(r13) |
73 | EXCEPTION_PROLOG_PSERIES_1(data_access_common) | 102 | EXCEPTION_PROLOG_PSERIES_1(data_access_common, EXC_STD) |
74 | FTR_SECTION_ELSE | 103 | FTR_SECTION_ELSE |
75 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common) | 104 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD) |
76 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_SLB) | 105 | ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB) |
77 | 106 | ||
78 | . = 0x380 | 107 | . = 0x380 |
79 | .globl data_access_slb_pSeries | 108 | .globl data_access_slb_pSeries |
80 | data_access_slb_pSeries: | 109 | data_access_slb_pSeries: |
81 | HMT_MEDIUM | 110 | HMT_MEDIUM |
82 | DO_KVM 0x380 | 111 | DO_KVM 0x380 |
83 | mtspr SPRN_SPRG_SCRATCH0,r13 | 112 | SET_SCRATCH0(r13) |
84 | mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */ | 113 | GET_PACA(r13) |
85 | std r3,PACA_EXSLB+EX_R3(r13) | 114 | std r3,PACA_EXSLB+EX_R3(r13) |
86 | mfspr r3,SPRN_DAR | 115 | mfspr r3,SPRN_DAR |
87 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ | 116 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ |
@@ -94,7 +123,7 @@ data_access_slb_pSeries: | |||
94 | std r10,PACA_EXSLB+EX_R10(r13) | 123 | std r10,PACA_EXSLB+EX_R10(r13) |
95 | std r11,PACA_EXSLB+EX_R11(r13) | 124 | std r11,PACA_EXSLB+EX_R11(r13) |
96 | std r12,PACA_EXSLB+EX_R12(r13) | 125 | std r12,PACA_EXSLB+EX_R12(r13) |
97 | mfspr r10,SPRN_SPRG_SCRATCH0 | 126 | GET_SCRATCH0(r10) |
98 | std r10,PACA_EXSLB+EX_R13(r13) | 127 | std r10,PACA_EXSLB+EX_R13(r13) |
99 | mfspr r12,SPRN_SRR1 /* and SRR1 */ | 128 | mfspr r12,SPRN_SRR1 /* and SRR1 */ |
100 | #ifndef CONFIG_RELOCATABLE | 129 | #ifndef CONFIG_RELOCATABLE |
@@ -112,15 +141,15 @@ data_access_slb_pSeries: | |||
112 | bctr | 141 | bctr |
113 | #endif | 142 | #endif |
114 | 143 | ||
115 | STD_EXCEPTION_PSERIES(0x400, instruction_access) | 144 | STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access) |
116 | 145 | ||
117 | . = 0x480 | 146 | . = 0x480 |
118 | .globl instruction_access_slb_pSeries | 147 | .globl instruction_access_slb_pSeries |
119 | instruction_access_slb_pSeries: | 148 | instruction_access_slb_pSeries: |
120 | HMT_MEDIUM | 149 | HMT_MEDIUM |
121 | DO_KVM 0x480 | 150 | DO_KVM 0x480 |
122 | mtspr SPRN_SPRG_SCRATCH0,r13 | 151 | SET_SCRATCH0(r13) |
123 | mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */ | 152 | GET_PACA(r13) |
124 | std r3,PACA_EXSLB+EX_R3(r13) | 153 | std r3,PACA_EXSLB+EX_R3(r13) |
125 | mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ | 154 | mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ |
126 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ | 155 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ |
@@ -133,7 +162,7 @@ instruction_access_slb_pSeries: | |||
133 | std r10,PACA_EXSLB+EX_R10(r13) | 162 | std r10,PACA_EXSLB+EX_R10(r13) |
134 | std r11,PACA_EXSLB+EX_R11(r13) | 163 | std r11,PACA_EXSLB+EX_R11(r13) |
135 | std r12,PACA_EXSLB+EX_R12(r13) | 164 | std r12,PACA_EXSLB+EX_R12(r13) |
136 | mfspr r10,SPRN_SPRG_SCRATCH0 | 165 | GET_SCRATCH0(r10) |
137 | std r10,PACA_EXSLB+EX_R13(r13) | 166 | std r10,PACA_EXSLB+EX_R13(r13) |
138 | mfspr r12,SPRN_SRR1 /* and SRR1 */ | 167 | mfspr r12,SPRN_SRR1 /* and SRR1 */ |
139 | #ifndef CONFIG_RELOCATABLE | 168 | #ifndef CONFIG_RELOCATABLE |
@@ -146,13 +175,29 @@ instruction_access_slb_pSeries: | |||
146 | bctr | 175 | bctr |
147 | #endif | 176 | #endif |
148 | 177 | ||
149 | MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt) | 178 | /* We open code these as we can't have a ". = x" (even with |
150 | STD_EXCEPTION_PSERIES(0x600, alignment) | 179 | * x = ".") within a feature section |
151 | STD_EXCEPTION_PSERIES(0x700, program_check) | 180 | */ |
152 | STD_EXCEPTION_PSERIES(0x800, fp_unavailable) | 181 | . = 0x500; |
153 | MASKABLE_EXCEPTION_PSERIES(0x900, decrementer) | 182 | .globl hardware_interrupt_pSeries; |
154 | STD_EXCEPTION_PSERIES(0xa00, trap_0a) | 183 | .globl hardware_interrupt_hv; |
155 | STD_EXCEPTION_PSERIES(0xb00, trap_0b) | 184 | hardware_interrupt_pSeries: |
185 | hardware_interrupt_hv: | ||
186 | BEGIN_FTR_SECTION | ||
187 | _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD) | ||
188 | FTR_SECTION_ELSE | ||
189 | _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV) | ||
190 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_HVMODE_206) | ||
191 | |||
192 | STD_EXCEPTION_PSERIES(0x600, 0x600, alignment) | ||
193 | STD_EXCEPTION_PSERIES(0x700, 0x700, program_check) | ||
194 | STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable) | ||
195 | |||
196 | MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer) | ||
197 | MASKABLE_EXCEPTION_HV(0x980, 0x980, decrementer) | ||
198 | |||
199 | STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a) | ||
200 | STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b) | ||
156 | 201 | ||
157 | . = 0xc00 | 202 | . = 0xc00 |
158 | .globl system_call_pSeries | 203 | .globl system_call_pSeries |
@@ -164,13 +209,13 @@ BEGIN_FTR_SECTION | |||
164 | beq- 1f | 209 | beq- 1f |
165 | END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) | 210 | END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) |
166 | mr r9,r13 | 211 | mr r9,r13 |
167 | mfspr r13,SPRN_SPRG_PACA | 212 | GET_PACA(r13) |
168 | mfspr r11,SPRN_SRR0 | 213 | mfspr r11,SPRN_SRR0 |
169 | ld r12,PACAKBASE(r13) | ||
170 | ld r10,PACAKMSR(r13) | ||
171 | LOAD_HANDLER(r12, system_call_entry) | ||
172 | mtspr SPRN_SRR0,r12 | ||
173 | mfspr r12,SPRN_SRR1 | 214 | mfspr r12,SPRN_SRR1 |
215 | ld r10,PACAKBASE(r13) | ||
216 | LOAD_HANDLER(r10, system_call_entry) | ||
217 | mtspr SPRN_SRR0,r10 | ||
218 | ld r10,PACAKMSR(r13) | ||
174 | mtspr SPRN_SRR1,r10 | 219 | mtspr SPRN_SRR1,r10 |
175 | rfid | 220 | rfid |
176 | b . /* prevent speculative execution */ | 221 | b . /* prevent speculative execution */ |
@@ -182,8 +227,21 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) | |||
182 | rfid /* return to userspace */ | 227 | rfid /* return to userspace */ |
183 | b . | 228 | b . |
184 | 229 | ||
185 | STD_EXCEPTION_PSERIES(0xd00, single_step) | 230 | STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step) |
186 | STD_EXCEPTION_PSERIES(0xe00, trap_0e) | 231 | |
232 | /* At 0xe??? we have a bunch of hypervisor exceptions; we branch | ||
233 | * out of line to handle them | ||
234 | */ | ||
235 | . = 0xe00 | ||
236 | b h_data_storage_hv | ||
237 | . = 0xe20 | ||
238 | b h_instr_storage_hv | ||
239 | . = 0xe40 | ||
240 | b emulation_assist_hv | ||
241 | . = 0xe50 | ||
242 | b hmi_exception_hv | ||
243 | . = 0xe60 | ||
244 | b hmi_exception_hv | ||
187 | 245 | ||
188 | /* We need to deal with the Altivec unavailable exception | 246 | /* We need to deal with the Altivec unavailable exception |
189 | * here which is at 0xf20, thus in the middle of the | 247 | * here which is at 0xf20, thus in the middle of the |
@@ -192,39 +250,42 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) | |||
192 | */ | 250 | */ |
193 | performance_monitor_pSeries_1: | 251 | performance_monitor_pSeries_1: |
194 | . = 0xf00 | 252 | . = 0xf00 |
195 | DO_KVM 0xf00 | ||
196 | b performance_monitor_pSeries | 253 | b performance_monitor_pSeries |
197 | 254 | ||
198 | altivec_unavailable_pSeries_1: | 255 | altivec_unavailable_pSeries_1: |
199 | . = 0xf20 | 256 | . = 0xf20 |
200 | DO_KVM 0xf20 | ||
201 | b altivec_unavailable_pSeries | 257 | b altivec_unavailable_pSeries |
202 | 258 | ||
203 | vsx_unavailable_pSeries_1: | 259 | vsx_unavailable_pSeries_1: |
204 | . = 0xf40 | 260 | . = 0xf40 |
205 | DO_KVM 0xf40 | ||
206 | b vsx_unavailable_pSeries | 261 | b vsx_unavailable_pSeries |
207 | 262 | ||
208 | #ifdef CONFIG_CBE_RAS | 263 | #ifdef CONFIG_CBE_RAS |
209 | HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error) | 264 | STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error) |
210 | #endif /* CONFIG_CBE_RAS */ | 265 | #endif /* CONFIG_CBE_RAS */ |
211 | STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint) | 266 | STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint) |
212 | #ifdef CONFIG_CBE_RAS | 267 | #ifdef CONFIG_CBE_RAS |
213 | HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance) | 268 | STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance) |
214 | #endif /* CONFIG_CBE_RAS */ | 269 | #endif /* CONFIG_CBE_RAS */ |
215 | STD_EXCEPTION_PSERIES(0x1700, altivec_assist) | 270 | STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist) |
216 | #ifdef CONFIG_CBE_RAS | 271 | #ifdef CONFIG_CBE_RAS |
217 | HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal) | 272 | STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal) |
218 | #endif /* CONFIG_CBE_RAS */ | 273 | #endif /* CONFIG_CBE_RAS */ |
219 | 274 | ||
220 | . = 0x3000 | 275 | . = 0x3000 |
221 | 276 | ||
222 | /*** pSeries interrupt support ***/ | 277 | /*** Out of line interrupts support ***/ |
278 | |||
279 | /* moved from 0xe00 */ | ||
280 | STD_EXCEPTION_HV(., 0xe00, h_data_storage) | ||
281 | STD_EXCEPTION_HV(., 0xe20, h_instr_storage) | ||
282 | STD_EXCEPTION_HV(., 0xe40, emulation_assist) | ||
283 | STD_EXCEPTION_HV(., 0xe60, hmi_exception) /* need to flush cache ? */ | ||
223 | 284 | ||
224 | /* moved from 0xf00 */ | 285 | /* moved from 0xf00 */ |
225 | STD_EXCEPTION_PSERIES(., performance_monitor) | 286 | STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor) |
226 | STD_EXCEPTION_PSERIES(., altivec_unavailable) | 287 | STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable) |
227 | STD_EXCEPTION_PSERIES(., vsx_unavailable) | 288 | STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable) |
228 | 289 | ||
229 | /* | 290 | /* |
230 | * An interrupt came in while soft-disabled; clear EE in SRR1, | 291 | * An interrupt came in while soft-disabled; clear EE in SRR1, |
@@ -239,17 +300,30 @@ masked_interrupt: | |||
239 | rotldi r10,r10,16 | 300 | rotldi r10,r10,16 |
240 | mtspr SPRN_SRR1,r10 | 301 | mtspr SPRN_SRR1,r10 |
241 | ld r10,PACA_EXGEN+EX_R10(r13) | 302 | ld r10,PACA_EXGEN+EX_R10(r13) |
242 | mfspr r13,SPRN_SPRG_SCRATCH0 | 303 | GET_SCRATCH0(r13) |
243 | rfid | 304 | rfid |
244 | b . | 305 | b . |
245 | 306 | ||
307 | masked_Hinterrupt: | ||
308 | stb r10,PACAHARDIRQEN(r13) | ||
309 | mtcrf 0x80,r9 | ||
310 | ld r9,PACA_EXGEN+EX_R9(r13) | ||
311 | mfspr r10,SPRN_HSRR1 | ||
312 | rldicl r10,r10,48,1 /* clear MSR_EE */ | ||
313 | rotldi r10,r10,16 | ||
314 | mtspr SPRN_HSRR1,r10 | ||
315 | ld r10,PACA_EXGEN+EX_R10(r13) | ||
316 | GET_SCRATCH0(r13) | ||
317 | hrfid | ||
318 | b . | ||
319 | |||
246 | .align 7 | 320 | .align 7 |
247 | do_stab_bolted_pSeries: | 321 | do_stab_bolted_pSeries: |
248 | std r11,PACA_EXSLB+EX_R11(r13) | 322 | std r11,PACA_EXSLB+EX_R11(r13) |
249 | std r12,PACA_EXSLB+EX_R12(r13) | 323 | std r12,PACA_EXSLB+EX_R12(r13) |
250 | mfspr r10,SPRN_SPRG_SCRATCH0 | 324 | GET_SCRATCH0(r10) |
251 | std r10,PACA_EXSLB+EX_R13(r13) | 325 | std r10,PACA_EXSLB+EX_R13(r13) |
252 | EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted) | 326 | EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD) |
253 | 327 | ||
254 | #ifdef CONFIG_PPC_PSERIES | 328 | #ifdef CONFIG_PPC_PSERIES |
255 | /* | 329 | /* |
@@ -259,15 +333,15 @@ do_stab_bolted_pSeries: | |||
259 | .align 7 | 333 | .align 7 |
260 | system_reset_fwnmi: | 334 | system_reset_fwnmi: |
261 | HMT_MEDIUM | 335 | HMT_MEDIUM |
262 | mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */ | 336 | SET_SCRATCH0(r13) /* save r13 */ |
263 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) | 337 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD) |
264 | 338 | ||
265 | .globl machine_check_fwnmi | 339 | .globl machine_check_fwnmi |
266 | .align 7 | 340 | .align 7 |
267 | machine_check_fwnmi: | 341 | machine_check_fwnmi: |
268 | HMT_MEDIUM | 342 | HMT_MEDIUM |
269 | mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */ | 343 | SET_SCRATCH0(r13) /* save r13 */ |
270 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) | 344 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD) |
271 | 345 | ||
272 | #endif /* CONFIG_PPC_PSERIES */ | 346 | #endif /* CONFIG_PPC_PSERIES */ |
273 | 347 | ||
@@ -281,7 +355,7 @@ slb_miss_user_pseries: | |||
281 | std r10,PACA_EXGEN+EX_R10(r13) | 355 | std r10,PACA_EXGEN+EX_R10(r13) |
282 | std r11,PACA_EXGEN+EX_R11(r13) | 356 | std r11,PACA_EXGEN+EX_R11(r13) |
283 | std r12,PACA_EXGEN+EX_R12(r13) | 357 | std r12,PACA_EXGEN+EX_R12(r13) |
284 | mfspr r10,SPRG_SCRATCH0 | 358 | GET_SCRATCH0(r10) |
285 | ld r11,PACA_EXSLB+EX_R9(r13) | 359 | ld r11,PACA_EXSLB+EX_R9(r13) |
286 | ld r12,PACA_EXSLB+EX_R3(r13) | 360 | ld r12,PACA_EXSLB+EX_R3(r13) |
287 | std r10,PACA_EXGEN+EX_R13(r13) | 361 | std r10,PACA_EXGEN+EX_R13(r13) |
@@ -299,6 +373,12 @@ slb_miss_user_pseries: | |||
299 | b . /* prevent spec. execution */ | 373 | b . /* prevent spec. execution */ |
300 | #endif /* __DISABLED__ */ | 374 | #endif /* __DISABLED__ */ |
301 | 375 | ||
376 | /* KVM's trampoline code needs to be close to the interrupt handlers */ | ||
377 | |||
378 | #ifdef CONFIG_KVM_BOOK3S_64_HANDLER | ||
379 | #include "../kvm/book3s_rmhandlers.S" | ||
380 | #endif | ||
381 | |||
302 | .align 7 | 382 | .align 7 |
303 | .globl __end_interrupts | 383 | .globl __end_interrupts |
304 | __end_interrupts: | 384 | __end_interrupts: |
@@ -335,6 +415,8 @@ machine_check_common: | |||
335 | STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) | 415 | STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) |
336 | STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) | 416 | STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) |
337 | STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) | 417 | STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) |
418 | STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception) | ||
419 | STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception) | ||
338 | STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception) | 420 | STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception) |
339 | STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) | 421 | STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) |
340 | #ifdef CONFIG_ALTIVEC | 422 | #ifdef CONFIG_ALTIVEC |
@@ -379,9 +461,24 @@ bad_stack: | |||
379 | std r12,_XER(r1) | 461 | std r12,_XER(r1) |
380 | SAVE_GPR(0,r1) | 462 | SAVE_GPR(0,r1) |
381 | SAVE_GPR(2,r1) | 463 | SAVE_GPR(2,r1) |
382 | SAVE_4GPRS(3,r1) | 464 | ld r10,EX_R3(r3) |
383 | SAVE_2GPRS(7,r1) | 465 | std r10,GPR3(r1) |
384 | SAVE_10GPRS(12,r1) | 466 | SAVE_GPR(4,r1) |
467 | SAVE_4GPRS(5,r1) | ||
468 | ld r9,EX_R9(r3) | ||
469 | ld r10,EX_R10(r3) | ||
470 | SAVE_2GPRS(9,r1) | ||
471 | ld r9,EX_R11(r3) | ||
472 | ld r10,EX_R12(r3) | ||
473 | ld r11,EX_R13(r3) | ||
474 | std r9,GPR11(r1) | ||
475 | std r10,GPR12(r1) | ||
476 | std r11,GPR13(r1) | ||
477 | BEGIN_FTR_SECTION | ||
478 | ld r10,EX_CFAR(r3) | ||
479 | std r10,ORIG_GPR3(r1) | ||
480 | END_FTR_SECTION_IFSET(CPU_FTR_CFAR) | ||
481 | SAVE_8GPRS(14,r1) | ||
385 | SAVE_10GPRS(22,r1) | 482 | SAVE_10GPRS(22,r1) |
386 | lhz r12,PACA_TRAP_SAVE(r13) | 483 | lhz r12,PACA_TRAP_SAVE(r13) |
387 | std r12,_TRAP(r1) | 484 | std r12,_TRAP(r1) |
@@ -390,6 +487,9 @@ bad_stack: | |||
390 | li r12,0 | 487 | li r12,0 |
391 | std r12,0(r11) | 488 | std r12,0(r11) |
392 | ld r2,PACATOC(r13) | 489 | ld r2,PACATOC(r13) |
490 | ld r11,exception_marker@toc(r2) | ||
491 | std r12,RESULT(r1) | ||
492 | std r11,STACK_FRAME_OVERHEAD-16(r1) | ||
393 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | 493 | 1: addi r3,r1,STACK_FRAME_OVERHEAD |
394 | bl .kernel_bad_stack | 494 | bl .kernel_bad_stack |
395 | b 1b | 495 | b 1b |
@@ -412,6 +512,19 @@ data_access_common: | |||
412 | li r5,0x300 | 512 | li r5,0x300 |
413 | b .do_hash_page /* Try to handle as hpte fault */ | 513 | b .do_hash_page /* Try to handle as hpte fault */ |
414 | 514 | ||
515 | .align 7 | ||
516 | .globl h_data_storage_common | ||
517 | h_data_storage_common: | ||
518 | mfspr r10,SPRN_HDAR | ||
519 | std r10,PACA_EXGEN+EX_DAR(r13) | ||
520 | mfspr r10,SPRN_HDSISR | ||
521 | stw r10,PACA_EXGEN+EX_DSISR(r13) | ||
522 | EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN) | ||
523 | bl .save_nvgprs | ||
524 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
525 | bl .unknown_exception | ||
526 | b .ret_from_except | ||
527 | |||
415 | .align 7 | 528 | .align 7 |
416 | .globl instruction_access_common | 529 | .globl instruction_access_common |
417 | instruction_access_common: | 530 | instruction_access_common: |
@@ -421,6 +534,8 @@ instruction_access_common: | |||
421 | li r5,0x400 | 534 | li r5,0x400 |
422 | b .do_hash_page /* Try to handle as hpte fault */ | 535 | b .do_hash_page /* Try to handle as hpte fault */ |
423 | 536 | ||
537 | STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception) | ||
538 | |||
424 | /* | 539 | /* |
425 | * Here is the common SLB miss user that is used when going to virtual | 540 | * Here is the common SLB miss user that is used when going to virtual |
426 | * mode for SLB misses, that is currently not used | 541 | * mode for SLB misses, that is currently not used |
@@ -743,7 +858,7 @@ _STATIC(do_hash_page) | |||
743 | BEGIN_FTR_SECTION | 858 | BEGIN_FTR_SECTION |
744 | andis. r0,r4,0x0020 /* Is it a segment table fault? */ | 859 | andis. r0,r4,0x0020 /* Is it a segment table fault? */ |
745 | bne- do_ste_alloc /* If so handle it */ | 860 | bne- do_ste_alloc /* If so handle it */ |
746 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) | 861 | END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB) |
747 | 862 | ||
748 | clrrdi r11,r1,THREAD_SHIFT | 863 | clrrdi r11,r1,THREAD_SHIFT |
749 | lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ | 864 | lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ |
@@ -818,12 +933,12 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES) | |||
818 | 933 | ||
819 | /* | 934 | /* |
820 | * hash_page couldn't handle it, set soft interrupt enable back | 935 | * hash_page couldn't handle it, set soft interrupt enable back |
821 | * to what it was before the trap. Note that .raw_local_irq_restore | 936 | * to what it was before the trap. Note that .arch_local_irq_restore |
822 | * handles any interrupts pending at this point. | 937 | * handles any interrupts pending at this point. |
823 | */ | 938 | */ |
824 | ld r3,SOFTE(r1) | 939 | ld r3,SOFTE(r1) |
825 | TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f) | 940 | TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f) |
826 | bl .raw_local_irq_restore | 941 | bl .arch_local_irq_restore |
827 | b 11f | 942 | b 11f |
828 | 943 | ||
829 | /* We have a data breakpoint exception - handle it */ | 944 | /* We have a data breakpoint exception - handle it */ |
@@ -970,20 +1085,6 @@ _GLOBAL(do_stab_bolted) | |||
970 | rfid | 1085 | rfid |
971 | b . /* prevent speculative execution */ | 1086 | b . /* prevent speculative execution */ |
972 | 1087 | ||
973 | /* | ||
974 | * Space for CPU0's segment table. | ||
975 | * | ||
976 | * On iSeries, the hypervisor must fill in at least one entry before | ||
977 | * we get control (with relocate on). The address is given to the hv | ||
978 | * as a page number (see xLparMap below), so this must be at a | ||
979 | * fixed address (the linker can't compute (u64)&initial_stab >> | ||
980 | * PAGE_SHIFT). | ||
981 | */ | ||
982 | . = STAB0_OFFSET /* 0x6000 */ | ||
983 | .globl initial_stab | ||
984 | initial_stab: | ||
985 | .space 4096 | ||
986 | |||
987 | #ifdef CONFIG_PPC_PSERIES | 1088 | #ifdef CONFIG_PPC_PSERIES |
988 | /* | 1089 | /* |
989 | * Data area reserved for FWNMI option. | 1090 | * Data area reserved for FWNMI option. |
@@ -1020,3 +1121,17 @@ xLparMap: | |||
1020 | #ifdef CONFIG_PPC_PSERIES | 1121 | #ifdef CONFIG_PPC_PSERIES |
1021 | . = 0x8000 | 1122 | . = 0x8000 |
1022 | #endif /* CONFIG_PPC_PSERIES */ | 1123 | #endif /* CONFIG_PPC_PSERIES */ |
1124 | |||
1125 | /* | ||
1126 | * Space for CPU0's segment table. | ||
1127 | * | ||
1128 | * On iSeries, the hypervisor must fill in at least one entry before | ||
1129 | * we get control (with relocate on). The address is given to the hv | ||
1130 | * as a page number (see xLparMap above), so this must be at a | ||
1131 | * fixed address (the linker can't compute (u64)&initial_stab >> | ||
1132 | * PAGE_SHIFT). | ||
1133 | */ | ||
1134 | . = STAB0_OFFSET /* 0x8000 */ | ||
1135 | .globl initial_stab | ||
1136 | initial_stab: | ||
1137 | .space 4096 | ||
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S index fc8f5b14019c..de369558bf0a 100644 --- a/arch/powerpc/kernel/fpu.S +++ b/arch/powerpc/kernel/fpu.S | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <asm/thread_info.h> | 23 | #include <asm/thread_info.h> |
24 | #include <asm/ppc_asm.h> | 24 | #include <asm/ppc_asm.h> |
25 | #include <asm/asm-offsets.h> | 25 | #include <asm/asm-offsets.h> |
26 | #include <asm/ptrace.h> | ||
26 | 27 | ||
27 | #ifdef CONFIG_VSX | 28 | #ifdef CONFIG_VSX |
28 | #define REST_32FPVSRS(n,c,base) \ | 29 | #define REST_32FPVSRS(n,c,base) \ |
@@ -163,24 +164,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) | |||
163 | /* | 164 | /* |
164 | * These are used in the alignment trap handler when emulating | 165 | * These are used in the alignment trap handler when emulating |
165 | * single-precision loads and stores. | 166 | * single-precision loads and stores. |
166 | * We restore and save the fpscr so the task gets the same result | ||
167 | * and exceptions as if the cpu had performed the load or store. | ||
168 | */ | 167 | */ |
169 | 168 | ||
170 | _GLOBAL(cvt_fd) | 169 | _GLOBAL(cvt_fd) |
171 | lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */ | ||
172 | MTFSF_L(0) | ||
173 | lfs 0,0(r3) | 170 | lfs 0,0(r3) |
174 | stfd 0,0(r4) | 171 | stfd 0,0(r4) |
175 | mffs 0 | ||
176 | stfd 0,THREAD_FPSCR(r5) /* save new fpscr value */ | ||
177 | blr | 172 | blr |
178 | 173 | ||
179 | _GLOBAL(cvt_df) | 174 | _GLOBAL(cvt_df) |
180 | lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */ | ||
181 | MTFSF_L(0) | ||
182 | lfd 0,0(r3) | 175 | lfd 0,0(r3) |
183 | stfs 0,0(r4) | 176 | stfs 0,0(r4) |
184 | mffs 0 | ||
185 | stfd 0,THREAD_FPSCR(r5) /* save new fpscr value */ | ||
186 | blr | 177 | blr |
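With the FPSCR save/restore gone, the two helpers reduce to pure format conversions done through fr0. Their C-level contract shrinks accordingly (prototypes as assumed here; the thread_struct argument previously used for the FPSCR is no longer needed):

    void cvt_fd(float *from, double *to);   /* *to = (double)*from */
    void cvt_df(double *from, float *to);   /* *to = (float)*from  */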
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index ce1f3e44c24f..bf99cfa6bbfe 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <asm/cacheflush.h> | 22 | #include <asm/cacheflush.h> |
23 | #include <asm/code-patching.h> | 23 | #include <asm/code-patching.h> |
24 | #include <asm/ftrace.h> | 24 | #include <asm/ftrace.h> |
25 | #include <asm/syscall.h> | ||
25 | 26 | ||
26 | 27 | ||
27 | #ifdef CONFIG_DYNAMIC_FTRACE | 28 | #ifdef CONFIG_DYNAMIC_FTRACE |
@@ -600,3 +601,10 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | |||
600 | } | 601 | } |
601 | } | 602 | } |
602 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 603 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
604 | |||
605 | #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) | ||
606 | unsigned long __init arch_syscall_addr(int nr) | ||
607 | { | ||
608 | return sys_call_table[nr*2]; | ||
609 | } | ||
610 | #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */ | ||
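The nr*2 indexing reflects the ppc64 layout of sys_call_table, where each syscall number owns two adjacent slots: the native 64-bit entry followed by the 32-bit compat entry. Conceptually (a sketch; the real table is generated in systbl.S):

    /* Assumed ppc64 table layout, hence the nr*2:
     *   sys_call_table[2*nr]     -> native 64-bit handler
     *   sys_call_table[2*nr + 1] -> 32-bit compat handler */
    unsigned long addr = sys_call_table[nr * 2];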
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 98c4b29a56f4..ba250d505e07 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S | |||
@@ -805,19 +805,6 @@ _ENTRY(copy_and_flush) | |||
805 | blr | 805 | blr |
806 | 806 | ||
807 | #ifdef CONFIG_SMP | 807 | #ifdef CONFIG_SMP |
808 | #ifdef CONFIG_GEMINI | ||
809 | .globl __secondary_start_gemini | ||
810 | __secondary_start_gemini: | ||
811 | mfspr r4,SPRN_HID0 | ||
812 | ori r4,r4,HID0_ICFI | ||
813 | li r3,0 | ||
814 | ori r3,r3,HID0_ICE | ||
815 | andc r4,r4,r3 | ||
816 | mtspr SPRN_HID0,r4 | ||
817 | sync | ||
818 | b __secondary_start | ||
819 | #endif /* CONFIG_GEMINI */ | ||
820 | |||
821 | .globl __secondary_start_mpc86xx | 808 | .globl __secondary_start_mpc86xx |
822 | __secondary_start_mpc86xx: | 809 | __secondary_start_mpc86xx: |
823 | mfspr r3, SPRN_PIR | 810 | mfspr r3, SPRN_PIR |
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index a90625f9b485..a91626d87fc9 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <asm/thread_info.h> | 40 | #include <asm/thread_info.h> |
41 | #include <asm/ppc_asm.h> | 41 | #include <asm/ppc_asm.h> |
42 | #include <asm/asm-offsets.h> | 42 | #include <asm/asm-offsets.h> |
43 | #include <asm/ptrace.h> | ||
43 | 44 | ||
44 | /* As with the other PowerPC ports, it is expected that when code | 45 | /* As with the other PowerPC ports, it is expected that when code |
45 | * execution begins here, the following registers contain valid, yet | 46 | * execution begins here, the following registers contain valid, yet |
@@ -765,7 +766,7 @@ DataAccess: | |||
765 | * miss get to this point to load the TLB. | 766 | * miss get to this point to load the TLB. |
766 | * r10 - TLB_TAG value | 767 | * r10 - TLB_TAG value |
767 | * r11 - Linux PTE | 768 | * r11 - Linux PTE |
768 | * r12, r9 - avilable to use | 769 | * r12, r9 - available to use |
769 | * PID - loaded with proper value when we get here | 770 | * PID - loaded with proper value when we get here |
770 | * Upon exit, we reload everything and RFI. | 771 | * Upon exit, we reload everything and RFI. |
771 | * Actually, it will fit now, but oh well.....a common place | 772 | * Actually, it will fit now, but oh well.....a common place |
@@ -923,11 +924,7 @@ initial_mmu: | |||
923 | mtspr SPRN_PID,r0 | 924 | mtspr SPRN_PID,r0 |
924 | sync | 925 | sync |
925 | 926 | ||
926 | /* Configure and load two entries into TLB slots 62 and 63. | 927 | /* Configure and load one entry into TLB slot 63 */
927 | * In case we are pinning TLBs, these are reserved in by the | ||
928 | * other TLB functions. If not reserving, then it doesn't | ||
929 | * matter where they are loaded. | ||
930 | */ | ||
931 | clrrwi r4,r4,10 /* Mask off the real page number */ | 928 | clrrwi r4,r4,10 /* Mask off the real page number */ |
932 | ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */ | 929 | ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */ |
933 | 930 | ||
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index 562305b40a8e..5e12b741ba5f 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <asm/thread_info.h> | 37 | #include <asm/thread_info.h> |
38 | #include <asm/ppc_asm.h> | 38 | #include <asm/ppc_asm.h> |
39 | #include <asm/asm-offsets.h> | 39 | #include <asm/asm-offsets.h> |
40 | #include <asm/ptrace.h> | ||
40 | #include <asm/synch.h> | 41 | #include <asm/synch.h> |
41 | #include "head_booke.h" | 42 | #include "head_booke.h" |
42 | 43 | ||
@@ -177,7 +178,7 @@ interrupt_base: | |||
177 | NORMAL_EXCEPTION_PROLOG | 178 | NORMAL_EXCEPTION_PROLOG |
178 | EXC_XFER_EE_LITE(0x0c00, DoSyscall) | 179 | EXC_XFER_EE_LITE(0x0c00, DoSyscall) |
179 | 180 | ||
180 | /* Auxillary Processor Unavailable Interrupt */ | 181 | /* Auxiliary Processor Unavailable Interrupt */ |
181 | EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE) | 182 | EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE) |
182 | 183 | ||
183 | /* Decrementer Interrupt */ | 184 | /* Decrementer Interrupt */ |
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index c571cd3c1453..ba504099844a 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S | |||
@@ -38,8 +38,9 @@ | |||
38 | #include <asm/page_64.h> | 38 | #include <asm/page_64.h> |
39 | #include <asm/irqflags.h> | 39 | #include <asm/irqflags.h> |
40 | #include <asm/kvm_book3s_asm.h> | 40 | #include <asm/kvm_book3s_asm.h> |
41 | #include <asm/ptrace.h> | ||
41 | 42 | ||
42 | /* The physical memory is layed out such that the secondary processor | 43 | /* The physical memory is laid out such that the secondary processor |
43 | * spin code sits at 0x0000...0x00ff. On server, the vectors follow | 44 | * spin code sits at 0x0000...0x00ff. On server, the vectors follow |
44 | * using the layout described in exceptions-64s.S | 45 | * using the layout described in exceptions-64s.S |
45 | */ | 46 | */ |
@@ -96,7 +97,7 @@ __secondary_hold_acknowledge: | |||
96 | .llong hvReleaseData-KERNELBASE | 97 | .llong hvReleaseData-KERNELBASE |
97 | #endif /* CONFIG_PPC_ISERIES */ | 98 | #endif /* CONFIG_PPC_ISERIES */ |
98 | 99 | ||
99 | #ifdef CONFIG_CRASH_DUMP | 100 | #ifdef CONFIG_RELOCATABLE |
100 | /* This flag is set to 1 by a loader if the kernel should run | 101 | /* This flag is set to 1 by a loader if the kernel should run |
101 | * at the loaded address instead of the linked address. This | 102 | * at the loaded address instead of the linked address. This |
102 | * is used by kexec-tools to keep the kdump kernel in the | 103
@@ -146,6 +147,8 @@ __secondary_hold: | |||
146 | mtctr r4 | 147 | mtctr r4 |
147 | mr r3,r24 | 148 | mr r3,r24 |
148 | li r4,0 | 149 | li r4,0 |
150 | /* Make sure that patched code is visible */ | ||
151 | isync | ||
149 | bctr | 152 | bctr |
150 | #else | 153 | #else |
151 | BUG_OPCODE | 154 | BUG_OPCODE |
@@ -166,12 +169,6 @@ exception_marker: | |||
166 | #include "exceptions-64s.S" | 169 | #include "exceptions-64s.S" |
167 | #endif | 170 | #endif |
168 | 171 | ||
169 | /* KVM trampoline code needs to be close to the interrupt handlers */ | ||
170 | |||
171 | #ifdef CONFIG_KVM_BOOK3S_64_HANDLER | ||
172 | #include "../kvm/book3s_rmhandlers.S" | ||
173 | #endif | ||
174 | |||
175 | _GLOBAL(generic_secondary_thread_init) | 172 | _GLOBAL(generic_secondary_thread_init) |
176 | mr r24,r3 | 173 | mr r24,r3 |
177 | 174 | ||
@@ -221,19 +218,25 @@ generic_secondary_common_init: | |||
221 | */ | 218 | */ |
222 | LOAD_REG_ADDR(r13, paca) /* Load paca pointer */ | 219 | LOAD_REG_ADDR(r13, paca) /* Load paca pointer */ |
223 | ld r13,0(r13) /* Get base vaddr of paca array */ | 220 | ld r13,0(r13) /* Get base vaddr of paca array */ |
221 | #ifndef CONFIG_SMP | ||
222 | addi r13,r13,PACA_SIZE /* so accidental use of r13 is detectable */ ||
223 | b .kexec_wait /* wait for next kernel if !SMP */ | ||
224 | #else | ||
225 | LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */ | ||
226 | lwz r7,0(r7) /* also the max paca allocated */ | ||
224 | li r5,0 /* logical cpu id */ | 227 | li r5,0 /* logical cpu id */ |
225 | 1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */ | 228 | 1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */ |
226 | cmpw r6,r24 /* Compare to our id */ | 229 | cmpw r6,r24 /* Compare to our id */ |
227 | beq 2f | 230 | beq 2f |
228 | addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */ | 231 | addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */ |
229 | addi r5,r5,1 | 232 | addi r5,r5,1 |
230 | cmpwi r5,NR_CPUS | 233 | cmpw r5,r7 /* Check if more pacas exist */ |
231 | blt 1b | 234 | blt 1b |
232 | 235 | ||
233 | mr r3,r24 /* not found, copy phys to r3 */ | 236 | mr r3,r24 /* not found, copy phys to r3 */ |
234 | b .kexec_wait /* next kernel might do better */ | 237 | b .kexec_wait /* next kernel might do better */ |
235 | 238 | ||
236 | 2: mtspr SPRN_SPRG_PACA,r13 /* Save vaddr of paca in an SPRG */ | 239 | 2: SET_PACA(r13) |
237 | #ifdef CONFIG_PPC_BOOK3E | 240 | #ifdef CONFIG_PPC_BOOK3E |
238 | addi r12,r13,PACA_EXTLB /* and TLB exc frame in another */ | 241 | addi r12,r13,PACA_EXTLB /* and TLB exc frame in another */ |
239 | mtspr SPRN_SPRG_TLB_EXFRAME,r12 | 242 | mtspr SPRN_SPRG_TLB_EXFRAME,r12 |
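The loop at 1: walks the paca array comparing each entry's hardware CPU id against r24, now bounded by nr_cpu_ids rather than NR_CPUS since only that many pacas are allocated. In C it is roughly (a sketch over the kernel's paca array; field name per struct paca_struct):

    int cpu;
    for (cpu = 0; cpu < nr_cpu_ids; cpu++)
            if (paca[cpu].hw_cpu_id == hw_id)   /* PACAHWCPUID */
                    break;                      /* cpu is our logical id */
    /* no match: fall through to kexec_wait */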
@@ -241,34 +244,39 @@ generic_secondary_common_init: | |||
241 | 244 | ||
242 | /* From now on, r24 is expected to be logical cpuid */ | 245 | /* From now on, r24 is expected to be logical cpuid */ |
243 | mr r24,r5 | 246 | mr r24,r5 |
244 | 3: HMT_LOW | ||
245 | lbz r23,PACAPROCSTART(r13) /* Test if this processor should */ | ||
246 | /* start. */ | ||
247 | |||
248 | #ifndef CONFIG_SMP | ||
249 | b 3b /* Never go on non-SMP */ | ||
250 | #else | ||
251 | cmpwi 0,r23,0 | ||
252 | beq 3b /* Loop until told to go */ | ||
253 | |||
254 | sync /* order paca.run and cur_cpu_spec */ | ||
255 | 247 | ||
256 | /* See if we need to call a cpu state restore handler */ | 248 | /* See if we need to call a cpu state restore handler */ |
257 | LOAD_REG_ADDR(r23, cur_cpu_spec) | 249 | LOAD_REG_ADDR(r23, cur_cpu_spec) |
258 | ld r23,0(r23) | 250 | ld r23,0(r23) |
259 | ld r23,CPU_SPEC_RESTORE(r23) | 251 | ld r23,CPU_SPEC_RESTORE(r23) |
260 | cmpdi 0,r23,0 | 252 | cmpdi 0,r23,0 |
261 | beq 4f | 253 | beq 3f |
262 | ld r23,0(r23) | 254 | ld r23,0(r23) |
263 | mtctr r23 | 255 | mtctr r23 |
264 | bctrl | 256 | bctrl |
265 | 257 | ||
266 | 4: /* Create a temp kernel stack for use before relocation is on. */ | 258 | 3: LOAD_REG_ADDR(r3, boot_cpu_count) /* Decrement boot_cpu_count */ |
259 | lwarx r4,0,r3 | ||
260 | subi r4,r4,1 | ||
261 | stwcx. r4,0,r3 | ||
262 | bne 3b | ||
263 | isync | ||
264 | |||
265 | 4: HMT_LOW | ||
266 | lbz r23,PACAPROCSTART(r13) /* Test if this processor should */ | ||
267 | /* start. */ | ||
268 | cmpwi 0,r23,0 | ||
269 | beq 4b /* Loop until told to go */ | ||
270 | |||
271 | sync /* order paca.run and cur_cpu_spec */ | ||
272 | isync /* In case code patching happened */ | ||
273 | |||
274 | /* Create a temp kernel stack for use before relocation is on. */ | ||
267 | ld r1,PACAEMERGSP(r13) | 275 | ld r1,PACAEMERGSP(r13) |
268 | subi r1,r1,STACK_FRAME_OVERHEAD | 276 | subi r1,r1,STACK_FRAME_OVERHEAD |
269 | 277 | ||
270 | b __secondary_start | 278 | b __secondary_start |
271 | #endif | 279 | #endif /* SMP */ |
272 | 280 | ||
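The lwarx/stwcx. loop at 3: is the standard load-reserve/store-conditional retry sequence, here implementing an atomic decrement of boot_cpu_count so the boot CPU can tell when all secondaries have checked in. As a C sketch (this spelling assumes boot_cpu_count were an atomic_t, which it is not; the asm open-codes the same effect on a plain int):

    /* Sketch: what the lwarx/stwcx. retry loop accomplishes */
    atomic_dec(&boot_cpu_count);   /* retried until stwcx. succeeds */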
273 | /* | 281 | /* |
274 | * Turn the MMU off. | 282 | * Turn the MMU off. |
@@ -390,12 +398,10 @@ _STATIC(__after_prom_start) | |||
390 | /* process relocations for the final address of the kernel */ | 398 | /* process relocations for the final address of the kernel */ |
391 | lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */ | 399 | lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */ |
392 | sldi r25,r25,32 | 400 | sldi r25,r25,32 |
393 | #ifdef CONFIG_CRASH_DUMP | ||
394 | lwz r7,__run_at_load-_stext(r26) | 401 | lwz r7,__run_at_load-_stext(r26) |
395 | cmplwi cr0,r7,1 /* kdump kernel ? - stay where we are */ | 402 | cmplwi cr0,r7,1 /* flagged to stay where we are ? */ |
396 | bne 1f | 403 | bne 1f |
397 | add r25,r25,r26 | 404 | add r25,r25,r26 |
398 | #endif | ||
399 | 1: mr r3,r25 | 405 | 1: mr r3,r25 |
400 | bl .relocate | 406 | bl .relocate |
401 | #endif | 407 | #endif |
@@ -541,7 +547,14 @@ _GLOBAL(pmac_secondary_start) | |||
541 | ld r4,0(r4) /* Get base vaddr of paca array */ | 547 | ld r4,0(r4) /* Get base vaddr of paca array */ |
542 | mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */ | 548 | mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */ |
543 | add r13,r13,r4 /* for this processor. */ | 549 | add r13,r13,r4 /* for this processor. */ |
544 | mtspr SPRN_SPRG_PACA,r13 /* Save vaddr of paca in an SPRG*/ | 550 | SET_PACA(r13) /* Save vaddr of paca in an SPRG*/ |
551 | |||
552 | /* Mark interrupts soft and hard disabled (they might be enabled | ||
553 | * in the PACA when doing hotplug) | ||
554 | */ | ||
555 | li r0,0 | ||
556 | stb r0,PACASOFTIRQEN(r13) | ||
557 | stb r0,PACAHARDIRQEN(r13) | ||
545 | 558 | ||
546 | /* Create a temp kernel stack for use before relocation is on. */ | 559 | /* Create a temp kernel stack for use before relocation is on. */ |
547 | ld r1,PACAEMERGSP(r13) | 560 | ld r1,PACAEMERGSP(r13) |
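The two stb instructions mark interrupts soft- and hard-disabled in the PACA before the secondary runs any common code, since a hotplugged CPU may find stale enabled state there. Field for field, the effect is (a sketch; fields from struct paca_struct of this era):

    /* Sketch: what the two stb's record in the PACA */
    local_paca->soft_enabled = 0;   /* PACASOFTIRQEN */
    local_paca->hard_enabled = 0;   /* PACAHARDIRQEN */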
@@ -645,7 +658,7 @@ _GLOBAL(enable_64b_mode) | |||
645 | oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */ | 658 | oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */ |
646 | mtmsr r11 | 659 | mtmsr r11 |
647 | #else /* CONFIG_PPC_BOOK3E */ | 660 | #else /* CONFIG_PPC_BOOK3E */ |
648 | li r12,(MSR_SF | MSR_ISF)@highest | 661 | li r12,(MSR_64BIT | MSR_ISF)@highest |
649 | sldi r12,r12,48 | 662 | sldi r12,r12,48 |
650 | or r11,r11,r12 | 663 | or r11,r11,r12 |
651 | mtmsrd r11 | 664 | mtmsrd r11 |
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 1f1a04b5c2a4..1cbf64e6b416 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <asm/thread_info.h> | 29 | #include <asm/thread_info.h> |
30 | #include <asm/ppc_asm.h> | 30 | #include <asm/ppc_asm.h> |
31 | #include <asm/asm-offsets.h> | 31 | #include <asm/asm-offsets.h> |
32 | #include <asm/ptrace.h> | ||
32 | 33 | ||
33 | /* Macro to make the code more readable. */ | 34 | /* Macro to make the code more readable. */ |
34 | #ifdef CONFIG_8xx_CPU6 | 35 | #ifdef CONFIG_8xx_CPU6 |
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 4faeba247854..5ecf54cfa7d4 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <asm/ppc_asm.h> | 41 | #include <asm/ppc_asm.h> |
42 | #include <asm/asm-offsets.h> | 42 | #include <asm/asm-offsets.h> |
43 | #include <asm/cache.h> | 43 | #include <asm/cache.h> |
44 | #include <asm/ptrace.h> | ||
44 | #include "head_booke.h" | 45 | #include "head_booke.h" |
45 | 46 | ||
46 | /* As with the other PowerPC ports, it is expected that when code | 47 | /* As with the other PowerPC ports, it is expected that when code |
@@ -152,8 +153,11 @@ _ENTRY(__early_start) | |||
152 | /* Check to see if we're the second processor, and jump | 153 | /* Check to see if we're the second processor, and jump |
153 | * to the secondary_start code if so | 154 | * to the secondary_start code if so |
154 | */ | 155 | */ |
155 | mfspr r24,SPRN_PIR | 156 | lis r24, boot_cpuid@h |
156 | cmpwi r24,0 | 157 | ori r24, r24, boot_cpuid@l |
158 | lwz r24, 0(r24) | ||
159 | cmpwi r24, -1 | ||
160 | mfspr r24,SPRN_PIR | ||
157 | bne __secondary_start | 161 | bne __secondary_start |
158 | #endif | 162 | #endif |
159 | 163 | ||
@@ -175,6 +179,9 @@ _ENTRY(__early_start) | |||
175 | li r0,0 | 179 | li r0,0 |
176 | stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) | 180 | stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) |
177 | 181 | ||
182 | rlwinm r22,r1,0,0,31-THREAD_SHIFT /* current thread_info */ | ||
183 | stw r24, TI_CPU(r22) | ||
184 | |||
178 | bl early_init | 185 | bl early_init |
179 | 186 | ||
180 | #ifdef CONFIG_RELOCATABLE | 187 | #ifdef CONFIG_RELOCATABLE |
@@ -319,7 +326,7 @@ interrupt_base: | |||
319 | NORMAL_EXCEPTION_PROLOG | 326 | NORMAL_EXCEPTION_PROLOG |
320 | EXC_XFER_EE_LITE(0x0c00, DoSyscall) | 327 | EXC_XFER_EE_LITE(0x0c00, DoSyscall) |
321 | 328 | ||
322 | /* Auxillary Processor Unavailable Interrupt */ | 329 | /* Auxiliary Processor Unavailable Interrupt */ |
323 | EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE) | 330 | EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE) |
324 | 331 | ||
325 | /* Decrementer Interrupt */ | 332 | /* Decrementer Interrupt */ |
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index 9b626cfffce1..28581f1ad2c0 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c | |||
@@ -162,13 +162,10 @@ static int ibmebus_create_device(struct device_node *dn) | |||
162 | dev->dev.bus = &ibmebus_bus_type; | 162 | dev->dev.bus = &ibmebus_bus_type; |
163 | dev->dev.archdata.dma_ops = &ibmebus_dma_ops; | 163 | dev->dev.archdata.dma_ops = &ibmebus_dma_ops; |
164 | 164 | ||
165 | ret = of_device_register(dev); | 165 | ret = of_device_add(dev); |
166 | if (ret) { | 166 | if (ret) |
167 | of_device_free(dev); | 167 | platform_device_put(dev); |
168 | return ret; | 168 | return ret; |
169 | } | ||
170 | |||
171 | return 0; | ||
172 | } | 169 | } |
173 | 170 | ||
174 | static int ibmebus_create_devices(const struct of_device_id *matches) | 171 | static int ibmebus_create_devices(const struct of_device_id *matches) |
@@ -204,13 +201,14 @@ int ibmebus_register_driver(struct of_platform_driver *drv) | |||
204 | /* If the driver uses devices that ibmebus doesn't know, add them */ | 201 | /* If the driver uses devices that ibmebus doesn't know, add them */ |
205 | ibmebus_create_devices(drv->driver.of_match_table); | 202 | ibmebus_create_devices(drv->driver.of_match_table); |
206 | 203 | ||
207 | return of_register_driver(drv, &ibmebus_bus_type); | 204 | drv->driver.bus = &ibmebus_bus_type; |
205 | return driver_register(&drv->driver); | ||
208 | } | 206 | } |
209 | EXPORT_SYMBOL(ibmebus_register_driver); | 207 | EXPORT_SYMBOL(ibmebus_register_driver); |
210 | 208 | ||
211 | void ibmebus_unregister_driver(struct of_platform_driver *drv) | 209 | void ibmebus_unregister_driver(struct of_platform_driver *drv) |
212 | { | 210 | { |
213 | of_unregister_driver(drv); | 211 | driver_unregister(&drv->driver); |
214 | } | 212 | } |
215 | EXPORT_SYMBOL(ibmebus_unregister_driver); | 213 | EXPORT_SYMBOL(ibmebus_unregister_driver); |
216 | 214 | ||
@@ -311,15 +309,410 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus, | |||
311 | } | 309 | } |
312 | } | 310 | } |
313 | 311 | ||
312 | |||
314 | static struct bus_attribute ibmebus_bus_attrs[] = { | 313 | static struct bus_attribute ibmebus_bus_attrs[] = { |
315 | __ATTR(probe, S_IWUSR, NULL, ibmebus_store_probe), | 314 | __ATTR(probe, S_IWUSR, NULL, ibmebus_store_probe), |
316 | __ATTR(remove, S_IWUSR, NULL, ibmebus_store_remove), | 315 | __ATTR(remove, S_IWUSR, NULL, ibmebus_store_remove), |
317 | __ATTR_NULL | 316 | __ATTR_NULL |
318 | }; | 317 | }; |
319 | 318 | ||
319 | static int ibmebus_bus_bus_match(struct device *dev, struct device_driver *drv) | ||
320 | { | ||
321 | const struct of_device_id *matches = drv->of_match_table; | ||
322 | |||
323 | if (!matches) | ||
324 | return 0; | ||
325 | |||
326 | return of_match_device(matches, dev) != NULL; | ||
327 | } | ||
328 | |||
329 | static int ibmebus_bus_device_probe(struct device *dev) | ||
330 | { | ||
331 | int error = -ENODEV; | ||
332 | struct of_platform_driver *drv; | ||
333 | struct platform_device *of_dev; | ||
334 | const struct of_device_id *match; | ||
335 | |||
336 | drv = to_of_platform_driver(dev->driver); | ||
337 | of_dev = to_platform_device(dev); | ||
338 | |||
339 | if (!drv->probe) | ||
340 | return error; | ||
341 | |||
342 | of_dev_get(of_dev); | ||
343 | |||
344 | match = of_match_device(drv->driver.of_match_table, dev); | ||
345 | if (match) | ||
346 | error = drv->probe(of_dev, match); | ||
347 | if (error) | ||
348 | of_dev_put(of_dev); | ||
349 | |||
350 | return error; | ||
351 | } | ||
352 | |||
353 | static int ibmebus_bus_device_remove(struct device *dev) | ||
354 | { | ||
355 | struct platform_device *of_dev = to_platform_device(dev); | ||
356 | struct of_platform_driver *drv = to_of_platform_driver(dev->driver); | ||
357 | |||
358 | if (dev->driver && drv->remove) | ||
359 | drv->remove(of_dev); | ||
360 | return 0; | ||
361 | } | ||
362 | |||
363 | static void ibmebus_bus_device_shutdown(struct device *dev) | ||
364 | { | ||
365 | struct platform_device *of_dev = to_platform_device(dev); | ||
366 | struct of_platform_driver *drv = to_of_platform_driver(dev->driver); | ||
367 | |||
368 | if (dev->driver && drv->shutdown) | ||
369 | drv->shutdown(of_dev); | ||
370 | } | ||
371 | |||
372 | /* | ||
373 | * ibmebus_bus_device_attrs | ||
374 | */ | ||
375 | static ssize_t devspec_show(struct device *dev, | ||
376 | struct device_attribute *attr, char *buf) | ||
377 | { | ||
378 | struct platform_device *ofdev; | ||
379 | |||
380 | ofdev = to_platform_device(dev); | ||
381 | return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name); | ||
382 | } | ||
383 | |||
384 | static ssize_t name_show(struct device *dev, | ||
385 | struct device_attribute *attr, char *buf) | ||
386 | { | ||
387 | struct platform_device *ofdev; | ||
388 | |||
389 | ofdev = to_platform_device(dev); | ||
390 | return sprintf(buf, "%s\n", ofdev->dev.of_node->name); | ||
391 | } | ||
392 | |||
393 | static ssize_t modalias_show(struct device *dev, | ||
394 | struct device_attribute *attr, char *buf) | ||
395 | { | ||
396 | ssize_t len = of_device_get_modalias(dev, buf, PAGE_SIZE - 2); | ||
397 | buf[len] = '\n'; | ||
398 | buf[len+1] = 0; | ||
399 | return len+1; | ||
400 | } | ||
401 | |||
402 | struct device_attribute ibmebus_bus_device_attrs[] = { | ||
403 | __ATTR_RO(devspec), | ||
404 | __ATTR_RO(name), | ||
405 | __ATTR_RO(modalias), | ||
406 | __ATTR_NULL | ||
407 | }; | ||
408 | |||
409 | #ifdef CONFIG_PM_SLEEP | ||
410 | static int ibmebus_bus_legacy_suspend(struct device *dev, pm_message_t mesg) | ||
411 | { | ||
412 | struct platform_device *of_dev = to_platform_device(dev); | ||
413 | struct of_platform_driver *drv = to_of_platform_driver(dev->driver); | ||
414 | int ret = 0; | ||
415 | |||
416 | if (dev->driver && drv->suspend) | ||
417 | ret = drv->suspend(of_dev, mesg); | ||
418 | return ret; | ||
419 | } | ||
420 | |||
421 | static int ibmebus_bus_legacy_resume(struct device *dev) | ||
422 | { | ||
423 | struct platform_device *of_dev = to_platform_device(dev); | ||
424 | struct of_platform_driver *drv = to_of_platform_driver(dev->driver); | ||
425 | int ret = 0; | ||
426 | |||
427 | if (dev->driver && drv->resume) | ||
428 | ret = drv->resume(of_dev); | ||
429 | return ret; | ||
430 | } | ||
431 | |||
432 | static int ibmebus_bus_pm_prepare(struct device *dev) | ||
433 | { | ||
434 | struct device_driver *drv = dev->driver; | ||
435 | int ret = 0; | ||
436 | |||
437 | if (drv && drv->pm && drv->pm->prepare) | ||
438 | ret = drv->pm->prepare(dev); | ||
439 | |||
440 | return ret; | ||
441 | } | ||
442 | |||
443 | static void ibmebus_bus_pm_complete(struct device *dev) | ||
444 | { | ||
445 | struct device_driver *drv = dev->driver; | ||
446 | |||
447 | if (drv && drv->pm && drv->pm->complete) | ||
448 | drv->pm->complete(dev); | ||
449 | } | ||
450 | |||
451 | #ifdef CONFIG_SUSPEND | ||
452 | |||
453 | static int ibmebus_bus_pm_suspend(struct device *dev) | ||
454 | { | ||
455 | struct device_driver *drv = dev->driver; | ||
456 | int ret = 0; | ||
457 | |||
458 | if (!drv) | ||
459 | return 0; | ||
460 | |||
461 | if (drv->pm) { | ||
462 | if (drv->pm->suspend) | ||
463 | ret = drv->pm->suspend(dev); | ||
464 | } else { | ||
465 | ret = ibmebus_bus_legacy_suspend(dev, PMSG_SUSPEND); | ||
466 | } | ||
467 | |||
468 | return ret; | ||
469 | } | ||
470 | |||
471 | static int ibmebus_bus_pm_suspend_noirq(struct device *dev) | ||
472 | { | ||
473 | struct device_driver *drv = dev->driver; | ||
474 | int ret = 0; | ||
475 | |||
476 | if (!drv) | ||
477 | return 0; | ||
478 | |||
479 | if (drv->pm) { | ||
480 | if (drv->pm->suspend_noirq) | ||
481 | ret = drv->pm->suspend_noirq(dev); | ||
482 | } | ||
483 | |||
484 | return ret; | ||
485 | } | ||
486 | |||
487 | static int ibmebus_bus_pm_resume(struct device *dev) | ||
488 | { | ||
489 | struct device_driver *drv = dev->driver; | ||
490 | int ret = 0; | ||
491 | |||
492 | if (!drv) | ||
493 | return 0; | ||
494 | |||
495 | if (drv->pm) { | ||
496 | if (drv->pm->resume) | ||
497 | ret = drv->pm->resume(dev); | ||
498 | } else { | ||
499 | ret = ibmebus_bus_legacy_resume(dev); | ||
500 | } | ||
501 | |||
502 | return ret; | ||
503 | } | ||
504 | |||
505 | static int ibmebus_bus_pm_resume_noirq(struct device *dev) | ||
506 | { | ||
507 | struct device_driver *drv = dev->driver; | ||
508 | int ret = 0; | ||
509 | |||
510 | if (!drv) | ||
511 | return 0; | ||
512 | |||
513 | if (drv->pm) { | ||
514 | if (drv->pm->resume_noirq) | ||
515 | ret = drv->pm->resume_noirq(dev); | ||
516 | } | ||
517 | |||
518 | return ret; | ||
519 | } | ||
520 | |||
521 | #else /* !CONFIG_SUSPEND */ | ||
522 | |||
523 | #define ibmebus_bus_pm_suspend NULL | ||
524 | #define ibmebus_bus_pm_resume NULL | ||
525 | #define ibmebus_bus_pm_suspend_noirq NULL | ||
526 | #define ibmebus_bus_pm_resume_noirq NULL | ||
527 | |||
528 | #endif /* !CONFIG_SUSPEND */ | ||
529 | |||
530 | #ifdef CONFIG_HIBERNATE_CALLBACKS | ||
531 | |||
532 | static int ibmebus_bus_pm_freeze(struct device *dev) | ||
533 | { | ||
534 | struct device_driver *drv = dev->driver; | ||
535 | int ret = 0; | ||
536 | |||
537 | if (!drv) | ||
538 | return 0; | ||
539 | |||
540 | if (drv->pm) { | ||
541 | if (drv->pm->freeze) | ||
542 | ret = drv->pm->freeze(dev); | ||
543 | } else { | ||
544 | ret = ibmebus_bus_legacy_suspend(dev, PMSG_FREEZE); | ||
545 | } | ||
546 | |||
547 | return ret; | ||
548 | } | ||
549 | |||
550 | static int ibmebus_bus_pm_freeze_noirq(struct device *dev) | ||
551 | { | ||
552 | struct device_driver *drv = dev->driver; | ||
553 | int ret = 0; | ||
554 | |||
555 | if (!drv) | ||
556 | return 0; | ||
557 | |||
558 | if (drv->pm) { | ||
559 | if (drv->pm->freeze_noirq) | ||
560 | ret = drv->pm->freeze_noirq(dev); | ||
561 | } | ||
562 | |||
563 | return ret; | ||
564 | } | ||
565 | |||
566 | static int ibmebus_bus_pm_thaw(struct device *dev) | ||
567 | { | ||
568 | struct device_driver *drv = dev->driver; | ||
569 | int ret = 0; | ||
570 | |||
571 | if (!drv) | ||
572 | return 0; | ||
573 | |||
574 | if (drv->pm) { | ||
575 | if (drv->pm->thaw) | ||
576 | ret = drv->pm->thaw(dev); | ||
577 | } else { | ||
578 | ret = ibmebus_bus_legacy_resume(dev); | ||
579 | } | ||
580 | |||
581 | return ret; | ||
582 | } | ||
583 | |||
584 | static int ibmebus_bus_pm_thaw_noirq(struct device *dev) | ||
585 | { | ||
586 | struct device_driver *drv = dev->driver; | ||
587 | int ret = 0; | ||
588 | |||
589 | if (!drv) | ||
590 | return 0; | ||
591 | |||
592 | if (drv->pm) { | ||
593 | if (drv->pm->thaw_noirq) | ||
594 | ret = drv->pm->thaw_noirq(dev); | ||
595 | } | ||
596 | |||
597 | return ret; | ||
598 | } | ||
599 | |||
600 | static int ibmebus_bus_pm_poweroff(struct device *dev) | ||
601 | { | ||
602 | struct device_driver *drv = dev->driver; | ||
603 | int ret = 0; | ||
604 | |||
605 | if (!drv) | ||
606 | return 0; | ||
607 | |||
608 | if (drv->pm) { | ||
609 | if (drv->pm->poweroff) | ||
610 | ret = drv->pm->poweroff(dev); | ||
611 | } else { | ||
612 | ret = ibmebus_bus_legacy_suspend(dev, PMSG_HIBERNATE); | ||
613 | } | ||
614 | |||
615 | return ret; | ||
616 | } | ||
617 | |||
618 | static int ibmebus_bus_pm_poweroff_noirq(struct device *dev) | ||
619 | { | ||
620 | struct device_driver *drv = dev->driver; | ||
621 | int ret = 0; | ||
622 | |||
623 | if (!drv) | ||
624 | return 0; | ||
625 | |||
626 | if (drv->pm) { | ||
627 | if (drv->pm->poweroff_noirq) | ||
628 | ret = drv->pm->poweroff_noirq(dev); | ||
629 | } | ||
630 | |||
631 | return ret; | ||
632 | } | ||
633 | |||
634 | static int ibmebus_bus_pm_restore(struct device *dev) | ||
635 | { | ||
636 | struct device_driver *drv = dev->driver; | ||
637 | int ret = 0; | ||
638 | |||
639 | if (!drv) | ||
640 | return 0; | ||
641 | |||
642 | if (drv->pm) { | ||
643 | if (drv->pm->restore) | ||
644 | ret = drv->pm->restore(dev); | ||
645 | } else { | ||
646 | ret = ibmebus_bus_legacy_resume(dev); | ||
647 | } | ||
648 | |||
649 | return ret; | ||
650 | } | ||
651 | |||
652 | static int ibmebus_bus_pm_restore_noirq(struct device *dev) | ||
653 | { | ||
654 | struct device_driver *drv = dev->driver; | ||
655 | int ret = 0; | ||
656 | |||
657 | if (!drv) | ||
658 | return 0; | ||
659 | |||
660 | if (drv->pm) { | ||
661 | if (drv->pm->restore_noirq) | ||
662 | ret = drv->pm->restore_noirq(dev); | ||
663 | } | ||
664 | |||
665 | return ret; | ||
666 | } | ||
667 | |||
668 | #else /* !CONFIG_HIBERNATE_CALLBACKS */ | ||
669 | |||
670 | #define ibmebus_bus_pm_freeze NULL | ||
671 | #define ibmebus_bus_pm_thaw NULL | ||
672 | #define ibmebus_bus_pm_poweroff NULL | ||
673 | #define ibmebus_bus_pm_restore NULL | ||
674 | #define ibmebus_bus_pm_freeze_noirq NULL | ||
675 | #define ibmebus_bus_pm_thaw_noirq NULL | ||
676 | #define ibmebus_bus_pm_poweroff_noirq NULL | ||
677 | #define ibmebus_bus_pm_restore_noirq NULL | ||
678 | |||
679 | #endif /* !CONFIG_HIBERNATE_CALLBACKS */ | ||
680 | |||
681 | static struct dev_pm_ops ibmebus_bus_dev_pm_ops = { | ||
682 | .prepare = ibmebus_bus_pm_prepare, | ||
683 | .complete = ibmebus_bus_pm_complete, | ||
684 | .suspend = ibmebus_bus_pm_suspend, | ||
685 | .resume = ibmebus_bus_pm_resume, | ||
686 | .freeze = ibmebus_bus_pm_freeze, | ||
687 | .thaw = ibmebus_bus_pm_thaw, | ||
688 | .poweroff = ibmebus_bus_pm_poweroff, | ||
689 | .restore = ibmebus_bus_pm_restore, | ||
690 | .suspend_noirq = ibmebus_bus_pm_suspend_noirq, | ||
691 | .resume_noirq = ibmebus_bus_pm_resume_noirq, | ||
692 | .freeze_noirq = ibmebus_bus_pm_freeze_noirq, | ||
693 | .thaw_noirq = ibmebus_bus_pm_thaw_noirq, | ||
694 | .poweroff_noirq = ibmebus_bus_pm_poweroff_noirq, | ||
695 | .restore_noirq = ibmebus_bus_pm_restore_noirq, | ||
696 | }; | ||
697 | |||
698 | #define IBMEBUS_BUS_PM_OPS_PTR (&ibmebus_bus_dev_pm_ops) | ||
699 | |||
700 | #else /* !CONFIG_PM_SLEEP */ | ||
701 | |||
702 | #define IBMEBUS_BUS_PM_OPS_PTR NULL | ||
703 | |||
704 | #endif /* !CONFIG_PM_SLEEP */ | ||
705 | |||
320 | struct bus_type ibmebus_bus_type = { | 706 | struct bus_type ibmebus_bus_type = { |
707 | .name = "ibmebus", | ||
321 | .uevent = of_device_uevent, | 708 | .uevent = of_device_uevent, |
322 | .bus_attrs = ibmebus_bus_attrs | 709 | .bus_attrs = ibmebus_bus_attrs, |
710 | .match = ibmebus_bus_bus_match, | ||
711 | .probe = ibmebus_bus_device_probe, | ||
712 | .remove = ibmebus_bus_device_remove, | ||
713 | .shutdown = ibmebus_bus_device_shutdown, | ||
714 | .dev_attrs = ibmebus_bus_device_attrs, | ||
715 | .pm = IBMEBUS_BUS_PM_OPS_PTR, | ||
323 | }; | 716 | }; |
324 | EXPORT_SYMBOL(ibmebus_bus_type); | 717 | EXPORT_SYMBOL(ibmebus_bus_type); |
325 | 718 | ||
@@ -329,7 +722,7 @@ static int __init ibmebus_bus_init(void) | |||
329 | 722 | ||
330 | printk(KERN_INFO "IBM eBus Device Driver\n"); | 723 | printk(KERN_INFO "IBM eBus Device Driver\n"); |
331 | 724 | ||
332 | err = of_bus_type_init(&ibmebus_bus_type, "ibmebus"); | 725 | err = bus_register(&ibmebus_bus_type); |
333 | if (err) { | 726 | if (err) { |
334 | printk(KERN_ERR "%s: failed to register IBM eBus.\n", | 727 | printk(KERN_ERR "%s: failed to register IBM eBus.\n", |
335 | __func__); | 728 | __func__); |
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S index 5328709eeedc..ba3195478600 100644 --- a/arch/powerpc/kernel/idle_power4.S +++ b/arch/powerpc/kernel/idle_power4.S | |||
@@ -53,24 +53,3 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
53 | isync | 53 | isync |
54 | b 1b | 54 | b 1b |
55 | 55 | ||
56 | _GLOBAL(power4_cpu_offline_powersave) | ||
57 | /* Go to NAP now */ | ||
58 | mfmsr r7 | ||
59 | rldicl r0,r7,48,1 | ||
60 | rotldi r0,r0,16 | ||
61 | mtmsrd r0,1 /* hard-disable interrupts */ | ||
62 | li r0,1 | ||
63 | li r6,0 | ||
64 | stb r0,PACAHARDIRQEN(r13) /* we'll hard-enable shortly */ | ||
65 | stb r6,PACASOFTIRQEN(r13) /* soft-disable irqs */ | ||
66 | BEGIN_FTR_SECTION | ||
67 | DSSALL | ||
68 | sync | ||
69 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | ||
70 | ori r7,r7,MSR_EE | ||
71 | oris r7,r7,MSR_POW@h | ||
72 | sync | ||
73 | isync | ||
74 | mtmsrd r7 | ||
75 | isync | ||
76 | blr | ||
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S new file mode 100644 index 000000000000..f8f0bc7f1d4f --- /dev/null +++ b/arch/powerpc/kernel/idle_power7.S | |||
@@ -0,0 +1,97 @@ | |||
1 | /* | ||
2 | * This file contains the power_save function for Power7-family CPUs. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #include <linux/threads.h> | ||
11 | #include <asm/processor.h> | ||
12 | #include <asm/page.h> | ||
13 | #include <asm/cputable.h> | ||
14 | #include <asm/thread_info.h> | ||
15 | #include <asm/ppc_asm.h> | ||
16 | #include <asm/asm-offsets.h> | ||
17 | #include <asm/ppc-opcode.h> | ||
18 | |||
19 | #undef DEBUG | ||
20 | |||
21 | .text | ||
22 | |||
23 | _GLOBAL(power7_idle) | ||
24 | /* Now check if user or arch enabled NAP mode */ | ||
25 | LOAD_REG_ADDRBASE(r3,powersave_nap) | ||
26 | lwz r4,ADDROFF(powersave_nap)(r3) | ||
27 | cmpwi 0,r4,0 | ||
28 | beqlr | ||
29 | |||
30 | /* NAP is a state loss, so we create a regs frame on the | ||
31 | * stack, fill it up with the state we care about and | ||
32 | * stick a pointer to it in PACAR1. We really only | ||
33 | * need to save PC, some CR bits and the NV GPRs, | ||
34 | * but for now an interrupt frame will do. | ||
35 | */ | ||
36 | mflr r0 | ||
37 | std r0,16(r1) | ||
38 | stdu r1,-INT_FRAME_SIZE(r1) | ||
39 | std r0,_LINK(r1) | ||
40 | std r0,_NIP(r1) | ||
41 | |||
42 | #ifndef CONFIG_SMP | ||
43 | /* Make sure FPU, VSX etc... are flushed as we may lose | ||
44 | * state when going to nap mode | ||
45 | */ | ||
46 | bl .discard_lazy_cpu_state | ||
47 | #endif /* CONFIG_SMP */ | ||
48 | |||
49 | /* Hard disable interrupts */ | ||
50 | mfmsr r9 | ||
51 | rldicl r9,r9,48,1 | ||
52 | rotldi r9,r9,16 | ||
53 | mtmsrd r9,1 /* hard-disable interrupts */ | ||
54 | li r0,0 | ||
55 | stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */ | ||
56 | stb r0,PACAHARDIRQEN(r13) | ||
57 | |||
58 | /* Continue saving state */ | ||
59 | SAVE_GPR(2, r1) | ||
60 | SAVE_NVGPRS(r1) | ||
61 | mfcr r3 | ||
62 | std r3,_CCR(r1) | ||
63 | std r9,_MSR(r1) | ||
64 | std r1,PACAR1(r13) | ||
65 | |||
66 | /* Magic NAP mode enter sequence */ | ||
67 | std r0,0(r1) | ||
68 | ptesync | ||
69 | ld r0,0(r1) | ||
70 | 1: cmp cr0,r0,r0 | ||
71 | bne 1b | ||
72 | PPC_NAP | ||
73 | b . | ||
74 | |||
75 | _GLOBAL(power7_wakeup_loss) | ||
76 | GET_PACA(r13) | ||
77 | ld r1,PACAR1(r13) | ||
78 | REST_NVGPRS(r1) | ||
79 | REST_GPR(2, r1) | ||
80 | ld r3,_CCR(r1) | ||
81 | ld r4,_MSR(r1) | ||
82 | ld r5,_NIP(r1) | ||
83 | addi r1,r1,INT_FRAME_SIZE | ||
84 | mtcr r3 | ||
85 | mtspr SPRN_SRR1,r4 | ||
86 | mtspr SPRN_SRR0,r5 | ||
87 | rfid | ||
88 | |||
89 | _GLOBAL(power7_wakeup_noloss) | ||
90 | GET_PACA(r13) | ||
91 | ld r1,PACAR1(r13) | ||
92 | ld r4,_MSR(r1) | ||
93 | ld r5,_NIP(r1) | ||
94 | addi r1,r1,INT_FRAME_SIZE | ||
95 | mtspr SPRN_SRR1,r4 | ||
96 | mtspr SPRN_SRR0,r5 | ||
97 | rfid | ||
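power7_idle above saves LR, CR, MSR and the non-volatile GPRs into an interrupt-style stack frame and parks the stack pointer in PACAR1, so that power7_wakeup_loss can rebuild the context after a nap that loses register state. The contract resembles a setjmp/longjmp pair; a userspace C analogy (purely illustrative, nothing below is kernel code):

    #include <setjmp.h>
    #include <stdio.h>

    static jmp_buf saved_ctx;   /* plays the role of the frame in PACAR1 */

    static void enter_state_loss_idle(void)
    {
            /* Hardware would lose register state here; the wakeup
             * vector jumps back through the saved context instead. */
            longjmp(saved_ctx, 1);
    }

    int main(void)
    {
            if (setjmp(saved_ctx) == 0) {   /* save NV state, then nap */
                    puts("entering nap");
                    enter_state_loss_idle();
            }
            puts("woke up, context restored");  /* wakeup_loss path */
            return 0;
    }

power7_wakeup_noloss is the cheap variant: when the hardware kept register state, only SRR0/SRR1 need reloading before the rfid.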
diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c new file mode 100644 index 000000000000..ffafaea3d261 --- /dev/null +++ b/arch/powerpc/kernel/io-workarounds.c | |||
@@ -0,0 +1,188 @@ | |||
1 | /* | ||
2 | * Support PCI IO workaround | ||
3 | * | ||
4 | * Copyright (C) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org> | ||
5 | * IBM, Corp. | ||
6 | * (C) Copyright 2007-2008 TOSHIBA CORPORATION | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | #undef DEBUG | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | |||
16 | #include <asm/io.h> | ||
17 | #include <asm/machdep.h> | ||
18 | #include <asm/pgtable.h> | ||
19 | #include <asm/ppc-pci.h> | ||
20 | #include <asm/io-workarounds.h> | ||
21 | |||
22 | #define IOWA_MAX_BUS 8 | ||
23 | |||
24 | static struct iowa_bus iowa_busses[IOWA_MAX_BUS]; | ||
25 | static unsigned int iowa_bus_count; | ||
26 | |||
27 | static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr) | ||
28 | { | ||
29 | int i, j; | ||
30 | struct resource *res; | ||
31 | unsigned long vstart, vend; | ||
32 | |||
33 | for (i = 0; i < iowa_bus_count; i++) { | ||
34 | struct iowa_bus *bus = &iowa_busses[i]; | ||
35 | struct pci_controller *phb = bus->phb; | ||
36 | |||
37 | if (vaddr) { | ||
38 | vstart = (unsigned long)phb->io_base_virt; | ||
39 | vend = vstart + phb->pci_io_size - 1; | ||
40 | if ((vaddr >= vstart) && (vaddr <= vend)) | ||
41 | return bus; | ||
42 | } | ||
43 | |||
44 | if (paddr) | ||
45 | for (j = 0; j < 3; j++) { | ||
46 | res = &phb->mem_resources[j]; | ||
47 | if (paddr >= res->start && paddr <= res->end) | ||
48 | return bus; | ||
49 | } | ||
50 | } | ||
51 | |||
52 | return NULL; | ||
53 | } | ||
54 | |||
55 | struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr) | ||
56 | { | ||
57 | struct iowa_bus *bus; | ||
58 | int token; | ||
59 | |||
60 | token = PCI_GET_ADDR_TOKEN(addr); | ||
61 | |||
62 | if (token && token <= iowa_bus_count) | ||
63 | bus = &iowa_busses[token - 1]; | ||
64 | else { | ||
65 | unsigned long vaddr, paddr; | ||
66 | pte_t *ptep; | ||
67 | |||
68 | vaddr = (unsigned long)PCI_FIX_ADDR(addr); | ||
69 | if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END) | ||
70 | return NULL; | ||
71 | |||
72 | ptep = find_linux_pte(init_mm.pgd, vaddr); | ||
73 | if (ptep == NULL) | ||
74 | paddr = 0; | ||
75 | else | ||
76 | paddr = pte_pfn(*ptep) << PAGE_SHIFT; | ||
77 | bus = iowa_pci_find(vaddr, paddr); | ||
78 | |||
79 | if (bus == NULL) | ||
80 | return NULL; | ||
81 | } | ||
82 | |||
83 | return bus; | ||
84 | } | ||
85 | |||
86 | struct iowa_bus *iowa_pio_find_bus(unsigned long port) | ||
87 | { | ||
88 | unsigned long vaddr = (unsigned long)pci_io_base + port; | ||
89 | return iowa_pci_find(vaddr, 0); | ||
90 | } | ||
91 | |||
92 | |||
93 | #define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \ | ||
94 | static ret iowa_##name at \ | ||
95 | { \ | ||
96 | struct iowa_bus *bus; \ | ||
97 | bus = iowa_##space##_find_bus(aa); \ | ||
98 | if (bus && bus->ops && bus->ops->name) \ | ||
99 | return bus->ops->name al; \ | ||
100 | return __do_##name al; \ | ||
101 | } | ||
102 | |||
103 | #define DEF_PCI_AC_NORET(name, at, al, space, aa) \ | ||
104 | static void iowa_##name at \ | ||
105 | { \ | ||
106 | struct iowa_bus *bus; \ | ||
107 | bus = iowa_##space##_find_bus(aa); \ | ||
108 | if (bus && bus->ops && bus->ops->name) { \ | ||
109 | bus->ops->name al; \ | ||
110 | return; \ | ||
111 | } \ | ||
112 | __do_##name al; \ | ||
113 | } | ||
114 | |||
115 | #include <asm/io-defs.h> | ||
116 | |||
117 | #undef DEF_PCI_AC_RET | ||
118 | #undef DEF_PCI_AC_NORET | ||
119 | |||
120 | static const struct ppc_pci_io __devinitconst iowa_pci_io = { | ||
121 | |||
122 | #define DEF_PCI_AC_RET(name, ret, at, al, space, aa) .name = iowa_##name, | ||
123 | #define DEF_PCI_AC_NORET(name, at, al, space, aa) .name = iowa_##name, | ||
124 | |||
125 | #include <asm/io-defs.h> | ||
126 | |||
127 | #undef DEF_PCI_AC_RET | ||
128 | #undef DEF_PCI_AC_NORET | ||
129 | |||
130 | }; | ||
131 | |||
132 | static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size, | ||
133 | unsigned long flags, void *caller) | ||
134 | { | ||
135 | struct iowa_bus *bus; | ||
136 | void __iomem *res = __ioremap_caller(addr, size, flags, caller); | ||
137 | int busno; | ||
138 | |||
139 | bus = iowa_pci_find(0, (unsigned long)addr); | ||
140 | if (bus != NULL) { | ||
141 | busno = bus - iowa_busses; | ||
142 | PCI_SET_ADDR_TOKEN(res, busno + 1); | ||
143 | } | ||
144 | return res; | ||
145 | } | ||
146 | |||
147 | /* Enable IO workaround */ | ||
148 | static void __devinit io_workaround_init(void) | ||
149 | { | ||
150 | static int io_workaround_inited; | ||
151 | |||
152 | if (io_workaround_inited) | ||
153 | return; | ||
154 | ppc_pci_io = iowa_pci_io; | ||
155 | ppc_md.ioremap = iowa_ioremap; | ||
156 | io_workaround_inited = 1; | ||
157 | } | ||
158 | |||
159 | /* Register new bus to support workaround */ | ||
160 | void __devinit iowa_register_bus(struct pci_controller *phb, | ||
161 | struct ppc_pci_io *ops, | ||
162 | int (*initfunc)(struct iowa_bus *, void *), void *data) | ||
163 | { | ||
164 | struct iowa_bus *bus; | ||
165 | struct device_node *np = phb->dn; | ||
166 | |||
167 | io_workaround_init(); | ||
168 | |||
169 | if (iowa_bus_count >= IOWA_MAX_BUS) { | ||
170 | pr_err("IOWA: Too many PCI bridges, " | ||
171 | "workarounds disabled for %s\n", np->full_name); | ||
172 | return; | ||
173 | } | ||
174 | |||
175 | bus = &iowa_busses[iowa_bus_count]; | ||
176 | bus->phb = phb; | ||
177 | bus->ops = ops; | ||
178 | bus->private = data; | ||
179 | |||
180 | if (initfunc) | ||
181 | if ((*initfunc)(bus, data)) | ||
182 | return; | ||
183 | |||
184 | iowa_bus_count++; | ||
185 | |||
186 | pr_debug("IOWA: [%d] Add bus, %s.\n", iowa_bus_count-1, np->full_name); | ||
187 | } | ||
188 | |||
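io-workarounds.c leans on a double-include (X-macro) trick: asm/io-defs.h is pulled in twice, once with DEF_PCI_AC_RET/DEF_PCI_AC_NORET expanding to wrapper function bodies and once with them expanding to designated initializers for the ops table, so the accessor list is written exactly once. A self-contained sketch of the same technique (the two-entry accessor list here is invented; the real list lives in asm/io-defs.h):

    #include <stdio.h>

    /* Stand-ins for the default __do_* accessors. */
    static unsigned char do_readb(unsigned long addr)  { (void)addr; return 0xff; }
    static void do_writeb(unsigned char v, unsigned long addr) { (void)v; (void)addr; }

    /* The single list; normally a separate header included twice. */
    #define IO_DEFS \
            DEF_RET(readb, unsigned char, (unsigned long addr), (addr)) \
            DEF_NORET(writeb, (unsigned char v, unsigned long addr), (v, addr))

    /* Pass 1: emit one hook wrapper per accessor. */
    #define DEF_RET(name, ret, at, al)  static ret hook_##name at { return do_##name al; }
    #define DEF_NORET(name, at, al)     static void hook_##name at { do_##name al; }
    IO_DEFS
    #undef DEF_RET
    #undef DEF_NORET

    struct io_ops {
            unsigned char (*readb)(unsigned long);
            void (*writeb)(unsigned char, unsigned long);
    };

    /* Pass 2: emit the ops table from the very same list. */
    #define DEF_RET(name, ret, at, al)  .name = hook_##name,
    #define DEF_NORET(name, at, al)     .name = hook_##name,
    static const struct io_ops ops = { IO_DEFS };
    #undef DEF_RET
    #undef DEF_NORET

    int main(void)
    {
            return ops.readb(0) == 0xff ? 0 : 1;
    }

Because both passes consume the one list, an accessor can never end up with a wrapper but no table slot, or vice versa.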
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index d5839179ec77..961bb03413f3 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c | |||
@@ -311,8 +311,9 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl, | |||
311 | /* Handle failure */ | 311 | /* Handle failure */ |
312 | if (unlikely(entry == DMA_ERROR_CODE)) { | 312 | if (unlikely(entry == DMA_ERROR_CODE)) { |
313 | if (printk_ratelimit()) | 313 | if (printk_ratelimit()) |
314 | printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx" | 314 | dev_info(dev, "iommu_alloc failed, tbl %p " |
315 | " npages %lx\n", tbl, vaddr, npages); | 315 | "vaddr %lx npages %lu\n", tbl, vaddr, |
316 | npages); | ||
316 | goto failure; | 317 | goto failure; |
317 | } | 318 | } |
318 | 319 | ||
@@ -579,9 +580,9 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, | |||
579 | attrs); | 580 | attrs); |
580 | if (dma_handle == DMA_ERROR_CODE) { | 581 | if (dma_handle == DMA_ERROR_CODE) { |
581 | if (printk_ratelimit()) { | 582 | if (printk_ratelimit()) { |
582 | printk(KERN_INFO "iommu_alloc failed, " | 583 | dev_info(dev, "iommu_alloc failed, tbl %p " |
583 | "tbl %p vaddr %p npages %d\n", | 584 | "vaddr %p npages %d\n", tbl, vaddr, |
584 | tbl, vaddr, npages); | 585 | npages); |
585 | } | 586 | } |
586 | } else | 587 | } else |
587 | dma_handle |= (uaddr & ~IOMMU_PAGE_MASK); | 588 | dma_handle |= (uaddr & ~IOMMU_PAGE_MASK); |
@@ -627,7 +628,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, | |||
627 | * the tce tables. | 628 | * the tce tables. |
628 | */ | 629 | */ |
629 | if (order >= IOMAP_MAX_ORDER) { | 630 | if (order >= IOMAP_MAX_ORDER) { |
630 | printk("iommu_alloc_consistent size too large: 0x%lx\n", size); | 631 | dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n", |
632 | size); | ||
631 | return NULL; | 633 | return NULL; |
632 | } | 634 | } |
633 | 635 | ||
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 4a65386995d7..5b428e308666 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -66,7 +66,6 @@ | |||
66 | #include <asm/ptrace.h> | 66 | #include <asm/ptrace.h> |
67 | #include <asm/machdep.h> | 67 | #include <asm/machdep.h> |
68 | #include <asm/udbg.h> | 68 | #include <asm/udbg.h> |
69 | #include <asm/dbell.h> | ||
70 | #include <asm/smp.h> | 69 | #include <asm/smp.h> |
71 | 70 | ||
72 | #ifdef CONFIG_PPC64 | 71 | #ifdef CONFIG_PPC64 |
@@ -116,7 +115,7 @@ static inline notrace void set_soft_enabled(unsigned long enable) | |||
116 | : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); | 115 | : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); |
117 | } | 116 | } |
118 | 117 | ||
119 | notrace void raw_local_irq_restore(unsigned long en) | 118 | notrace void arch_local_irq_restore(unsigned long en) |
120 | { | 119 | { |
121 | /* | 120 | /* |
122 | * get_paca()->soft_enabled = en; | 121 | * get_paca()->soft_enabled = en; |
@@ -160,7 +159,8 @@ notrace void raw_local_irq_restore(unsigned long en) | |||
160 | 159 | ||
161 | #if defined(CONFIG_BOOKE) && defined(CONFIG_SMP) | 160 | #if defined(CONFIG_BOOKE) && defined(CONFIG_SMP) |
162 | /* Check for pending doorbell interrupts and resend to ourself */ | 161 | /* Check for pending doorbell interrupts and resend to ourself */ |
163 | doorbell_check_self(); | 162 | if (cpu_has_feature(CPU_FTR_DBELL)) |
163 | smp_muxed_ipi_resend(); | ||
164 | #endif | 164 | #endif |
165 | 165 | ||
166 | /* | 166 | /* |
@@ -192,10 +192,10 @@ notrace void raw_local_irq_restore(unsigned long en) | |||
192 | 192 | ||
193 | __hard_irq_enable(); | 193 | __hard_irq_enable(); |
194 | } | 194 | } |
195 | EXPORT_SYMBOL(raw_local_irq_restore); | 195 | EXPORT_SYMBOL(arch_local_irq_restore); |
196 | #endif /* CONFIG_PPC64 */ | 196 | #endif /* CONFIG_PPC64 */ |
197 | 197 | ||
198 | static int show_other_interrupts(struct seq_file *p, int prec) | 198 | int arch_show_interrupts(struct seq_file *p, int prec) |
199 | { | 199 | { |
200 | int j; | 200 | int j; |
201 | 201 | ||
@@ -231,63 +231,6 @@ static int show_other_interrupts(struct seq_file *p, int prec) | |||
231 | return 0; | 231 | return 0; |
232 | } | 232 | } |
233 | 233 | ||
234 | int show_interrupts(struct seq_file *p, void *v) | ||
235 | { | ||
236 | unsigned long flags, any_count = 0; | ||
237 | int i = *(loff_t *) v, j, prec; | ||
238 | struct irqaction *action; | ||
239 | struct irq_desc *desc; | ||
240 | |||
241 | if (i > nr_irqs) | ||
242 | return 0; | ||
243 | |||
244 | for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec) | ||
245 | j *= 10; | ||
246 | |||
247 | if (i == nr_irqs) | ||
248 | return show_other_interrupts(p, prec); | ||
249 | |||
250 | /* print header */ | ||
251 | if (i == 0) { | ||
252 | seq_printf(p, "%*s", prec + 8, ""); | ||
253 | for_each_online_cpu(j) | ||
254 | seq_printf(p, "CPU%-8d", j); | ||
255 | seq_putc(p, '\n'); | ||
256 | } | ||
257 | |||
258 | desc = irq_to_desc(i); | ||
259 | if (!desc) | ||
260 | return 0; | ||
261 | |||
262 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
263 | for_each_online_cpu(j) | ||
264 | any_count |= kstat_irqs_cpu(i, j); | ||
265 | action = desc->action; | ||
266 | if (!action && !any_count) | ||
267 | goto out; | ||
268 | |||
269 | seq_printf(p, "%*d: ", prec, i); | ||
270 | for_each_online_cpu(j) | ||
271 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | ||
272 | |||
273 | if (desc->chip) | ||
274 | seq_printf(p, " %-16s", desc->chip->name); | ||
275 | else | ||
276 | seq_printf(p, " %-16s", "None"); | ||
277 | seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge"); | ||
278 | |||
279 | if (action) { | ||
280 | seq_printf(p, " %s", action->name); | ||
281 | while ((action = action->next) != NULL) | ||
282 | seq_printf(p, ", %s", action->name); | ||
283 | } | ||
284 | |||
285 | seq_putc(p, '\n'); | ||
286 | out: | ||
287 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
288 | return 0; | ||
289 | } | ||
290 | |||
291 | /* | 234 | /* |
292 | * /proc/stat helpers | 235 | * /proc/stat helpers |
293 | */ | 236 | */ |
@@ -303,30 +246,37 @@ u64 arch_irq_stat_cpu(unsigned int cpu) | |||
303 | } | 246 | } |
304 | 247 | ||
305 | #ifdef CONFIG_HOTPLUG_CPU | 248 | #ifdef CONFIG_HOTPLUG_CPU |
306 | void fixup_irqs(const struct cpumask *map) | 249 | void migrate_irqs(void) |
307 | { | 250 | { |
308 | struct irq_desc *desc; | 251 | struct irq_desc *desc; |
309 | unsigned int irq; | 252 | unsigned int irq; |
310 | static int warned; | 253 | static int warned; |
311 | cpumask_var_t mask; | 254 | cpumask_var_t mask; |
255 | const struct cpumask *map = cpu_online_mask; | ||
312 | 256 | ||
313 | alloc_cpumask_var(&mask, GFP_KERNEL); | 257 | alloc_cpumask_var(&mask, GFP_KERNEL); |
314 | 258 | ||
315 | for_each_irq(irq) { | 259 | for_each_irq(irq) { |
260 | struct irq_data *data; | ||
261 | struct irq_chip *chip; | ||
262 | |||
316 | desc = irq_to_desc(irq); | 263 | desc = irq_to_desc(irq); |
317 | if (!desc) | 264 | if (!desc) |
318 | continue; | 265 | continue; |
319 | 266 | ||
320 | if (desc->status & IRQ_PER_CPU) | 267 | data = irq_desc_get_irq_data(desc); |
268 | if (irqd_is_per_cpu(data)) | ||
321 | continue; | 269 | continue; |
322 | 270 | ||
323 | cpumask_and(mask, desc->affinity, map); | 271 | chip = irq_data_get_irq_chip(data); |
272 | |||
273 | cpumask_and(mask, data->affinity, map); | ||
324 | if (cpumask_any(mask) >= nr_cpu_ids) { | 274 | if (cpumask_any(mask) >= nr_cpu_ids) { |
325 | printk("Breaking affinity for irq %i\n", irq); | 275 | printk("Breaking affinity for irq %i\n", irq); |
326 | cpumask_copy(mask, map); | 276 | cpumask_copy(mask, map); |
327 | } | 277 | } |
328 | if (desc->chip->set_affinity) | 278 | if (chip->irq_set_affinity) |
329 | desc->chip->set_affinity(irq, mask); | 279 | chip->irq_set_affinity(data, mask, true); |
330 | else if (desc->action && !(warned++)) | 280 | else if (desc->action && !(warned++)) |
331 | printk("Cannot set affinity for irq %i\n", irq); | 281 | printk("Cannot set affinity for irq %i\n", irq); |
332 | } | 282 | } |
@@ -345,17 +295,20 @@ static inline void handle_one_irq(unsigned int irq) | |||
345 | unsigned long saved_sp_limit; | 295 | unsigned long saved_sp_limit; |
346 | struct irq_desc *desc; | 296 | struct irq_desc *desc; |
347 | 297 | ||
298 | desc = irq_to_desc(irq); | ||
299 | if (!desc) | ||
300 | return; | ||
301 | |||
348 | /* Switch to the irq stack to handle this */ | 302 | /* Switch to the irq stack to handle this */ |
349 | curtp = current_thread_info(); | 303 | curtp = current_thread_info(); |
350 | irqtp = hardirq_ctx[smp_processor_id()]; | 304 | irqtp = hardirq_ctx[smp_processor_id()]; |
351 | 305 | ||
352 | if (curtp == irqtp) { | 306 | if (curtp == irqtp) { |
353 | /* We're already on the irq stack, just handle it */ | 307 | /* We're already on the irq stack, just handle it */ |
354 | generic_handle_irq(irq); | 308 | desc->handle_irq(irq, desc); |
355 | return; | 309 | return; |
356 | } | 310 | } |
357 | 311 | ||
358 | desc = irq_to_desc(irq); | ||
359 | saved_sp_limit = current->thread.ksp_limit; | 312 | saved_sp_limit = current->thread.ksp_limit; |
360 | 313 | ||
361 | irqtp->task = curtp->task; | 314 | irqtp->task = curtp->task; |
@@ -447,24 +400,28 @@ struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly; | |||
447 | void exc_lvl_ctx_init(void) | 400 | void exc_lvl_ctx_init(void) |
448 | { | 401 | { |
449 | struct thread_info *tp; | 402 | struct thread_info *tp; |
450 | int i, hw_cpu; | 403 | int i, cpu_nr; |
451 | 404 | ||
452 | for_each_possible_cpu(i) { | 405 | for_each_possible_cpu(i) { |
453 | hw_cpu = get_hard_smp_processor_id(i); | 406 | #ifdef CONFIG_PPC64 |
454 | memset((void *)critirq_ctx[hw_cpu], 0, THREAD_SIZE); | 407 | cpu_nr = i; |
455 | tp = critirq_ctx[hw_cpu]; | 408 | #else |
456 | tp->cpu = i; | 409 | cpu_nr = get_hard_smp_processor_id(i); |
410 | #endif | ||
411 | memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE); | ||
412 | tp = critirq_ctx[cpu_nr]; | ||
413 | tp->cpu = cpu_nr; | ||
457 | tp->preempt_count = 0; | 414 | tp->preempt_count = 0; |
458 | 415 | ||
459 | #ifdef CONFIG_BOOKE | 416 | #ifdef CONFIG_BOOKE |
460 | memset((void *)dbgirq_ctx[hw_cpu], 0, THREAD_SIZE); | 417 | memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE); |
461 | tp = dbgirq_ctx[hw_cpu]; | 418 | tp = dbgirq_ctx[cpu_nr]; |
462 | tp->cpu = i; | 419 | tp->cpu = cpu_nr; |
463 | tp->preempt_count = 0; | 420 | tp->preempt_count = 0; |
464 | 421 | ||
465 | memset((void *)mcheckirq_ctx[hw_cpu], 0, THREAD_SIZE); | 422 | memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE); |
466 | tp = mcheckirq_ctx[hw_cpu]; | 423 | tp = mcheckirq_ctx[cpu_nr]; |
467 | tp->cpu = i; | 424 | tp->cpu = cpu_nr; |
468 | tp->preempt_count = HARDIRQ_OFFSET; | 425 | tp->preempt_count = HARDIRQ_OFFSET; |
469 | #endif | 426 | #endif |
470 | } | 427 | } |
@@ -527,20 +484,41 @@ void do_softirq(void) | |||
527 | * IRQ controller and virtual interrupts | 484 | * IRQ controller and virtual interrupts |
528 | */ | 485 | */ |
529 | 486 | ||
487 | /* The main irq map itself is an array of NR_IRQS entries containing the | ||
488 | * associated host and irq number. An entry with a host of NULL is free. | ||
489 | * An entry can be allocated if it's free; the allocator always then sets | ||
490 | * hwirq first to the host's invalid irq number and then fills ops. | ||
491 | */ | ||
492 | struct irq_map_entry { | ||
493 | irq_hw_number_t hwirq; | ||
494 | struct irq_host *host; | ||
495 | }; | ||
496 | |||
530 | static LIST_HEAD(irq_hosts); | 497 | static LIST_HEAD(irq_hosts); |
531 | static DEFINE_RAW_SPINLOCK(irq_big_lock); | 498 | static DEFINE_RAW_SPINLOCK(irq_big_lock); |
532 | static unsigned int revmap_trees_allocated; | ||
533 | static DEFINE_MUTEX(revmap_trees_mutex); | 499 | static DEFINE_MUTEX(revmap_trees_mutex); |
534 | struct irq_map_entry irq_map[NR_IRQS]; | 500 | static struct irq_map_entry irq_map[NR_IRQS]; |
535 | static unsigned int irq_virq_count = NR_IRQS; | 501 | static unsigned int irq_virq_count = NR_IRQS; |
536 | static struct irq_host *irq_default_host; | 502 | static struct irq_host *irq_default_host; |
537 | 503 | ||
504 | irq_hw_number_t irqd_to_hwirq(struct irq_data *d) | ||
505 | { | ||
506 | return irq_map[d->irq].hwirq; | ||
507 | } | ||
508 | EXPORT_SYMBOL_GPL(irqd_to_hwirq); | ||
509 | |||
538 | irq_hw_number_t virq_to_hw(unsigned int virq) | 510 | irq_hw_number_t virq_to_hw(unsigned int virq) |
539 | { | 511 | { |
540 | return irq_map[virq].hwirq; | 512 | return irq_map[virq].hwirq; |
541 | } | 513 | } |
542 | EXPORT_SYMBOL_GPL(virq_to_hw); | 514 | EXPORT_SYMBOL_GPL(virq_to_hw); |
543 | 515 | ||
516 | bool virq_is_host(unsigned int virq, struct irq_host *host) | ||
517 | { | ||
518 | return irq_map[virq].host == host; | ||
519 | } | ||
520 | EXPORT_SYMBOL_GPL(virq_is_host); | ||
521 | |||
544 | static int default_irq_host_match(struct irq_host *h, struct device_node *np) | 522 | static int default_irq_host_match(struct irq_host *h, struct device_node *np) |
545 | { | 523 | { |
546 | return h->of_node != NULL && h->of_node == np; | 524 | return h->of_node != NULL && h->of_node == np; |
@@ -561,7 +539,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node, | |||
561 | /* Allocate structure and revmap table if using linear mapping */ | 539 | /* Allocate structure and revmap table if using linear mapping */ |
562 | if (revmap_type == IRQ_HOST_MAP_LINEAR) | 540 | if (revmap_type == IRQ_HOST_MAP_LINEAR) |
563 | size += revmap_arg * sizeof(unsigned int); | 541 | size += revmap_arg * sizeof(unsigned int); |
564 | host = zalloc_maybe_bootmem(size, GFP_KERNEL); | 542 | host = kzalloc(size, GFP_KERNEL); |
565 | if (host == NULL) | 543 | if (host == NULL) |
566 | return NULL; | 544 | return NULL; |
567 | 545 | ||
@@ -582,13 +560,8 @@ struct irq_host *irq_alloc_host(struct device_node *of_node, | |||
582 | if (revmap_type == IRQ_HOST_MAP_LEGACY) { | 560 | if (revmap_type == IRQ_HOST_MAP_LEGACY) { |
583 | if (irq_map[0].host != NULL) { | 561 | if (irq_map[0].host != NULL) { |
584 | raw_spin_unlock_irqrestore(&irq_big_lock, flags); | 562 | raw_spin_unlock_irqrestore(&irq_big_lock, flags); |
585 | /* If we are early boot, we can't free the structure, | 563 | of_node_put(host->of_node); |
586 | * too bad... | 564 | kfree(host); |
587 | * this will be fixed once slab is made available early | ||
588 | * instead of the current cruft | ||
589 | */ | ||
590 | if (mem_init_done) | ||
591 | kfree(host); | ||
592 | return NULL; | 565 | return NULL; |
593 | } | 566 | } |
594 | irq_map[0].host = host; | 567 | irq_map[0].host = host; |
@@ -609,14 +582,14 @@ struct irq_host *irq_alloc_host(struct device_node *of_node, | |||
609 | irq_map[i].host = host; | 582 | irq_map[i].host = host; |
610 | smp_wmb(); | 583 | smp_wmb(); |
611 | 584 | ||
612 | /* Clear norequest flags */ | ||
613 | irq_to_desc(i)->status &= ~IRQ_NOREQUEST; | ||
614 | |||
615 | /* Legacy flags are left to default at this point, | 585 | /* Legacy flags are left to default at this point, |
616 | * one can then use irq_create_mapping() to | 586 | * one can then use irq_create_mapping() to |
617 | * explicitly change them | 587 | * explicitly change them |
618 | */ | 588 | */ |
619 | ops->map(host, i, i); | 589 | ops->map(host, i, i); |
590 | |||
591 | /* Clear norequest flags */ | ||
592 | irq_clear_status_flags(i, IRQ_NOREQUEST); | ||
620 | } | 593 | } |
621 | break; | 594 | break; |
622 | case IRQ_HOST_MAP_LINEAR: | 595 | case IRQ_HOST_MAP_LINEAR: |
@@ -627,6 +600,9 @@ struct irq_host *irq_alloc_host(struct device_node *of_node, | |||
627 | smp_wmb(); | 600 | smp_wmb(); |
628 | host->revmap_data.linear.revmap = rmap; | 601 | host->revmap_data.linear.revmap = rmap; |
629 | break; | 602 | break; |
603 | case IRQ_HOST_MAP_TREE: | ||
604 | INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL); | ||
605 | break; | ||
630 | default: | 606 | default: |
631 | break; | 607 | break; |
632 | } | 608 | } |
@@ -676,17 +652,14 @@ void irq_set_virq_count(unsigned int count) | |||
676 | static int irq_setup_virq(struct irq_host *host, unsigned int virq, | 652 | static int irq_setup_virq(struct irq_host *host, unsigned int virq, |
677 | irq_hw_number_t hwirq) | 653 | irq_hw_number_t hwirq) |
678 | { | 654 | { |
679 | struct irq_desc *desc; | 655 | int res; |
680 | 656 | ||
681 | desc = irq_to_desc_alloc_node(virq, 0); | 657 | res = irq_alloc_desc_at(virq, 0); |
682 | if (!desc) { | 658 | if (res != virq) { |
683 | pr_debug("irq: -> allocating desc failed\n"); | 659 | pr_debug("irq: -> allocating desc failed\n"); |
684 | goto error; | 660 | goto error; |
685 | } | 661 | } |
686 | 662 | ||
687 | /* Clear IRQ_NOREQUEST flag */ | ||
688 | desc->status &= ~IRQ_NOREQUEST; | ||
689 | |||
690 | /* map it */ | 663 | /* map it */ |
691 | smp_wmb(); | 664 | smp_wmb(); |
692 | irq_map[virq].hwirq = hwirq; | 665 | irq_map[virq].hwirq = hwirq; |
@@ -694,11 +667,15 @@ static int irq_setup_virq(struct irq_host *host, unsigned int virq, | |||
694 | 667 | ||
695 | if (host->ops->map(host, virq, hwirq)) { | 668 | if (host->ops->map(host, virq, hwirq)) { |
696 | pr_debug("irq: -> mapping failed, freeing\n"); | 669 | pr_debug("irq: -> mapping failed, freeing\n"); |
697 | goto error; | 670 | goto errdesc; |
698 | } | 671 | } |
699 | 672 | ||
673 | irq_clear_status_flags(virq, IRQ_NOREQUEST); | ||
674 | |||
700 | return 0; | 675 | return 0; |
701 | 676 | ||
677 | errdesc: | ||
678 | irq_free_descs(virq, 1); | ||
702 | error: | 679 | error: |
703 | irq_free_virt(virq, 1); | 680 | irq_free_virt(virq, 1); |
704 | return -1; | 681 | return -1; |
@@ -746,13 +723,9 @@ unsigned int irq_create_mapping(struct irq_host *host, | |||
746 | } | 723 | } |
747 | pr_debug("irq: -> using host @%p\n", host); | 724 | pr_debug("irq: -> using host @%p\n", host); |
748 | 725 | ||
749 | /* Check if mapping already exist, if it does, call | 726 | /* Check if mapping already exists */ |
750 | * host->ops->map() to update the flags | ||
751 | */ | ||
752 | virq = irq_find_mapping(host, hwirq); | 727 | virq = irq_find_mapping(host, hwirq); |
753 | if (virq != NO_IRQ) { | 728 | if (virq != NO_IRQ) { |
754 | if (host->ops->remap) | ||
755 | host->ops->remap(host, virq, hwirq); | ||
756 | pr_debug("irq: -> existing mapping on virq %d\n", virq); | 729 | pr_debug("irq: -> existing mapping on virq %d\n", virq); |
757 | return virq; | 730 | return virq; |
758 | } | 731 | } |
@@ -818,8 +791,8 @@ unsigned int irq_create_of_mapping(struct device_node *controller, | |||
818 | 791 | ||
819 | /* Set type if specified and different than the current one */ | 792 | /* Set type if specified and different than the current one */ |
820 | if (type != IRQ_TYPE_NONE && | 793 | if (type != IRQ_TYPE_NONE && |
821 | type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK)) | 794 | type != (irqd_get_trigger_type(irq_get_irq_data(virq)))) |
822 | set_irq_type(virq, type); | 795 | irq_set_irq_type(virq, type); |
823 | return virq; | 796 | return virq; |
824 | } | 797 | } |
825 | EXPORT_SYMBOL_GPL(irq_create_of_mapping); | 798 | EXPORT_SYMBOL_GPL(irq_create_of_mapping); |
@@ -833,16 +806,17 @@ void irq_dispose_mapping(unsigned int virq) | |||
833 | return; | 806 | return; |
834 | 807 | ||
835 | host = irq_map[virq].host; | 808 | host = irq_map[virq].host; |
836 | WARN_ON (host == NULL); | 809 | if (WARN_ON(host == NULL)) |
837 | if (host == NULL) | ||
838 | return; | 810 | return; |
839 | 811 | ||
840 | /* Never unmap legacy interrupts */ | 812 | /* Never unmap legacy interrupts */ |
841 | if (host->revmap_type == IRQ_HOST_MAP_LEGACY) | 813 | if (host->revmap_type == IRQ_HOST_MAP_LEGACY) |
842 | return; | 814 | return; |
843 | 815 | ||
816 | irq_set_status_flags(virq, IRQ_NOREQUEST); | ||
817 | |||
844 | /* remove chip and handler */ | 818 | /* remove chip and handler */ |
845 | set_irq_chip_and_handler(virq, NULL, NULL); | 819 | irq_set_chip_and_handler(virq, NULL, NULL); |
846 | 820 | ||
847 | /* Make sure it's completed */ | 821 | /* Make sure it's completed */ |
848 | synchronize_irq(virq); | 822 | synchronize_irq(virq); |
@@ -860,13 +834,6 @@ void irq_dispose_mapping(unsigned int virq) | |||
860 | host->revmap_data.linear.revmap[hwirq] = NO_IRQ; | 834 | host->revmap_data.linear.revmap[hwirq] = NO_IRQ; |
861 | break; | 835 | break; |
862 | case IRQ_HOST_MAP_TREE: | 836 | case IRQ_HOST_MAP_TREE: |
863 | /* | ||
864 | * Check if radix tree allocated yet, if not then nothing to | ||
865 | * remove. | ||
866 | */ | ||
867 | smp_rmb(); | ||
868 | if (revmap_trees_allocated < 1) | ||
869 | break; | ||
870 | mutex_lock(&revmap_trees_mutex); | 837 | mutex_lock(&revmap_trees_mutex); |
871 | radix_tree_delete(&host->revmap_data.tree, hwirq); | 838 | radix_tree_delete(&host->revmap_data.tree, hwirq); |
872 | mutex_unlock(&revmap_trees_mutex); | 839 | mutex_unlock(&revmap_trees_mutex); |
@@ -877,9 +844,7 @@ void irq_dispose_mapping(unsigned int virq) | |||
877 | smp_mb(); | 844 | smp_mb(); |
878 | irq_map[virq].hwirq = host->inval_irq; | 845 | irq_map[virq].hwirq = host->inval_irq; |
879 | 846 | ||
880 | /* Set some flags */ | 847 | irq_free_descs(virq, 1); |
881 | irq_to_desc(virq)->status |= IRQ_NOREQUEST; | ||
882 | |||
883 | /* Free it */ | 848 | /* Free it */ |
884 | irq_free_virt(virq, 1); | 849 | irq_free_virt(virq, 1); |
885 | } | 850 | } |
@@ -924,21 +889,17 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host, | |||
924 | struct irq_map_entry *ptr; | 889 | struct irq_map_entry *ptr; |
925 | unsigned int virq; | 890 | unsigned int virq; |
926 | 891 | ||
927 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); | 892 | if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE)) |
928 | |||
929 | /* | ||
930 | * Check if the radix tree exists and has been initialized. | ||
931 | * If not, we fall back to slow mode | ||
932 | */ | ||
933 | if (revmap_trees_allocated < 2) | ||
934 | return irq_find_mapping(host, hwirq); | 893 | return irq_find_mapping(host, hwirq); |
935 | 894 | ||
936 | /* Now try to resolve */ | ||
937 | /* | 895 | /* |
938 | * No rcu_read_lock(ing) needed, the ptr returned can't go under us | 896 | * The ptr returned references an entry in the static irq_map, |
939 | * as it's referencing an entry in the static irq_map table. | 897 | * but freeing an irq can delete tree nodes along the lookup |
898 | * path via call_rcu, so the lookup must run under rcu_read_lock(). | ||
940 | */ | 899 | */ |
900 | rcu_read_lock(); | ||
941 | ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq); | 901 | ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq); |
902 | rcu_read_unlock(); | ||
942 | 903 | ||
943 | /* | 904 | /* |
944 | * If found in radix tree, then fine. | 905 | * If found in radix tree, then fine. |
@@ -956,16 +917,7 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host, | |||
956 | void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, | 917 | void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, |
957 | irq_hw_number_t hwirq) | 918 | irq_hw_number_t hwirq) |
958 | { | 919 | { |
959 | 920 | if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE)) | |
960 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); | ||
961 | |||
962 | /* | ||
963 | * Check if the radix tree exists yet. | ||
964 | * If not, then the irq will be inserted into the tree when it gets | ||
965 | * initialized. | ||
966 | */ | ||
967 | smp_rmb(); | ||
968 | if (revmap_trees_allocated < 1) | ||
969 | return; | 921 | return; |
970 | 922 | ||
971 | if (virq != NO_IRQ) { | 923 | if (virq != NO_IRQ) { |
@@ -981,7 +933,8 @@ unsigned int irq_linear_revmap(struct irq_host *host, | |||
981 | { | 933 | { |
982 | unsigned int *revmap; | 934 | unsigned int *revmap; |
983 | 935 | ||
984 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR); | 936 | if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR)) |
937 | return irq_find_mapping(host, hwirq); | ||
985 | 938 | ||
986 | /* Check revmap bounds */ | 939 | /* Check revmap bounds */ |
987 | if (unlikely(hwirq >= host->revmap_data.linear.size)) | 940 | if (unlikely(hwirq >= host->revmap_data.linear.size)) |
@@ -1054,14 +1007,23 @@ void irq_free_virt(unsigned int virq, unsigned int count) | |||
1054 | WARN_ON (virq < NUM_ISA_INTERRUPTS); | 1007 | WARN_ON (virq < NUM_ISA_INTERRUPTS); |
1055 | WARN_ON (count == 0 || (virq + count) > irq_virq_count); | 1008 | WARN_ON (count == 0 || (virq + count) > irq_virq_count); |
1056 | 1009 | ||
1010 | if (virq < NUM_ISA_INTERRUPTS) { | ||
1011 | if (virq + count < NUM_ISA_INTERRUPTS) | ||
1012 | return; | ||
1013 | count -= NUM_ISA_INTERRUPTS - virq; | ||
1014 | virq = NUM_ISA_INTERRUPTS; | ||
1015 | } | ||
1016 | |||
1017 | if (count > irq_virq_count || virq > irq_virq_count - count) { | ||
1018 | if (virq > irq_virq_count) | ||
1019 | return; | ||
1020 | count = irq_virq_count - virq; | ||
1021 | } | ||
1022 | |||
1057 | raw_spin_lock_irqsave(&irq_big_lock, flags); | 1023 | raw_spin_lock_irqsave(&irq_big_lock, flags); |
1058 | for (i = virq; i < (virq + count); i++) { | 1024 | for (i = virq; i < (virq + count); i++) { |
1059 | struct irq_host *host; | 1025 | struct irq_host *host; |
1060 | 1026 | ||
1061 | if (i < NUM_ISA_INTERRUPTS || | ||
1062 | (virq + count) > irq_virq_count) | ||
1063 | continue; | ||
1064 | |||
1065 | host = irq_map[i].host; | 1027 | host = irq_map[i].host; |
1066 | irq_map[i].hwirq = host->inval_irq; | 1028 | irq_map[i].hwirq = host->inval_irq; |
1067 | smp_wmb(); | 1029 | smp_wmb(); |
@@ -1072,82 +1034,21 @@ void irq_free_virt(unsigned int virq, unsigned int count) | |||
1072 | 1034 | ||
1073 | int arch_early_irq_init(void) | 1035 | int arch_early_irq_init(void) |
1074 | { | 1036 | { |
1075 | struct irq_desc *desc; | ||
1076 | int i; | ||
1077 | |||
1078 | for (i = 0; i < NR_IRQS; i++) { | ||
1079 | desc = irq_to_desc(i); | ||
1080 | if (desc) | ||
1081 | desc->status |= IRQ_NOREQUEST; | ||
1082 | } | ||
1083 | |||
1084 | return 0; | ||
1085 | } | ||
1086 | |||
1087 | int arch_init_chip_data(struct irq_desc *desc, int node) | ||
1088 | { | ||
1089 | desc->status |= IRQ_NOREQUEST; | ||
1090 | return 0; | 1037 | return 0; |
1091 | } | 1038 | } |
1092 | 1039 | ||
1093 | /* We need to create the radix trees late */ | ||
1094 | static int irq_late_init(void) | ||
1095 | { | ||
1096 | struct irq_host *h; | ||
1097 | unsigned int i; | ||
1098 | |||
1099 | /* | ||
1100 | * No mutual exclusion with respect to accessors of the tree is needed | ||
1101 | * here as the synchronization is done via the state variable | ||
1102 | * revmap_trees_allocated. | ||
1103 | */ | ||
1104 | list_for_each_entry(h, &irq_hosts, link) { | ||
1105 | if (h->revmap_type == IRQ_HOST_MAP_TREE) | ||
1106 | INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL); | ||
1107 | } | ||
1108 | |||
1109 | /* | ||
1110 | * Make sure the radix trees inits are visible before setting | ||
1111 | * the flag | ||
1112 | */ | ||
1113 | smp_wmb(); | ||
1114 | revmap_trees_allocated = 1; | ||
1115 | |||
1116 | /* | ||
1117 | * Insert the reverse mapping for those interrupts already present | ||
1118 | * in irq_map[]. | ||
1119 | */ | ||
1120 | mutex_lock(&revmap_trees_mutex); | ||
1121 | for (i = 0; i < irq_virq_count; i++) { | ||
1122 | if (irq_map[i].host && | ||
1123 | (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE)) | ||
1124 | radix_tree_insert(&irq_map[i].host->revmap_data.tree, | ||
1125 | irq_map[i].hwirq, &irq_map[i]); | ||
1126 | } | ||
1127 | mutex_unlock(&revmap_trees_mutex); | ||
1128 | |||
1129 | /* | ||
1130 | * Make sure the radix trees insertions are visible before setting | ||
1131 | * the flag | ||
1132 | */ | ||
1133 | smp_wmb(); | ||
1134 | revmap_trees_allocated = 2; | ||
1135 | |||
1136 | return 0; | ||
1137 | } | ||
1138 | arch_initcall(irq_late_init); | ||
1139 | |||
1140 | #ifdef CONFIG_VIRQ_DEBUG | 1040 | #ifdef CONFIG_VIRQ_DEBUG |
1141 | static int virq_debug_show(struct seq_file *m, void *private) | 1041 | static int virq_debug_show(struct seq_file *m, void *private) |
1142 | { | 1042 | { |
1143 | unsigned long flags; | 1043 | unsigned long flags; |
1144 | struct irq_desc *desc; | 1044 | struct irq_desc *desc; |
1145 | const char *p; | 1045 | const char *p; |
1146 | char none[] = "none"; | 1046 | static const char none[] = "none"; |
1047 | void *data; | ||
1147 | int i; | 1048 | int i; |
1148 | 1049 | ||
1149 | seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq", | 1050 | seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq", |
1150 | "chip name", "host name"); | 1051 | "chip name", "chip data", "host name"); |
1151 | 1052 | ||
1152 | for (i = 1; i < nr_irqs; i++) { | 1053 | for (i = 1; i < nr_irqs; i++) { |
1153 | desc = irq_to_desc(i); | 1054 | desc = irq_to_desc(i); |
@@ -1157,15 +1058,21 @@ static int virq_debug_show(struct seq_file *m, void *private) | |||
1157 | raw_spin_lock_irqsave(&desc->lock, flags); | 1058 | raw_spin_lock_irqsave(&desc->lock, flags); |
1158 | 1059 | ||
1159 | if (desc->action && desc->action->handler) { | 1060 | if (desc->action && desc->action->handler) { |
1061 | struct irq_chip *chip; | ||
1062 | |||
1160 | seq_printf(m, "%5d ", i); | 1063 | seq_printf(m, "%5d ", i); |
1161 | seq_printf(m, "0x%05lx ", virq_to_hw(i)); | 1064 | seq_printf(m, "0x%05lx ", irq_map[i].hwirq); |
1162 | 1065 | ||
1163 | if (desc->chip && desc->chip->name) | 1066 | chip = irq_desc_get_chip(desc); |
1164 | p = desc->chip->name; | 1067 | if (chip && chip->name) |
1068 | p = chip->name; | ||
1165 | else | 1069 | else |
1166 | p = none; | 1070 | p = none; |
1167 | seq_printf(m, "%-15s ", p); | 1071 | seq_printf(m, "%-15s ", p); |
1168 | 1072 | ||
1073 | data = irq_desc_get_chip_data(desc); | ||
1074 | seq_printf(m, "0x%16p ", data); | ||
1075 | |||
1169 | if (irq_map[i].host && irq_map[i].host->of_node) | 1076 | if (irq_map[i].host && irq_map[i].host->of_node) |
1170 | p = irq_map[i].host->of_node->full_name; | 1077 | p = irq_map[i].host->of_node->full_name; |
1171 | else | 1078 | else |
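Two idioms in the irq.c changes above are worth calling out. The revmap lookup now runs under rcu_read_lock() because radix_tree_delete() can free internal tree nodes (via call_rcu) while a lookup walks them; and irq_setup_virq gained a proper reverse-order unwind, where a failed ->map() releases the freshly allocated descriptor at errdesc: before the virq number is given back at error:. The unwind ladder, as a standalone sketch (resource names made up):

    #include <stdlib.h>

    /* Hypothetical two-stage setup with kernel-style goto unwind. */
    static int setup_two_stages(void)
    {
            void *desc, *mapping;

            desc = malloc(64);              /* stage 1: the descriptor */
            if (!desc)
                    goto err;

            mapping = malloc(64);           /* stage 2: the mapping */
            if (!mapping)
                    goto err_desc;

            /* ... success path hands both off to the caller ... */
            free(mapping);
            free(desc);
            return 0;

    err_desc:
            free(desc);                     /* undo stage 1 only */
    err:
            return -1;
    }

    int main(void)
    {
            return setup_two_stages();
    }

Each label undoes exactly the stages completed before the jump, so no resource is leaked and none is freed twice.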
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index 7f61a3ac787c..76a6e40a6f7c 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c | |||
@@ -109,7 +109,7 @@ static int kgdb_call_nmi_hook(struct pt_regs *regs) | |||
109 | #ifdef CONFIG_SMP | 109 | #ifdef CONFIG_SMP |
110 | void kgdb_roundup_cpus(unsigned long flags) | 110 | void kgdb_roundup_cpus(unsigned long flags) |
111 | { | 111 | { |
112 | smp_send_debugger_break(MSG_ALL_BUT_SELF); | 112 | smp_send_debugger_break(); |
113 | } | 113 | } |
114 | #endif | 114 | #endif |
115 | 115 | ||
@@ -142,7 +142,7 @@ static int kgdb_singlestep(struct pt_regs *regs) | |||
142 | return 0; | 142 | return 0; |
143 | 143 | ||
144 | /* | 144 | /* |
145 | * On Book E and perhaps other processsors, singlestep is handled on | 145 | * On Book E and perhaps other processors, singlestep is handled on |
146 | * the critical exception stack. This causes current_thread_info() | 146 | * the critical exception stack. This causes current_thread_info() |
147 | * to fail, since it locates the thread_info by masking off | 147 | * to fail, since it locates the thread_info by masking off |
148 | * the low bits of the current stack pointer. We work around | 148 | * the low bits of the current stack pointer. We work around |
@@ -194,40 +194,6 @@ static int kgdb_dabr_match(struct pt_regs *regs) | |||
194 | ptr = (unsigned long *)ptr32; \ | 194 | ptr = (unsigned long *)ptr32; \ |
195 | } while (0) | 195 | } while (0) |
196 | 196 | ||
197 | |||
198 | void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) | ||
199 | { | ||
200 | unsigned long *ptr = gdb_regs; | ||
201 | int reg; | ||
202 | |||
203 | memset(gdb_regs, 0, NUMREGBYTES); | ||
204 | |||
205 | for (reg = 0; reg < 32; reg++) | ||
206 | PACK64(ptr, regs->gpr[reg]); | ||
207 | |||
208 | #ifdef CONFIG_FSL_BOOKE | ||
209 | #ifdef CONFIG_SPE | ||
210 | for (reg = 0; reg < 32; reg++) | ||
211 | PACK64(ptr, current->thread.evr[reg]); | ||
212 | #else | ||
213 | ptr += 32; | ||
214 | #endif | ||
215 | #else | ||
216 | /* fp registers not used by kernel, leave zero */ | ||
217 | ptr += 32 * 8 / sizeof(long); | ||
218 | #endif | ||
219 | |||
220 | PACK64(ptr, regs->nip); | ||
221 | PACK64(ptr, regs->msr); | ||
222 | PACK32(ptr, regs->ccr); | ||
223 | PACK64(ptr, regs->link); | ||
224 | PACK64(ptr, regs->ctr); | ||
225 | PACK32(ptr, regs->xer); | ||
226 | |||
227 | BUG_ON((unsigned long)ptr > | ||
228 | (unsigned long)(((void *)gdb_regs) + NUMREGBYTES)); | ||
229 | } | ||
230 | |||
231 | void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) | 197 | void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) |
232 | { | 198 | { |
233 | struct pt_regs *regs = (struct pt_regs *)(p->thread.ksp + | 199 | struct pt_regs *regs = (struct pt_regs *)(p->thread.ksp + |
@@ -271,44 +237,140 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) | |||
271 | (unsigned long)(((void *)gdb_regs) + NUMREGBYTES)); | 237 | (unsigned long)(((void *)gdb_regs) + NUMREGBYTES)); |
272 | } | 238 | } |
273 | 239 | ||
274 | #define UNPACK64(dest, ptr) do { dest = *(ptr++); } while (0) | 240 | #define GDB_SIZEOF_REG sizeof(unsigned long) |
241 | #define GDB_SIZEOF_REG_U32 sizeof(u32) | ||
275 | 242 | ||
276 | #define UNPACK32(dest, ptr) do { \ | 243 | #ifdef CONFIG_FSL_BOOKE |
277 | u32 *ptr32; \ | 244 | #define GDB_SIZEOF_FLOAT_REG sizeof(unsigned long) |
278 | ptr32 = (u32 *)ptr; \ | 245 | #else |
279 | dest = *(ptr32++); \ | 246 | #define GDB_SIZEOF_FLOAT_REG sizeof(u64) |
280 | ptr = (unsigned long *)ptr32; \ | 247 | #endif |
281 | } while (0) | ||
282 | 248 | ||
283 | void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) | 249 | struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = |
284 | { | 250 | { |
285 | unsigned long *ptr = gdb_regs; | 251 | { "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[0]) }, |
286 | int reg; | 252 | { "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[1]) }, |
287 | 253 | { "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[2]) }, | |
288 | for (reg = 0; reg < 32; reg++) | 254 | { "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[3]) }, |
289 | UNPACK64(regs->gpr[reg], ptr); | 255 | { "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[4]) }, |
256 | { "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[5]) }, | ||
257 | { "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[6]) }, | ||
258 | { "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[7]) }, | ||
259 | { "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[8]) }, | ||
260 | { "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[9]) }, | ||
261 | { "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[10]) }, | ||
262 | { "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[11]) }, | ||
263 | { "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[12]) }, | ||
264 | { "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[13]) }, | ||
265 | { "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[14]) }, | ||
266 | { "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[15]) }, | ||
267 | { "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[16]) }, | ||
268 | { "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[17]) }, | ||
269 | { "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[18]) }, | ||
270 | { "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[19]) }, | ||
271 | { "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[20]) }, | ||
272 | { "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[21]) }, | ||
273 | { "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[22]) }, | ||
274 | { "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[23]) }, | ||
275 | { "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[24]) }, | ||
276 | { "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[25]) }, | ||
277 | { "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[26]) }, | ||
278 | { "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[27]) }, | ||
279 | { "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[28]) }, | ||
280 | { "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[29]) }, | ||
281 | { "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[30]) }, | ||
282 | { "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[31]) }, | ||
283 | |||
284 | { "f0", GDB_SIZEOF_FLOAT_REG, 0 }, | ||
285 | { "f1", GDB_SIZEOF_FLOAT_REG, 1 }, | ||
286 | { "f2", GDB_SIZEOF_FLOAT_REG, 2 }, | ||
287 | { "f3", GDB_SIZEOF_FLOAT_REG, 3 }, | ||
288 | { "f4", GDB_SIZEOF_FLOAT_REG, 4 }, | ||
289 | { "f5", GDB_SIZEOF_FLOAT_REG, 5 }, | ||
290 | { "f6", GDB_SIZEOF_FLOAT_REG, 6 }, | ||
291 | { "f7", GDB_SIZEOF_FLOAT_REG, 7 }, | ||
292 | { "f8", GDB_SIZEOF_FLOAT_REG, 8 }, | ||
293 | { "f9", GDB_SIZEOF_FLOAT_REG, 9 }, | ||
294 | { "f10", GDB_SIZEOF_FLOAT_REG, 10 }, | ||
295 | { "f11", GDB_SIZEOF_FLOAT_REG, 11 }, | ||
296 | { "f12", GDB_SIZEOF_FLOAT_REG, 12 }, | ||
297 | { "f13", GDB_SIZEOF_FLOAT_REG, 13 }, | ||
298 | { "f14", GDB_SIZEOF_FLOAT_REG, 14 }, | ||
299 | { "f15", GDB_SIZEOF_FLOAT_REG, 15 }, | ||
300 | { "f16", GDB_SIZEOF_FLOAT_REG, 16 }, | ||
301 | { "f17", GDB_SIZEOF_FLOAT_REG, 17 }, | ||
302 | { "f18", GDB_SIZEOF_FLOAT_REG, 18 }, | ||
303 | { "f19", GDB_SIZEOF_FLOAT_REG, 19 }, | ||
304 | { "f20", GDB_SIZEOF_FLOAT_REG, 20 }, | ||
305 | { "f21", GDB_SIZEOF_FLOAT_REG, 21 }, | ||
306 | { "f22", GDB_SIZEOF_FLOAT_REG, 22 }, | ||
307 | { "f23", GDB_SIZEOF_FLOAT_REG, 23 }, | ||
308 | { "f24", GDB_SIZEOF_FLOAT_REG, 24 }, | ||
309 | { "f25", GDB_SIZEOF_FLOAT_REG, 25 }, | ||
310 | { "f26", GDB_SIZEOF_FLOAT_REG, 26 }, | ||
311 | { "f27", GDB_SIZEOF_FLOAT_REG, 27 }, | ||
312 | { "f28", GDB_SIZEOF_FLOAT_REG, 28 }, | ||
313 | { "f29", GDB_SIZEOF_FLOAT_REG, 29 }, | ||
314 | { "f30", GDB_SIZEOF_FLOAT_REG, 30 }, | ||
315 | { "f31", GDB_SIZEOF_FLOAT_REG, 31 }, | ||
316 | |||
317 | { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, nip) }, | ||
318 | { "msr", GDB_SIZEOF_REG, offsetof(struct pt_regs, msr) }, | ||
319 | { "cr", GDB_SIZEOF_REG_U32, offsetof(struct pt_regs, ccr) }, | ||
320 | { "lr", GDB_SIZEOF_REG, offsetof(struct pt_regs, link) }, | ||
321 | { "ctr", GDB_SIZEOF_REG_U32, offsetof(struct pt_regs, ctr) }, | ||
322 | { "xer", GDB_SIZEOF_REG, offsetof(struct pt_regs, xer) }, | ||
323 | }; | ||
290 | 324 | ||
291 | #ifdef CONFIG_FSL_BOOKE | 325 | char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) |
292 | #ifdef CONFIG_SPE | 326 | { |
293 | for (reg = 0; reg < 32; reg++) | 327 | if (regno >= DBG_MAX_REG_NUM || regno < 0) |
294 | UNPACK64(current->thread.evr[reg], ptr); | 328 | return NULL; |
329 | |||
330 | if (regno < 32 || regno >= 64) | ||
331 | /* First 0 -> 31 gpr registers */ | ||
332 | /* pc, msr, cr, lr... registers 64 -> 69 */ | ||
333 | memcpy(mem, (void *)regs + dbg_reg_def[regno].offset, | ||
334 | dbg_reg_def[regno].size); | ||
335 | |||
336 | if (regno >= 32 && regno < 64) { | ||
337 | /* FP registers 32 -> 63 */ | ||
338 | #if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE) | ||
339 | if (current) | ||
340 | memcpy(mem, ¤t->thread.evr[regno-32], | ||
341 | dbg_reg_def[regno].size); | ||
295 | #else | 342 | #else |
296 | ptr += 32; | 343 | /* fp registers not used by kernel, leave zero */ |
344 | memset(mem, 0, dbg_reg_def[regno].size); | ||
297 | #endif | 345 | #endif |
346 | } | ||
347 | |||
348 | return dbg_reg_def[regno].name; | ||
349 | } | ||
350 | |||
351 | int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) | ||
352 | { | ||
353 | if (regno >= DBG_MAX_REG_NUM || regno < 0) | ||
354 | return -EINVAL; | ||
355 | |||
356 | if (regno < 32 || regno >= 64) | ||
357 | /* First 0 -> 31 gpr registers */ | ||
358 | /* pc, msr, cr, lr... registers 64 -> 69 */ | ||
359 | memcpy((void *)regs + dbg_reg_def[regno].offset, mem, | ||
360 | dbg_reg_def[regno].size); | ||
361 | |||
362 | if (regno >= 32 && regno < 64) { | ||
363 | /* FP registers 32 -> 63 */ | ||
364 | #if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE) | ||
365 | memcpy(¤t->thread.evr[regno-32], mem, | ||
366 | dbg_reg_def[regno].size); | ||
298 | #else | 367 | #else |
299 | /* fp registers not used by kernel, leave zero */ | 368 | /* fp registers not used by kernel, leave zero */ |
300 | ptr += 32 * 8 / sizeof(int); | 369 | return 0; |
301 | #endif | 370 | #endif |
371 | } | ||
302 | 372 | ||
303 | UNPACK64(regs->nip, ptr); | 373 | return 0; |
304 | UNPACK64(regs->msr, ptr); | ||
305 | UNPACK32(regs->ccr, ptr); | ||
306 | UNPACK64(regs->link, ptr); | ||
307 | UNPACK64(regs->ctr, ptr); | ||
308 | UNPACK32(regs->xer, ptr); | ||
309 | |||
310 | BUG_ON((unsigned long)ptr > | ||
311 | (unsigned long)(((void *)gdb_regs) + NUMREGBYTES)); | ||
312 | } | 374 | } |
313 | 375 | ||
314 | void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) | 376 | void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) |
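The kgdb rewrite above replaces the hand-rolled PACK64/UNPACK64 marshalling with a dbg_reg_def[] table in which every register carries a name, a size and (for struct-backed registers) an offsetof() into pt_regs, so one memcpy serves them all. The table-driven accessor, sketched against a toy regs struct rather than the real pt_regs:

    #include <stddef.h>
    #include <string.h>
    #include <stdio.h>

    struct regs { unsigned long pc, sp; unsigned int flags; };

    struct reg_def { const char *name; size_t size; size_t offset; };

    static const struct reg_def reg_defs[] = {
            { "pc",    sizeof(unsigned long), offsetof(struct regs, pc)    },
            { "sp",    sizeof(unsigned long), offsetof(struct regs, sp)    },
            { "flags", sizeof(unsigned int),  offsetof(struct regs, flags) },
    };

    /* Copy register 'regno' out of *regs; returns its name, or NULL. */
    static const char *get_reg(int regno, void *mem, const struct regs *regs)
    {
            if (regno < 0 || regno >= (int)(sizeof(reg_defs) / sizeof(reg_defs[0])))
                    return NULL;
            memcpy(mem, (const char *)regs + reg_defs[regno].offset,
                   reg_defs[regno].size);
            return reg_defs[regno].name;
    }

    int main(void)
    {
            struct regs r = { .pc = 0x1000, .sp = 0x2000, .flags = 3 };
            unsigned long v = 0;

            printf("%s = %#lx\n", get_reg(0, &v, &r), v);   /* pc = 0x1000 */
            return 0;
    }

Registers that live outside the struct (the FP/SPE set, indices 32-63 above) keep an index instead of an offset and are special-cased, exactly as dbg_get_reg/dbg_set_reg do.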
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c new file mode 100644 index 000000000000..b06bdae04064 --- /dev/null +++ b/arch/powerpc/kernel/kvm.c | |||
@@ -0,0 +1,596 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved. | ||
3 | * | ||
4 | * Authors: | ||
5 | * Alexander Graf <agraf@suse.de> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License, version 2, as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
19 | */ | ||
20 | |||
21 | #include <linux/kvm_host.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/kvm_para.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/of.h> | ||
26 | |||
27 | #include <asm/reg.h> | ||
28 | #include <asm/sections.h> | ||
29 | #include <asm/cacheflush.h> | ||
30 | #include <asm/disassemble.h> | ||
31 | |||
32 | #define KVM_MAGIC_PAGE (-4096L) | ||
33 | #define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x) | ||
34 | |||
35 | #define KVM_INST_LWZ 0x80000000 | ||
36 | #define KVM_INST_STW 0x90000000 | ||
37 | #define KVM_INST_LD 0xe8000000 | ||
38 | #define KVM_INST_STD 0xf8000000 | ||
39 | #define KVM_INST_NOP 0x60000000 | ||
40 | #define KVM_INST_B 0x48000000 | ||
41 | #define KVM_INST_B_MASK 0x03ffffff | ||
42 | #define KVM_INST_B_MAX 0x01ffffff | ||
43 | |||
44 | #define KVM_MASK_RT 0x03e00000 | ||
45 | #define KVM_RT_30 0x03c00000 | ||
46 | #define KVM_MASK_RB 0x0000f800 | ||
47 | #define KVM_INST_MFMSR 0x7c0000a6 | ||
48 | #define KVM_INST_MFSPR_SPRG0 0x7c1042a6 | ||
49 | #define KVM_INST_MFSPR_SPRG1 0x7c1142a6 | ||
50 | #define KVM_INST_MFSPR_SPRG2 0x7c1242a6 | ||
51 | #define KVM_INST_MFSPR_SPRG3 0x7c1342a6 | ||
52 | #define KVM_INST_MFSPR_SRR0 0x7c1a02a6 | ||
53 | #define KVM_INST_MFSPR_SRR1 0x7c1b02a6 | ||
54 | #define KVM_INST_MFSPR_DAR 0x7c1302a6 | ||
55 | #define KVM_INST_MFSPR_DSISR 0x7c1202a6 | ||
56 | |||
57 | #define KVM_INST_MTSPR_SPRG0 0x7c1043a6 | ||
58 | #define KVM_INST_MTSPR_SPRG1 0x7c1143a6 | ||
59 | #define KVM_INST_MTSPR_SPRG2 0x7c1243a6 | ||
60 | #define KVM_INST_MTSPR_SPRG3 0x7c1343a6 | ||
61 | #define KVM_INST_MTSPR_SRR0 0x7c1a03a6 | ||
62 | #define KVM_INST_MTSPR_SRR1 0x7c1b03a6 | ||
63 | #define KVM_INST_MTSPR_DAR 0x7c1303a6 | ||
64 | #define KVM_INST_MTSPR_DSISR 0x7c1203a6 | ||
65 | |||
66 | #define KVM_INST_TLBSYNC 0x7c00046c | ||
67 | #define KVM_INST_MTMSRD_L0 0x7c000164 | ||
68 | #define KVM_INST_MTMSRD_L1 0x7c010164 | ||
69 | #define KVM_INST_MTMSR 0x7c000124 | ||
70 | |||
71 | #define KVM_INST_WRTEEI_0 0x7c000146 | ||
72 | #define KVM_INST_WRTEEI_1 0x7c008146 | ||
73 | |||
74 | #define KVM_INST_MTSRIN 0x7c0001e4 | ||
75 | |||
76 | static bool kvm_patching_worked = true; | ||
77 | static char kvm_tmp[1024 * 1024]; | ||
78 | static int kvm_tmp_index; | ||
79 | |||
80 | static inline void kvm_patch_ins(u32 *inst, u32 new_inst) | ||
81 | { | ||
82 | *inst = new_inst; | ||
83 | flush_icache_range((ulong)inst, (ulong)inst + 4); | ||
84 | } | ||
85 | |||
86 | static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt) | ||
87 | { | ||
88 | #ifdef CONFIG_64BIT | ||
89 | kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc)); | ||
90 | #else | ||
91 | kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc)); | ||
92 | #endif | ||
93 | } | ||
94 | |||
95 | static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt) | ||
96 | { | ||
97 | #ifdef CONFIG_64BIT | ||
98 | kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc)); | ||
99 | #else | ||
100 | kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc)); | ||
101 | #endif | ||
102 | } | ||
103 | |||
104 | static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt) | ||
105 | { | ||
106 | kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff)); | ||
107 | } | ||
108 | |||
109 | static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt) | ||
110 | { | ||
111 | #ifdef CONFIG_64BIT | ||
112 | kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc)); | ||
113 | #else | ||
114 | kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc)); | ||
115 | #endif | ||
116 | } | ||
117 | |||
118 | static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt) | ||
119 | { | ||
120 | kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc)); | ||
121 | } | ||
122 | |||
123 | static void kvm_patch_ins_nop(u32 *inst) | ||
124 | { | ||
125 | kvm_patch_ins(inst, KVM_INST_NOP); | ||
126 | } | ||
127 | |||
128 | static void kvm_patch_ins_b(u32 *inst, int addr) | ||
129 | { | ||
130 | #if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S) | ||
131 | /* On relocatable kernels interrupt handlers and our code | ||
132 | can be in different regions, so we don't patch them */ | ||
133 | |||
134 | extern u32 __end_interrupts; | ||
135 | if ((ulong)inst < (ulong)&__end_interrupts) | ||
136 | return; | ||
137 | #endif | ||
138 | |||
139 | kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK)); | ||
140 | } | ||
141 | |||
142 | static u32 *kvm_alloc(int len) | ||
143 | { | ||
144 | u32 *p; | ||
145 | |||
146 | if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) { | ||
147 | printk(KERN_ERR "KVM: No more space (%d + %d)\n", | ||
148 | kvm_tmp_index, len); | ||
149 | kvm_patching_worked = false; | ||
150 | return NULL; | ||
151 | } | ||
152 | |||
153 | p = (void*)&kvm_tmp[kvm_tmp_index]; | ||
154 | kvm_tmp_index += len; | ||
155 | |||
156 | return p; | ||
157 | } | ||
158 | |||
159 | extern u32 kvm_emulate_mtmsrd_branch_offs; | ||
160 | extern u32 kvm_emulate_mtmsrd_reg_offs; | ||
161 | extern u32 kvm_emulate_mtmsrd_orig_ins_offs; | ||
162 | extern u32 kvm_emulate_mtmsrd_len; | ||
163 | extern u32 kvm_emulate_mtmsrd[]; | ||
164 | |||
165 | static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt) | ||
166 | { | ||
167 | u32 *p; | ||
168 | int distance_start; | ||
169 | int distance_end; | ||
170 | ulong next_inst; | ||
171 | |||
172 | p = kvm_alloc(kvm_emulate_mtmsrd_len * 4); | ||
173 | if (!p) | ||
174 | return; | ||
175 | |||
176 | /* Find out where we are and put everything there */ | ||
177 | distance_start = (ulong)p - (ulong)inst; | ||
178 | next_inst = ((ulong)inst + 4); | ||
179 | distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs]; | ||
180 | |||
181 | /* Make sure we only write valid b instructions */ | ||
182 | if (distance_start > KVM_INST_B_MAX) { | ||
183 | kvm_patching_worked = false; | ||
184 | return; | ||
185 | } | ||
186 | |||
187 | /* Modify the chunk to fit the invocation */ | ||
188 | memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4); | ||
189 | p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK; | ||
190 | switch (get_rt(rt)) { | ||
191 | case 30: | ||
192 | kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs], | ||
193 | magic_var(scratch2), KVM_RT_30); | ||
194 | break; | ||
195 | case 31: | ||
196 | kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs], | ||
197 | magic_var(scratch1), KVM_RT_30); | ||
198 | break; | ||
199 | default: | ||
200 | p[kvm_emulate_mtmsrd_reg_offs] |= rt; | ||
201 | break; | ||
202 | } | ||
203 | |||
204 | p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst; | ||
205 | flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4); | ||
206 | |||
207 | /* Patch the invocation */ | ||
208 | kvm_patch_ins_b(inst, distance_start); | ||
209 | } | ||
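Editor's note: two displacements drive each trampoline. distance_start is OR'd into a `b` at the patched site to reach the chunk; distance_end is OR'd into the chunk's trailing `b .` slot to return to the instruction after the original one. A small numeric check under assumed addresses (branch_offs stands in for kvm_emulate_mtmsrd_branch_offs):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uintptr_t inst  = 0xc0001000;   /* assumed address of the patched mtmsrd */
        uintptr_t tramp = 0xc0900000;   /* assumed chunk address from kvm_alloc() */
        unsigned  branch_offs = 20;     /* assumed slot of the return branch, in words */

        long distance_start = (long)(tramp - inst);             /* site -> chunk */
        long distance_end   = (long)(inst + 4) -
                              (long)(tramp + branch_offs * 4);  /* slot -> site+4 */

        assert(distance_start == 0x8ff000);
        assert(distance_end   == -(0x8ff000 + (long)branch_offs * 4 - 4));
        return 0;
    }

Note that the code above bounds-checks only distance_start; distance_end exceeds it by just the chunk-internal offset, which is small for these few-dozen-instruction chunks.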
210 | |||
211 | extern u32 kvm_emulate_mtmsr_branch_offs; | ||
212 | extern u32 kvm_emulate_mtmsr_reg1_offs; | ||
213 | extern u32 kvm_emulate_mtmsr_reg2_offs; | ||
214 | extern u32 kvm_emulate_mtmsr_orig_ins_offs; | ||
215 | extern u32 kvm_emulate_mtmsr_len; | ||
216 | extern u32 kvm_emulate_mtmsr[]; | ||
217 | |||
218 | static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt) | ||
219 | { | ||
220 | u32 *p; | ||
221 | int distance_start; | ||
222 | int distance_end; | ||
223 | ulong next_inst; | ||
224 | |||
225 | p = kvm_alloc(kvm_emulate_mtmsr_len * 4); | ||
226 | if (!p) | ||
227 | return; | ||
228 | |||
229 | /* Find out where we are and put everything there */ | ||
230 | distance_start = (ulong)p - (ulong)inst; | ||
231 | next_inst = ((ulong)inst + 4); | ||
232 | distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs]; | ||
233 | |||
234 | /* Make sure we only write valid b instructions */ | ||
235 | if (distance_start > KVM_INST_B_MAX) { | ||
236 | kvm_patching_worked = false; | ||
237 | return; | ||
238 | } | ||
239 | |||
240 | /* Modify the chunk to fit the invocation */ | ||
241 | memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4); | ||
242 | p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK; | ||
243 | |||
244 | /* Make clobbered registers work too */ | ||
245 | switch (get_rt(rt)) { | ||
246 | case 30: | ||
247 | kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs], | ||
248 | magic_var(scratch2), KVM_RT_30); | ||
249 | kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs], | ||
250 | magic_var(scratch2), KVM_RT_30); | ||
251 | break; | ||
252 | case 31: | ||
253 | kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs], | ||
254 | magic_var(scratch1), KVM_RT_30); | ||
255 | kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs], | ||
256 | magic_var(scratch1), KVM_RT_30); | ||
257 | break; | ||
258 | default: | ||
259 | p[kvm_emulate_mtmsr_reg1_offs] |= rt; | ||
260 | p[kvm_emulate_mtmsr_reg2_offs] |= rt; | ||
261 | break; | ||
262 | } | ||
263 | |||
264 | p[kvm_emulate_mtmsr_orig_ins_offs] = *inst; | ||
265 | flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4); | ||
266 | |||
267 | /* Patch the invocation */ | ||
268 | kvm_patch_ins_b(inst, distance_start); | ||
269 | } | ||
270 | |||
271 | #ifdef CONFIG_BOOKE | ||
272 | |||
273 | extern u32 kvm_emulate_wrteei_branch_offs; | ||
274 | extern u32 kvm_emulate_wrteei_ee_offs; | ||
275 | extern u32 kvm_emulate_wrteei_len; | ||
276 | extern u32 kvm_emulate_wrteei[]; | ||
277 | |||
278 | static void kvm_patch_ins_wrteei(u32 *inst) | ||
279 | { | ||
280 | u32 *p; | ||
281 | int distance_start; | ||
282 | int distance_end; | ||
283 | ulong next_inst; | ||
284 | |||
285 | p = kvm_alloc(kvm_emulate_wrteei_len * 4); | ||
286 | if (!p) | ||
287 | return; | ||
288 | |||
289 | /* Find out where we are and put everything there */ | ||
290 | distance_start = (ulong)p - (ulong)inst; | ||
291 | next_inst = ((ulong)inst + 4); | ||
292 | distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_branch_offs]; | ||
293 | |||
294 | /* Make sure we only write valid b instructions */ | ||
295 | if (distance_start > KVM_INST_B_MAX) { | ||
296 | kvm_patching_worked = false; | ||
297 | return; | ||
298 | } | ||
299 | |||
300 | /* Modify the chunk to fit the invocation */ | ||
301 | memcpy(p, kvm_emulate_wrteei, kvm_emulate_wrteei_len * 4); | ||
302 | p[kvm_emulate_wrteei_branch_offs] |= distance_end & KVM_INST_B_MASK; | ||
303 | p[kvm_emulate_wrteei_ee_offs] |= (*inst & MSR_EE); | ||
304 | flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_len * 4); | ||
305 | |||
306 | /* Patch the invocation */ | ||
307 | kvm_patch_ins_b(inst, distance_start); | ||
308 | } | ||
309 | |||
310 | #endif | ||
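Editor's note: the one-line fixup `p[kvm_emulate_wrteei_ee_offs] |= (*inst & MSR_EE)` works because the E operand of wrteei occupies the same bit position (0x8000) as MSR_EE, so the bit can be lifted straight out of the guest's original instruction and OR'd into the template's `ori r31, r31, 0` slot (the ori immediate is the low 16 bits). A sketch with assumed encodings; wrteei is primary opcode 31, XO 163:

    #include <assert.h>
    #include <stdint.h>

    #define MSR_EE   0x8000u
    #define WRTEEI_0 0x7c000146u           /* assumed encoding of "wrteei 0" */
    #define WRTEEI_1 (WRTEEI_0 | 0x8000u)  /* E sits in instruction bit 0x8000 */

    int main(void)
    {
        /* The patcher ORs (*inst & MSR_EE) into the ori slot:
         * 0 for wrteei 0, MSR_EE for wrteei 1. */
        assert((WRTEEI_0 & MSR_EE) == 0);
        assert((WRTEEI_1 & MSR_EE) == MSR_EE);
        return 0;
    }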
311 | |||
312 | #ifdef CONFIG_PPC_BOOK3S_32 | ||
313 | |||
314 | extern u32 kvm_emulate_mtsrin_branch_offs; | ||
315 | extern u32 kvm_emulate_mtsrin_reg1_offs; | ||
316 | extern u32 kvm_emulate_mtsrin_reg2_offs; | ||
317 | extern u32 kvm_emulate_mtsrin_orig_ins_offs; | ||
318 | extern u32 kvm_emulate_mtsrin_len; | ||
319 | extern u32 kvm_emulate_mtsrin[]; | ||
320 | |||
321 | static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb) | ||
322 | { | ||
323 | u32 *p; | ||
324 | int distance_start; | ||
325 | int distance_end; | ||
326 | ulong next_inst; | ||
327 | |||
328 | p = kvm_alloc(kvm_emulate_mtsrin_len * 4); | ||
329 | if (!p) | ||
330 | return; | ||
331 | |||
332 | /* Find out where we are and put everything there */ | ||
333 | distance_start = (ulong)p - (ulong)inst; | ||
334 | next_inst = ((ulong)inst + 4); | ||
335 | distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs]; | ||
336 | |||
337 | /* Make sure we only write valid b instructions */ | ||
338 | if (distance_start > KVM_INST_B_MAX) { | ||
339 | kvm_patching_worked = false; | ||
340 | return; | ||
341 | } | ||
342 | |||
343 | /* Modify the chunk to fit the invocation */ | ||
344 | memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4); | ||
345 | p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK; | ||
346 | p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10); | ||
347 | p[kvm_emulate_mtsrin_reg2_offs] |= rt; | ||
348 | p[kvm_emulate_mtsrin_orig_ins_offs] = *inst; | ||
349 | flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4); | ||
350 | |||
351 | /* Patch the invocation */ | ||
352 | kvm_patch_ins_b(inst, distance_start); | ||
353 | } | ||
354 | |||
355 | #endif | ||
356 | |||
357 | static void kvm_map_magic_page(void *data) | ||
358 | { | ||
359 | u32 *features = data; | ||
360 | |||
361 | ulong in[8]; | ||
362 | ulong out[8]; | ||
363 | |||
364 | in[0] = KVM_MAGIC_PAGE; | ||
365 | in[1] = KVM_MAGIC_PAGE; | ||
366 | |||
367 | kvm_hypercall(in, out, HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE); | ||
368 | |||
369 | *features = out[0]; | ||
370 | } | ||
371 | |||
372 | static void kvm_check_ins(u32 *inst, u32 features) | ||
373 | { | ||
374 | u32 _inst = *inst; | ||
375 | u32 inst_no_rt = _inst & ~KVM_MASK_RT; | ||
376 | u32 inst_rt = _inst & KVM_MASK_RT; | ||
377 | |||
378 | switch (inst_no_rt) { | ||
379 | /* Loads */ | ||
380 | case KVM_INST_MFMSR: | ||
381 | kvm_patch_ins_ld(inst, magic_var(msr), inst_rt); | ||
382 | break; | ||
383 | case KVM_INST_MFSPR_SPRG0: | ||
384 | kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt); | ||
385 | break; | ||
386 | case KVM_INST_MFSPR_SPRG1: | ||
387 | kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt); | ||
388 | break; | ||
389 | case KVM_INST_MFSPR_SPRG2: | ||
390 | kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt); | ||
391 | break; | ||
392 | case KVM_INST_MFSPR_SPRG3: | ||
393 | kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt); | ||
394 | break; | ||
395 | case KVM_INST_MFSPR_SRR0: | ||
396 | kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt); | ||
397 | break; | ||
398 | case KVM_INST_MFSPR_SRR1: | ||
399 | kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt); | ||
400 | break; | ||
401 | case KVM_INST_MFSPR_DAR: | ||
402 | kvm_patch_ins_ld(inst, magic_var(dar), inst_rt); | ||
403 | break; | ||
404 | case KVM_INST_MFSPR_DSISR: | ||
405 | kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt); | ||
406 | break; | ||
407 | |||
408 | /* Stores */ | ||
409 | case KVM_INST_MTSPR_SPRG0: | ||
410 | kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt); | ||
411 | break; | ||
412 | case KVM_INST_MTSPR_SPRG1: | ||
413 | kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt); | ||
414 | break; | ||
415 | case KVM_INST_MTSPR_SPRG2: | ||
416 | kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt); | ||
417 | break; | ||
418 | case KVM_INST_MTSPR_SPRG3: | ||
419 | kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt); | ||
420 | break; | ||
421 | case KVM_INST_MTSPR_SRR0: | ||
422 | kvm_patch_ins_std(inst, magic_var(srr0), inst_rt); | ||
423 | break; | ||
424 | case KVM_INST_MTSPR_SRR1: | ||
425 | kvm_patch_ins_std(inst, magic_var(srr1), inst_rt); | ||
426 | break; | ||
427 | case KVM_INST_MTSPR_DAR: | ||
428 | kvm_patch_ins_std(inst, magic_var(dar), inst_rt); | ||
429 | break; | ||
430 | case KVM_INST_MTSPR_DSISR: | ||
431 | kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt); | ||
432 | break; | ||
433 | |||
434 | /* Nops */ | ||
435 | case KVM_INST_TLBSYNC: | ||
436 | kvm_patch_ins_nop(inst); | ||
437 | break; | ||
438 | |||
439 | /* Rewrites */ | ||
440 | case KVM_INST_MTMSRD_L1: | ||
441 | kvm_patch_ins_mtmsrd(inst, inst_rt); | ||
442 | break; | ||
443 | case KVM_INST_MTMSR: | ||
444 | case KVM_INST_MTMSRD_L0: | ||
445 | kvm_patch_ins_mtmsr(inst, inst_rt); | ||
446 | break; | ||
447 | } | ||
448 | |||
449 | switch (inst_no_rt & ~KVM_MASK_RB) { | ||
450 | #ifdef CONFIG_PPC_BOOK3S_32 | ||
451 | case KVM_INST_MTSRIN: | ||
452 | if (features & KVM_MAGIC_FEAT_SR) { | ||
453 | u32 inst_rb = _inst & KVM_MASK_RB; | ||
454 | kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb); | ||
455 | } | ||
456 | break; | ||
458 | #endif | ||
459 | } | ||
460 | |||
461 | switch (_inst) { | ||
462 | #ifdef CONFIG_BOOKE | ||
463 | case KVM_INST_WRTEEI_0: | ||
464 | case KVM_INST_WRTEEI_1: | ||
465 | kvm_patch_ins_wrteei(inst); | ||
466 | break; | ||
467 | #endif | ||
468 | } | ||
469 | } | ||
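Editor's note: the three switches classify each instruction word by masking off its variable register fields first, so every instance of an instruction shares one pattern regardless of which register it names. A standalone sketch for mfmsr, using the architectural encoding (opcode 31, XO 83), which is assumed to equal the KVM_INST_MFMSR/KVM_MASK_RT constants used above:

    #include <stdint.h>
    #include <stdio.h>

    #define MASK_RT    0x03e00000u   /* RT field, bits 21..25 */
    #define INST_MFMSR 0x7c0000a6u   /* mfmsr rD with RT cleared */

    int main(void)
    {
        uint32_t insn  = 0x7ca000a6;           /* mfmsr r5 */
        uint32_t no_rt = insn & ~MASK_RT;      /* strip the target register */
        uint32_t rt    = insn & MASK_RT;       /* keep it for re-insertion */

        if (no_rt == INST_MFMSR)
            printf("mfmsr r%u -> patch to a load from magic_var(msr)\n",
                   rt >> 21);
        return 0;
    }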
470 | |||
471 | static void kvm_use_magic_page(void) | ||
472 | { | ||
473 | u32 *p; | ||
474 | u32 *start, *end; | ||
475 | u32 tmp; | ||
476 | u32 features; | ||
477 | |||
478 | /* Tell the host to map the magic page to -4096 on all CPUs */ | ||
479 | on_each_cpu(kvm_map_magic_page, &features, 1); | ||
480 | |||
481 | /* Quick self-test to see if the mapping works */ | ||
482 | if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) { | ||
483 | kvm_patching_worked = false; | ||
484 | return; | ||
485 | } | ||
486 | |||
487 | /* Now loop through all code and find instructions */ | ||
488 | start = (void*)_stext; | ||
489 | end = (void*)_etext; | ||
490 | |||
491 | for (p = start; p < end; p++) | ||
492 | kvm_check_ins(p, features); | ||
493 | |||
494 | printk(KERN_INFO "KVM: Live patching for a fast VM %s\n", | ||
495 | kvm_patching_worked ? "worked" : "failed"); | ||
496 | } | ||
497 | |||
498 | unsigned long kvm_hypercall(unsigned long *in, | ||
499 | unsigned long *out, | ||
500 | unsigned long nr) | ||
501 | { | ||
502 | unsigned long register r0 asm("r0"); | ||
503 | unsigned long register r3 asm("r3") = in[0]; | ||
504 | unsigned long register r4 asm("r4") = in[1]; | ||
505 | unsigned long register r5 asm("r5") = in[2]; | ||
506 | unsigned long register r6 asm("r6") = in[3]; | ||
507 | unsigned long register r7 asm("r7") = in[4]; | ||
508 | unsigned long register r8 asm("r8") = in[5]; | ||
509 | unsigned long register r9 asm("r9") = in[6]; | ||
510 | unsigned long register r10 asm("r10") = in[7]; | ||
511 | unsigned long register r11 asm("r11") = nr; | ||
512 | unsigned long register r12 asm("r12"); | ||
513 | |||
514 | asm volatile("bl kvm_hypercall_start" | ||
515 | : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6), | ||
516 | "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11), | ||
517 | "=r"(r12) | ||
518 | : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8), | ||
519 | "r"(r9), "r"(r10), "r"(r11) | ||
520 | : "memory", "cc", "xer", "ctr", "lr"); | ||
521 | |||
522 | out[0] = r4; | ||
523 | out[1] = r5; | ||
524 | out[2] = r6; | ||
525 | out[3] = r7; | ||
526 | out[4] = r8; | ||
527 | out[5] = r9; | ||
528 | out[6] = r10; | ||
529 | out[7] = r11; | ||
530 | |||
531 | return r3; | ||
532 | } | ||
533 | EXPORT_SYMBOL_GPL(kvm_hypercall); | ||
534 | |||
535 | static int kvm_para_setup(void) | ||
536 | { | ||
537 | extern u32 kvm_hypercall_start; | ||
538 | struct device_node *hyper_node; | ||
539 | u32 *insts; | ||
540 | int len, i; | ||
541 | |||
542 | hyper_node = of_find_node_by_path("/hypervisor"); | ||
543 | if (!hyper_node) | ||
544 | return -1; | ||
545 | |||
546 | insts = (u32*)of_get_property(hyper_node, "hcall-instructions", &len); | ||
547 | if (len % 4) | ||
548 | return -1; | ||
549 | if (len > (4 * 4)) | ||
550 | return -1; | ||
551 | |||
552 | for (i = 0; i < (len / 4); i++) | ||
553 | kvm_patch_ins(&(&kvm_hypercall_start)[i], insts[i]); | ||
554 | |||
555 | return 0; | ||
556 | } | ||
557 | |||
558 | static __init void kvm_free_tmp(void) | ||
559 | { | ||
560 | unsigned long start, end; | ||
561 | |||
562 | start = (ulong)&kvm_tmp[kvm_tmp_index + (PAGE_SIZE - 1)] & PAGE_MASK; | ||
563 | end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK; | ||
564 | |||
565 | /* Free the tmp space we don't need */ | ||
566 | for (; start < end; start += PAGE_SIZE) { | ||
567 | ClearPageReserved(virt_to_page(start)); | ||
568 | init_page_count(virt_to_page(start)); | ||
569 | free_page(start); | ||
570 | totalram_pages++; | ||
571 | } | ||
572 | } | ||
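Editor's note: kvm_free_tmp returns only the whole pages of kvm_tmp that patching never touched: round the used watermark up to a page boundary, round the array end down, and give back everything in between. The same arithmetic on assumed numbers:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define PAGE_MASK (~(uintptr_t)(PAGE_SIZE - 1))

    int main(void)
    {
        /* Hypothetical layout: kvm_tmp spans [0x1000, 0x1000 + 64 KiB)
         * and patching consumed 5000 bytes of it. */
        uintptr_t base = 0x1000, used = 5000, size = 64 * 1024;

        uintptr_t start = (base + used + PAGE_SIZE - 1) & PAGE_MASK; /* first unused page */
        uintptr_t end   = (base + size) & PAGE_MASK;                 /* end boundary */

        printf("freeing [%#lx, %#lx): %lu pages\n",
               (unsigned long)start, (unsigned long)end,
               (unsigned long)((end - start) / PAGE_SIZE));          /* 14 pages */
        return 0;
    }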
573 | |||
574 | static int __init kvm_guest_init(void) | ||
575 | { | ||
576 | if (!kvm_para_available()) | ||
577 | goto free_tmp; | ||
578 | |||
579 | if (kvm_para_setup()) | ||
580 | goto free_tmp; | ||
581 | |||
582 | if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE)) | ||
583 | kvm_use_magic_page(); | ||
584 | |||
585 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
586 | /* Enable napping */ | ||
587 | powersave_nap = 1; | ||
588 | #endif | ||
589 | |||
590 | free_tmp: | ||
591 | kvm_free_tmp(); | ||
592 | |||
593 | return 0; | ||
594 | } | ||
595 | |||
596 | postcore_initcall(kvm_guest_init); | ||
diff --git a/arch/powerpc/kernel/kvm_emul.S b/arch/powerpc/kernel/kvm_emul.S new file mode 100644 index 000000000000..f2b1b2523e61 --- /dev/null +++ b/arch/powerpc/kernel/kvm_emul.S | |||
@@ -0,0 +1,302 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright SUSE Linux Products GmbH 2010 | ||
16 | * | ||
17 | * Authors: Alexander Graf <agraf@suse.de> | ||
18 | */ | ||
19 | |||
20 | #include <asm/ppc_asm.h> | ||
21 | #include <asm/kvm_asm.h> | ||
22 | #include <asm/reg.h> | ||
23 | #include <asm/page.h> | ||
24 | #include <asm/asm-offsets.h> | ||
25 | |||
26 | /* Hypercall entry point. Will be patched with device tree instructions. */ | ||
27 | |||
28 | .global kvm_hypercall_start | ||
29 | kvm_hypercall_start: | ||
30 | li r3, -1 | ||
31 | nop | ||
32 | nop | ||
33 | nop | ||
34 | blr | ||
35 | |||
36 | #define KVM_MAGIC_PAGE (-4096) | ||
37 | |||
38 | #ifdef CONFIG_64BIT | ||
39 | #define LL64(reg, offs, reg2) ld reg, (offs)(reg2) | ||
40 | #define STL64(reg, offs, reg2) std reg, (offs)(reg2) | ||
41 | #else | ||
42 | #define LL64(reg, offs, reg2) lwz reg, (offs + 4)(reg2) | ||
43 | #define STL64(reg, offs, reg2) stw reg, (offs + 4)(reg2) | ||
44 | #endif | ||
45 | |||
46 | #define SCRATCH_SAVE \ | ||
47 | /* Enable critical section. We are critical if \ | ||
48 | shared->critical == r1 */ \ | ||
49 | STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0); \ | ||
50 | \ | ||
51 | /* Save state */ \ | ||
52 | PPC_STL r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0); \ | ||
53 | PPC_STL r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0); \ | ||
54 | mfcr r31; \ | ||
55 | stw r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0); | ||
56 | |||
57 | #define SCRATCH_RESTORE \ | ||
58 | /* Restore state */ \ | ||
59 | PPC_LL r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0); \ | ||
60 | lwz r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0); \ | ||
61 | mtcr r30; \ | ||
62 | PPC_LL r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0); \ | ||
63 | \ | ||
64 | /* Disable critical section. We are critical if \ | ||
65 | shared->critical == r1 and r2 is always != r1 */ \ | ||
66 | STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0); | ||
67 | |||
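Editor's note: SCRATCH_SAVE/SCRATCH_RESTORE bracket every trampoline with a guest-declared critical section: while the magic page's critical field equals the guest's r1, the host is presumably expected to hold off interrupt injection, and storing r2 (always different from r1) ends the section. A toy C model of that handshake, purely illustrative and not the host's actual logic:

    #include <stdbool.h>
    #include <stdint.h>

    /* Toy model of the shared magic-page word used for the handshake. */
    struct magic_shared { uint64_t critical; };

    static bool host_may_inject(const struct magic_shared *m, uint64_t guest_r1)
    {
        /* Host defers delivery while the guest advertises r1 as critical. */
        return m->critical != guest_r1;
    }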
68 | .global kvm_emulate_mtmsrd | ||
69 | kvm_emulate_mtmsrd: | ||
70 | |||
71 | SCRATCH_SAVE | ||
72 | |||
73 | /* Put MSR & ~(MSR_EE|MSR_RI) in r31 */ | ||
74 | LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) | ||
75 | lis r30, (~(MSR_EE | MSR_RI))@h | ||
76 | ori r30, r30, (~(MSR_EE | MSR_RI))@l | ||
77 | and r31, r31, r30 | ||
78 | |||
79 | /* OR the register's (MSR_EE|MSR_RI) bits into the MSR */ | ||
80 | kvm_emulate_mtmsrd_reg: | ||
81 | ori r30, r0, 0 | ||
82 | andi. r30, r30, (MSR_EE|MSR_RI) | ||
83 | or r31, r31, r30 | ||
84 | |||
85 | /* Put MSR back into magic page */ | ||
86 | STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) | ||
87 | |||
88 | /* Check if we have to fetch an interrupt */ | ||
89 | lwz r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0) | ||
90 | cmpwi r31, 0 | ||
91 | beq+ no_check | ||
92 | |||
93 | /* Check if we may trigger an interrupt */ | ||
94 | andi. r30, r30, MSR_EE | ||
95 | beq no_check | ||
96 | |||
97 | SCRATCH_RESTORE | ||
98 | |||
99 | /* Nag hypervisor */ | ||
100 | kvm_emulate_mtmsrd_orig_ins: | ||
101 | tlbsync | ||
102 | |||
103 | b kvm_emulate_mtmsrd_branch | ||
104 | |||
105 | no_check: | ||
106 | |||
107 | SCRATCH_RESTORE | ||
108 | |||
109 | /* Go back to caller */ | ||
110 | kvm_emulate_mtmsrd_branch: | ||
111 | b . | ||
112 | kvm_emulate_mtmsrd_end: | ||
113 | |||
114 | .global kvm_emulate_mtmsrd_branch_offs | ||
115 | kvm_emulate_mtmsrd_branch_offs: | ||
116 | .long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4 | ||
117 | |||
118 | .global kvm_emulate_mtmsrd_reg_offs | ||
119 | kvm_emulate_mtmsrd_reg_offs: | ||
120 | .long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4 | ||
121 | |||
122 | .global kvm_emulate_mtmsrd_orig_ins_offs | ||
123 | kvm_emulate_mtmsrd_orig_ins_offs: | ||
124 | .long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4 | ||
125 | |||
126 | .global kvm_emulate_mtmsrd_len | ||
127 | kvm_emulate_mtmsrd_len: | ||
128 | .long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4 | ||
129 | |||
130 | |||
131 | #define MSR_SAFE_BITS (MSR_EE | MSR_CE | MSR_ME | MSR_RI) | ||
132 | #define MSR_CRITICAL_BITS ~MSR_SAFE_BITS | ||
133 | |||
134 | .global kvm_emulate_mtmsr | ||
135 | kvm_emulate_mtmsr: | ||
136 | |||
137 | SCRATCH_SAVE | ||
138 | |||
139 | /* Fetch old MSR in r31 */ | ||
140 | LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) | ||
141 | |||
142 | /* Find the changed bits between old and new MSR */ | ||
143 | kvm_emulate_mtmsr_reg1: | ||
144 | ori r30, r0, 0 | ||
145 | xor r31, r30, r31 | ||
146 | |||
147 | /* Check if we need to really do mtmsr */ | ||
148 | LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS) | ||
149 | and. r31, r31, r30 | ||
150 | |||
151 | /* No critical bits changed? Maybe we can stay in the guest. */ | ||
152 | beq maybe_stay_in_guest | ||
153 | |||
154 | do_mtmsr: | ||
155 | |||
156 | SCRATCH_RESTORE | ||
157 | |||
158 | /* Just fire off the mtmsr if it's critical */ | ||
159 | kvm_emulate_mtmsr_orig_ins: | ||
160 | mtmsr r0 | ||
161 | |||
162 | b kvm_emulate_mtmsr_branch | ||
163 | |||
164 | maybe_stay_in_guest: | ||
165 | |||
166 | /* Get the target register in r30 */ | ||
167 | kvm_emulate_mtmsr_reg2: | ||
168 | ori r30, r0, 0 | ||
169 | |||
170 | /* Check if we have to fetch an interrupt */ | ||
171 | lwz r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0) | ||
172 | cmpwi r31, 0 | ||
173 | beq+ no_mtmsr | ||
174 | |||
175 | /* Check if we may trigger an interrupt */ | ||
176 | andi. r31, r30, MSR_EE | ||
177 | beq no_mtmsr | ||
178 | |||
179 | b do_mtmsr | ||
180 | |||
181 | no_mtmsr: | ||
182 | |||
183 | /* Put MSR into magic page because we don't call mtmsr */ | ||
184 | STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) | ||
185 | |||
186 | SCRATCH_RESTORE | ||
187 | |||
188 | /* Go back to caller */ | ||
189 | kvm_emulate_mtmsr_branch: | ||
190 | b . | ||
191 | kvm_emulate_mtmsr_end: | ||
192 | |||
193 | .global kvm_emulate_mtmsr_branch_offs | ||
194 | kvm_emulate_mtmsr_branch_offs: | ||
195 | .long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4 | ||
196 | |||
197 | .global kvm_emulate_mtmsr_reg1_offs | ||
198 | kvm_emulate_mtmsr_reg1_offs: | ||
199 | .long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4 | ||
200 | |||
201 | .global kvm_emulate_mtmsr_reg2_offs | ||
202 | kvm_emulate_mtmsr_reg2_offs: | ||
203 | .long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4 | ||
204 | |||
205 | .global kvm_emulate_mtmsr_orig_ins_offs | ||
206 | kvm_emulate_mtmsr_orig_ins_offs: | ||
207 | .long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4 | ||
208 | |||
209 | .global kvm_emulate_mtmsr_len | ||
210 | kvm_emulate_mtmsr_len: | ||
211 | .long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4 | ||
212 | |||
213 | |||
214 | |||
215 | .global kvm_emulate_wrteei | ||
216 | kvm_emulate_wrteei: | ||
217 | |||
218 | SCRATCH_SAVE | ||
219 | |||
220 | /* Fetch old MSR in r31 */ | ||
221 | LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) | ||
222 | |||
223 | /* Remove MSR_EE from old MSR */ | ||
224 | li r30, 0 | ||
225 | ori r30, r30, MSR_EE | ||
226 | andc r31, r31, r30 | ||
227 | |||
228 | /* OR new MSR_EE onto the old MSR */ | ||
229 | kvm_emulate_wrteei_ee: | ||
230 | ori r31, r31, 0 | ||
231 | |||
232 | /* Write new MSR value back */ | ||
233 | STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) | ||
234 | |||
235 | SCRATCH_RESTORE | ||
236 | |||
237 | /* Go back to caller */ | ||
238 | kvm_emulate_wrteei_branch: | ||
239 | b . | ||
240 | kvm_emulate_wrteei_end: | ||
241 | |||
242 | .global kvm_emulate_wrteei_branch_offs | ||
243 | kvm_emulate_wrteei_branch_offs: | ||
244 | .long (kvm_emulate_wrteei_branch - kvm_emulate_wrteei) / 4 | ||
245 | |||
246 | .global kvm_emulate_wrteei_ee_offs | ||
247 | kvm_emulate_wrteei_ee_offs: | ||
248 | .long (kvm_emulate_wrteei_ee - kvm_emulate_wrteei) / 4 | ||
249 | |||
250 | .global kvm_emulate_wrteei_len | ||
251 | kvm_emulate_wrteei_len: | ||
252 | .long (kvm_emulate_wrteei_end - kvm_emulate_wrteei) / 4 | ||
253 | |||
254 | |||
255 | .global kvm_emulate_mtsrin | ||
256 | kvm_emulate_mtsrin: | ||
257 | |||
258 | SCRATCH_SAVE | ||
259 | |||
260 | LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) | ||
261 | andi. r31, r31, MSR_DR | MSR_IR | ||
262 | beq kvm_emulate_mtsrin_reg1 | ||
263 | |||
264 | SCRATCH_RESTORE | ||
265 | |||
266 | kvm_emulate_mtsrin_orig_ins: | ||
267 | nop | ||
268 | b kvm_emulate_mtsrin_branch | ||
269 | |||
270 | kvm_emulate_mtsrin_reg1: | ||
271 | /* (rX >> 26) & 0x3c: SR number scaled to a byte offset */ | ||
272 | rlwinm r30,r0,6,26,29 | ||
273 | |||
274 | kvm_emulate_mtsrin_reg2: | ||
275 | stw r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30) | ||
276 | |||
277 | SCRATCH_RESTORE | ||
278 | |||
279 | /* Go back to caller */ | ||
280 | kvm_emulate_mtsrin_branch: | ||
281 | b . | ||
282 | kvm_emulate_mtsrin_end: | ||
283 | |||
284 | .global kvm_emulate_mtsrin_branch_offs | ||
285 | kvm_emulate_mtsrin_branch_offs: | ||
286 | .long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4 | ||
287 | |||
288 | .global kvm_emulate_mtsrin_reg1_offs | ||
289 | kvm_emulate_mtsrin_reg1_offs: | ||
290 | .long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4 | ||
291 | |||
292 | .global kvm_emulate_mtsrin_reg2_offs | ||
293 | kvm_emulate_mtsrin_reg2_offs: | ||
294 | .long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4 | ||
295 | |||
296 | .global kvm_emulate_mtsrin_orig_ins_offs | ||
297 | kvm_emulate_mtsrin_orig_ins_offs: | ||
298 | .long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4 | ||
299 | |||
300 | .global kvm_emulate_mtsrin_len | ||
301 | kvm_emulate_mtsrin_len: | ||
302 | .long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4 | ||
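Editor's note: the `rlwinm r30,r0,6,26,29` in kvm_emulate_mtsrin deserves unpacking. It rotates rB left by 6 and keeps IBM bits 26..29, which is exactly the segment-register number from rB's top four bits scaled to a byte offset into the magic page's array of 32-bit SR values. A hedged C rendering of that bit manipulation:

    #include <assert.h>
    #include <stdint.h>

    /* rlwinm r30,r0,6,26,29: rotate left 6, keep IBM bits 26..29 (mask 0x3c).
     * For an SR index in rB's top nibble that is (rb >> 28) << 2, i.e. the
     * index scaled to a byte offset into an array of u32 SR values. */
    static uint32_t sr_byte_offset(uint32_t rb)
    {
        uint32_t rot = (rb << 6) | (rb >> 26);   /* rotate left by 6 */
        return rot & 0x0000003c;                 /* keep bits 26..29 */
    }

    int main(void)
    {
        assert(sr_byte_offset(0xB0000000u) == 11 * 4);  /* mtsrin targeting SR 11 */
        return 0;
    }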
diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S index 2a2f3c3f6d80..97ec8557f974 100644 --- a/arch/powerpc/kernel/l2cr_6xx.S +++ b/arch/powerpc/kernel/l2cr_6xx.S | |||
@@ -151,7 +151,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
151 | /**** Might be a good idea to set L2DO here - to prevent instructions | 151 | /**** Might be a good idea to set L2DO here - to prevent instructions |
152 | from getting into the cache. But since we invalidate | 152 | from getting into the cache. But since we invalidate |
153 | the next time we enable the cache it doesn't really matter. | 153 | the next time we enable the cache it doesn't really matter. |
154 | Don't do this unless you accomodate all processor variations. | 154 | Don't do this unless you accommodate all processor variations. |
155 | The bit moved on the 7450..... | 155 | The bit moved on the 7450..... |
156 | ****/ | 156 | ****/ |
157 | 157 | ||
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index c1fd0f9658fd..2b97b80d6d7d 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c | |||
@@ -52,14 +52,14 @@ static int __init add_legacy_port(struct device_node *np, int want_index, | |||
52 | phys_addr_t taddr, unsigned long irq, | 52 | phys_addr_t taddr, unsigned long irq, |
53 | upf_t flags, int irq_check_parent) | 53 | upf_t flags, int irq_check_parent) |
54 | { | 54 | { |
55 | const u32 *clk, *spd; | 55 | const __be32 *clk, *spd; |
56 | u32 clock = BASE_BAUD * 16; | 56 | u32 clock = BASE_BAUD * 16; |
57 | int index; | 57 | int index; |
58 | 58 | ||
59 | /* get clock freq. if present */ | 59 | /* get clock freq. if present */ |
60 | clk = of_get_property(np, "clock-frequency", NULL); | 60 | clk = of_get_property(np, "clock-frequency", NULL); |
61 | if (clk && *clk) | 61 | if (clk && *clk) |
62 | clock = *clk; | 62 | clock = be32_to_cpup(clk); |
63 | 63 | ||
64 | /* get default speed if present */ | 64 | /* get default speed if present */ |
65 | spd = of_get_property(np, "current-speed", NULL); | 65 | spd = of_get_property(np, "current-speed", NULL); |
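Editor's note: the switch in this hunk from dereferencing the property (`*clk`) to `be32_to_cpup(clk)` matters because flattened-device-tree cells are always stored big-endian; the old code happened to work only on big-endian CPUs. A standalone sketch of the conversion (the helper name mimics the kernel's, but this is plain C, not the kernel implementation):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    typedef uint32_t be32;   /* stand-in for the kernel's __be32 */

    static uint32_t be32_to_cpup_sketch(const be32 *p)
    {
        const unsigned char *b = (const unsigned char *)p;
        return (uint32_t)b[0] << 24 | (uint32_t)b[1] << 16 |
               (uint32_t)b[2] << 8  | (uint32_t)b[3];
    }

    int main(void)
    {
        unsigned char dt_cell[4] = { 0x00, 0x1c, 0x20, 0x00 };  /* 1843200 Hz */
        be32 prop;
        memcpy(&prop, dt_cell, 4);

        printf("clock = %u\n", be32_to_cpup_sketch(&prop));  /* same on any host */
        return 0;
    }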
@@ -109,7 +109,7 @@ static int __init add_legacy_port(struct device_node *np, int want_index, | |||
109 | legacy_serial_infos[index].taddr = taddr; | 109 | legacy_serial_infos[index].taddr = taddr; |
110 | legacy_serial_infos[index].np = of_node_get(np); | 110 | legacy_serial_infos[index].np = of_node_get(np); |
111 | legacy_serial_infos[index].clock = clock; | 111 | legacy_serial_infos[index].clock = clock; |
112 | legacy_serial_infos[index].speed = spd ? *spd : 0; | 112 | legacy_serial_infos[index].speed = spd ? be32_to_cpup(spd) : 0; |
113 | legacy_serial_infos[index].irq_check_parent = irq_check_parent; | 113 | legacy_serial_infos[index].irq_check_parent = irq_check_parent; |
114 | 114 | ||
115 | printk(KERN_DEBUG "Found legacy serial port %d for %s\n", | 115 | printk(KERN_DEBUG "Found legacy serial port %d for %s\n", |
@@ -168,7 +168,7 @@ static int __init add_legacy_soc_port(struct device_node *np, | |||
168 | static int __init add_legacy_isa_port(struct device_node *np, | 168 | static int __init add_legacy_isa_port(struct device_node *np, |
169 | struct device_node *isa_brg) | 169 | struct device_node *isa_brg) |
170 | { | 170 | { |
171 | const u32 *reg; | 171 | const __be32 *reg; |
172 | const char *typep; | 172 | const char *typep; |
173 | int index = -1; | 173 | int index = -1; |
174 | u64 taddr; | 174 | u64 taddr; |
@@ -181,7 +181,7 @@ static int __init add_legacy_isa_port(struct device_node *np, | |||
181 | return -1; | 181 | return -1; |
182 | 182 | ||
183 | /* Verify it's an IO port, we don't support anything else */ | 183 | /* Verify it's an IO port, we don't support anything else */ |
184 | if (!(reg[0] & 0x00000001)) | 184 | if (!(be32_to_cpu(reg[0]) & 0x00000001)) |
185 | return -1; | 185 | return -1; |
186 | 186 | ||
187 | /* Now look for an "ibm,aix-loc" property that gives us ordering | 187 | /* Now look for an "ibm,aix-loc" property that gives us ordering |
@@ -202,7 +202,7 @@ static int __init add_legacy_isa_port(struct device_node *np, | |||
202 | taddr = 0; | 202 | taddr = 0; |
203 | 203 | ||
204 | /* Add port, irq will be dealt with later */ | 204 | /* Add port, irq will be dealt with later */ |
205 | return add_legacy_port(np, index, UPIO_PORT, reg[1], taddr, | 205 | return add_legacy_port(np, index, UPIO_PORT, be32_to_cpu(reg[1]), taddr, |
206 | NO_IRQ, UPF_BOOT_AUTOCONF, 0); | 206 | NO_IRQ, UPF_BOOT_AUTOCONF, 0); |
207 | 207 | ||
208 | } | 208 | } |
@@ -251,9 +251,9 @@ static int __init add_legacy_pci_port(struct device_node *np, | |||
251 | * we get to their "reg" property | 251 | * we get to their "reg" property |
252 | */ | 252 | */ |
253 | if (np != pci_dev) { | 253 | if (np != pci_dev) { |
254 | const u32 *reg = of_get_property(np, "reg", NULL); | 254 | const __be32 *reg = of_get_property(np, "reg", NULL); |
255 | if (reg && (*reg < 4)) | 255 | if (reg && (be32_to_cpup(reg) < 4)) |
256 | index = lindex = *reg; | 256 | index = lindex = be32_to_cpup(reg); |
257 | } | 257 | } |
258 | 258 | ||
259 | /* Local index means it's the Nth port in the PCI chip. Unfortunately | 259 | /* Local index means it's the Nth port in the PCI chip. Unfortunately |
@@ -330,9 +330,11 @@ void __init find_legacy_serial_ports(void) | |||
330 | if (!parent) | 330 | if (!parent) |
331 | continue; | 331 | continue; |
332 | if (of_match_node(legacy_serial_parents, parent) != NULL) { | 332 | if (of_match_node(legacy_serial_parents, parent) != NULL) { |
333 | index = add_legacy_soc_port(np, np); | 333 | if (of_device_is_available(np)) { |
334 | if (index >= 0 && np == stdout) | 334 | index = add_legacy_soc_port(np, np); |
335 | legacy_serial_console = index; | 335 | if (index >= 0 && np == stdout) |
336 | legacy_serial_console = index; | ||
337 | } | ||
336 | } | 338 | } |
337 | of_node_put(parent); | 339 | of_node_put(parent); |
338 | } | 340 | } |
@@ -507,7 +509,7 @@ static int __init check_legacy_serial_console(void) | |||
507 | struct device_node *prom_stdout = NULL; | 509 | struct device_node *prom_stdout = NULL; |
508 | int i, speed = 0, offset = 0; | 510 | int i, speed = 0, offset = 0; |
509 | const char *name; | 511 | const char *name; |
510 | const u32 *spd; | 512 | const __be32 *spd; |
511 | 513 | ||
512 | DBG(" -> check_legacy_serial_console()\n"); | 514 | DBG(" -> check_legacy_serial_console()\n"); |
513 | 515 | ||
@@ -547,7 +549,7 @@ static int __init check_legacy_serial_console(void) | |||
547 | } | 549 | } |
548 | spd = of_get_property(prom_stdout, "current-speed", NULL); | 550 | spd = of_get_property(prom_stdout, "current-speed", NULL); |
549 | if (spd) | 551 | if (spd) |
550 | speed = *spd; | 552 | speed = be32_to_cpup(spd); |
551 | 553 | ||
552 | if (strcmp(name, "serial") != 0) | 554 | if (strcmp(name, "serial") != 0) |
553 | goto not_found; | 555 | goto not_found; |
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c index 50362b6ef6e9..84daabe2fcba 100644 --- a/arch/powerpc/kernel/lparcfg.c +++ b/arch/powerpc/kernel/lparcfg.c | |||
@@ -56,7 +56,7 @@ static unsigned long get_purr(void) | |||
56 | 56 | ||
57 | for_each_possible_cpu(cpu) { | 57 | for_each_possible_cpu(cpu) { |
58 | if (firmware_has_feature(FW_FEATURE_ISERIES)) | 58 | if (firmware_has_feature(FW_FEATURE_ISERIES)) |
59 | sum_purr += lppaca[cpu].emulated_time_base; | 59 | sum_purr += lppaca_of(cpu).emulated_time_base; |
60 | else { | 60 | else { |
61 | struct cpu_usage *cu; | 61 | struct cpu_usage *cu; |
62 | 62 | ||
@@ -132,34 +132,6 @@ static int iseries_lparcfg_data(struct seq_file *m, void *v) | |||
132 | /* | 132 | /* |
133 | * Methods used to fetch LPAR data when running on a pSeries platform. | 133 | * Methods used to fetch LPAR data when running on a pSeries platform. |
134 | */ | 134 | */ |
135 | /** | ||
136 | * h_get_mpp | ||
137 | * H_GET_MPP hcall returns info in 7 parms | ||
138 | */ | ||
139 | int h_get_mpp(struct hvcall_mpp_data *mpp_data) | ||
140 | { | ||
141 | int rc; | ||
142 | unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; | ||
143 | |||
144 | rc = plpar_hcall9(H_GET_MPP, retbuf); | ||
145 | |||
146 | mpp_data->entitled_mem = retbuf[0]; | ||
147 | mpp_data->mapped_mem = retbuf[1]; | ||
148 | |||
149 | mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff; | ||
150 | mpp_data->pool_num = retbuf[2] & 0xffff; | ||
151 | |||
152 | mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff; | ||
153 | mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff; | ||
154 | mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff; | ||
155 | |||
156 | mpp_data->pool_size = retbuf[4]; | ||
157 | mpp_data->loan_request = retbuf[5]; | ||
158 | mpp_data->backing_mem = retbuf[6]; | ||
159 | |||
160 | return rc; | ||
161 | } | ||
162 | EXPORT_SYMBOL(h_get_mpp); | ||
163 | 135 | ||
164 | struct hvcall_ppp_data { | 136 | struct hvcall_ppp_data { |
165 | u64 entitlement; | 137 | u64 entitlement; |
@@ -262,8 +234,8 @@ static void parse_ppp_data(struct seq_file *m) | |||
262 | seq_printf(m, "system_active_processors=%d\n", | 234 | seq_printf(m, "system_active_processors=%d\n", |
263 | ppp_data.active_system_procs); | 235 | ppp_data.active_system_procs); |
264 | 236 | ||
265 | /* pool related entries are apropriate for shared configs */ | 237 | /* pool related entries are appropriate for shared configs */ |
266 | if (lppaca[0].shared_proc) { | 238 | if (lppaca_of(0).shared_proc) { |
267 | unsigned long pool_idle_time, pool_procs; | 239 | unsigned long pool_idle_time, pool_procs; |
268 | 240 | ||
269 | seq_printf(m, "pool=%d\n", ppp_data.pool_num); | 241 | seq_printf(m, "pool=%d\n", ppp_data.pool_num); |
@@ -345,6 +317,30 @@ static void parse_mpp_data(struct seq_file *m) | |||
345 | seq_printf(m, "backing_memory=%ld bytes\n", mpp_data.backing_mem); | 317 | seq_printf(m, "backing_memory=%ld bytes\n", mpp_data.backing_mem); |
346 | } | 318 | } |
347 | 319 | ||
320 | /** | ||
321 | * parse_mpp_x_data | ||
322 | * Parse out data returned from h_get_mpp_x | ||
323 | */ | ||
324 | static void parse_mpp_x_data(struct seq_file *m) | ||
325 | { | ||
326 | struct hvcall_mpp_x_data mpp_x_data; | ||
327 | |||
328 | if (!firmware_has_feature(FW_FEATURE_XCMO)) | ||
329 | return; | ||
330 | if (h_get_mpp_x(&mpp_x_data)) | ||
331 | return; | ||
332 | |||
333 | seq_printf(m, "coalesced_bytes=%ld\n", mpp_x_data.coalesced_bytes); | ||
334 | |||
335 | if (mpp_x_data.pool_coalesced_bytes) | ||
336 | seq_printf(m, "pool_coalesced_bytes=%ld\n", | ||
337 | mpp_x_data.pool_coalesced_bytes); | ||
338 | if (mpp_x_data.pool_purr_cycles) | ||
339 | seq_printf(m, "coalesce_pool_purr=%ld\n", mpp_x_data.pool_purr_cycles); | ||
340 | if (mpp_x_data.pool_spurr_cycles) | ||
341 | seq_printf(m, "coalesce_pool_spurr=%ld\n", mpp_x_data.pool_spurr_cycles); | ||
342 | } | ||
343 | |||
348 | #define SPLPAR_CHARACTERISTICS_TOKEN 20 | 344 | #define SPLPAR_CHARACTERISTICS_TOKEN 20 |
349 | #define SPLPAR_MAXLENGTH 1026*(sizeof(char)) | 345 | #define SPLPAR_MAXLENGTH 1026*(sizeof(char)) |
350 | 346 | ||
@@ -460,8 +456,8 @@ static void pseries_cmo_data(struct seq_file *m) | |||
460 | return; | 456 | return; |
461 | 457 | ||
462 | for_each_possible_cpu(cpu) { | 458 | for_each_possible_cpu(cpu) { |
463 | cmo_faults += lppaca[cpu].cmo_faults; | 459 | cmo_faults += lppaca_of(cpu).cmo_faults; |
464 | cmo_fault_time += lppaca[cpu].cmo_fault_time; | 460 | cmo_fault_time += lppaca_of(cpu).cmo_fault_time; |
465 | } | 461 | } |
466 | 462 | ||
467 | seq_printf(m, "cmo_faults=%lu\n", cmo_faults); | 463 | seq_printf(m, "cmo_faults=%lu\n", cmo_faults); |
@@ -479,8 +475,8 @@ static void splpar_dispatch_data(struct seq_file *m) | |||
479 | unsigned long dispatch_dispersions = 0; | 475 | unsigned long dispatch_dispersions = 0; |
480 | 476 | ||
481 | for_each_possible_cpu(cpu) { | 477 | for_each_possible_cpu(cpu) { |
482 | dispatches += lppaca[cpu].yield_count; | 478 | dispatches += lppaca_of(cpu).yield_count; |
483 | dispatch_dispersions += lppaca[cpu].dispersion_count; | 479 | dispatch_dispersions += lppaca_of(cpu).dispersion_count; |
484 | } | 480 | } |
485 | 481 | ||
486 | seq_printf(m, "dispatches=%lu\n", dispatches); | 482 | seq_printf(m, "dispatches=%lu\n", dispatches); |
@@ -520,6 +516,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) | |||
520 | parse_system_parameter_string(m); | 516 | parse_system_parameter_string(m); |
521 | parse_ppp_data(m); | 517 | parse_ppp_data(m); |
522 | parse_mpp_data(m); | 518 | parse_mpp_data(m); |
519 | parse_mpp_x_data(m); | ||
523 | pseries_cmo_data(m); | 520 | pseries_cmo_data(m); |
524 | splpar_dispatch_data(m); | 521 | splpar_dispatch_data(m); |
525 | 522 | ||
@@ -545,7 +542,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) | |||
545 | seq_printf(m, "partition_potential_processors=%d\n", | 542 | seq_printf(m, "partition_potential_processors=%d\n", |
546 | partition_potential_processors); | 543 | partition_potential_processors); |
547 | 544 | ||
548 | seq_printf(m, "shared_processor_mode=%d\n", lppaca[0].shared_proc); | 545 | seq_printf(m, "shared_processor_mode=%d\n", lppaca_of(0).shared_proc); |
549 | 546 | ||
550 | seq_printf(m, "slb_size=%d\n", mmu_slb_size); | 547 | seq_printf(m, "slb_size=%d\n", mmu_slb_size); |
551 | 548 | ||
@@ -780,6 +777,7 @@ static const struct file_operations lparcfg_fops = { | |||
780 | .write = lparcfg_write, | 777 | .write = lparcfg_write, |
781 | .open = lparcfg_open, | 778 | .open = lparcfg_open, |
782 | .release = single_release, | 779 | .release = single_release, |
780 | .llseek = seq_lseek, | ||
783 | }; | 781 | }; |
784 | 782 | ||
785 | static int __init lparcfg_init(void) | 783 | static int __init lparcfg_init(void) |
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index dd6c141f1662..7ee50f0547cb 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c | |||
@@ -14,16 +14,41 @@ | |||
14 | #include <linux/threads.h> | 14 | #include <linux/threads.h> |
15 | #include <linux/memblock.h> | 15 | #include <linux/memblock.h> |
16 | #include <linux/of.h> | 16 | #include <linux/of.h> |
17 | #include <linux/irq.h> | ||
18 | #include <linux/ftrace.h> | ||
19 | |||
17 | #include <asm/machdep.h> | 20 | #include <asm/machdep.h> |
18 | #include <asm/prom.h> | 21 | #include <asm/prom.h> |
19 | #include <asm/sections.h> | 22 | #include <asm/sections.h> |
20 | 23 | ||
24 | void machine_kexec_mask_interrupts(void) { | ||
25 | unsigned int i; | ||
26 | |||
27 | for_each_irq(i) { | ||
28 | struct irq_desc *desc = irq_to_desc(i); | ||
29 | struct irq_chip *chip; | ||
30 | |||
31 | if (!desc) | ||
32 | continue; | ||
33 | |||
34 | chip = irq_desc_get_chip(desc); | ||
35 | if (!chip) | ||
36 | continue; | ||
37 | |||
38 | if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data)) | ||
39 | chip->irq_eoi(&desc->irq_data); | ||
40 | |||
41 | if (chip->irq_mask) | ||
42 | chip->irq_mask(&desc->irq_data); | ||
43 | |||
44 | if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data)) | ||
45 | chip->irq_disable(&desc->irq_data); | ||
46 | } | ||
47 | } | ||
48 | |||
21 | void machine_crash_shutdown(struct pt_regs *regs) | 49 | void machine_crash_shutdown(struct pt_regs *regs) |
22 | { | 50 | { |
23 | if (ppc_md.machine_crash_shutdown) | 51 | default_machine_crash_shutdown(regs); |
24 | ppc_md.machine_crash_shutdown(regs); | ||
25 | else | ||
26 | default_machine_crash_shutdown(regs); | ||
27 | } | 52 | } |
28 | 53 | ||
29 | /* | 54 | /* |
@@ -41,8 +66,6 @@ int machine_kexec_prepare(struct kimage *image) | |||
41 | 66 | ||
42 | void machine_kexec_cleanup(struct kimage *image) | 67 | void machine_kexec_cleanup(struct kimage *image) |
43 | { | 68 | { |
44 | if (ppc_md.machine_kexec_cleanup) | ||
45 | ppc_md.machine_kexec_cleanup(image); | ||
46 | } | 69 | } |
47 | 70 | ||
48 | void arch_crash_save_vmcoreinfo(void) | 71 | void arch_crash_save_vmcoreinfo(void) |
@@ -63,11 +86,17 @@ void arch_crash_save_vmcoreinfo(void) | |||
63 | */ | 86 | */ |
64 | void machine_kexec(struct kimage *image) | 87 | void machine_kexec(struct kimage *image) |
65 | { | 88 | { |
89 | int save_ftrace_enabled; | ||
90 | |||
91 | save_ftrace_enabled = __ftrace_enabled_save(); | ||
92 | |||
66 | if (ppc_md.machine_kexec) | 93 | if (ppc_md.machine_kexec) |
67 | ppc_md.machine_kexec(image); | 94 | ppc_md.machine_kexec(image); |
68 | else | 95 | else |
69 | default_machine_kexec(image); | 96 | default_machine_kexec(image); |
70 | 97 | ||
98 | __ftrace_enabled_restore(save_ftrace_enabled); | ||
99 | |||
71 | /* Fall back to normal restart if we're still alive. */ | 100 | /* Fall back to normal restart if we're still alive. */ |
72 | machine_restart(NULL); | 101 | machine_restart(NULL); |
73 | for(;;); | 102 | for(;;); |
diff --git a/arch/powerpc/kernel/machine_kexec_32.c b/arch/powerpc/kernel/machine_kexec_32.c index ae63a964b858..e63f2e7d2efb 100644 --- a/arch/powerpc/kernel/machine_kexec_32.c +++ b/arch/powerpc/kernel/machine_kexec_32.c | |||
@@ -39,6 +39,10 @@ void default_machine_kexec(struct kimage *image) | |||
39 | /* Interrupts aren't acceptable while we reboot */ | 39 | /* Interrupts aren't acceptable while we reboot */ |
40 | local_irq_disable(); | 40 | local_irq_disable(); |
41 | 41 | ||
42 | /* mask each interrupt so we are in a more sane state for the | ||
43 | * kexec kernel */ | ||
44 | machine_kexec_mask_interrupts(); | ||
45 | |||
42 | page_list = image->head; | 46 | page_list = image->head; |
43 | 47 | ||
44 | /* we need both effective and real address here */ | 48 | /* we need both effective and real address here */ |
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S index 2d29752cbe16..b69463ec2010 100644 --- a/arch/powerpc/kernel/misc.S +++ b/arch/powerpc/kernel/misc.S | |||
@@ -122,8 +122,3 @@ _GLOBAL(longjmp) | |||
122 | mtlr r0 | 122 | mtlr r0 |
123 | mr r3,r4 | 123 | mr r3,r4 |
124 | blr | 124 | blr |
125 | |||
126 | _GLOBAL(__setup_cpu_power7) | ||
127 | _GLOBAL(__restore_cpu_power7) | ||
128 | /* place holder */ | ||
129 | blr | ||
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index a7a570dcdd57..998a10028608 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <asm/processor.h> | 30 | #include <asm/processor.h> |
31 | #include <asm/kexec.h> | 31 | #include <asm/kexec.h> |
32 | #include <asm/bug.h> | 32 | #include <asm/bug.h> |
33 | #include <asm/ptrace.h> | ||
33 | 34 | ||
34 | .text | 35 | .text |
35 | 36 | ||
@@ -693,6 +694,17 @@ _GLOBAL(kernel_thread) | |||
693 | addi r1,r1,16 | 694 | addi r1,r1,16 |
694 | blr | 695 | blr |
695 | 696 | ||
697 | #ifdef CONFIG_SMP | ||
698 | _GLOBAL(start_secondary_resume) | ||
699 | /* Reset stack */ | ||
700 | rlwinm r1,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ | ||
701 | addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD | ||
702 | li r3,0 | ||
703 | stw r3,0(r1) /* Zero the stack frame pointer */ | ||
704 | bl start_secondary | ||
705 | b . | ||
706 | #endif /* CONFIG_SMP */ | ||
707 | |||
696 | /* | 708 | /* |
697 | * This routine is just here to keep GCC happy - sigh... | 709 | * This routine is just here to keep GCC happy - sigh... |
698 | */ | 710 | */ |
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index e5144906a56d..e89df59cdc5a 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <asm/cputable.h> | 25 | #include <asm/cputable.h> |
26 | #include <asm/thread_info.h> | 26 | #include <asm/thread_info.h> |
27 | #include <asm/kexec.h> | 27 | #include <asm/kexec.h> |
28 | #include <asm/ptrace.h> | ||
28 | 29 | ||
29 | .text | 30 | .text |
30 | 31 | ||
@@ -461,7 +462,8 @@ _GLOBAL(disable_kernel_fp) | |||
461 | * wait for the flag to change, indicating this kernel is going away but | 462 | * wait for the flag to change, indicating this kernel is going away but |
462 | * the slave code for the next one is at addresses 0 to 100. | 463 | * the slave code for the next one is at addresses 0 to 100. |
463 | * | 464 | * |
464 | * This is used by all slaves. | 465 | * This is used by all slaves, even those that did not find a matching |
466 | * paca in the secondary startup code. | ||
465 | * | 467 | * |
466 | * Physical (hardware) cpu id should be in r3. | 468 | * Physical (hardware) cpu id should be in r3. |
467 | */ | 469 | */ |
@@ -470,10 +472,6 @@ _GLOBAL(kexec_wait) | |||
470 | 1: mflr r5 | 472 | 1: mflr r5 |
471 | addi r5,r5,kexec_flag-1b | 473 | addi r5,r5,kexec_flag-1b |
472 | 474 | ||
473 | li r4,KEXEC_STATE_REAL_MODE | ||
474 | stb r4,PACAKEXECSTATE(r13) | ||
475 | SYNC | ||
476 | |||
477 | 99: HMT_LOW | 475 | 99: HMT_LOW |
478 | #ifdef CONFIG_KEXEC /* use no memory without kexec */ | 476 | #ifdef CONFIG_KEXEC /* use no memory without kexec */ |
479 | lwz r4,0(r5) | 477 | lwz r4,0(r5) |
@@ -498,11 +496,17 @@ kexec_flag: | |||
498 | * | 496 | * |
499 | * get phys id from paca | 497 | * get phys id from paca |
500 | * switch to real mode | 498 | * switch to real mode |
499 | * mark the paca as no longer used | ||
501 | * join other cpus in kexec_wait(phys_id) | 500 | * join other cpus in kexec_wait(phys_id) |
502 | */ | 501 | */ |
503 | _GLOBAL(kexec_smp_wait) | 502 | _GLOBAL(kexec_smp_wait) |
504 | lhz r3,PACAHWCPUID(r13) | 503 | lhz r3,PACAHWCPUID(r13) |
505 | bl real_mode | 504 | bl real_mode |
505 | |||
506 | li r4,KEXEC_STATE_REAL_MODE | ||
507 | stb r4,PACAKEXECSTATE(r13) | ||
508 | SYNC | ||
509 | |||
506 | b .kexec_wait | 510 | b .kexec_wait |
507 | 511 | ||
508 | /* | 512 | /* |
diff --git a/arch/powerpc/kernel/mpc7450-pmu.c b/arch/powerpc/kernel/mpc7450-pmu.c index 09d72028f317..2cc5e0301d0b 100644 --- a/arch/powerpc/kernel/mpc7450-pmu.c +++ b/arch/powerpc/kernel/mpc7450-pmu.c | |||
@@ -414,4 +414,4 @@ static int init_mpc7450_pmu(void) | |||
414 | return register_power_pmu(&mpc7450_pmu); | 414 | return register_power_pmu(&mpc7450_pmu); |
415 | } | 415 | } |
416 | 416 | ||
417 | arch_initcall(init_mpc7450_pmu); | 417 | early_initcall(init_mpc7450_pmu); |
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index 9cf197f01e94..bec1e930ed73 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c | |||
@@ -34,15 +34,26 @@ | |||
34 | 34 | ||
35 | #undef DEBUG_NVRAM | 35 | #undef DEBUG_NVRAM |
36 | 36 | ||
37 | static struct nvram_partition * nvram_part; | 37 | #define NVRAM_HEADER_LEN sizeof(struct nvram_header) |
38 | static long nvram_error_log_index = -1; | 38 | #define NVRAM_BLOCK_LEN NVRAM_HEADER_LEN |
39 | static long nvram_error_log_size = 0; | 39 | |
40 | /* If you change this size, change NVNAME_LEN to match. */ | ||
41 | struct nvram_header { | ||
42 | unsigned char signature; | ||
43 | unsigned char checksum; | ||
44 | unsigned short length; | ||
45 | /* Terminating null required only for names < 12 chars. */ | ||
46 | char name[12]; | ||
47 | }; | ||
40 | 48 | ||
41 | struct err_log_info { | 49 | struct nvram_partition { |
42 | int error_type; | 50 | struct list_head partition; |
43 | unsigned int seq_num; | 51 | struct nvram_header header; |
52 | unsigned int index; | ||
44 | }; | 53 | }; |
45 | 54 | ||
55 | static LIST_HEAD(nvram_partitions); | ||
56 | |||
46 | static loff_t dev_nvram_llseek(struct file *file, loff_t offset, int origin) | 57 | static loff_t dev_nvram_llseek(struct file *file, loff_t offset, int origin) |
47 | { | 58 | { |
48 | int size; | 59 | int size; |
@@ -186,14 +197,12 @@ static struct miscdevice nvram_dev = { | |||
186 | #ifdef DEBUG_NVRAM | 197 | #ifdef DEBUG_NVRAM |
187 | static void __init nvram_print_partitions(char * label) | 198 | static void __init nvram_print_partitions(char * label) |
188 | { | 199 | { |
189 | struct list_head * p; | ||
190 | struct nvram_partition * tmp_part; | 200 | struct nvram_partition * tmp_part; |
191 | 201 | ||
192 | printk(KERN_WARNING "--------%s---------\n", label); | 202 | printk(KERN_WARNING "--------%s---------\n", label); |
193 | printk(KERN_WARNING "indx\t\tsig\tchks\tlen\tname\n"); | 203 | printk(KERN_WARNING "indx\t\tsig\tchks\tlen\tname\n"); |
194 | list_for_each(p, &nvram_part->partition) { | 204 | list_for_each_entry(tmp_part, &nvram_partitions, partition) { |
195 | tmp_part = list_entry(p, struct nvram_partition, partition); | 205 | printk(KERN_WARNING "%4d \t%02x\t%02x\t%d\t%12s\n", |
196 | printk(KERN_WARNING "%4d \t%02x\t%02x\t%d\t%s\n", | ||
197 | tmp_part->index, tmp_part->header.signature, | 206 | tmp_part->index, tmp_part->header.signature, |
198 | tmp_part->header.checksum, tmp_part->header.length, | 207 | tmp_part->header.checksum, tmp_part->header.length, |
199 | tmp_part->header.name); | 208 | tmp_part->header.name); |
@@ -228,95 +237,136 @@ static unsigned char __init nvram_checksum(struct nvram_header *p) | |||
228 | return c_sum; | 237 | return c_sum; |
229 | } | 238 | } |
230 | 239 | ||
231 | static int __init nvram_remove_os_partition(void) | 240 | /* |
241 | * Per the criteria passed via nvram_remove_partition(), should this | ||
242 | * partition be removed? 1=remove, 0=keep | ||
243 | */ | ||
244 | static int nvram_can_remove_partition(struct nvram_partition *part, | ||
245 | const char *name, int sig, const char *exceptions[]) | ||
246 | { | ||
247 | if (part->header.signature != sig) | ||
248 | return 0; | ||
249 | if (name) { | ||
250 | if (strncmp(name, part->header.name, 12)) | ||
251 | return 0; | ||
252 | } else if (exceptions) { | ||
253 | const char **except; | ||
254 | for (except = exceptions; *except; except++) { | ||
255 | if (!strncmp(*except, part->header.name, 12)) | ||
256 | return 0; | ||
257 | } | ||
258 | } | ||
259 | return 1; | ||
260 | } | ||
261 | |||
262 | /** | ||
263 | * nvram_remove_partition - Remove one or more partitions in nvram | ||
264 | * @name: name of the partition to remove, or NULL for a | ||
265 | * signature only match | ||
266 | * @sig: signature of the partition(s) to remove | ||
267 | * @exceptions: When removing all partitions with a matching signature, | ||
268 | * leave these alone. | ||
269 | */ | ||
270 | |||
271 | int __init nvram_remove_partition(const char *name, int sig, | ||
272 | const char *exceptions[]) | ||
232 | { | 273 | { |
233 | struct list_head *i; | 274 | struct nvram_partition *part, *prev, *tmp; |
234 | struct list_head *j; | ||
235 | struct nvram_partition * part; | ||
236 | struct nvram_partition * cur_part; | ||
237 | int rc; | 275 | int rc; |
238 | 276 | ||
239 | list_for_each(i, &nvram_part->partition) { | 277 | list_for_each_entry(part, &nvram_partitions, partition) { |
240 | part = list_entry(i, struct nvram_partition, partition); | 278 | if (!nvram_can_remove_partition(part, name, sig, exceptions)) |
241 | if (part->header.signature != NVRAM_SIG_OS) | ||
242 | continue; | 279 | continue; |
243 | 280 | ||
244 | /* Make os partition a free partition */ | 281 | /* Make partition a free partition */ |
245 | part->header.signature = NVRAM_SIG_FREE; | 282 | part->header.signature = NVRAM_SIG_FREE; |
246 | sprintf(part->header.name, "wwwwwwwwwwww"); | 283 | strncpy(part->header.name, "wwwwwwwwwwww", 12); |
247 | part->header.checksum = nvram_checksum(&part->header); | 284 | part->header.checksum = nvram_checksum(&part->header); |
248 | |||
249 | /* Merge contiguous free partitions backwards */ | ||
250 | list_for_each_prev(j, &part->partition) { | ||
251 | cur_part = list_entry(j, struct nvram_partition, partition); | ||
252 | if (cur_part == nvram_part || cur_part->header.signature != NVRAM_SIG_FREE) { | ||
253 | break; | ||
254 | } | ||
255 | |||
256 | part->header.length += cur_part->header.length; | ||
257 | part->header.checksum = nvram_checksum(&part->header); | ||
258 | part->index = cur_part->index; | ||
259 | |||
260 | list_del(&cur_part->partition); | ||
261 | kfree(cur_part); | ||
262 | j = &part->partition; /* fixup our loop */ | ||
263 | } | ||
264 | |||
265 | /* Merge contiguous free partitions forwards */ | ||
266 | list_for_each(j, &part->partition) { | ||
267 | cur_part = list_entry(j, struct nvram_partition, partition); | ||
268 | if (cur_part == nvram_part || cur_part->header.signature != NVRAM_SIG_FREE) { | ||
269 | break; | ||
270 | } | ||
271 | |||
272 | part->header.length += cur_part->header.length; | ||
273 | part->header.checksum = nvram_checksum(&part->header); | ||
274 | |||
275 | list_del(&cur_part->partition); | ||
276 | kfree(cur_part); | ||
277 | j = &part->partition; /* fixup our loop */ | ||
278 | } | ||
279 | |||
280 | rc = nvram_write_header(part); | 285 | rc = nvram_write_header(part); |
281 | if (rc <= 0) { | 286 | if (rc <= 0) { |
282 | printk(KERN_ERR "nvram_remove_os_partition: nvram_write failed (%d)\n", rc); | 287 | printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc); |
283 | return rc; | 288 | return rc; |
284 | } | 289 | } |
290 | } | ||
285 | 291 | ||
292 | /* Merge contiguous ones */ | ||
293 | prev = NULL; | ||
294 | list_for_each_entry_safe(part, tmp, &nvram_partitions, partition) { | ||
295 | if (part->header.signature != NVRAM_SIG_FREE) { | ||
296 | prev = NULL; | ||
297 | continue; | ||
298 | } | ||
299 | if (prev) { | ||
300 | prev->header.length += part->header.length; | ||
301 | prev->header.checksum = nvram_checksum(&prev->header); | ||
302 | rc = nvram_write_header(prev); | ||
303 | if (rc <= 0) { | ||
304 | printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc); | ||
305 | return rc; | ||
306 | } | ||
307 | list_del(&part->partition); | ||
308 | kfree(part); | ||
309 | } else | ||
310 | prev = part; | ||
286 | } | 311 | } |
287 | 312 | ||
288 | return 0; | 313 | return 0; |
289 | } | 314 | } |
290 | 315 | ||
291 | /* nvram_create_os_partition | 316 | /** |
317 | * nvram_create_partition - Create a partition in nvram | ||
318 | * @name: name of the partition to create | ||
319 | * @sig: signature of the partition to create | ||
320 | * @req_size: size of data to allocate in bytes | ||
321 | * @min_size: minimum acceptable size (0 means req_size) | ||
292 | * | 322 | * |
293 | * Create a OS linux partition to buffer error logs. | 323 | * Returns a negative error code or a positive nvram index |
294 | * Will create a partition starting at the first free | 324 | * of the beginning of the data area of the newly created |
295 | * space found if space has enough room. | 325 | * partition. If you provided a min_size smaller than req_size |
326 | * you need to query for the actual size yourself after the | ||
327 | * call using nvram_partition_get_size(). | ||
296 | */ | 328 | */ |
297 | static int __init nvram_create_os_partition(void) | 329 | loff_t __init nvram_create_partition(const char *name, int sig, |
330 | int req_size, int min_size) | ||
298 | { | 331 | { |
299 | struct nvram_partition *part; | 332 | struct nvram_partition *part; |
300 | struct nvram_partition *new_part; | 333 | struct nvram_partition *new_part; |
301 | struct nvram_partition *free_part = NULL; | 334 | struct nvram_partition *free_part = NULL; |
302 | int seq_init[2] = { 0, 0 }; | 335 | static char nv_init_vals[16]; |
303 | loff_t tmp_index; | 336 | loff_t tmp_index; |
304 | long size = 0; | 337 | long size = 0; |
305 | int rc; | 338 | int rc; |
306 | 339 | ||
340 | /* Convert sizes from bytes to blocks */ | ||
341 | req_size = _ALIGN_UP(req_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN; | ||
342 | min_size = _ALIGN_UP(min_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN; | ||
343 | |||
344 | /* If no minimum size specified, make it the same as the | ||
345 | * requested size | ||
346 | */ | ||
347 | if (min_size == 0) | ||
348 | min_size = req_size; | ||
349 | if (min_size > req_size) | ||
350 | return -EINVAL; | ||
351 | |||
352 | /* Now add one block to each for the header */ | ||
353 | req_size += 1; | ||
354 | min_size += 1; | ||
355 | |||
307 | /* Find a free partition that will give us the maximum needed size | 356 | /* Find a free partition that will give us the maximum needed size |
308 | If can't find one that will give us the minimum size needed */ | 357 | If can't find one that will give us the minimum size needed */ |
309 | list_for_each_entry(part, &nvram_part->partition, partition) { | 358 | list_for_each_entry(part, &nvram_partitions, partition) { |
310 | if (part->header.signature != NVRAM_SIG_FREE) | 359 | if (part->header.signature != NVRAM_SIG_FREE) |
311 | continue; | 360 | continue; |
312 | 361 | ||
313 | if (part->header.length >= NVRAM_MAX_REQ) { | 362 | if (part->header.length >= req_size) { |
314 | size = NVRAM_MAX_REQ; | 363 | size = req_size; |
315 | free_part = part; | 364 | free_part = part; |
316 | break; | 365 | break; |
317 | } | 366 | } |
318 | if (!size && part->header.length >= NVRAM_MIN_REQ) { | 367 | if (part->header.length > size && |
319 | size = NVRAM_MIN_REQ; | 368 | part->header.length >= min_size) { |
369 | size = part->header.length; | ||
320 | free_part = part; | 370 | free_part = part; |
321 | } | 371 | } |
322 | } | 372 | } |
@@ -326,136 +376,95 @@ static int __init nvram_create_os_partition(void) | |||
326 | /* Create our OS partition */ | 376 | /* Create the new partition */ |
327 | new_part = kmalloc(sizeof(*new_part), GFP_KERNEL); | 377 | new_part = kmalloc(sizeof(*new_part), GFP_KERNEL); |
328 | if (!new_part) { | 378 | if (!new_part) { |
329 | printk(KERN_ERR "nvram_create_os_partition: kmalloc failed\n"); | 379 | pr_err("nvram_create_partition: kmalloc failed\n"); |
330 | return -ENOMEM; | 380 | return -ENOMEM; |
331 | } | 381 | } |
332 | 382 | ||
333 | new_part->index = free_part->index; | 383 | new_part->index = free_part->index; |
334 | new_part->header.signature = NVRAM_SIG_OS; | 384 | new_part->header.signature = sig; |
335 | new_part->header.length = size; | 385 | new_part->header.length = size; |
336 | strcpy(new_part->header.name, "ppc64,linux"); | 386 | strncpy(new_part->header.name, name, 12); |
337 | new_part->header.checksum = nvram_checksum(&new_part->header); | 387 | new_part->header.checksum = nvram_checksum(&new_part->header); |
338 | 388 | ||
339 | rc = nvram_write_header(new_part); | 389 | rc = nvram_write_header(new_part); |
340 | if (rc <= 0) { | 390 | if (rc <= 0) { |
341 | printk(KERN_ERR "nvram_create_os_partition: nvram_write_header " | 391 | pr_err("nvram_create_partition: nvram_write_header " |
342 | "failed (%d)\n", rc); | ||
343 | return rc; | ||
344 | } | ||
345 | |||
346 | /* make sure and initialize to zero the sequence number and the error | ||
347 | type logged */ | ||
348 | tmp_index = new_part->index + NVRAM_HEADER_LEN; | ||
349 | rc = ppc_md.nvram_write((char *)&seq_init, sizeof(seq_init), &tmp_index); | ||
350 | if (rc <= 0) { | ||
351 | printk(KERN_ERR "nvram_create_os_partition: nvram_write " | ||
352 | "failed (%d)\n", rc); | 392 | "failed (%d)\n", rc); |
353 | return rc; | 393 | return rc; |
354 | } | 394 | } |
355 | |||
356 | nvram_error_log_index = new_part->index + NVRAM_HEADER_LEN; | ||
357 | nvram_error_log_size = ((part->header.length - 1) * | ||
358 | NVRAM_BLOCK_LEN) - sizeof(struct err_log_info); | ||
359 | |||
360 | list_add_tail(&new_part->partition, &free_part->partition); | 395 | list_add_tail(&new_part->partition, &free_part->partition); |
361 | 396 | ||
362 | if (free_part->header.length <= size) { | 397 | /* Adjust or remove the partition we stole the space from */ |
398 | if (free_part->header.length > size) { | ||
399 | free_part->index += size * NVRAM_BLOCK_LEN; | ||
400 | free_part->header.length -= size; | ||
401 | free_part->header.checksum = nvram_checksum(&free_part->header); | ||
402 | rc = nvram_write_header(free_part); | ||
403 | if (rc <= 0) { | ||
404 | pr_err("nvram_create_partition: nvram_write_header " | ||
405 | "failed (%d)\n", rc); | ||
406 | return rc; | ||
407 | } | ||
408 | } else { | ||
363 | list_del(&free_part->partition); | 409 | list_del(&free_part->partition); |
364 | kfree(free_part); | 410 | kfree(free_part); |
365 | return 0; | ||
366 | } | 411 | } |
367 | 412 | ||
368 | /* Adjust the partition we stole the space from */ | 413 | /* Clear the new partition */ |
369 | free_part->index += size * NVRAM_BLOCK_LEN; | 414 | for (tmp_index = new_part->index + NVRAM_HEADER_LEN; |
370 | free_part->header.length -= size; | 415 | tmp_index < new_part->index + (size * NVRAM_BLOCK_LEN); |
371 | free_part->header.checksum = nvram_checksum(&free_part->header); | 416 | tmp_index += NVRAM_BLOCK_LEN) { |
372 | 417 | rc = ppc_md.nvram_write(nv_init_vals, NVRAM_BLOCK_LEN, &tmp_index); | |
373 | rc = nvram_write_header(free_part); | 418 | if (rc <= 0) { |
374 | if (rc <= 0) { | 419 | pr_err("nvram_create_partition: nvram_write failed (%d)\n", rc); |
375 | printk(KERN_ERR "nvram_create_os_partition: nvram_write_header " | 420 | return rc; |
376 | "failed (%d)\n", rc); | 421 | } |
377 | return rc; | ||
378 | } | 422 | } |
379 | 423 | ||
380 | return 0; | 424 | return new_part->index + NVRAM_HEADER_LEN; |
381 | } | 425 | } |
382 | 426 | ||
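Since the create path now returns the data offset instead of filling globals, a caller that passes a min_size below req_size pairs it with nvram_get_partition_size(). A hedged caller sketch; my_nvram_setup and the byte counts are illustrative, only the two nvram_* calls come from this file:

static int __init my_nvram_setup(void)
{
    loff_t idx;
    int size;

    /* Ask for 2048 data bytes but accept as little as 256. */
    idx = nvram_create_partition("ppc64,linux", NVRAM_SIG_OS, 2048, 256);
    if (idx < 0)
        return idx;                     /* -ENOSPC, -EINVAL, or a write error */

    /* min_size < req_size, so query what was actually granted. */
    size = nvram_get_partition_size(idx);
    pr_info("nvram: data at %llu, %d bytes\n", (unsigned long long)idx, size);
    return 0;
}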
383 | 427 | /** | |
384 | /* nvram_setup_partition | 428 | * nvram_get_partition_size - Get the data size of an nvram partition |
385 | * | 429 | * @data_index: This is the offset of the start of the data of |
386 | * This will setup the partition we need for buffering the | 430 | * the partition, i.e. the value returned by |
387 | * error logs and cleanup partitions if needed. | 431 | * nvram_create_partition(). |
388 | * | ||
389 | * The general strategy is the following: | ||
390 | * 1.) If there is ppc64,linux partition large enough then use it. | ||
391 | * 2.) If there is not a ppc64,linux partition large enough, search | ||
392 | * for a free partition that is large enough. | ||
393 | * 3.) If there is not a free partition large enough remove | ||
394 | * _all_ OS partitions and consolidate the space. | ||
395 | * 4.) Will first try getting a chunk that will satisfy the maximum | ||
396 | * error log size (NVRAM_MAX_REQ). | ||
397 | * 5.) If the max chunk cannot be allocated then try finding a chunk | ||
398 | * that will satisfy the minum needed (NVRAM_MIN_REQ). | ||
399 | */ | 432 | */ |
400 | static int __init nvram_setup_partition(void) | 433 | int nvram_get_partition_size(loff_t data_index) |
401 | { | 434 | { |
402 | struct list_head * p; | 435 | struct nvram_partition *part; |
403 | struct nvram_partition * part; | 436 | |
404 | int rc; | 437 | list_for_each_entry(part, &nvram_partitions, partition) { |
405 | 438 | if (part->index + NVRAM_HEADER_LEN == data_index) | |
406 | /* For now, we don't do any of this on pmac, until I | 439 | return (part->header.length - 1) * NVRAM_BLOCK_LEN; |
407 | * have figured out if it's worth killing some unused stuffs | 440 | } |
408 | * in our nvram, as Apple defined partitions use pretty much | 441 | return -1; |
409 | * all of the space | 442 | } |
410 | */ | ||
411 | if (machine_is(powermac)) | ||
412 | return -ENOSPC; | ||
413 | |||
414 | /* see if we have an OS partition that meets our needs. | ||
415 | will try getting the max we need. If not we'll delete | ||
416 | partitions and try again. */ | ||
417 | list_for_each(p, &nvram_part->partition) { | ||
418 | part = list_entry(p, struct nvram_partition, partition); | ||
419 | if (part->header.signature != NVRAM_SIG_OS) | ||
420 | continue; | ||
421 | 443 | ||
422 | if (strcmp(part->header.name, "ppc64,linux")) | ||
423 | continue; | ||
424 | 444 | ||
425 | if (part->header.length >= NVRAM_MIN_REQ) { | 445 | /** |
426 | /* found our partition */ | 446 | * nvram_find_partition - Find an nvram partition by signature and name |
427 | nvram_error_log_index = part->index + NVRAM_HEADER_LEN; | 447 | * @name: Name of the partition or NULL for any name |
428 | nvram_error_log_size = ((part->header.length - 1) * | 448 | * @sig: Signature to test against |
429 | NVRAM_BLOCK_LEN) - sizeof(struct err_log_info); | 449 | * @out_size: if non-NULL, returns the size of the data part of the partition |
430 | return 0; | 450 | */ |
451 | loff_t nvram_find_partition(const char *name, int sig, int *out_size) | ||
452 | { | ||
453 | struct nvram_partition *p; | ||
454 | |||
455 | list_for_each_entry(p, &nvram_partitions, partition) { | ||
456 | if (p->header.signature == sig && | ||
457 | (!name || !strncmp(p->header.name, name, 12))) { | ||
458 | if (out_size) | ||
459 | *out_size = (p->header.length - 1) * | ||
460 | NVRAM_BLOCK_LEN; | ||
461 | return p->index + NVRAM_HEADER_LEN; | ||
431 | } | 462 | } |
432 | } | 463 | } |
433 | |||
434 | /* try creating a partition with the free space we have */ | ||
435 | rc = nvram_create_os_partition(); | ||
436 | if (!rc) { | ||
437 | return 0; | ||
438 | } | ||
439 | |||
440 | /* need to free up some space */ | ||
441 | rc = nvram_remove_os_partition(); | ||
442 | if (rc) { | ||
443 | return rc; | ||
444 | } | ||
445 | |||
446 | /* create a partition in this new space */ | ||
447 | rc = nvram_create_os_partition(); | ||
448 | if (rc) { | ||
449 | printk(KERN_ERR "nvram_create_os_partition: Could not find a " | ||
450 | "NVRAM partition large enough\n"); | ||
451 | return rc; | ||
452 | } | ||
453 | |||
454 | return 0; | 464 | return 0; |
455 | } | 465 | } |
456 | 466 | ||
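Note that nvram_find_partition() reports "not found" as 0 rather than a negative errno, so callers usually translate. A hedged wrapper sketch (the wrapper name is illustrative):

static loff_t __init find_os_partition(int *out_bytes)
{
    loff_t idx = nvram_find_partition("ppc64,linux", NVRAM_SIG_OS, out_bytes);

    if (idx == 0)                       /* 0 means no matching partition */
        return -ENOENT;
    return idx;                         /* offset of the partition's data area */
}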
457 | 467 | int __init nvram_scan_partitions(void) | |
458 | static int __init nvram_scan_partitions(void) | ||
459 | { | 468 | { |
460 | loff_t cur_index = 0; | 469 | loff_t cur_index = 0; |
461 | struct nvram_header phead; | 470 | struct nvram_header phead; |
@@ -465,7 +474,7 @@ static int __init nvram_scan_partitions(void) | |||
465 | int total_size; | 474 | int total_size; |
466 | int err; | 475 | int err; |
467 | 476 | ||
468 | if (ppc_md.nvram_size == NULL) | 477 | if (ppc_md.nvram_size == NULL || ppc_md.nvram_size() <= 0) |
469 | return -ENODEV; | 478 | return -ENODEV; |
470 | total_size = ppc_md.nvram_size(); | 479 | total_size = ppc_md.nvram_size(); |
471 | 480 | ||
@@ -512,12 +521,16 @@ static int __init nvram_scan_partitions(void) | |||
512 | 521 | ||
513 | memcpy(&tmp_part->header, &phead, NVRAM_HEADER_LEN); | 522 | memcpy(&tmp_part->header, &phead, NVRAM_HEADER_LEN); |
514 | tmp_part->index = cur_index; | 523 | tmp_part->index = cur_index; |
515 | list_add_tail(&tmp_part->partition, &nvram_part->partition); | 524 | list_add_tail(&tmp_part->partition, &nvram_partitions); |
516 | 525 | ||
517 | cur_index += phead.length * NVRAM_BLOCK_LEN; | 526 | cur_index += phead.length * NVRAM_BLOCK_LEN; |
518 | } | 527 | } |
519 | err = 0; | 528 | err = 0; |
520 | 529 | ||
530 | #ifdef DEBUG_NVRAM | ||
531 | nvram_print_partitions("NVRAM Partitions"); | ||
532 | #endif | ||
533 | |||
521 | out: | 534 | out: |
522 | kfree(header); | 535 | kfree(header); |
523 | return err; | 536 | return err; |
@@ -525,9 +538,10 @@ static int __init nvram_scan_partitions(void) | |||
525 | 538 | ||
526 | static int __init nvram_init(void) | 539 | static int __init nvram_init(void) |
527 | { | 540 | { |
528 | int error; | ||
529 | int rc; | 541 | int rc; |
530 | 542 | ||
543 | BUILD_BUG_ON(NVRAM_BLOCK_LEN != 16); | ||
544 | |||
531 | if (ppc_md.nvram_size == NULL || ppc_md.nvram_size() <= 0) | 545 | if (ppc_md.nvram_size == NULL || ppc_md.nvram_size() <= 0) |
532 | return -ENODEV; | 546 | return -ENODEV; |
533 | 547 | ||
@@ -537,29 +551,6 @@ static int __init nvram_init(void) | |||
537 | return rc; | 551 | return rc; |
538 | } | 552 | } |
539 | 553 | ||
540 | /* initialize our anchor for the nvram partition list */ | ||
541 | nvram_part = kmalloc(sizeof(struct nvram_partition), GFP_KERNEL); | ||
542 | if (!nvram_part) { | ||
543 | printk(KERN_ERR "nvram_init: Failed kmalloc\n"); | ||
544 | return -ENOMEM; | ||
545 | } | ||
546 | INIT_LIST_HEAD(&nvram_part->partition); | ||
547 | |||
548 | /* Get all the NVRAM partitions */ | ||
549 | error = nvram_scan_partitions(); | ||
550 | if (error) { | ||
551 | printk(KERN_ERR "nvram_init: Failed nvram_scan_partitions\n"); | ||
552 | return error; | ||
553 | } | ||
554 | |||
555 | if(nvram_setup_partition()) | ||
556 | printk(KERN_WARNING "nvram_init: Could not find nvram partition" | ||
557 | " for nvram buffered error logging.\n"); | ||
558 | |||
559 | #ifdef DEBUG_NVRAM | ||
560 | nvram_print_partitions("NVRAM Partitions"); | ||
561 | #endif | ||
562 | |||
563 | return rc; | 554 | return rc; |
564 | } | 555 | } |
565 | 556 | ||
@@ -568,135 +559,6 @@ void __exit nvram_cleanup(void) | |||
568 | misc_deregister( &nvram_dev ); | 559 | misc_deregister( &nvram_dev ); |
569 | } | 560 | } |
570 | 561 | ||
571 | |||
572 | #ifdef CONFIG_PPC_PSERIES | ||
573 | |||
574 | /* nvram_write_error_log | ||
575 | * | ||
576 | * We need to buffer the error logs into nvram to ensure that we have | ||
577 | * the failure information to decode. If we have a severe error there | ||
578 | * is no way to guarantee that the OS or the machine is in a state to | ||
579 | * get back to user land and write the error to disk. For example if | ||
580 | * the SCSI device driver causes a Machine Check by writing to a bad | ||
581 | * IO address, there is no way of guaranteeing that the device driver | ||
582 | * is in any state that is would also be able to write the error data | ||
583 | * captured to disk, thus we buffer it in NVRAM for analysis on the | ||
584 | * next boot. | ||
585 | * | ||
586 | * In NVRAM the partition containing the error log buffer will looks like: | ||
587 | * Header (in bytes): | ||
588 | * +-----------+----------+--------+------------+------------------+ | ||
589 | * | signature | checksum | length | name | data | | ||
590 | * |0 |1 |2 3|4 15|16 length-1| | ||
591 | * +-----------+----------+--------+------------+------------------+ | ||
592 | * | ||
593 | * The 'data' section would look like (in bytes): | ||
594 | * +--------------+------------+-----------------------------------+ | ||
595 | * | event_logged | sequence # | error log | | ||
596 | * |0 3|4 7|8 nvram_error_log_size-1| | ||
597 | * +--------------+------------+-----------------------------------+ | ||
598 | * | ||
599 | * event_logged: 0 if event has not been logged to syslog, 1 if it has | ||
600 | * sequence #: The unique sequence # for each event. (until it wraps) | ||
601 | * error log: The error log from event_scan | ||
602 | */ | ||
603 | int nvram_write_error_log(char * buff, int length, | ||
604 | unsigned int err_type, unsigned int error_log_cnt) | ||
605 | { | ||
606 | int rc; | ||
607 | loff_t tmp_index; | ||
608 | struct err_log_info info; | ||
609 | |||
610 | if (nvram_error_log_index == -1) { | ||
611 | return -ESPIPE; | ||
612 | } | ||
613 | |||
614 | if (length > nvram_error_log_size) { | ||
615 | length = nvram_error_log_size; | ||
616 | } | ||
617 | |||
618 | info.error_type = err_type; | ||
619 | info.seq_num = error_log_cnt; | ||
620 | |||
621 | tmp_index = nvram_error_log_index; | ||
622 | |||
623 | rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), &tmp_index); | ||
624 | if (rc <= 0) { | ||
625 | printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc); | ||
626 | return rc; | ||
627 | } | ||
628 | |||
629 | rc = ppc_md.nvram_write(buff, length, &tmp_index); | ||
630 | if (rc <= 0) { | ||
631 | printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc); | ||
632 | return rc; | ||
633 | } | ||
634 | |||
635 | return 0; | ||
636 | } | ||
637 | |||
638 | /* nvram_read_error_log | ||
639 | * | ||
640 | * Reads nvram for error log for at most 'length' | ||
641 | */ | ||
642 | int nvram_read_error_log(char * buff, int length, | ||
643 | unsigned int * err_type, unsigned int * error_log_cnt) | ||
644 | { | ||
645 | int rc; | ||
646 | loff_t tmp_index; | ||
647 | struct err_log_info info; | ||
648 | |||
649 | if (nvram_error_log_index == -1) | ||
650 | return -1; | ||
651 | |||
652 | if (length > nvram_error_log_size) | ||
653 | length = nvram_error_log_size; | ||
654 | |||
655 | tmp_index = nvram_error_log_index; | ||
656 | |||
657 | rc = ppc_md.nvram_read((char *)&info, sizeof(struct err_log_info), &tmp_index); | ||
658 | if (rc <= 0) { | ||
659 | printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc); | ||
660 | return rc; | ||
661 | } | ||
662 | |||
663 | rc = ppc_md.nvram_read(buff, length, &tmp_index); | ||
664 | if (rc <= 0) { | ||
665 | printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc); | ||
666 | return rc; | ||
667 | } | ||
668 | |||
669 | *error_log_cnt = info.seq_num; | ||
670 | *err_type = info.error_type; | ||
671 | |||
672 | return 0; | ||
673 | } | ||
674 | |||
675 | /* This doesn't actually zero anything, but it sets the event_logged | ||
676 | * word to tell that this event is safely in syslog. | ||
677 | */ | ||
678 | int nvram_clear_error_log(void) | ||
679 | { | ||
680 | loff_t tmp_index; | ||
681 | int clear_word = ERR_FLAG_ALREADY_LOGGED; | ||
682 | int rc; | ||
683 | |||
684 | if (nvram_error_log_index == -1) | ||
685 | return -1; | ||
686 | |||
687 | tmp_index = nvram_error_log_index; | ||
688 | |||
689 | rc = ppc_md.nvram_write((char *)&clear_word, sizeof(int), &tmp_index); | ||
690 | if (rc <= 0) { | ||
691 | printk(KERN_ERR "nvram_clear_error_log: Failed nvram_write (%d)\n", rc); | ||
692 | return rc; | ||
693 | } | ||
694 | |||
695 | return 0; | ||
696 | } | ||
697 | |||
698 | #endif /* CONFIG_PPC_PSERIES */ | ||
699 | |||
700 | module_init(nvram_init); | 562 | module_init(nvram_init); |
701 | module_exit(nvram_cleanup); | 563 | module_exit(nvram_cleanup); |
702 | MODULE_LICENSE("GPL"); | 564 | MODULE_LICENSE("GPL"); |
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c index b2c363ef38ad..24582181b6ec 100644 --- a/arch/powerpc/kernel/of_platform.c +++ b/arch/powerpc/kernel/of_platform.c | |||
@@ -36,8 +36,7 @@ | |||
36 | * lacking some bits needed here. | 36 | * lacking some bits needed here. |
37 | */ | 37 | */ |
38 | 38 | ||
39 | static int __devinit of_pci_phb_probe(struct platform_device *dev, | 39 | static int __devinit of_pci_phb_probe(struct platform_device *dev) |
40 | const struct of_device_id *match) | ||
41 | { | 40 | { |
42 | struct pci_controller *phb; | 41 | struct pci_controller *phb; |
43 | 42 | ||
@@ -74,7 +73,7 @@ static int __devinit of_pci_phb_probe(struct platform_device *dev, | |||
74 | #endif /* CONFIG_EEH */ | 73 | #endif /* CONFIG_EEH */ |
75 | 74 | ||
76 | /* Scan the bus */ | 75 | /* Scan the bus */ |
77 | pcibios_scan_phb(phb, dev->dev.of_node); | 76 | pcibios_scan_phb(phb); |
78 | if (phb->bus == NULL) | 77 | if (phb->bus == NULL) |
79 | return -ENXIO; | 78 | return -ENXIO; |
80 | 79 | ||
@@ -104,7 +103,7 @@ static struct of_device_id of_pci_phb_ids[] = { | |||
104 | {} | 103 | {} |
105 | }; | 104 | }; |
106 | 105 | ||
107 | static struct of_platform_driver of_pci_phb_driver = { | 106 | static struct platform_driver of_pci_phb_driver = { |
108 | .probe = of_pci_phb_probe, | 107 | .probe = of_pci_phb_probe, |
109 | .driver = { | 108 | .driver = { |
110 | .name = "of-pci", | 109 | .name = "of-pci", |
@@ -115,7 +114,7 @@ static struct of_platform_driver of_pci_phb_driver = { | |||
115 | 114 | ||
116 | static __init int of_pci_phb_init(void) | 115 | static __init int of_pci_phb_init(void) |
117 | { | 116 | { |
118 | return of_register_platform_driver(&of_pci_phb_driver); | 117 | return platform_driver_register(&of_pci_phb_driver); |
119 | } | 118 | } |
120 | 119 | ||
121 | device_initcall(of_pci_phb_init); | 120 | device_initcall(of_pci_phb_init); |
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index d0a26f1770fe..efeb88184182 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * 2 of the License, or (at your option) any later version. | 7 | * 2 of the License, or (at your option) any later version. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/threads.h> | 10 | #include <linux/smp.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/memblock.h> | 12 | #include <linux/memblock.h> |
13 | 13 | ||
@@ -36,7 +36,7 @@ extern unsigned long __toc_start; | |||
36 | * will suffice to ensure that it doesn't cross a page boundary. | 36 | * will suffice to ensure that it doesn't cross a page boundary. |
37 | */ | 37 | */ |
38 | struct lppaca lppaca[] = { | 38 | struct lppaca lppaca[] = { |
39 | [0 ... (NR_CPUS-1)] = { | 39 | [0 ... (NR_LPPACAS-1)] = { |
40 | .desc = 0xd397d781, /* "LpPa" */ | 40 | .desc = 0xd397d781, /* "LpPa" */ |
41 | .size = sizeof(struct lppaca), | 41 | .size = sizeof(struct lppaca), |
42 | .dyn_proc_status = 2, | 42 | .dyn_proc_status = 2, |
@@ -49,6 +49,54 @@ struct lppaca lppaca[] = { | |||
49 | }, | 49 | }, |
50 | }; | 50 | }; |
51 | 51 | ||
52 | static struct lppaca *extra_lppacas; | ||
53 | static long __initdata lppaca_size; | ||
54 | |||
55 | static void allocate_lppacas(int nr_cpus, unsigned long limit) | ||
56 | { | ||
57 | if (nr_cpus <= NR_LPPACAS) | ||
58 | return; | ||
59 | |||
60 | lppaca_size = PAGE_ALIGN(sizeof(struct lppaca) * | ||
61 | (nr_cpus - NR_LPPACAS)); | ||
62 | extra_lppacas = __va(memblock_alloc_base(lppaca_size, | ||
63 | PAGE_SIZE, limit)); | ||
64 | } | ||
65 | |||
66 | static struct lppaca *new_lppaca(int cpu) | ||
67 | { | ||
68 | struct lppaca *lp; | ||
69 | |||
70 | if (cpu < NR_LPPACAS) | ||
71 | return &lppaca[cpu]; | ||
72 | |||
73 | lp = extra_lppacas + (cpu - NR_LPPACAS); | ||
74 | *lp = lppaca[0]; | ||
75 | |||
76 | return lp; | ||
77 | } | ||
78 | |||
79 | static void free_lppacas(void) | ||
80 | { | ||
81 | long new_size = 0, nr; | ||
82 | |||
83 | if (!lppaca_size) | ||
84 | return; | ||
85 | nr = num_possible_cpus() - NR_LPPACAS; | ||
86 | if (nr > 0) | ||
87 | new_size = PAGE_ALIGN(nr * sizeof(struct lppaca)); | ||
88 | if (new_size >= lppaca_size) | ||
89 | return; | ||
90 | |||
91 | memblock_free(__pa(extra_lppacas) + new_size, lppaca_size - new_size); | ||
92 | lppaca_size = new_size; | ||
93 | } | ||
94 | |||
95 | #else | ||
96 | |||
97 | static inline void allocate_lppacas(int nr_cpus, unsigned long limit) { } | ||
98 | static inline void free_lppacas(void) { } | ||
99 | |||
52 | #endif /* CONFIG_PPC_BOOK3S */ | 100 | #endif /* CONFIG_PPC_BOOK3S */ |
53 | 101 | ||
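The shape of the lppaca change is a reusable pattern: a static array covers the first NR_LPPACAS CPUs, a single boot-time allocation covers the rest, and each extra entry is seeded from entry 0. A user-space sketch under assumed names (NR_STATIC stands in for NR_LPPACAS, calloc for memblock_alloc_base):

#include <stdlib.h>

#define NR_STATIC 1                          /* stands in for NR_LPPACAS */

struct item { int template_field; };

static struct item static_items[NR_STATIC] = { { .template_field = 42 } };
static struct item *extra_items;

static void allocate_items(int nr)           /* cf. allocate_lppacas() */
{
    if (nr > NR_STATIC)
        extra_items = calloc(nr - NR_STATIC, sizeof(*extra_items));
}

static struct item *get_item(int i)          /* cf. new_lppaca() */
{
    struct item *it;

    if (i < NR_STATIC)
        return &static_items[i];
    it = &extra_items[i - NR_STATIC];
    *it = static_items[0];                   /* seed from the template entry */
    return it;
}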
54 | #ifdef CONFIG_PPC_STD_MMU_64 | 102 | #ifdef CONFIG_PPC_STD_MMU_64 |
@@ -88,7 +136,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu) | |||
88 | unsigned long kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL; | 136 | unsigned long kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL; |
89 | 137 | ||
90 | #ifdef CONFIG_PPC_BOOK3S | 138 | #ifdef CONFIG_PPC_BOOK3S |
91 | new_paca->lppaca_ptr = &lppaca[cpu]; | 139 | new_paca->lppaca_ptr = new_lppaca(cpu); |
92 | #else | 140 | #else |
93 | new_paca->kernel_pgd = swapper_pg_dir; | 141 | new_paca->kernel_pgd = swapper_pg_dir; |
94 | #endif | 142 | #endif |
@@ -108,18 +156,29 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu) | |||
108 | /* Put the paca pointer into r13 and SPRG_PACA */ | 156 | /* Put the paca pointer into r13 and SPRG_PACA */ |
109 | void setup_paca(struct paca_struct *new_paca) | 157 | void setup_paca(struct paca_struct *new_paca) |
110 | { | 158 | { |
159 | /* Setup r13 */ | ||
111 | local_paca = new_paca; | 160 | local_paca = new_paca; |
112 | mtspr(SPRN_SPRG_PACA, local_paca); | 161 | |
113 | #ifdef CONFIG_PPC_BOOK3E | 162 | #ifdef CONFIG_PPC_BOOK3E |
163 | /* On Book3E, initialize the TLB miss exception frames */ | ||
114 | mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb); | 164 | mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb); |
165 | #else | ||
166 | /* In HV mode, we set up both HPACA and PACA to avoid problems | ||
167 | * if we do a GET_PACA() before the feature fixups have been | ||
168 | * applied | ||
169 | */ | ||
170 | if (cpu_has_feature(CPU_FTR_HVMODE_206)) | ||
171 | mtspr(SPRN_SPRG_HPACA, local_paca); | ||
115 | #endif | 172 | #endif |
173 | mtspr(SPRN_SPRG_PACA, local_paca); | ||
174 | |||
116 | } | 175 | } |
117 | 176 | ||
118 | static int __initdata paca_size; | 177 | static int __initdata paca_size; |
119 | 178 | ||
120 | void __init allocate_pacas(void) | 179 | void __init allocate_pacas(void) |
121 | { | 180 | { |
122 | int nr_cpus, cpu, limit; | 181 | int cpu, limit; |
123 | 182 | ||
124 | /* | 183 | /* |
125 | * We can't take SLB misses on the paca, and we want to access them | 184 | * We can't take SLB misses on the paca, and we want to access them |
@@ -127,25 +186,22 @@ void __init allocate_pacas(void) | |||
127 | * the first segment. On iSeries they must be within the area mapped | 186 | * the first segment. On iSeries they must be within the area mapped |
128 | * by the HV, which is HvPagesToMap * HVPAGESIZE bytes. | 187 | * by the HV, which is HvPagesToMap * HVPAGESIZE bytes. |
129 | */ | 188 | */ |
130 | limit = min(0x10000000ULL, memblock.rmo_size); | 189 | limit = min(0x10000000ULL, ppc64_rma_size); |
131 | if (firmware_has_feature(FW_FEATURE_ISERIES)) | 190 | if (firmware_has_feature(FW_FEATURE_ISERIES)) |
132 | limit = min(limit, HvPagesToMap * HVPAGESIZE); | 191 | limit = min(limit, HvPagesToMap * HVPAGESIZE); |
133 | 192 | ||
134 | nr_cpus = NR_CPUS; | 193 | paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids); |
135 | /* On iSeries we know we can never have more than 64 cpus */ | ||
136 | if (firmware_has_feature(FW_FEATURE_ISERIES)) | ||
137 | nr_cpus = min(64, nr_cpus); | ||
138 | |||
139 | paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpus); | ||
140 | 194 | ||
141 | paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit)); | 195 | paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit)); |
142 | memset(paca, 0, paca_size); | 196 | memset(paca, 0, paca_size); |
143 | 197 | ||
144 | printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n", | 198 | printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n", |
145 | paca_size, nr_cpus, paca); | 199 | paca_size, nr_cpu_ids, paca); |
200 | |||
201 | allocate_lppacas(nr_cpu_ids, limit); | ||
146 | 202 | ||
147 | /* Can't use for_each_*_cpu, as they aren't functional yet */ | 203 | /* Can't use for_each_*_cpu, as they aren't functional yet */ |
148 | for (cpu = 0; cpu < nr_cpus; cpu++) | 204 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) |
149 | initialise_paca(&paca[cpu], cpu); | 205 | initialise_paca(&paca[cpu], cpu); |
150 | } | 206 | } |
151 | 207 | ||
@@ -153,7 +209,7 @@ void __init free_unused_pacas(void) | |||
153 | { | 209 | { |
154 | int new_size; | 210 | int new_size; |
155 | 211 | ||
156 | new_size = PAGE_ALIGN(sizeof(struct paca_struct) * num_possible_cpus()); | 212 | new_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids); |
157 | 213 | ||
158 | if (new_size >= paca_size) | 214 | if (new_size >= paca_size) |
159 | return; | 215 | return; |
@@ -164,4 +220,6 @@ void __init free_unused_pacas(void) | |||
164 | paca_size - new_size); | 220 | paca_size - new_size); |
165 | 221 | ||
166 | paca_size = new_size; | 222 | paca_size = new_size; |
223 | |||
224 | free_lppacas(); | ||
167 | } | 225 | } |
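Worked numbers for the trim performed by free_unused_pacas(), under assumed sizes (4 KiB pages, a 1024-byte paca_struct, NR_CPUS=1024 at boot, 4 possible CPUs):

#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long paca_sz = 1024;                       /* assumed size */
    unsigned long alloced = PAGE_ALIGN(paca_sz * 1024); /* sized for NR_CPUS */
    unsigned long kept    = PAGE_ALIGN(paca_sz * 4);    /* sized for nr_cpu_ids */

    /* 1048576 - 4096 = 1044480 bytes handed back to memblock */
    printf("freed %lu bytes\n", alloced - kept);
    return 0;
}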
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 9021c4ad4bbd..893af2a9cd03 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/bootmem.h> | 23 | #include <linux/bootmem.h> |
24 | #include <linux/of_address.h> | 24 | #include <linux/of_address.h> |
25 | #include <linux/of_pci.h> | ||
25 | #include <linux/mm.h> | 26 | #include <linux/mm.h> |
26 | #include <linux/list.h> | 27 | #include <linux/list.h> |
27 | #include <linux/syscalls.h> | 28 | #include <linux/syscalls.h> |
@@ -260,7 +261,7 @@ int pci_read_irq_line(struct pci_dev *pci_dev) | |||
260 | 261 | ||
261 | virq = irq_create_mapping(NULL, line); | 262 | virq = irq_create_mapping(NULL, line); |
262 | if (virq != NO_IRQ) | 263 | if (virq != NO_IRQ) |
263 | set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); | 264 | irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); |
264 | } else { | 265 | } else { |
265 | pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n", | 266 | pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n", |
266 | oirq.size, oirq.specifier[0], oirq.specifier[1], | 267 | oirq.size, oirq.specifier[0], oirq.specifier[1], |
@@ -1090,8 +1091,6 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus) | |||
1090 | bus->number, bus->self ? pci_name(bus->self) : "PHB"); | 1091 | bus->number, bus->self ? pci_name(bus->self) : "PHB"); |
1091 | 1092 | ||
1092 | list_for_each_entry(dev, &bus->devices, bus_list) { | 1093 | list_for_each_entry(dev, &bus->devices, bus_list) { |
1093 | struct dev_archdata *sd = &dev->dev.archdata; | ||
1094 | |||
1095 | /* Cardbus can call us to add new devices to a bus, so ignore | 1094 | /* Cardbus can call us to add new devices to a bus, so ignore |
1096 | * those who are already fully discovered | 1095 | * those who are already fully discovered |
1097 | */ | 1096 | */ |
@@ -1107,7 +1106,7 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus) | |||
1107 | set_dev_node(&dev->dev, pcibus_to_node(dev->bus)); | 1106 | set_dev_node(&dev->dev, pcibus_to_node(dev->bus)); |
1108 | 1107 | ||
1109 | /* Hook up default DMA ops */ | 1108 | /* Hook up default DMA ops */ |
1110 | sd->dma_ops = pci_dma_ops; | 1109 | set_dma_ops(&dev->dev, pci_dma_ops); |
1111 | set_dma_offset(&dev->dev, PCI_DRAM_OFFSET); | 1110 | set_dma_offset(&dev->dev, PCI_DRAM_OFFSET); |
1112 | 1111 | ||
1113 | /* Additional platform DMA/iommu setup */ | 1112 | /* Additional platform DMA/iommu setup */ |
@@ -1689,13 +1688,8 @@ int early_find_capability(struct pci_controller *hose, int bus, int devfn, | |||
1689 | /** | 1688 | /** |
1690 | * pci_scan_phb - Given a pci_controller, setup and scan the PCI bus | 1689 | * pci_scan_phb - Given a pci_controller, setup and scan the PCI bus |
1691 | * @hose: Pointer to the PCI host controller instance structure | 1690 | * @hose: Pointer to the PCI host controller instance structure |
1692 | * @sysdata: value to use for sysdata pointer. ppc32 and ppc64 differ here | ||
1693 | * | ||
1694 | * Note: the 'data' pointer is a temporary measure. As 32 and 64 bit | ||
1695 | * pci code gets merged, this parameter should become unnecessary because | ||
1696 | * both will use the same value. | ||
1697 | */ | 1691 | */ |
1698 | void __devinit pcibios_scan_phb(struct pci_controller *hose, void *sysdata) | 1692 | void __devinit pcibios_scan_phb(struct pci_controller *hose) |
1699 | { | 1693 | { |
1700 | struct pci_bus *bus; | 1694 | struct pci_bus *bus; |
1701 | struct device_node *node = hose->dn; | 1695 | struct device_node *node = hose->dn; |
@@ -1705,13 +1699,13 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose, void *sysdata) | |||
1705 | node ? node->full_name : "<NO NAME>"); | 1699 | node ? node->full_name : "<NO NAME>"); |
1706 | 1700 | ||
1707 | /* Create an empty bus for the toplevel */ | 1701 | /* Create an empty bus for the toplevel */ |
1708 | bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, | 1702 | bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose); |
1709 | sysdata); | ||
1710 | if (bus == NULL) { | 1703 | if (bus == NULL) { |
1711 | pr_err("Failed to create bus for PCI domain %04x\n", | 1704 | pr_err("Failed to create bus for PCI domain %04x\n", |
1712 | hose->global_number); | 1705 | hose->global_number); |
1713 | return; | 1706 | return; |
1714 | } | 1707 | } |
1708 | bus->dev.of_node = of_node_get(node); | ||
1715 | bus->secondary = hose->first_busno; | 1709 | bus->secondary = hose->first_busno; |
1716 | hose->bus = bus; | 1710 | hose->bus = bus; |
1717 | 1711 | ||
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index e7db5b48004a..bedb370459f2 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c | |||
@@ -381,7 +381,7 @@ static int __init pcibios_init(void) | |||
381 | if (pci_assign_all_buses) | 381 | if (pci_assign_all_buses) |
382 | hose->first_busno = next_busno; | 382 | hose->first_busno = next_busno; |
383 | hose->last_busno = 0xff; | 383 | hose->last_busno = 0xff; |
384 | pcibios_scan_phb(hose, hose); | 384 | pcibios_scan_phb(hose); |
385 | pci_bus_add_devices(hose->bus); | 385 | pci_bus_add_devices(hose->bus); |
386 | if (pci_assign_all_buses || next_busno <= hose->last_busno) | 386 | if (pci_assign_all_buses || next_busno <= hose->last_busno) |
387 | next_busno = hose->last_busno + pcibios_assign_bus_offset; | 387 | next_busno = hose->last_busno + pcibios_assign_bus_offset; |
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index d43fc65749c1..fc6452b6be9f 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c | |||
@@ -64,7 +64,7 @@ static int __init pcibios_init(void) | |||
64 | 64 | ||
65 | /* Scan all of the recorded PCI controllers. */ | 65 | /* Scan all of the recorded PCI controllers. */ |
66 | list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { | 66 | list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { |
67 | pcibios_scan_phb(hose, hose->dn); | 67 | pcibios_scan_phb(hose); |
68 | pci_bus_add_devices(hose->bus); | 68 | pci_bus_add_devices(hose->bus); |
69 | } | 69 | } |
70 | 70 | ||
@@ -193,8 +193,7 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus) | |||
193 | hose->io_resource.start += io_virt_offset; | 193 | hose->io_resource.start += io_virt_offset; |
194 | hose->io_resource.end += io_virt_offset; | 194 | hose->io_resource.end += io_virt_offset; |
195 | 195 | ||
196 | pr_debug(" hose->io_resource=0x%016llx...0x%016llx\n", | 196 | pr_debug(" hose->io_resource=%pR\n", &hose->io_resource); |
197 | hose->io_resource.start, hose->io_resource.end); | ||
198 | 197 | ||
199 | return 0; | 198 | return 0; |
200 | } | 199 | } |
@@ -243,10 +242,10 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus, | |||
243 | break; | 242 | break; |
244 | bus = NULL; | 243 | bus = NULL; |
245 | } | 244 | } |
246 | if (bus == NULL || bus->sysdata == NULL) | 245 | if (bus == NULL || bus->dev.of_node == NULL) |
247 | return -ENODEV; | 246 | return -ENODEV; |
248 | 247 | ||
249 | hose_node = (struct device_node *)bus->sysdata; | 248 | hose_node = bus->dev.of_node; |
250 | hose = PCI_DN(hose_node)->phb; | 249 | hose = PCI_DN(hose_node)->phb; |
251 | 250 | ||
252 | switch (which) { | 251 | switch (which) { |
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c index d56b35ee7f74..6baabc13306a 100644 --- a/arch/powerpc/kernel/pci_dn.c +++ b/arch/powerpc/kernel/pci_dn.c | |||
@@ -43,10 +43,9 @@ void * __devinit update_dn_pci_info(struct device_node *dn, void *data) | |||
43 | const u32 *regs; | 43 | const u32 *regs; |
44 | struct pci_dn *pdn; | 44 | struct pci_dn *pdn; |
45 | 45 | ||
46 | pdn = alloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL); | 46 | pdn = zalloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL); |
47 | if (pdn == NULL) | 47 | if (pdn == NULL) |
48 | return NULL; | 48 | return NULL; |
49 | memset(pdn, 0, sizeof(*pdn)); | ||
50 | dn->data = pdn; | 49 | dn->data = pdn; |
51 | pdn->node = dn; | 50 | pdn->node = dn; |
52 | pdn->phb = phb; | 51 | pdn->phb = phb; |
@@ -161,7 +160,7 @@ static void *is_devfn_node(struct device_node *dn, void *data) | |||
161 | /* | 160 | /* |
162 | * This is the "slow" path for looking up a device_node from a | 161 | * This is the "slow" path for looking up a device_node from a |
163 | * pci_dev. It will hunt for the device under its parent's | 162 | * pci_dev. It will hunt for the device under its parent's |
164 | * phb and then update sysdata for a future fastpath. | 163 | * phb and then update of_node pointer. |
165 | * | 164 | * |
166 | * It may also do fixups on the actual device since this happens | 165 | * It may also do fixups on the actual device since this happens |
167 | * on the first read/write. | 166 | * on the first read/write. |
@@ -170,16 +169,22 @@ static void *is_devfn_node(struct device_node *dn, void *data) | |||
170 | * In this case it may probe for real hardware ("just in case") | 169 | * In this case it may probe for real hardware ("just in case") |
171 | * and add a device_node to the device tree if necessary. | 170 | * and add a device_node to the device tree if necessary. |
172 | * | 171 | * |
172 | * Is this function necessary anymore now that dev->dev.of_node is | ||
173 | * used to store the node pointer? | ||
174 | * | ||
173 | */ | 175 | */ |
174 | struct device_node *fetch_dev_dn(struct pci_dev *dev) | 176 | struct device_node *fetch_dev_dn(struct pci_dev *dev) |
175 | { | 177 | { |
176 | struct device_node *orig_dn = dev->sysdata; | 178 | struct pci_controller *phb = dev->sysdata; |
177 | struct device_node *dn; | 179 | struct device_node *dn; |
178 | unsigned long searchval = (dev->bus->number << 8) | dev->devfn; | 180 | unsigned long searchval = (dev->bus->number << 8) | dev->devfn; |
179 | 181 | ||
180 | dn = traverse_pci_devices(orig_dn, is_devfn_node, (void *)searchval); | 182 | if (WARN_ON(!phb)) |
183 | return NULL; | ||
184 | |||
185 | dn = traverse_pci_devices(phb->dn, is_devfn_node, (void *)searchval); | ||
181 | if (dn) | 186 | if (dn) |
182 | dev->sysdata = dn; | 187 | dev->dev.of_node = dn; |
183 | return dn; | 188 | return dn; |
184 | } | 189 | } |
185 | EXPORT_SYMBOL(fetch_dev_dn); | 190 | EXPORT_SYMBOL(fetch_dev_dn); |
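The searchval packing used by fetch_dev_dn() is simply the bus number in the high byte and devfn in the low byte, where devfn itself packs slot and function. A quick check with illustrative values:

#include <stdio.h>

int main(void)
{
    unsigned bus = 0x02, slot = 3, fn = 1;
    unsigned devfn = (slot << 3) | fn;             /* PCI_DEVFN(3, 1) = 0x19 */
    unsigned long searchval = (bus << 8) | devfn;

    printf("searchval = 0x%lx\n", searchval);      /* 0x219 */
    return 0;
}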
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index e751506323b4..1e89a72fd030 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c | |||
@@ -135,7 +135,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, | |||
135 | pr_debug(" create device, devfn: %x, type: %s\n", devfn, type); | 135 | pr_debug(" create device, devfn: %x, type: %s\n", devfn, type); |
136 | 136 | ||
137 | dev->bus = bus; | 137 | dev->bus = bus; |
138 | dev->sysdata = node; | 138 | dev->dev.of_node = of_node_get(node); |
139 | dev->dev.parent = bus->bridge; | 139 | dev->dev.parent = bus->bridge; |
140 | dev->dev.bus = &pci_bus_type; | 140 | dev->dev.bus = &pci_bus_type; |
141 | dev->devfn = devfn; | 141 | dev->devfn = devfn; |
@@ -238,7 +238,7 @@ void __devinit of_scan_pci_bridge(struct device_node *node, | |||
238 | bus->primary = dev->bus->number; | 238 | bus->primary = dev->bus->number; |
239 | bus->subordinate = busrange[1]; | 239 | bus->subordinate = busrange[1]; |
240 | bus->bridge_ctl = 0; | 240 | bus->bridge_ctl = 0; |
241 | bus->sysdata = node; | 241 | bus->dev.of_node = of_node_get(node); |
242 | 242 | ||
243 | /* parse ranges property */ | 243 | /* parse ranges property */ |
244 | /* PCI #address-cells == 3 and #size-cells == 2 always */ | 244 | /* PCI #address-cells == 3 and #size-cells == 2 always */ |
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c index 95ad9dad298e..d05ae4204bbf 100644 --- a/arch/powerpc/kernel/perf_callchain.c +++ b/arch/powerpc/kernel/perf_callchain.c | |||
@@ -23,18 +23,6 @@ | |||
23 | #include "ppc32.h" | 23 | #include "ppc32.h" |
24 | #endif | 24 | #endif |
25 | 25 | ||
26 | /* | ||
27 | * Store another value in a callchain_entry. | ||
28 | */ | ||
29 | static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip) | ||
30 | { | ||
31 | unsigned int nr = entry->nr; | ||
32 | |||
33 | if (nr < PERF_MAX_STACK_DEPTH) { | ||
34 | entry->ip[nr] = ip; | ||
35 | entry->nr = nr + 1; | ||
36 | } | ||
37 | } | ||
38 | 26 | ||
39 | /* | 27 | /* |
40 | * Is sp valid as the address of the next kernel stack frame after prev_sp? | 28 | * Is sp valid as the address of the next kernel stack frame after prev_sp? |
@@ -58,8 +46,8 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp) | |||
58 | return 0; | 46 | return 0; |
59 | } | 47 | } |
60 | 48 | ||
61 | static void perf_callchain_kernel(struct pt_regs *regs, | 49 | void |
62 | struct perf_callchain_entry *entry) | 50 | perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) |
63 | { | 51 | { |
64 | unsigned long sp, next_sp; | 52 | unsigned long sp, next_sp; |
65 | unsigned long next_ip; | 53 | unsigned long next_ip; |
@@ -69,8 +57,7 @@ static void perf_callchain_kernel(struct pt_regs *regs, | |||
69 | 57 | ||
70 | lr = regs->link; | 58 | lr = regs->link; |
71 | sp = regs->gpr[1]; | 59 | sp = regs->gpr[1]; |
72 | callchain_store(entry, PERF_CONTEXT_KERNEL); | 60 | perf_callchain_store(entry, regs->nip); |
73 | callchain_store(entry, regs->nip); | ||
74 | 61 | ||
75 | if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) | 62 | if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) |
76 | return; | 63 | return; |
@@ -89,7 +76,7 @@ static void perf_callchain_kernel(struct pt_regs *regs, | |||
89 | next_ip = regs->nip; | 76 | next_ip = regs->nip; |
90 | lr = regs->link; | 77 | lr = regs->link; |
91 | level = 0; | 78 | level = 0; |
92 | callchain_store(entry, PERF_CONTEXT_KERNEL); | 79 | perf_callchain_store(entry, PERF_CONTEXT_KERNEL); |
93 | 80 | ||
94 | } else { | 81 | } else { |
95 | if (level == 0) | 82 | if (level == 0) |
@@ -111,7 +98,7 @@ static void perf_callchain_kernel(struct pt_regs *regs, | |||
111 | ++level; | 98 | ++level; |
112 | } | 99 | } |
113 | 100 | ||
114 | callchain_store(entry, next_ip); | 101 | perf_callchain_store(entry, next_ip); |
115 | if (!valid_next_sp(next_sp, sp)) | 102 | if (!valid_next_sp(next_sp, sp)) |
116 | return; | 103 | return; |
117 | sp = next_sp; | 104 | sp = next_sp; |
@@ -233,8 +220,8 @@ static int sane_signal_64_frame(unsigned long sp) | |||
233 | puc == (unsigned long) &sf->uc; | 220 | puc == (unsigned long) &sf->uc; |
234 | } | 221 | } |
235 | 222 | ||
236 | static void perf_callchain_user_64(struct pt_regs *regs, | 223 | static void perf_callchain_user_64(struct perf_callchain_entry *entry, |
237 | struct perf_callchain_entry *entry) | 224 | struct pt_regs *regs) |
238 | { | 225 | { |
239 | unsigned long sp, next_sp; | 226 | unsigned long sp, next_sp; |
240 | unsigned long next_ip; | 227 | unsigned long next_ip; |
@@ -246,8 +233,7 @@ static void perf_callchain_user_64(struct pt_regs *regs, | |||
246 | next_ip = regs->nip; | 233 | next_ip = regs->nip; |
247 | lr = regs->link; | 234 | lr = regs->link; |
248 | sp = regs->gpr[1]; | 235 | sp = regs->gpr[1]; |
249 | callchain_store(entry, PERF_CONTEXT_USER); | 236 | perf_callchain_store(entry, next_ip); |
250 | callchain_store(entry, next_ip); | ||
251 | 237 | ||
252 | for (;;) { | 238 | for (;;) { |
253 | fp = (unsigned long __user *) sp; | 239 | fp = (unsigned long __user *) sp; |
@@ -276,14 +262,14 @@ static void perf_callchain_user_64(struct pt_regs *regs, | |||
276 | read_user_stack_64(&uregs[PT_R1], &sp)) | 262 | read_user_stack_64(&uregs[PT_R1], &sp)) |
277 | return; | 263 | return; |
278 | level = 0; | 264 | level = 0; |
279 | callchain_store(entry, PERF_CONTEXT_USER); | 265 | perf_callchain_store(entry, PERF_CONTEXT_USER); |
280 | callchain_store(entry, next_ip); | 266 | perf_callchain_store(entry, next_ip); |
281 | continue; | 267 | continue; |
282 | } | 268 | } |
283 | 269 | ||
284 | if (level == 0) | 270 | if (level == 0) |
285 | next_ip = lr; | 271 | next_ip = lr; |
286 | callchain_store(entry, next_ip); | 272 | perf_callchain_store(entry, next_ip); |
287 | ++level; | 273 | ++level; |
288 | sp = next_sp; | 274 | sp = next_sp; |
289 | } | 275 | } |
@@ -315,8 +301,8 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret) | |||
315 | return __get_user_inatomic(*ret, ptr); | 301 | return __get_user_inatomic(*ret, ptr); |
316 | } | 302 | } |
317 | 303 | ||
318 | static inline void perf_callchain_user_64(struct pt_regs *regs, | 304 | static inline void perf_callchain_user_64(struct perf_callchain_entry *entry, |
319 | struct perf_callchain_entry *entry) | 305 | struct pt_regs *regs) |
320 | { | 306 | { |
321 | } | 307 | } |
322 | 308 | ||
@@ -435,8 +421,8 @@ static unsigned int __user *signal_frame_32_regs(unsigned int sp, | |||
435 | return mctx->mc_gregs; | 421 | return mctx->mc_gregs; |
436 | } | 422 | } |
437 | 423 | ||
438 | static void perf_callchain_user_32(struct pt_regs *regs, | 424 | static void perf_callchain_user_32(struct perf_callchain_entry *entry, |
439 | struct perf_callchain_entry *entry) | 425 | struct pt_regs *regs) |
440 | { | 426 | { |
441 | unsigned int sp, next_sp; | 427 | unsigned int sp, next_sp; |
442 | unsigned int next_ip; | 428 | unsigned int next_ip; |
@@ -447,8 +433,7 @@ static void perf_callchain_user_32(struct pt_regs *regs, | |||
447 | next_ip = regs->nip; | 433 | next_ip = regs->nip; |
448 | lr = regs->link; | 434 | lr = regs->link; |
449 | sp = regs->gpr[1]; | 435 | sp = regs->gpr[1]; |
450 | callchain_store(entry, PERF_CONTEXT_USER); | 436 | perf_callchain_store(entry, next_ip); |
451 | callchain_store(entry, next_ip); | ||
452 | 437 | ||
453 | while (entry->nr < PERF_MAX_STACK_DEPTH) { | 438 | while (entry->nr < PERF_MAX_STACK_DEPTH) { |
454 | fp = (unsigned int __user *) (unsigned long) sp; | 439 | fp = (unsigned int __user *) (unsigned long) sp; |
@@ -470,45 +455,24 @@ static void perf_callchain_user_32(struct pt_regs *regs, | |||
470 | read_user_stack_32(&uregs[PT_R1], &sp)) | 455 | read_user_stack_32(&uregs[PT_R1], &sp)) |
471 | return; | 456 | return; |
472 | level = 0; | 457 | level = 0; |
473 | callchain_store(entry, PERF_CONTEXT_USER); | 458 | perf_callchain_store(entry, PERF_CONTEXT_USER); |
474 | callchain_store(entry, next_ip); | 459 | perf_callchain_store(entry, next_ip); |
475 | continue; | 460 | continue; |
476 | } | 461 | } |
477 | 462 | ||
478 | if (level == 0) | 463 | if (level == 0) |
479 | next_ip = lr; | 464 | next_ip = lr; |
480 | callchain_store(entry, next_ip); | 465 | perf_callchain_store(entry, next_ip); |
481 | ++level; | 466 | ++level; |
482 | sp = next_sp; | 467 | sp = next_sp; |
483 | } | 468 | } |
484 | } | 469 | } |
485 | 470 | ||
486 | /* | 471 | void |
487 | * Since we can't get PMU interrupts inside a PMU interrupt handler, | 472 | perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) |
488 | * we don't need separate irq and nmi entries here. | ||
489 | */ | ||
490 | static DEFINE_PER_CPU(struct perf_callchain_entry, cpu_perf_callchain); | ||
491 | |||
492 | struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | ||
493 | { | 473 | { |
494 | struct perf_callchain_entry *entry = &__get_cpu_var(cpu_perf_callchain); | 474 | if (current_is_64bit()) |
495 | 475 | perf_callchain_user_64(entry, regs); | |
496 | entry->nr = 0; | 476 | else |
497 | 477 | perf_callchain_user_32(entry, regs); | |
498 | if (!user_mode(regs)) { | ||
499 | perf_callchain_kernel(regs, entry); | ||
500 | if (current->mm) | ||
501 | regs = task_pt_regs(current); | ||
502 | else | ||
503 | regs = NULL; | ||
504 | } | ||
505 | |||
506 | if (regs) { | ||
507 | if (current_is_64bit()) | ||
508 | perf_callchain_user_64(regs, entry); | ||
509 | else | ||
510 | perf_callchain_user_32(regs, entry); | ||
511 | } | ||
512 | |||
513 | return entry; | ||
514 | } | 478 | } |
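For reference, the deleted callchain_store() and the generic perf_callchain_store() the file switches to behave the same way: append an instruction pointer if there is room, silently drop it otherwise. A user-space sketch of that store (the types and depth constant are stand-ins):

#include <stdint.h>

#define MAX_DEPTH 127                    /* stands in for PERF_MAX_STACK_DEPTH */

struct callchain_entry {
    uint64_t nr;
    uint64_t ip[MAX_DEPTH];
};

static void store(struct callchain_entry *entry, uint64_t ip)
{
    if (entry->nr < MAX_DEPTH)
        entry->ip[entry->nr++] = ip;     /* frames past the cap are dropped */
}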
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index d301a30445e0..822f63008ae1 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c | |||
@@ -398,10 +398,32 @@ static int check_excludes(struct perf_event **ctrs, unsigned int cflags[], | |||
398 | return 0; | 398 | return 0; |
399 | } | 399 | } |
400 | 400 | ||
401 | static u64 check_and_compute_delta(u64 prev, u64 val) | ||
402 | { | ||
403 | u64 delta = (val - prev) & 0xfffffffful; | ||
404 | |||
405 | /* | ||
406 | * POWER7 can roll back counter values; if the new value is smaller | ||
407 | * than the previous value, the delta and the counter will have | ||
408 | * bogus values unless we rolled a counter over. If a counter is | ||
409 | * rolled back, it will be smaller, but within 256, which is the | ||
410 | * maximum number of events to roll back at once. If we detect a | ||
411 | * rollback, return 0. This can lead to a small lack of precision | ||
412 | * in the counters. | ||
413 | */ | ||
414 | if (prev > val && (prev - val) < 256) | ||
415 | delta = 0; | ||
416 | |||
417 | return delta; | ||
418 | } | ||
419 | |||
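The helper separates two superficially similar situations: a genuine 32-bit wrap (large backwards distance) and a POWER7 rollback (backwards by less than 256). A stand-alone check mirroring the logic above:

#include <assert.h>
#include <stdint.h>

static uint64_t check_and_compute_delta(uint64_t prev, uint64_t val)
{
    uint64_t delta = (val - prev) & 0xfffffffful;

    if (prev > val && (prev - val) < 256)
        delta = 0;                       /* rollback, not a wrap */
    return delta;
}

int main(void)
{
    /* 32-bit wrap: 0xfffffff0 -> 0x10 really is 0x20 events. */
    assert(check_and_compute_delta(0xfffffff0ul, 0x10ul) == 0x20);
    /* Small backwards step: treated as a rollback, no progress. */
    assert(check_and_compute_delta(1000, 900) == 0);
    /* Large backwards step: assumed to be a wrap, delta is kept. */
    assert(check_and_compute_delta(1000, 100) != 0);
    return 0;
}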
401 | static void power_pmu_read(struct perf_event *event) | 420 | static void power_pmu_read(struct perf_event *event) |
402 | { | 421 | { |
403 | s64 val, delta, prev; | 422 | s64 val, delta, prev; |
404 | 423 | ||
424 | if (event->hw.state & PERF_HES_STOPPED) | ||
425 | return; | ||
426 | |||
405 | if (!event->hw.idx) | 427 | if (!event->hw.idx) |
406 | return; | 428 | return; |
407 | /* | 429 | /* |
@@ -413,10 +435,11 @@ static void power_pmu_read(struct perf_event *event) | |||
413 | prev = local64_read(&event->hw.prev_count); | 435 | prev = local64_read(&event->hw.prev_count); |
414 | barrier(); | 436 | barrier(); |
415 | val = read_pmc(event->hw.idx); | 437 | val = read_pmc(event->hw.idx); |
438 | delta = check_and_compute_delta(prev, val); | ||
439 | if (!delta) | ||
440 | return; | ||
416 | } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); | 441 | } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); |
417 | 442 | ||
418 | /* The counters are only 32 bits wide */ | ||
419 | delta = (val - prev) & 0xfffffffful; | ||
420 | local64_add(delta, &event->count); | 443 | local64_add(delta, &event->count); |
421 | local64_sub(delta, &event->hw.period_left); | 444 | local64_sub(delta, &event->hw.period_left); |
422 | } | 445 | } |
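The do/while in power_pmu_read() is the usual lock-free accumulate: compute a delta against a snapshot of prev_count and publish it only if no interrupt moved prev_count in the meantime. A user-space sketch with GCC atomics (illustrative; the kernel uses local64_cmpxchg and a real PMC read):

#include <stdint.h>
#include <stdio.h>

static uint64_t prev_count;
static uint32_t fake_pmc;

static uint32_t read_pmc(void) { return fake_pmc += 5; }   /* stand-in counter */

static uint64_t accumulate(void)
{
    uint64_t prev, val, delta;

    do {
        prev = __atomic_load_n(&prev_count, __ATOMIC_RELAXED);
        val = read_pmc();
        delta = (val - prev) & 0xffffffffUL;
    } while (!__atomic_compare_exchange_n(&prev_count, &prev, val, 0,
                                          __ATOMIC_RELAXED, __ATOMIC_RELAXED));
    return delta;
}

int main(void)
{
    printf("%llu %llu\n", (unsigned long long)accumulate(),
           (unsigned long long)accumulate());              /* prints: 5 5 */
    return 0;
}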
@@ -446,8 +469,9 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw, | |||
446 | val = (event->hw.idx == 5) ? pmc5 : pmc6; | 469 | val = (event->hw.idx == 5) ? pmc5 : pmc6; |
447 | prev = local64_read(&event->hw.prev_count); | 470 | prev = local64_read(&event->hw.prev_count); |
448 | event->hw.idx = 0; | 471 | event->hw.idx = 0; |
449 | delta = (val - prev) & 0xfffffffful; | 472 | delta = check_and_compute_delta(prev, val); |
450 | local64_add(delta, &event->count); | 473 | if (delta) |
474 | local64_add(delta, &event->count); | ||
451 | } | 475 | } |
452 | } | 476 | } |
453 | 477 | ||
@@ -455,14 +479,16 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw, | |||
455 | unsigned long pmc5, unsigned long pmc6) | 479 | unsigned long pmc5, unsigned long pmc6) |
456 | { | 480 | { |
457 | struct perf_event *event; | 481 | struct perf_event *event; |
458 | u64 val; | 482 | u64 val, prev; |
459 | int i; | 483 | int i; |
460 | 484 | ||
461 | for (i = 0; i < cpuhw->n_limited; ++i) { | 485 | for (i = 0; i < cpuhw->n_limited; ++i) { |
462 | event = cpuhw->limited_counter[i]; | 486 | event = cpuhw->limited_counter[i]; |
463 | event->hw.idx = cpuhw->limited_hwidx[i]; | 487 | event->hw.idx = cpuhw->limited_hwidx[i]; |
464 | val = (event->hw.idx == 5) ? pmc5 : pmc6; | 488 | val = (event->hw.idx == 5) ? pmc5 : pmc6; |
465 | local64_set(&event->hw.prev_count, val); | 489 | prev = local64_read(&event->hw.prev_count); |
490 | if (check_and_compute_delta(prev, val)) | ||
491 | local64_set(&event->hw.prev_count, val); | ||
466 | perf_event_update_userpage(event); | 492 | perf_event_update_userpage(event); |
467 | } | 493 | } |
468 | } | 494 | } |
@@ -517,7 +543,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0) | |||
517 | * Disable all events to prevent PMU interrupts and to allow | 543 | * Disable all events to prevent PMU interrupts and to allow |
518 | * events to be added or removed. | 544 | * events to be added or removed. |
519 | */ | 545 | */ |
520 | void hw_perf_disable(void) | 546 | static void power_pmu_disable(struct pmu *pmu) |
521 | { | 547 | { |
522 | struct cpu_hw_events *cpuhw; | 548 | struct cpu_hw_events *cpuhw; |
523 | unsigned long flags; | 549 | unsigned long flags; |
@@ -565,7 +591,7 @@ void hw_perf_disable(void) | |||
565 | * If we were previously disabled and events were added, then | 591 | * If we were previously disabled and events were added, then |
566 | * put the new config on the PMU. | 592 | * put the new config on the PMU. |
567 | */ | 593 | */ |
568 | void hw_perf_enable(void) | 594 | static void power_pmu_enable(struct pmu *pmu) |
569 | { | 595 | { |
570 | struct perf_event *event; | 596 | struct perf_event *event; |
571 | struct cpu_hw_events *cpuhw; | 597 | struct cpu_hw_events *cpuhw; |
@@ -672,6 +698,8 @@ void hw_perf_enable(void) | |||
672 | } | 698 | } |
673 | local64_set(&event->hw.prev_count, val); | 699 | local64_set(&event->hw.prev_count, val); |
674 | event->hw.idx = idx; | 700 | event->hw.idx = idx; |
701 | if (event->hw.state & PERF_HES_STOPPED) | ||
702 | val = 0; | ||
675 | write_pmc(idx, val); | 703 | write_pmc(idx, val); |
676 | perf_event_update_userpage(event); | 704 | perf_event_update_userpage(event); |
677 | } | 705 | } |
@@ -727,7 +755,7 @@ static int collect_events(struct perf_event *group, int max_count, | |||
727 | * re-enable the PMU in order to get hw_perf_enable to do the | 755 | * re-enable the PMU in order to get hw_perf_enable to do the |
728 | * actual work of reconfiguring the PMU. | 756 | * actual work of reconfiguring the PMU. |
729 | */ | 757 | */ |
730 | static int power_pmu_enable(struct perf_event *event) | 758 | static int power_pmu_add(struct perf_event *event, int ef_flags) |
731 | { | 759 | { |
732 | struct cpu_hw_events *cpuhw; | 760 | struct cpu_hw_events *cpuhw; |
733 | unsigned long flags; | 761 | unsigned long flags; |
@@ -735,7 +763,7 @@ static int power_pmu_enable(struct perf_event *event) | |||
735 | int ret = -EAGAIN; | 763 | int ret = -EAGAIN; |
736 | 764 | ||
737 | local_irq_save(flags); | 765 | local_irq_save(flags); |
738 | perf_disable(); | 766 | perf_pmu_disable(event->pmu); |
739 | 767 | ||
740 | /* | 768 | /* |
741 | * Add the event to the list (if there is room) | 769 | * Add the event to the list (if there is room) |
@@ -749,9 +777,12 @@ static int power_pmu_enable(struct perf_event *event) | |||
749 | cpuhw->events[n0] = event->hw.config; | 777 | cpuhw->events[n0] = event->hw.config; |
750 | cpuhw->flags[n0] = event->hw.event_base; | 778 | cpuhw->flags[n0] = event->hw.event_base; |
751 | 779 | ||
780 | if (!(ef_flags & PERF_EF_START)) | ||
781 | event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
782 | |||
752 | /* | 783 | /* |
753 | * If group events scheduling transaction was started, | 784 | * If group events scheduling transaction was started, |
754 | * skip the schedulability test here, it will be peformed | 785 | * skip the schedulability test here, it will be performed |
755 | * at commit time(->commit_txn) as a whole | 786 | * at commit time(->commit_txn) as a whole |
756 | */ | 787 | */ |
757 | if (cpuhw->group_flag & PERF_EVENT_TXN) | 788 | if (cpuhw->group_flag & PERF_EVENT_TXN) |
@@ -769,7 +800,7 @@ nocheck: | |||
769 | 800 | ||
770 | ret = 0; | 801 | ret = 0; |
771 | out: | 802 | out: |
772 | perf_enable(); | 803 | perf_pmu_enable(event->pmu); |
773 | local_irq_restore(flags); | 804 | local_irq_restore(flags); |
774 | return ret; | 805 | return ret; |
775 | } | 806 | } |
@@ -777,14 +808,14 @@ nocheck: | |||
777 | /* | 808 | /* |
778 | * Remove a event from the PMU. | 809 | * Remove a event from the PMU. |
779 | */ | 810 | */ |
780 | static void power_pmu_disable(struct perf_event *event) | 811 | static void power_pmu_del(struct perf_event *event, int ef_flags) |
781 | { | 812 | { |
782 | struct cpu_hw_events *cpuhw; | 813 | struct cpu_hw_events *cpuhw; |
783 | long i; | 814 | long i; |
784 | unsigned long flags; | 815 | unsigned long flags; |
785 | 816 | ||
786 | local_irq_save(flags); | 817 | local_irq_save(flags); |
787 | perf_disable(); | 818 | perf_pmu_disable(event->pmu); |
788 | 819 | ||
789 | power_pmu_read(event); | 820 | power_pmu_read(event); |
790 | 821 | ||
@@ -821,34 +852,60 @@ static void power_pmu_disable(struct perf_event *event) | |||
821 | cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); | 852 | cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); |
822 | } | 853 | } |
823 | 854 | ||
824 | perf_enable(); | 855 | perf_pmu_enable(event->pmu); |
825 | local_irq_restore(flags); | 856 | local_irq_restore(flags); |
826 | } | 857 | } |
827 | 858 | ||
828 | /* | 859 | /* |
829 | * Re-enable interrupts on a event after they were throttled | 860 | * The POWER PMU does not support disabling individual counters; instead, |
830 | * because they were coming too fast. | 861 | * program the counter to its max value and ignore its interrupts. |
831 | */ | 862 | */ |
832 | static void power_pmu_unthrottle(struct perf_event *event) | 863 | |
864 | static void power_pmu_start(struct perf_event *event, int ef_flags) | ||
865 | { | ||
866 | unsigned long flags; | ||
867 | s64 left; | ||
868 | |||
869 | if (!event->hw.idx || !event->hw.sample_period) | ||
870 | return; | ||
871 | |||
872 | if (!(event->hw.state & PERF_HES_STOPPED)) | ||
873 | return; | ||
874 | |||
875 | if (ef_flags & PERF_EF_RELOAD) | ||
876 | WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); | ||
877 | |||
878 | local_irq_save(flags); | ||
879 | perf_pmu_disable(event->pmu); | ||
880 | |||
881 | event->hw.state = 0; | ||
882 | left = local64_read(&event->hw.period_left); | ||
883 | write_pmc(event->hw.idx, left); | ||
884 | |||
885 | perf_event_update_userpage(event); | ||
886 | perf_pmu_enable(event->pmu); | ||
887 | local_irq_restore(flags); | ||
888 | } | ||
889 | |||
890 | static void power_pmu_stop(struct perf_event *event, int ef_flags) | ||
833 | { | 891 | { |
834 | s64 val, left; | ||
835 | unsigned long flags; | 892 | unsigned long flags; |
836 | 893 | ||
837 | if (!event->hw.idx || !event->hw.sample_period) | 894 | if (!event->hw.idx || !event->hw.sample_period) |
838 | return; | 895 | return; |
896 | |||
897 | if (event->hw.state & PERF_HES_STOPPED) | ||
898 | return; | ||
899 | |||
839 | local_irq_save(flags); | 900 | local_irq_save(flags); |
840 | perf_disable(); | 901 | perf_pmu_disable(event->pmu); |
902 | |||
841 | power_pmu_read(event); | 903 | power_pmu_read(event); |
842 | left = event->hw.sample_period; | 904 | event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; |
843 | event->hw.last_period = left; | 905 | write_pmc(event->hw.idx, 0); |
844 | val = 0; | 906 | |
845 | if (left < 0x80000000L) | ||
846 | val = 0x80000000L - left; | ||
847 | write_pmc(event->hw.idx, val); | ||
848 | local64_set(&event->hw.prev_count, val); | ||
849 | local64_set(&event->hw.period_left, left); | ||
850 | perf_event_update_userpage(event); | 907 | perf_event_update_userpage(event); |
851 | perf_enable(); | 908 | perf_pmu_enable(event->pmu); |
852 | local_irq_restore(flags); | 909 | local_irq_restore(flags); |
853 | } | 910 | } |
854 | 911 | ||
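Annotation: the old unthrottle hook disappears here because the reworked API separates scheduling (add/del) from counting (start/stop) and tracks liveness in event->hw.state. A hedged sketch of the state machine these two functions implement, using only names visible in the hunks (example_toggle is made up):

    /*
     *   add(event, PERF_EF_START)    on a PMC and counting
     *   add(event, 0)                on a PMC, state = STOPPED | UPTODATE
     *   stop(event, 0)               fold the count, zero the PMC,
     *                                set STOPPED | UPTODATE
     *   start(event, PERF_EF_RELOAD) rewrite the PMC from period_left,
     *                                clear hw.state so it counts again
     */
    static void example_toggle(struct perf_event *event)
    {
            if (event->hw.state & PERF_HES_STOPPED)
                    power_pmu_start(event, PERF_EF_RELOAD);
            else
                    power_pmu_stop(event, 0);
    }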
@@ -857,10 +914,11 @@ static void power_pmu_unthrottle(struct perf_event *event) | |||
857 | * Set the flag to make pmu::enable() not perform the | 914 | * Set the flag to make pmu::enable() not perform the |
858 | * schedulability test, it will be performed at commit time | 915 | * schedulability test, it will be performed at commit time |
859 | */ | 916 | */ |
860 | void power_pmu_start_txn(const struct pmu *pmu) | 917 | void power_pmu_start_txn(struct pmu *pmu) |
861 | { | 918 | { |
862 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 919 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); |
863 | 920 | ||
921 | perf_pmu_disable(pmu); | ||
864 | cpuhw->group_flag |= PERF_EVENT_TXN; | 922 | cpuhw->group_flag |= PERF_EVENT_TXN; |
865 | cpuhw->n_txn_start = cpuhw->n_events; | 923 | cpuhw->n_txn_start = cpuhw->n_events; |
866 | } | 924 | } |
@@ -870,11 +928,12 @@ void power_pmu_start_txn(const struct pmu *pmu) | |||
870 | * Clear the flag and pmu::enable() will perform the | 928 | * Clear the flag and pmu::enable() will perform the |
871 | * schedulability test. | 929 | * schedulability test. |
872 | */ | 930 | */ |
873 | void power_pmu_cancel_txn(const struct pmu *pmu) | 931 | void power_pmu_cancel_txn(struct pmu *pmu) |
874 | { | 932 | { |
875 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 933 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); |
876 | 934 | ||
877 | cpuhw->group_flag &= ~PERF_EVENT_TXN; | 935 | cpuhw->group_flag &= ~PERF_EVENT_TXN; |
936 | perf_pmu_enable(pmu); | ||
878 | } | 937 | } |
879 | 938 | ||
880 | /* | 939 | /* |
@@ -882,7 +941,7 @@ void power_pmu_cancel_txn(const struct pmu *pmu) | |||
882 | * Perform the group schedulability test as a whole | 941 | * Perform the group schedulability test as a whole |
884 | * Return 0 on success | 943 |
884 | */ | 943 | */ |
885 | int power_pmu_commit_txn(const struct pmu *pmu) | 944 | int power_pmu_commit_txn(struct pmu *pmu) |
886 | { | 945 | { |
887 | struct cpu_hw_events *cpuhw; | 946 | struct cpu_hw_events *cpuhw; |
888 | long i, n; | 947 | long i, n; |
@@ -901,19 +960,10 @@ int power_pmu_commit_txn(const struct pmu *pmu) | |||
901 | cpuhw->event[i]->hw.config = cpuhw->events[i]; | 960 | cpuhw->event[i]->hw.config = cpuhw->events[i]; |
902 | 961 | ||
903 | cpuhw->group_flag &= ~PERF_EVENT_TXN; | 962 | cpuhw->group_flag &= ~PERF_EVENT_TXN; |
963 | perf_pmu_enable(pmu); | ||
904 | return 0; | 964 | return 0; |
905 | } | 965 | } |
906 | 966 | ||
907 | struct pmu power_pmu = { | ||
908 | .enable = power_pmu_enable, | ||
909 | .disable = power_pmu_disable, | ||
910 | .read = power_pmu_read, | ||
911 | .unthrottle = power_pmu_unthrottle, | ||
912 | .start_txn = power_pmu_start_txn, | ||
913 | .cancel_txn = power_pmu_cancel_txn, | ||
914 | .commit_txn = power_pmu_commit_txn, | ||
915 | }; | ||
916 | |||
917 | /* | 967 | /* |
918 | * Return 1 if we might be able to put event on a limited PMC, | 968 | * Return 1 if we might be able to put event on a limited PMC, |
919 | * or 0 if not. | 969 | * or 0 if not. |
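Annotation: start_txn() now takes the perf_pmu_disable() itself and commit_txn()/cancel_txn() release it, so a whole group is added under one PMU freeze with a single schedulability test at commit. Roughly how the core drives these hooks (simplified from kernel/perf_event.c of this era; unwinding of partially-added siblings is trimmed):

    static int example_group_sched_in(struct pmu *pmu, struct perf_event *leader)
    {
            struct perf_event *sub;

            pmu->start_txn(pmu);            /* also disables the PMU now */

            if (pmu->add(leader, PERF_EF_START))
                    goto fail;
            list_for_each_entry(sub, &leader->sibling_list, group_entry)
                    if (pmu->add(sub, PERF_EF_START))
                            goto fail;

            if (!pmu->commit_txn(pmu))      /* one test, then re-enable */
                    return 0;
    fail:
            pmu->cancel_txn(pmu);           /* re-enables on the error path */
            return -EAGAIN;
    }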
@@ -1014,7 +1064,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp) | |||
1014 | return 0; | 1064 | return 0; |
1015 | } | 1065 | } |
1016 | 1066 | ||
1017 | const struct pmu *hw_perf_event_init(struct perf_event *event) | 1067 | static int power_pmu_event_init(struct perf_event *event) |
1018 | { | 1068 | { |
1019 | u64 ev; | 1069 | u64 ev; |
1020 | unsigned long flags; | 1070 | unsigned long flags; |
@@ -1026,25 +1076,27 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
1026 | struct cpu_hw_events *cpuhw; | 1076 | struct cpu_hw_events *cpuhw; |
1027 | 1077 | ||
1028 | if (!ppmu) | 1078 | if (!ppmu) |
1029 | return ERR_PTR(-ENXIO); | 1079 | return -ENOENT; |
1080 | |||
1030 | switch (event->attr.type) { | 1081 | switch (event->attr.type) { |
1031 | case PERF_TYPE_HARDWARE: | 1082 | case PERF_TYPE_HARDWARE: |
1032 | ev = event->attr.config; | 1083 | ev = event->attr.config; |
1033 | if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) | 1084 | if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) |
1034 | return ERR_PTR(-EOPNOTSUPP); | 1085 | return -EOPNOTSUPP; |
1035 | ev = ppmu->generic_events[ev]; | 1086 | ev = ppmu->generic_events[ev]; |
1036 | break; | 1087 | break; |
1037 | case PERF_TYPE_HW_CACHE: | 1088 | case PERF_TYPE_HW_CACHE: |
1038 | err = hw_perf_cache_event(event->attr.config, &ev); | 1089 | err = hw_perf_cache_event(event->attr.config, &ev); |
1039 | if (err) | 1090 | if (err) |
1040 | return ERR_PTR(err); | 1091 | return err; |
1041 | break; | 1092 | break; |
1042 | case PERF_TYPE_RAW: | 1093 | case PERF_TYPE_RAW: |
1043 | ev = event->attr.config; | 1094 | ev = event->attr.config; |
1044 | break; | 1095 | break; |
1045 | default: | 1096 | default: |
1046 | return ERR_PTR(-EINVAL); | 1097 | return -ENOENT; |
1047 | } | 1098 | } |
1099 | |||
1048 | event->hw.config_base = ev; | 1100 | event->hw.config_base = ev; |
1049 | event->hw.idx = 0; | 1101 | event->hw.idx = 0; |
1050 | 1102 | ||
@@ -1063,7 +1115,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
1063 | * XXX we should check if the task is an idle task. | 1115 | * XXX we should check if the task is an idle task. |
1064 | */ | 1116 | */ |
1065 | flags = 0; | 1117 | flags = 0; |
1066 | if (event->ctx->task) | 1118 | if (event->attach_state & PERF_ATTACH_TASK) |
1067 | flags |= PPMU_ONLY_COUNT_RUN; | 1119 | flags |= PPMU_ONLY_COUNT_RUN; |
1068 | 1120 | ||
1069 | /* | 1121 | /* |
@@ -1081,7 +1133,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
1081 | */ | 1133 | */ |
1082 | ev = normal_pmc_alternative(ev, flags); | 1134 | ev = normal_pmc_alternative(ev, flags); |
1083 | if (!ev) | 1135 | if (!ev) |
1084 | return ERR_PTR(-EINVAL); | 1136 | return -EINVAL; |
1085 | } | 1137 | } |
1086 | } | 1138 | } |
1087 | 1139 | ||
@@ -1095,19 +1147,19 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
1095 | n = collect_events(event->group_leader, ppmu->n_counter - 1, | 1147 | n = collect_events(event->group_leader, ppmu->n_counter - 1, |
1096 | ctrs, events, cflags); | 1148 | ctrs, events, cflags); |
1097 | if (n < 0) | 1149 | if (n < 0) |
1098 | return ERR_PTR(-EINVAL); | 1150 | return -EINVAL; |
1099 | } | 1151 | } |
1100 | events[n] = ev; | 1152 | events[n] = ev; |
1101 | ctrs[n] = event; | 1153 | ctrs[n] = event; |
1102 | cflags[n] = flags; | 1154 | cflags[n] = flags; |
1103 | if (check_excludes(ctrs, cflags, n, 1)) | 1155 | if (check_excludes(ctrs, cflags, n, 1)) |
1104 | return ERR_PTR(-EINVAL); | 1156 | return -EINVAL; |
1105 | 1157 | ||
1106 | cpuhw = &get_cpu_var(cpu_hw_events); | 1158 | cpuhw = &get_cpu_var(cpu_hw_events); |
1107 | err = power_check_constraints(cpuhw, events, cflags, n + 1); | 1159 | err = power_check_constraints(cpuhw, events, cflags, n + 1); |
1108 | put_cpu_var(cpu_hw_events); | 1160 | put_cpu_var(cpu_hw_events); |
1109 | if (err) | 1161 | if (err) |
1110 | return ERR_PTR(-EINVAL); | 1162 | return -EINVAL; |
1111 | 1163 | ||
1112 | event->hw.config = events[n]; | 1164 | event->hw.config = events[n]; |
1113 | event->hw.event_base = cflags[n]; | 1165 | event->hw.event_base = cflags[n]; |
@@ -1132,11 +1184,23 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
1132 | } | 1184 | } |
1133 | event->destroy = hw_perf_event_destroy; | 1185 | event->destroy = hw_perf_event_destroy; |
1134 | 1186 | ||
1135 | if (err) | 1187 | return err; |
1136 | return ERR_PTR(err); | ||
1137 | return &power_pmu; | ||
1138 | } | 1188 | } |
1139 | 1189 | ||
1190 | struct pmu power_pmu = { | ||
1191 | .pmu_enable = power_pmu_enable, | ||
1192 | .pmu_disable = power_pmu_disable, | ||
1193 | .event_init = power_pmu_event_init, | ||
1194 | .add = power_pmu_add, | ||
1195 | .del = power_pmu_del, | ||
1196 | .start = power_pmu_start, | ||
1197 | .stop = power_pmu_stop, | ||
1198 | .read = power_pmu_read, | ||
1199 | .start_txn = power_pmu_start_txn, | ||
1200 | .cancel_txn = power_pmu_cancel_txn, | ||
1201 | .commit_txn = power_pmu_commit_txn, | ||
1202 | }; | ||
1203 | |||
1140 | /* | 1204 | /* |
1141 | * A counter has overflowed; update its count and record | 1205 | * A counter has overflowed; update its count and record |
1142 | * things if requested. Note that interrupts are hard-disabled | 1206 | * things if requested. Note that interrupts are hard-disabled |
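Annotation: with hw_perf_event_init() gone, event_init becomes a struct pmu method and the error-code changes above are load-bearing: -ENOENT now means "not my event type, try the next PMU", while any other error aborts the lookup — which is why the unknown-type cases switched away from -EINVAL/-ENXIO. A simplified sketch of the core's dispatch (the real loop in kernel/perf_event.c is RCU-protected):

    static struct pmu *example_perf_init_event(struct perf_event *event)
    {
            struct pmu *pmu;
            int err;

            list_for_each_entry(pmu, &pmus, entry) {  /* registered PMUs */
                    err = pmu->event_init(event);
                    if (!err)
                            return pmu;               /* claimed it */
                    if (err != -ENOENT)
                            return ERR_PTR(err);      /* hard failure */
            }
            return ERR_PTR(-ENOENT);                  /* nobody wanted it */
    }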
@@ -1149,9 +1213,14 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
1149 | s64 prev, delta, left; | 1213 | s64 prev, delta, left; |
1150 | int record = 0; | 1214 | int record = 0; |
1151 | 1215 | ||
1216 | if (event->hw.state & PERF_HES_STOPPED) { | ||
1217 | write_pmc(event->hw.idx, 0); | ||
1218 | return; | ||
1219 | } | ||
1220 | |||
1152 | /* we don't have to worry about interrupts here */ | 1221 | /* we don't have to worry about interrupts here */ |
1153 | prev = local64_read(&event->hw.prev_count); | 1222 | prev = local64_read(&event->hw.prev_count); |
1154 | delta = (val - prev) & 0xfffffffful; | 1223 | delta = check_and_compute_delta(prev, val); |
1155 | local64_add(delta, &event->count); | 1224 | local64_add(delta, &event->count); |
1156 | 1225 | ||
1157 | /* | 1226 | /* |
@@ -1166,11 +1235,17 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
1166 | if (left <= 0) | 1235 | if (left <= 0) |
1167 | left = period; | 1236 | left = period; |
1168 | record = 1; | 1237 | record = 1; |
1238 | event->hw.last_period = event->hw.sample_period; | ||
1169 | } | 1239 | } |
1170 | if (left < 0x80000000LL) | 1240 | if (left < 0x80000000LL) |
1171 | val = 0x80000000LL - left; | 1241 | val = 0x80000000LL - left; |
1172 | } | 1242 | } |
1173 | 1243 | ||
1244 | write_pmc(event->hw.idx, val); | ||
1245 | local64_set(&event->hw.prev_count, val); | ||
1246 | local64_set(&event->hw.period_left, left); | ||
1247 | perf_event_update_userpage(event); | ||
1248 | |||
1174 | /* | 1249 | /* |
1175 | * Finally record data if requested. | 1250 | * Finally record data if requested. |
1176 | */ | 1251 | */ |
@@ -1183,23 +1258,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
1183 | if (event->attr.sample_type & PERF_SAMPLE_ADDR) | 1258 | if (event->attr.sample_type & PERF_SAMPLE_ADDR) |
1184 | perf_get_data_addr(regs, &data.addr); | 1259 | perf_get_data_addr(regs, &data.addr); |
1185 | 1260 | ||
1186 | if (perf_event_overflow(event, nmi, &data, regs)) { | 1261 | if (perf_event_overflow(event, nmi, &data, regs)) |
1187 | /* | 1262 | power_pmu_stop(event, 0); |
1188 | * Interrupts are coming too fast - throttle them | ||
1189 | * by setting the event to 0, so it will be | ||
1190 | * at least 2^30 cycles until the next interrupt | ||
1191 | * (assuming each event counts at most 2 counts | ||
1192 | * per cycle). | ||
1193 | */ | ||
1194 | val = 0; | ||
1195 | left = ~0ULL >> 1; | ||
1196 | } | ||
1197 | } | 1263 | } |
1198 | |||
1199 | write_pmc(event->hw.idx, val); | ||
1200 | local64_set(&event->hw.prev_count, val); | ||
1201 | local64_set(&event->hw.period_left, left); | ||
1202 | perf_event_update_userpage(event); | ||
1203 | } | 1264 | } |
1204 | 1265 | ||
1205 | /* | 1266 | /* |
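Annotation: record_and_restart() now reprograms the PMC before emitting the sample, and throttling collapses to power_pmu_stop(event, 0) instead of the old trick of programming a ~2^30-cycle period. The counter arithmetic is unchanged and worth a worked check: the 32-bit PMCs raise their exception on the carry into bit 31, so val = 0x80000000 - left overflows after exactly `left` events. Runnable restatement:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t left = 100000;                 /* sample period */
            uint32_t val = 0x80000000u - left;      /* written to the PMC */

            assert(val == 0x7FFE7960u);
            assert((uint32_t)(val + left) == 0x80000000u); /* carry fires */
            return 0;
    }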
@@ -1231,6 +1292,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) | |||
1231 | return ip; | 1292 | return ip; |
1232 | } | 1293 | } |
1233 | 1294 | ||
1295 | static bool pmc_overflow(unsigned long val) | ||
1296 | { | ||
1297 | if ((int)val < 0) | ||
1298 | return true; | ||
1299 | |||
1300 | /* | ||
1301 | * Events on POWER7 can roll back if a speculative event doesn't | ||
1302 | * eventually complete. Unfortunately in some rare cases they will | ||
1303 | * raise a performance monitor exception. We need to catch this to | ||
1304 | * ensure we reset the PMC. In all cases the PMC will be 256 or less | ||
1305 | * cycles from overflow. | ||
1306 | * | ||
1307 | * We only do this if the first pass fails to find any overflowing | ||
1308 | * PMCs because a user might set a period of less than 256 and we | ||
1309 | * don't want to mistakenly reset them. | ||
1310 | */ | ||
1311 | if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) | ||
1312 | return true; | ||
1313 | |||
1314 | return false; | ||
1315 | } | ||
1316 | |||
1234 | /* | 1317 | /* |
1235 | * Performance monitor interrupt stuff | 1318 | * Performance monitor interrupt stuff |
1236 | */ | 1319 | */ |
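Annotation: pmc_overflow() is the companion to the check_and_compute_delta() call above; both defend against POWER7 PMCs that speculatively pass 0x80000000 and roll back, sometimes leaving a stale exception behind. A runnable userspace restatement of the window test (the 256-cycle bound comes straight from the comment in the hunk):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool pmc_overflow_p7(uint32_t val)
    {
            if ((int32_t)val < 0)                   /* bit 31 set: overflowed */
                    return true;
            return (0x80000000u - val) <= 256;      /* rolled back just short */
    }

    int main(void)
    {
            assert(pmc_overflow_p7(0x80000001u));   /* past the carry */
            assert(pmc_overflow_p7(0x7FFFFF05u));   /* 251 short: stale irq */
            assert(!pmc_overflow_p7(0x7FFF0000u));  /* still counting */
            return 0;
    }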
@@ -1278,7 +1361,7 @@ static void perf_event_interrupt(struct pt_regs *regs) | |||
1278 | if (is_limited_pmc(i + 1)) | 1361 | if (is_limited_pmc(i + 1)) |
1279 | continue; | 1362 | continue; |
1280 | val = read_pmc(i + 1); | 1363 | val = read_pmc(i + 1); |
1281 | if ((int)val < 0) | 1364 | if (pmc_overflow(val)) |
1282 | write_pmc(i + 1, 0); | 1365 | write_pmc(i + 1, 0); |
1283 | } | 1366 | } |
1284 | } | 1367 | } |
@@ -1342,6 +1425,7 @@ int register_power_pmu(struct power_pmu *pmu) | |||
1342 | freeze_events_kernel = MMCR0_FCHV; | 1425 | freeze_events_kernel = MMCR0_FCHV; |
1343 | #endif /* CONFIG_PPC64 */ | 1426 | #endif /* CONFIG_PPC64 */ |
1344 | 1427 | ||
1428 | perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW); | ||
1345 | perf_cpu_notifier(power_pmu_notifier); | 1429 | perf_cpu_notifier(power_pmu_notifier); |
1346 | 1430 | ||
1347 | return 0; | 1431 | return 0; |
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c index 1ba45471ae43..b0dc8f7069cd 100644 --- a/arch/powerpc/kernel/perf_event_fsl_emb.c +++ b/arch/powerpc/kernel/perf_event_fsl_emb.c | |||
@@ -156,6 +156,9 @@ static void fsl_emb_pmu_read(struct perf_event *event) | |||
156 | { | 156 | { |
157 | s64 val, delta, prev; | 157 | s64 val, delta, prev; |
158 | 158 | ||
159 | if (event->hw.state & PERF_HES_STOPPED) | ||
160 | return; | ||
161 | |||
159 | /* | 162 | /* |
160 | * Performance monitor interrupts come even when interrupts | 163 | * Performance monitor interrupts come even when interrupts |
161 | * are soft-disabled, as long as interrupts are hard-enabled. | 164 | * are soft-disabled, as long as interrupts are hard-enabled. |
@@ -177,7 +180,7 @@ static void fsl_emb_pmu_read(struct perf_event *event) | |||
177 | * Disable all events to prevent PMU interrupts and to allow | 180 | * Disable all events to prevent PMU interrupts and to allow |
178 | * events to be added or removed. | 181 | * events to be added or removed. |
179 | */ | 182 | */ |
180 | void hw_perf_disable(void) | 183 | static void fsl_emb_pmu_disable(struct pmu *pmu) |
181 | { | 184 | { |
182 | struct cpu_hw_events *cpuhw; | 185 | struct cpu_hw_events *cpuhw; |
183 | unsigned long flags; | 186 | unsigned long flags; |
@@ -216,7 +219,7 @@ void hw_perf_disable(void) | |||
216 | * If we were previously disabled and events were added, then | 219 | * If we were previously disabled and events were added, then |
217 | * put the new config on the PMU. | 220 | * put the new config on the PMU. |
218 | */ | 221 | */ |
219 | void hw_perf_enable(void) | 222 | static void fsl_emb_pmu_enable(struct pmu *pmu) |
220 | { | 223 | { |
221 | struct cpu_hw_events *cpuhw; | 224 | struct cpu_hw_events *cpuhw; |
222 | unsigned long flags; | 225 | unsigned long flags; |
@@ -262,8 +265,8 @@ static int collect_events(struct perf_event *group, int max_count, | |||
262 | return n; | 265 | return n; |
263 | } | 266 | } |
264 | 267 | ||
265 | /* perf must be disabled, context locked on entry */ | 268 | /* context locked on entry */ |
266 | static int fsl_emb_pmu_enable(struct perf_event *event) | 269 | static int fsl_emb_pmu_add(struct perf_event *event, int flags) |
267 | { | 270 | { |
268 | struct cpu_hw_events *cpuhw; | 271 | struct cpu_hw_events *cpuhw; |
269 | int ret = -EAGAIN; | 272 | int ret = -EAGAIN; |
@@ -271,6 +274,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event) | |||
271 | u64 val; | 274 | u64 val; |
272 | int i; | 275 | int i; |
273 | 276 | ||
277 | perf_pmu_disable(event->pmu); | ||
274 | cpuhw = &get_cpu_var(cpu_hw_events); | 278 | cpuhw = &get_cpu_var(cpu_hw_events); |
275 | 279 | ||
276 | if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) | 280 | if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) |
@@ -301,6 +305,12 @@ static int fsl_emb_pmu_enable(struct perf_event *event) | |||
301 | val = 0x80000000L - left; | 305 | val = 0x80000000L - left; |
302 | } | 306 | } |
303 | local64_set(&event->hw.prev_count, val); | 307 | local64_set(&event->hw.prev_count, val); |
308 | |||
309 | if (!(flags & PERF_EF_START)) { | ||
310 | event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
311 | val = 0; | ||
312 | } | ||
313 | |||
304 | write_pmc(i, val); | 314 | write_pmc(i, val); |
305 | perf_event_update_userpage(event); | 315 | perf_event_update_userpage(event); |
306 | 316 | ||
@@ -310,15 +320,17 @@ static int fsl_emb_pmu_enable(struct perf_event *event) | |||
310 | ret = 0; | 320 | ret = 0; |
311 | out: | 321 | out: |
312 | put_cpu_var(cpu_hw_events); | 322 | put_cpu_var(cpu_hw_events); |
323 | perf_pmu_enable(event->pmu); | ||
313 | return ret; | 324 | return ret; |
314 | } | 325 | } |
315 | 326 | ||
316 | /* perf must be disabled, context locked on entry */ | 327 | /* context locked on entry */ |
317 | static void fsl_emb_pmu_disable(struct perf_event *event) | 328 | static void fsl_emb_pmu_del(struct perf_event *event, int flags) |
318 | { | 329 | { |
319 | struct cpu_hw_events *cpuhw; | 330 | struct cpu_hw_events *cpuhw; |
320 | int i = event->hw.idx; | 331 | int i = event->hw.idx; |
321 | 332 | ||
333 | perf_pmu_disable(event->pmu); | ||
322 | if (i < 0) | 334 | if (i < 0) |
323 | goto out; | 335 | goto out; |
324 | 336 | ||
@@ -346,44 +358,57 @@ static void fsl_emb_pmu_disable(struct perf_event *event) | |||
346 | cpuhw->n_events--; | 358 | cpuhw->n_events--; |
347 | 359 | ||
348 | out: | 360 | out: |
361 | perf_pmu_enable(event->pmu); | ||
349 | put_cpu_var(cpu_hw_events); | 362 | put_cpu_var(cpu_hw_events); |
350 | } | 363 | } |
351 | 364 | ||
352 | /* | 365 | static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags) |
353 | * Re-enable interrupts on a event after they were throttled | ||
354 | * because they were coming too fast. | ||
355 | * | ||
356 | * Context is locked on entry, but perf is not disabled. | ||
357 | */ | ||
358 | static void fsl_emb_pmu_unthrottle(struct perf_event *event) | ||
359 | { | 366 | { |
360 | s64 val, left; | ||
361 | unsigned long flags; | 367 | unsigned long flags; |
368 | s64 left; | ||
362 | 369 | ||
363 | if (event->hw.idx < 0 || !event->hw.sample_period) | 370 | if (event->hw.idx < 0 || !event->hw.sample_period) |
364 | return; | 371 | return; |
372 | |||
373 | if (!(event->hw.state & PERF_HES_STOPPED)) | ||
374 | return; | ||
375 | |||
376 | if (ef_flags & PERF_EF_RELOAD) | ||
377 | WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); | ||
378 | |||
365 | local_irq_save(flags); | 379 | local_irq_save(flags); |
366 | perf_disable(); | 380 | perf_pmu_disable(event->pmu); |
367 | fsl_emb_pmu_read(event); | 381 | |
368 | left = event->hw.sample_period; | 382 | event->hw.state = 0; |
369 | event->hw.last_period = left; | 383 | left = local64_read(&event->hw.period_left); |
370 | val = 0; | 384 | write_pmc(event->hw.idx, left); |
371 | if (left < 0x80000000L) | 385 | |
372 | val = 0x80000000L - left; | ||
373 | write_pmc(event->hw.idx, val); | ||
374 | local64_set(&event->hw.prev_count, val); | ||
375 | local64_set(&event->hw.period_left, left); | ||
376 | perf_event_update_userpage(event); | 386 | perf_event_update_userpage(event); |
377 | perf_enable(); | 387 | perf_pmu_enable(event->pmu); |
378 | local_irq_restore(flags); | 388 | local_irq_restore(flags); |
379 | } | 389 | } |
380 | 390 | ||
381 | static struct pmu fsl_emb_pmu = { | 391 | static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags) |
382 | .enable = fsl_emb_pmu_enable, | 392 | { |
383 | .disable = fsl_emb_pmu_disable, | 393 | unsigned long flags; |
384 | .read = fsl_emb_pmu_read, | 394 | |
385 | .unthrottle = fsl_emb_pmu_unthrottle, | 395 | if (event->hw.idx < 0 || !event->hw.sample_period) |
386 | }; | 396 | return; |
397 | |||
398 | if (event->hw.state & PERF_HES_STOPPED) | ||
399 | return; | ||
400 | |||
401 | local_irq_save(flags); | ||
402 | perf_pmu_disable(event->pmu); | ||
403 | |||
404 | fsl_emb_pmu_read(event); | ||
405 | event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
406 | write_pmc(event->hw.idx, 0); | ||
407 | |||
408 | perf_event_update_userpage(event); | ||
409 | perf_pmu_enable(event->pmu); | ||
410 | local_irq_restore(flags); | ||
411 | } | ||
387 | 412 | ||
388 | /* | 413 | /* |
389 | * Release the PMU if this is the last perf_event. | 414 | * Release the PMU if this is the last perf_event. |
@@ -428,7 +453,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp) | |||
428 | return 0; | 453 | return 0; |
429 | } | 454 | } |
430 | 455 | ||
431 | const struct pmu *hw_perf_event_init(struct perf_event *event) | 456 | static int fsl_emb_pmu_event_init(struct perf_event *event) |
432 | { | 457 | { |
433 | u64 ev; | 458 | u64 ev; |
434 | struct perf_event *events[MAX_HWEVENTS]; | 459 | struct perf_event *events[MAX_HWEVENTS]; |
@@ -441,14 +466,14 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
441 | case PERF_TYPE_HARDWARE: | 466 | case PERF_TYPE_HARDWARE: |
442 | ev = event->attr.config; | 467 | ev = event->attr.config; |
443 | if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) | 468 | if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) |
444 | return ERR_PTR(-EOPNOTSUPP); | 469 | return -EOPNOTSUPP; |
445 | ev = ppmu->generic_events[ev]; | 470 | ev = ppmu->generic_events[ev]; |
446 | break; | 471 | break; |
447 | 472 | ||
448 | case PERF_TYPE_HW_CACHE: | 473 | case PERF_TYPE_HW_CACHE: |
449 | err = hw_perf_cache_event(event->attr.config, &ev); | 474 | err = hw_perf_cache_event(event->attr.config, &ev); |
450 | if (err) | 475 | if (err) |
451 | return ERR_PTR(err); | 476 | return err; |
452 | break; | 477 | break; |
453 | 478 | ||
454 | case PERF_TYPE_RAW: | 479 | case PERF_TYPE_RAW: |
@@ -456,12 +481,12 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
456 | break; | 481 | break; |
457 | 482 | ||
458 | default: | 483 | default: |
459 | return ERR_PTR(-EINVAL); | 484 | return -ENOENT; |
460 | } | 485 | } |
461 | 486 | ||
462 | event->hw.config = ppmu->xlate_event(ev); | 487 | event->hw.config = ppmu->xlate_event(ev); |
463 | if (!(event->hw.config & FSL_EMB_EVENT_VALID)) | 488 | if (!(event->hw.config & FSL_EMB_EVENT_VALID)) |
464 | return ERR_PTR(-EINVAL); | 489 | return -EINVAL; |
465 | 490 | ||
466 | /* | 491 | /* |
467 | * If this is in a group, check if it can go on with all the | 492 | * If this is in a group, check if it can go on with all the |
@@ -473,7 +498,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
473 | n = collect_events(event->group_leader, | 498 | n = collect_events(event->group_leader, |
474 | ppmu->n_counter - 1, events); | 499 | ppmu->n_counter - 1, events); |
475 | if (n < 0) | 500 | if (n < 0) |
476 | return ERR_PTR(-EINVAL); | 501 | return -EINVAL; |
477 | } | 502 | } |
478 | 503 | ||
479 | if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) { | 504 | if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) { |
@@ -484,7 +509,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
484 | } | 509 | } |
485 | 510 | ||
486 | if (num_restricted >= ppmu->n_restricted) | 511 | if (num_restricted >= ppmu->n_restricted) |
487 | return ERR_PTR(-EINVAL); | 512 | return -EINVAL; |
488 | } | 513 | } |
489 | 514 | ||
490 | event->hw.idx = -1; | 515 | event->hw.idx = -1; |
@@ -497,7 +522,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
497 | if (event->attr.exclude_kernel) | 522 | if (event->attr.exclude_kernel) |
498 | event->hw.config_base |= PMLCA_FCS; | 523 | event->hw.config_base |= PMLCA_FCS; |
499 | if (event->attr.exclude_idle) | 524 | if (event->attr.exclude_idle) |
500 | return ERR_PTR(-ENOTSUPP); | 525 | return -ENOTSUPP; |
501 | 526 | ||
502 | event->hw.last_period = event->hw.sample_period; | 527 | event->hw.last_period = event->hw.sample_period; |
503 | local64_set(&event->hw.period_left, event->hw.last_period); | 528 | local64_set(&event->hw.period_left, event->hw.last_period); |
@@ -523,11 +548,20 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
523 | } | 548 | } |
524 | event->destroy = hw_perf_event_destroy; | 549 | event->destroy = hw_perf_event_destroy; |
525 | 550 | ||
526 | if (err) | 551 | return err; |
527 | return ERR_PTR(err); | ||
528 | return &fsl_emb_pmu; | ||
529 | } | 552 | } |
530 | 553 | ||
554 | static struct pmu fsl_emb_pmu = { | ||
555 | .pmu_enable = fsl_emb_pmu_enable, | ||
556 | .pmu_disable = fsl_emb_pmu_disable, | ||
557 | .event_init = fsl_emb_pmu_event_init, | ||
558 | .add = fsl_emb_pmu_add, | ||
559 | .del = fsl_emb_pmu_del, | ||
560 | .start = fsl_emb_pmu_start, | ||
561 | .stop = fsl_emb_pmu_stop, | ||
562 | .read = fsl_emb_pmu_read, | ||
563 | }; | ||
564 | |||
531 | /* | 565 | /* |
532 | * A counter has overflowed; update its count and record | 566 | * A counter has overflowed; update its count and record |
533 | * things if requested. Note that interrupts are hard-disabled | 567 | * things if requested. Note that interrupts are hard-disabled |
@@ -540,6 +574,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
540 | s64 prev, delta, left; | 574 | s64 prev, delta, left; |
541 | int record = 0; | 575 | int record = 0; |
542 | 576 | ||
577 | if (event->hw.state & PERF_HES_STOPPED) { | ||
578 | write_pmc(event->hw.idx, 0); | ||
579 | return; | ||
580 | } | ||
581 | |||
543 | /* we don't have to worry about interrupts here */ | 582 | /* we don't have to worry about interrupts here */ |
544 | prev = local64_read(&event->hw.prev_count); | 583 | prev = local64_read(&event->hw.prev_count); |
545 | delta = (val - prev) & 0xfffffffful; | 584 | delta = (val - prev) & 0xfffffffful; |
@@ -557,11 +596,17 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
557 | if (left <= 0) | 596 | if (left <= 0) |
558 | left = period; | 597 | left = period; |
559 | record = 1; | 598 | record = 1; |
599 | event->hw.last_period = event->hw.sample_period; | ||
560 | } | 600 | } |
561 | if (left < 0x80000000LL) | 601 | if (left < 0x80000000LL) |
562 | val = 0x80000000LL - left; | 602 | val = 0x80000000LL - left; |
563 | } | 603 | } |
564 | 604 | ||
605 | write_pmc(event->hw.idx, val); | ||
606 | local64_set(&event->hw.prev_count, val); | ||
607 | local64_set(&event->hw.period_left, left); | ||
608 | perf_event_update_userpage(event); | ||
609 | |||
565 | /* | 610 | /* |
566 | * Finally record data if requested. | 611 | * Finally record data if requested. |
567 | */ | 612 | */ |
@@ -571,23 +616,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
571 | perf_sample_data_init(&data, 0); | 616 | perf_sample_data_init(&data, 0); |
572 | data.period = event->hw.last_period; | 617 | data.period = event->hw.last_period; |
573 | 618 | ||
574 | if (perf_event_overflow(event, nmi, &data, regs)) { | 619 | if (perf_event_overflow(event, nmi, &data, regs)) |
575 | /* | 620 | fsl_emb_pmu_stop(event, 0); |
576 | * Interrupts are coming too fast - throttle them | ||
577 | * by setting the event to 0, so it will be | ||
578 | * at least 2^30 cycles until the next interrupt | ||
579 | * (assuming each event counts at most 2 counts | ||
580 | * per cycle). | ||
581 | */ | ||
582 | val = 0; | ||
583 | left = ~0ULL >> 1; | ||
584 | } | ||
585 | } | 621 | } |
586 | |||
587 | write_pmc(event->hw.idx, val); | ||
588 | local64_set(&event->hw.prev_count, val); | ||
589 | local64_set(&event->hw.period_left, left); | ||
590 | perf_event_update_userpage(event); | ||
591 | } | 622 | } |
592 | 623 | ||
593 | static void perf_event_interrupt(struct pt_regs *regs) | 624 | static void perf_event_interrupt(struct pt_regs *regs) |
@@ -651,5 +682,7 @@ int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu) | |||
651 | pr_info("%s performance monitor hardware support registered\n", | 682 | pr_info("%s performance monitor hardware support registered\n", |
652 | pmu->name); | 683 | pmu->name); |
653 | 684 | ||
685 | perf_pmu_register(&fsl_emb_pmu, "cpu", PERF_TYPE_RAW); | ||
686 | |||
654 | return 0; | 687 | return 0; |
655 | } | 688 | } |
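Annotation: both backends now register through perf_pmu_register(..., "cpu", PERF_TYPE_RAW), so a raw hardware event is reachable from userspace in the usual way. Runnable sketch; the 0x1e config is an arbitrary placeholder, not a real event code for either PMU:

    #include <linux/perf_event.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdio.h>

    int main(void)
    {
            struct perf_event_attr attr;
            long long count;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.type = PERF_TYPE_RAW;
            attr.size = sizeof(attr);
            attr.config = 0x1e;             /* placeholder raw event code */
            attr.disabled = 1;

            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0) { perror("perf_event_open"); return 1; }

            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
            /* ... workload under measurement ... */
            ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
            read(fd, &count, sizeof(count));
            printf("raw event count: %lld\n", count);
            return 0;
    }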
diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c index 2a361cdda635..ead8b3c2649e 100644 --- a/arch/powerpc/kernel/power4-pmu.c +++ b/arch/powerpc/kernel/power4-pmu.c | |||
@@ -613,4 +613,4 @@ static int init_power4_pmu(void) | |||
613 | return register_power_pmu(&power4_pmu); | 613 | return register_power_pmu(&power4_pmu); |
614 | } | 614 | } |
615 | 615 | ||
616 | arch_initcall(init_power4_pmu); | 616 | early_initcall(init_power4_pmu); |
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c index 199de527d411..eca0ac595cb6 100644 --- a/arch/powerpc/kernel/power5+-pmu.c +++ b/arch/powerpc/kernel/power5+-pmu.c | |||
@@ -682,4 +682,4 @@ static int init_power5p_pmu(void) | |||
682 | return register_power_pmu(&power5p_pmu); | 682 | return register_power_pmu(&power5p_pmu); |
683 | } | 683 | } |
684 | 684 | ||
685 | arch_initcall(init_power5p_pmu); | 685 | early_initcall(init_power5p_pmu); |
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c index 98b6a729a9dd..d5ff0f64a5e6 100644 --- a/arch/powerpc/kernel/power5-pmu.c +++ b/arch/powerpc/kernel/power5-pmu.c | |||
@@ -621,4 +621,4 @@ static int init_power5_pmu(void) | |||
621 | return register_power_pmu(&power5_pmu); | 621 | return register_power_pmu(&power5_pmu); |
622 | } | 622 | } |
623 | 623 | ||
624 | arch_initcall(init_power5_pmu); | 624 | early_initcall(init_power5_pmu); |
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c index 84a607bda8fb..31603927e376 100644 --- a/arch/powerpc/kernel/power6-pmu.c +++ b/arch/powerpc/kernel/power6-pmu.c | |||
@@ -544,4 +544,4 @@ static int init_power6_pmu(void) | |||
544 | return register_power_pmu(&power6_pmu); | 544 | return register_power_pmu(&power6_pmu); |
545 | } | 545 | } |
546 | 546 | ||
547 | arch_initcall(init_power6_pmu); | 547 | early_initcall(init_power6_pmu); |
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c index 852f7b7f6b40..593740fcb799 100644 --- a/arch/powerpc/kernel/power7-pmu.c +++ b/arch/powerpc/kernel/power7-pmu.c | |||
@@ -369,4 +369,4 @@ static int init_power7_pmu(void) | |||
369 | return register_power_pmu(&power7_pmu); | 369 | return register_power_pmu(&power7_pmu); |
370 | } | 370 | } |
371 | 371 | ||
372 | arch_initcall(init_power7_pmu); | 372 | early_initcall(init_power7_pmu); |
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c index 8eff48e20dba..9a6e093858fe 100644 --- a/arch/powerpc/kernel/ppc970-pmu.c +++ b/arch/powerpc/kernel/ppc970-pmu.c | |||
@@ -169,9 +169,11 @@ static int p970_marked_instr_event(u64 event) | |||
169 | switch (unit) { | 169 | switch (unit) { |
170 | case PM_VPU: | 170 | case PM_VPU: |
171 | mask = 0x4c; /* byte 0 bits 2,3,6 */ | 171 | mask = 0x4c; /* byte 0 bits 2,3,6 */ |
172 | break; | ||
172 | case PM_LSU0: | 173 | case PM_LSU0: |
173 | /* byte 2 bits 0,2,3,4,6; all of byte 1 */ | 174 | /* byte 2 bits 0,2,3,4,6; all of byte 1 */ |
174 | mask = 0x085dff00; | 175 | mask = 0x085dff00; |
176 | break; | ||
175 | case PM_LSU1L: | 177 | case PM_LSU1L: |
176 | mask = 0x50 << 24; /* byte 3 bits 4,6 */ | 178 | mask = 0x50 << 24; /* byte 3 bits 4,6 */ |
177 | break; | 179 | break; |
@@ -492,4 +494,4 @@ static int init_ppc970_pmu(void) | |||
492 | return register_power_pmu(&ppc970_pmu); | 494 | return register_power_pmu(&ppc970_pmu); |
493 | } | 495 | } |
494 | 496 | ||
495 | arch_initcall(init_ppc970_pmu); | 497 | early_initcall(init_ppc970_pmu); |
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index ab3e392ac63c..7d28f540200c 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c | |||
@@ -54,7 +54,6 @@ extern void single_step_exception(struct pt_regs *regs); | |||
54 | extern int sys_sigreturn(struct pt_regs *regs); | 54 | extern int sys_sigreturn(struct pt_regs *regs); |
55 | 55 | ||
56 | EXPORT_SYMBOL(clear_pages); | 56 | EXPORT_SYMBOL(clear_pages); |
57 | EXPORT_SYMBOL(copy_page); | ||
58 | EXPORT_SYMBOL(ISA_DMA_THRESHOLD); | 57 | EXPORT_SYMBOL(ISA_DMA_THRESHOLD); |
59 | EXPORT_SYMBOL(DMA_MODE_READ); | 58 | EXPORT_SYMBOL(DMA_MODE_READ); |
60 | EXPORT_SYMBOL(DMA_MODE_WRITE); | 59 | EXPORT_SYMBOL(DMA_MODE_WRITE); |
@@ -88,9 +87,7 @@ EXPORT_SYMBOL(__copy_tofrom_user); | |||
88 | EXPORT_SYMBOL(__clear_user); | 87 | EXPORT_SYMBOL(__clear_user); |
89 | EXPORT_SYMBOL(__strncpy_from_user); | 88 | EXPORT_SYMBOL(__strncpy_from_user); |
90 | EXPORT_SYMBOL(__strnlen_user); | 89 | EXPORT_SYMBOL(__strnlen_user); |
91 | #ifdef CONFIG_PPC64 | 90 | EXPORT_SYMBOL(copy_page); |
92 | EXPORT_SYMBOL(copy_4K_page); | ||
93 | #endif | ||
94 | 91 | ||
95 | #if defined(CONFIG_PCI) && defined(CONFIG_PPC32) | 92 | #if defined(CONFIG_PCI) && defined(CONFIG_PPC32) |
96 | EXPORT_SYMBOL(isa_io_base); | 93 | EXPORT_SYMBOL(isa_io_base); |
@@ -186,3 +183,10 @@ EXPORT_SYMBOL(__mtdcr); | |||
186 | EXPORT_SYMBOL(__mfdcr); | 183 | EXPORT_SYMBOL(__mfdcr); |
187 | #endif | 184 | #endif |
188 | EXPORT_SYMBOL(empty_zero_page); | 185 | EXPORT_SYMBOL(empty_zero_page); |
186 | |||
187 | #ifdef CONFIG_PPC64 | ||
188 | EXPORT_SYMBOL(__arch_hweight8); | ||
189 | EXPORT_SYMBOL(__arch_hweight16); | ||
190 | EXPORT_SYMBOL(__arch_hweight32); | ||
191 | EXPORT_SYMBOL(__arch_hweight64); | ||
192 | #endif | ||
diff --git a/arch/powerpc/kernel/ppc_save_regs.S b/arch/powerpc/kernel/ppc_save_regs.S index 5113bd2285e1..1b1787d52896 100644 --- a/arch/powerpc/kernel/ppc_save_regs.S +++ b/arch/powerpc/kernel/ppc_save_regs.S | |||
@@ -11,10 +11,11 @@ | |||
11 | #include <asm/processor.h> | 11 | #include <asm/processor.h> |
12 | #include <asm/ppc_asm.h> | 12 | #include <asm/ppc_asm.h> |
13 | #include <asm/asm-offsets.h> | 13 | #include <asm/asm-offsets.h> |
14 | #include <asm/ptrace.h> | ||
14 | 15 | ||
15 | /* | 16 | /* |
16 | * Grab the register values as they are now. | 17 | * Grab the register values as they are now. |
17 | * This won't do a particularily good job because we really | 18 | * This won't do a particularly good job because we really |
18 | * want our caller's caller's registers, and our caller has | 19 | * want our caller's caller's registers, and our caller has |
19 | * already executed its prologue. | 20 | * already executed its prologue. |
20 | * ToDo: We could reach back into the caller's save area to do | 21 | * ToDo: We could reach back into the caller's save area to do |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index b1c648a36b03..91e52df3d81d 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -353,6 +353,7 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread) | |||
353 | prime_debug_regs(new_thread); | 353 | prime_debug_regs(new_thread); |
354 | } | 354 | } |
355 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ | 355 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ |
356 | #ifndef CONFIG_HAVE_HW_BREAKPOINT | ||
356 | static void set_debug_reg_defaults(struct thread_struct *thread) | 357 | static void set_debug_reg_defaults(struct thread_struct *thread) |
357 | { | 358 | { |
358 | if (thread->dabr) { | 359 | if (thread->dabr) { |
@@ -360,6 +361,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread) | |||
360 | set_dabr(0); | 361 | set_dabr(0); |
361 | } | 362 | } |
362 | } | 363 | } |
364 | #endif /* !CONFIG_HAVE_HW_BREAKPOINT */ | ||
363 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ | 365 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ |
364 | 366 | ||
365 | int set_dabr(unsigned long dabr) | 367 | int set_dabr(unsigned long dabr) |
@@ -393,6 +395,9 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
393 | struct thread_struct *new_thread, *old_thread; | 395 | struct thread_struct *new_thread, *old_thread; |
394 | unsigned long flags; | 396 | unsigned long flags; |
395 | struct task_struct *last; | 397 | struct task_struct *last; |
398 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
399 | struct ppc64_tlb_batch *batch; | ||
400 | #endif | ||
396 | 401 | ||
397 | #ifdef CONFIG_SMP | 402 | #ifdef CONFIG_SMP |
398 | /* avoid complexity of lazy save/restore of fpu | 403 | /* avoid complexity of lazy save/restore of fpu |
@@ -511,13 +516,22 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
511 | old_thread->accum_tb += (current_tb - start_tb); | 516 | old_thread->accum_tb += (current_tb - start_tb); |
512 | new_thread->start_tb = current_tb; | 517 | new_thread->start_tb = current_tb; |
513 | } | 518 | } |
514 | #endif | 519 | #endif /* CONFIG_PPC64 */ |
520 | |||
521 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
522 | batch = &__get_cpu_var(ppc64_tlb_batch); | ||
523 | if (batch->active) { | ||
524 | current_thread_info()->local_flags |= _TLF_LAZY_MMU; | ||
525 | if (batch->index) | ||
526 | __flush_tlb_pending(batch); | ||
527 | batch->active = 0; | ||
528 | } | ||
529 | #endif /* CONFIG_PPC_BOOK3S_64 */ | ||
515 | 530 | ||
516 | local_irq_save(flags); | 531 | local_irq_save(flags); |
517 | 532 | ||
518 | account_system_vtime(current); | 533 | account_system_vtime(current); |
519 | account_process_vtime(current); | 534 | account_process_vtime(current); |
520 | calculate_steal_time(); | ||
521 | 535 | ||
522 | /* | 536 | /* |
523 | * We can't take a PMU exception inside _switch() since there is a | 537 | * We can't take a PMU exception inside _switch() since there is a |
@@ -527,6 +541,14 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
527 | hard_irq_disable(); | 541 | hard_irq_disable(); |
528 | last = _switch(old_thread, new_thread); | 542 | last = _switch(old_thread, new_thread); |
529 | 543 | ||
544 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
545 | if (current_thread_info()->local_flags & _TLF_LAZY_MMU) { | ||
546 | current_thread_info()->local_flags &= ~_TLF_LAZY_MMU; | ||
547 | batch = &__get_cpu_var(ppc64_tlb_batch); | ||
548 | batch->active = 1; | ||
549 | } | ||
550 | #endif /* CONFIG_PPC_BOOK3S_64 */ | ||
551 | |||
530 | local_irq_restore(flags); | 552 | local_irq_restore(flags); |
531 | 553 | ||
532 | return last; | 554 | return last; |
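Annotation: the Book3S-64 hunks bracket _switch() because the outgoing task may sit inside a lazy-MMU section, where hash-PTE invalidations queue in the per-cpu ppc64_tlb_batch rather than being issued immediately; the batch is drained and suspended before the stack switch, and _TLF_LAZY_MMU resumes it afterwards. For context, roughly how the batch is normally opened and closed (a sketch in the spirit of arch/powerpc/include/asm/tlbflush.h, not the exact source):

    static inline void arch_enter_lazy_mmu_mode(void)
    {
            struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

            batch->active = 1;              /* queue invalidations from now on */
    }

    static inline void arch_leave_lazy_mmu_mode(void)
    {
            struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

            if (batch->index)
                    __flush_tlb_pending(batch);     /* issue what queued up */
            batch->active = 0;
    }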
@@ -632,7 +654,7 @@ void show_regs(struct pt_regs * regs) | |||
632 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | 654 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
633 | printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr); | 655 | printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr); |
634 | #else | 656 | #else |
635 | printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr); | 657 | printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr); |
636 | #endif | 658 | #endif |
637 | printk("TASK = %p[%d] '%s' THREAD: %p", | 659 | printk("TASK = %p[%d] '%s' THREAD: %p", |
638 | current, task_pid_nr(current), current->comm, task_thread_info(current)); | 660 | current, task_pid_nr(current), current->comm, task_thread_info(current)); |
@@ -671,11 +693,11 @@ void flush_thread(void) | |||
671 | { | 693 | { |
672 | discard_lazy_cpu_state(); | 694 | discard_lazy_cpu_state(); |
673 | 695 | ||
674 | #ifdef CONFIG_HAVE_HW_BREAKPOINTS | 696 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
675 | flush_ptrace_hw_breakpoint(current); | 697 | flush_ptrace_hw_breakpoint(current); |
676 | #else /* CONFIG_HAVE_HW_BREAKPOINTS */ | 698 | #else /* CONFIG_HAVE_HW_BREAKPOINT */ |
677 | set_debug_reg_defaults(¤t->thread); | 699 | set_debug_reg_defaults(¤t->thread); |
678 | #endif /* CONFIG_HAVE_HW_BREAKPOINTS */ | 700 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |
679 | } | 701 | } |
680 | 702 | ||
681 | void | 703 | void |
@@ -701,6 +723,8 @@ void prepare_to_copy(struct task_struct *tsk) | |||
701 | /* | 723 | /* |
702 | * Copy a thread. | 724 |
703 | */ | 725 | */ |
726 | extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */ | ||
727 | |||
704 | int copy_thread(unsigned long clone_flags, unsigned long usp, | 728 | int copy_thread(unsigned long clone_flags, unsigned long usp, |
705 | unsigned long unused, struct task_struct *p, | 729 | unsigned long unused, struct task_struct *p, |
706 | struct pt_regs *regs) | 730 | struct pt_regs *regs) |
@@ -754,11 +778,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
754 | _ALIGN_UP(sizeof(struct thread_info), 16); | 778 | _ALIGN_UP(sizeof(struct thread_info), 16); |
755 | 779 | ||
756 | #ifdef CONFIG_PPC_STD_MMU_64 | 780 | #ifdef CONFIG_PPC_STD_MMU_64 |
757 | if (cpu_has_feature(CPU_FTR_SLB)) { | 781 | if (mmu_has_feature(MMU_FTR_SLB)) { |
758 | unsigned long sp_vsid; | 782 | unsigned long sp_vsid; |
759 | unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp; | 783 | unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp; |
760 | 784 | ||
761 | if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) | 785 | if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) |
762 | sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T) | 786 | sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T) |
763 | << SLB_VSID_SHIFT_1T; | 787 | << SLB_VSID_SHIFT_1T; |
764 | else | 788 | else |
@@ -768,6 +792,20 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
768 | p->thread.ksp_vsid = sp_vsid; | 792 | p->thread.ksp_vsid = sp_vsid; |
769 | } | 793 | } |
770 | #endif /* CONFIG_PPC_STD_MMU_64 */ | 794 | #endif /* CONFIG_PPC_STD_MMU_64 */ |
795 | #ifdef CONFIG_PPC64 | ||
796 | if (cpu_has_feature(CPU_FTR_DSCR)) { | ||
797 | if (current->thread.dscr_inherit) { | ||
798 | p->thread.dscr_inherit = 1; | ||
799 | p->thread.dscr = current->thread.dscr; | ||
800 | } else if (0 != dscr_default) { | ||
801 | p->thread.dscr_inherit = 1; | ||
802 | p->thread.dscr = dscr_default; | ||
803 | } else { | ||
804 | p->thread.dscr_inherit = 0; | ||
805 | p->thread.dscr = 0; | ||
806 | } | ||
807 | } | ||
808 | #endif | ||
771 | 809 | ||
772 | /* | 810 | /* |
773 | * The PPC64 ABI makes use of a TOC to contain function | 811 | * The PPC64 ABI makes use of a TOC to contain function |
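Annotation: the DSCR (Data Stream Control Register, prefetch tuning) hunk gives copy_thread() a three-way inheritance rule, with dscr_default being the system-wide value settable via sysfs. Summarized (illustrative table):

    parent dscr_inherit   dscr_default   child gets
    1                     any            parent's dscr (inherit stays set)
    0                     non-zero       dscr_default (inherit becomes set)
    0                     zero           0 (inherit cleared)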
@@ -1217,11 +1255,11 @@ void __ppc64_runlatch_off(void) | |||
1217 | 1255 | ||
1218 | static struct kmem_cache *thread_info_cache; | 1256 | static struct kmem_cache *thread_info_cache; |
1219 | 1257 | ||
1220 | struct thread_info *alloc_thread_info(struct task_struct *tsk) | 1258 | struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node) |
1221 | { | 1259 | { |
1222 | struct thread_info *ti; | 1260 | struct thread_info *ti; |
1223 | 1261 | ||
1224 | ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL); | 1262 | ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node); |
1225 | if (unlikely(ti == NULL)) | 1263 | if (unlikely(ti == NULL)) |
1226 | return NULL; | 1264 | return NULL; |
1227 | #ifdef CONFIG_DEBUG_STACK_USAGE | 1265 | #ifdef CONFIG_DEBUG_STACK_USAGE |
@@ -1298,14 +1336,3 @@ unsigned long randomize_et_dyn(unsigned long base) | |||
1298 | 1336 | ||
1299 | return ret; | 1337 | return ret; |
1300 | } | 1338 | } |
1301 | |||
1302 | #ifdef CONFIG_SMP | ||
1303 | int arch_sd_sibling_asym_packing(void) | ||
1304 | { | ||
1305 | if (cpu_has_feature(CPU_FTR_ASYM_SMT)) { | ||
1306 | printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n"); | ||
1307 | return SD_ASYM_PACKING; | ||
1308 | } | ||
1309 | return 0; | ||
1310 | } | ||
1311 | #endif | ||
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index fed9bf6187d1..8c3112a57cf2 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c | |||
@@ -66,7 +66,9 @@ | |||
66 | int __initdata iommu_is_off; | 66 | int __initdata iommu_is_off; |
67 | int __initdata iommu_force_on; | 67 | int __initdata iommu_force_on; |
68 | unsigned long tce_alloc_start, tce_alloc_end; | 68 | unsigned long tce_alloc_start, tce_alloc_end; |
69 | u64 ppc64_rma_size; | ||
69 | #endif | 70 | #endif |
71 | static phys_addr_t first_memblock_size; | ||
70 | 72 | ||
71 | static int __init early_parse_mem(char *p) | 73 | static int __init early_parse_mem(char *p) |
72 | { | 74 | { |
@@ -80,11 +82,29 @@ static int __init early_parse_mem(char *p) | |||
80 | } | 82 | } |
81 | early_param("mem", early_parse_mem); | 83 | early_param("mem", early_parse_mem); |
82 | 84 | ||
85 | /* | ||
86 | * overlaps_initrd - check for overlap with page aligned extension of | ||
87 | * initrd. | ||
88 | */ | ||
89 | static inline int overlaps_initrd(unsigned long start, unsigned long size) | ||
90 | { | ||
91 | #ifdef CONFIG_BLK_DEV_INITRD | ||
92 | if (!initrd_start) | ||
93 | return 0; | ||
94 | |||
95 | return (start + size) > _ALIGN_DOWN(initrd_start, PAGE_SIZE) && | ||
96 | start <= _ALIGN_UP(initrd_end, PAGE_SIZE); | ||
97 | #else | ||
98 | return 0; | ||
99 | #endif | ||
100 | } | ||
101 | |||
83 | /** | 102 | /** |
84 | * move_device_tree - move tree to an unused area, if needed. | 103 | * move_device_tree - move tree to an unused area, if needed. |
85 | * | 104 | * |
86 | * The device tree may be allocated beyond our memory limit, or inside the | 105 | * The device tree may be allocated beyond our memory limit, or inside the |
87 | * crash kernel region for kdump. If so, move it out of the way. | 106 | * crash kernel region for kdump, or within the page aligned range of initrd. |
107 | * If so, move it out of the way. | ||
88 | */ | 108 | */ |
89 | static void __init move_device_tree(void) | 109 | static void __init move_device_tree(void) |
90 | { | 110 | { |
@@ -96,9 +116,10 @@ static void __init move_device_tree(void) | |||
96 | start = __pa(initial_boot_params); | 116 | start = __pa(initial_boot_params); |
97 | size = be32_to_cpu(initial_boot_params->totalsize); | 117 | size = be32_to_cpu(initial_boot_params->totalsize); |
98 | 118 | ||
99 | if ((memory_limit && (start + size) > memory_limit) || | 119 | if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) || |
100 | overlaps_crashkernel(start, size)) { | 120 | overlaps_crashkernel(start, size) || |
101 | p = __va(memblock_alloc_base(size, PAGE_SIZE, memblock.rmo_size)); | 121 | overlaps_initrd(start, size)) { |
122 | p = __va(memblock_alloc(size, PAGE_SIZE)); | ||
102 | memcpy(p, initial_boot_params, size); | 123 | memcpy(p, initial_boot_params, size); |
103 | initial_boot_params = (struct boot_param_header *)p; | 124 | initial_boot_params = (struct boot_param_header *)p; |
104 | DBG("Moved device tree to 0x%p\n", p); | 125 | DBG("Moved device tree to 0x%p\n", p); |
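Annotation: overlaps_initrd() (added in the earlier prom.c hunk) is an interval-intersection test against the initrd rounded out to page boundaries — note the conservative `start <=` at the aligned end. Runnable restatement with 4 KiB pages and made-up addresses:

    #include <assert.h>

    int main(void)
    {
            unsigned long initrd_start = 0x2000100, initrd_end = 0x20fff00;
            unsigned long lo = initrd_start & ~0xFFFUL;           /* 0x2000000 */
            unsigned long hi = (initrd_end + 0xFFF) & ~0xFFFUL;   /* 0x2100000 */
            unsigned long start = 0x1FFF800, size = 0x1000;

            /* device tree ends inside the initrd's first (aligned) page */
            assert(start + size > lo && start <= hi);

            /* starting exactly at the aligned end is still flagged (<=) */
            start = 0x2100000;
            assert(start + size > lo && start <= hi);
            return 0;
    }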
@@ -122,18 +143,19 @@ static void __init move_device_tree(void) | |||
122 | */ | 143 | */ |
123 | static struct ibm_pa_feature { | 144 | static struct ibm_pa_feature { |
124 | unsigned long cpu_features; /* CPU_FTR_xxx bit */ | 145 | unsigned long cpu_features; /* CPU_FTR_xxx bit */ |
146 | unsigned long mmu_features; /* MMU_FTR_xxx bit */ | ||
125 | unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */ | 147 | unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */ |
126 | unsigned char pabyte; /* byte number in ibm,pa-features */ | 148 | unsigned char pabyte; /* byte number in ibm,pa-features */ |
127 | unsigned char pabit; /* bit number (big-endian) */ | 149 | unsigned char pabit; /* bit number (big-endian) */ |
128 | unsigned char invert; /* if 1, pa bit set => clear feature */ | 150 | unsigned char invert; /* if 1, pa bit set => clear feature */ |
129 | } ibm_pa_features[] __initdata = { | 151 | } ibm_pa_features[] __initdata = { |
130 | {0, PPC_FEATURE_HAS_MMU, 0, 0, 0}, | 152 | {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0}, |
131 | {0, PPC_FEATURE_HAS_FPU, 0, 1, 0}, | 153 | {0, 0, PPC_FEATURE_HAS_FPU, 0, 1, 0}, |
132 | {CPU_FTR_SLB, 0, 0, 2, 0}, | 154 | {0, MMU_FTR_SLB, 0, 0, 2, 0}, |
133 | {CPU_FTR_CTRL, 0, 0, 3, 0}, | 155 | {CPU_FTR_CTRL, 0, 0, 0, 3, 0}, |
134 | {CPU_FTR_NOEXECUTE, 0, 0, 6, 0}, | 156 | {CPU_FTR_NOEXECUTE, 0, 0, 0, 6, 0}, |
135 | {CPU_FTR_NODSISRALIGN, 0, 1, 1, 1}, | 157 | {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1}, |
136 | {CPU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0}, | 158 | {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0}, |
137 | {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0}, | 159 | {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0}, |
138 | }; | 160 | }; |
139 | 161 | ||
@@ -165,9 +187,11 @@ static void __init scan_features(unsigned long node, unsigned char *ftrs, | |||
165 | if (bit ^ fp->invert) { | 187 | if (bit ^ fp->invert) { |
166 | cur_cpu_spec->cpu_features |= fp->cpu_features; | 188 | cur_cpu_spec->cpu_features |= fp->cpu_features; |
167 | cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs; | 189 | cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs; |
190 | cur_cpu_spec->mmu_features |= fp->mmu_features; | ||
168 | } else { | 191 | } else { |
169 | cur_cpu_spec->cpu_features &= ~fp->cpu_features; | 192 | cur_cpu_spec->cpu_features &= ~fp->cpu_features; |
170 | cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs; | 193 | cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs; |
194 | cur_cpu_spec->mmu_features &= ~fp->mmu_features; | ||
171 | } | 195 | } |
172 | } | 196 | } |
173 | } | 197 | } |
@@ -267,13 +291,13 @@ static int __init early_init_dt_scan_cpus(unsigned long node, | |||
267 | const char *uname, int depth, | 291 | const char *uname, int depth, |
268 | void *data) | 292 | void *data) |
269 | { | 293 | { |
270 | static int logical_cpuid = 0; | ||
271 | char *type = of_get_flat_dt_prop(node, "device_type", NULL); | 294 | char *type = of_get_flat_dt_prop(node, "device_type", NULL); |
272 | const u32 *prop; | 295 | const u32 *prop; |
273 | const u32 *intserv; | 296 | const u32 *intserv; |
274 | int i, nthreads; | 297 | int i, nthreads; |
275 | unsigned long len; | 298 | unsigned long len; |
276 | int found = 0; | 299 | int found = -1; |
300 | int found_thread = 0; | ||
277 | 301 | ||
278 | /* We are scanning "cpu" nodes only */ | 302 | /* We are scanning "cpu" nodes only */ |
279 | if (type == NULL || strcmp(type, "cpu") != 0) | 303 | if (type == NULL || strcmp(type, "cpu") != 0) |
@@ -297,11 +321,10 @@ static int __init early_init_dt_scan_cpus(unsigned long node, | |||
297 | * version 2 of the kexec param format adds the phys cpuid of | 321 | * version 2 of the kexec param format adds the phys cpuid of |
298 | * booted proc. | 322 | * booted proc. |
299 | */ | 323 | */ |
300 | if (initial_boot_params && initial_boot_params->version >= 2) { | 324 | if (initial_boot_params->version >= 2) { |
301 | if (intserv[i] == | 325 | if (intserv[i] == initial_boot_params->boot_cpuid_phys) { |
302 | initial_boot_params->boot_cpuid_phys) { | 326 | found = boot_cpu_count; |
303 | found = 1; | 327 | found_thread = i; |
304 | break; | ||
305 | } | 328 | } |
306 | } else { | 329 | } else { |
307 | /* | 330 | /* |
@@ -310,23 +333,20 @@ static int __init early_init_dt_scan_cpus(unsigned long node, | |||
310 | * off secondary threads. | 333 | * off secondary threads. |
311 | */ | 334 | */ |
312 | if (of_get_flat_dt_prop(node, | 335 | if (of_get_flat_dt_prop(node, |
313 | "linux,boot-cpu", NULL) != NULL) { | 336 | "linux,boot-cpu", NULL) != NULL) |
314 | found = 1; | 337 | found = boot_cpu_count; |
315 | break; | ||
316 | } | ||
317 | } | 338 | } |
318 | |||
319 | #ifdef CONFIG_SMP | 339 | #ifdef CONFIG_SMP |
320 | /* logical cpu id is always 0 on UP kernels */ | 340 | /* logical cpu id is always 0 on UP kernels */ |
321 | logical_cpuid++; | 341 | boot_cpu_count++; |
322 | #endif | 342 | #endif |
323 | } | 343 | } |
324 | 344 | ||
325 | if (found) { | 345 | if (found >= 0) { |
326 | DBG("boot cpu: logical %d physical %d\n", logical_cpuid, | 346 | DBG("boot cpu: logical %d physical %d\n", found, |
327 | intserv[i]); | 347 | intserv[found_thread]); |
328 | boot_cpuid = logical_cpuid; | 348 | boot_cpuid = found; |
329 | set_hard_smp_processor_id(boot_cpuid, intserv[i]); | 349 | set_hard_smp_processor_id(found, intserv[found_thread]); |
330 | 350 | ||
331 | /* | 351 | /* |
332 | * PAPR defines "logical" PVR values for cpus that | 352 | * PAPR defines "logical" PVR values for cpus that |
@@ -363,10 +383,15 @@ static int __init early_init_dt_scan_cpus(unsigned long node, | |||
363 | return 0; | 383 | return 0; |
364 | } | 384 | } |
365 | 385 | ||
366 | void __init early_init_dt_scan_chosen_arch(unsigned long node) | 386 | int __init early_init_dt_scan_chosen_ppc(unsigned long node, const char *uname, |
387 | int depth, void *data) | ||
367 | { | 388 | { |
368 | unsigned long *lprop; | 389 | unsigned long *lprop; |
369 | 390 | ||
391 | /* Use common scan routine to determine if this is the chosen node */ | ||
392 | if (early_init_dt_scan_chosen(node, uname, depth, data) == 0) | ||
393 | return 0; | ||
394 | |||
370 | #ifdef CONFIG_PPC64 | 395 | #ifdef CONFIG_PPC64 |
371 | /* check if iommu is forced on or off */ | 396 | /* check if iommu is forced on or off */ |
372 | if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL) | 397 | if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL) |
@@ -398,6 +423,9 @@ void __init early_init_dt_scan_chosen_arch(unsigned long node) | |||
398 | if (lprop) | 423 | if (lprop) |
399 | crashk_res.end = crashk_res.start + *lprop - 1; | 424 | crashk_res.end = crashk_res.start + *lprop - 1; |
400 | #endif | 425 | #endif |
426 | |||
427 | /* break now */ | ||
428 | return 1; | ||
401 | } | 429 | } |
402 | 430 | ||
403 | #ifdef CONFIG_PPC_PSERIES | 431 | #ifdef CONFIG_PPC_PSERIES |
@@ -492,7 +520,7 @@ static int __init early_init_dt_scan_memory_ppc(unsigned long node, | |||
492 | 520 | ||
493 | void __init early_init_dt_add_memory_arch(u64 base, u64 size) | 521 | void __init early_init_dt_add_memory_arch(u64 base, u64 size) |
494 | { | 522 | { |
495 | #if defined(CONFIG_PPC64) | 523 | #ifdef CONFIG_PPC64 |
496 | if (iommu_is_off) { | 524 | if (iommu_is_off) { |
497 | if (base >= 0x80000000ul) | 525 | if (base >= 0x80000000ul) |
498 | return; | 526 | return; |
@@ -500,15 +528,22 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size) | |||
500 | size = 0x80000000ul - base; | 528 | size = 0x80000000ul - base; |
501 | } | 529 | } |
502 | #endif | 530 | #endif |
531 | /* Keep track of the beginning of memory -and- the size of | ||
532 | * the very first block in the device-tree as it represents | ||
533 | * the RMA on ppc64 server | ||
534 | */ | ||
535 | if (base < memstart_addr) { | ||
536 | memstart_addr = base; | ||
537 | first_memblock_size = size; | ||
538 | } | ||
503 | 539 | ||
540 | /* Add the chunk to the MEMBLOCK list */ | ||
504 | memblock_add(base, size); | 541 | memblock_add(base, size); |
505 | |||
506 | memstart_addr = min((u64)memstart_addr, base); | ||
507 | } | 542 | } |
508 | 543 | ||
509 | u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align) | 544 | void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) |
510 | { | 545 | { |
511 | return memblock_alloc(size, align); | 546 | return __va(memblock_alloc(size, align)); |
512 | } | 547 | } |
513 | 548 | ||
514 | #ifdef CONFIG_BLK_DEV_INITRD | 549 | #ifdef CONFIG_BLK_DEV_INITRD |
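Annotation: the first_memblock_size captured here feeds the setup_initial_memory_limit() call added to early_init_devtree() below: on ppc64 server the first memory block is the RMA (real mode area), the only memory addressable before the MMU is fully up, so early allocations must be capped to it. The hash-MMU consumer looks roughly like this (sketch; the real code lives in arch/powerpc/mm):

    void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                           phys_addr_t first_memblock_size)
    {
            /* cap the RMA at 1G and bound early memblock allocations;
             * ppc64_rma_size is the variable this patch adds to prom.c */
            ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
            memblock_set_current_limit(ppc64_rma_size);
    }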
@@ -539,7 +574,9 @@ static void __init early_reserve_mem(void) | |||
539 | #ifdef CONFIG_BLK_DEV_INITRD | 574 | #ifdef CONFIG_BLK_DEV_INITRD |
540 | /* then reserve the initrd, if any */ | 575 | /* then reserve the initrd, if any */ |
541 | if (initrd_start && (initrd_end > initrd_start)) | 576 | if (initrd_start && (initrd_end > initrd_start)) |
542 | memblock_reserve(__pa(initrd_start), initrd_end - initrd_start); | 577 | memblock_reserve(_ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE), |
578 | _ALIGN_UP(initrd_end, PAGE_SIZE) - | ||
579 | _ALIGN_DOWN(initrd_start, PAGE_SIZE)); | ||
543 | #endif /* CONFIG_BLK_DEV_INITRD */ | 580 | #endif /* CONFIG_BLK_DEV_INITRD */ |
544 | 581 | ||
545 | #ifdef CONFIG_PPC32 | 582 | #ifdef CONFIG_PPC32 |
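The initrd reservation is widened to whole pages: the start is rounded down and the end rounded up, so page-granular code that later frees or remaps memory around the initrd cannot clip it. Note that the size is computed from the aligned virtual addresses while the base is __pa() of the aligned start; under the linear mapping the virtual and physical deltas are equal, so the two are consistent. The shape of the computation:

    /* Reserve [ALIGN_DOWN(start), ALIGN_UP(end)) in whole pages. */
    memblock_reserve(_ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE),
                     _ALIGN_UP(initrd_end, PAGE_SIZE) -
                     _ALIGN_DOWN(initrd_start, PAGE_SIZE));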
@@ -655,7 +692,6 @@ static void __init phyp_dump_reserve_mem(void) | |||
655 | static inline void __init phyp_dump_reserve_mem(void) {} | 692 | static inline void __init phyp_dump_reserve_mem(void) {} |
656 | #endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */ | 693 | #endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */ |
657 | 694 | ||
658 | |||
659 | void __init early_init_devtree(void *params) | 695 | void __init early_init_devtree(void *params) |
660 | { | 696 | { |
661 | phys_addr_t limit; | 697 | phys_addr_t limit; |
@@ -671,7 +707,7 @@ void __init early_init_devtree(void *params) | |||
671 | #endif | 707 | #endif |
672 | 708 | ||
673 | #ifdef CONFIG_PHYP_DUMP | 709 | #ifdef CONFIG_PHYP_DUMP |
674 | /* scan tree to see if dump occured during last boot */ | 710 | /* scan tree to see if dump occurred during last boot */ |
675 | of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL); | 711 | of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL); |
676 | #endif | 712 | #endif |
677 | 713 | ||
@@ -679,12 +715,14 @@ void __init early_init_devtree(void *params) | |||
679 | * device-tree, including the platform type, initrd location and | 715 | * device-tree, including the platform type, initrd location and |
680 | * size, TCE reserve, and more ... | 716 | * size, TCE reserve, and more ... |
681 | */ | 717 | */ |
682 | of_scan_flat_dt(early_init_dt_scan_chosen, NULL); | 718 | of_scan_flat_dt(early_init_dt_scan_chosen_ppc, cmd_line); |
683 | 719 | ||
684 | /* Scan memory nodes and rebuild MEMBLOCKs */ | 720 | /* Scan memory nodes and rebuild MEMBLOCKs */ |
685 | memblock_init(); | 721 | memblock_init(); |
722 | |||
686 | of_scan_flat_dt(early_init_dt_scan_root, NULL); | 723 | of_scan_flat_dt(early_init_dt_scan_root, NULL); |
687 | of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL); | 724 | of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL); |
725 | setup_initial_memory_limit(memstart_addr, first_memblock_size); | ||
688 | 726 | ||
689 | /* Save command line for /proc/cmdline and then parse parameters */ | 727 | /* Save command line for /proc/cmdline and then parse parameters */ |
690 | strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); | 728 | strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); |
@@ -726,7 +764,7 @@ void __init early_init_devtree(void *params) | |||
726 | 764 | ||
727 | DBG("Scanning CPUs ...\n"); | 765 | DBG("Scanning CPUs ...\n"); |
728 | 766 | ||
729 | /* Retreive CPU related information from the flat tree | 767 | /* Retrieve CPU related information from the flat tree |
730 | * (altivec support, boot CPU ID, ...) | 768 | * (altivec support, boot CPU ID, ...) |
731 | */ | 769 | */ |
732 | of_scan_flat_dt(early_init_dt_scan_cpus, NULL); | 770 | of_scan_flat_dt(early_init_dt_scan_cpus, NULL); |
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 941ff4dbc567..c016033ba78d 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c | |||
@@ -335,6 +335,7 @@ static void __init prom_printf(const char *format, ...) | |||
335 | const char *p, *q, *s; | 335 | const char *p, *q, *s; |
336 | va_list args; | 336 | va_list args; |
337 | unsigned long v; | 337 | unsigned long v; |
338 | long vs; | ||
338 | struct prom_t *_prom = &RELOC(prom); | 339 | struct prom_t *_prom = &RELOC(prom); |
339 | 340 | ||
340 | va_start(args, format); | 341 | va_start(args, format); |
@@ -368,12 +369,35 @@ static void __init prom_printf(const char *format, ...) | |||
368 | v = va_arg(args, unsigned long); | 369 | v = va_arg(args, unsigned long); |
369 | prom_print_hex(v); | 370 | prom_print_hex(v); |
370 | break; | 371 | break; |
372 | case 'd': | ||
373 | ++q; | ||
374 | vs = va_arg(args, int); | ||
375 | if (vs < 0) { | ||
376 | prom_print(RELOC("-")); | ||
377 | vs = -vs; | ||
378 | } | ||
379 | prom_print_dec(vs); | ||
380 | break; | ||
371 | case 'l': | 381 | case 'l': |
372 | ++q; | 382 | ++q; |
373 | if (*q == 'u') { /* '%lu' */ | 383 | if (*q == 0) |
384 | break; | ||
385 | else if (*q == 'x') { | ||
386 | ++q; | ||
387 | v = va_arg(args, unsigned long); | ||
388 | prom_print_hex(v); | ||
389 | } else if (*q == 'u') { /* '%lu' */ | ||
374 | ++q; | 390 | ++q; |
375 | v = va_arg(args, unsigned long); | 391 | v = va_arg(args, unsigned long); |
376 | prom_print_dec(v); | 392 | prom_print_dec(v); |
393 | } else if (*q == 'd') { /* %ld */ | ||
394 | ++q; | ||
395 | vs = va_arg(args, long); | ||
396 | if (vs < 0) { | ||
397 | prom_print(RELOC("-")); | ||
398 | vs = -vs; | ||
399 | } | ||
400 | prom_print_dec(vs); | ||
377 | } | 401 | } |
378 | break; | 402 | break; |
379 | } | 403 | } |
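prom_printf() grows %d, %lx and %ld conversions. prom_print_dec() only understands an unsigned magnitude, so the signed cases print the minus sign themselves and then negate; one caveat worth noting is that vs = -vs overflows for LONG_MIN, which early-boot callers are assumed never to pass. Usage under those assumptions (variables hypothetical):

    prom_printf("boot cpu = %d\n", bootcpu);       /* signed int    */
    prom_printf("memory limit = %lx\n", limit);    /* hex long      */
    prom_printf("clock skew = %ld\n", skew);       /* signed long   */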
@@ -676,8 +700,10 @@ static void __init early_cmdline_parse(void) | |||
676 | #endif /* CONFIG_PCI_MSI */ | 700 | #endif /* CONFIG_PCI_MSI */ |
677 | #ifdef CONFIG_PPC_SMLPAR | 701 | #ifdef CONFIG_PPC_SMLPAR |
678 | #define OV5_CMO 0x80 /* Cooperative Memory Overcommitment */ | 702 | #define OV5_CMO 0x80 /* Cooperative Memory Overcommitment */ |
703 | #define OV5_XCMO 0x40 /* Page Coalescing */ | ||
679 | #else | 704 | #else |
680 | #define OV5_CMO 0x00 | 705 | #define OV5_CMO 0x00 |
706 | #define OV5_XCMO 0x00 | ||
681 | #endif | 707 | #endif |
682 | #define OV5_TYPE1_AFFINITY 0x80 /* Type 1 NUMA affinity */ | 708 | #define OV5_TYPE1_AFFINITY 0x80 /* Type 1 NUMA affinity */ |
683 | 709 | ||
@@ -732,7 +758,7 @@ static unsigned char ibm_architecture_vec[] = { | |||
732 | OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY | | 758 | OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY | |
733 | OV5_DONATE_DEDICATE_CPU | OV5_MSI, | 759 | OV5_DONATE_DEDICATE_CPU | OV5_MSI, |
734 | 0, | 760 | 0, |
735 | OV5_CMO, | 761 | OV5_CMO | OV5_XCMO, |
736 | OV5_TYPE1_AFFINITY, | 762 | OV5_TYPE1_AFFINITY, |
737 | 0, | 763 | 0, |
738 | 0, | 764 | 0, |
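Option vector 5 is a packed byte array handed to firmware through the ibm,client-architecture-support call, one bit per negotiated feature, so enabling page coalescing is just OR-ing the new flag into the byte that already carries OV5_CMO; with CONFIG_PPC_SMLPAR unset both macros collapse to 0 and the byte stays clear. In effect:

    OV5_CMO | OV5_XCMO   /* 0x80 | 0x40 == 0xc0 with CONFIG_PPC_SMLPAR=y,
                          * 0x00 | 0x00 == 0x00 otherwise */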
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c index 88334af038e5..47187cc2cf00 100644 --- a/arch/powerpc/kernel/prom_parse.c +++ b/arch/powerpc/kernel/prom_parse.c | |||
@@ -2,95 +2,11 @@ | |||
2 | 2 | ||
3 | #include <linux/kernel.h> | 3 | #include <linux/kernel.h> |
4 | #include <linux/string.h> | 4 | #include <linux/string.h> |
5 | #include <linux/pci_regs.h> | ||
6 | #include <linux/module.h> | 5 | #include <linux/module.h> |
7 | #include <linux/ioport.h> | 6 | #include <linux/ioport.h> |
8 | #include <linux/etherdevice.h> | 7 | #include <linux/etherdevice.h> |
9 | #include <linux/of_address.h> | 8 | #include <linux/of_address.h> |
10 | #include <asm/prom.h> | 9 | #include <asm/prom.h> |
11 | #include <asm/pci-bridge.h> | ||
12 | |||
13 | #ifdef CONFIG_PCI | ||
14 | int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq) | ||
15 | { | ||
16 | struct device_node *dn, *ppnode; | ||
17 | struct pci_dev *ppdev; | ||
18 | u32 lspec; | ||
19 | u32 laddr[3]; | ||
20 | u8 pin; | ||
21 | int rc; | ||
22 | |||
23 | /* Check if we have a device node, if yes, fallback to standard OF | ||
24 | * parsing | ||
25 | */ | ||
26 | dn = pci_device_to_OF_node(pdev); | ||
27 | if (dn) { | ||
28 | rc = of_irq_map_one(dn, 0, out_irq); | ||
29 | if (!rc) | ||
30 | return rc; | ||
31 | } | ||
32 | |||
33 | /* Ok, we don't, time to have fun. Let's start by building up an | ||
34 | * interrupt spec. we assume #interrupt-cells is 1, which is standard | ||
35 | * for PCI. If you do different, then don't use that routine. | ||
36 | */ | ||
37 | rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); | ||
38 | if (rc != 0) | ||
39 | return rc; | ||
40 | /* No pin, exit */ | ||
41 | if (pin == 0) | ||
42 | return -ENODEV; | ||
43 | |||
44 | /* Now we walk up the PCI tree */ | ||
45 | lspec = pin; | ||
46 | for (;;) { | ||
47 | /* Get the pci_dev of our parent */ | ||
48 | ppdev = pdev->bus->self; | ||
49 | |||
50 | /* Ouch, it's a host bridge... */ | ||
51 | if (ppdev == NULL) { | ||
52 | #ifdef CONFIG_PPC64 | ||
53 | ppnode = pci_bus_to_OF_node(pdev->bus); | ||
54 | #else | ||
55 | struct pci_controller *host; | ||
56 | host = pci_bus_to_host(pdev->bus); | ||
57 | ppnode = host ? host->dn : NULL; | ||
58 | #endif | ||
59 | /* No node for host bridge ? give up */ | ||
60 | if (ppnode == NULL) | ||
61 | return -EINVAL; | ||
62 | } else | ||
63 | /* We found a P2P bridge, check if it has a node */ | ||
64 | ppnode = pci_device_to_OF_node(ppdev); | ||
65 | |||
66 | /* Ok, we have found a parent with a device-node, hand over to | ||
67 | * the OF parsing code. | ||
68 | * We build a unit address from the linux device to be used for | ||
69 | * resolution. Note that we use the linux bus number which may | ||
70 | * not match your firmware bus numbering. | ||
71 | * Fortunately, in most cases, interrupt-map-mask doesn't include | ||
72 | * the bus number as part of the matching. | ||
73 | * You should still be careful about that though if you intend | ||
74 | * to rely on this function (you ship a firmware that doesn't | ||
75 | * create device nodes for all PCI devices). | ||
76 | */ | ||
77 | if (ppnode) | ||
78 | break; | ||
79 | |||
80 | /* We can only get here if we hit a P2P bridge with no node, | ||
81 | * let's do standard swizzling and try again | ||
82 | */ | ||
83 | lspec = pci_swizzle_interrupt_pin(pdev, lspec); | ||
84 | pdev = ppdev; | ||
85 | } | ||
86 | |||
87 | laddr[0] = (pdev->bus->number << 16) | ||
88 | | (pdev->devfn << 8); | ||
89 | laddr[1] = laddr[2] = 0; | ||
90 | return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq); | ||
91 | } | ||
92 | EXPORT_SYMBOL_GPL(of_irq_map_pci); | ||
93 | #endif /* CONFIG_PCI */ | ||
94 | 10 | ||
95 | void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop, | 11 | void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop, |
96 | unsigned long *busno, unsigned long *phys, unsigned long *size) | 12 | unsigned long *busno, unsigned long *phys, unsigned long *size) |
@@ -117,41 +33,3 @@ void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop, | |||
117 | cells = prop ? *(u32 *)prop : of_n_size_cells(dn); | 33 | cells = prop ? *(u32 *)prop : of_n_size_cells(dn); |
118 | *size = of_read_number(dma_window, cells); | 34 | *size = of_read_number(dma_window, cells); |
119 | } | 35 | } |
120 | |||
121 | /** | ||
122 | * Search the device tree for the best MAC address to use. 'mac-address' is | ||
123 | * checked first, because that is supposed to contain to "most recent" MAC | ||
124 | * address. If that isn't set, then 'local-mac-address' is checked next, | ||
125 | * because that is the default address. If that isn't set, then the obsolete | ||
126 | * 'address' is checked, just in case we're using an old device tree. | ||
127 | * | ||
128 | * Note that the 'address' property is supposed to contain a virtual address of | ||
129 | * the register set, but some DTS files have redefined that property to be the | ||
130 | * MAC address. | ||
131 | * | ||
132 | * All-zero MAC addresses are rejected, because those could be properties that | ||
133 | * exist in the device tree, but were not set by U-Boot. For example, the | ||
134 | * DTS could define 'mac-address' and 'local-mac-address', with zero MAC | ||
135 | * addresses. Some older U-Boots only initialized 'local-mac-address'. In | ||
136 | * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists | ||
137 | * but is all zeros. | ||
138 | */ | ||
139 | const void *of_get_mac_address(struct device_node *np) | ||
140 | { | ||
141 | struct property *pp; | ||
142 | |||
143 | pp = of_find_property(np, "mac-address", NULL); | ||
144 | if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value)) | ||
145 | return pp->value; | ||
146 | |||
147 | pp = of_find_property(np, "local-mac-address", NULL); | ||
148 | if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value)) | ||
149 | return pp->value; | ||
150 | |||
151 | pp = of_find_property(np, "address", NULL); | ||
152 | if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value)) | ||
153 | return pp->value; | ||
154 | |||
155 | return NULL; | ||
156 | } | ||
157 | EXPORT_SYMBOL(of_get_mac_address); | ||
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 11f3cd9c832f..cb22024f2b42 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/signal.h> | 29 | #include <linux/signal.h> |
30 | #include <linux/seccomp.h> | 30 | #include <linux/seccomp.h> |
31 | #include <linux/audit.h> | 31 | #include <linux/audit.h> |
32 | #include <trace/syscall.h> | ||
32 | #ifdef CONFIG_PPC32 | 33 | #ifdef CONFIG_PPC32 |
33 | #include <linux/module.h> | 34 | #include <linux/module.h> |
34 | #endif | 35 | #endif |
@@ -40,6 +41,9 @@ | |||
40 | #include <asm/pgtable.h> | 41 | #include <asm/pgtable.h> |
41 | #include <asm/system.h> | 42 | #include <asm/system.h> |
42 | 43 | ||
44 | #define CREATE_TRACE_POINTS | ||
45 | #include <trace/events/syscalls.h> | ||
46 | |||
43 | /* | 47 | /* |
44 | * The parameter save area on the stack is used to store arguments being passed | 48 | * The parameter save area on the stack is used to store arguments being passed |
45 | * to callee function and is located at fixed offset from stack pointer. | 49 | * to callee function and is located at fixed offset from stack pointer. |
@@ -229,12 +233,16 @@ static int gpr_get(struct task_struct *target, const struct user_regset *regset, | |||
229 | unsigned int pos, unsigned int count, | 233 | unsigned int pos, unsigned int count, |
230 | void *kbuf, void __user *ubuf) | 234 | void *kbuf, void __user *ubuf) |
231 | { | 235 | { |
232 | int ret; | 236 | int i, ret; |
233 | 237 | ||
234 | if (target->thread.regs == NULL) | 238 | if (target->thread.regs == NULL) |
235 | return -EIO; | 239 | return -EIO; |
236 | 240 | ||
237 | CHECK_FULL_REGS(target->thread.regs); | 241 | if (!FULL_REGS(target->thread.regs)) { |
242 | /* We have a partial register set. Fill 14-31 with bogus values */ | ||
243 | for (i = 14; i < 32; i++) | ||
244 | target->thread.regs->gpr[i] = NV_REG_POISON; | ||
245 | } | ||
238 | 246 | ||
239 | ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, | 247 | ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, |
240 | target->thread.regs, | 248 | target->thread.regs, |
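The old CHECK_FULL_REGS() fired an assertion when only the volatile registers had been saved on syscall entry; gpr_get() now degrades gracefully by filling GPRs 14-31 with NV_REG_POISON so a debugger sees recognisably bogus values instead of stale data (the same pattern is applied to the compat gpr32_get() below). A sketch of what the predicate tests, per the usual asm/ptrace.h definition (illustrative, not quoted from this commit):

    /* The low bit of regs->trap records a partial register save:
     * FULL_REGS() is true only when the non-volatile GPRs are valid.
     */
    #define FULL_REGS(regs) (((regs)->trap & 1) == 0)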
@@ -459,7 +467,7 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset, | |||
459 | #ifdef CONFIG_VSX | 467 | #ifdef CONFIG_VSX |
460 | /* | 468 | /* |
461 | * Currently to set and and get all the vsx state, you need to call | 469 | * Currently to set and and get all the vsx state, you need to call |
462 | * the fp and VMX calls aswell. This only get/sets the lower 32 | 470 | * the fp and VMX calls as well. This only get/sets the lower 32 |
463 | * 128bit VSX registers. | 471 | * 128bit VSX registers. |
464 | */ | 472 | */ |
465 | 473 | ||
@@ -641,11 +649,16 @@ static int gpr32_get(struct task_struct *target, | |||
641 | compat_ulong_t *k = kbuf; | 649 | compat_ulong_t *k = kbuf; |
642 | compat_ulong_t __user *u = ubuf; | 650 | compat_ulong_t __user *u = ubuf; |
643 | compat_ulong_t reg; | 651 | compat_ulong_t reg; |
652 | int i; | ||
644 | 653 | ||
645 | if (target->thread.regs == NULL) | 654 | if (target->thread.regs == NULL) |
646 | return -EIO; | 655 | return -EIO; |
647 | 656 | ||
648 | CHECK_FULL_REGS(target->thread.regs); | 657 | if (!FULL_REGS(target->thread.regs)) { |
658 | /* We have a partial register set. Fill 14-31 with bogus values */ | ||
659 | for (i = 14; i < 32; i++) | ||
660 | target->thread.regs->gpr[i] = NV_REG_POISON; | ||
661 | } | ||
649 | 662 | ||
650 | pos /= sizeof(reg); | 663 | pos /= sizeof(reg); |
651 | count /= sizeof(reg); | 664 | count /= sizeof(reg); |
@@ -924,12 +937,16 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, | |||
924 | if (data && !(data & DABR_TRANSLATION)) | 937 | if (data && !(data & DABR_TRANSLATION)) |
925 | return -EIO; | 938 | return -EIO; |
926 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 939 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
940 | if (ptrace_get_breakpoints(task) < 0) | ||
941 | return -ESRCH; | ||
942 | |||
927 | bp = thread->ptrace_bps[0]; | 943 | bp = thread->ptrace_bps[0]; |
928 | if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) { | 944 | if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) { |
929 | if (bp) { | 945 | if (bp) { |
930 | unregister_hw_breakpoint(bp); | 946 | unregister_hw_breakpoint(bp); |
931 | thread->ptrace_bps[0] = NULL; | 947 | thread->ptrace_bps[0] = NULL; |
932 | } | 948 | } |
949 | ptrace_put_breakpoints(task); | ||
933 | return 0; | 950 | return 0; |
934 | } | 951 | } |
935 | if (bp) { | 952 | if (bp) { |
@@ -939,9 +956,12 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, | |||
939 | (DABR_DATA_WRITE | DABR_DATA_READ), | 956 | (DABR_DATA_WRITE | DABR_DATA_READ), |
940 | &attr.bp_type); | 957 | &attr.bp_type); |
941 | ret = modify_user_hw_breakpoint(bp, &attr); | 958 | ret = modify_user_hw_breakpoint(bp, &attr); |
942 | if (ret) | 959 | if (ret) { |
960 | ptrace_put_breakpoints(task); | ||
943 | return ret; | 961 | return ret; |
962 | } | ||
944 | thread->ptrace_bps[0] = bp; | 963 | thread->ptrace_bps[0] = bp; |
964 | ptrace_put_breakpoints(task); | ||
945 | thread->dabr = data; | 965 | thread->dabr = data; |
946 | return 0; | 966 | return 0; |
947 | } | 967 | } |
@@ -956,9 +976,12 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, | |||
956 | ptrace_triggered, task); | 976 | ptrace_triggered, task); |
957 | if (IS_ERR(bp)) { | 977 | if (IS_ERR(bp)) { |
958 | thread->ptrace_bps[0] = NULL; | 978 | thread->ptrace_bps[0] = NULL; |
979 | ptrace_put_breakpoints(task); | ||
959 | return PTR_ERR(bp); | 980 | return PTR_ERR(bp); |
960 | } | 981 | } |
961 | 982 | ||
983 | ptrace_put_breakpoints(task); | ||
984 | |||
962 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ | 985 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |
963 | 986 | ||
964 | /* Move contents to the DABR register */ | 987 | /* Move contents to the DABR register */ |
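ptrace_get_breakpoints()/ptrace_put_breakpoints() pin the child's breakpoint state so it cannot be torn down concurrently while the DABR slot is rewritten; the discipline this hunk enforces is that every exit taken after a successful get, error or success alike, performs the matching put. Condensed to its shape (helper name hypothetical):

    if (ptrace_get_breakpoints(task) < 0)
            return -ESRCH;          /* nothing acquired, nothing to put */

    ret = update_dabr_slot(task, data);

    ptrace_put_breakpoints(task);   /* paired on every path after get */
    return ret;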
@@ -1316,6 +1339,10 @@ static int set_dac_range(struct task_struct *child, | |||
1316 | static long ppc_set_hwdebug(struct task_struct *child, | 1339 | static long ppc_set_hwdebug(struct task_struct *child, |
1317 | struct ppc_hw_breakpoint *bp_info) | 1340 | struct ppc_hw_breakpoint *bp_info) |
1318 | { | 1341 | { |
1342 | #ifndef CONFIG_PPC_ADV_DEBUG_REGS | ||
1343 | unsigned long dabr; | ||
1344 | #endif | ||
1345 | |||
1319 | if (bp_info->version != 1) | 1346 | if (bp_info->version != 1) |
1320 | return -ENOTSUPP; | 1347 | return -ENOTSUPP; |
1321 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | 1348 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
@@ -1353,11 +1380,10 @@ static long ppc_set_hwdebug(struct task_struct *child, | |||
1353 | /* | 1380 | /* |
1354 | * We only support one data breakpoint | 1381 | * We only support one data breakpoint |
1355 | */ | 1382 | */ |
1356 | if (((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0) || | 1383 | if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 || |
1357 | ((bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0) || | 1384 | (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 || |
1358 | (bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_WRITE) || | 1385 | bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT || |
1359 | (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) || | 1386 | bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE) |
1360 | (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)) | ||
1361 | return -EINVAL; | 1387 | return -EINVAL; |
1362 | 1388 | ||
1363 | if (child->thread.dabr) | 1389 | if (child->thread.dabr) |
@@ -1366,7 +1392,14 @@ static long ppc_set_hwdebug(struct task_struct *child, | |||
1366 | if ((unsigned long)bp_info->addr >= TASK_SIZE) | 1392 | if ((unsigned long)bp_info->addr >= TASK_SIZE) |
1367 | return -EIO; | 1393 | return -EIO; |
1368 | 1394 | ||
1369 | child->thread.dabr = (unsigned long)bp_info->addr; | 1395 | dabr = (unsigned long)bp_info->addr & ~7UL; |
1396 | dabr |= DABR_TRANSLATION; | ||
1397 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) | ||
1398 | dabr |= DABR_DATA_READ; | ||
1399 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) | ||
1400 | dabr |= DABR_DATA_WRITE; | ||
1401 | |||
1402 | child->thread.dabr = dabr; | ||
1370 | 1403 | ||
1371 | return 1; | 1404 | return 1; |
1372 | #endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */ | 1405 | #endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */ |
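Two things change for the classic-DABR case. The old condition list was self-contradictory, testing trigger_type & PPC_BREAKPOINT_TRIGGER_RW while also requiring trigger_type == PPC_BREAKPOINT_TRIGGER_WRITE, so read watchpoints could never be requested; that clause is dropped. And rather than stashing the raw user address, the code now composes a real DABR value, with the address masked to an 8-byte boundary and the translation/read/write enable bits in the low bits. A hypothetical user-side caller that the fix enables (pid and watched_var are placeholders):

    /* A read+write watchpoint via PPC_PTRACE_SETHWDEBUG, previously
     * rejected with -EINVAL, now maps onto the DABR R|W bits.
     */
    struct ppc_hw_breakpoint bp = {
            .version        = 1,
            .trigger_type   = PPC_BREAKPOINT_TRIGGER_RW,
            .addr_mode      = PPC_BREAKPOINT_MODE_EXACT,
            .condition_mode = PPC_BREAKPOINT_CONDITION_NONE,
            .addr           = (__u64)(unsigned long)&watched_var,
    };
    ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);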
@@ -1406,37 +1439,42 @@ static long ppc_del_hwdebug(struct task_struct *child, long addr, long data) | |||
1406 | * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls, | 1439 | * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls, |
1407 | * we mark them as obsolete now, they will be removed in a future version | 1440 | * we mark them as obsolete now, they will be removed in a future version |
1408 | */ | 1441 | */ |
1409 | static long arch_ptrace_old(struct task_struct *child, long request, long addr, | 1442 | static long arch_ptrace_old(struct task_struct *child, long request, |
1410 | long data) | 1443 | unsigned long addr, unsigned long data) |
1411 | { | 1444 | { |
1445 | void __user *datavp = (void __user *) data; | ||
1446 | |||
1412 | switch (request) { | 1447 | switch (request) { |
1413 | case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */ | 1448 | case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */ |
1414 | return copy_regset_to_user(child, &user_ppc_native_view, | 1449 | return copy_regset_to_user(child, &user_ppc_native_view, |
1415 | REGSET_GPR, 0, 32 * sizeof(long), | 1450 | REGSET_GPR, 0, 32 * sizeof(long), |
1416 | (void __user *) data); | 1451 | datavp); |
1417 | 1452 | ||
1418 | case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */ | 1453 | case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */ |
1419 | return copy_regset_from_user(child, &user_ppc_native_view, | 1454 | return copy_regset_from_user(child, &user_ppc_native_view, |
1420 | REGSET_GPR, 0, 32 * sizeof(long), | 1455 | REGSET_GPR, 0, 32 * sizeof(long), |
1421 | (const void __user *) data); | 1456 | datavp); |
1422 | 1457 | ||
1423 | case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */ | 1458 | case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */ |
1424 | return copy_regset_to_user(child, &user_ppc_native_view, | 1459 | return copy_regset_to_user(child, &user_ppc_native_view, |
1425 | REGSET_FPR, 0, 32 * sizeof(double), | 1460 | REGSET_FPR, 0, 32 * sizeof(double), |
1426 | (void __user *) data); | 1461 | datavp); |
1427 | 1462 | ||
1428 | case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */ | 1463 | case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */ |
1429 | return copy_regset_from_user(child, &user_ppc_native_view, | 1464 | return copy_regset_from_user(child, &user_ppc_native_view, |
1430 | REGSET_FPR, 0, 32 * sizeof(double), | 1465 | REGSET_FPR, 0, 32 * sizeof(double), |
1431 | (const void __user *) data); | 1466 | datavp); |
1432 | } | 1467 | } |
1433 | 1468 | ||
1434 | return -EPERM; | 1469 | return -EPERM; |
1435 | } | 1470 | } |
1436 | 1471 | ||
1437 | long arch_ptrace(struct task_struct *child, long request, long addr, long data) | 1472 | long arch_ptrace(struct task_struct *child, long request, |
1473 | unsigned long addr, unsigned long data) | ||
1438 | { | 1474 | { |
1439 | int ret = -EPERM; | 1475 | int ret = -EPERM; |
1476 | void __user *datavp = (void __user *) data; | ||
1477 | unsigned long __user *datalp = datavp; | ||
1440 | 1478 | ||
1441 | switch (request) { | 1479 | switch (request) { |
1442 | /* read the word at location addr in the USER area. */ | 1480 | /* read the word at location addr in the USER area. */ |
@@ -1446,11 +1484,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
1446 | ret = -EIO; | 1484 | ret = -EIO; |
1447 | /* convert to index and check */ | 1485 | /* convert to index and check */ |
1448 | #ifdef CONFIG_PPC32 | 1486 | #ifdef CONFIG_PPC32 |
1449 | index = (unsigned long) addr >> 2; | 1487 | index = addr >> 2; |
1450 | if ((addr & 3) || (index > PT_FPSCR) | 1488 | if ((addr & 3) || (index > PT_FPSCR) |
1451 | || (child->thread.regs == NULL)) | 1489 | || (child->thread.regs == NULL)) |
1452 | #else | 1490 | #else |
1453 | index = (unsigned long) addr >> 3; | 1491 | index = addr >> 3; |
1454 | if ((addr & 7) || (index > PT_FPSCR)) | 1492 | if ((addr & 7) || (index > PT_FPSCR)) |
1455 | #endif | 1493 | #endif |
1456 | break; | 1494 | break; |
@@ -1463,7 +1501,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
1463 | tmp = ((unsigned long *)child->thread.fpr) | 1501 | tmp = ((unsigned long *)child->thread.fpr) |
1464 | [TS_FPRWIDTH * (index - PT_FPR0)]; | 1502 | [TS_FPRWIDTH * (index - PT_FPR0)]; |
1465 | } | 1503 | } |
1466 | ret = put_user(tmp,(unsigned long __user *) data); | 1504 | ret = put_user(tmp, datalp); |
1467 | break; | 1505 | break; |
1468 | } | 1506 | } |
1469 | 1507 | ||
@@ -1474,11 +1512,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
1474 | ret = -EIO; | 1512 | ret = -EIO; |
1475 | /* convert to index and check */ | 1513 | /* convert to index and check */ |
1476 | #ifdef CONFIG_PPC32 | 1514 | #ifdef CONFIG_PPC32 |
1477 | index = (unsigned long) addr >> 2; | 1515 | index = addr >> 2; |
1478 | if ((addr & 3) || (index > PT_FPSCR) | 1516 | if ((addr & 3) || (index > PT_FPSCR) |
1479 | || (child->thread.regs == NULL)) | 1517 | || (child->thread.regs == NULL)) |
1480 | #else | 1518 | #else |
1481 | index = (unsigned long) addr >> 3; | 1519 | index = addr >> 3; |
1482 | if ((addr & 7) || (index > PT_FPSCR)) | 1520 | if ((addr & 7) || (index > PT_FPSCR)) |
1483 | #endif | 1521 | #endif |
1484 | break; | 1522 | break; |
@@ -1525,11 +1563,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
1525 | dbginfo.features = 0; | 1563 | dbginfo.features = 0; |
1526 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ | 1564 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ |
1527 | 1565 | ||
1528 | if (!access_ok(VERIFY_WRITE, data, | 1566 | if (!access_ok(VERIFY_WRITE, datavp, |
1529 | sizeof(struct ppc_debug_info))) | 1567 | sizeof(struct ppc_debug_info))) |
1530 | return -EFAULT; | 1568 | return -EFAULT; |
1531 | ret = __copy_to_user((struct ppc_debug_info __user *)data, | 1569 | ret = __copy_to_user(datavp, &dbginfo, |
1532 | &dbginfo, sizeof(struct ppc_debug_info)) ? | 1570 | sizeof(struct ppc_debug_info)) ? |
1533 | -EFAULT : 0; | 1571 | -EFAULT : 0; |
1534 | break; | 1572 | break; |
1535 | } | 1573 | } |
@@ -1537,11 +1575,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
1537 | case PPC_PTRACE_SETHWDEBUG: { | 1575 | case PPC_PTRACE_SETHWDEBUG: { |
1538 | struct ppc_hw_breakpoint bp_info; | 1576 | struct ppc_hw_breakpoint bp_info; |
1539 | 1577 | ||
1540 | if (!access_ok(VERIFY_READ, data, | 1578 | if (!access_ok(VERIFY_READ, datavp, |
1541 | sizeof(struct ppc_hw_breakpoint))) | 1579 | sizeof(struct ppc_hw_breakpoint))) |
1542 | return -EFAULT; | 1580 | return -EFAULT; |
1543 | ret = __copy_from_user(&bp_info, | 1581 | ret = __copy_from_user(&bp_info, datavp, |
1544 | (struct ppc_hw_breakpoint __user *)data, | ||
1545 | sizeof(struct ppc_hw_breakpoint)) ? | 1582 | sizeof(struct ppc_hw_breakpoint)) ? |
1546 | -EFAULT : 0; | 1583 | -EFAULT : 0; |
1547 | if (!ret) | 1584 | if (!ret) |
@@ -1560,11 +1597,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
1560 | if (addr > 0) | 1597 | if (addr > 0) |
1561 | break; | 1598 | break; |
1562 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | 1599 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
1563 | ret = put_user(child->thread.dac1, | 1600 | ret = put_user(child->thread.dac1, datalp); |
1564 | (unsigned long __user *)data); | ||
1565 | #else | 1601 | #else |
1566 | ret = put_user(child->thread.dabr, | 1602 | ret = put_user(child->thread.dabr, datalp); |
1567 | (unsigned long __user *)data); | ||
1568 | #endif | 1603 | #endif |
1569 | break; | 1604 | break; |
1570 | } | 1605 | } |
@@ -1580,7 +1615,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
1580 | return copy_regset_to_user(child, &user_ppc_native_view, | 1615 | return copy_regset_to_user(child, &user_ppc_native_view, |
1581 | REGSET_GPR, | 1616 | REGSET_GPR, |
1582 | 0, sizeof(struct pt_regs), | 1617 | 0, sizeof(struct pt_regs), |
1583 | (void __user *) data); | 1618 | datavp); |
1584 | 1619 | ||
1585 | #ifdef CONFIG_PPC64 | 1620 | #ifdef CONFIG_PPC64 |
1586 | case PTRACE_SETREGS64: | 1621 | case PTRACE_SETREGS64: |
@@ -1589,19 +1624,19 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
1589 | return copy_regset_from_user(child, &user_ppc_native_view, | 1624 | return copy_regset_from_user(child, &user_ppc_native_view, |
1590 | REGSET_GPR, | 1625 | REGSET_GPR, |
1591 | 0, sizeof(struct pt_regs), | 1626 | 0, sizeof(struct pt_regs), |
1592 | (const void __user *) data); | 1627 | datavp); |
1593 | 1628 | ||
1594 | case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */ | 1629 | case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */ |
1595 | return copy_regset_to_user(child, &user_ppc_native_view, | 1630 | return copy_regset_to_user(child, &user_ppc_native_view, |
1596 | REGSET_FPR, | 1631 | REGSET_FPR, |
1597 | 0, sizeof(elf_fpregset_t), | 1632 | 0, sizeof(elf_fpregset_t), |
1598 | (void __user *) data); | 1633 | datavp); |
1599 | 1634 | ||
1600 | case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */ | 1635 | case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */ |
1601 | return copy_regset_from_user(child, &user_ppc_native_view, | 1636 | return copy_regset_from_user(child, &user_ppc_native_view, |
1602 | REGSET_FPR, | 1637 | REGSET_FPR, |
1603 | 0, sizeof(elf_fpregset_t), | 1638 | 0, sizeof(elf_fpregset_t), |
1604 | (const void __user *) data); | 1639 | datavp); |
1605 | 1640 | ||
1606 | #ifdef CONFIG_ALTIVEC | 1641 | #ifdef CONFIG_ALTIVEC |
1607 | case PTRACE_GETVRREGS: | 1642 | case PTRACE_GETVRREGS: |
@@ -1609,40 +1644,40 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
1609 | REGSET_VMX, | 1644 | REGSET_VMX, |
1610 | 0, (33 * sizeof(vector128) + | 1645 | 0, (33 * sizeof(vector128) + |
1611 | sizeof(u32)), | 1646 | sizeof(u32)), |
1612 | (void __user *) data); | 1647 | datavp); |
1613 | 1648 | ||
1614 | case PTRACE_SETVRREGS: | 1649 | case PTRACE_SETVRREGS: |
1615 | return copy_regset_from_user(child, &user_ppc_native_view, | 1650 | return copy_regset_from_user(child, &user_ppc_native_view, |
1616 | REGSET_VMX, | 1651 | REGSET_VMX, |
1617 | 0, (33 * sizeof(vector128) + | 1652 | 0, (33 * sizeof(vector128) + |
1618 | sizeof(u32)), | 1653 | sizeof(u32)), |
1619 | (const void __user *) data); | 1654 | datavp); |
1620 | #endif | 1655 | #endif |
1621 | #ifdef CONFIG_VSX | 1656 | #ifdef CONFIG_VSX |
1622 | case PTRACE_GETVSRREGS: | 1657 | case PTRACE_GETVSRREGS: |
1623 | return copy_regset_to_user(child, &user_ppc_native_view, | 1658 | return copy_regset_to_user(child, &user_ppc_native_view, |
1624 | REGSET_VSX, | 1659 | REGSET_VSX, |
1625 | 0, 32 * sizeof(double), | 1660 | 0, 32 * sizeof(double), |
1626 | (void __user *) data); | 1661 | datavp); |
1627 | 1662 | ||
1628 | case PTRACE_SETVSRREGS: | 1663 | case PTRACE_SETVSRREGS: |
1629 | return copy_regset_from_user(child, &user_ppc_native_view, | 1664 | return copy_regset_from_user(child, &user_ppc_native_view, |
1630 | REGSET_VSX, | 1665 | REGSET_VSX, |
1631 | 0, 32 * sizeof(double), | 1666 | 0, 32 * sizeof(double), |
1632 | (const void __user *) data); | 1667 | datavp); |
1633 | #endif | 1668 | #endif |
1634 | #ifdef CONFIG_SPE | 1669 | #ifdef CONFIG_SPE |
1635 | case PTRACE_GETEVRREGS: | 1670 | case PTRACE_GETEVRREGS: |
1636 | /* Get the child spe register state. */ | 1671 | /* Get the child spe register state. */ |
1637 | return copy_regset_to_user(child, &user_ppc_native_view, | 1672 | return copy_regset_to_user(child, &user_ppc_native_view, |
1638 | REGSET_SPE, 0, 35 * sizeof(u32), | 1673 | REGSET_SPE, 0, 35 * sizeof(u32), |
1639 | (void __user *) data); | 1674 | datavp); |
1640 | 1675 | ||
1641 | case PTRACE_SETEVRREGS: | 1676 | case PTRACE_SETEVRREGS: |
1642 | /* Set the child spe register state. */ | 1677 | /* Set the child spe register state. */ |
1643 | return copy_regset_from_user(child, &user_ppc_native_view, | 1678 | return copy_regset_from_user(child, &user_ppc_native_view, |
1644 | REGSET_SPE, 0, 35 * sizeof(u32), | 1679 | REGSET_SPE, 0, 35 * sizeof(u32), |
1645 | (const void __user *) data); | 1680 | datavp); |
1646 | #endif | 1681 | #endif |
1647 | 1682 | ||
1648 | /* Old reverse args ptrace calls */ | 1683 | /* Old reverse args ptrace calls */ |
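The long run of cast removals above is mechanical fallout from the prototype change: arch_ptrace() now takes unsigned long addr and data (matching a tree-wide ptrace signature cleanup), so the __user pointer is derived once and reused, datavp for buffer arguments and datalp where a word is stored with put_user(). The idiom, reduced to a sketch:

    long arch_ptrace(struct task_struct *child, long request,
                     unsigned long addr, unsigned long data)
    {
            void __user *datavp = (void __user *) data;
            unsigned long __user *datalp = datavp;
            long ret = -EPERM;

            /* each copy_regset_{to,from}_user() and put_user() below
             * now takes datavp or datalp instead of re-casting data */
            return ret;
    }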
@@ -1679,9 +1714,12 @@ long do_syscall_trace_enter(struct pt_regs *regs) | |||
1679 | */ | 1714 | */ |
1680 | ret = -1L; | 1715 | ret = -1L; |
1681 | 1716 | ||
1717 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) | ||
1718 | trace_sys_enter(regs, regs->gpr[0]); | ||
1719 | |||
1682 | if (unlikely(current->audit_context)) { | 1720 | if (unlikely(current->audit_context)) { |
1683 | #ifdef CONFIG_PPC64 | 1721 | #ifdef CONFIG_PPC64 |
1684 | if (!test_thread_flag(TIF_32BIT)) | 1722 | if (!is_32bit_task()) |
1685 | audit_syscall_entry(AUDIT_ARCH_PPC64, | 1723 | audit_syscall_entry(AUDIT_ARCH_PPC64, |
1686 | regs->gpr[0], | 1724 | regs->gpr[0], |
1687 | regs->gpr[3], regs->gpr[4], | 1725 | regs->gpr[3], regs->gpr[4], |
@@ -1707,6 +1745,9 @@ void do_syscall_trace_leave(struct pt_regs *regs) | |||
1707 | audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS, | 1745 | audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS, |
1708 | regs->result); | 1746 | regs->result); |
1709 | 1747 | ||
1748 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) | ||
1749 | trace_sys_exit(regs, regs->result); | ||
1750 | |||
1710 | step = test_thread_flag(TIF_SINGLESTEP); | 1751 | step = test_thread_flag(TIF_SINGLESTEP); |
1711 | if (step || test_thread_flag(TIF_SYSCALL_TRACE)) | 1752 | if (step || test_thread_flag(TIF_SYSCALL_TRACE)) |
1712 | tracehook_report_syscall_exit(regs, step); | 1753 | tracehook_report_syscall_exit(regs, step); |
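This wires powerpc into the generic syscall tracepoints: defining CREATE_TRACE_POINTS before including trace/events/syscalls.h instantiates the tracepoints in this translation unit, and the enter/exit hooks fire only when TIF_SYSCALL_TRACEPOINT is set for the thread, so the untraced fast path costs a single flag test. The gating pattern:

    if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
            trace_sys_enter(regs, regs->gpr[0]);  /* r0 holds the syscall number */

On the way out, trace_sys_exit(regs, regs->result) reports the return value the same way.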
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c index 8a6daf4129f6..69c4be917d07 100644 --- a/arch/powerpc/kernel/ptrace32.c +++ b/arch/powerpc/kernel/ptrace32.c | |||
@@ -280,7 +280,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | |||
280 | /* We only support one DABR and no IABRS at the moment */ | 280 | /* We only support one DABR and no IABRS at the moment */ |
281 | if (addr > 0) | 281 | if (addr > 0) |
282 | break; | 282 | break; |
283 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | ||
284 | ret = put_user(child->thread.dac1, (u32 __user *)data); | ||
285 | #else | ||
283 | ret = put_user(child->thread.dabr, (u32 __user *)data); | 286 | ret = put_user(child->thread.dabr, (u32 __user *)data); |
287 | #endif | ||
284 | break; | 288 | break; |
285 | } | 289 | } |
286 | 290 | ||
@@ -312,6 +316,9 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | |||
312 | case PTRACE_SET_DEBUGREG: | 316 | case PTRACE_SET_DEBUGREG: |
313 | case PTRACE_SYSCALL: | 317 | case PTRACE_SYSCALL: |
314 | case PTRACE_CONT: | 318 | case PTRACE_CONT: |
319 | case PPC_PTRACE_GETHWDBGINFO: | ||
320 | case PPC_PTRACE_SETHWDEBUG: | ||
321 | case PPC_PTRACE_DELHWDEBUG: | ||
315 | ret = arch_ptrace(child, request, addr, data); | 322 | ret = arch_ptrace(child, request, addr, data); |
316 | break; | 323 | break; |
317 | 324 | ||
diff --git a/arch/powerpc/kernel/rtas-rtc.c b/arch/powerpc/kernel/rtas-rtc.c index 77578c093dda..c57c19358a26 100644 --- a/arch/powerpc/kernel/rtas-rtc.c +++ b/arch/powerpc/kernel/rtas-rtc.c | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/init.h> | 4 | #include <linux/init.h> |
5 | #include <linux/rtc.h> | 5 | #include <linux/rtc.h> |
6 | #include <linux/delay.h> | 6 | #include <linux/delay.h> |
7 | #include <linux/ratelimit.h> | ||
7 | #include <asm/prom.h> | 8 | #include <asm/prom.h> |
8 | #include <asm/rtas.h> | 9 | #include <asm/rtas.h> |
9 | #include <asm/time.h> | 10 | #include <asm/time.h> |
@@ -29,9 +30,10 @@ unsigned long __init rtas_get_boot_time(void) | |||
29 | } | 30 | } |
30 | } while (wait_time && (get_tb() < max_wait_tb)); | 31 | } while (wait_time && (get_tb() < max_wait_tb)); |
31 | 32 | ||
32 | if (error != 0 && printk_ratelimit()) { | 33 | if (error != 0) { |
33 | printk(KERN_WARNING "error: reading the clock failed (%d)\n", | 34 | printk_ratelimited(KERN_WARNING |
34 | error); | 35 | "error: reading the clock failed (%d)\n", |
36 | error); | ||
35 | return 0; | 37 | return 0; |
36 | } | 38 | } |
37 | 39 | ||
@@ -55,19 +57,21 @@ void rtas_get_rtc_time(struct rtc_time *rtc_tm) | |||
55 | 57 | ||
56 | wait_time = rtas_busy_delay_time(error); | 58 | wait_time = rtas_busy_delay_time(error); |
57 | if (wait_time) { | 59 | if (wait_time) { |
58 | if (in_interrupt() && printk_ratelimit()) { | 60 | if (in_interrupt()) { |
59 | memset(rtc_tm, 0, sizeof(struct rtc_time)); | 61 | memset(rtc_tm, 0, sizeof(struct rtc_time)); |
60 | printk(KERN_WARNING "error: reading clock" | 62 | printk_ratelimited(KERN_WARNING |
61 | " would delay interrupt\n"); | 63 | "error: reading clock " |
64 | "would delay interrupt\n"); | ||
62 | return; /* delay not allowed */ | 65 | return; /* delay not allowed */ |
63 | } | 66 | } |
64 | msleep(wait_time); | 67 | msleep(wait_time); |
65 | } | 68 | } |
66 | } while (wait_time && (get_tb() < max_wait_tb)); | 69 | } while (wait_time && (get_tb() < max_wait_tb)); |
67 | 70 | ||
68 | if (error != 0 && printk_ratelimit()) { | 71 | if (error != 0) { |
69 | printk(KERN_WARNING "error: reading the clock failed (%d)\n", | 72 | printk_ratelimited(KERN_WARNING |
70 | error); | 73 | "error: reading the clock failed (%d)\n", |
74 | error); | ||
71 | return; | 75 | return; |
72 | } | 76 | } |
73 | 77 | ||
@@ -99,9 +103,10 @@ int rtas_set_rtc_time(struct rtc_time *tm) | |||
99 | } | 103 | } |
100 | } while (wait_time && (get_tb() < max_wait_tb)); | 104 | } while (wait_time && (get_tb() < max_wait_tb)); |
101 | 105 | ||
102 | if (error != 0 && printk_ratelimit()) | 106 | if (error != 0) |
103 | printk(KERN_WARNING "error: setting the clock failed (%d)\n", | 107 | printk_ratelimited(KERN_WARNING |
104 | error); | 108 | "error: setting the clock failed (%d)\n", |
109 | error); | ||
105 | 110 | ||
106 | return 0; | 111 | return 0; |
107 | } | 112 | } |
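printk_ratelimit() consults one global ratelimit state shared by every caller in the kernel, so an unrelated noisy subsystem could silence these clock warnings, and vice versa; printk_ratelimited() gives each call site its own state. There is also a real bug fix hiding in rtas_get_rtc_time(): in the old code a rate-limited in_interrupt() caller skipped the whole block, including the early return, and fell through to msleep() in interrupt context. Roughly what the macro expands to (simplified from include/linux/printk.h):

    /* A static per-call-site ratelimit_state guards the printk. */
    #define printk_ratelimited(fmt, ...) ({                           \
            static DEFINE_RATELIMIT_STATE(_rs,                        \
                                          DEFAULT_RATELIMIT_INTERVAL, \
                                          DEFAULT_RATELIMIT_BURST);   \
            if (__ratelimit(&_rs))                                    \
                    printk(fmt, ##__VA_ARGS__);                       \
    })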
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 41048de3c6c3..271ff6318eda 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c | |||
@@ -41,6 +41,8 @@ | |||
41 | #include <asm/atomic.h> | 41 | #include <asm/atomic.h> |
42 | #include <asm/time.h> | 42 | #include <asm/time.h> |
43 | #include <asm/mmu.h> | 43 | #include <asm/mmu.h> |
44 | #include <asm/topology.h> | ||
45 | #include <asm/pSeries_reconfig.h> | ||
44 | 46 | ||
45 | struct rtas_t rtas = { | 47 | struct rtas_t rtas = { |
46 | .lock = __ARCH_SPIN_LOCK_UNLOCKED | 48 | .lock = __ARCH_SPIN_LOCK_UNLOCKED |
@@ -493,7 +495,7 @@ unsigned int rtas_busy_delay(int status) | |||
493 | 495 | ||
494 | might_sleep(); | 496 | might_sleep(); |
495 | ms = rtas_busy_delay_time(status); | 497 | ms = rtas_busy_delay_time(status); |
496 | if (ms) | 498 | if (ms && need_resched()) |
497 | msleep(ms); | 499 | msleep(ms); |
498 | 500 | ||
499 | return ms; | 501 | return ms; |
@@ -713,6 +715,7 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w | |||
713 | int cpu; | 715 | int cpu; |
714 | 716 | ||
715 | slb_set_size(SLB_MIN_SIZE); | 717 | slb_set_size(SLB_MIN_SIZE); |
718 | stop_topology_update(); | ||
716 | printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id()); | 719 | printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id()); |
717 | 720 | ||
718 | while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) && | 721 | while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) && |
@@ -728,6 +731,8 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w | |||
728 | rc = atomic_read(&data->error); | 731 | rc = atomic_read(&data->error); |
729 | 732 | ||
730 | atomic_set(&data->error, rc); | 733 | atomic_set(&data->error, rc); |
734 | start_topology_update(); | ||
735 | pSeries_coalesce_init(); | ||
731 | 736 | ||
732 | if (wake_when_done) { | 737 | if (wake_when_done) { |
733 | atomic_set(&data->done, 1); | 738 | atomic_set(&data->done, 1); |
@@ -805,7 +810,7 @@ static void rtas_percpu_suspend_me(void *info) | |||
805 | __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1); | 810 | __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1); |
806 | } | 811 | } |
807 | 812 | ||
808 | static int rtas_ibm_suspend_me(struct rtas_args *args) | 813 | int rtas_ibm_suspend_me(struct rtas_args *args) |
809 | { | 814 | { |
810 | long state; | 815 | long state; |
811 | long rc; | 816 | long rc; |
@@ -855,7 +860,7 @@ static int rtas_ibm_suspend_me(struct rtas_args *args) | |||
855 | return atomic_read(&data.error); | 860 | return atomic_read(&data.error); |
856 | } | 861 | } |
857 | #else /* CONFIG_PPC_PSERIES */ | 862 | #else /* CONFIG_PPC_PSERIES */ |
858 | static int rtas_ibm_suspend_me(struct rtas_args *args) | 863 | int rtas_ibm_suspend_me(struct rtas_args *args) |
859 | { | 864 | { |
860 | return -ENOSYS; | 865 | return -ENOSYS; |
861 | } | 866 | } |
@@ -969,7 +974,7 @@ void __init rtas_initialize(void) | |||
969 | */ | 974 | */ |
970 | #ifdef CONFIG_PPC64 | 975 | #ifdef CONFIG_PPC64 |
971 | if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) { | 976 | if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) { |
972 | rtas_region = min(memblock.rmo_size, RTAS_INSTANTIATE_MAX); | 977 | rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX); |
973 | ibm_suspend_me_token = rtas_token("ibm,suspend-me"); | 978 | ibm_suspend_me_token = rtas_token("ibm,suspend-me"); |
974 | } | 979 | } |
975 | #endif | 980 | #endif |
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c index 67a84d8f118d..bf5f5ce3a7bd 100644 --- a/arch/powerpc/kernel/rtas_flash.c +++ b/arch/powerpc/kernel/rtas_flash.c | |||
@@ -256,31 +256,16 @@ static ssize_t rtas_flash_read(struct file *file, char __user *buf, | |||
256 | struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode); | 256 | struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode); |
257 | struct rtas_update_flash_t *uf; | 257 | struct rtas_update_flash_t *uf; |
258 | char msg[RTAS_MSG_MAXLEN]; | 258 | char msg[RTAS_MSG_MAXLEN]; |
259 | int msglen; | ||
260 | 259 | ||
261 | uf = (struct rtas_update_flash_t *) dp->data; | 260 | uf = dp->data; |
262 | 261 | ||
263 | if (!strcmp(dp->name, FIRMWARE_FLASH_NAME)) { | 262 | if (!strcmp(dp->name, FIRMWARE_FLASH_NAME)) { |
264 | get_flash_status_msg(uf->status, msg); | 263 | get_flash_status_msg(uf->status, msg); |
265 | } else { /* FIRMWARE_UPDATE_NAME */ | 264 | } else { /* FIRMWARE_UPDATE_NAME */ |
266 | sprintf(msg, "%d\n", uf->status); | 265 | sprintf(msg, "%d\n", uf->status); |
267 | } | 266 | } |
268 | msglen = strlen(msg); | ||
269 | if (msglen > count) | ||
270 | msglen = count; | ||
271 | |||
272 | if (ppos && *ppos != 0) | ||
273 | return 0; /* be cheap */ | ||
274 | |||
275 | if (!access_ok(VERIFY_WRITE, buf, msglen)) | ||
276 | return -EINVAL; | ||
277 | 267 | ||
278 | if (copy_to_user(buf, msg, msglen)) | 268 | return simple_read_from_buffer(buf, count, ppos, msg, strlen(msg)); |
279 | return -EFAULT; | ||
280 | |||
281 | if (ppos) | ||
282 | *ppos = msglen; | ||
283 | return msglen; | ||
284 | } | 269 | } |
285 | 270 | ||
286 | /* constructor for flash_block_cache */ | 271 | /* constructor for flash_block_cache */ |
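simple_read_from_buffer() replaces roughly fifteen lines of hand-rolled bookkeeping per read handler: it clamps the copy to whatever remains past *ppos, performs the copy_to_user(), advances *ppos and returns the number of bytes copied (or -EFAULT). That also fixes the old "be cheap" behaviour, where any read at a non-zero offset returned 0, which broke well-behaved readers that loop until EOF. The resulting handler shape (status value hypothetical):

    static ssize_t foo_read(struct file *file, char __user *buf,
                            size_t count, loff_t *ppos)
    {
            char msg[16];
            int len = sprintf(msg, "%d\n", status);  /* whatever the file reports */

            return simple_read_from_buffer(buf, count, ppos, msg, len);
    }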
@@ -394,26 +379,13 @@ static ssize_t manage_flash_read(struct file *file, char __user *buf, | |||
394 | char msg[RTAS_MSG_MAXLEN]; | 379 | char msg[RTAS_MSG_MAXLEN]; |
395 | int msglen; | 380 | int msglen; |
396 | 381 | ||
397 | args_buf = (struct rtas_manage_flash_t *) dp->data; | 382 | args_buf = dp->data; |
398 | if (args_buf == NULL) | 383 | if (args_buf == NULL) |
399 | return 0; | 384 | return 0; |
400 | 385 | ||
401 | msglen = sprintf(msg, "%d\n", args_buf->status); | 386 | msglen = sprintf(msg, "%d\n", args_buf->status); |
402 | if (msglen > count) | ||
403 | msglen = count; | ||
404 | 387 | ||
405 | if (ppos && *ppos != 0) | 388 | return simple_read_from_buffer(buf, count, ppos, msg, msglen); |
406 | return 0; /* be cheap */ | ||
407 | |||
408 | if (!access_ok(VERIFY_WRITE, buf, msglen)) | ||
409 | return -EINVAL; | ||
410 | |||
411 | if (copy_to_user(buf, msg, msglen)) | ||
412 | return -EFAULT; | ||
413 | |||
414 | if (ppos) | ||
415 | *ppos = msglen; | ||
416 | return msglen; | ||
417 | } | 389 | } |
418 | 390 | ||
419 | static ssize_t manage_flash_write(struct file *file, const char __user *buf, | 391 | static ssize_t manage_flash_write(struct file *file, const char __user *buf, |
@@ -495,24 +467,11 @@ static ssize_t validate_flash_read(struct file *file, char __user *buf, | |||
495 | char msg[RTAS_MSG_MAXLEN]; | 467 | char msg[RTAS_MSG_MAXLEN]; |
496 | int msglen; | 468 | int msglen; |
497 | 469 | ||
498 | args_buf = (struct rtas_validate_flash_t *) dp->data; | 470 | args_buf = dp->data; |
499 | 471 | ||
500 | if (ppos && *ppos != 0) | ||
501 | return 0; /* be cheap */ | ||
502 | |||
503 | msglen = get_validate_flash_msg(args_buf, msg); | 472 | msglen = get_validate_flash_msg(args_buf, msg); |
504 | if (msglen > count) | ||
505 | msglen = count; | ||
506 | |||
507 | if (!access_ok(VERIFY_WRITE, buf, msglen)) | ||
508 | return -EINVAL; | ||
509 | |||
510 | if (copy_to_user(buf, msg, msglen)) | ||
511 | return -EFAULT; | ||
512 | 473 | ||
513 | if (ppos) | 474 | return simple_read_from_buffer(buf, count, ppos, msg, msglen); |
514 | *ppos = msglen; | ||
515 | return msglen; | ||
516 | } | 475 | } |
517 | 476 | ||
518 | static ssize_t validate_flash_write(struct file *file, const char __user *buf, | 477 | static ssize_t validate_flash_write(struct file *file, const char __user *buf, |
@@ -716,6 +675,7 @@ static const struct file_operations rtas_flash_operations = { | |||
716 | .write = rtas_flash_write, | 675 | .write = rtas_flash_write, |
717 | .open = rtas_excl_open, | 676 | .open = rtas_excl_open, |
718 | .release = rtas_flash_release, | 677 | .release = rtas_flash_release, |
678 | .llseek = default_llseek, | ||
719 | }; | 679 | }; |
720 | 680 | ||
721 | static const struct file_operations manage_flash_operations = { | 681 | static const struct file_operations manage_flash_operations = { |
@@ -724,6 +684,7 @@ static const struct file_operations manage_flash_operations = { | |||
724 | .write = manage_flash_write, | 684 | .write = manage_flash_write, |
725 | .open = rtas_excl_open, | 685 | .open = rtas_excl_open, |
726 | .release = rtas_excl_release, | 686 | .release = rtas_excl_release, |
687 | .llseek = default_llseek, | ||
727 | }; | 688 | }; |
728 | 689 | ||
729 | static const struct file_operations validate_flash_operations = { | 690 | static const struct file_operations validate_flash_operations = { |
@@ -732,6 +693,7 @@ static const struct file_operations validate_flash_operations = { | |||
732 | .write = validate_flash_write, | 693 | .write = validate_flash_write, |
733 | .open = rtas_excl_open, | 694 | .open = rtas_excl_open, |
734 | .release = validate_flash_release, | 695 | .release = validate_flash_release, |
696 | .llseek = default_llseek, | ||
735 | }; | 697 | }; |
736 | 698 | ||
737 | static int __init rtas_flash_init(void) | 699 | static int __init rtas_flash_init(void) |
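The three .llseek lines follow the tree-wide rule, introduced with the BKL removal work, that every file_operations must state its seek semantics explicitly: default_llseek for files where a position is meaningful, and (as in rtasd.c below) noop_llseek for stream-like files where seeking should succeed but change nothing. The pattern:

    static const struct file_operations example_fops = {
            .read    = example_read,
            .open    = example_open,
            .llseek  = default_llseek,   /* explicit, not left implicit */
    };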
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c index 638883e23e3a..67f6c3b51357 100644 --- a/arch/powerpc/kernel/rtasd.c +++ b/arch/powerpc/kernel/rtasd.c | |||
@@ -160,7 +160,7 @@ static int log_rtas_len(char * buf) | |||
160 | /* rtas fixed header */ | 160 | /* rtas fixed header */ |
161 | len = 8; | 161 | len = 8; |
162 | err = (struct rtas_error_log *)buf; | 162 | err = (struct rtas_error_log *)buf; |
163 | if (err->extended_log_length) { | 163 | if (err->extended && err->extended_log_length) { |
164 | 164 | ||
165 | /* extended header */ | 165 | /* extended header */ |
166 | len += err->extended_log_length; | 166 | len += err->extended_log_length; |
@@ -354,6 +354,7 @@ static const struct file_operations proc_rtas_log_operations = { | |||
354 | .poll = rtas_log_poll, | 354 | .poll = rtas_log_poll, |
355 | .open = rtas_log_open, | 355 | .open = rtas_log_open, |
356 | .release = rtas_log_release, | 356 | .release = rtas_log_release, |
357 | .llseek = noop_llseek, | ||
357 | }; | 358 | }; |
358 | 359 | ||
359 | static int enable_surveillance(int timeout) | 360 | static int enable_surveillance(int timeout) |
@@ -411,7 +412,8 @@ static void rtas_event_scan(struct work_struct *w) | |||
411 | 412 | ||
412 | get_online_cpus(); | 413 | get_online_cpus(); |
413 | 414 | ||
414 | cpu = cpumask_next(smp_processor_id(), cpu_online_mask); | 415 | /* raw_ OK because just using CPU as starting point. */ |
416 | cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask); | ||
415 | if (cpu >= nr_cpu_ids) { | 417 | if (cpu >= nr_cpu_ids) { |
416 | cpu = cpumask_first(cpu_online_mask); | 418 | cpu = cpumask_first(cpu_online_mask); |
417 | 419 | ||
@@ -463,7 +465,7 @@ static void start_event_scan(void) | |||
463 | pr_debug("rtasd: will sleep for %d milliseconds\n", | 465 | pr_debug("rtasd: will sleep for %d milliseconds\n", |
464 | (30000 / rtas_event_scan_rate)); | 466 | (30000 / rtas_event_scan_rate)); |
465 | 467 | ||
466 | /* Retreive errors from nvram if any */ | 468 | /* Retrieve errors from nvram if any */ |
467 | retreive_nvram_error_log(); | 469 | retreive_nvram_error_log(); |
468 | 470 | ||
469 | schedule_delayed_work_on(cpumask_first(cpu_online_mask), | 471 | schedule_delayed_work_on(cpumask_first(cpu_online_mask), |
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 9d4882a46647..79fca2651b65 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c | |||
@@ -381,7 +381,7 @@ static void __init cpu_init_thread_core_maps(int tpc) | |||
381 | int i; | 381 | int i; |
382 | 382 | ||
383 | threads_per_core = tpc; | 383 | threads_per_core = tpc; |
384 | threads_core_mask = CPU_MASK_NONE; | 384 | cpumask_clear(&threads_core_mask); |
385 | 385 | ||
386 | /* This implementation only supports power of 2 number of threads | 386 | /* This implementation only supports power of 2 number of threads |
387 | * for simplicity and performance | 387 | * for simplicity and performance |
@@ -390,7 +390,7 @@ static void __init cpu_init_thread_core_maps(int tpc) | |||
390 | BUG_ON(tpc != (1 << threads_shift)); | 390 | BUG_ON(tpc != (1 << threads_shift)); |
391 | 391 | ||
392 | for (i = 0; i < tpc; i++) | 392 | for (i = 0; i < tpc; i++) |
393 | cpu_set(i, threads_core_mask); | 393 | cpumask_set_cpu(i, &threads_core_mask); |
394 | 394 | ||
395 | printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n", | 395 | printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n", |
396 | tpc, tpc > 1 ? "s" : ""); | 396 | tpc, tpc > 1 ? "s" : ""); |
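The cpumask changes move off the by-value idioms (assignment from CPU_MASK_NONE, cpu_set()) onto the accessor API (cpumask_clear(), cpumask_set_cpu()), which operates through pointers and therefore keeps working when CONFIG_CPUMASK_OFFSTACK makes masks too large to copy around. Side by side:

    cpumask_clear(&threads_core_mask);       /* was: threads_core_mask = CPU_MASK_NONE; */
    cpumask_set_cpu(i, &threads_core_mask);  /* was: cpu_set(i, threads_core_mask);     */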
@@ -404,7 +404,7 @@ static void __init cpu_init_thread_core_maps(int tpc) | |||
404 | * cpu_present_mask | 404 | * cpu_present_mask |
405 | * | 405 | * |
406 | * Having the possible map set up early allows us to restrict allocations | 406 | * Having the possible map set up early allows us to restrict allocations |
407 | * of things like irqstacks to num_possible_cpus() rather than NR_CPUS. | 407 | * of things like irqstacks to nr_cpu_ids rather than NR_CPUS. |
408 | * | 408 | * |
409 | * We do not initialize the online map here; cpus set their own bits in | 409 | * We do not initialize the online map here; cpus set their own bits in |
410 | * cpu_online_mask as they come up. | 410 | * cpu_online_mask as they come up. |
@@ -424,7 +424,7 @@ void __init smp_setup_cpu_maps(void) | |||
424 | 424 | ||
425 | DBG("smp_setup_cpu_maps()\n"); | 425 | DBG("smp_setup_cpu_maps()\n"); |
426 | 426 | ||
427 | while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) { | 427 | while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < nr_cpu_ids) { |
428 | const int *intserv; | 428 | const int *intserv; |
429 | int j, len; | 429 | int j, len; |
430 | 430 | ||
@@ -443,7 +443,7 @@ void __init smp_setup_cpu_maps(void) | |||
443 | intserv = &cpu; /* assume logical == phys */ | 443 | intserv = &cpu; /* assume logical == phys */ |
444 | } | 444 | } |
445 | 445 | ||
446 | for (j = 0; j < nthreads && cpu < NR_CPUS; j++) { | 446 | for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) { |
447 | DBG(" thread %d -> cpu %d (hard id %d)\n", | 447 | DBG(" thread %d -> cpu %d (hard id %d)\n", |
448 | j, cpu, intserv[j]); | 448 | j, cpu, intserv[j]); |
449 | set_cpu_present(cpu, true); | 449 | set_cpu_present(cpu, true); |
@@ -483,12 +483,12 @@ void __init smp_setup_cpu_maps(void) | |||
483 | if (cpu_has_feature(CPU_FTR_SMT)) | 483 | if (cpu_has_feature(CPU_FTR_SMT)) |
484 | maxcpus *= nthreads; | 484 | maxcpus *= nthreads; |
485 | 485 | ||
486 | if (maxcpus > NR_CPUS) { | 486 | if (maxcpus > nr_cpu_ids) { |
487 | printk(KERN_WARNING | 487 | printk(KERN_WARNING |
488 | "Partition configured for %d cpus, " | 488 | "Partition configured for %d cpus, " |
489 | "operating system maximum is %d.\n", | 489 | "operating system maximum is %d.\n", |
490 | maxcpus, NR_CPUS); | 490 | maxcpus, nr_cpu_ids); |
491 | maxcpus = NR_CPUS; | 491 | maxcpus = nr_cpu_ids; |
492 | } else | 492 | } else |
493 | printk(KERN_INFO "Partition configured for %d cpus.\n", | 493 | printk(KERN_INFO "Partition configured for %d cpus.\n", |
494 | maxcpus); | 494 | maxcpus); |
@@ -509,6 +509,9 @@ void __init smp_setup_cpu_maps(void) | |||
509 | */ | 509 | */ |
510 | cpu_init_thread_core_maps(nthreads); | 510 | cpu_init_thread_core_maps(nthreads); |
511 | 511 | ||
512 | /* Now that possible cpus are set, set nr_cpu_ids for later use */ | ||
513 | setup_nr_cpu_ids(); | ||
514 | |||
512 | free_unused_pacas(); | 515 | free_unused_pacas(); |
513 | } | 516 | } |
514 | #endif /* CONFIG_SMP */ | 517 | #endif /* CONFIG_SMP */ |
@@ -599,6 +602,10 @@ int check_legacy_ioport(unsigned long base_port) | |||
599 | * name instead */ | 602 | * name instead */ |
600 | if (!np) | 603 | if (!np) |
601 | np = of_find_node_by_name(NULL, "8042"); | 604 | np = of_find_node_by_name(NULL, "8042"); |
605 | if (np) { | ||
606 | of_i8042_kbd_irq = 1; | ||
607 | of_i8042_aux_irq = 12; | ||
608 | } | ||
602 | break; | 609 | break; |
603 | case FDC_BASE: /* FDC1 */ | 610 | case FDC_BASE: /* FDC1 */ |
604 | np = of_find_node_by_type(NULL, "fdc"); | 611 | np = of_find_node_by_type(NULL, "fdc"); |
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 93666f9cabf1..620d792b52e4 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c | |||
@@ -46,8 +46,9 @@ | |||
46 | 46 | ||
47 | extern void bootx_init(unsigned long r4, unsigned long phys); | 47 | extern void bootx_init(unsigned long r4, unsigned long phys); |
48 | 48 | ||
49 | int boot_cpuid; | 49 | int boot_cpuid = -1; |
50 | EXPORT_SYMBOL_GPL(boot_cpuid); | 50 | EXPORT_SYMBOL_GPL(boot_cpuid); |
51 | int __initdata boot_cpu_count; | ||
51 | int boot_cpuid_phys; | 52 | int boot_cpuid_phys; |
52 | 53 | ||
53 | int smp_hw_index[NR_CPUS]; | 54 | int smp_hw_index[NR_CPUS]; |
@@ -246,7 +247,7 @@ static void __init irqstack_early_init(void) | |||
246 | unsigned int i; | 247 | unsigned int i; |
247 | 248 | ||
248 | /* interrupt stacks must be in lowmem, we get that for free on ppc32 | 249 | /* interrupt stacks must be in lowmem, we get that for free on ppc32 |
249 | * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */ | 250 | * as the memblock is limited to lowmem by default */ |
250 | for_each_possible_cpu(i) { | 251 | for_each_possible_cpu(i) { |
251 | softirq_ctx[i] = (struct thread_info *) | 252 | softirq_ctx[i] = (struct thread_info *) |
252 | __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); | 253 | __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); |
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index e72690ec9b87..a88bf2713d41 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c | |||
@@ -62,6 +62,7 @@ | |||
62 | #include <asm/udbg.h> | 62 | #include <asm/udbg.h> |
63 | #include <asm/kexec.h> | 63 | #include <asm/kexec.h> |
64 | #include <asm/mmu_context.h> | 64 | #include <asm/mmu_context.h> |
65 | #include <asm/code-patching.h> | ||
65 | 66 | ||
66 | #include "setup.h" | 67 | #include "setup.h" |
67 | 68 | ||
@@ -72,6 +73,7 @@ | |||
72 | #endif | 73 | #endif |
73 | 74 | ||
74 | int boot_cpuid = 0; | 75 | int boot_cpuid = 0; |
76 | int __initdata boot_cpu_count; | ||
75 | u64 ppc64_pft_size; | 77 | u64 ppc64_pft_size; |
76 | 78 | ||
77 | /* Pick defaults since we might want to patch instructions | 79 | /* Pick defaults since we might want to patch instructions |
@@ -233,6 +235,7 @@ void early_setup_secondary(void) | |||
233 | void smp_release_cpus(void) | 235 | void smp_release_cpus(void) |
234 | { | 236 | { |
235 | unsigned long *ptr; | 237 | unsigned long *ptr; |
238 | int i; | ||
236 | 239 | ||
237 | DBG(" -> smp_release_cpus()\n"); | 240 | DBG(" -> smp_release_cpus()\n"); |
238 | 241 | ||
@@ -245,7 +248,16 @@ void smp_release_cpus(void) | |||
245 | ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop | 248 | ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop |
246 | - PHYSICAL_START); | 249 | - PHYSICAL_START); |
247 | *ptr = __pa(generic_secondary_smp_init); | 250 | *ptr = __pa(generic_secondary_smp_init); |
248 | mb(); | 251 | |
252 | /* And wait a bit for them to catch up */ | ||
253 | for (i = 0; i < 100000; i++) { | ||
254 | mb(); | ||
255 | HMT_low(); | ||
256 | if (boot_cpu_count == 0) | ||
257 | break; | ||
258 | udelay(1); | ||
259 | } | ||
260 | DBG("boot_cpu_count = %d\n", boot_cpu_count); | ||
249 | 261 | ||
250 | DBG(" <- smp_release_cpus()\n"); | 262 | DBG(" <- smp_release_cpus()\n"); |
251 | } | 263 | } |
@@ -423,22 +435,35 @@ void __init setup_system(void) | |||
423 | DBG(" <- setup_system()\n"); | 435 | DBG(" <- setup_system()\n"); |
424 | } | 436 | } |
425 | 437 | ||
426 | static u64 slb0_limit(void) | 438 | /* This returns the limit below which memory accesses to the linear |
439 | * mapping are guaranteed not to cause a TLB or SLB miss. This is | ||
440 | * used to allocate interrupt or emergency stacks for which our | ||
441 | * exception entry path doesn't deal with being interrupted. | ||
442 | */ | ||
443 | static u64 safe_stack_limit(void) | ||
427 | { | 444 | { |
428 | if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) { | 445 | #ifdef CONFIG_PPC_BOOK3E |
446 | /* Freescale BookE bolts the entire linear mapping */ | ||
447 | if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) | ||
448 | return linear_map_top; | ||
449 | /* Other BookE, we assume the first GB is bolted */ | ||
450 | return 1ul << 30; | ||
451 | #else | ||
452 | /* BookS, the first segment is bolted */ | ||
453 | if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) | ||
429 | return 1UL << SID_SHIFT_1T; | 454 | return 1UL << SID_SHIFT_1T; |
430 | } | ||
431 | return 1UL << SID_SHIFT; | 455 | return 1UL << SID_SHIFT; |
456 | #endif | ||
432 | } | 457 | } |
433 | 458 | ||
434 | static void __init irqstack_early_init(void) | 459 | static void __init irqstack_early_init(void) |
435 | { | 460 | { |
436 | u64 limit = slb0_limit(); | 461 | u64 limit = safe_stack_limit(); |
437 | unsigned int i; | 462 | unsigned int i; |
438 | 463 | ||
439 | /* | 464 | /* |
440 | * interrupt stacks must be under 256MB, we cannot afford to take | 465 | * Interrupt stacks must be in the first segment since we |
441 | * SLB misses on them. | 466 | * cannot afford to take SLB misses on them. |
442 | */ | 467 | */ |
443 | for_each_possible_cpu(i) { | 468 | for_each_possible_cpu(i) { |
444 | softirq_ctx[i] = (struct thread_info *) | 469 | softirq_ctx[i] = (struct thread_info *) |
@@ -453,6 +478,9 @@ static void __init irqstack_early_init(void) | |||
453 | #ifdef CONFIG_PPC_BOOK3E | 478 | #ifdef CONFIG_PPC_BOOK3E |
454 | static void __init exc_lvl_early_init(void) | 479 | static void __init exc_lvl_early_init(void) |
455 | { | 480 | { |
481 | extern unsigned int interrupt_base_book3e; | ||
482 | extern unsigned int exc_debug_debug_book3e; | ||
483 | |||
456 | unsigned int i; | 484 | unsigned int i; |
457 | 485 | ||
458 | for_each_possible_cpu(i) { | 486 | for_each_possible_cpu(i) { |
@@ -463,6 +491,10 @@ static void __init exc_lvl_early_init(void) | |||
463 | mcheckirq_ctx[i] = (struct thread_info *) | 491 | mcheckirq_ctx[i] = (struct thread_info *) |
464 | __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); | 492 | __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); |
465 | } | 493 | } |
494 | |||
495 | if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) | ||
496 | patch_branch(&interrupt_base_book3e + (0x040 / 4) + 1, | ||
497 | (unsigned long)&exc_debug_debug_book3e, 0); | ||
466 | } | 498 | } |
467 | #else | 499 | #else |
468 | #define exc_lvl_early_init() | 500 | #define exc_lvl_early_init() |
@@ -486,7 +518,7 @@ static void __init emergency_stack_init(void) | |||
486 | * bringup, we need to get at them in real mode. This means they | 518 | * bringup, we need to get at them in real mode. This means they |
487 | * must also be within the RMO region. | 519 | * must also be within the RMO region. |
488 | */ | 520 | */ |
489 | limit = min(slb0_limit(), memblock.rmo_size); | 521 | limit = min(safe_stack_limit(), ppc64_rma_size); |
490 | 522 | ||
491 | for_each_possible_cpu(i) { | 523 | for_each_possible_cpu(i) { |
492 | unsigned long sp; | 524 | unsigned long sp; |
@@ -497,9 +529,8 @@ static void __init emergency_stack_init(void) | |||
497 | } | 529 | } |
498 | 530 | ||
499 | /* | 531 | /* |
500 | * Called into from start_kernel, after lock_kernel has been called. | 532 | * Called into from start_kernel, this initializes bootmem, which is used
501 | * Initializes bootmem, which is unsed to manage page allocation until | 533 | * to manage page allocation until mem_init is called. |
502 | * mem_init is called. | ||
503 | */ | 534 | */ |
504 | void __init setup_arch(char **cmdline_p) | 535 | void __init setup_arch(char **cmdline_p) |
505 | { | 536 | { |
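The new safe_stack_limit() above picks the largest address range below which linear-mapping accesses cannot take an SLB or TLB miss, which is what makes the region safe for interrupt and emergency stacks. A rough userspace model of that decision; the enum, feature flags, and constants are stand-ins for the kernel's feature tests, not real APIs:

    #include <stdio.h>
    #include <stdint.h>

    #define SID_SHIFT     28   /* 256MB segments */
    #define SID_SHIFT_1T  40   /* 1TB segments */

    enum mmu_type { MMU_BOOK3S, MMU_BOOK3E_FSL, MMU_BOOK3E_OTHER };

    static uint64_t safe_stack_limit(enum mmu_type mmu, uint64_t linear_map_top,
                                     int has_1t_segments)
    {
        switch (mmu) {
        case MMU_BOOK3E_FSL:
            return linear_map_top;   /* Freescale BookE bolts the whole mapping */
        case MMU_BOOK3E_OTHER:
            return 1ULL << 30;       /* assume the first 1GB is bolted */
        default:                     /* BookS: the first segment is bolted */
            return has_1t_segments ? 1ULL << SID_SHIFT_1T : 1ULL << SID_SHIFT;
        }
    }

    int main(void)
    {
        printf("%#llx\n", (unsigned long long)safe_stack_limit(MMU_BOOK3S, 0, 0));
        printf("%#llx\n", (unsigned long long)safe_stack_limit(MMU_BOOK3S, 0, 1));
        return 0;
    }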
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index b96a3a010c26..78b76dc54dfb 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/errno.h> | 25 | #include <linux/errno.h> |
26 | #include <linux/elf.h> | 26 | #include <linux/elf.h> |
27 | #include <linux/ptrace.h> | 27 | #include <linux/ptrace.h> |
28 | #include <linux/ratelimit.h> | ||
28 | #ifdef CONFIG_PPC64 | 29 | #ifdef CONFIG_PPC64 |
29 | #include <linux/syscalls.h> | 30 | #include <linux/syscalls.h> |
30 | #include <linux/compat.h> | 31 | #include <linux/compat.h> |
@@ -892,11 +893,12 @@ badframe: | |||
892 | printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n", | 893 | printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n", |
893 | regs, frame, newsp); | 894 | regs, frame, newsp); |
894 | #endif | 895 | #endif |
895 | if (show_unhandled_signals && printk_ratelimit()) | 896 | if (show_unhandled_signals) |
896 | printk(KERN_INFO "%s[%d]: bad frame in handle_rt_signal32: " | 897 | printk_ratelimited(KERN_INFO |
897 | "%p nip %08lx lr %08lx\n", | 898 | "%s[%d]: bad frame in handle_rt_signal32: " |
898 | current->comm, current->pid, | 899 | "%p nip %08lx lr %08lx\n", |
899 | addr, regs->nip, regs->link); | 900 | current->comm, current->pid, |
901 | addr, regs->nip, regs->link); | ||
900 | 902 | ||
901 | force_sigsegv(sig, current); | 903 | force_sigsegv(sig, current); |
902 | return 0; | 904 | return 0; |
@@ -1058,11 +1060,12 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, | |||
1058 | return 0; | 1060 | return 0; |
1059 | 1061 | ||
1060 | bad: | 1062 | bad: |
1061 | if (show_unhandled_signals && printk_ratelimit()) | 1063 | if (show_unhandled_signals) |
1062 | printk(KERN_INFO "%s[%d]: bad frame in sys_rt_sigreturn: " | 1064 | printk_ratelimited(KERN_INFO |
1063 | "%p nip %08lx lr %08lx\n", | 1065 | "%s[%d]: bad frame in sys_rt_sigreturn: " |
1064 | current->comm, current->pid, | 1066 | "%p nip %08lx lr %08lx\n", |
1065 | rt_sf, regs->nip, regs->link); | 1067 | current->comm, current->pid, |
1068 | rt_sf, regs->nip, regs->link); | ||
1066 | 1069 | ||
1067 | force_sig(SIGSEGV, current); | 1070 | force_sig(SIGSEGV, current); |
1068 | return 0; | 1071 | return 0; |
@@ -1149,12 +1152,12 @@ int sys_debug_setcontext(struct ucontext __user *ctx, | |||
1149 | * We kill the task with a SIGSEGV in this situation. | 1152 | * We kill the task with a SIGSEGV in this situation. |
1150 | */ | 1153 | */ |
1151 | if (do_setcontext(ctx, regs, 1)) { | 1154 | if (do_setcontext(ctx, regs, 1)) { |
1152 | if (show_unhandled_signals && printk_ratelimit()) | 1155 | if (show_unhandled_signals) |
1153 | printk(KERN_INFO "%s[%d]: bad frame in " | 1156 | printk_ratelimited(KERN_INFO "%s[%d]: bad frame in " |
1154 | "sys_debug_setcontext: %p nip %08lx " | 1157 | "sys_debug_setcontext: %p nip %08lx " |
1155 | "lr %08lx\n", | 1158 | "lr %08lx\n", |
1156 | current->comm, current->pid, | 1159 | current->comm, current->pid, |
1157 | ctx, regs->nip, regs->link); | 1160 | ctx, regs->nip, regs->link); |
1158 | 1161 | ||
1159 | force_sig(SIGSEGV, current); | 1162 | force_sig(SIGSEGV, current); |
1160 | goto out; | 1163 | goto out; |
@@ -1236,11 +1239,12 @@ badframe: | |||
1236 | printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n", | 1239 | printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n", |
1237 | regs, frame, newsp); | 1240 | regs, frame, newsp); |
1238 | #endif | 1241 | #endif |
1239 | if (show_unhandled_signals && printk_ratelimit()) | 1242 | if (show_unhandled_signals) |
1240 | printk(KERN_INFO "%s[%d]: bad frame in handle_signal32: " | 1243 | printk_ratelimited(KERN_INFO |
1241 | "%p nip %08lx lr %08lx\n", | 1244 | "%s[%d]: bad frame in handle_signal32: " |
1242 | current->comm, current->pid, | 1245 | "%p nip %08lx lr %08lx\n", |
1243 | frame, regs->nip, regs->link); | 1246 | current->comm, current->pid, |
1247 | frame, regs->nip, regs->link); | ||
1244 | 1248 | ||
1245 | force_sigsegv(sig, current); | 1249 | force_sigsegv(sig, current); |
1246 | return 0; | 1250 | return 0; |
@@ -1288,11 +1292,12 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, | |||
1288 | return 0; | 1292 | return 0; |
1289 | 1293 | ||
1290 | badframe: | 1294 | badframe: |
1291 | if (show_unhandled_signals && printk_ratelimit()) | 1295 | if (show_unhandled_signals) |
1292 | printk(KERN_INFO "%s[%d]: bad frame in sys_sigreturn: " | 1296 | printk_ratelimited(KERN_INFO |
1293 | "%p nip %08lx lr %08lx\n", | 1297 | "%s[%d]: bad frame in sys_sigreturn: " |
1294 | current->comm, current->pid, | 1298 | "%p nip %08lx lr %08lx\n", |
1295 | addr, regs->nip, regs->link); | 1299 | current->comm, current->pid, |
1300 | addr, regs->nip, regs->link); | ||
1296 | 1301 | ||
1297 | force_sig(SIGSEGV, current); | 1302 | force_sig(SIGSEGV, current); |
1298 | return 0; | 1303 | return 0; |
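All of the conversions above swap the global printk_ratelimit() gate for printk_ratelimited(), which keeps its ratelimit state per call site so one noisy message can no longer suppress unrelated diagnostics. A simplified userspace model of that distinction; the struct layout, interval, and burst values here are illustrative, not the kernel's ratelimit implementation:

    #include <stdio.h>
    #include <time.h>

    struct ratelimit_state {
        time_t begin;    /* start of the current interval */
        int printed;     /* messages emitted this interval */
        int interval;    /* seconds */
        int burst;       /* max messages per interval */
    };

    static int ratelimit_ok(struct ratelimit_state *rs)
    {
        time_t now = time(NULL);

        if (now - rs->begin >= rs->interval) {   /* new interval: reset */
            rs->begin = now;
            rs->printed = 0;
        }
        return rs->printed++ < rs->burst;
    }

    /* One static state per call site, as printk_ratelimited() does. */
    #define pr_ratelimited(fmt, ...) do {                         \
        static struct ratelimit_state __rs = { 0, 0, 5, 10 };     \
        if (ratelimit_ok(&__rs))                                  \
            printf(fmt, ##__VA_ARGS__);                           \
    } while (0)

    int main(void)
    {
        for (int i = 0; i < 100; i++)
            pr_ratelimited("bad frame %d\n", i);  /* only the first 10 print */
        return 0;
    }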
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 27c4a4584f80..e91c736cc842 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/elf.h> | 24 | #include <linux/elf.h> |
25 | #include <linux/ptrace.h> | 25 | #include <linux/ptrace.h> |
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/ratelimit.h> | ||
27 | 28 | ||
28 | #include <asm/sigcontext.h> | 29 | #include <asm/sigcontext.h> |
29 | #include <asm/ucontext.h> | 30 | #include <asm/ucontext.h> |
@@ -380,10 +381,10 @@ badframe: | |||
380 | printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n", | 381 | printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n", |
381 | regs, uc, &uc->uc_mcontext); | 382 | regs, uc, &uc->uc_mcontext); |
382 | #endif | 383 | #endif |
383 | if (show_unhandled_signals && printk_ratelimit()) | 384 | if (show_unhandled_signals) |
384 | printk(regs->msr & MSR_SF ? fmt64 : fmt32, | 385 | printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32, |
385 | current->comm, current->pid, "rt_sigreturn", | 386 | current->comm, current->pid, "rt_sigreturn", |
386 | (long)uc, regs->nip, regs->link); | 387 | (long)uc, regs->nip, regs->link); |
387 | 388 | ||
388 | force_sig(SIGSEGV, current); | 389 | force_sig(SIGSEGV, current); |
389 | return 0; | 390 | return 0; |
@@ -468,10 +469,10 @@ badframe: | |||
468 | printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n", | 469 | printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n", |
469 | regs, frame, newsp); | 470 | regs, frame, newsp); |
470 | #endif | 471 | #endif |
471 | if (show_unhandled_signals && printk_ratelimit()) | 472 | if (show_unhandled_signals) |
472 | printk(regs->msr & MSR_SF ? fmt64 : fmt32, | 473 | printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32, |
473 | current->comm, current->pid, "setup_rt_frame", | 474 | current->comm, current->pid, "setup_rt_frame", |
474 | (long)frame, regs->nip, regs->link); | 475 | (long)frame, regs->nip, regs->link); |
475 | 476 | ||
476 | force_sigsegv(signr, current); | 477 | force_sigsegv(signr, current); |
477 | return 0; | 478 | return 0; |
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 0008bc58e826..8ebc6700b98d 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c | |||
@@ -57,6 +57,25 @@ | |||
57 | #define DBG(fmt...) | 57 | #define DBG(fmt...) |
58 | #endif | 58 | #endif |
59 | 59 | ||
60 | |||
61 | /* Store all idle threads; these can be reused instead of creating | ||
62 | * a new thread. Also avoids complicated thread destroy functionality | ||
63 | * for idle threads. | ||
64 | */ | ||
65 | #ifdef CONFIG_HOTPLUG_CPU | ||
66 | /* | ||
67 | * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is | ||
68 | * removed after init for !CONFIG_HOTPLUG_CPU. | ||
69 | */ | ||
70 | static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); | ||
71 | #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) | ||
72 | #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) | ||
73 | #else | ||
74 | static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; | ||
75 | #define get_idle_for_cpu(x) (idle_thread_array[(x)]) | ||
76 | #define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p)) | ||
77 | #endif | ||
78 | |||
60 | struct thread_info *secondary_ti; | 79 | struct thread_info *secondary_ti; |
61 | 80 | ||
62 | DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map); | 81 | DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map); |
@@ -76,7 +95,7 @@ int smt_enabled_at_boot = 1; | |||
76 | static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL; | 95 | static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL; |
77 | 96 | ||
78 | #ifdef CONFIG_PPC64 | 97 | #ifdef CONFIG_PPC64 |
79 | void __devinit smp_generic_kick_cpu(int nr) | 98 | int __devinit smp_generic_kick_cpu(int nr) |
80 | { | 99 | { |
81 | BUG_ON(nr < 0 || nr >= NR_CPUS); | 100 | BUG_ON(nr < 0 || nr >= NR_CPUS); |
82 | 101 | ||
@@ -87,37 +106,10 @@ void __devinit smp_generic_kick_cpu(int nr) | |||
87 | */ | 106 | */ |
88 | paca[nr].cpu_start = 1; | 107 | paca[nr].cpu_start = 1; |
89 | smp_mb(); | 108 | smp_mb(); |
90 | } | ||
91 | #endif | ||
92 | 109 | ||
93 | void smp_message_recv(int msg) | 110 | return 0; |
94 | { | ||
95 | switch(msg) { | ||
96 | case PPC_MSG_CALL_FUNCTION: | ||
97 | generic_smp_call_function_interrupt(); | ||
98 | break; | ||
99 | case PPC_MSG_RESCHEDULE: | ||
100 | /* we notice need_resched on exit */ | ||
101 | break; | ||
102 | case PPC_MSG_CALL_FUNC_SINGLE: | ||
103 | generic_smp_call_function_single_interrupt(); | ||
104 | break; | ||
105 | case PPC_MSG_DEBUGGER_BREAK: | ||
106 | if (crash_ipi_function_ptr) { | ||
107 | crash_ipi_function_ptr(get_irq_regs()); | ||
108 | break; | ||
109 | } | ||
110 | #ifdef CONFIG_DEBUGGER | ||
111 | debugger_ipi(get_irq_regs()); | ||
112 | break; | ||
113 | #endif /* CONFIG_DEBUGGER */ | ||
114 | /* FALLTHROUGH */ | ||
115 | default: | ||
116 | printk("SMP %d: smp_message_recv(): unknown msg %d\n", | ||
117 | smp_processor_id(), msg); | ||
118 | break; | ||
119 | } | ||
120 | } | 111 | } |
112 | #endif | ||
121 | 113 | ||
122 | static irqreturn_t call_function_action(int irq, void *data) | 114 | static irqreturn_t call_function_action(int irq, void *data) |
123 | { | 115 | { |
@@ -127,7 +119,7 @@ static irqreturn_t call_function_action(int irq, void *data) | |||
127 | 119 | ||
128 | static irqreturn_t reschedule_action(int irq, void *data) | 120 | static irqreturn_t reschedule_action(int irq, void *data) |
129 | { | 121 | { |
130 | /* we just need the return path side effect of checking need_resched */ | 122 | scheduler_ipi(); |
131 | return IRQ_HANDLED; | 123 | return IRQ_HANDLED; |
132 | } | 124 | } |
133 | 125 | ||
@@ -139,7 +131,15 @@ static irqreturn_t call_function_single_action(int irq, void *data) | |||
139 | 131 | ||
140 | static irqreturn_t debug_ipi_action(int irq, void *data) | 132 | static irqreturn_t debug_ipi_action(int irq, void *data) |
141 | { | 133 | { |
142 | smp_message_recv(PPC_MSG_DEBUGGER_BREAK); | 134 | if (crash_ipi_function_ptr) { |
135 | crash_ipi_function_ptr(get_irq_regs()); | ||
136 | return IRQ_HANDLED; | ||
137 | } | ||
138 | |||
139 | #ifdef CONFIG_DEBUGGER | ||
140 | debugger_ipi(get_irq_regs()); | ||
141 | #endif /* CONFIG_DEBUGGER */ | ||
142 | |||
143 | return IRQ_HANDLED; | 143 | return IRQ_HANDLED; |
144 | } | 144 | } |
145 | 145 | ||
@@ -178,6 +178,66 @@ int smp_request_message_ipi(int virq, int msg) | |||
178 | return err; | 178 | return err; |
179 | } | 179 | } |
180 | 180 | ||
181 | #ifdef CONFIG_PPC_SMP_MUXED_IPI | ||
182 | struct cpu_messages { | ||
183 | int messages; /* current messages */ | ||
184 | unsigned long data; /* data for cause ipi */ | ||
185 | }; | ||
186 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message); | ||
187 | |||
188 | void smp_muxed_ipi_set_data(int cpu, unsigned long data) | ||
189 | { | ||
190 | struct cpu_messages *info = &per_cpu(ipi_message, cpu); | ||
191 | |||
192 | info->data = data; | ||
193 | } | ||
194 | |||
195 | void smp_muxed_ipi_message_pass(int cpu, int msg) | ||
196 | { | ||
197 | struct cpu_messages *info = &per_cpu(ipi_message, cpu); | ||
198 | char *message = (char *)&info->messages; | ||
199 | |||
200 | message[msg] = 1; | ||
201 | mb(); | ||
202 | smp_ops->cause_ipi(cpu, info->data); | ||
203 | } | ||
204 | |||
205 | void smp_muxed_ipi_resend(void) | ||
206 | { | ||
207 | struct cpu_messages *info = &__get_cpu_var(ipi_message); | ||
208 | |||
209 | if (info->messages) | ||
210 | smp_ops->cause_ipi(smp_processor_id(), info->data); | ||
211 | } | ||
212 | |||
213 | irqreturn_t smp_ipi_demux(void) | ||
214 | { | ||
215 | struct cpu_messages *info = &__get_cpu_var(ipi_message); | ||
216 | unsigned int all; | ||
217 | |||
218 | mb(); /* order any irq clear */ | ||
219 | |||
220 | do { | ||
221 | all = xchg_local(&info->messages, 0); | ||
222 | |||
223 | #ifdef __BIG_ENDIAN | ||
224 | if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION))) | ||
225 | generic_smp_call_function_interrupt(); | ||
226 | if (all & (1 << (24 - 8 * PPC_MSG_RESCHEDULE))) | ||
227 | scheduler_ipi(); | ||
228 | if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNC_SINGLE))) | ||
229 | generic_smp_call_function_single_interrupt(); | ||
230 | if (all & (1 << (24 - 8 * PPC_MSG_DEBUGGER_BREAK))) | ||
231 | debug_ipi_action(0, NULL); | ||
232 | #else | ||
233 | #error Unsupported ENDIAN | ||
234 | #endif | ||
235 | } while (info->messages); | ||
236 | |||
237 | return IRQ_HANDLED; | ||
238 | } | ||
239 | #endif /* CONFIG_PPC_SMP_MUXED_IPI */ | ||
240 | |||
181 | void smp_send_reschedule(int cpu) | 241 | void smp_send_reschedule(int cpu) |
182 | { | 242 | { |
183 | if (likely(smp_ops)) | 243 | if (likely(smp_ops)) |
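The smp_muxed_ipi_message_pass()/smp_ipi_demux() pair added above multiplexes several message types over a single hardware IPI: each type owns one byte of a 32-bit word, so a sender flags its message with a single byte store (no read-modify-write race) and the receiver drains the whole word with one xchg_local(). The test 1 << (24 - 8 * msg) is simply the big-endian bit position of byte msg, which is exactly what the #error guards. A small portable demonstration of that mapping, where the bytes array stands in for info->messages and the enumerators mirror the PPC_MSG_* order:

    #include <stdio.h>

    enum { MSG_CALL_FUNCTION, MSG_RESCHEDULE, MSG_CALL_FUNC_SINGLE, MSG_DEBUGGER };

    int main(void)
    {
        unsigned char bytes[4] = { 0 };   /* stands in for info->messages */

        bytes[MSG_RESCHEDULE] = 1;        /* sender side: one byte store */

        /* The word a big-endian CPU would load from those four bytes: */
        unsigned int all = (bytes[0] << 24) | (bytes[1] << 16) |
                           (bytes[2] << 8)  |  bytes[3];

        for (int msg = 0; msg < 4; msg++)
            if (all & (1u << (24 - 8 * msg)))
                printf("message %d pending\n", msg);  /* prints: message 1 */
        return 0;
    }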
@@ -197,11 +257,18 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask) | |||
197 | smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION); | 257 | smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION); |
198 | } | 258 | } |
199 | 259 | ||
200 | #ifdef CONFIG_DEBUGGER | 260 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) |
201 | void smp_send_debugger_break(int cpu) | 261 | void smp_send_debugger_break(void) |
202 | { | 262 | { |
203 | if (likely(smp_ops)) | 263 | int cpu; |
204 | smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK); | 264 | int me = raw_smp_processor_id(); |
265 | |||
266 | if (unlikely(!smp_ops)) | ||
267 | return; | ||
268 | |||
269 | for_each_online_cpu(cpu) | ||
270 | if (cpu != me) | ||
271 | smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK); | ||
205 | } | 272 | } |
206 | #endif | 273 | #endif |
207 | 274 | ||
@@ -209,9 +276,9 @@ void smp_send_debugger_break(int cpu) | |||
209 | void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) | 276 | void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) |
210 | { | 277 | { |
211 | crash_ipi_function_ptr = crash_ipi_callback; | 278 | crash_ipi_function_ptr = crash_ipi_callback; |
212 | if (crash_ipi_callback && smp_ops) { | 279 | if (crash_ipi_callback) { |
213 | mb(); | 280 | mb(); |
214 | smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK); | 281 | smp_send_debugger_break(); |
215 | } | 282 | } |
216 | } | 283 | } |
217 | #endif | 284 | #endif |
@@ -238,23 +305,6 @@ static void __devinit smp_store_cpu_info(int id) | |||
238 | per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR); | 305 | per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR); |
239 | } | 306 | } |
240 | 307 | ||
241 | static void __init smp_create_idle(unsigned int cpu) | ||
242 | { | ||
243 | struct task_struct *p; | ||
244 | |||
245 | /* create a process for the processor */ | ||
246 | p = fork_idle(cpu); | ||
247 | if (IS_ERR(p)) | ||
248 | panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p)); | ||
249 | #ifdef CONFIG_PPC64 | ||
250 | paca[cpu].__current = p; | ||
251 | paca[cpu].kstack = (unsigned long) task_thread_info(p) | ||
252 | + THREAD_SIZE - STACK_FRAME_OVERHEAD; | ||
253 | #endif | ||
254 | current_set[cpu] = task_thread_info(p); | ||
255 | task_thread_info(p)->cpu = cpu; | ||
256 | } | ||
257 | |||
258 | void __init smp_prepare_cpus(unsigned int max_cpus) | 308 | void __init smp_prepare_cpus(unsigned int max_cpus) |
259 | { | 309 | { |
260 | unsigned int cpu; | 310 | unsigned int cpu; |
@@ -288,10 +338,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
288 | max_cpus = NR_CPUS; | 338 | max_cpus = NR_CPUS; |
289 | else | 339 | else |
290 | max_cpus = 1; | 340 | max_cpus = 1; |
291 | |||
292 | for_each_possible_cpu(cpu) | ||
293 | if (cpu != boot_cpuid) | ||
294 | smp_create_idle(cpu); | ||
295 | } | 341 | } |
296 | 342 | ||
297 | void __devinit smp_prepare_boot_cpu(void) | 343 | void __devinit smp_prepare_boot_cpu(void) |
@@ -305,7 +351,7 @@ void __devinit smp_prepare_boot_cpu(void) | |||
305 | 351 | ||
306 | #ifdef CONFIG_HOTPLUG_CPU | 352 | #ifdef CONFIG_HOTPLUG_CPU |
307 | /* State of each CPU during hotplug phases */ | 353 | /* State of each CPU during hotplug phases */ |
308 | DEFINE_PER_CPU(int, cpu_state) = { 0 }; | 354 | static DEFINE_PER_CPU(int, cpu_state) = { 0 }; |
309 | 355 | ||
310 | int generic_cpu_disable(void) | 356 | int generic_cpu_disable(void) |
311 | { | 357 | { |
@@ -317,30 +363,8 @@ int generic_cpu_disable(void) | |||
317 | set_cpu_online(cpu, false); | 363 | set_cpu_online(cpu, false); |
318 | #ifdef CONFIG_PPC64 | 364 | #ifdef CONFIG_PPC64 |
319 | vdso_data->processorCount--; | 365 | vdso_data->processorCount--; |
320 | fixup_irqs(cpu_online_mask); | ||
321 | #endif | ||
322 | return 0; | ||
323 | } | ||
324 | |||
325 | int generic_cpu_enable(unsigned int cpu) | ||
326 | { | ||
327 | /* Do the normal bootup if we haven't | ||
328 | * already bootstrapped. */ | ||
329 | if (system_state != SYSTEM_RUNNING) | ||
330 | return -ENOSYS; | ||
331 | |||
332 | /* get the target out of it's holding state */ | ||
333 | per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; | ||
334 | smp_wmb(); | ||
335 | |||
336 | while (!cpu_online(cpu)) | ||
337 | cpu_relax(); | ||
338 | |||
339 | #ifdef CONFIG_PPC64 | ||
340 | fixup_irqs(cpu_online_mask); | ||
341 | /* counter the irq disable in fixup_irqs */ | ||
342 | local_irq_enable(); | ||
343 | #endif | 366 | #endif |
367 | migrate_irqs(); | ||
344 | return 0; | 368 | return 0; |
345 | } | 369 | } |
346 | 370 | ||
@@ -362,37 +386,89 @@ void generic_mach_cpu_die(void) | |||
362 | unsigned int cpu; | 386 | unsigned int cpu; |
363 | 387 | ||
364 | local_irq_disable(); | 388 | local_irq_disable(); |
389 | idle_task_exit(); | ||
365 | cpu = smp_processor_id(); | 390 | cpu = smp_processor_id(); |
366 | printk(KERN_DEBUG "CPU%d offline\n", cpu); | 391 | printk(KERN_DEBUG "CPU%d offline\n", cpu); |
367 | __get_cpu_var(cpu_state) = CPU_DEAD; | 392 | __get_cpu_var(cpu_state) = CPU_DEAD; |
368 | smp_wmb(); | 393 | smp_wmb(); |
369 | while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) | 394 | while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) |
370 | cpu_relax(); | 395 | cpu_relax(); |
371 | set_cpu_online(cpu, true); | 396 | } |
372 | local_irq_enable(); | 397 | |
398 | void generic_set_cpu_dead(unsigned int cpu) | ||
399 | { | ||
400 | per_cpu(cpu_state, cpu) = CPU_DEAD; | ||
373 | } | 401 | } |
374 | #endif | 402 | #endif |
375 | 403 | ||
376 | static int __devinit cpu_enable(unsigned int cpu) | 404 | struct create_idle { |
405 | struct work_struct work; | ||
406 | struct task_struct *idle; | ||
407 | struct completion done; | ||
408 | int cpu; | ||
409 | }; | ||
410 | |||
411 | static void __cpuinit do_fork_idle(struct work_struct *work) | ||
377 | { | 412 | { |
378 | if (smp_ops && smp_ops->cpu_enable) | 413 | struct create_idle *c_idle = |
379 | return smp_ops->cpu_enable(cpu); | 414 | container_of(work, struct create_idle, work); |
380 | 415 | ||
381 | return -ENOSYS; | 416 | c_idle->idle = fork_idle(c_idle->cpu); |
417 | complete(&c_idle->done); | ||
382 | } | 418 | } |
383 | 419 | ||
384 | int __cpuinit __cpu_up(unsigned int cpu) | 420 | static int __cpuinit create_idle(unsigned int cpu) |
385 | { | 421 | { |
386 | int c; | 422 | struct thread_info *ti; |
423 | struct create_idle c_idle = { | ||
424 | .cpu = cpu, | ||
425 | .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), | ||
426 | }; | ||
427 | INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); | ||
387 | 428 | ||
388 | secondary_ti = current_set[cpu]; | 429 | c_idle.idle = get_idle_for_cpu(cpu); |
389 | if (!cpu_enable(cpu)) | 430 | |
390 | return 0; | 431 | /* We can't use kernel_thread since we must avoid |
432 | * rescheduling the child. We use a workqueue because | ||
433 | * we want to fork from a kernel thread, not whatever | ||
434 | * userspace process happens to be trying to online us. | ||
435 | */ | ||
436 | if (!c_idle.idle) { | ||
437 | schedule_work(&c_idle.work); | ||
438 | wait_for_completion(&c_idle.done); | ||
439 | } else | ||
440 | init_idle(c_idle.idle, cpu); | ||
441 | if (IS_ERR(c_idle.idle)) { | ||
442 | pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle)); | ||
443 | return PTR_ERR(c_idle.idle); | ||
444 | } | ||
445 | ti = task_thread_info(c_idle.idle); | ||
446 | |||
447 | #ifdef CONFIG_PPC64 | ||
448 | paca[cpu].__current = c_idle.idle; | ||
449 | paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD; | ||
450 | #endif | ||
451 | ti->cpu = cpu; | ||
452 | current_set[cpu] = ti; | ||
453 | |||
454 | return 0; | ||
455 | } | ||
456 | |||
457 | int __cpuinit __cpu_up(unsigned int cpu) | ||
458 | { | ||
459 | int rc, c; | ||
391 | 460 | ||
392 | if (smp_ops == NULL || | 461 | if (smp_ops == NULL || |
393 | (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) | 462 | (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) |
394 | return -EINVAL; | 463 | return -EINVAL; |
395 | 464 | ||
465 | /* Make sure we have an idle thread */ | ||
466 | rc = create_idle(cpu); | ||
467 | if (rc) | ||
468 | return rc; | ||
469 | |||
470 | secondary_ti = current_set[cpu]; | ||
471 | |||
396 | /* Make sure callin-map entry is 0 (can be leftover from a CPU | 472 | /* Make sure callin-map entry is 0 (can be leftover from a CPU |
397 | * hotplug | 473 | * hotplug |
398 | */ | 474 | */ |
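create_idle() above hands fork_idle() off to a workqueue and blocks on a completion, so the idle task is forked from a kernel-thread context rather than from whatever userspace process triggered the online. A loose userspace analogue of that hand-off-and-wait pattern; pthreads stand in for the workqueue and completion APIs (build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    struct create_idle {
        pthread_t worker;     /* stands in for the workqueue */
        int cpu;
        int idle_created;     /* result, filled in by the worker */
    };

    static void *do_fork_idle(void *arg)
    {
        struct create_idle *c = arg;

        c->idle_created = 1;  /* stands in for fork_idle(c->cpu) */
        return NULL;          /* thread exit plays the role of complete() */
    }

    int main(void)
    {
        struct create_idle c = { .cpu = 1 };

        pthread_create(&c.worker, NULL, do_fork_idle, &c);
        pthread_join(c.worker, NULL);   /* wait_for_completion(&c.done) */

        printf("idle thread for cpu %d: %s\n", c.cpu,
               c.idle_created ? "created" : "failed");
        return 0;
    }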
@@ -406,7 +482,11 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
406 | 482 | ||
407 | /* wake up cpus */ | 483 | /* wake up cpus */ |
408 | DBG("smp: kicking cpu %d\n", cpu); | 484 | DBG("smp: kicking cpu %d\n", cpu); |
409 | smp_ops->kick_cpu(cpu); | 485 | rc = smp_ops->kick_cpu(cpu); |
486 | if (rc) { | ||
487 | pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc); | ||
488 | return rc; | ||
489 | } | ||
410 | 490 | ||
411 | /* | 491 | /* |
412 | * wait to see if the cpu made a callin (is actually up). | 492 | * wait to see if the cpu made a callin (is actually up). |
@@ -466,6 +546,19 @@ out: | |||
466 | return id; | 546 | return id; |
467 | } | 547 | } |
468 | 548 | ||
549 | /* Helper routines for cpu to core mapping */ | ||
550 | int cpu_core_index_of_thread(int cpu) | ||
551 | { | ||
552 | return cpu >> threads_shift; | ||
553 | } | ||
554 | EXPORT_SYMBOL_GPL(cpu_core_index_of_thread); | ||
555 | |||
556 | int cpu_first_thread_of_core(int core) | ||
557 | { | ||
558 | return core << threads_shift; | ||
559 | } | ||
560 | EXPORT_SYMBOL_GPL(cpu_first_thread_of_core); | ||
561 | |||
469 | /* Must be called when no change can occur to cpu_present_mask, | 562 | /* Must be called when no change can occur to cpu_present_mask, |
470 | * i.e. during cpu online or offline. | 563 | * i.e. during cpu online or offline. |
471 | */ | 564 | */ |
@@ -489,7 +582,7 @@ static struct device_node *cpu_to_l2cache(int cpu) | |||
489 | } | 582 | } |
490 | 583 | ||
491 | /* Activate a secondary processor. */ | 584 | /* Activate a secondary processor. */ |
492 | int __devinit start_secondary(void *unused) | 585 | void __devinit start_secondary(void *unused) |
493 | { | 586 | { |
494 | unsigned int cpu = smp_processor_id(); | 587 | unsigned int cpu = smp_processor_id(); |
495 | struct device_node *l2_cache; | 588 | struct device_node *l2_cache; |
@@ -508,16 +601,17 @@ int __devinit start_secondary(void *unused) | |||
508 | if (smp_ops->take_timebase) | 601 | if (smp_ops->take_timebase) |
509 | smp_ops->take_timebase(); | 602 | smp_ops->take_timebase(); |
510 | 603 | ||
511 | if (system_state > SYSTEM_BOOTING) | ||
512 | snapshot_timebase(); | ||
513 | |||
514 | secondary_cpu_time_init(); | 604 | secondary_cpu_time_init(); |
515 | 605 | ||
606 | #ifdef CONFIG_PPC64 | ||
607 | if (system_state == SYSTEM_RUNNING) | ||
608 | vdso_data->processorCount++; | ||
609 | #endif | ||
516 | ipi_call_lock(); | 610 | ipi_call_lock(); |
517 | notify_cpu_starting(cpu); | 611 | notify_cpu_starting(cpu); |
518 | set_cpu_online(cpu, true); | 612 | set_cpu_online(cpu, true); |
519 | /* Update sibling maps */ | 613 | /* Update sibling maps */ |
520 | base = cpu_first_thread_in_core(cpu); | 614 | base = cpu_first_thread_sibling(cpu); |
521 | for (i = 0; i < threads_per_core; i++) { | 615 | for (i = 0; i < threads_per_core; i++) { |
522 | if (cpu_is_offline(base + i)) | 616 | if (cpu_is_offline(base + i)) |
523 | continue; | 617 | continue; |
@@ -548,7 +642,8 @@ int __devinit start_secondary(void *unused) | |||
548 | local_irq_enable(); | 642 | local_irq_enable(); |
549 | 643 | ||
550 | cpu_idle(); | 644 | cpu_idle(); |
551 | return 0; | 645 | |
646 | BUG(); | ||
552 | } | 647 | } |
553 | 648 | ||
554 | int setup_profiling_timer(unsigned int multiplier) | 649 | int setup_profiling_timer(unsigned int multiplier) |
@@ -565,7 +660,7 @@ void __init smp_cpus_done(unsigned int max_cpus) | |||
565 | * so we pin ourselves down to CPU 0 for a short while | 660 | * so we pin ourselves down to CPU 0 for a short while |
566 | */ | 661 | */ |
567 | alloc_cpumask_var(&old_mask, GFP_NOWAIT); | 662 | alloc_cpumask_var(&old_mask, GFP_NOWAIT); |
568 | cpumask_copy(old_mask, ¤t->cpus_allowed); | 663 | cpumask_copy(old_mask, tsk_cpus_allowed(current)); |
569 | set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid)); | 664 | set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid)); |
570 | 665 | ||
571 | if (smp_ops && smp_ops->setup_cpu) | 666 | if (smp_ops && smp_ops->setup_cpu) |
@@ -575,9 +670,20 @@ void __init smp_cpus_done(unsigned int max_cpus) | |||
575 | 670 | ||
576 | free_cpumask_var(old_mask); | 671 | free_cpumask_var(old_mask); |
577 | 672 | ||
578 | snapshot_timebases(); | 673 | if (smp_ops && smp_ops->bringup_done) |
674 | smp_ops->bringup_done(); | ||
579 | 675 | ||
580 | dump_numa_cpu_topology(); | 676 | dump_numa_cpu_topology(); |
677 | |||
678 | } | ||
679 | |||
680 | int arch_sd_sibling_asym_packing(void) | ||
681 | { | ||
682 | if (cpu_has_feature(CPU_FTR_ASYM_SMT)) { | ||
683 | printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n"); | ||
684 | return SD_ASYM_PACKING; | ||
685 | } | ||
686 | return 0; | ||
581 | } | 687 | } |
582 | 688 | ||
583 | #ifdef CONFIG_HOTPLUG_CPU | 689 | #ifdef CONFIG_HOTPLUG_CPU |
@@ -596,7 +702,7 @@ int __cpu_disable(void) | |||
596 | return err; | 702 | return err; |
597 | 703 | ||
598 | /* Update sibling maps */ | 704 | /* Update sibling maps */ |
599 | base = cpu_first_thread_in_core(cpu); | 705 | base = cpu_first_thread_sibling(cpu); |
600 | for (i = 0; i < threads_per_core; i++) { | 706 | for (i = 0; i < threads_per_core; i++) { |
601 | cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i)); | 707 | cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i)); |
602 | cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu)); | 708 | cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu)); |
@@ -643,5 +749,9 @@ void cpu_die(void) | |||
643 | { | 749 | { |
644 | if (ppc_md.cpu_die) | 750 | if (ppc_md.cpu_die) |
645 | ppc_md.cpu_die(); | 751 | ppc_md.cpu_die(); |
752 | |||
753 | /* If we return, we re-enter start_secondary */ | ||
754 | start_secondary_resume(); | ||
646 | } | 755 | } |
756 | |||
647 | #endif | 757 | #endif |
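Among the helpers added above, cpu_core_index_of_thread() and cpu_first_thread_of_core() work because threads_per_core is a power of two, so the thread-to-core mapping reduces to a shift. A sketch with threads_shift as an assumed local standing in for the kernel variable of the same name:

    #include <stdio.h>

    static int threads_shift = 2;   /* 4 hardware threads per core */

    static int cpu_core_index_of_thread(int cpu)  { return cpu >> threads_shift; }
    static int cpu_first_thread_of_core(int core) { return core << threads_shift; }

    int main(void)
    {
        for (int cpu = 0; cpu < 8; cpu++)
            printf("cpu %d -> core %d (first thread %d)\n", cpu,
                   cpu_core_index_of_thread(cpu),
                   cpu_first_thread_of_core(cpu_core_index_of_thread(cpu)));
        return 0;
    }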
diff --git a/arch/powerpc/kernel/swsusp.c b/arch/powerpc/kernel/swsusp.c index 560c96119501..aa17b76dd427 100644 --- a/arch/powerpc/kernel/swsusp.c +++ b/arch/powerpc/kernel/swsusp.c | |||
@@ -10,7 +10,6 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
13 | #include <asm/suspend.h> | ||
14 | #include <asm/system.h> | 13 | #include <asm/system.h> |
15 | #include <asm/current.h> | 14 | #include <asm/current.h> |
16 | #include <asm/mmu_context.h> | 15 | #include <asm/mmu_context.h> |
diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S index b0754e237438..ba4dee3d233f 100644 --- a/arch/powerpc/kernel/swsusp_32.S +++ b/arch/powerpc/kernel/swsusp_32.S | |||
@@ -143,7 +143,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
143 | 143 | ||
144 | /* Disable MSR:DR to make sure we don't take a TLB or | 144 | /* Disable MSR:DR to make sure we don't take a TLB or |
145 | * hash miss during the copy, as our hash table will | 145 | * hash miss during the copy, as our hash table will |
146 | * for a while be unuseable. For .text, we assume we are | 146 | * for a while be unusable. For .text, we assume we are |
147 | * covered by a BAT. This works only for non-G5 at this | 147 | * covered by a BAT. This works only for non-G5 at this |
148 | * point. G5 will need a better approach, possibly using | 148 | * point. G5 will need a better approach, possibly using |
149 | * a small temporary hash table filled with large mappings, | 149 | * a small temporary hash table filled with large mappings, |
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index b1b6043a56c4..4e5bf1edc0f2 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <linux/resource.h> | 23 | #include <linux/resource.h> |
24 | #include <linux/times.h> | 24 | #include <linux/times.h> |
25 | #include <linux/smp.h> | 25 | #include <linux/smp.h> |
26 | #include <linux/smp_lock.h> | ||
27 | #include <linux/sem.h> | 26 | #include <linux/sem.h> |
28 | #include <linux/msg.h> | 27 | #include <linux/msg.h> |
29 | #include <linux/shm.h> | 28 | #include <linux/shm.h> |
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index c0d8c2006bf4..f0f2199e64e1 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c | |||
@@ -182,6 +182,41 @@ static SYSDEV_ATTR(mmcra, 0600, show_mmcra, store_mmcra); | |||
182 | static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL); | 182 | static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL); |
183 | static SYSDEV_ATTR(dscr, 0600, show_dscr, store_dscr); | 183 | static SYSDEV_ATTR(dscr, 0600, show_dscr, store_dscr); |
184 | static SYSDEV_ATTR(purr, 0600, show_purr, store_purr); | 184 | static SYSDEV_ATTR(purr, 0600, show_purr, store_purr); |
185 | |||
186 | unsigned long dscr_default = 0; | ||
187 | EXPORT_SYMBOL(dscr_default); | ||
188 | |||
189 | static ssize_t show_dscr_default(struct sysdev_class *class, | ||
190 | struct sysdev_class_attribute *attr, char *buf) | ||
191 | { | ||
192 | return sprintf(buf, "%lx\n", dscr_default); | ||
193 | } | ||
194 | |||
195 | static ssize_t __used store_dscr_default(struct sysdev_class *class, | ||
196 | struct sysdev_class_attribute *attr, const char *buf, | ||
197 | size_t count) | ||
198 | { | ||
199 | unsigned long val; | ||
200 | int ret = 0; | ||
201 | |||
202 | ret = sscanf(buf, "%lx", &val); | ||
203 | if (ret != 1) | ||
204 | return -EINVAL; | ||
205 | dscr_default = val; | ||
206 | |||
207 | return count; | ||
208 | } | ||
209 | |||
210 | static SYSDEV_CLASS_ATTR(dscr_default, 0600, | ||
211 | show_dscr_default, store_dscr_default); | ||
212 | |||
213 | static void sysfs_create_dscr_default(void) | ||
214 | { | ||
215 | int err = 0; | ||
216 | if (cpu_has_feature(CPU_FTR_DSCR)) | ||
217 | err = sysfs_create_file(&cpu_sysdev_class.kset.kobj, | ||
218 | &attr_dscr_default.attr); | ||
219 | } | ||
185 | #endif /* CONFIG_PPC64 */ | 220 | #endif /* CONFIG_PPC64 */ |
186 | 221 | ||
187 | #ifdef HAS_PPC_PMC_PA6T | 222 | #ifdef HAS_PPC_PMC_PA6T |
@@ -617,6 +652,9 @@ static int __init topology_init(void) | |||
617 | if (cpu_online(cpu)) | 652 | if (cpu_online(cpu)) |
618 | register_cpu_online(cpu); | 653 | register_cpu_online(cpu); |
619 | } | 654 | } |
655 | #ifdef CONFIG_PPC64 | ||
656 | sysfs_create_dscr_default(); | ||
657 | #endif /* CONFIG_PPC64 */ | ||
620 | 658 | ||
621 | return 0; | 659 | return 0; |
622 | } | 660 | } |
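store_dscr_default() above accepts a hex value written to the new sysfs attribute and rejects anything sscanf cannot parse as one. A minimal sketch of that parse-and-store contract, with stdio standing in for the sysdev plumbing and -22 written out for -EINVAL:

    #include <stdio.h>

    static unsigned long dscr_default;

    static long store_dscr_default(const char *buf, long count)
    {
        unsigned long val;

        if (sscanf(buf, "%lx", &val) != 1)
            return -22;          /* -EINVAL */
        dscr_default = val;
        return count;            /* sysfs stores report bytes consumed */
    }

    int main(void)
    {
        printf("%ld\n", store_dscr_default("1f\n", 3));  /* 3 */
        printf("%lx\n", dscr_default);                   /* 1f */
        printf("%ld\n", store_dscr_default("zz\n", 3));  /* -22 */
        return 0;
    }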
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 8533b3b83f5d..f33acfd872ad 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
@@ -53,7 +53,7 @@ | |||
53 | #include <linux/posix-timers.h> | 53 | #include <linux/posix-timers.h> |
54 | #include <linux/irq.h> | 54 | #include <linux/irq.h> |
55 | #include <linux/delay.h> | 55 | #include <linux/delay.h> |
56 | #include <linux/perf_event.h> | 56 | #include <linux/irq_work.h> |
57 | #include <asm/trace.h> | 57 | #include <asm/trace.h> |
58 | 58 | ||
59 | #include <asm/io.h> | 59 | #include <asm/io.h> |
@@ -155,16 +155,15 @@ EXPORT_SYMBOL_GPL(rtc_lock); | |||
155 | 155 | ||
156 | static u64 tb_to_ns_scale __read_mostly; | 156 | static u64 tb_to_ns_scale __read_mostly; |
157 | static unsigned tb_to_ns_shift __read_mostly; | 157 | static unsigned tb_to_ns_shift __read_mostly; |
158 | static unsigned long boot_tb __read_mostly; | 158 | static u64 boot_tb __read_mostly; |
159 | 159 | ||
160 | extern struct timezone sys_tz; | 160 | extern struct timezone sys_tz; |
161 | static long timezone_offset; | 161 | static long timezone_offset; |
162 | 162 | ||
163 | unsigned long ppc_proc_freq; | 163 | unsigned long ppc_proc_freq; |
164 | EXPORT_SYMBOL(ppc_proc_freq); | 164 | EXPORT_SYMBOL_GPL(ppc_proc_freq); |
165 | unsigned long ppc_tb_freq; | 165 | unsigned long ppc_tb_freq; |
166 | 166 | EXPORT_SYMBOL_GPL(ppc_tb_freq); | |
167 | static DEFINE_PER_CPU(u64, last_jiffy); | ||
168 | 167 | ||
169 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 168 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
170 | /* | 169 | /* |
@@ -185,6 +184,8 @@ DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta); | |||
185 | 184 | ||
186 | cputime_t cputime_one_jiffy; | 185 | cputime_t cputime_one_jiffy; |
187 | 186 | ||
187 | void (*dtl_consumer)(struct dtl_entry *, u64); | ||
188 | |||
188 | static void calc_cputime_factors(void) | 189 | static void calc_cputime_factors(void) |
189 | { | 190 | { |
190 | struct div_result res; | 191 | struct div_result res; |
@@ -200,62 +201,171 @@ static void calc_cputime_factors(void) | |||
200 | } | 201 | } |
201 | 202 | ||
202 | /* | 203 | /* |
203 | * Read the PURR on systems that have it, otherwise the timebase. | 204 | * Read the SPURR on systems that have it, otherwise the PURR, |
205 | * or if that doesn't exist return the timebase value passed in. | ||
204 | */ | 206 | */ |
205 | static u64 read_purr(void) | 207 | static u64 read_spurr(u64 tb) |
206 | { | 208 | { |
209 | if (cpu_has_feature(CPU_FTR_SPURR)) | ||
210 | return mfspr(SPRN_SPURR); | ||
207 | if (cpu_has_feature(CPU_FTR_PURR)) | 211 | if (cpu_has_feature(CPU_FTR_PURR)) |
208 | return mfspr(SPRN_PURR); | 212 | return mfspr(SPRN_PURR); |
209 | return mftb(); | 213 | return tb; |
210 | } | 214 | } |
211 | 215 | ||
216 | #ifdef CONFIG_PPC_SPLPAR | ||
217 | |||
212 | /* | 218 | /* |
213 | * Read the SPURR on systems that have it, otherwise the purr | 219 | * Scan the dispatch trace log and count up the stolen time. |
220 | * Should be called with interrupts disabled. | ||
214 | */ | 221 | */ |
215 | static u64 read_spurr(u64 purr) | 222 | static u64 scan_dispatch_log(u64 stop_tb) |
216 | { | 223 | { |
217 | /* | 224 | u64 i = local_paca->dtl_ridx; |
218 | * cpus without PURR won't have a SPURR | 225 | struct dtl_entry *dtl = local_paca->dtl_curr; |
219 | * We already know the former when we use this, so tell gcc | 226 | struct dtl_entry *dtl_end = local_paca->dispatch_log_end; |
227 | struct lppaca *vpa = local_paca->lppaca_ptr; | ||
228 | u64 tb_delta; | ||
229 | u64 stolen = 0; | ||
230 | u64 dtb; | ||
231 | |||
232 | if (!dtl) | ||
233 | return 0; | ||
234 | |||
235 | if (i == vpa->dtl_idx) | ||
236 | return 0; | ||
237 | while (i < vpa->dtl_idx) { | ||
238 | if (dtl_consumer) | ||
239 | dtl_consumer(dtl, i); | ||
240 | dtb = dtl->timebase; | ||
241 | tb_delta = dtl->enqueue_to_dispatch_time + | ||
242 | dtl->ready_to_enqueue_time; | ||
243 | barrier(); | ||
244 | if (i + N_DISPATCH_LOG < vpa->dtl_idx) { | ||
245 | /* buffer has overflowed */ | ||
246 | i = vpa->dtl_idx - N_DISPATCH_LOG; | ||
247 | dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG); | ||
248 | continue; | ||
249 | } | ||
250 | if (dtb > stop_tb) | ||
251 | break; | ||
252 | stolen += tb_delta; | ||
253 | ++i; | ||
254 | ++dtl; | ||
255 | if (dtl == dtl_end) | ||
256 | dtl = local_paca->dispatch_log; | ||
257 | } | ||
258 | local_paca->dtl_ridx = i; | ||
259 | local_paca->dtl_curr = dtl; | ||
260 | return stolen; | ||
261 | } | ||
262 | |||
263 | /* | ||
264 | * Accumulate stolen time by scanning the dispatch trace log. | ||
265 | * Called on entry from user mode. | ||
266 | */ | ||
267 | void accumulate_stolen_time(void) | ||
268 | { | ||
269 | u64 sst, ust; | ||
270 | |||
271 | u8 save_soft_enabled = local_paca->soft_enabled; | ||
272 | u8 save_hard_enabled = local_paca->hard_enabled; | ||
273 | |||
274 | /* We are called early in the exception entry, before | ||
275 | * soft/hard_enabled are sync'ed to the expected state | ||
276 | * for the exception. We are hard disabled but the PACA | ||
277 | * needs to reflect that so various debug stuff doesn't | ||
278 | * complain | ||
220 | */ | 279 | */ |
221 | if (cpu_has_feature(CPU_FTR_PURR) && cpu_has_feature(CPU_FTR_SPURR)) | 280 | local_paca->soft_enabled = 0; |
222 | return mfspr(SPRN_SPURR); | 281 | local_paca->hard_enabled = 0; |
223 | return purr; | 282 | |
283 | sst = scan_dispatch_log(local_paca->starttime_user); | ||
284 | ust = scan_dispatch_log(local_paca->starttime); | ||
285 | local_paca->system_time -= sst; | ||
286 | local_paca->user_time -= ust; | ||
287 | local_paca->stolen_time += ust + sst; | ||
288 | |||
289 | local_paca->soft_enabled = save_soft_enabled; | ||
290 | local_paca->hard_enabled = save_hard_enabled; | ||
291 | } | ||
292 | |||
293 | static inline u64 calculate_stolen_time(u64 stop_tb) | ||
294 | { | ||
295 | u64 stolen = 0; | ||
296 | |||
297 | if (get_paca()->dtl_ridx != get_paca()->lppaca_ptr->dtl_idx) { | ||
298 | stolen = scan_dispatch_log(stop_tb); | ||
299 | get_paca()->system_time -= stolen; | ||
300 | } | ||
301 | |||
302 | stolen += get_paca()->stolen_time; | ||
303 | get_paca()->stolen_time = 0; | ||
304 | return stolen; | ||
224 | } | 305 | } |
225 | 306 | ||
307 | #else /* CONFIG_PPC_SPLPAR */ | ||
308 | static inline u64 calculate_stolen_time(u64 stop_tb) | ||
309 | { | ||
310 | return 0; | ||
311 | } | ||
312 | |||
313 | #endif /* CONFIG_PPC_SPLPAR */ | ||
314 | |||
226 | /* | 315 | /* |
227 | * Account time for a transition between system, hard irq | 316 | * Account time for a transition between system, hard irq |
228 | * or soft irq state. | 317 | * or soft irq state. |
229 | */ | 318 | */ |
230 | void account_system_vtime(struct task_struct *tsk) | 319 | void account_system_vtime(struct task_struct *tsk) |
231 | { | 320 | { |
232 | u64 now, nowscaled, delta, deltascaled, sys_time; | 321 | u64 now, nowscaled, delta, deltascaled; |
233 | unsigned long flags; | 322 | unsigned long flags; |
323 | u64 stolen, udelta, sys_scaled, user_scaled; | ||
234 | 324 | ||
235 | local_irq_save(flags); | 325 | local_irq_save(flags); |
236 | now = read_purr(); | 326 | now = mftb(); |
237 | nowscaled = read_spurr(now); | 327 | nowscaled = read_spurr(now); |
238 | delta = now - get_paca()->startpurr; | 328 | get_paca()->system_time += now - get_paca()->starttime; |
329 | get_paca()->starttime = now; | ||
239 | deltascaled = nowscaled - get_paca()->startspurr; | 330 | deltascaled = nowscaled - get_paca()->startspurr; |
240 | get_paca()->startpurr = now; | ||
241 | get_paca()->startspurr = nowscaled; | 331 | get_paca()->startspurr = nowscaled; |
242 | if (!in_interrupt()) { | 332 | |
243 | /* deltascaled includes both user and system time. | 333 | stolen = calculate_stolen_time(now); |
244 | * Hence scale it based on the purr ratio to estimate | 334 | |
245 | * the system time */ | 335 | delta = get_paca()->system_time; |
246 | sys_time = get_paca()->system_time; | 336 | get_paca()->system_time = 0; |
247 | if (get_paca()->user_time) | 337 | udelta = get_paca()->user_time - get_paca()->utime_sspurr; |
248 | deltascaled = deltascaled * sys_time / | 338 | get_paca()->utime_sspurr = get_paca()->user_time; |
249 | (sys_time + get_paca()->user_time); | 339 | |
250 | delta += sys_time; | 340 | /* |
251 | get_paca()->system_time = 0; | 341 | * Because we don't read the SPURR on every kernel entry/exit, |
342 | * deltascaled includes both user and system SPURR ticks. | ||
343 | * Apportion these ticks to system SPURR ticks and user | ||
344 | * SPURR ticks in the same ratio as the system time (delta) | ||
345 | * and user time (udelta) values obtained from the timebase | ||
346 | * over the same interval. The system ticks get accounted here; | ||
347 | * the user ticks get saved up in paca->user_time_scaled to be | ||
348 | * used by account_process_tick. | ||
349 | */ | ||
350 | sys_scaled = delta; | ||
351 | user_scaled = udelta; | ||
352 | if (deltascaled != delta + udelta) { | ||
353 | if (udelta) { | ||
354 | sys_scaled = deltascaled * delta / (delta + udelta); | ||
355 | user_scaled = deltascaled - sys_scaled; | ||
356 | } else { | ||
357 | sys_scaled = deltascaled; | ||
358 | } | ||
359 | } | ||
360 | get_paca()->user_time_scaled += user_scaled; | ||
361 | |||
362 | if (in_interrupt() || idle_task(smp_processor_id()) != tsk) { | ||
363 | account_system_time(tsk, 0, delta, sys_scaled); | ||
364 | if (stolen) | ||
365 | account_steal_time(stolen); | ||
366 | } else { | ||
367 | account_idle_time(delta + stolen); | ||
252 | } | 368 | } |
253 | if (in_irq() || idle_task(smp_processor_id()) != tsk) | ||
254 | account_system_time(tsk, 0, delta, deltascaled); | ||
255 | else | ||
256 | account_idle_time(delta); | ||
257 | __get_cpu_var(cputime_last_delta) = delta; | ||
258 | __get_cpu_var(cputime_scaled_last_delta) = deltascaled; | ||
259 | local_irq_restore(flags); | 369 | local_irq_restore(flags); |
260 | } | 370 | } |
261 | EXPORT_SYMBOL_GPL(account_system_vtime); | 371 | EXPORT_SYMBOL_GPL(account_system_vtime); |
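The rewritten account_system_vtime() above apportions the SPURR delta between system and user time in the same ratio as the timebase-derived intervals, since the SPURR is only read on kernel entry and exit. A worked sketch of just that arithmetic; the values are made up (a half-speed SPURR yields 50 scaled ticks across 60 system plus 40 user timebase ticks):

    #include <stdio.h>
    #include <stdint.h>

    static void apportion(uint64_t deltascaled, uint64_t delta, uint64_t udelta,
                          uint64_t *sys_scaled, uint64_t *user_scaled)
    {
        *sys_scaled = delta;         /* fast path: SPURR ticked with the tb */
        *user_scaled = udelta;
        if (deltascaled != delta + udelta) {
            if (udelta) {            /* split in the tb-derived ratio */
                *sys_scaled = deltascaled * delta / (delta + udelta);
                *user_scaled = deltascaled - *sys_scaled;
            } else {
                *sys_scaled = deltascaled;
            }
        }
    }

    int main(void)
    {
        uint64_t s, u;

        apportion(50, 60, 40, &s, &u);
        printf("sys=%llu user=%llu\n",   /* sys=30 user=20 */
               (unsigned long long)s, (unsigned long long)u);
        return 0;
    }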
@@ -265,125 +375,26 @@ EXPORT_SYMBOL_GPL(account_system_vtime); | |||
265 | * by the exception entry and exit code to the generic process | 375 | * by the exception entry and exit code to the generic process |
266 | * user and system time records. | 376 | * user and system time records. |
267 | * Must be called with interrupts disabled. | 377 | * Must be called with interrupts disabled. |
378 | * Assumes that account_system_vtime() has been called recently | ||
379 | * (i.e. since the last entry from usermode) so that | ||
380 | * get_paca()->user_time_scaled is up to date. | ||
268 | */ | 381 | */ |
269 | void account_process_tick(struct task_struct *tsk, int user_tick) | 382 | void account_process_tick(struct task_struct *tsk, int user_tick) |
270 | { | 383 | { |
271 | cputime_t utime, utimescaled; | 384 | cputime_t utime, utimescaled; |
272 | 385 | ||
273 | utime = get_paca()->user_time; | 386 | utime = get_paca()->user_time; |
387 | utimescaled = get_paca()->user_time_scaled; | ||
274 | get_paca()->user_time = 0; | 388 | get_paca()->user_time = 0; |
275 | utimescaled = cputime_to_scaled(utime); | 389 | get_paca()->user_time_scaled = 0; |
390 | get_paca()->utime_sspurr = 0; | ||
276 | account_user_time(tsk, utime, utimescaled); | 391 | account_user_time(tsk, utime, utimescaled); |
277 | } | 392 | } |
278 | 393 | ||
279 | /* | ||
280 | * Stuff for accounting stolen time. | ||
281 | */ | ||
282 | struct cpu_purr_data { | ||
283 | int initialized; /* thread is running */ | ||
284 | u64 tb; /* last TB value read */ | ||
285 | u64 purr; /* last PURR value read */ | ||
286 | u64 spurr; /* last SPURR value read */ | ||
287 | }; | ||
288 | |||
289 | /* | ||
290 | * Each entry in the cpu_purr_data array is manipulated only by its | ||
291 | * "owner" cpu -- usually in the timer interrupt but also occasionally | ||
292 | * in process context for cpu online. As long as cpus do not touch | ||
293 | * each others' cpu_purr_data, disabling local interrupts is | ||
294 | * sufficient to serialize accesses. | ||
295 | */ | ||
296 | static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data); | ||
297 | |||
298 | static void snapshot_tb_and_purr(void *data) | ||
299 | { | ||
300 | unsigned long flags; | ||
301 | struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data); | ||
302 | |||
303 | local_irq_save(flags); | ||
304 | p->tb = get_tb_or_rtc(); | ||
305 | p->purr = mfspr(SPRN_PURR); | ||
306 | wmb(); | ||
307 | p->initialized = 1; | ||
308 | local_irq_restore(flags); | ||
309 | } | ||
310 | |||
311 | /* | ||
312 | * Called during boot when all cpus have come up. | ||
313 | */ | ||
314 | void snapshot_timebases(void) | ||
315 | { | ||
316 | if (!cpu_has_feature(CPU_FTR_PURR)) | ||
317 | return; | ||
318 | on_each_cpu(snapshot_tb_and_purr, NULL, 1); | ||
319 | } | ||
320 | |||
321 | /* | ||
322 | * Must be called with interrupts disabled. | ||
323 | */ | ||
324 | void calculate_steal_time(void) | ||
325 | { | ||
326 | u64 tb, purr; | ||
327 | s64 stolen; | ||
328 | struct cpu_purr_data *pme; | ||
329 | |||
330 | pme = &__get_cpu_var(cpu_purr_data); | ||
331 | if (!pme->initialized) | ||
332 | return; /* !CPU_FTR_PURR or early in early boot */ | ||
333 | tb = mftb(); | ||
334 | purr = mfspr(SPRN_PURR); | ||
335 | stolen = (tb - pme->tb) - (purr - pme->purr); | ||
336 | if (stolen > 0) { | ||
337 | if (idle_task(smp_processor_id()) != current) | ||
338 | account_steal_time(stolen); | ||
339 | else | ||
340 | account_idle_time(stolen); | ||
341 | } | ||
342 | pme->tb = tb; | ||
343 | pme->purr = purr; | ||
344 | } | ||
345 | |||
346 | #ifdef CONFIG_PPC_SPLPAR | ||
347 | /* | ||
348 | * Must be called before the cpu is added to the online map when | ||
349 | * a cpu is being brought up at runtime. | ||
350 | */ | ||
351 | static void snapshot_purr(void) | ||
352 | { | ||
353 | struct cpu_purr_data *pme; | ||
354 | unsigned long flags; | ||
355 | |||
356 | if (!cpu_has_feature(CPU_FTR_PURR)) | ||
357 | return; | ||
358 | local_irq_save(flags); | ||
359 | pme = &__get_cpu_var(cpu_purr_data); | ||
360 | pme->tb = mftb(); | ||
361 | pme->purr = mfspr(SPRN_PURR); | ||
362 | pme->initialized = 1; | ||
363 | local_irq_restore(flags); | ||
364 | } | ||
365 | |||
366 | #endif /* CONFIG_PPC_SPLPAR */ | ||
367 | |||
368 | #else /* ! CONFIG_VIRT_CPU_ACCOUNTING */ | 394 | #else /* ! CONFIG_VIRT_CPU_ACCOUNTING */ |
369 | #define calc_cputime_factors() | 395 | #define calc_cputime_factors() |
370 | #define calculate_steal_time() do { } while (0) | ||
371 | #endif | ||
372 | |||
373 | #if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)) | ||
374 | #define snapshot_purr() do { } while (0) | ||
375 | #endif | 396 | #endif |
376 | 397 | ||
377 | /* | ||
378 | * Called when a cpu comes up after the system has finished booting, | ||
379 | * i.e. as a result of a hotplug cpu action. | ||
380 | */ | ||
381 | void snapshot_timebase(void) | ||
382 | { | ||
383 | __get_cpu_var(last_jiffy) = get_tb_or_rtc(); | ||
384 | snapshot_purr(); | ||
385 | } | ||
386 | |||
387 | void __delay(unsigned long loops) | 398 | void __delay(unsigned long loops) |
388 | { | 399 | { |
389 | unsigned long start; | 400 | unsigned long start; |
@@ -493,60 +504,60 @@ void __init iSeries_time_init_early(void) | |||
493 | } | 504 | } |
494 | #endif /* CONFIG_PPC_ISERIES */ | 505 | #endif /* CONFIG_PPC_ISERIES */ |
495 | 506 | ||
496 | #ifdef CONFIG_PERF_EVENTS | 507 | #ifdef CONFIG_IRQ_WORK |
497 | 508 | ||
498 | /* | 509 | /* |
499 | * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable... | 510 | * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable... |
500 | */ | 511 | */ |
501 | #ifdef CONFIG_PPC64 | 512 | #ifdef CONFIG_PPC64 |
502 | static inline unsigned long test_perf_event_pending(void) | 513 | static inline unsigned long test_irq_work_pending(void) |
503 | { | 514 | { |
504 | unsigned long x; | 515 | unsigned long x; |
505 | 516 | ||
506 | asm volatile("lbz %0,%1(13)" | 517 | asm volatile("lbz %0,%1(13)" |
507 | : "=r" (x) | 518 | : "=r" (x) |
508 | : "i" (offsetof(struct paca_struct, perf_event_pending))); | 519 | : "i" (offsetof(struct paca_struct, irq_work_pending))); |
509 | return x; | 520 | return x; |
510 | } | 521 | } |
511 | 522 | ||
512 | static inline void set_perf_event_pending_flag(void) | 523 | static inline void set_irq_work_pending_flag(void) |
513 | { | 524 | { |
514 | asm volatile("stb %0,%1(13)" : : | 525 | asm volatile("stb %0,%1(13)" : : |
515 | "r" (1), | 526 | "r" (1), |
516 | "i" (offsetof(struct paca_struct, perf_event_pending))); | 527 | "i" (offsetof(struct paca_struct, irq_work_pending))); |
517 | } | 528 | } |
518 | 529 | ||
519 | static inline void clear_perf_event_pending(void) | 530 | static inline void clear_irq_work_pending(void) |
520 | { | 531 | { |
521 | asm volatile("stb %0,%1(13)" : : | 532 | asm volatile("stb %0,%1(13)" : : |
522 | "r" (0), | 533 | "r" (0), |
523 | "i" (offsetof(struct paca_struct, perf_event_pending))); | 534 | "i" (offsetof(struct paca_struct, irq_work_pending))); |
524 | } | 535 | } |
525 | 536 | ||
526 | #else /* 32-bit */ | 537 | #else /* 32-bit */ |
527 | 538 | ||
528 | DEFINE_PER_CPU(u8, perf_event_pending); | 539 | DEFINE_PER_CPU(u8, irq_work_pending); |
529 | 540 | ||
530 | #define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1 | 541 | #define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1 |
531 | #define test_perf_event_pending() __get_cpu_var(perf_event_pending) | 542 | #define test_irq_work_pending() __get_cpu_var(irq_work_pending) |
532 | #define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0 | 543 | #define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0 |
533 | 544 | ||
534 | #endif /* 32 vs 64 bit */ | 545 | #endif /* 32 vs 64 bit */ |
535 | 546 | ||
536 | void set_perf_event_pending(void) | 547 | void set_irq_work_pending(void) |
537 | { | 548 | { |
538 | preempt_disable(); | 549 | preempt_disable(); |
539 | set_perf_event_pending_flag(); | 550 | set_irq_work_pending_flag(); |
540 | set_dec(1); | 551 | set_dec(1); |
541 | preempt_enable(); | 552 | preempt_enable(); |
542 | } | 553 | } |
543 | 554 | ||
544 | #else /* CONFIG_PERF_EVENTS */ | 555 | #else /* CONFIG_IRQ_WORK */ |
545 | 556 | ||
546 | #define test_perf_event_pending() 0 | 557 | #define test_irq_work_pending() 0 |
547 | #define clear_perf_event_pending() | 558 | #define clear_irq_work_pending() |
548 | 559 | ||
549 | #endif /* CONFIG_PERF_EVENTS */ | 560 | #endif /* CONFIG_IRQ_WORK */ |
550 | 561 | ||
551 | /* | 562 | /* |
552 | * For iSeries shared processors, we have to let the hypervisor | 563 | * For iSeries shared processors, we have to let the hypervisor |
@@ -569,14 +580,21 @@ void timer_interrupt(struct pt_regs * regs) | |||
569 | struct clock_event_device *evt = &decrementer->event; | 580 | struct clock_event_device *evt = &decrementer->event; |
570 | u64 now; | 581 | u64 now; |
571 | 582 | ||
583 | /* Ensure a positive value is written to the decrementer, or else | ||
584 | * some CPUs will continue to take decrementer exceptions. | ||
585 | */ | ||
586 | set_dec(DECREMENTER_MAX); | ||
587 | |||
588 | /* Some implementations of hotplug will get timer interrupts while | ||
589 | * offline, just ignore these | ||
590 | */ | ||
591 | if (!cpu_online(smp_processor_id())) | ||
592 | return; | ||
593 | |||
572 | trace_timer_interrupt_entry(regs); | 594 | trace_timer_interrupt_entry(regs); |
573 | 595 | ||
574 | __get_cpu_var(irq_stat).timer_irqs++; | 596 | __get_cpu_var(irq_stat).timer_irqs++; |
575 | 597 | ||
576 | /* Ensure a positive value is written to the decrementer, or else | ||
577 | * some CPUs will continuue to take decrementer exceptions */ | ||
578 | set_dec(DECREMENTER_MAX); | ||
579 | |||
580 | #if defined(CONFIG_PPC32) && defined(CONFIG_PMAC) | 598 | #if defined(CONFIG_PPC32) && defined(CONFIG_PMAC) |
581 | if (atomic_read(&ppc_n_lost_interrupts) != 0) | 599 | if (atomic_read(&ppc_n_lost_interrupts) != 0) |
582 | do_IRQ(regs); | 600 | do_IRQ(regs); |
@@ -585,11 +603,9 @@ void timer_interrupt(struct pt_regs * regs) | |||
585 | old_regs = set_irq_regs(regs); | 603 | old_regs = set_irq_regs(regs); |
586 | irq_enter(); | 604 | irq_enter(); |
587 | 605 | ||
588 | calculate_steal_time(); | 606 | if (test_irq_work_pending()) { |
589 | 607 | clear_irq_work_pending(); | |
590 | if (test_perf_event_pending()) { | 608 | irq_work_run(); |
591 | clear_perf_event_pending(); | ||
592 | perf_event_do_pending(); | ||
593 | } | 609 | } |
594 | 610 | ||
595 | #ifdef CONFIG_PPC_ISERIES | 611 | #ifdef CONFIG_PPC_ISERIES |
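The time.c hunks above do two things: they convert the perf_events-specific pending mechanism to the generic irq_work framework (a flag in the PACA on 64-bit, a per-cpu variable on 32-bit, with `set_dec(1)` forcing the decrementer to fire almost immediately so `timer_interrupt()` runs the queued work), and they reorder the handler so `set_dec(DECREMENTER_MAX)` and the `cpu_online()` check happen first, letting offlined CPUs silently rearm the decrementer and return. A minimal sketch of the flag pattern in plain C follows; `NR_CPUS`, `this_cpu()`, `arm_timer_soon()` and `run_queued_work()` are illustrative stand-ins, not the kernel's actual APIs.

    #include <stdbool.h>

    #define NR_CPUS 4
    static volatile bool irq_work_pending_flag[NR_CPUS];

    extern int this_cpu(void);           /* assumed: returns current CPU id */
    extern void arm_timer_soon(void);    /* assumed: analogous to set_dec(1) */
    extern void run_queued_work(void);   /* assumed: analogous to irq_work_run() */

    /* Caller side: mark work pending, then force an early timer interrupt
     * so the handler below runs soon in interrupt context. */
    void set_irq_work_pending_sketch(void)
    {
            irq_work_pending_flag[this_cpu()] = true;
            arm_timer_soon();
    }

    /* Handler side: test-and-clear the flag before running the queued
     * callbacks, in the same order timer_interrupt() uses above. */
    void timer_interrupt_body_sketch(void)
    {
            int cpu = this_cpu();

            if (irq_work_pending_flag[cpu]) {
                    irq_work_pending_flag[cpu] = false;
                    run_queued_work();
            }
    }

Clearing the flag before running the work is deliberate: work queued by a callback re-sets the flag and is picked up by the next interrupt rather than being lost.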
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index a45a63c3a0c7..1a0141426cda 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/bug.h> | 34 | #include <linux/bug.h> |
35 | #include <linux/kdebug.h> | 35 | #include <linux/kdebug.h> |
36 | #include <linux/debugfs.h> | 36 | #include <linux/debugfs.h> |
37 | #include <linux/ratelimit.h> | ||
37 | 38 | ||
38 | #include <asm/emulated_ops.h> | 39 | #include <asm/emulated_ops.h> |
39 | #include <asm/pgtable.h> | 40 | #include <asm/pgtable.h> |
@@ -55,6 +56,7 @@ | |||
55 | #endif | 56 | #endif |
56 | #include <asm/kexec.h> | 57 | #include <asm/kexec.h> |
57 | #include <asm/ppc-opcode.h> | 58 | #include <asm/ppc-opcode.h> |
59 | #include <asm/rio.h> | ||
58 | 60 | ||
59 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) | 61 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) |
60 | int (*__debugger)(struct pt_regs *regs) __read_mostly; | 62 | int (*__debugger)(struct pt_regs *regs) __read_mostly; |
@@ -143,7 +145,6 @@ int die(const char *str, struct pt_regs *regs, long err) | |||
143 | #endif | 145 | #endif |
144 | printk("%s\n", ppc_md.name ? ppc_md.name : ""); | 146 | printk("%s\n", ppc_md.name ? ppc_md.name : ""); |
145 | 147 | ||
146 | sysfs_printk_last_file(); | ||
147 | if (notify_die(DIE_OOPS, str, regs, err, 255, | 148 | if (notify_die(DIE_OOPS, str, regs, err, 255, |
148 | SIGSEGV) == NOTIFY_STOP) | 149 | SIGSEGV) == NOTIFY_STOP) |
149 | return 1; | 150 | return 1; |
@@ -197,12 +198,11 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) | |||
197 | if (die("Exception in kernel mode", regs, signr)) | 198 | if (die("Exception in kernel mode", regs, signr)) |
198 | return; | 199 | return; |
199 | } else if (show_unhandled_signals && | 200 | } else if (show_unhandled_signals && |
200 | unhandled_signal(current, signr) && | 201 | unhandled_signal(current, signr)) { |
201 | printk_ratelimit()) { | 202 | printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32, |
202 | printk(regs->msr & MSR_SF ? fmt64 : fmt32, | 203 | current->comm, current->pid, signr, |
203 | current->comm, current->pid, signr, | 204 | addr, regs->nip, regs->link, code); |
204 | addr, regs->nip, regs->link, code); | 205 | } |
205 | } | ||
206 | 206 | ||
207 | memset(&info, 0, sizeof(info)); | 207 | memset(&info, 0, sizeof(info)); |
208 | info.si_signo = signr; | 208 | info.si_signo = signr; |
@@ -221,7 +221,7 @@ void system_reset_exception(struct pt_regs *regs) | |||
221 | } | 221 | } |
222 | 222 | ||
223 | #ifdef CONFIG_KEXEC | 223 | #ifdef CONFIG_KEXEC |
224 | cpu_set(smp_processor_id(), cpus_in_sr); | 224 | cpumask_set_cpu(smp_processor_id(), &cpus_in_sr); |
225 | #endif | 225 | #endif |
226 | 226 | ||
227 | die("System Reset", regs, SIGABRT); | 227 | die("System Reset", regs, SIGABRT); |
@@ -425,6 +425,12 @@ int machine_check_e500mc(struct pt_regs *regs) | |||
425 | unsigned long reason = mcsr; | 425 | unsigned long reason = mcsr; |
426 | int recoverable = 1; | 426 | int recoverable = 1; |
427 | 427 | ||
428 | if (reason & MCSR_LD) { | ||
429 | recoverable = fsl_rio_mcheck_exception(regs); | ||
430 | if (recoverable == 1) | ||
431 | goto silent_out; | ||
432 | } | ||
433 | |||
428 | printk("Machine check in kernel mode.\n"); | 434 | printk("Machine check in kernel mode.\n"); |
429 | printk("Caused by (from MCSR=%lx): ", reason); | 435 | printk("Caused by (from MCSR=%lx): ", reason); |
430 | 436 | ||
@@ -500,6 +506,7 @@ int machine_check_e500mc(struct pt_regs *regs) | |||
500 | reason & MCSR_MEA ? "Effective" : "Physical", addr); | 506 | reason & MCSR_MEA ? "Effective" : "Physical", addr); |
501 | } | 507 | } |
502 | 508 | ||
509 | silent_out: | ||
503 | mtspr(SPRN_MCSR, mcsr); | 510 | mtspr(SPRN_MCSR, mcsr); |
504 | return mfspr(SPRN_MCSR) == 0 && recoverable; | 511 | return mfspr(SPRN_MCSR) == 0 && recoverable; |
505 | } | 512 | } |
@@ -508,6 +515,11 @@ int machine_check_e500(struct pt_regs *regs) | |||
508 | { | 515 | { |
509 | unsigned long reason = get_mc_reason(regs); | 516 | unsigned long reason = get_mc_reason(regs); |
510 | 517 | ||
518 | if (reason & MCSR_BUS_RBERR) { | ||
519 | if (fsl_rio_mcheck_exception(regs)) | ||
520 | return 1; | ||
521 | } | ||
522 | |||
511 | printk("Machine check in kernel mode.\n"); | 523 | printk("Machine check in kernel mode.\n"); |
512 | printk("Caused by (from MCSR=%lx): ", reason); | 524 | printk("Caused by (from MCSR=%lx): ", reason); |
513 | 525 | ||
@@ -538,6 +550,11 @@ int machine_check_e500(struct pt_regs *regs) | |||
538 | 550 | ||
539 | return 0; | 551 | return 0; |
540 | } | 552 | } |
553 | |||
554 | int machine_check_generic(struct pt_regs *regs) | ||
555 | { | ||
556 | return 0; | ||
557 | } | ||
541 | #elif defined(CONFIG_E200) | 558 | #elif defined(CONFIG_E200) |
542 | int machine_check_e200(struct pt_regs *regs) | 559 | int machine_check_e200(struct pt_regs *regs) |
543 | { | 560 | { |
@@ -621,12 +638,6 @@ void machine_check_exception(struct pt_regs *regs) | |||
621 | if (recover > 0) | 638 | if (recover > 0) |
622 | return; | 639 | return; |
623 | 640 | ||
624 | if (user_mode(regs)) { | ||
625 | regs->msr |= MSR_RI; | ||
626 | _exception(SIGBUS, regs, BUS_ADRERR, regs->nip); | ||
627 | return; | ||
628 | } | ||
629 | |||
630 | #if defined(CONFIG_8xx) && defined(CONFIG_PCI) | 641 | #if defined(CONFIG_8xx) && defined(CONFIG_PCI) |
631 | /* the qspan pci read routines can cause machine checks -- Cort | 642 | /* the qspan pci read routines can cause machine checks -- Cort |
632 | * | 643 | * |
@@ -638,16 +649,12 @@ void machine_check_exception(struct pt_regs *regs) | |||
638 | return; | 649 | return; |
639 | #endif | 650 | #endif |
640 | 651 | ||
641 | if (debugger_fault_handler(regs)) { | 652 | if (debugger_fault_handler(regs)) |
642 | regs->msr |= MSR_RI; | ||
643 | return; | 653 | return; |
644 | } | ||
645 | 654 | ||
646 | if (check_io_access(regs)) | 655 | if (check_io_access(regs)) |
647 | return; | 656 | return; |
648 | 657 | ||
649 | if (debugger_fault_handler(regs)) | ||
650 | return; | ||
651 | die("Machine check", regs, SIGBUS); | 658 | die("Machine check", regs, SIGBUS); |
652 | 659 | ||
653 | /* Must die if the interrupt is not recoverable */ | 660 | /* Must die if the interrupt is not recoverable */ |
@@ -914,6 +921,26 @@ static int emulate_instruction(struct pt_regs *regs) | |||
914 | return emulate_isel(regs, instword); | 921 | return emulate_isel(regs, instword); |
915 | } | 922 | } |
916 | 923 | ||
924 | #ifdef CONFIG_PPC64 | ||
925 | /* Emulate the mfspr rD, DSCR. */ | ||
926 | if (((instword & PPC_INST_MFSPR_DSCR_MASK) == PPC_INST_MFSPR_DSCR) && | ||
927 | cpu_has_feature(CPU_FTR_DSCR)) { | ||
928 | PPC_WARN_EMULATED(mfdscr, regs); | ||
929 | rd = (instword >> 21) & 0x1f; | ||
930 | regs->gpr[rd] = mfspr(SPRN_DSCR); | ||
931 | return 0; | ||
932 | } | ||
933 | /* Emulate the mtspr DSCR, rD. */ | ||
934 | if (((instword & PPC_INST_MTSPR_DSCR_MASK) == PPC_INST_MTSPR_DSCR) && | ||
935 | cpu_has_feature(CPU_FTR_DSCR)) { | ||
936 | PPC_WARN_EMULATED(mtdscr, regs); | ||
937 | rd = (instword >> 21) & 0x1f; | ||
938 | mtspr(SPRN_DSCR, regs->gpr[rd]); | ||
939 | current->thread.dscr_inherit = 1; | ||
940 | return 0; | ||
941 | } | ||
942 | #endif | ||
943 | |||
917 | return -EINVAL; | 944 | return -EINVAL; |
918 | } | 945 | } |
919 | 946 | ||
@@ -964,7 +991,7 @@ void __kprobes program_check_exception(struct pt_regs *regs) | |||
964 | * ESR_DST (!?) or 0. In the process of chasing this with the | 991 | * ESR_DST (!?) or 0. In the process of chasing this with the |
965 | * hardware people - not sure if it can happen on any illegal | 992 | * hardware people - not sure if it can happen on any illegal |
966 | * instruction or only on FP instructions, whether there is a | 993 | * instruction or only on FP instructions, whether there is a |
967 | * pattern to occurences etc. -dgibson 31/Mar/2003 */ | 994 | * pattern to occurrences etc. -dgibson 31/Mar/2003 */ |
968 | switch (do_mathemu(regs)) { | 995 | switch (do_mathemu(regs)) { |
969 | case 0: | 996 | case 0: |
970 | emulate_single_step(regs); | 997 | emulate_single_step(regs); |
@@ -1315,9 +1342,8 @@ void altivec_assist_exception(struct pt_regs *regs) | |||
1315 | } else { | 1342 | } else { |
1316 | /* didn't recognize the instruction */ | 1343 | /* didn't recognize the instruction */ |
1317 | /* XXX quick hack for now: set the non-Java bit in the VSCR */ | 1344 | /* XXX quick hack for now: set the non-Java bit in the VSCR */ |
1318 | if (printk_ratelimit()) | 1345 | printk_ratelimited(KERN_ERR "Unrecognized altivec instruction " |
1319 | printk(KERN_ERR "Unrecognized altivec instruction " | 1346 | "in %s at %lx\n", current->comm, regs->nip); |
1320 | "in %s at %lx\n", current->comm, regs->nip); | ||
1321 | current->thread.vscr.u[3] |= 0x10000; | 1347 | current->thread.vscr.u[3] |= 0x10000; |
1322 | } | 1348 | } |
1323 | } | 1349 | } |
@@ -1511,15 +1537,18 @@ struct ppc_emulated ppc_emulated = { | |||
1511 | #ifdef CONFIG_VSX | 1537 | #ifdef CONFIG_VSX |
1512 | WARN_EMULATED_SETUP(vsx), | 1538 | WARN_EMULATED_SETUP(vsx), |
1513 | #endif | 1539 | #endif |
1540 | #ifdef CONFIG_PPC64 | ||
1541 | WARN_EMULATED_SETUP(mfdscr), | ||
1542 | WARN_EMULATED_SETUP(mtdscr), | ||
1543 | #endif | ||
1514 | }; | 1544 | }; |
1515 | 1545 | ||
1516 | u32 ppc_warn_emulated; | 1546 | u32 ppc_warn_emulated; |
1517 | 1547 | ||
1518 | void ppc_warn_emulated_print(const char *type) | 1548 | void ppc_warn_emulated_print(const char *type) |
1519 | { | 1549 | { |
1520 | if (printk_ratelimit()) | 1550 | pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm, |
1521 | pr_warning("%s used emulated %s instruction\n", current->comm, | 1551 | type); |
1522 | type); | ||
1523 | } | 1552 | } |
1524 | 1553 | ||
1525 | static int __init ppc_warn_emulated_init(void) | 1554 | static int __init ppc_warn_emulated_init(void) |
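Several traps.c hunks replace open-coded `if (printk_ratelimit()) printk(...)` pairs with the combined `printk_ratelimited()`/`pr_warn_ratelimited()` helpers from the newly included `<linux/ratelimit.h>`. The difference is more than brevity: `printk_ratelimit()` shares one global ratelimit state across every caller, while the `_ratelimited` macros give each call site its own static state, so one noisy path cannot starve another's diagnostics. A hedged before/after sketch (the message text is invented for illustration):

    #include <linux/kernel.h>
    #include <linux/ratelimit.h>

    /* Before: one global ratelimit state shared by all printk_ratelimit() users. */
    static void report_old(unsigned long nip)
    {
            if (printk_ratelimit())
                    printk(KERN_ERR "bad instruction at %lx\n", nip);
    }

    /* After: printk_ratelimited() expands to a per-call-site ratelimit_state,
     * so this site is throttled independently of other callers. */
    static void report_new(unsigned long nip)
    {
            printk_ratelimited(KERN_ERR "bad instruction at %lx\n", nip);
    }

The DSCR emulation hunk in the same file uses the standard field extraction `rd = (instword >> 21) & 0x1f`, which pulls the 5-bit RT/RS register number out of the mfspr/mtspr instruction word before reading or writing SPRN_DSCR on the task's behalf.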
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c index e39cad83c884..23d65abbedce 100644 --- a/arch/powerpc/kernel/udbg.c +++ b/arch/powerpc/kernel/udbg.c | |||
@@ -62,6 +62,8 @@ void __init udbg_early_init(void) | |||
62 | udbg_init_cpm(); | 62 | udbg_init_cpm(); |
63 | #elif defined(CONFIG_PPC_EARLY_DEBUG_USBGECKO) | 63 | #elif defined(CONFIG_PPC_EARLY_DEBUG_USBGECKO) |
64 | udbg_init_usbgecko(); | 64 | udbg_init_usbgecko(); |
65 | #elif defined(CONFIG_PPC_EARLY_DEBUG_WSP) | ||
66 | udbg_init_wsp(); | ||
65 | #endif | 67 | #endif |
66 | 68 | ||
67 | #ifdef CONFIG_PPC_EARLY_DEBUG | 69 | #ifdef CONFIG_PPC_EARLY_DEBUG |
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c index b4b167b33643..6837f839ab78 100644 --- a/arch/powerpc/kernel/udbg_16550.c +++ b/arch/powerpc/kernel/udbg_16550.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * udbg for NS16550 compatable serial ports | 2 | * udbg for NS16550 compatible serial ports |
3 | * | 3 | * |
4 | * Copyright (C) 2001-2005 PPC 64 Team, IBM Corp | 4 | * Copyright (C) 2001-2005 PPC 64 Team, IBM Corp |
5 | * | 5 | * |
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
12 | #include <asm/udbg.h> | 12 | #include <asm/udbg.h> |
13 | #include <asm/io.h> | 13 | #include <asm/io.h> |
14 | #include <asm/reg_a2.h> | ||
14 | 15 | ||
15 | extern u8 real_readb(volatile u8 __iomem *addr); | 16 | extern u8 real_readb(volatile u8 __iomem *addr); |
16 | extern void real_writeb(u8 data, volatile u8 __iomem *addr); | 17 | extern void real_writeb(u8 data, volatile u8 __iomem *addr); |
@@ -298,3 +299,53 @@ void __init udbg_init_40x_realmode(void) | |||
298 | udbg_getc_poll = NULL; | 299 | udbg_getc_poll = NULL; |
299 | } | 300 | } |
300 | #endif /* CONFIG_PPC_EARLY_DEBUG_40x */ | 301 | #endif /* CONFIG_PPC_EARLY_DEBUG_40x */ |
302 | |||
303 | #ifdef CONFIG_PPC_EARLY_DEBUG_WSP | ||
304 | static void udbg_wsp_flush(void) | ||
305 | { | ||
306 | if (udbg_comport) { | ||
307 | while ((readb(&udbg_comport->lsr) & LSR_THRE) == 0) | ||
308 | /* wait for idle */; | ||
309 | } | ||
310 | } | ||
311 | |||
312 | static void udbg_wsp_putc(char c) | ||
313 | { | ||
314 | if (udbg_comport) { | ||
315 | if (c == '\n') | ||
316 | udbg_wsp_putc('\r'); | ||
317 | udbg_wsp_flush(); | ||
318 | writeb(c, &udbg_comport->thr); eieio(); | ||
319 | } | ||
320 | } | ||
321 | |||
322 | static int udbg_wsp_getc(void) | ||
323 | { | ||
324 | if (udbg_comport) { | ||
325 | while ((readb(&udbg_comport->lsr) & LSR_DR) == 0) | ||
326 | ; /* wait for char */ | ||
327 | return readb(&udbg_comport->rbr); | ||
328 | } | ||
329 | return -1; | ||
330 | } | ||
331 | |||
332 | static int udbg_wsp_getc_poll(void) | ||
333 | { | ||
334 | if (udbg_comport) | ||
335 | if (readb(&udbg_comport->lsr) & LSR_DR) | ||
336 | return readb(&udbg_comport->rbr); | ||
337 | return -1; | ||
338 | } | ||
339 | |||
340 | void __init udbg_init_wsp(void) | ||
341 | { | ||
342 | udbg_comport = (struct NS16550 __iomem *)WSP_UART_VIRT; | ||
343 | |||
344 | udbg_init_uart(udbg_comport, 57600, 50000000); | ||
345 | |||
346 | udbg_putc = udbg_wsp_putc; | ||
347 | udbg_flush = udbg_wsp_flush; | ||
348 | udbg_getc = udbg_wsp_getc; | ||
349 | udbg_getc_poll = udbg_wsp_getc_poll; | ||
350 | } | ||
351 | #endif /* CONFIG_PPC_EARLY_DEBUG_WSP */ | ||
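The new WSP early-debug backend above is a conventional polled NS16550 driver: transmit waits for the LSR THRE bit (transmit holding register empty) before writing THR, receive spins on the LSR DR bit (data ready) before reading RBR, and `eieio()` orders the MMIO store on PowerPC. A freestanding sketch of the same polling discipline, assuming hypothetical `mmio_read8`/`mmio_write8` accessors in place of the kernel's `readb`/`writeb`:

    #include <stdint.h>

    /* Standard 16550 register offsets and LSR bits; the accessors below
     * are illustrative assumptions, not kernel APIs. */
    #define UART_THR 0x00   /* transmit holding register (write) */
    #define UART_RBR 0x00   /* receive buffer register (read) */
    #define UART_LSR 0x05   /* line status register */
    #define LSR_DR   0x01   /* data ready */
    #define LSR_THRE 0x20   /* transmit holding register empty */

    extern uint8_t mmio_read8(uintptr_t addr);           /* assumed accessor */
    extern void mmio_write8(uintptr_t addr, uint8_t v);  /* assumed accessor */

    static void uart_putc(uintptr_t base, char c)
    {
            if (c == '\n')
                    uart_putc(base, '\r');   /* same CRLF fixup as udbg_wsp_putc() */
            while ((mmio_read8(base + UART_LSR) & LSR_THRE) == 0)
                    ;                        /* wait for the transmitter to drain */
            mmio_write8(base + UART_THR, (uint8_t)c);
    }

    static int uart_getc_poll(uintptr_t base)
    {
            if (mmio_read8(base + UART_LSR) & LSR_DR)    /* non-blocking probe */
                    return mmio_read8(base + UART_RBR);
            return -1;                       /* no character available */
    }

As in `udbg_init_wsp()`, the only board-specific inputs are the MMIO base, the baud rate, and the UART input clock handed to the generic init routine.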
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 13002fe206e7..142ab1008c3b 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c | |||
@@ -159,7 +159,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma) | |||
159 | { | 159 | { |
160 | int i; | 160 | int i; |
161 | 161 | ||
162 | if (!vma || test_thread_flag(TIF_32BIT)) { | 162 | if (!vma || is_32bit_task()) { |
163 | printk("vDSO32 @ %016lx:\n", (unsigned long)vdso32_kbase); | 163 | printk("vDSO32 @ %016lx:\n", (unsigned long)vdso32_kbase); |
164 | for (i=0; i<vdso32_pages; i++) { | 164 | for (i=0; i<vdso32_pages; i++) { |
165 | struct page *pg = virt_to_page(vdso32_kbase + | 165 | struct page *pg = virt_to_page(vdso32_kbase + |
@@ -170,7 +170,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma) | |||
170 | dump_one_vdso_page(pg, upg); | 170 | dump_one_vdso_page(pg, upg); |
171 | } | 171 | } |
172 | } | 172 | } |
173 | if (!vma || !test_thread_flag(TIF_32BIT)) { | 173 | if (!vma || !is_32bit_task()) { |
174 | printk("vDSO64 @ %016lx:\n", (unsigned long)vdso64_kbase); | 174 | printk("vDSO64 @ %016lx:\n", (unsigned long)vdso64_kbase); |
175 | for (i=0; i<vdso64_pages; i++) { | 175 | for (i=0; i<vdso64_pages; i++) { |
176 | struct page *pg = virt_to_page(vdso64_kbase + | 176 | struct page *pg = virt_to_page(vdso64_kbase + |
@@ -200,7 +200,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | |||
200 | return 0; | 200 | return 0; |
201 | 201 | ||
202 | #ifdef CONFIG_PPC64 | 202 | #ifdef CONFIG_PPC64 |
203 | if (test_thread_flag(TIF_32BIT)) { | 203 | if (is_32bit_task()) { |
204 | vdso_pagelist = vdso32_pagelist; | 204 | vdso_pagelist = vdso32_pagelist; |
205 | vdso_pages = vdso32_pages; | 205 | vdso_pages = vdso32_pages; |
206 | vdso_base = VDSO32_MBASE; | 206 | vdso_base = VDSO32_MBASE; |
@@ -820,17 +820,17 @@ static int __init vdso_init(void) | |||
820 | } | 820 | } |
821 | arch_initcall(vdso_init); | 821 | arch_initcall(vdso_init); |
822 | 822 | ||
823 | int in_gate_area_no_task(unsigned long addr) | 823 | int in_gate_area_no_mm(unsigned long addr) |
824 | { | 824 | { |
825 | return 0; | 825 | return 0; |
826 | } | 826 | } |
827 | 827 | ||
828 | int in_gate_area(struct task_struct *task, unsigned long addr) | 828 | int in_gate_area(struct mm_struct *mm, unsigned long addr) |
829 | { | 829 | { |
830 | return 0; | 830 | return 0; |
831 | } | 831 | } |
832 | 832 | ||
833 | struct vm_area_struct *get_gate_vma(struct task_struct *tsk) | 833 | struct vm_area_struct *get_gate_vma(struct mm_struct *mm) |
834 | { | 834 | { |
835 | return NULL; | 835 | return NULL; |
836 | } | 836 | } |
diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile index 51ead52141bd..9a7946c41738 100644 --- a/arch/powerpc/kernel/vdso32/Makefile +++ b/arch/powerpc/kernel/vdso32/Makefile | |||
@@ -14,10 +14,10 @@ obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32)) | |||
14 | 14 | ||
15 | GCOV_PROFILE := n | 15 | GCOV_PROFILE := n |
16 | 16 | ||
17 | EXTRA_CFLAGS := -shared -fno-common -fno-builtin | 17 | ccflags-y := -shared -fno-common -fno-builtin |
18 | EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso32.so.1 \ | 18 | ccflags-y += -nostdlib -Wl,-soname=linux-vdso32.so.1 \ |
19 | $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) | 19 | $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) |
20 | EXTRA_AFLAGS := -D__VDSO32__ -s | 20 | asflags-y := -D__VDSO32__ -s |
21 | 21 | ||
22 | obj-y += vdso32_wrapper.o | 22 | obj-y += vdso32_wrapper.o |
23 | extra-y += vdso32.lds | 23 | extra-y += vdso32.lds |
diff --git a/arch/powerpc/kernel/vdso32/sigtramp.S b/arch/powerpc/kernel/vdso32/sigtramp.S index 68d49dd71dcc..cf0c9c9c24f9 100644 --- a/arch/powerpc/kernel/vdso32/sigtramp.S +++ b/arch/powerpc/kernel/vdso32/sigtramp.S | |||
@@ -19,7 +19,7 @@ | |||
19 | 19 | ||
20 | /* The nop here is a hack. The dwarf2 unwind routines subtract 1 from | 20 | /* The nop here is a hack. The dwarf2 unwind routines subtract 1 from |
21 | the return address to get an address in the middle of the presumed | 21 | the return address to get an address in the middle of the presumed |
22 | call instruction. Since we don't have a call here, we artifically | 22 | call instruction. Since we don't have a call here, we artificially |
23 | extend the range covered by the unwind info by adding a nop before | 23 | extend the range covered by the unwind info by adding a nop before |
24 | the real start. */ | 24 | the real start. */ |
25 | nop | 25 | nop |
diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile index 79da65d44a2a..8c500d8622e4 100644 --- a/arch/powerpc/kernel/vdso64/Makefile +++ b/arch/powerpc/kernel/vdso64/Makefile | |||
@@ -9,10 +9,10 @@ obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64)) | |||
9 | 9 | ||
10 | GCOV_PROFILE := n | 10 | GCOV_PROFILE := n |
11 | 11 | ||
12 | EXTRA_CFLAGS := -shared -fno-common -fno-builtin | 12 | ccflags-y := -shared -fno-common -fno-builtin |
13 | EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso64.so.1 \ | 13 | ccflags-y += -nostdlib -Wl,-soname=linux-vdso64.so.1 \ |
14 | $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) | 14 | $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) |
15 | EXTRA_AFLAGS := -D__VDSO64__ -s | 15 | asflags-y := -D__VDSO64__ -s |
16 | 16 | ||
17 | obj-y += vdso64_wrapper.o | 17 | obj-y += vdso64_wrapper.o |
18 | extra-y += vdso64.lds | 18 | extra-y += vdso64.lds |
diff --git a/arch/powerpc/kernel/vdso64/sigtramp.S b/arch/powerpc/kernel/vdso64/sigtramp.S index 59eb59bb4082..45ea281e9a21 100644 --- a/arch/powerpc/kernel/vdso64/sigtramp.S +++ b/arch/powerpc/kernel/vdso64/sigtramp.S | |||
@@ -20,7 +20,7 @@ | |||
20 | 20 | ||
21 | /* The nop here is a hack. The dwarf2 unwind routines subtract 1 from | 21 | /* The nop here is a hack. The dwarf2 unwind routines subtract 1 from |
22 | the return address to get an address in the middle of the presumed | 22 | the return address to get an address in the middle of the presumed |
23 | call instruction. Since we don't have a call here, we artifically | 23 | call instruction. Since we don't have a call here, we artificially |
24 | extend the range covered by the unwind info by padding before the | 24 | extend the range covered by the unwind info by padding before the |
25 | real start. */ | 25 | real start. */ |
26 | nop | 26 | nop |
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S index fe460482fa68..4d5a3edff49e 100644 --- a/arch/powerpc/kernel/vector.S +++ b/arch/powerpc/kernel/vector.S | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <asm/cputable.h> | 5 | #include <asm/cputable.h> |
6 | #include <asm/thread_info.h> | 6 | #include <asm/thread_info.h> |
7 | #include <asm/page.h> | 7 | #include <asm/page.h> |
8 | #include <asm/ptrace.h> | ||
8 | 9 | ||
9 | /* | 10 | /* |
10 | * load_up_altivec(unused, unused, tsk) | 11 | * load_up_altivec(unused, unused, tsk) |
@@ -101,7 +102,7 @@ _GLOBAL(giveup_altivec) | |||
101 | MTMSRD(r5) /* enable use of VMX now */ | 102 | MTMSRD(r5) /* enable use of VMX now */ |
102 | isync | 103 | isync |
103 | PPC_LCMPI 0,r3,0 | 104 | PPC_LCMPI 0,r3,0 |
104 | beqlr- /* if no previous owner, done */ | 105 | beqlr /* if no previous owner, done */ |
105 | addi r3,r3,THREAD /* want THREAD of task */ | 106 | addi r3,r3,THREAD /* want THREAD of task */ |
106 | PPC_LL r5,PT_REGS(r3) | 107 | PPC_LL r5,PT_REGS(r3) |
107 | PPC_LCMPI 0,r5,0 | 108 | PPC_LCMPI 0,r5,0 |
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index fa3469ddaef8..1b695fdc362b 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c | |||
@@ -238,9 +238,7 @@ static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size) | |||
238 | * memory in this pool does not change. | 238 | * memory in this pool does not change. |
239 | */ | 239 | */ |
240 | if (spare_needed && reserve_freed) { | 240 | if (spare_needed && reserve_freed) { |
241 | tmp = min(spare_needed, min(reserve_freed, | 241 | tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT)); |
242 | (viodev->cmo.entitled - | ||
243 | VIO_CMO_MIN_ENT))); | ||
244 | 242 | ||
245 | vio_cmo.spare += tmp; | 243 | vio_cmo.spare += tmp; |
246 | viodev->cmo.entitled -= tmp; | 244 | viodev->cmo.entitled -= tmp; |
@@ -602,6 +600,11 @@ static void vio_dma_iommu_unmap_sg(struct device *dev, | |||
602 | vio_cmo_dealloc(viodev, alloc_size); | 600 | vio_cmo_dealloc(viodev, alloc_size); |
603 | } | 601 | } |
604 | 602 | ||
603 | static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask) | ||
604 | { | ||
605 | return dma_iommu_ops.dma_supported(dev, mask); | ||
606 | } | ||
607 | |||
605 | struct dma_map_ops vio_dma_mapping_ops = { | 608 | struct dma_map_ops vio_dma_mapping_ops = { |
606 | .alloc_coherent = vio_dma_iommu_alloc_coherent, | 609 | .alloc_coherent = vio_dma_iommu_alloc_coherent, |
607 | .free_coherent = vio_dma_iommu_free_coherent, | 610 | .free_coherent = vio_dma_iommu_free_coherent, |
@@ -609,6 +612,7 @@ struct dma_map_ops vio_dma_mapping_ops = { | |||
609 | .unmap_sg = vio_dma_iommu_unmap_sg, | 612 | .unmap_sg = vio_dma_iommu_unmap_sg, |
610 | .map_page = vio_dma_iommu_map_page, | 613 | .map_page = vio_dma_iommu_map_page, |
611 | .unmap_page = vio_dma_iommu_unmap_page, | 614 | .unmap_page = vio_dma_iommu_unmap_page, |
615 | .dma_supported = vio_dma_iommu_dma_supported, | ||
612 | 616 | ||
613 | }; | 617 | }; |
614 | 618 | ||
@@ -860,8 +864,7 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev) | |||
860 | 864 | ||
861 | static void vio_cmo_set_dma_ops(struct vio_dev *viodev) | 865 | static void vio_cmo_set_dma_ops(struct vio_dev *viodev) |
862 | { | 866 | { |
863 | vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported; | 867 | set_dma_ops(&viodev->dev, &vio_dma_mapping_ops); |
864 | viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops; | ||
865 | } | 868 | } |
866 | 869 | ||
867 | /** | 870 | /** |
@@ -1184,7 +1187,12 @@ EXPORT_SYMBOL(vio_unregister_driver); | |||
1184 | /* vio_dev refcount hit 0 */ | 1187 | /* vio_dev refcount hit 0 */ |
1185 | static void __devinit vio_dev_release(struct device *dev) | 1188 | static void __devinit vio_dev_release(struct device *dev) |
1186 | { | 1189 | { |
1187 | /* XXX should free TCE table */ | 1190 | struct iommu_table *tbl = get_iommu_table_base(dev); |
1191 | |||
1192 | /* iSeries uses a common table for all vio devices */ | ||
1193 | if (!firmware_has_feature(FW_FEATURE_ISERIES) && tbl) | ||
1194 | iommu_free_table(tbl, dev->of_node ? | ||
1195 | dev->of_node->full_name : dev_name(dev)); | ||
1188 | of_node_put(dev->of_node); | 1196 | of_node_put(dev->of_node); |
1189 | kfree(to_vio_dev(dev)); | 1197 | kfree(to_vio_dev(dev)); |
1190 | } | 1198 | } |
@@ -1241,7 +1249,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) | |||
1241 | if (firmware_has_feature(FW_FEATURE_CMO)) | 1249 | if (firmware_has_feature(FW_FEATURE_CMO)) |
1242 | vio_cmo_set_dma_ops(viodev); | 1250 | vio_cmo_set_dma_ops(viodev); |
1243 | else | 1251 | else |
1244 | viodev->dev.archdata.dma_ops = &dma_iommu_ops; | 1252 | set_dma_ops(&viodev->dev, &dma_iommu_ops); |
1245 | set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev)); | 1253 | set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev)); |
1246 | set_dev_node(&viodev->dev, of_node_to_nid(of_node)); | 1254 | set_dev_node(&viodev->dev, of_node_to_nid(of_node)); |
1247 | 1255 | ||
@@ -1249,13 +1257,16 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) | |||
1249 | viodev->dev.parent = &vio_bus_device.dev; | 1257 | viodev->dev.parent = &vio_bus_device.dev; |
1250 | viodev->dev.bus = &vio_bus_type; | 1258 | viodev->dev.bus = &vio_bus_type; |
1251 | viodev->dev.release = vio_dev_release; | 1259 | viodev->dev.release = vio_dev_release; |
1260 | /* needed to ensure proper operation of coherent allocations | ||
1261 | * later, in case driver doesn't set it explicitly */ | ||
1262 | dma_set_mask(&viodev->dev, DMA_BIT_MASK(64)); | ||
1263 | dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64)); | ||
1252 | 1264 | ||
1253 | /* register with generic device framework */ | 1265 | /* register with generic device framework */ |
1254 | if (device_register(&viodev->dev)) { | 1266 | if (device_register(&viodev->dev)) { |
1255 | printk(KERN_ERR "%s: failed to register device %s\n", | 1267 | printk(KERN_ERR "%s: failed to register device %s\n", |
1256 | __func__, dev_name(&viodev->dev)); | 1268 | __func__, dev_name(&viodev->dev)); |
1257 | /* XXX free TCE table */ | 1269 | put_device(&viodev->dev); |
1258 | kfree(viodev); | ||
1259 | return NULL; | 1270 | return NULL; |
1260 | } | 1271 | } |
1261 | 1272 | ||
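Two smaller cleanups in the vio.c hunks are worth noting: `min3(a, b, c)` replaces the nested `min(a, min(b, c))` while still type-checking all three operands, and the `device_register()` failure path now calls `put_device()` instead of `kfree()`. The latter fixes a driver-model rule: once `device_register()` has been called, the struct device carries a live refcount, and the embedded object may only be freed by its `->release` callback (here `vio_dev_release()`, which now also frees the IOMMU table). A minimal sketch of that rule with a hypothetical `struct my_dev`, not an existing kernel structure:

    #include <linux/device.h>
    #include <linux/slab.h>

    struct my_dev {
            struct device dev;
            /* ... driver-private state ... */
    };

    static void my_dev_release(struct device *dev)
    {
            /* Runs when the last reference drops: the only safe place to kfree(). */
            kfree(container_of(dev, struct my_dev, dev));
    }

    /* Illustrative registration helper, not an existing kernel function. */
    static struct my_dev *my_dev_register(void)
    {
            struct my_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);

            if (!d)
                    return NULL;
            d->dev.release = my_dev_release;
            dev_set_name(&d->dev, "mydev0");

            if (device_register(&d->dev)) {
                    /* Wrong: kfree(d) would bypass the refcount and double-free
                     * if anyone already took a reference. Right: drop ours. */
                    put_device(&d->dev);
                    return NULL;
            }
            return d;
    }

The same reasoning explains why the old `/* XXX free TCE table */` comments could be deleted: the table teardown now lives in the release callback, where it runs exactly once regardless of which path drops the final reference.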
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 8a0deefac08d..920276c0f6a1 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S | |||
@@ -160,7 +160,7 @@ SECTIONS | |||
160 | INIT_RAM_FS | 160 | INIT_RAM_FS |
161 | } | 161 | } |
162 | 162 | ||
163 | PERCPU(PAGE_SIZE) | 163 | PERCPU_SECTION(L1_CACHE_BYTES) |
164 | 164 | ||
165 | . = ALIGN(8); | 165 | . = ALIGN(8); |
166 | .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) { | 166 | .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) { |