author		Grant Likely <grant.likely@secretlab.ca>	2010-05-22 02:36:56 -0400
committer	Grant Likely <grant.likely@secretlab.ca>	2010-05-22 02:36:56 -0400
commit		cf9b59e9d3e008591d1f54830f570982bb307a0d (patch)
tree		113478ce8fd8c832ba726ffdf59b82cb46356476 /arch/powerpc/kernel
parent		44504b2bebf8b5823c59484e73096a7d6574471d (diff)
parent		f4b87dee923342505e1ddba8d34ce9de33e75050 (diff)
Merge remote branch 'origin' into secretlab/next-devicetree
Merging in current state of Linus' tree to deal with merge conflicts and
build failures in vio.c after merge.

Conflicts:
	drivers/i2c/busses/i2c-cpm.c
	drivers/i2c/busses/i2c-mpc.c
	drivers/net/gianfar.c

Also fixed up one line in arch/powerpc/kernel/vio.c to use the correct
node pointer.

Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c      | 112
-rw-r--r--  arch/powerpc/kernel/cputable.c         | 29
-rw-r--r--  arch/powerpc/kernel/crash.c            | 38
-rw-r--r--  arch/powerpc/kernel/dma-swiotlb.c      | 5
-rw-r--r--  arch/powerpc/kernel/entry_32.S         | 5
-rw-r--r--  arch/powerpc/kernel/entry_64.S         | 9
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S   | 13
-rw-r--r--  arch/powerpc/kernel/head_32.S          | 14
-rw-r--r--  arch/powerpc/kernel/head_44x.S         | 828
-rw-r--r--  arch/powerpc/kernel/head_64.S          | 4
-rw-r--r--  arch/powerpc/kernel/head_8xx.S         | 70
-rw-r--r--  arch/powerpc/kernel/head_booke.h       | 4
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S   | 13
-rw-r--r--  arch/powerpc/kernel/iommu.c            | 12
-rw-r--r--  arch/powerpc/kernel/irq.c              | 23
-rw-r--r--  arch/powerpc/kernel/kgdb.c             | 11
-rw-r--r--  arch/powerpc/kernel/kprobes.c          | 3
-rw-r--r--  arch/powerpc/kernel/lparcfg.c          | 12
-rw-r--r--  arch/powerpc/kernel/machine_kexec_64.c | 48
-rw-r--r--  arch/powerpc/kernel/misc_32.S          | 9
-rw-r--r--  arch/powerpc/kernel/misc_64.S          | 8
-rw-r--r--  arch/powerpc/kernel/paca.c             | 2
-rw-r--r--  arch/powerpc/kernel/pci_of_scan.c      | 2
-rw-r--r--  arch/powerpc/kernel/perf_event.c       | 129
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c        | 4
-rw-r--r--  arch/powerpc/kernel/process.c          | 3
-rw-r--r--  arch/powerpc/kernel/prom_init.c        | 3
-rw-r--r--  arch/powerpc/kernel/ptrace.c           | 103
-rw-r--r--  arch/powerpc/kernel/rtas.c             | 15
-rw-r--r--  arch/powerpc/kernel/rtasd.c            | 16
-rw-r--r--  arch/powerpc/kernel/setup-common.c     | 86
-rw-r--r--  arch/powerpc/kernel/setup_64.c         | 23
-rw-r--r--  arch/powerpc/kernel/smp.c              | 73
-rw-r--r--  arch/powerpc/kernel/sysfs.c            | 46
-rw-r--r--  arch/powerpc/kernel/time.c             | 60
-rw-r--r--  arch/powerpc/kernel/traps.c            | 47
-rw-r--r--  arch/powerpc/kernel/vio.c              | 26
37 files changed, 1405 insertions(+), 503 deletions(-)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 957ceb7059c5..496cc5b3984f 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -50,6 +50,9 @@
 #endif
 #ifdef CONFIG_KVM
 #include <linux/kvm_host.h>
+#ifndef CONFIG_BOOKE
+#include <asm/kvm_book3s.h>
+#endif
 #endif

 #ifdef CONFIG_PPC32
@@ -105,6 +108,9 @@ int main(void)
 	DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe));
 #endif /* CONFIG_SPE */
 #endif /* CONFIG_PPC64 */
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
+	DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu));
+#endif

 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
 	DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
@@ -133,7 +139,6 @@ int main(void)
 	DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
 	DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
 	DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
-	DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_event_pending));
 	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
 #ifdef CONFIG_PPC_MM_SLICES
 	DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
@@ -184,6 +189,7 @@ int main(void)
 #endif /* CONFIG_PPC_STD_MMU_64 */
 	DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
 	DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
+	DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state));
 	DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
 	DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
 	DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
@@ -191,33 +197,9 @@ int main(void)
 	DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset));
 	DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-	DEFINE(PACA_KVM_IN_GUEST, offsetof(struct paca_struct, kvm_in_guest));
-	DEFINE(PACA_KVM_SLB, offsetof(struct paca_struct, kvm_slb));
-	DEFINE(PACA_KVM_SLB_MAX, offsetof(struct paca_struct, kvm_slb_max));
-	DEFINE(PACA_KVM_CR, offsetof(struct paca_struct, shadow_vcpu.cr));
-	DEFINE(PACA_KVM_XER, offsetof(struct paca_struct, shadow_vcpu.xer));
-	DEFINE(PACA_KVM_R0, offsetof(struct paca_struct, shadow_vcpu.gpr[0]));
-	DEFINE(PACA_KVM_R1, offsetof(struct paca_struct, shadow_vcpu.gpr[1]));
-	DEFINE(PACA_KVM_R2, offsetof(struct paca_struct, shadow_vcpu.gpr[2]));
-	DEFINE(PACA_KVM_R3, offsetof(struct paca_struct, shadow_vcpu.gpr[3]));
-	DEFINE(PACA_KVM_R4, offsetof(struct paca_struct, shadow_vcpu.gpr[4]));
-	DEFINE(PACA_KVM_R5, offsetof(struct paca_struct, shadow_vcpu.gpr[5]));
-	DEFINE(PACA_KVM_R6, offsetof(struct paca_struct, shadow_vcpu.gpr[6]));
-	DEFINE(PACA_KVM_R7, offsetof(struct paca_struct, shadow_vcpu.gpr[7]));
-	DEFINE(PACA_KVM_R8, offsetof(struct paca_struct, shadow_vcpu.gpr[8]));
-	DEFINE(PACA_KVM_R9, offsetof(struct paca_struct, shadow_vcpu.gpr[9]));
-	DEFINE(PACA_KVM_R10, offsetof(struct paca_struct, shadow_vcpu.gpr[10]));
-	DEFINE(PACA_KVM_R11, offsetof(struct paca_struct, shadow_vcpu.gpr[11]));
-	DEFINE(PACA_KVM_R12, offsetof(struct paca_struct, shadow_vcpu.gpr[12]));
-	DEFINE(PACA_KVM_R13, offsetof(struct paca_struct, shadow_vcpu.gpr[13]));
-	DEFINE(PACA_KVM_HOST_R1, offsetof(struct paca_struct, shadow_vcpu.host_r1));
-	DEFINE(PACA_KVM_HOST_R2, offsetof(struct paca_struct, shadow_vcpu.host_r2));
-	DEFINE(PACA_KVM_VMHANDLER, offsetof(struct paca_struct,
-					    shadow_vcpu.vmhandler));
-	DEFINE(PACA_KVM_SCRATCH0, offsetof(struct paca_struct,
-					   shadow_vcpu.scratch0));
-	DEFINE(PACA_KVM_SCRATCH1, offsetof(struct paca_struct,
-					   shadow_vcpu.scratch1));
+	DEFINE(PACA_KVM_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
+	DEFINE(SVCPU_SLB, offsetof(struct kvmppc_book3s_shadow_vcpu, slb));
+	DEFINE(SVCPU_SLB_MAX, offsetof(struct kvmppc_book3s_shadow_vcpu, slb_max));
 #endif
 #endif /* CONFIG_PPC64 */

@@ -228,8 +210,8 @@ int main(void)
 	/* Interrupt register frame */
 	DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
 	DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
-#ifdef CONFIG_PPC64
 	DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
+#ifdef CONFIG_PPC64
 	/* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
 	DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
 	DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
@@ -412,9 +394,6 @@ int main(void)
 	DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
 	DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
 	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
-	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
-	DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
-	DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
 	DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
 	DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
 	DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
@@ -422,32 +401,81 @@ int main(void)
 	DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
 	DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));

-	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
-	DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
-	DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
-
-	/* book3s_64 */
-#ifdef CONFIG_PPC64
-	DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
+	/* book3s */
+#ifdef CONFIG_PPC_BOOK3S
 	DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
-	DEFINE(VCPU_HOST_R2, offsetof(struct kvm_vcpu, arch.host_r2));
 	DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
 	DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
-	DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
 	DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
 	DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
 	DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
 	DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
 	DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
+	DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
+			   offsetof(struct kvmppc_vcpu_book3s, vcpu));
+	DEFINE(SVCPU_CR, offsetof(struct kvmppc_book3s_shadow_vcpu, cr));
+	DEFINE(SVCPU_XER, offsetof(struct kvmppc_book3s_shadow_vcpu, xer));
+	DEFINE(SVCPU_CTR, offsetof(struct kvmppc_book3s_shadow_vcpu, ctr));
+	DEFINE(SVCPU_LR, offsetof(struct kvmppc_book3s_shadow_vcpu, lr));
+	DEFINE(SVCPU_PC, offsetof(struct kvmppc_book3s_shadow_vcpu, pc));
+	DEFINE(SVCPU_R0, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[0]));
+	DEFINE(SVCPU_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[1]));
+	DEFINE(SVCPU_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[2]));
+	DEFINE(SVCPU_R3, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[3]));
+	DEFINE(SVCPU_R4, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[4]));
+	DEFINE(SVCPU_R5, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[5]));
+	DEFINE(SVCPU_R6, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[6]));
+	DEFINE(SVCPU_R7, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[7]));
+	DEFINE(SVCPU_R8, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[8]));
+	DEFINE(SVCPU_R9, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[9]));
+	DEFINE(SVCPU_R10, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[10]));
+	DEFINE(SVCPU_R11, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[11]));
+	DEFINE(SVCPU_R12, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[12]));
+	DEFINE(SVCPU_R13, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[13]));
+	DEFINE(SVCPU_HOST_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r1));
+	DEFINE(SVCPU_HOST_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r2));
+	DEFINE(SVCPU_VMHANDLER, offsetof(struct kvmppc_book3s_shadow_vcpu,
+					 vmhandler));
+	DEFINE(SVCPU_SCRATCH0, offsetof(struct kvmppc_book3s_shadow_vcpu,
+					scratch0));
+	DEFINE(SVCPU_SCRATCH1, offsetof(struct kvmppc_book3s_shadow_vcpu,
+					scratch1));
+	DEFINE(SVCPU_IN_GUEST, offsetof(struct kvmppc_book3s_shadow_vcpu,
+					in_guest));
+	DEFINE(SVCPU_FAULT_DSISR, offsetof(struct kvmppc_book3s_shadow_vcpu,
+					   fault_dsisr));
+	DEFINE(SVCPU_FAULT_DAR, offsetof(struct kvmppc_book3s_shadow_vcpu,
+					 fault_dar));
+	DEFINE(SVCPU_LAST_INST, offsetof(struct kvmppc_book3s_shadow_vcpu,
+					 last_inst));
+	DEFINE(SVCPU_SHADOW_SRR1, offsetof(struct kvmppc_book3s_shadow_vcpu,
+					   shadow_srr1));
+#ifdef CONFIG_PPC_BOOK3S_32
+	DEFINE(SVCPU_SR, offsetof(struct kvmppc_book3s_shadow_vcpu, sr));
+#endif
 #else
 	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
 	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
-#endif /* CONFIG_PPC64 */
+	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
+	DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
+	DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
+	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
+	DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
+	DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
+#endif /* CONFIG_PPC_BOOK3S */
 #endif
 #ifdef CONFIG_44x
 	DEFINE(PGD_T_LOG2, PGD_T_LOG2);
 	DEFINE(PTE_T_LOG2, PTE_T_LOG2);
 #endif
+#ifdef CONFIG_FSL_BOOKE
+	DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
+	DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0));
+	DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1));
+	DEFINE(TLBCAM_MAS2, offsetof(struct tlbcam, MAS2));
+	DEFINE(TLBCAM_MAS3, offsetof(struct tlbcam, MAS3));
+	DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
+#endif

 #ifdef CONFIG_KVM_EXIT_TIMING
 	DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
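A note on what these DEFINE() entries are for: asm-offsets.c is never linked
into the kernel. It is compiled to assembly, and each DEFINE() emits a marker
that the build rewrites into a #define in the generated asm-offsets.h, which
the assembly files touched by this merge (entry_*.S, head_*.S, the KVM
handlers) then use as load/store offsets. A minimal sketch of the mechanism,
assuming the DEFINE() helper from include/linux/kbuild.h and a hypothetical
stand-in struct:

/* Hedged sketch of the asm-offsets mechanism. */
#include <stddef.h>

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct shadow_vcpu_sketch {		/* hypothetical stand-in struct */
	unsigned long cr;
	unsigned long xer;
};

int main(void)
{
	/* each line becomes "->SVCPU_CR 0 ..." in the generated .s, which a
	 * build script rewrites to "#define SVCPU_CR 0" in asm-offsets.h */
	DEFINE(SVCPU_CR, offsetof(struct shadow_vcpu_sketch, cr));
	DEFINE(SVCPU_XER, offsetof(struct shadow_vcpu_sketch, xer));
	return 0;
}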
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 8af4949434b2..9556be903e96 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1701,6 +1701,35 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.machine_check		= machine_check_440A,
 		.platform		= "ppc440",
 	},
+	{ /* 476 core */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x11a50000,
+		.cpu_name		= "476",
+		.cpu_features		= CPU_FTRS_47X,
+		.cpu_user_features	= COMMON_USER_BOOKE |
+			PPC_FEATURE_HAS_FPU,
+		.mmu_features		= MMU_FTR_TYPE_47x |
+			MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
+		.icache_bsize		= 32,
+		.dcache_bsize		= 128,
+		.machine_check		= machine_check_47x,
+		.platform		= "ppc470",
+	},
+	{ /* 476 iss */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x00050000,
+		.cpu_name		= "476",
+		.cpu_features		= CPU_FTRS_47X,
+		.cpu_user_features	= COMMON_USER_BOOKE |
+			PPC_FEATURE_HAS_FPU,
+		.cpu_user_features	= COMMON_USER_BOOKE,
+		.mmu_features		= MMU_FTR_TYPE_47x |
+			MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
+		.icache_bsize		= 32,
+		.dcache_bsize		= 128,
+		.machine_check		= machine_check_47x,
+		.platform		= "ppc470",
+	},
 	{ /* default match */
 		.pvr_mask		= 0x00000000,
 		.pvr_value		= 0x00000000,
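The two new entries are matched against the processor version register (PVR)
the same way as every other cpu_spec: mask the PVR, then compare. A hedged
sketch of that lookup, simplified from identify_cpu() in this file (the
struct here is a hypothetical reduction):

#include <stdio.h>

struct cpu_spec_sketch {		/* hypothetical reduced cpu_spec */
	unsigned int pvr_mask;
	unsigned int pvr_value;
	const char *cpu_name;
};

static struct cpu_spec_sketch specs[] = {
	{ 0xffff0000, 0x11a50000, "476" },	/* 476 core */
	{ 0xffff0000, 0x00050000, "476" },	/* 476 iss (simulator) */
	{ 0x00000000, 0x00000000, "unknown" },	/* default: mask 0 matches all */
};

static const char *identify(unsigned int pvr)
{
	for (unsigned int i = 0; ; i++)
		if ((pvr & specs[i].pvr_mask) == specs[i].pvr_value)
			return specs[i].cpu_name;	/* first match wins */
}

int main(void)
{
	printf("PVR 0x11a50123 -> %s\n", identify(0x11a50123));
	printf("PVR 0x12340000 -> %s\n", identify(0x12340000));
	return 0;
}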
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 6f4613dd05ef..8c066d6a8e4b 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -162,6 +162,32 @@ static void crash_kexec_prepare_cpus(int cpu)
 	/* Leave the IPI callback set */
 }

+/* wait for all the CPUs to hit real mode but timeout if they don't come in */
+static void crash_kexec_wait_realmode(int cpu)
+{
+	unsigned int msecs;
+	int i;
+
+	msecs = 10000;
+	for (i=0; i < NR_CPUS && msecs > 0; i++) {
+		if (i == cpu)
+			continue;
+
+		while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) {
+			barrier();
+			if (!cpu_possible(i)) {
+				break;
+			}
+			if (!cpu_online(i)) {
+				break;
+			}
+			msecs--;
+			mdelay(1);
+		}
+	}
+	mb();
+}
+
 /*
  * This function will be called by secondary cpus or by kexec cpu
  * if soft-reset is activated to stop some CPUs.
@@ -347,10 +373,12 @@ int crash_shutdown_unregister(crash_shutdown_t handler)
 EXPORT_SYMBOL(crash_shutdown_unregister);

 static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
+static int crash_shutdown_cpu = -1;

 static int handle_fault(struct pt_regs *regs)
 {
-	longjmp(crash_shutdown_buf, 1);
+	if (crash_shutdown_cpu == smp_processor_id())
+		longjmp(crash_shutdown_buf, 1);
 	return 0;
 }

@@ -375,11 +403,14 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 	for_each_irq(i) {
 		struct irq_desc *desc = irq_to_desc(i);

+		if (!desc || !desc->chip || !desc->chip->eoi)
+			continue;
+
 		if (desc->status & IRQ_INPROGRESS)
 			desc->chip->eoi(i);

 		if (!(desc->status & IRQ_DISABLED))
-			desc->chip->disable(i);
+			desc->chip->shutdown(i);
 	}

 	/*
@@ -388,6 +419,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 	 */
 	old_handler = __debugger_fault_handler;
 	__debugger_fault_handler = handle_fault;
+	crash_shutdown_cpu = smp_processor_id();
 	for (i = 0; crash_shutdown_handles[i]; i++) {
 		if (setjmp(crash_shutdown_buf) == 0) {
 			/*
@@ -401,6 +433,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 			asm volatile("sync; isync");
 		}
 	}
+	crash_shutdown_cpu = -1;
 	__debugger_fault_handler = old_handler;

 	/*
@@ -412,6 +445,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 	crash_kexec_prepare_cpus(crashing_cpu);
 	cpu_set(crashing_cpu, cpus_in_crash);
 	crash_kexec_stop_spus();
+	crash_kexec_wait_realmode(crashing_cpu);
 	if (ppc_md.kexec_cpu_down)
 		ppc_md.kexec_cpu_down(1, 0);
 }
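Two things happen in this file. First, handle_fault() is now fenced by
crash_shutdown_cpu, so only the CPU that armed the setjmp buffer may longjmp
out of a faulting shutdown handler; a fault on any other CPU falls through to
the normal debugger path. Second, the kdump path now waits for the other CPUs
to report KEXEC_STATE_REAL_MODE in their paca, with a shared ~10-second budget
(msecs is decremented once per 1 ms poll across the whole loop). A hedged
user-space sketch of the fencing pattern:

/* User-space analogue: an owner id stands in for smp_processor_id(), and a
 * direct call stands in for the __debugger_fault_handler hook. */
#include <setjmp.h>
#include <stdio.h>

static jmp_buf shutdown_buf;
static long shutdown_owner = -1;	/* analogue of crash_shutdown_cpu */

static long self(void) { return 0; }	/* stand-in for smp_processor_id() */

static int handle_fault(void)
{
	if (shutdown_owner == self())	/* only the arming CPU may longjmp */
		longjmp(shutdown_buf, 1);
	return 0;			/* others fall through to the debugger */
}

int main(void)
{
	shutdown_owner = self();	/* arm the fence */
	if (setjmp(shutdown_buf) == 0)
		handle_fault();		/* simulate a faulting handler */
	else
		printf("recovered from faulting shutdown handler\n");
	shutdown_owner = -1;		/* disarm */
	return 0;
}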
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 59c928564a03..4ff4da2c238b 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -1,7 +1,8 @@
 /*
  * Contains routines needed to support swiotlb for ppc.
  *
- * Copyright (C) 2009 Becky Bruce, Freescale Semiconductor
+ * Copyright (C) 2009-2010 Freescale Semiconductor, Inc.
+ * Author: Becky Bruce
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -70,7 +71,7 @@ static int ppc_swiotlb_bus_notify(struct notifier_block *nb,
 	sd->max_direct_dma_addr = 0;

 	/* May need to bounce if the device can't address all of DRAM */
-	if (dma_get_mask(dev) < lmb_end_of_DRAM())
+	if ((dma_get_mask(dev) + 1) < lmb_end_of_DRAM())
 		set_dma_ops(dev, &swiotlb_dma_ops);

 	return NOTIFY_DONE;
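The one-line fix above is an off-by-one: dma_get_mask() returns a mask, i.e.
the highest DMA'able address (0xffffffff for a 32-bit device), not a byte
count, so it must be compared as mask + 1 against lmb_end_of_DRAM(), which
returns the address one past the end of RAM. With exactly 4GB of RAM the old
test wrongly enabled bouncing for a 32-bit device. A hedged sketch of the
arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Assumed semantics: end_of_dram is one past the last byte of RAM (what
 * lmb_end_of_DRAM() returns); dma_mask is the highest address the device
 * can reach (what dma_get_mask() returns). */
static int needs_swiotlb(uint64_t dma_mask, uint64_t end_of_dram)
{
	return (dma_mask + 1) < end_of_dram;	/* mask + 1 == addressable bytes */
}

int main(void)
{
	uint64_t mask32 = 0xffffffffULL;	/* 32-bit DMA-capable device */
	uint64_t four_gb = 1ULL << 32;		/* exactly 4GB of RAM */

	int old_test = mask32 < four_gb;		/* 1: bounced needlessly */
	int new_test = needs_swiotlb(mask32, four_gb);	/* 0: direct DMA is fine */
	printf("old=%d new=%d\n", old_test, new_test);
	return 0;
}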
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 1175a8539e6c..ed4aeb96398b 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -373,11 +373,13 @@ syscall_exit_cont:
 	bnel-	load_dbcr0
 #endif
 #ifdef CONFIG_44x
+BEGIN_MMU_FTR_SECTION
 	lis	r4,icache_44x_need_flush@ha
 	lwz	r5,icache_44x_need_flush@l(r4)
 	cmplwi	cr0,r5,0
 	bne-	2f
 1:
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
 #endif /* CONFIG_44x */
 BEGIN_FTR_SECTION
 	lwarx	r7,0,r1
@@ -848,6 +850,9 @@ resume_kernel:
 	/* interrupts are hard-disabled at this point */
 restore:
 #ifdef CONFIG_44x
+BEGIN_MMU_FTR_SECTION
+	b	1f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
 	lis	r4,icache_44x_need_flush@ha
 	lwz	r5,icache_44x_need_flush@l(r4)
 	cmplwi	cr0,r5,0
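The BEGIN_MMU_FTR_SECTION/END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x) pair
brackets code that is patched at boot based on the detected MMU type: the
icache_44x_need_flush check stays only on classic 44x cores, and the second
hunk patches in a branch that skips it entirely on 47x. The run-time effect
is roughly the following C; this is a hedged analogue only — the kernel
rewrites the bracketed instructions in place rather than branching:

#define MMU_FTR_TYPE_47x_BIT	(1u << 0)	/* hypothetical bit value */

static int icache_44x_need_flush;	/* stand-in for the kernel flag */

static void syscall_exit_44x(unsigned int mmu_features)
{
	/* BEGIN_MMU_FTR_SECTION..END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x):
	 * kept only when the 47x feature bit is clear, i.e. on classic 44x */
	if (!(mmu_features & MMU_FTR_TYPE_47x_BIT)) {
		if (icache_44x_need_flush) {
			/* flush the icache before returning to user space */
		}
	}
	/* on 47x the whole check is patched out (second hunk: branch over it) */
}

int main(void)
{
	syscall_exit_44x(0);			/* 44x path */
	syscall_exit_44x(MMU_FTR_TYPE_47x_BIT);	/* 47x path: check skipped */
	return 0;
}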
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 07109d843787..42e9d908914a 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -556,15 +556,6 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
 2:
 	TRACE_AND_RESTORE_IRQ(r5);

-#ifdef CONFIG_PERF_EVENTS
-	/* check paca->perf_event_pending if we're enabling ints */
-	lbz	r3,PACAPERFPEND(r13)
-	and.	r3,r3,r5
-	beq	27f
-	bl	.perf_event_do_pending
-27:
-#endif /* CONFIG_PERF_EVENTS */
-
 	/* extract EE bit and use it to restore paca->hard_enabled */
 	ld	r3,_MSR(r1)
 	rldicl	r4,r3,49,63		/* r0 = (r3 >> 15) & 1 */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e3be98ffe2a7..3e423fbad6bc 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -735,8 +735,11 @@ _STATIC(do_hash_page)
 	std	r3,_DAR(r1)
 	std	r4,_DSISR(r1)

-	andis.	r0,r4,0xa450		/* weird error? */
+	andis.	r0,r4,0xa410		/* weird error? */
 	bne-	handle_page_fault	/* if not, try to insert a HPTE */
+	andis.	r0,r4,DSISR_DABRMATCH@h
+	bne-	handle_dabr_fault
+
 BEGIN_FTR_SECTION
 	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
 	bne-	do_ste_alloc		/* If so handle it */
@@ -823,6 +826,14 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
 	bl	.raw_local_irq_restore
 	b	11f

+/* We have a data breakpoint exception - handle it */
+handle_dabr_fault:
+	ld	r4,_DAR(r1)
+	ld	r5,_DSISR(r1)
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.do_dabr
+	b	.ret_from_except_lite
+
 /* Here we have a page fault that hash_page can't handle. */
 handle_page_fault:
 	ENABLE_INTS
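The do_hash_page changes do two things: one bit is pulled out of the old
0xa450 "weird error" mask (leaving 0xa410) and given its own dispatch to
do_dabr(), so data-breakpoint hits are reported instead of being treated as
ordinary faults. In C, the new dispatch order reads roughly as follows — a
hedged sketch; the masks are the high-halfword values used by andis. above,
and the DSISR_DABRMATCH@h value of 0x0040 is an assumption:

#include <stdio.h>

#define WEIRD_ERROR_HI	0xa410u		/* was 0xa450; DABR bit pulled out */
#define DABRMATCH_HI	0x0040u		/* DSISR_DABRMATCH@h, assumed value */

static const char *classify(unsigned int dsisr_hi)
{
	if (dsisr_hi & WEIRD_ERROR_HI)
		return "handle_page_fault";	/* unusual fault: straight to C */
	if (dsisr_hi & DABRMATCH_HI)
		return "handle_dabr_fault";	/* data breakpoint: do_dabr() */
	return "hash_page";			/* normal miss: insert an HPTE */
}

int main(void)
{
	printf("%s\n", classify(0x0040u));	/* DABR match */
	printf("%s\n", classify(0x0000u));	/* plain hash miss */
	return 0;
}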
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index e025e89fe93e..98c4b29a56f4 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -33,6 +33,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/ptrace.h>
 #include <asm/bug.h>
+#include <asm/kvm_book3s_asm.h>

 /* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
 #define LOAD_BAT(n, reg, RA, RB)	\
@@ -303,6 +304,7 @@ __secondary_hold_acknowledge:
  */
 #define EXCEPTION(n, label, hdlr, xfer)	\
 	. = n;				\
+	DO_KVM n;			\
 label:					\
 	EXCEPTION_PROLOG;		\
 	addi	r3,r1,STACK_FRAME_OVERHEAD;	\
@@ -358,6 +360,7 @@ i##n: \
  * -- paulus.
  */
 	. = 0x200
+	DO_KVM  0x200
 	mtspr	SPRN_SPRG_SCRATCH0,r10
 	mtspr	SPRN_SPRG_SCRATCH1,r11
 	mfcr	r10
@@ -381,6 +384,7 @@ i##n: \

 /* Data access exception. */
 	. = 0x300
+	DO_KVM  0x300
 DataAccess:
 	EXCEPTION_PROLOG
 	mfspr	r10,SPRN_DSISR
@@ -397,6 +401,7 @@ DataAccess:

 /* Instruction access exception. */
 	. = 0x400
+	DO_KVM  0x400
 InstructionAccess:
 	EXCEPTION_PROLOG
 	andis.	r0,r9,0x4000		/* no pte found? */
@@ -413,6 +418,7 @@ InstructionAccess:

 /* Alignment exception */
 	. = 0x600
+	DO_KVM  0x600
 Alignment:
 	EXCEPTION_PROLOG
 	mfspr	r4,SPRN_DAR
@@ -427,6 +433,7 @@ Alignment:

 /* Floating-point unavailable */
 	. = 0x800
+	DO_KVM  0x800
 FPUnavailable:
 BEGIN_FTR_SECTION
 /*
@@ -450,6 +457,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)

 /* System call */
 	. = 0xc00
+	DO_KVM  0xc00
 SystemCall:
 	EXCEPTION_PROLOG
 	EXC_XFER_EE_LITE(0xc00, DoSyscall)
@@ -467,9 +475,11 @@ SystemCall:
  * by executing an altivec instruction.
  */
 	. = 0xf00
+	DO_KVM  0xf00
 	b	PerformanceMonitor

 	. = 0xf20
+	DO_KVM  0xf20
 	b	AltiVecUnavailable

 /*
@@ -882,6 +892,10 @@ __secondary_start:
 	RFI
 #endif /* CONFIG_SMP */

+#ifdef CONFIG_KVM_BOOK3S_HANDLER
+#include "../kvm/book3s_rmhandlers.S"
+#endif
+
 /*
  * Those generic dummy functions are kept for CPUs not
  * included in CONFIG_6xx
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 711368b993f2..5ab484ef06a7 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -37,6 +37,7 @@
 #include <asm/thread_info.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/synch.h>
 #include "head_booke.h"


@@ -69,165 +70,7 @@ _ENTRY(_start);
 	mr	r27,r7
 	li	r24,0		/* CPU number */

-/*
- * In case the firmware didn't do it, we apply some workarounds
- * that are good for all 440 core variants here
- */
-	mfspr	r3,SPRN_CCR0
-	rlwinm	r3,r3,0,0,27	/* disable icache prefetch */
-	isync
-	mtspr	SPRN_CCR0,r3
-	isync
-	sync
-
-/*
- * Set up the initial MMU state
- *
- * We are still executing code at the virtual address
- * mappings set by the firmware for the base of RAM.
- *
- * We first invalidate all TLB entries but the one
- * we are running from.  We then load the KERNELBASE
- * mappings so we can begin to use kernel addresses
- * natively and so the interrupt vector locations are
- * permanently pinned (necessary since Book E
- * implementations always have translation enabled).
- *
- * TODO: Use the known TLB entry we are running from to
- *	 determine which physical region we are located
- *	 in.  This can be used to determine where in RAM
- *	 (on a shared CPU system) or PCI memory space
- *	 (on a DRAMless system) we are located.
- *	 For now, we assume a perfect world which means
- *	 we are located at the base of DRAM (physical 0).
- */
-
-/*
- * Search TLB for entry that we are currently using.
- * Invalidate all entries but the one we are using.
- */
-	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
-	mfspr	r3,SPRN_PID			/* Get PID */
-	mfmsr	r4				/* Get MSR */
-	andi.	r4,r4,MSR_IS@l			/* TS=1? */
-	beq	wmmucr				/* If not, leave STS=0 */
-	oris	r3,r3,PPC44x_MMUCR_STS@h	/* Set STS=1 */
-wmmucr:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
-	sync
-
-	bl	invstr				/* Find our address */
-invstr:	mflr	r5				/* Make it accessible */
-	tlbsx	r23,0,r5			/* Find entry we are in */
-	li	r4,0				/* Start at TLB entry 0 */
-	li	r3,0				/* Set PAGEID inval value */
-1:	cmpw	r23,r4				/* Is this our entry? */
-	beq	skpinv				/* If so, skip the inval */
-	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
-skpinv:	addi	r4,r4,1				/* Increment */
-	cmpwi	r4,64				/* Are we done? */
-	bne	1b				/* If not, repeat */
-	isync					/* If so, context change */
-
-/*
- * Configure and load pinned entry into TLB slot 63.
- */
-
-	lis	r3,PAGE_OFFSET@h
-	ori	r3,r3,PAGE_OFFSET@l
-
-	/* Kernel is at the base of RAM */
-	li r4, 0			/* Load the kernel physical address */
-
-	/* Load the kernel PID = 0 */
-	li	r0,0
-	mtspr	SPRN_PID,r0
-	sync
-
-	/* Initialize MMUCR */
-	li	r5,0
-	mtspr	SPRN_MMUCR,r5
-	sync
-
-	/* pageid fields */
-	clrrwi	r3,r3,10		/* Mask off the effective page number */
-	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
-
-	/* xlat fields */
-	clrrwi	r4,r4,10		/* Mask off the real page number */
-	/* ERPN is 0 for first 4GB page */
-
-	/* attrib fields */
-	/* Added guarded bit to protect against speculative loads/stores */
-	li	r5,0
-	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
-
-	li	r0,63			/* TLB slot 63 */
-
-	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
-	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
-	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */
-
-	/* Force context change */
-	mfmsr	r0
-	mtspr	SPRN_SRR1, r0
-	lis	r0,3f@h
-	ori	r0,r0,3f@l
-	mtspr	SPRN_SRR0,r0
-	sync
-	rfi
-
-	/* If necessary, invalidate original entry we used */
-3:	cmpwi	r23,63
-	beq	4f
-	li	r6,0
-	tlbwe	r6,r23,PPC44x_TLB_PAGEID
-	isync
-
-4:
-#ifdef CONFIG_PPC_EARLY_DEBUG_44x
-	/* Add UART mapping for early debug. */
-
-	/* pageid fields */
-	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
-	ori	r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K
-
-	/* xlat fields */
-	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
-	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
-
-	/* attrib fields */
-	li	r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
-	li	r0,62			/* TLB slot 0 */
-
-	tlbwe	r3,r0,PPC44x_TLB_PAGEID
-	tlbwe	r4,r0,PPC44x_TLB_XLAT
-	tlbwe	r5,r0,PPC44x_TLB_ATTRIB
-
-	/* Force context change */
-	isync
-#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
-
-	/* Establish the interrupt vector offsets */
-	SET_IVOR(0,  CriticalInput);
-	SET_IVOR(1,  MachineCheck);
-	SET_IVOR(2,  DataStorage);
-	SET_IVOR(3,  InstructionStorage);
-	SET_IVOR(4,  ExternalInput);
-	SET_IVOR(5,  Alignment);
-	SET_IVOR(6,  Program);
-	SET_IVOR(7,  FloatingPointUnavailable);
-	SET_IVOR(8,  SystemCall);
-	SET_IVOR(9,  AuxillaryProcessorUnavailable);
-	SET_IVOR(10, Decrementer);
-	SET_IVOR(11, FixedIntervalTimer);
-	SET_IVOR(12, WatchdogTimer);
-	SET_IVOR(13, DataTLBError);
-	SET_IVOR(14, InstructionTLBError);
-	SET_IVOR(15, DebugCrit);
-
-	/* Establish the interrupt vector base */
-	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
-	mtspr	SPRN_IVPR,r4
+	bl	init_cpu_state

 /*
  * This is where the main kernel code starts.
@@ -349,7 +192,7 @@ interrupt_base:
 #endif

 	/* Data TLB Error Interrupt */
-	START_EXCEPTION(DataTLBError)
+	START_EXCEPTION(DataTLBError44x)
 	mtspr	SPRN_SPRG_WSCRATCH0, r10		/* Save some working registers */
 	mtspr	SPRN_SPRG_WSCRATCH1, r11
 	mtspr	SPRN_SPRG_WSCRATCH2, r12
@@ -440,7 +283,7 @@ tlb_44x_patch_hwater_D:
 	mfspr	r10,SPRN_DEAR

 	/* Jump to common tlb load */
-	b	finish_tlb_load
+	b	finish_tlb_load_44x

 2:
 	/* The bailout. Restore registers to pre-exception conditions
@@ -460,7 +303,7 @@ tlb_44x_patch_hwater_D:
 	 * information from different registers and bailout
 	 * to a different point.
 	 */
-	START_EXCEPTION(InstructionTLBError)
+	START_EXCEPTION(InstructionTLBError44x)
 	mtspr	SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
 	mtspr	SPRN_SPRG_WSCRATCH1, r11
 	mtspr	SPRN_SPRG_WSCRATCH2, r12
@@ -536,7 +379,7 @@ tlb_44x_patch_hwater_I:
 	mfspr	r10,SPRN_SRR0

 	/* Jump to common TLB load point */
-	b	finish_tlb_load
+	b	finish_tlb_load_44x

 2:
 	/* The bailout. Restore registers to pre-exception conditions
@@ -550,15 +393,7 @@ tlb_44x_patch_hwater_I:
 	mfspr	r10, SPRN_SPRG_RSCRATCH0
 	b	InstructionStorage

-	/* Debug Interrupt */
-	DEBUG_CRIT_EXCEPTION
-
-/*
- * Local functions
- */
-
 /*
-
  * Both the instruction and data TLB miss get to this
  * point to load the TLB.
  * r10 - EA of fault
@@ -568,7 +403,7 @@ tlb_44x_patch_hwater_I:
  * MMUCR - loaded with proper value when we get here
  * Upon exit, we reload everything and RFI.
  */
-finish_tlb_load:
+finish_tlb_load_44x:
 	/* Combine RPN & ERPN an write WS 0 */
 	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
 	tlbwe	r11,r13,PPC44x_TLB_XLAT
@@ -601,6 +436,227 @@ finish_tlb_load:
 	mfspr	r10, SPRN_SPRG_RSCRATCH0
 	rfi			/* Force context change */

+/* TLB error interrupts for 476
+ */
+#ifdef CONFIG_PPC_47x
+	START_EXCEPTION(DataTLBError47x)
+	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
+	mtspr	SPRN_SPRG_WSCRATCH1,r11
+	mtspr	SPRN_SPRG_WSCRATCH2,r12
+	mtspr	SPRN_SPRG_WSCRATCH3,r13
+	mfcr	r11
+	mtspr	SPRN_SPRG_WSCRATCH4,r11
+	mfspr	r10,SPRN_DEAR		/* Get faulting address */
+
+	/* If we are faulting a kernel address, we have to use the
+	 * kernel page tables.
+	 */
+	lis	r11,PAGE_OFFSET@h
+	cmplw	cr0,r10,r11
+	blt+	3f
+	lis	r11,swapper_pg_dir@h
+	ori	r11,r11, swapper_pg_dir@l
+	li	r12,0			/* MMUCR = 0 */
+	b	4f
+
+	/* Get the PGD for the current thread and setup MMUCR */
+3:	mfspr	r11,SPRN_SPRG3
+	lwz	r11,PGDIR(r11)
+	mfspr	r12,SPRN_PID		/* Get PID */
+4:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */
+
+	/* Mask of required permission bits. Note that while we
+	 * do copy ESR:ST to _PAGE_RW position as trying to write
+	 * to an RO page is pretty common, we don't do it with
+	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
+	 * event so I'd rather take the overhead when it happens
+	 * rather than adding an instruction here. We should measure
+	 * whether the whole thing is worth it in the first place
+	 * as we could avoid loading SPRN_ESR completely in the first
+	 * place...
+	 *
+	 * TODO: Is it worth doing that mfspr & rlwimi in the first
+	 *       place or can we save a couple of instructions here ?
+	 */
+	mfspr	r12,SPRN_ESR
+	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
+	rlwimi	r13,r12,10,30,30
+
+	/* Load the PTE */
+	/* Compute pgdir/pmd offset */
+	rlwinm	r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
+	lwzx	r11,r12,r11		/* Get pgd/pmd entry */
+
+	/* Word 0 is EPN,V,TS,DSIZ */
+	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
+	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size*/
+	li	r12,0
+	tlbwe	r10,r12,0
+
+	/* XXX can we do better ? Need to make sure tlbwe has established
+	 * latch V bit in MMUCR0 before the PTE is loaded further down */
+#ifdef CONFIG_SMP
+	isync
+#endif
+
+	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
+	/* Compute pte address */
+	rlwimi	r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
+	beq	2f			/* Bail if no table */
+	lwz	r11,0(r12)		/* Get high word of pte entry */
+
+	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
+	 * bottom of r12 to create a data dependency... We can also use r10
+	 * as destination nowadays
+	 */
+#ifdef CONFIG_SMP
+	lwsync
+#endif
+	lwz	r12,4(r12)		/* Get low word of pte entry */
+
+	andc.	r13,r13,r12		/* Check permission */
+
+	/* Jump to common tlb load */
+	beq	finish_tlb_load_47x
+
+2:	/* The bailout. Restore registers to pre-exception conditions
+	 * and call the heavyweights to help us out.
+	 */
+	mfspr	r11,SPRN_SPRG_RSCRATCH4
+	mtcr	r11
+	mfspr	r13,SPRN_SPRG_RSCRATCH3
+	mfspr	r12,SPRN_SPRG_RSCRATCH2
+	mfspr	r11,SPRN_SPRG_RSCRATCH1
+	mfspr	r10,SPRN_SPRG_RSCRATCH0
+	b	DataStorage
+
+	/* Instruction TLB Error Interrupt */
+	/*
+	 * Nearly the same as above, except we get our
+	 * information from different registers and bailout
+	 * to a different point.
+	 */
+	START_EXCEPTION(InstructionTLBError47x)
+	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
+	mtspr	SPRN_SPRG_WSCRATCH1,r11
+	mtspr	SPRN_SPRG_WSCRATCH2,r12
+	mtspr	SPRN_SPRG_WSCRATCH3,r13
+	mfcr	r11
+	mtspr	SPRN_SPRG_WSCRATCH4,r11
+	mfspr	r10,SPRN_SRR0		/* Get faulting address */
+
+	/* If we are faulting a kernel address, we have to use the
+	 * kernel page tables.
+	 */
+	lis	r11,PAGE_OFFSET@h
+	cmplw	cr0,r10,r11
+	blt+	3f
+	lis	r11,swapper_pg_dir@h
+	ori	r11,r11, swapper_pg_dir@l
+	li	r12,0			/* MMUCR = 0 */
+	b	4f
+
+	/* Get the PGD for the current thread and setup MMUCR */
+3:	mfspr	r11,SPRN_SPRG_THREAD
+	lwz	r11,PGDIR(r11)
+	mfspr	r12,SPRN_PID		/* Get PID */
+4:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */
+
+	/* Make up the required permissions */
+	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+
+	/* Load PTE */
+	/* Compute pgdir/pmd offset */
+	rlwinm	r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
+	lwzx	r11,r12,r11		/* Get pgd/pmd entry */
+
+	/* Word 0 is EPN,V,TS,DSIZ */
+	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
+	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size*/
+	li	r12,0
+	tlbwe	r10,r12,0
+
+	/* XXX can we do better ? Need to make sure tlbwe has established
+	 * latch V bit in MMUCR0 before the PTE is loaded further down */
+#ifdef CONFIG_SMP
+	isync
+#endif
+
+	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
+	/* Compute pte address */
+	rlwimi	r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
+	beq	2f			/* Bail if no table */
+
+	lwz	r11,0(r12)		/* Get high word of pte entry */
+	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
+	 * bottom of r12 to create a data dependency... We can also use r10
+	 * as destination nowadays
+	 */
+#ifdef CONFIG_SMP
+	lwsync
+#endif
+	lwz	r12,4(r12)		/* Get low word of pte entry */
+
+	andc.	r13,r13,r12		/* Check permission */
+
+	/* Jump to common TLB load point */
+	beq	finish_tlb_load_47x
+
+2:	/* The bailout. Restore registers to pre-exception conditions
+	 * and call the heavyweights to help us out.
+	 */
+	mfspr	r11, SPRN_SPRG_RSCRATCH4
+	mtcr	r11
+	mfspr	r13, SPRN_SPRG_RSCRATCH3
+	mfspr	r12, SPRN_SPRG_RSCRATCH2
+	mfspr	r11, SPRN_SPRG_RSCRATCH1
+	mfspr	r10, SPRN_SPRG_RSCRATCH0
+	b	InstructionStorage
+
+/*
+ * Both the instruction and data TLB miss get to this
+ * point to load the TLB.
+ * 	r10 - free to use
+ * 	r11 - PTE high word value
+ * 	r12 - PTE low word value
+ *      r13 - free to use
+ * 	MMUCR - loaded with proper value when we get here
+ * 	Upon exit, we reload everything and RFI.
+ */
+finish_tlb_load_47x:
+	/* Combine RPN & ERPN an write WS 1 */
+	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
+	tlbwe	r11,r13,1
+
+	/* And make up word 2 */
+	li	r10,0xf85		/* Mask to apply from PTE */
+	rlwimi	r10,r12,29,30,30	/* DIRTY -> SW position */
+	and	r11,r12,r10		/* Mask PTE bits to keep */
+	andi.	r10,r12,_PAGE_USER	/* User page ? */
+	beq	1f			/* nope, leave U bits empty */
+	rlwimi	r11,r11,3,26,28		/* yes, copy S bits to U */
+1:	tlbwe	r11,r13,2
+
+	/* Done...restore registers and get out of here.
+	 */
+	mfspr	r11, SPRN_SPRG_RSCRATCH4
+	mtcr	r11
+	mfspr	r13, SPRN_SPRG_RSCRATCH3
+	mfspr	r12, SPRN_SPRG_RSCRATCH2
+	mfspr	r11, SPRN_SPRG_RSCRATCH1
+	mfspr	r10, SPRN_SPRG_RSCRATCH0
+	rfi
+
+#endif /* CONFIG_PPC_47x */
+
+	/* Debug Interrupt */
+	/*
+	 * This statement needs to exist at the end of the IVPR
+	 * definition just in case you end up taking a debug
+	 * exception within another exception.
+	 */
+	DEBUG_CRIT_EXCEPTION
+
 /*
  * Global functions
  */
@@ -647,6 +703,428 @@ _GLOBAL(set_context)
 	blr

 /*
+ * Init CPU state. This is called at boot time or for secondary CPUs
+ * to setup initial TLB entries, setup IVORs, etc...
+ *
+ */
+_GLOBAL(init_cpu_state)
+	mflr	r22
+#ifdef CONFIG_PPC_47x
+	/* We use the PVR to differenciate 44x cores from 476 */
+	mfspr	r3,SPRN_PVR
+	srwi	r3,r3,16
+	cmplwi	cr0,r3,PVR_476@h
+	beq	head_start_47x
+	cmplwi	cr0,r3,PVR_476_ISS@h
+	beq	head_start_47x
+#endif /* CONFIG_PPC_47x */
+
+/*
+ * In case the firmware didn't do it, we apply some workarounds
+ * that are good for all 440 core variants here
+ */
+	mfspr	r3,SPRN_CCR0
+	rlwinm	r3,r3,0,0,27	/* disable icache prefetch */
+	isync
+	mtspr	SPRN_CCR0,r3
+	isync
+	sync
+
+/*
+ * Set up the initial MMU state for 44x
+ *
+ * We are still executing code at the virtual address
+ * mappings set by the firmware for the base of RAM.
+ *
+ * We first invalidate all TLB entries but the one
+ * we are running from.  We then load the KERNELBASE
+ * mappings so we can begin to use kernel addresses
+ * natively and so the interrupt vector locations are
+ * permanently pinned (necessary since Book E
+ * implementations always have translation enabled).
+ *
+ * TODO: Use the known TLB entry we are running from to
+ *	 determine which physical region we are located
+ *	 in.  This can be used to determine where in RAM
+ *	 (on a shared CPU system) or PCI memory space
+ *	 (on a DRAMless system) we are located.
+ *	 For now, we assume a perfect world which means
+ *	 we are located at the base of DRAM (physical 0).
+ */
+
+/*
+ * Search TLB for entry that we are currently using.
+ * Invalidate all entries but the one we are using.
+ */
+	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
+	mfspr	r3,SPRN_PID			/* Get PID */
+	mfmsr	r4				/* Get MSR */
+	andi.	r4,r4,MSR_IS@l			/* TS=1? */
+	beq	wmmucr				/* If not, leave STS=0 */
+	oris	r3,r3,PPC44x_MMUCR_STS@h	/* Set STS=1 */
+wmmucr:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
+	sync
+
+	bl	invstr				/* Find our address */
+invstr:	mflr	r5				/* Make it accessible */
+	tlbsx	r23,0,r5			/* Find entry we are in */
+	li	r4,0				/* Start at TLB entry 0 */
+	li	r3,0				/* Set PAGEID inval value */
+1:	cmpw	r23,r4				/* Is this our entry? */
+	beq	skpinv				/* If so, skip the inval */
+	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
+skpinv:	addi	r4,r4,1				/* Increment */
+	cmpwi	r4,64				/* Are we done? */
+	bne	1b				/* If not, repeat */
+	isync					/* If so, context change */
+
+/*
+ * Configure and load pinned entry into TLB slot 63.
+ */
+
+	lis	r3,PAGE_OFFSET@h
+	ori	r3,r3,PAGE_OFFSET@l
+
+	/* Kernel is at the base of RAM */
+	li r4, 0			/* Load the kernel physical address */
+
+	/* Load the kernel PID = 0 */
+	li	r0,0
+	mtspr	SPRN_PID,r0
+	sync
+
+	/* Initialize MMUCR */
+	li	r5,0
+	mtspr	SPRN_MMUCR,r5
+	sync
+
+	/* pageid fields */
+	clrrwi	r3,r3,10		/* Mask off the effective page number */
+	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
+
+	/* xlat fields */
+	clrrwi	r4,r4,10		/* Mask off the real page number */
+	/* ERPN is 0 for first 4GB page */
+
+	/* attrib fields */
+	/* Added guarded bit to protect against speculative loads/stores */
+	li	r5,0
+	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
+
+	li	r0,63			/* TLB slot 63 */
+
+	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
+	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
+	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */
+
+	/* Force context change */
+	mfmsr	r0
+	mtspr	SPRN_SRR1, r0
+	lis	r0,3f@h
+	ori	r0,r0,3f@l
+	mtspr	SPRN_SRR0,r0
+	sync
+	rfi
+
+	/* If necessary, invalidate original entry we used */
+3:	cmpwi	r23,63
+	beq	4f
+	li	r6,0
+	tlbwe	r6,r23,PPC44x_TLB_PAGEID
+	isync
+
+4:
+#ifdef CONFIG_PPC_EARLY_DEBUG_44x
+	/* Add UART mapping for early debug. */
+
+	/* pageid fields */
+	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
+	ori	r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K
+
+	/* xlat fields */
+	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
+	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
+
+	/* attrib fields */
+	li	r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
+	li	r0,62			/* TLB slot 0 */
+
+	tlbwe	r3,r0,PPC44x_TLB_PAGEID
+	tlbwe	r4,r0,PPC44x_TLB_XLAT
+	tlbwe	r5,r0,PPC44x_TLB_ATTRIB
+
+	/* Force context change */
+	isync
+#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
+
+	/* Establish the interrupt vector offsets */
+	SET_IVOR(0,  CriticalInput);
+	SET_IVOR(1,  MachineCheck);
+	SET_IVOR(2,  DataStorage);
+	SET_IVOR(3,  InstructionStorage);
+	SET_IVOR(4,  ExternalInput);
+	SET_IVOR(5,  Alignment);
+	SET_IVOR(6,  Program);
+	SET_IVOR(7,  FloatingPointUnavailable);
+	SET_IVOR(8,  SystemCall);
+	SET_IVOR(9,  AuxillaryProcessorUnavailable);
+	SET_IVOR(10, Decrementer);
+	SET_IVOR(11, FixedIntervalTimer);
+	SET_IVOR(12, WatchdogTimer);
+	SET_IVOR(13, DataTLBError44x);
+	SET_IVOR(14, InstructionTLBError44x);
+	SET_IVOR(15, DebugCrit);
+
+	b	head_start_common
+
+
+#ifdef CONFIG_PPC_47x
+
+#ifdef CONFIG_SMP
+
+/* Entry point for secondary 47x processors */
+_GLOBAL(start_secondary_47x)
+	mr	r24,r3		/* CPU number */
+
+	bl	init_cpu_state
+
+	/* Now we need to bolt the rest of kernel memory which
+	 * is done in C code. We must be careful because our task
+	 * struct or our stack can (and will probably) be out
+	 * of reach of the initial 256M TLB entry, so we use a
+	 * small temporary stack in .bss for that. This works
+	 * because only one CPU at a time can be in this code
+	 */
+	lis	r1,temp_boot_stack@h
+	ori	r1,r1,temp_boot_stack@l
+	addi	r1,r1,1024-STACK_FRAME_OVERHEAD
+	li	r0,0
+	stw	r0,0(r1)
+	bl	mmu_init_secondary
+
+	/* Now we can get our task struct and real stack pointer */
+
+	/* Get current_thread_info and current */
+	lis	r1,secondary_ti@ha
+	lwz	r1,secondary_ti@l(r1)
+	lwz	r2,TI_TASK(r1)
+
+	/* Current stack pointer */
+	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+	li	r0,0
+	stw	r0,0(r1)
+
+	/* Kernel stack for exception entry in SPRG3 */
+	addi	r4,r2,THREAD	/* init task's THREAD */
+	mtspr	SPRN_SPRG3,r4
+
+	b	start_secondary
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Set up the initial MMU state for 44x
+ *
+ * We are still executing code at the virtual address
+ * mappings set by the firmware for the base of RAM.
+ */
+
+head_start_47x:
+	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
+	mfspr	r3,SPRN_PID			/* Get PID */
+	mfmsr	r4				/* Get MSR */
+	andi.	r4,r4,MSR_IS@l			/* TS=1? */
+	beq	1f				/* If not, leave STS=0 */
+	oris	r3,r3,PPC47x_MMUCR_STS@h	/* Set STS=1 */
+1:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
+	sync
+
+	/* Find the entry we are running from */
+	bl	1f
+1:	mflr	r23
+	tlbsx	r23,0,r23
+	tlbre	r24,r23,0
+	tlbre	r25,r23,1
+	tlbre	r26,r23,2
+
+/*
+ * Cleanup time
+ */
+
+	/* Initialize MMUCR */
+	li	r5,0
+	mtspr	SPRN_MMUCR,r5
+	sync
+
+clear_all_utlb_entries:
+
+	#; Set initial values.
+
+	addis		r3,0,0x8000
+	addi		r4,0,0
+	addi		r5,0,0
+	b		clear_utlb_entry
+
+	#; Align the loop to speed things up.
+
+	.align		6
+
+clear_utlb_entry:
+
+	tlbwe		r4,r3,0
+	tlbwe		r5,r3,1
+	tlbwe		r5,r3,2
+	addis		r3,r3,0x2000
+	cmpwi		r3,0
+	bne		clear_utlb_entry
+	addis		r3,0,0x8000
+	addis		r4,r4,0x100
+	cmpwi		r4,0
+	bne		clear_utlb_entry
+
+	#; Restore original entry.
+
+	oris	r23,r23,0x8000	/* specify the way */
+	tlbwe		r24,r23,0
+	tlbwe		r25,r23,1
+	tlbwe		r26,r23,2
+
+/*
+ * Configure and load pinned entry into TLB for the kernel core
+ */
+
+	lis	r3,PAGE_OFFSET@h
+	ori	r3,r3,PAGE_OFFSET@l
+
+	/* Kernel is at the base of RAM */
+	li r4, 0			/* Load the kernel physical address */
+
+	/* Load the kernel PID = 0 */
+	li	r0,0
+	mtspr	SPRN_PID,r0
+	sync
+
+	/* Word 0 */
+	clrrwi	r3,r3,12		/* Mask off the effective page number */
+	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M
+
+	/* Word 1 */
+	clrrwi	r4,r4,12		/* Mask off the real page number */
+	/* ERPN is 0 for first 4GB page */
+	/* Word 2 */
+	li	r5,0
+	ori	r5,r5,PPC47x_TLB2_S_RWX
+#ifdef CONFIG_SMP
+	ori	r5,r5,PPC47x_TLB2_M
+#endif
+
+	/* We write to way 0 and bolted 0 */
+	lis	r0,0x8800
+	tlbwe	r3,r0,0
+	tlbwe	r4,r0,1
+	tlbwe	r5,r0,2
+
+/*
+ * Configure SSPCR, ISPCR and USPCR for now to search everything, we can fix
+ * them up later
+ */
+	LOAD_REG_IMMEDIATE(r3, 0x9abcdef0)
+	mtspr	SPRN_SSPCR,r3
+	mtspr	SPRN_USPCR,r3
+	LOAD_REG_IMMEDIATE(r3, 0x12345670)
+	mtspr	SPRN_ISPCR,r3
+
+	/* Force context change */
+	mfmsr	r0
+	mtspr	SPRN_SRR1, r0
+	lis	r0,3f@h
+	ori	r0,r0,3f@l
+	mtspr	SPRN_SRR0,r0
+	sync
+	rfi
+
+	/* Invalidate original entry we used */
+3:
+	rlwinm	r24,r24,0,21,19	/* clear the "valid" bit */
+	tlbwe	r24,r23,0
+	addi	r24,0,0
+	tlbwe	r24,r23,1
+	tlbwe	r24,r23,2
+	isync			/* Clear out the shadow TLB entries */
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_44x
+	/* Add UART mapping for early debug. */
+
+	/* Word 0 */
+	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
+	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_TS | PPC47x_TLB0_1M
+
+	/* Word 1 */
+	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
+	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
+
+	/* Word 2 */
+	li	r5,(PPC47x_TLB2_S_RW | PPC47x_TLB2_IMG)
+
+	/* Bolted in way 0, bolt slot 5, we -hope- we don't hit the same
+	 * congruence class as the kernel, we need to make sure of it at
+	 * some point
+	 */
+	lis	r0,0x8d00
+	tlbwe	r3,r0,0
+	tlbwe	r4,r0,1
+	tlbwe	r5,r0,2
+
+	/* Force context change */
+	isync
+#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
+
+	/* Establish the interrupt vector offsets */
+	SET_IVOR(0,  CriticalInput);
+	SET_IVOR(1,  MachineCheckA);
+	SET_IVOR(2,  DataStorage);
+	SET_IVOR(3,  InstructionStorage);
+	SET_IVOR(4,  ExternalInput);
+	SET_IVOR(5,  Alignment);
+	SET_IVOR(6,  Program);
+	SET_IVOR(7,  FloatingPointUnavailable);
+	SET_IVOR(8,  SystemCall);
+	SET_IVOR(9,  AuxillaryProcessorUnavailable);
+	SET_IVOR(10, Decrementer);
+	SET_IVOR(11, FixedIntervalTimer);
+	SET_IVOR(12, WatchdogTimer);
+	SET_IVOR(13, DataTLBError47x);
+	SET_IVOR(14, InstructionTLBError47x);
+	SET_IVOR(15, DebugCrit);
+
+	/* We configure icbi to invalidate 128 bytes at a time since the
+	 * current 32-bit kernel code isn't too happy with icache != dcache
+	 * block size
+	 */
+	mfspr	r3,SPRN_CCR0
+	oris	r3,r3,0x0020
+	mtspr	SPRN_CCR0,r3
+	isync
+
+#endif /* CONFIG_PPC_47x */
+
+/*
+ * Here we are back to code that is common between 44x and 47x
+ *
+ * We proceed to further kernel initialization and return to the
+ * main kernel entry
+ */
+head_start_common:
+	/* Establish the interrupt vector base */
+	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
+	mtspr	SPRN_IVPR,r4
+
+	addis	r22,r22,KERNELBASE@h
+	mtlr	r22
+	isync
+	blr
+
+/*
  * We put a few things here that have to be page-aligned. This stuff
  * goes at the beginning of the data segment, which is page-aligned.
  */
@@ -671,3 +1149,9 @@ swapper_pg_dir:
671 */ 1149 */
672abatron_pteptrs: 1150abatron_pteptrs:
673 .space 8 1151 .space 8
1152
1153#ifdef CONFIG_SMP
1154 .align 12
1155temp_boot_stack:
1156 .space 1024
1157#endif /* CONFIG_SMP */
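
For reference, each 476-style TLB entry above is written as three words -- EPN/valid/size, RPN/ERPN, then attributes and permissions -- with one tlbwe per word. A C sketch of the composition used for the early-debug UART mapping; the PPC47x_TLB* constants here are stand-in values for illustration only, not the real bit layout:

	#include <stdint.h>

	#define PPC47x_TLB0_VALID	0x00000800u	/* stand-in value */
	#define PPC47x_TLB0_TS		0x00000400u	/* stand-in value */
	#define PPC47x_TLB0_1M		0x00000090u	/* stand-in value */
	#define PPC47x_TLB2_S_RW	0x00000006u	/* stand-in value */
	#define PPC47x_TLB2_IMG		0x00070000u	/* stand-in value */

	struct tlb47x_entry {
		uint32_t word0;	/* EPN | valid | translation space | size */
		uint32_t word1;	/* RPN | ERPN (0 below 4GB) */
		uint32_t word2;	/* WIMG attributes and permissions */
	};

	static struct tlb47x_entry make_uart_entry(uint32_t vaddr,
						   uint32_t phys_low,
						   uint32_t erpn_bits)
	{
		struct tlb47x_entry e;

		e.word0 = vaddr | PPC47x_TLB0_VALID | PPC47x_TLB0_TS
			  | PPC47x_TLB0_1M;
		e.word1 = (phys_low & ~0xfffu) | erpn_bits;
		e.word2 = PPC47x_TLB2_S_RW | PPC47x_TLB2_IMG;
		return e;
	}
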
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index bed9a29ee383..844a44b64472 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -37,7 +37,7 @@
37#include <asm/firmware.h> 37#include <asm/firmware.h>
38#include <asm/page_64.h> 38#include <asm/page_64.h>
39#include <asm/irqflags.h> 39#include <asm/irqflags.h>
40#include <asm/kvm_book3s_64_asm.h> 40#include <asm/kvm_book3s_asm.h>
41 41
42/* The physical memory is laid out such that the secondary processor 42/* The physical memory is laid out such that the secondary processor
43 * spin code sits at 0x0000...0x00ff. On server, the vectors follow 43 * spin code sits at 0x0000...0x00ff. On server, the vectors follow
@@ -169,7 +169,7 @@ exception_marker:
169/* KVM trampoline code needs to be close to the interrupt handlers */ 169/* KVM trampoline code needs to be close to the interrupt handlers */
170 170
171#ifdef CONFIG_KVM_BOOK3S_64_HANDLER 171#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
172#include "../kvm/book3s_64_rmhandlers.S" 172#include "../kvm/book3s_rmhandlers.S"
173#endif 173#endif
174 174
175_GLOBAL(generic_secondary_thread_init) 175_GLOBAL(generic_secondary_thread_init)
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 3ef743fa5d7c..1f1a04b5c2a4 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -71,9 +71,6 @@ _ENTRY(_start);
71 * in the first level table, but that would require many changes to the 71 * in the first level table, but that would require many changes to the
72 * Linux page directory/table functions that I don't want to do right now. 72 * Linux page directory/table functions that I don't want to do right now.
73 * 73 *
74 * I used to use SPRG2 for a temporary register in the TLB handler, but it
75 * has since been put to other uses. I now use a hack to save a register
76 * and the CCR at memory location 0.....Someday I'll fix this.....
77 * -- Dan 74 * -- Dan
78 */ 75 */
79 .globl __start 76 .globl __start
@@ -302,8 +299,13 @@ InstructionTLBMiss:
302 DO_8xx_CPU6(0x3f80, r3) 299 DO_8xx_CPU6(0x3f80, r3)
303 mtspr SPRN_M_TW, r10 /* Save a couple of working registers */ 300 mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
304 mfcr r10 301 mfcr r10
302#ifdef CONFIG_8xx_CPU6
305 stw r10, 0(r0) 303 stw r10, 0(r0)
306 stw r11, 4(r0) 304 stw r11, 4(r0)
305#else
306 mtspr SPRN_DAR, r10
307 mtspr SPRN_SPRG2, r11
308#endif
307 mfspr r10, SPRN_SRR0 /* Get effective address of fault */ 309 mfspr r10, SPRN_SRR0 /* Get effective address of fault */
308#ifdef CONFIG_8xx_CPU15 310#ifdef CONFIG_8xx_CPU15
309 addi r11, r10, 0x1000 311 addi r11, r10, 0x1000
@@ -318,12 +320,16 @@ InstructionTLBMiss:
318 /* If we are faulting a kernel address, we have to use the 320 /* If we are faulting a kernel address, we have to use the
319 * kernel page tables. 321 * kernel page tables.
320 */ 322 */
323#ifdef CONFIG_MODULES
324 /* Only modules will cause ITLB Misses as we always
325 * pin the first 8MB of kernel memory */
321 andi. r11, r10, 0x0800 /* Address >= 0x80000000 */ 326 andi. r11, r10, 0x0800 /* Address >= 0x80000000 */
322 beq 3f 327 beq 3f
323 lis r11, swapper_pg_dir@h 328 lis r11, swapper_pg_dir@h
324 ori r11, r11, swapper_pg_dir@l 329 ori r11, r11, swapper_pg_dir@l
325 rlwimi r10, r11, 0, 2, 19 330 rlwimi r10, r11, 0, 2, 19
3263: 3313:
332#endif
327 lwz r11, 0(r10) /* Get the level 1 entry */ 333 lwz r11, 0(r10) /* Get the level 1 entry */
328 rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */ 334 rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
329 beq 2f /* If zero, don't try to find a pte */ 335 beq 2f /* If zero, don't try to find a pte */
@@ -339,31 +345,35 @@ InstructionTLBMiss:
339 mfspr r11, SPRN_MD_TWC /* ....and get the pte address */ 345 mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
340 lwz r10, 0(r11) /* Get the pte */ 346 lwz r10, 0(r11) /* Get the pte */
341 347
348#ifdef CONFIG_SWAP
342 andi. r11, r10, _PAGE_ACCESSED | _PAGE_PRESENT 349 andi. r11, r10, _PAGE_ACCESSED | _PAGE_PRESENT
343 cmpwi cr0, r11, _PAGE_ACCESSED | _PAGE_PRESENT 350 cmpwi cr0, r11, _PAGE_ACCESSED | _PAGE_PRESENT
344 bne- cr0, 2f 351 bne- cr0, 2f
345 352#endif
346 /* Clear PP lsb, 0x400 */
347 rlwinm r10, r10, 0, 22, 20
348
349 /* The Linux PTE won't go exactly into the MMU TLB. 353 /* The Linux PTE won't go exactly into the MMU TLB.
350 * Software indicator bits 22 and 28 must be clear. 354 * Software indicator bits 21 and 28 must be clear.
351 * Software indicator bits 24, 25, 26, and 27 must be 355 * Software indicator bits 24, 25, 26, and 27 must be
352 * set. All other Linux PTE bits control the behavior 356 * set. All other Linux PTE bits control the behavior
353 * of the MMU. 357 * of the MMU.
354 */ 358 */
355 li r11, 0x00f0 359 li r11, 0x00f0
356 rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ 360 rlwimi r10, r11, 0, 0x07f8 /* Set 24-27, clear 21-23,28 */
357 DO_8xx_CPU6(0x2d80, r3) 361 DO_8xx_CPU6(0x2d80, r3)
358 mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ 362 mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
359 363
360 mfspr r10, SPRN_M_TW /* Restore registers */ 364 /* Restore registers */
365#ifndef CONFIG_8xx_CPU6
366 mfspr r10, SPRN_DAR
367 mtcr r10
368 mtspr SPRN_DAR, r11 /* Tag DAR */
369 mfspr r11, SPRN_SPRG2
370#else
361 lwz r11, 0(r0) 371 lwz r11, 0(r0)
362 mtcr r11 372 mtcr r11
363 lwz r11, 4(r0) 373 lwz r11, 4(r0)
364#ifdef CONFIG_8xx_CPU6
365 lwz r3, 8(r0) 374 lwz r3, 8(r0)
366#endif 375#endif
376 mfspr r10, SPRN_M_TW
367 rfi 377 rfi
3682: 3782:
369 mfspr r11, SPRN_SRR1 379 mfspr r11, SPRN_SRR1
@@ -373,13 +383,20 @@ InstructionTLBMiss:
373 rlwinm r11, r11, 0, 0xffff 383 rlwinm r11, r11, 0, 0xffff
374 mtspr SPRN_SRR1, r11 384 mtspr SPRN_SRR1, r11
375 385
376 mfspr r10, SPRN_M_TW /* Restore registers */ 386 /* Restore registers */
387#ifndef CONFIG_8xx_CPU6
388 mfspr r10, SPRN_DAR
389 mtcr r10
390 li r11, 0x00f0
391 mtspr SPRN_DAR, r11 /* Tag DAR */
392 mfspr r11, SPRN_SPRG2
393#else
377 lwz r11, 0(r0) 394 lwz r11, 0(r0)
378 mtcr r11 395 mtcr r11
379 lwz r11, 4(r0) 396 lwz r11, 4(r0)
380#ifdef CONFIG_8xx_CPU6
381 lwz r3, 8(r0) 397 lwz r3, 8(r0)
382#endif 398#endif
399 mfspr r10, SPRN_M_TW
383 b InstructionAccess 400 b InstructionAccess
384 401
385 . = 0x1200 402 . = 0x1200
@@ -390,8 +407,13 @@ DataStoreTLBMiss:
390 DO_8xx_CPU6(0x3f80, r3) 407 DO_8xx_CPU6(0x3f80, r3)
391 mtspr SPRN_M_TW, r10 /* Save a couple of working registers */ 408 mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
392 mfcr r10 409 mfcr r10
410#ifdef CONFIG_8xx_CPU6
393 stw r10, 0(r0) 411 stw r10, 0(r0)
394 stw r11, 4(r0) 412 stw r11, 4(r0)
413#else
414 mtspr SPRN_DAR, r10
415 mtspr SPRN_SPRG2, r11
416#endif
395 mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */ 417 mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
396 418
397 /* If we are faulting a kernel address, we have to use the 419 /* If we are faulting a kernel address, we have to use the
@@ -438,15 +460,14 @@ DataStoreTLBMiss:
438 * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5)); 460 * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
439 * r10 = (r10 & ~PRESENT) | r11; 461 * r10 = (r10 & ~PRESENT) | r11;
440 */ 462 */
463#ifdef CONFIG_SWAP
441 rlwinm r11, r10, 32-5, _PAGE_PRESENT 464 rlwinm r11, r10, 32-5, _PAGE_PRESENT
442 and r11, r11, r10 465 and r11, r11, r10
443 rlwimi r10, r11, 0, _PAGE_PRESENT 466 rlwimi r10, r11, 0, _PAGE_PRESENT
444 467#endif
445 /* Honour kernel RO, User NA */ 468 /* Honour kernel RO, User NA */
446 /* 0x200 == Extended encoding, bit 22 */ 469 /* 0x200 == Extended encoding, bit 22 */
447 /* r11 = (r10 & _PAGE_USER) >> 2 */ 470 rlwimi r10, r10, 32-2, 0x200 /* Copy USER to bit 22, 0x200 */
448 rlwinm r11, r10, 32-2, 0x200
449 or r10, r11, r10
450 /* r11 = (r10 & _PAGE_RW) >> 1 */ 471 /* r11 = (r10 & _PAGE_RW) >> 1 */
451 rlwinm r11, r10, 32-1, 0x200 472 rlwinm r11, r10, 32-1, 0x200
452 or r10, r11, r10 473 or r10, r11, r10
@@ -460,18 +481,24 @@ DataStoreTLBMiss:
460 * of the MMU. 481 * of the MMU.
461 */ 482 */
4622: li r11, 0x00f0 4832: li r11, 0x00f0
463 mtspr SPRN_DAR,r11 /* Tag DAR */
464 rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ 484 rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
465 DO_8xx_CPU6(0x3d80, r3) 485 DO_8xx_CPU6(0x3d80, r3)
466 mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ 486 mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
467 487
468 mfspr r10, SPRN_M_TW /* Restore registers */ 488 /* Restore registers */
489#ifndef CONFIG_8xx_CPU6
490 mfspr r10, SPRN_DAR
491 mtcr r10
492 mtspr SPRN_DAR, r11 /* Tag DAR */
493 mfspr r11, SPRN_SPRG2
494#else
495 mtspr SPRN_DAR, r11 /* Tag DAR */
469 lwz r11, 0(r0) 496 lwz r11, 0(r0)
470 mtcr r11 497 mtcr r11
471 lwz r11, 4(r0) 498 lwz r11, 4(r0)
472#ifdef CONFIG_8xx_CPU6
473 lwz r3, 8(r0) 499 lwz r3, 8(r0)
474#endif 500#endif
501 mfspr r10, SPRN_M_TW
475 rfi 502 rfi
476 503
477/* This is an instruction TLB error on the MPC8xx. This could be due 504/* This is an instruction TLB error on the MPC8xx. This could be due
@@ -683,9 +710,6 @@ start_here:
683 tophys(r4,r2) 710 tophys(r4,r2)
684 addi r4,r4,THREAD /* init task's THREAD */ 711 addi r4,r4,THREAD /* init task's THREAD */
685 mtspr SPRN_SPRG_THREAD,r4 712 mtspr SPRN_SPRG_THREAD,r4
686 li r3,0
687 /* XXX What is that for ? SPRG2 appears otherwise unused on 8xx */
688 mtspr SPRN_SPRG2,r3 /* 0 => r1 has kernel sp */
689 713
690 /* stack */ 714 /* stack */
691 lis r1,init_thread_union@ha 715 lis r1,init_thread_union@ha
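
The CONFIG_SWAP fragment above implements exactly the formula quoted in the DataStoreTLBMiss comment: the TLB copy of a PTE keeps _PAGE_PRESENT only if _PAGE_ACCESSED is also set, so a page that has never been touched still faults and the fault path can maintain its accessed bit. A standalone restatement (the bit values are assumptions matching the >> 5 distance used in the asm):

	#include <stdint.h>

	#define _PAGE_PRESENT	0x0001u		/* assumed */
	#define _PAGE_ACCESSED	0x0020u		/* assumed: 5 bits above PRESENT */

	static uint32_t tlb_copy_of_pte(uint32_t pte)
	{
		/* r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5)) */
		uint32_t keep = (pte & _PAGE_PRESENT) &
				((pte & _PAGE_ACCESSED) >> 5);

		/* r10 = (r10 & ~PRESENT) | r11 */
		return (pte & ~_PAGE_PRESENT) | keep;
	}
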
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index 50504ae39cb7..a0bf158c8b47 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -1,6 +1,7 @@
1#ifndef __HEAD_BOOKE_H__ 1#ifndef __HEAD_BOOKE_H__
2#define __HEAD_BOOKE_H__ 2#define __HEAD_BOOKE_H__
3 3
4#include <asm/ptrace.h> /* for STACK_FRAME_REGS_MARKER */
4/* 5/*
5 * Macros used for common Book-e exception handling 6 * Macros used for common Book-e exception handling
6 */ 7 */
@@ -48,6 +49,9 @@
48 stw r10,0(r11); \ 49 stw r10,0(r11); \
49 rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\ 50 rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
50 stw r0,GPR0(r11); \ 51 stw r0,GPR0(r11); \
52 lis r10, STACK_FRAME_REGS_MARKER@ha;/* exception frame marker */ \
53 addi r10, r10, STACK_FRAME_REGS_MARKER@l; \
54 stw r10, 8(r11); \
51 SAVE_4GPRS(3, r11); \ 55 SAVE_4GPRS(3, r11); \
52 SAVE_2GPRS(7, r11) 56 SAVE_2GPRS(7, r11)
53 57
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 725526547994..edd4a57fd29e 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -639,6 +639,13 @@ interrupt_base:
639 rlwinm r12,r12,0,16,1 639 rlwinm r12,r12,0,16,1
640 mtspr SPRN_MAS1,r12 640 mtspr SPRN_MAS1,r12
641 641
642 /* Make up the required permissions for kernel code */
643#ifdef CONFIG_PTE_64BIT
644 li r13,_PAGE_PRESENT | _PAGE_BAP_SX
645 oris r13,r13,_PAGE_ACCESSED@h
646#else
647 li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
648#endif
642 b 4f 649 b 4f
643 650
644 /* Get the PGD for the current thread */ 651 /* Get the PGD for the current thread */
@@ -646,15 +653,15 @@ interrupt_base:
646 mfspr r11,SPRN_SPRG_THREAD 653 mfspr r11,SPRN_SPRG_THREAD
647 lwz r11,PGDIR(r11) 654 lwz r11,PGDIR(r11)
648 655
6494: 656 /* Make up the required permissions for user code */
650 /* Make up the required permissions */
651#ifdef CONFIG_PTE_64BIT 657#ifdef CONFIG_PTE_64BIT
652 li r13,_PAGE_PRESENT | _PAGE_EXEC 658 li r13,_PAGE_PRESENT | _PAGE_BAP_UX
653 oris r13,r13,_PAGE_ACCESSED@h 659 oris r13,r13,_PAGE_ACCESSED@h
654#else 660#else
655 li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC 661 li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
656#endif 662#endif
657 663
6644:
658 FIND_PTE 665 FIND_PTE
659 andc. r13,r13,r11 /* Check permission */ 666 andc. r13,r13,r11 /* Check permission */
660 667
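
On the permission masks above: for the 64-bit PTE format a kernel instruction fault must find the supervisor-execute bit (_PAGE_BAP_SX) while a user fault needs the user-execute bit (_PAGE_BAP_UX); the later andc. r13,r13,r11 succeeds only when every required bit is present in the PTE. The check restated in C (illustrative; constants as in the kernel's Book3E PTE definitions):

	static int itlb_perms_ok(unsigned long pte, int kernel_fault)
	{
		unsigned long need = _PAGE_PRESENT | _PAGE_ACCESSED |
				     (kernel_fault ? _PAGE_BAP_SX : _PAGE_BAP_UX);

		/* andc. r13,r13,r11: nonzero means a required bit is missing */
		return (need & ~pte) == 0;
	}
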
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index ec94f906ea43..d5839179ec77 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -43,20 +43,9 @@
43#define DBG(...) 43#define DBG(...)
44 44
45static int novmerge; 45static int novmerge;
46static int protect4gb = 1;
47 46
48static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int); 47static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
49 48
50static int __init setup_protect4gb(char *str)
51{
52 if (strcmp(str, "on") == 0)
53 protect4gb = 1;
54 else if (strcmp(str, "off") == 0)
55 protect4gb = 0;
56
57 return 1;
58}
59
60static int __init setup_iommu(char *str) 49static int __init setup_iommu(char *str)
61{ 50{
62 if (!strcmp(str, "novmerge")) 51 if (!strcmp(str, "novmerge"))
@@ -66,7 +55,6 @@ static int __init setup_iommu(char *str)
66 return 1; 55 return 1;
67} 56}
68 57
69__setup("protect4gb=", setup_protect4gb);
70__setup("iommu=", setup_iommu); 58__setup("iommu=", setup_iommu);
71 59
72static unsigned long iommu_range_alloc(struct device *dev, 60static unsigned long iommu_range_alloc(struct device *dev,
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 64f6f2031c22..30817d9b20cb 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -53,7 +53,6 @@
53#include <linux/bootmem.h> 53#include <linux/bootmem.h>
54#include <linux/pci.h> 54#include <linux/pci.h>
55#include <linux/debugfs.h> 55#include <linux/debugfs.h>
56#include <linux/perf_event.h>
57 56
58#include <asm/uaccess.h> 57#include <asm/uaccess.h>
59#include <asm/system.h> 58#include <asm/system.h>
@@ -145,11 +144,6 @@ notrace void raw_local_irq_restore(unsigned long en)
145 } 144 }
146#endif /* CONFIG_PPC_STD_MMU_64 */ 145#endif /* CONFIG_PPC_STD_MMU_64 */
147 146
148 if (test_perf_event_pending()) {
149 clear_perf_event_pending();
150 perf_event_do_pending();
151 }
152
153 /* 147 /*
154 * if (get_paca()->hard_enabled) return; 148 * if (get_paca()->hard_enabled) return;
155 * But again we need to take care that gcc gets hard_enabled directly 149 * But again we need to take care that gcc gets hard_enabled directly
@@ -290,30 +284,33 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
290} 284}
291 285
292#ifdef CONFIG_HOTPLUG_CPU 286#ifdef CONFIG_HOTPLUG_CPU
293void fixup_irqs(cpumask_t map) 287void fixup_irqs(const struct cpumask *map)
294{ 288{
295 struct irq_desc *desc; 289 struct irq_desc *desc;
296 unsigned int irq; 290 unsigned int irq;
297 static int warned; 291 static int warned;
292 cpumask_var_t mask;
298 293
299 for_each_irq(irq) { 294 alloc_cpumask_var(&mask, GFP_KERNEL);
300 cpumask_t mask;
301 295
296 for_each_irq(irq) {
302 desc = irq_to_desc(irq); 297 desc = irq_to_desc(irq);
303 if (desc && desc->status & IRQ_PER_CPU) 298 if (desc && desc->status & IRQ_PER_CPU)
304 continue; 299 continue;
305 300
306 cpumask_and(&mask, desc->affinity, &map); 301 cpumask_and(mask, desc->affinity, map);
307 if (any_online_cpu(mask) == NR_CPUS) { 302 if (cpumask_any(mask) >= nr_cpu_ids) {
308 printk("Breaking affinity for irq %i\n", irq); 303 printk("Breaking affinity for irq %i\n", irq);
309 mask = map; 304 cpumask_copy(mask, map);
310 } 305 }
311 if (desc->chip->set_affinity) 306 if (desc->chip->set_affinity)
312 desc->chip->set_affinity(irq, &mask); 307 desc->chip->set_affinity(irq, mask);
313 else if (desc->action && !(warned++)) 308 else if (desc->action && !(warned++))
314 printk("Cannot set affinity for irq %i\n", irq); 309 printk("Cannot set affinity for irq %i\n", irq);
315 } 310 }
316 311
312 free_cpumask_var(mask);
313
317 local_irq_enable(); 314 local_irq_enable();
318 mdelay(1); 315 mdelay(1);
319 local_irq_disable(); 316 local_irq_disable();
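
The fixup_irqs() rework above doubles as an example of the standard conversion from an on-stack cpumask_t to a heap-allocated cpumask_var_t, which stays cheap when NR_CPUS is large. A minimal sketch of the pattern (the function name is illustrative; note the patch itself does not check the allocation for failure):

	#include <linux/cpumask.h>
	#include <linux/gfp.h>

	static void retarget_one(const struct cpumask *irq_affinity,
				 const struct cpumask *allowed)
	{
		cpumask_var_t mask;

		if (!alloc_cpumask_var(&mask, GFP_KERNEL))
			return;

		cpumask_and(mask, irq_affinity, allowed);
		if (cpumask_any(mask) >= nr_cpu_ids)	/* nothing left: break affinity */
			cpumask_copy(mask, allowed);

		/* ... pass 'mask' to a ->set_affinity() style callback ... */

		free_cpumask_var(mask);
	}
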
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 41bada0298c8..82a7b228c81a 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -20,6 +20,7 @@
20#include <linux/smp.h> 20#include <linux/smp.h>
21#include <linux/signal.h> 21#include <linux/signal.h>
22#include <linux/ptrace.h> 22#include <linux/ptrace.h>
23#include <linux/kdebug.h>
23#include <asm/current.h> 24#include <asm/current.h>
24#include <asm/processor.h> 25#include <asm/processor.h>
25#include <asm/machdep.h> 26#include <asm/machdep.h>
@@ -115,7 +116,8 @@ void kgdb_roundup_cpus(unsigned long flags)
115/* KGDB functions to use existing PowerPC64 hooks. */ 116/* KGDB functions to use existing PowerPC64 hooks. */
116static int kgdb_debugger(struct pt_regs *regs) 117static int kgdb_debugger(struct pt_regs *regs)
117{ 118{
118 return kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs); 119 return !kgdb_handle_exception(1, computeSignal(TRAP(regs)),
120 DIE_OOPS, regs);
119} 121}
120 122
121static int kgdb_handle_breakpoint(struct pt_regs *regs) 123static int kgdb_handle_breakpoint(struct pt_regs *regs)
@@ -123,7 +125,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
123 if (user_mode(regs)) 125 if (user_mode(regs))
124 return 0; 126 return 0;
125 127
126 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0) 128 if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
127 return 0; 129 return 0;
128 130
129 if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr)) 131 if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
@@ -309,6 +311,11 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
309 (unsigned long)(((void *)gdb_regs) + NUMREGBYTES)); 311 (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
310} 312}
311 313
314void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
315{
316 regs->nip = pc;
317}
318
312/* 319/*
313 * This function does PowerPC specific processing for interfacing to gdb. 320 * This function does PowerPC specific processing for interfacing to gdb.
314 */ 321 */
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index b36f074524ad..c533525ca56a 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -114,6 +114,9 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
114#ifdef CONFIG_PPC_ADV_DEBUG_REGS 114#ifdef CONFIG_PPC_ADV_DEBUG_REGS
115 regs->msr &= ~MSR_CE; 115 regs->msr &= ~MSR_CE;
116 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM); 116 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
117#ifdef CONFIG_PPC_47x
118 isync();
119#endif
117#endif 120#endif
118 121
119 /* 122 /*
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index c2c70e1b32cd..50362b6ef6e9 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -38,7 +38,7 @@
38#include <asm/vio.h> 38#include <asm/vio.h>
39#include <asm/mmu.h> 39#include <asm/mmu.h>
40 40
41#define MODULE_VERS "1.8" 41#define MODULE_VERS "1.9"
42#define MODULE_NAME "lparcfg" 42#define MODULE_NAME "lparcfg"
43 43
44/* #define LPARCFG_DEBUG */ 44/* #define LPARCFG_DEBUG */
@@ -487,6 +487,14 @@ static void splpar_dispatch_data(struct seq_file *m)
487 seq_printf(m, "dispatch_dispersions=%lu\n", dispatch_dispersions); 487 seq_printf(m, "dispatch_dispersions=%lu\n", dispatch_dispersions);
488} 488}
489 489
490static void parse_em_data(struct seq_file *m)
491{
492 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
493
494 if (plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
495 seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
496}
497
490static int pseries_lparcfg_data(struct seq_file *m, void *v) 498static int pseries_lparcfg_data(struct seq_file *m, void *v)
491{ 499{
492 int partition_potential_processors; 500 int partition_potential_processors;
@@ -541,6 +549,8 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
541 549
542 seq_printf(m, "slb_size=%d\n", mmu_slb_size); 550 seq_printf(m, "slb_size=%d\n", mmu_slb_size);
543 551
552 parse_em_data(m);
553
544 return 0; 554 return 0;
545} 555}
546 556
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 040bd1de8d99..26f9900f773c 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -155,33 +155,38 @@ void kexec_copy_flush(struct kimage *image)
155 155
156#ifdef CONFIG_SMP 156#ifdef CONFIG_SMP
157 157
158/* FIXME: we should schedule this function to be called on all cpus based 158static int kexec_all_irq_disabled = 0;
159 * on calling the interrupts, but we would like to call it off irq level 159
160 * so that the interrupt controller is clean.
161 */
162static void kexec_smp_down(void *arg) 160static void kexec_smp_down(void *arg)
163{ 161{
162 local_irq_disable();
163 mb(); /* make sure our irqs are disabled before we say they are */
164 get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
165 while(kexec_all_irq_disabled == 0)
166 cpu_relax();
167 mb(); /* make sure all irqs are disabled before this */
168 /*
169 * Now every CPU has IRQs off, we can clear out any pending
170 * IPIs and be sure that no more will come in after this.
171 */
164 if (ppc_md.kexec_cpu_down) 172 if (ppc_md.kexec_cpu_down)
165 ppc_md.kexec_cpu_down(0, 1); 173 ppc_md.kexec_cpu_down(0, 1);
166 174
167 local_irq_disable();
168 kexec_smp_wait(); 175 kexec_smp_wait();
169 /* NOTREACHED */ 176 /* NOTREACHED */
170} 177}
171 178
172static void kexec_prepare_cpus(void) 179static void kexec_prepare_cpus_wait(int wait_state)
173{ 180{
174 int my_cpu, i, notified=-1; 181 int my_cpu, i, notified=-1;
175 182
176 smp_call_function(kexec_smp_down, NULL, /* wait */0);
177 my_cpu = get_cpu(); 183 my_cpu = get_cpu();
178 184 /* Make sure each CPU has at least made it to the state we need */
179 /* check the others cpus are now down (via paca hw cpu id == -1) */
180 for (i=0; i < NR_CPUS; i++) { 185 for (i=0; i < NR_CPUS; i++) {
181 if (i == my_cpu) 186 if (i == my_cpu)
182 continue; 187 continue;
183 188
184 while (paca[i].hw_cpu_id != -1) { 189 while (paca[i].kexec_state < wait_state) {
185 barrier(); 190 barrier();
186 if (!cpu_possible(i)) { 191 if (!cpu_possible(i)) {
187 printk("kexec: cpu %d hw_cpu_id %d is not" 192 printk("kexec: cpu %d hw_cpu_id %d is not"
@@ -201,20 +206,35 @@ static void kexec_prepare_cpus(void)
201 } 206 }
202 if (i != notified) { 207 if (i != notified) {
203 printk( "kexec: waiting for cpu %d (physical" 208 printk( "kexec: waiting for cpu %d (physical"
204 " %d) to go down\n", 209 " %d) to enter %i state\n",
205 i, paca[i].hw_cpu_id); 210 i, paca[i].hw_cpu_id, wait_state);
206 notified = i; 211 notified = i;
207 } 212 }
208 } 213 }
209 } 214 }
215 mb();
216}
217
218static void kexec_prepare_cpus(void)
219{
220
221 smp_call_function(kexec_smp_down, NULL, /* wait */0);
222 local_irq_disable();
223 mb(); /* make sure IRQs are disabled before we say they are */
224 get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
225
226 kexec_prepare_cpus_wait(KEXEC_STATE_IRQS_OFF);
227 /* we are sure every CPU has IRQs off at this point */
228 kexec_all_irq_disabled = 1;
210 229
211 /* after we tell the others to go down */ 230 /* after we tell the others to go down */
212 if (ppc_md.kexec_cpu_down) 231 if (ppc_md.kexec_cpu_down)
213 ppc_md.kexec_cpu_down(0, 0); 232 ppc_md.kexec_cpu_down(0, 0);
214 233
215 put_cpu(); 234 /* Before removing MMU mappings make sure all CPUs have entered real mode */
235 kexec_prepare_cpus_wait(KEXEC_STATE_REAL_MODE);
216 236
217 local_irq_disable(); 237 put_cpu();
218} 238}
219 239
220#else /* ! SMP */ 240#else /* ! SMP */
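
The rework above retires the old "hw_cpu_id == -1" convention in favour of an explicit per-CPU kexec_state and a two-phase handshake: every secondary reports KEXEC_STATE_IRQS_OFF, the master releases them all at once via kexec_all_irq_disabled, and only after every CPU has reached KEXEC_STATE_REAL_MODE does the master tear down the MMU. A condensed sketch of the protocol; paca_stub, nr_cpus, my_cpu and the busy loops are stand-ins for the real paca/barrier machinery, not kernel API:

	enum kexec_state {
		KEXEC_STATE_NONE,
		KEXEC_STATE_IRQS_OFF,
		KEXEC_STATE_REAL_MODE,
	};

	struct paca_stub { volatile enum kexec_state kexec_state; };
	extern struct paca_stub paca[];
	extern int nr_cpus, my_cpu;

	static volatile int kexec_all_irq_disabled;

	static void wait_all_at_least(enum kexec_state s)
	{
		int i;

		for (i = 0; i < nr_cpus; i++) {
			if (i == my_cpu)
				continue;
			while (paca[i].kexec_state < s)
				;	/* barrier()/cpu_relax() in the real code */
		}
	}

	static void master_prepare(void)	/* kexec_prepare_cpus(), simplified */
	{
		/* IRQs already disabled locally at this point */
		paca[my_cpu].kexec_state = KEXEC_STATE_IRQS_OFF;
		wait_all_at_least(KEXEC_STATE_IRQS_OFF);
		kexec_all_irq_disabled = 1;	/* release the secondaries */
		wait_all_at_least(KEXEC_STATE_REAL_MODE); /* safe to drop the MMU */
	}
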
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 8649f536f8df..8043d1b73cf0 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -441,7 +441,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
441 addi r3,r3,L1_CACHE_BYTES 441 addi r3,r3,L1_CACHE_BYTES
442 bdnz 0b 442 bdnz 0b
443 sync 443 sync
444#ifndef CONFIG_44x 444#ifdef CONFIG_44x
445 /* We don't flush the icache on 44x. Those have a virtual icache 445 /* We don't flush the icache on 44x. Those have a virtual icache
446 * and we don't have access to the virtual address here (it's 446 * and we don't have access to the virtual address here (it's
447 * not the page vaddr but where it's mapped in user space). The 447 * not the page vaddr but where it's mapped in user space). The
@@ -449,15 +449,19 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
449 * a change in the address space occurs, before returning to 449 * a change in the address space occurs, before returning to
450 * user space 450 * user space
451 */ 451 */
452BEGIN_MMU_FTR_SECTION
453 blr
454END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x)
455#endif /* CONFIG_44x */
452 mtctr r4 456 mtctr r4
4531: icbi 0,r6 4571: icbi 0,r6
454 addi r6,r6,L1_CACHE_BYTES 458 addi r6,r6,L1_CACHE_BYTES
455 bdnz 1b 459 bdnz 1b
456 sync 460 sync
457 isync 461 isync
458#endif /* CONFIG_44x */
459 blr 462 blr
460 463
464#ifndef CONFIG_BOOKE
461/* 465/*
462 * Flush a particular page from the data cache to RAM, identified 466 * Flush a particular page from the data cache to RAM, identified
463 * by its physical address. We turn off the MMU so we can just use 467 * by its physical address. We turn off the MMU so we can just use
@@ -490,6 +494,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
490 mtmsr r10 /* restore DR */ 494 mtmsr r10 /* restore DR */
491 isync 495 isync
492 blr 496 blr
497#endif /* CONFIG_BOOKE */
493 498
494/* 499/*
495 * Clear pages using the dcbz instruction, which doesn't cause any 500 * Clear pages using the dcbz instruction, which doesn't cause any
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index a5cf9c1356a6..a2b18dffa03e 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -24,6 +24,7 @@
24#include <asm/asm-offsets.h> 24#include <asm/asm-offsets.h>
25#include <asm/cputable.h> 25#include <asm/cputable.h>
26#include <asm/thread_info.h> 26#include <asm/thread_info.h>
27#include <asm/kexec.h>
27 28
28 .text 29 .text
29 30
@@ -471,6 +472,10 @@ _GLOBAL(kexec_wait)
4711: mflr r5 4721: mflr r5
472 addi r5,r5,kexec_flag-1b 473 addi r5,r5,kexec_flag-1b
473 474
475 li r4,KEXEC_STATE_REAL_MODE
476 stb r4,PACAKEXECSTATE(r13)
477 SYNC
478
47499: HMT_LOW 47999: HMT_LOW
475#ifdef CONFIG_KEXEC /* use no memory without kexec */ 480#ifdef CONFIG_KEXEC /* use no memory without kexec */
476 lwz r4,0(r5) 481 lwz r4,0(r5)
@@ -494,14 +499,11 @@ kexec_flag:
494 * note: this is a terminal routine, it does not save lr 499 * note: this is a terminal routine, it does not save lr
495 * 500 *
496 * get phys id from paca 501 * get phys id from paca
497 * set paca id to -1 to say we got here
498 * switch to real mode 502 * switch to real mode
499 * join other cpus in kexec_wait(phys_id) 503 * join other cpus in kexec_wait(phys_id)
500 */ 504 */
501_GLOBAL(kexec_smp_wait) 505_GLOBAL(kexec_smp_wait)
502 lhz r3,PACAHWCPUID(r13) 506 lhz r3,PACAHWCPUID(r13)
503 li r4,-1
504 sth r4,PACAHWCPUID(r13) /* let others know we left */
505 bl real_mode 507 bl real_mode
506 b .kexec_wait 508 b .kexec_wait
507 509
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 0c40c6f476fe..f88acf0218db 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -18,6 +18,7 @@
18#include <asm/pgtable.h> 18#include <asm/pgtable.h>
19#include <asm/iseries/lpar_map.h> 19#include <asm/iseries/lpar_map.h>
20#include <asm/iseries/hv_types.h> 20#include <asm/iseries/hv_types.h>
21#include <asm/kexec.h>
21 22
22/* This symbol is provided by the linker - let it fill in the paca 23/* This symbol is provided by the linker - let it fill in the paca
23 * field correctly */ 24 * field correctly */
@@ -97,6 +98,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
97 new_paca->kernelbase = (unsigned long) _stext; 98 new_paca->kernelbase = (unsigned long) _stext;
98 new_paca->kernel_msr = MSR_KERNEL; 99 new_paca->kernel_msr = MSR_KERNEL;
99 new_paca->hw_cpu_id = 0xffff; 100 new_paca->hw_cpu_id = 0xffff;
101 new_paca->kexec_state = KEXEC_STATE_NONE;
100 new_paca->__current = &init_task; 102 new_paca->__current = &init_task;
101#ifdef CONFIG_PPC_STD_MMU_64 103#ifdef CONFIG_PPC_STD_MMU_64
102 new_paca->slb_shadow_ptr = &slb_shadow[cpu]; 104 new_paca->slb_shadow_ptr = &slb_shadow[cpu];
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index cd11d5ca80df..6ddb795f83e8 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -310,6 +310,8 @@ static void __devinit __of_scan_bus(struct device_node *node,
310 /* Scan direct children */ 310 /* Scan direct children */
311 for_each_child_of_node(node, child) { 311 for_each_child_of_node(node, child) {
312 pr_debug(" * %s\n", child->full_name); 312 pr_debug(" * %s\n", child->full_name);
313 if (!of_device_is_available(child))
314 continue;
313 reg = of_get_property(child, "reg", &reglen); 315 reg = of_get_property(child, "reg", &reglen);
314 if (reg == NULL || reglen < 20) 316 if (reg == NULL || reglen < 20)
315 continue; 317 continue;
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 08460a2e9f41..43b83c35cf54 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -35,6 +35,9 @@ struct cpu_hw_events {
35 u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; 35 u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
36 unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; 36 unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
37 unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; 37 unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
38
39 unsigned int group_flag;
40 int n_txn_start;
38}; 41};
39DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); 42DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
40 43
@@ -718,66 +721,6 @@ static int collect_events(struct perf_event *group, int max_count,
718 return n; 721 return n;
719} 722}
720 723
721static void event_sched_in(struct perf_event *event)
722{
723 event->state = PERF_EVENT_STATE_ACTIVE;
724 event->oncpu = smp_processor_id();
725 event->tstamp_running += event->ctx->time - event->tstamp_stopped;
726 if (is_software_event(event))
727 event->pmu->enable(event);
728}
729
730/*
731 * Called to enable a whole group of events.
732 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
733 * Assumes the caller has disabled interrupts and has
734 * frozen the PMU with hw_perf_save_disable.
735 */
736int hw_perf_group_sched_in(struct perf_event *group_leader,
737 struct perf_cpu_context *cpuctx,
738 struct perf_event_context *ctx)
739{
740 struct cpu_hw_events *cpuhw;
741 long i, n, n0;
742 struct perf_event *sub;
743
744 if (!ppmu)
745 return 0;
746 cpuhw = &__get_cpu_var(cpu_hw_events);
747 n0 = cpuhw->n_events;
748 n = collect_events(group_leader, ppmu->n_counter - n0,
749 &cpuhw->event[n0], &cpuhw->events[n0],
750 &cpuhw->flags[n0]);
751 if (n < 0)
752 return -EAGAIN;
753 if (check_excludes(cpuhw->event, cpuhw->flags, n0, n))
754 return -EAGAIN;
755 i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0);
756 if (i < 0)
757 return -EAGAIN;
758 cpuhw->n_events = n0 + n;
759 cpuhw->n_added += n;
760
761 /*
762 * OK, this group can go on; update event states etc.,
763 * and enable any software events
764 */
765 for (i = n0; i < n0 + n; ++i)
766 cpuhw->event[i]->hw.config = cpuhw->events[i];
767 cpuctx->active_oncpu += n;
768 n = 1;
769 event_sched_in(group_leader);
770 list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
771 if (sub->state != PERF_EVENT_STATE_OFF) {
772 event_sched_in(sub);
773 ++n;
774 }
775 }
776 ctx->nr_active += n;
777
778 return 1;
779}
780
781/* 724/*
782 * Add an event to the PMU. 725 * Add an event to the PMU.
783 * If all events are not already frozen, then we disable and 726 * If all events are not already frozen, then we disable and
@@ -805,12 +748,22 @@ static int power_pmu_enable(struct perf_event *event)
805 cpuhw->event[n0] = event; 748 cpuhw->event[n0] = event;
806 cpuhw->events[n0] = event->hw.config; 749 cpuhw->events[n0] = event->hw.config;
807 cpuhw->flags[n0] = event->hw.event_base; 750 cpuhw->flags[n0] = event->hw.event_base;
751
752 /*
753 * If a group event scheduling transaction was started,
754 * skip the schedulability test here; it will be performed
755 * at commit time (->commit_txn) as a whole
756 */
757 if (cpuhw->group_flag & PERF_EVENT_TXN_STARTED)
758 goto nocheck;
759
808 if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1)) 760 if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
809 goto out; 761 goto out;
810 if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1)) 762 if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
811 goto out; 763 goto out;
812
813 event->hw.config = cpuhw->events[n0]; 764 event->hw.config = cpuhw->events[n0];
765
766nocheck:
814 ++cpuhw->n_events; 767 ++cpuhw->n_events;
815 ++cpuhw->n_added; 768 ++cpuhw->n_added;
816 769
@@ -896,11 +849,65 @@ static void power_pmu_unthrottle(struct perf_event *event)
896 local_irq_restore(flags); 849 local_irq_restore(flags);
897} 850}
898 851
852/*
853 * Start group events scheduling transaction
854 * Set the flag to make pmu::enable() not perform the
855 * schedulability test; it will be performed at commit time
856 */
857void power_pmu_start_txn(const struct pmu *pmu)
858{
859 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
860
861 cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
862 cpuhw->n_txn_start = cpuhw->n_events;
863}
864
865/*
866 * Stop group events scheduling transaction
867 * Clear the flag and pmu::enable() will perform the
868 * schedulability test.
869 */
870void power_pmu_cancel_txn(const struct pmu *pmu)
871{
872 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
873
874 cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
875}
876
877/*
878 * Commit group events scheduling transaction
879 * Perform the group schedulability test as a whole
880 * Return 0 on success
881 */
882int power_pmu_commit_txn(const struct pmu *pmu)
883{
884 struct cpu_hw_events *cpuhw;
885 long i, n;
886
887 if (!ppmu)
888 return -EAGAIN;
889 cpuhw = &__get_cpu_var(cpu_hw_events);
890 n = cpuhw->n_events;
891 if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
892 return -EAGAIN;
893 i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n);
894 if (i < 0)
895 return -EAGAIN;
896
897 for (i = cpuhw->n_txn_start; i < n; ++i)
898 cpuhw->event[i]->hw.config = cpuhw->events[i];
899
900 return 0;
901}
902
899struct pmu power_pmu = { 903struct pmu power_pmu = {
900 .enable = power_pmu_enable, 904 .enable = power_pmu_enable,
901 .disable = power_pmu_disable, 905 .disable = power_pmu_disable,
902 .read = power_pmu_read, 906 .read = power_pmu_read,
903 .unthrottle = power_pmu_unthrottle, 907 .unthrottle = power_pmu_unthrottle,
908 .start_txn = power_pmu_start_txn,
909 .cancel_txn = power_pmu_cancel_txn,
910 .commit_txn = power_pmu_commit_txn,
904}; 911};
905 912
906/* 913/*
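
The three hooks above let the core schedule a whole event group with a single constraint check rather than one per event. A condensed sketch of the calling convention, simplified from the generic group-scheduling logic (timestamps and partial-undo paths trimmed):

	#include <linux/perf_event.h>
	#include <linux/list.h>
	#include <linux/errno.h>

	static int group_sched_in_sketch(const struct pmu *pmu,
					 struct perf_event *leader)
	{
		struct perf_event *sub;

		pmu->start_txn(pmu);	/* ->enable() now defers its checks */

		if (pmu->enable(leader))
			goto fail;
		list_for_each_entry(sub, &leader->sibling_list, group_entry)
			if (pmu->enable(sub))
				goto fail;

		if (!pmu->commit_txn(pmu))	/* one test for the whole group */
			return 0;
	fail:
		pmu->cancel_txn(pmu);
		return -EAGAIN;
	}
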
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index ab3e392ac63c..bc9f39d2598b 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -101,6 +101,10 @@ EXPORT_SYMBOL(pci_dram_offset);
101EXPORT_SYMBOL(start_thread); 101EXPORT_SYMBOL(start_thread);
102EXPORT_SYMBOL(kernel_thread); 102EXPORT_SYMBOL(kernel_thread);
103 103
104#ifndef CONFIG_BOOKE
105EXPORT_SYMBOL_GPL(cvt_df);
106EXPORT_SYMBOL_GPL(cvt_fd);
107#endif
104EXPORT_SYMBOL(giveup_fpu); 108EXPORT_SYMBOL(giveup_fpu);
105#ifdef CONFIG_ALTIVEC 109#ifdef CONFIG_ALTIVEC
106EXPORT_SYMBOL(giveup_altivec); 110EXPORT_SYMBOL(giveup_altivec);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index e4d71ced97ef..9d255b4f0a0e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -371,6 +371,9 @@ int set_dabr(unsigned long dabr)
371 /* XXX should we have a CPU_FTR_HAS_DABR ? */ 371 /* XXX should we have a CPU_FTR_HAS_DABR ? */
372#ifdef CONFIG_PPC_ADV_DEBUG_REGS 372#ifdef CONFIG_PPC_ADV_DEBUG_REGS
373 mtspr(SPRN_DAC1, dabr); 373 mtspr(SPRN_DAC1, dabr);
374#ifdef CONFIG_PPC_47x
375 isync();
376#endif
374#elif defined(CONFIG_PPC_BOOK3S) 377#elif defined(CONFIG_PPC_BOOK3S)
375 mtspr(SPRN_DABR, dabr); 378 mtspr(SPRN_DABR, dabr);
376#endif 379#endif
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 5f306c4946e5..97d4bd9442d3 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -653,6 +653,7 @@ static void __init early_cmdline_parse(void)
653#else 653#else
654#define OV5_CMO 0x00 654#define OV5_CMO 0x00
655#endif 655#endif
656#define OV5_TYPE1_AFFINITY 0x80 /* Type 1 NUMA affinity */
656 657
657/* Option Vector 6: IBM PAPR hints */ 658/* Option Vector 6: IBM PAPR hints */
658#define OV6_LINUX 0x02 /* Linux is our OS */ 659#define OV6_LINUX 0x02 /* Linux is our OS */
@@ -706,7 +707,7 @@ static unsigned char ibm_architecture_vec[] = {
706 OV5_DONATE_DEDICATE_CPU | OV5_MSI, 707 OV5_DONATE_DEDICATE_CPU | OV5_MSI,
707 0, 708 0,
708 OV5_CMO, 709 OV5_CMO,
709 0, 710 OV5_TYPE1_AFFINITY,
710 0, 711 0,
711 0, 712 0,
712 0, 713 0,
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index ed2cfe17d25e..7a0c0199ea28 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -39,6 +39,109 @@
39#include <asm/system.h> 39#include <asm/system.h>
40 40
41/* 41/*
42 * The parameter save area on the stack is used to store arguments being passed
43 * to callee function and is located at fixed offset from stack pointer.
44 */
45#ifdef CONFIG_PPC32
46#define PARAMETER_SAVE_AREA_OFFSET 24 /* bytes */
47#else /* CONFIG_PPC32 */
48#define PARAMETER_SAVE_AREA_OFFSET 48 /* bytes */
49#endif
50
51struct pt_regs_offset {
52 const char *name;
53 int offset;
54};
55
56#define STR(s) #s /* convert to string */
57#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
58#define GPR_OFFSET_NAME(num) \
59 {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
60#define REG_OFFSET_END {.name = NULL, .offset = 0}
61
62static const struct pt_regs_offset regoffset_table[] = {
63 GPR_OFFSET_NAME(0),
64 GPR_OFFSET_NAME(1),
65 GPR_OFFSET_NAME(2),
66 GPR_OFFSET_NAME(3),
67 GPR_OFFSET_NAME(4),
68 GPR_OFFSET_NAME(5),
69 GPR_OFFSET_NAME(6),
70 GPR_OFFSET_NAME(7),
71 GPR_OFFSET_NAME(8),
72 GPR_OFFSET_NAME(9),
73 GPR_OFFSET_NAME(10),
74 GPR_OFFSET_NAME(11),
75 GPR_OFFSET_NAME(12),
76 GPR_OFFSET_NAME(13),
77 GPR_OFFSET_NAME(14),
78 GPR_OFFSET_NAME(15),
79 GPR_OFFSET_NAME(16),
80 GPR_OFFSET_NAME(17),
81 GPR_OFFSET_NAME(18),
82 GPR_OFFSET_NAME(19),
83 GPR_OFFSET_NAME(20),
84 GPR_OFFSET_NAME(21),
85 GPR_OFFSET_NAME(22),
86 GPR_OFFSET_NAME(23),
87 GPR_OFFSET_NAME(24),
88 GPR_OFFSET_NAME(25),
89 GPR_OFFSET_NAME(26),
90 GPR_OFFSET_NAME(27),
91 GPR_OFFSET_NAME(28),
92 GPR_OFFSET_NAME(29),
93 GPR_OFFSET_NAME(30),
94 GPR_OFFSET_NAME(31),
95 REG_OFFSET_NAME(nip),
96 REG_OFFSET_NAME(msr),
97 REG_OFFSET_NAME(ctr),
98 REG_OFFSET_NAME(link),
99 REG_OFFSET_NAME(xer),
100 REG_OFFSET_NAME(ccr),
101#ifdef CONFIG_PPC64
102 REG_OFFSET_NAME(softe),
103#else
104 REG_OFFSET_NAME(mq),
105#endif
106 REG_OFFSET_NAME(trap),
107 REG_OFFSET_NAME(dar),
108 REG_OFFSET_NAME(dsisr),
109 REG_OFFSET_END,
110};
111
112/**
113 * regs_query_register_offset() - query register offset from its name
114 * @name: the name of a register
115 *
116 * regs_query_register_offset() returns the offset of a register in struct
117 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
118 */
119int regs_query_register_offset(const char *name)
120{
121 const struct pt_regs_offset *roff;
122 for (roff = regoffset_table; roff->name != NULL; roff++)
123 if (!strcmp(roff->name, name))
124 return roff->offset;
125 return -EINVAL;
126}
127
128/**
129 * regs_query_register_name() - query register name from its offset
130 * @offset: the offset of a register in struct pt_regs.
131 *
132 * regs_query_register_name() returns the name of a register from its
133 * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
134 */
135const char *regs_query_register_name(unsigned int offset)
136{
137 const struct pt_regs_offset *roff;
138 for (roff = regoffset_table; roff->name != NULL; roff++)
139 if (roff->offset == offset)
140 return roff->name;
141 return NULL;
142}
143
144/*
42 * does not yet catch signals sent when the child dies. 145 * does not yet catch signals sent when the child dies.
43 * in exit.c or in signal.c. 146 * in exit.c or in signal.c.
44 */ 147 */
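
Both helpers above are straight table walks; a typical consumer (illustrative, not part of this patch) resolves a register by name and reads it out of pt_regs:

	#include <linux/ptrace.h>

	static unsigned long reg_by_name(struct pt_regs *regs, const char *name)
	{
		int off = regs_query_register_offset(name); /* -EINVAL if unknown */

		if (off < 0)
			return 0;
		return *(unsigned long *)((char *)regs + off);
	}
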
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 74367841615a..0e1ec6f746f6 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -691,10 +691,14 @@ void rtas_os_term(char *str)
691{ 691{
692 int status; 692 int status;
693 693
694 if (panic_timeout) 694 /*
695 return; 695 * Firmware with the ibm,extended-os-term property is guaranteed
696 696 * to always return from an ibm,os-term call. Earlier versions without
697 if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term")) 697 * this property may terminate the partition, which we want to avoid
698 * since it interferes with panic_timeout.
699 */
700 if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") ||
701 RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term"))
698 return; 702 return;
699 703
700 snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str); 704 snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);
@@ -705,8 +709,7 @@ void rtas_os_term(char *str)
705 } while (rtas_busy_delay(status)); 709 } while (rtas_busy_delay(status));
706 710
707 if (status != 0) 711 if (status != 0)
708 printk(KERN_EMERG "ibm,os-term call failed %d\n", 712 printk(KERN_EMERG "ibm,os-term call failed %d\n", status);
709 status);
710} 713}
711 714
712static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE; 715static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 4190eae7850a..638883e23e3a 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -411,9 +411,9 @@ static void rtas_event_scan(struct work_struct *w)
411 411
412 get_online_cpus(); 412 get_online_cpus();
413 413
414 cpu = next_cpu(smp_processor_id(), cpu_online_map); 414 cpu = cpumask_next(smp_processor_id(), cpu_online_mask);
415 if (cpu == NR_CPUS) { 415 if (cpu >= nr_cpu_ids) {
416 cpu = first_cpu(cpu_online_map); 416 cpu = cpumask_first(cpu_online_mask);
417 417
418 if (first_pass) { 418 if (first_pass) {
419 first_pass = 0; 419 first_pass = 0;
@@ -466,8 +466,8 @@ static void start_event_scan(void)
466 /* Retrieve errors from nvram if any */ 466 /* Retrieve errors from nvram if any */
467 retreive_nvram_error_log(); 467 retreive_nvram_error_log();
468 468
469 schedule_delayed_work_on(first_cpu(cpu_online_map), &event_scan_work, 469 schedule_delayed_work_on(cpumask_first(cpu_online_mask),
470 event_scan_delay); 470 &event_scan_work, event_scan_delay);
471} 471}
472 472
473static int __init rtas_init(void) 473static int __init rtas_init(void)
@@ -490,6 +490,12 @@ static int __init rtas_init(void)
490 return -ENODEV; 490 return -ENODEV;
491 } 491 }
492 492
493 if (!rtas_event_scan_rate) {
494 /* Broken firmware: take a rate of zero to mean don't scan */
495 printk(KERN_DEBUG "rtasd: scan rate is 0, not scanning\n");
496 return 0;
497 }
498
493 /* Make room for the sequence number */ 499 /* Make room for the sequence number */
494 rtas_error_log_max = rtas_get_error_log_max(); 500 rtas_error_log_max = rtas_get_error_log_max();
495 rtas_error_log_buffer_max = rtas_error_log_max + sizeof(int); 501 rtas_error_log_buffer_max = rtas_error_log_max + sizeof(int);
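
The cpumask calls added to rtas_event_scan() above are the usual wrap-around idiom for rotating a work item across the online CPUs; in isolation (helper name illustrative):

	#include <linux/cpumask.h>

	static int next_scan_cpu(int cur)
	{
		int cpu = cpumask_next(cur, cpu_online_mask);

		if (cpu >= nr_cpu_ids)		/* ran off the end: wrap around */
			cpu = cpumask_first(cpu_online_mask);
		return cpu;
	}
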
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 48f0a008b20b..5e4d852f640c 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -161,45 +161,44 @@ extern u32 cpu_temp_both(unsigned long cpu);
161DEFINE_PER_CPU(unsigned int, cpu_pvr); 161DEFINE_PER_CPU(unsigned int, cpu_pvr);
162#endif 162#endif
163 163
164static int show_cpuinfo(struct seq_file *m, void *v) 164static void show_cpuinfo_summary(struct seq_file *m)
165{ 165{
166 unsigned long cpu_id = (unsigned long)v - 1; 166 struct device_node *root;
167 unsigned int pvr; 167 const char *model = NULL;
168 unsigned short maj;
169 unsigned short min;
170
171 if (cpu_id == NR_CPUS) {
172 struct device_node *root;
173 const char *model = NULL;
174#if defined(CONFIG_SMP) && defined(CONFIG_PPC32) 168#if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
175 unsigned long bogosum = 0; 169 unsigned long bogosum = 0;
176 int i; 170 int i;
177 for_each_online_cpu(i) 171 for_each_online_cpu(i)
178 bogosum += loops_per_jiffy; 172 bogosum += loops_per_jiffy;
179 seq_printf(m, "total bogomips\t: %lu.%02lu\n", 173 seq_printf(m, "total bogomips\t: %lu.%02lu\n",
180 bogosum/(500000/HZ), bogosum/(5000/HZ) % 100); 174 bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
181#endif /* CONFIG_SMP && CONFIG_PPC32 */ 175#endif /* CONFIG_SMP && CONFIG_PPC32 */
182 seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq); 176 seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
183 if (ppc_md.name) 177 if (ppc_md.name)
184 seq_printf(m, "platform\t: %s\n", ppc_md.name); 178 seq_printf(m, "platform\t: %s\n", ppc_md.name);
185 root = of_find_node_by_path("/"); 179 root = of_find_node_by_path("/");
186 if (root) 180 if (root)
187 model = of_get_property(root, "model", NULL); 181 model = of_get_property(root, "model", NULL);
188 if (model) 182 if (model)
189 seq_printf(m, "model\t\t: %s\n", model); 183 seq_printf(m, "model\t\t: %s\n", model);
190 of_node_put(root); 184 of_node_put(root);
191 185
192 if (ppc_md.show_cpuinfo != NULL) 186 if (ppc_md.show_cpuinfo != NULL)
193 ppc_md.show_cpuinfo(m); 187 ppc_md.show_cpuinfo(m);
194 188
195#ifdef CONFIG_PPC32 189#ifdef CONFIG_PPC32
196 /* Display the amount of memory */ 190 /* Display the amount of memory */
197 seq_printf(m, "Memory\t\t: %d MB\n", 191 seq_printf(m, "Memory\t\t: %d MB\n",
198 (unsigned int)(total_memory / (1024 * 1024))); 192 (unsigned int)(total_memory / (1024 * 1024)));
199#endif 193#endif
194}
200 195
201 return 0; 196static int show_cpuinfo(struct seq_file *m, void *v)
202 } 197{
198 unsigned long cpu_id = (unsigned long)v - 1;
199 unsigned int pvr;
200 unsigned short maj;
201 unsigned short min;
203 202
204 /* We only show online cpus: disable preempt (overzealous, I 203 /* We only show online cpus: disable preempt (overzealous, I
205 * knew) to prevent cpu going down. */ 204 * knew) to prevent cpu going down. */
@@ -308,19 +307,28 @@ static int show_cpuinfo(struct seq_file *m, void *v)
308#endif 307#endif
309 308
310 preempt_enable(); 309 preempt_enable();
310
311 /* If this is the last cpu, print the summary */
312 if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
313 show_cpuinfo_summary(m);
314
311 return 0; 315 return 0;
312} 316}
313 317
314static void *c_start(struct seq_file *m, loff_t *pos) 318static void *c_start(struct seq_file *m, loff_t *pos)
315{ 319{
316 unsigned long i = *pos; 320 if (*pos == 0) /* just in case, cpu 0 is not the first */
317 321 *pos = cpumask_first(cpu_online_mask);
318 return i <= NR_CPUS ? (void *)(i + 1) : NULL; 322 else
323 *pos = cpumask_next(*pos - 1, cpu_online_mask);
324 if ((*pos) < nr_cpu_ids)
325 return (void *)(unsigned long)(*pos + 1);
326 return NULL;
319} 327}
320 328
321static void *c_next(struct seq_file *m, void *v, loff_t *pos) 329static void *c_next(struct seq_file *m, void *v, loff_t *pos)
322{ 330{
323 ++*pos; 331 (*pos)++;
324 return c_start(m, pos); 332 return c_start(m, pos);
325} 333}
326 334
@@ -386,14 +394,14 @@ static void __init cpu_init_thread_core_maps(int tpc)
386 394
387/** 395/**
388 * setup_cpu_maps - initialize the following cpu maps: 396 * setup_cpu_maps - initialize the following cpu maps:
389 * cpu_possible_map 397 * cpu_possible_mask
390 * cpu_present_map 398 * cpu_present_mask
391 * 399 *
392 * Having the possible map set up early allows us to restrict allocations 400 * Having the possible map set up early allows us to restrict allocations
393 * of things like irqstacks to num_possible_cpus() rather than NR_CPUS. 401 * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
394 * 402 *
395 * We do not initialize the online map here; cpus set their own bits in 403 * We do not initialize the online map here; cpus set their own bits in
396 * cpu_online_map as they come up. 404 * cpu_online_mask as they come up.
397 * 405 *
398 * This function is valid only for Open Firmware systems. finish_device_tree 406 * This function is valid only for Open Firmware systems. finish_device_tree
399 * must be called before using this. 407 * must be called before using this.
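
One subtlety in the new c_start() above: seq_file treats a NULL iterator as end-of-sequence, so CPU 0 cannot be handed back as (void *)0. The CPU number is therefore biased by one on the way out, and the show side undoes it, as the existing show_cpuinfo() prologue does. Illustrative decode:

	static int show_one_cpu(struct seq_file *m, void *v)
	{
		unsigned long cpu_id = (unsigned long)v - 1;	/* undo the +1 bias */

		seq_printf(m, "processor\t: %lu\n", cpu_id);
		return 0;
	}
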
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 914389158a9b..f3fb5a79de52 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -424,9 +424,18 @@ void __init setup_system(void)
424 DBG(" <- setup_system()\n"); 424 DBG(" <- setup_system()\n");
425} 425}
426 426
427static u64 slb0_limit(void)
428{
429 if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
430 return 1UL << SID_SHIFT_1T;
431 }
432 return 1UL << SID_SHIFT;
433}
434
427#ifdef CONFIG_IRQSTACKS 435#ifdef CONFIG_IRQSTACKS
428static void __init irqstack_early_init(void) 436static void __init irqstack_early_init(void)
429{ 437{
438 u64 limit = slb0_limit();
430 unsigned int i; 439 unsigned int i;
431 440
432 /* 441 /*
@@ -436,10 +445,10 @@ static void __init irqstack_early_init(void)
436 for_each_possible_cpu(i) { 445 for_each_possible_cpu(i) {
437 softirq_ctx[i] = (struct thread_info *) 446 softirq_ctx[i] = (struct thread_info *)
438 __va(lmb_alloc_base(THREAD_SIZE, 447 __va(lmb_alloc_base(THREAD_SIZE,
439 THREAD_SIZE, 0x10000000)); 448 THREAD_SIZE, limit));
440 hardirq_ctx[i] = (struct thread_info *) 449 hardirq_ctx[i] = (struct thread_info *)
441 __va(lmb_alloc_base(THREAD_SIZE, 450 __va(lmb_alloc_base(THREAD_SIZE,
442 THREAD_SIZE, 0x10000000)); 451 THREAD_SIZE, limit));
443 } 452 }
444} 453}
445#else 454#else
@@ -470,7 +479,7 @@ static void __init exc_lvl_early_init(void)
470 */ 479 */
471static void __init emergency_stack_init(void) 480static void __init emergency_stack_init(void)
472{ 481{
473 unsigned long limit; 482 u64 limit;
474 unsigned int i; 483 unsigned int i;
475 484
476 /* 485 /*
@@ -482,7 +491,7 @@ static void __init emergency_stack_init(void)
482 * bringup, we need to get at them in real mode. This means they 491 * bringup, we need to get at them in real mode. This means they
483 * must also be within the RMO region. 492 * must also be within the RMO region.
484 */ 493 */
485 limit = min(0x10000000ULL, lmb.rmo_size); 494 limit = min(slb0_limit(), lmb.rmo_size);
486 495
487 for_each_possible_cpu(i) { 496 for_each_possible_cpu(i) {
488 unsigned long sp; 497 unsigned long sp;
@@ -573,12 +582,6 @@ void ppc64_boot_msg(unsigned int src, const char *msg)
573 printk("[boot]%04x %s\n", src, msg); 582 printk("[boot]%04x %s\n", src, msg);
574} 583}
575 584
576void cpu_die(void)
577{
578 if (ppc_md.cpu_die)
579 ppc_md.cpu_die();
580}
581
582#ifdef CONFIG_SMP 585#ifdef CONFIG_SMP
583#define PCPU_DYN_SIZE () 586#define PCPU_DYN_SIZE ()
584 587
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index c2ee14498077..5c196d1086d9 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -59,8 +59,8 @@
59 59
60struct thread_info *secondary_ti; 60struct thread_info *secondary_ti;
61 61
62DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; 62DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
63DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE; 63DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
64 64
65EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); 65EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
66EXPORT_PER_CPU_SYMBOL(cpu_core_map); 66EXPORT_PER_CPU_SYMBOL(cpu_core_map);
@@ -271,6 +271,16 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
271 smp_store_cpu_info(boot_cpuid); 271 smp_store_cpu_info(boot_cpuid);
272 cpu_callin_map[boot_cpuid] = 1; 272 cpu_callin_map[boot_cpuid] = 1;
273 273
274 for_each_possible_cpu(cpu) {
275 zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
276 GFP_KERNEL, cpu_to_node(cpu));
277 zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
278 GFP_KERNEL, cpu_to_node(cpu));
279 }
280
281 cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
282 cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
283
274 if (smp_ops) 284 if (smp_ops)
275 if (smp_ops->probe) 285 if (smp_ops->probe)
276 max_cpus = smp_ops->probe(); 286 max_cpus = smp_ops->probe();
@@ -289,10 +299,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
289void __devinit smp_prepare_boot_cpu(void) 299void __devinit smp_prepare_boot_cpu(void)
290{ 300{
291 BUG_ON(smp_processor_id() != boot_cpuid); 301 BUG_ON(smp_processor_id() != boot_cpuid);
292
293 set_cpu_online(boot_cpuid, true);
294 cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
295 cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
296#ifdef CONFIG_PPC64 302#ifdef CONFIG_PPC64
297 paca[boot_cpuid].__current = current; 303 paca[boot_cpuid].__current = current;
298#endif 304#endif
@@ -313,7 +319,7 @@ int generic_cpu_disable(void)
313 set_cpu_online(cpu, false); 319 set_cpu_online(cpu, false);
314#ifdef CONFIG_PPC64 320#ifdef CONFIG_PPC64
315 vdso_data->processorCount--; 321 vdso_data->processorCount--;
316 fixup_irqs(cpu_online_map); 322 fixup_irqs(cpu_online_mask);
317#endif 323#endif
318 return 0; 324 return 0;
319} 325}
@@ -333,7 +339,7 @@ int generic_cpu_enable(unsigned int cpu)
333 cpu_relax(); 339 cpu_relax();
334 340
335#ifdef CONFIG_PPC64 341#ifdef CONFIG_PPC64
336 fixup_irqs(cpu_online_map); 342 fixup_irqs(cpu_online_mask);
337 /* counter the irq disable in fixup_irqs */ 343 /* counter the irq disable in fixup_irqs */
338 local_irq_enable(); 344 local_irq_enable();
339#endif 345#endif
@@ -462,7 +468,7 @@ out:
462 return id; 468 return id;
463} 469}
464 470
465/* Must be called when no change can occur to cpu_present_map, 471/* Must be called when no change can occur to cpu_present_mask,
466 * i.e. during cpu online or offline. 472 * i.e. during cpu online or offline.
467 */ 473 */
468static struct device_node *cpu_to_l2cache(int cpu) 474static struct device_node *cpu_to_l2cache(int cpu)
@@ -495,6 +501,14 @@ int __devinit start_secondary(void *unused)
495 current->active_mm = &init_mm; 501 current->active_mm = &init_mm;
496 502
497 smp_store_cpu_info(cpu); 503 smp_store_cpu_info(cpu);
504
505#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
506 /* Clear any pending timer interrupts */
507 mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
508
509 /* Enable decrementer interrupt */
510 mtspr(SPRN_TCR, TCR_DIE);
511#endif
498 set_dec(tb_ticks_per_jiffy); 512 set_dec(tb_ticks_per_jiffy);
499 preempt_disable(); 513 preempt_disable();
500 cpu_callin_map[cpu] = 1; 514 cpu_callin_map[cpu] = 1;
@@ -517,15 +531,15 @@ int __devinit start_secondary(void *unused)
517 for (i = 0; i < threads_per_core; i++) { 531 for (i = 0; i < threads_per_core; i++) {
518 if (cpu_is_offline(base + i)) 532 if (cpu_is_offline(base + i))
519 continue; 533 continue;
520 cpu_set(cpu, per_cpu(cpu_sibling_map, base + i)); 534 cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
521 cpu_set(base + i, per_cpu(cpu_sibling_map, cpu)); 535 cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));
522 536
523 /* cpu_core_map should be a superset of 537 /* cpu_core_map should be a superset of
524 * cpu_sibling_map even if we don't have cache 538 * cpu_sibling_map even if we don't have cache
525 * information, so update the former here, too. 539 * information, so update the former here, too.
526 */ 540 */
527 cpu_set(cpu, per_cpu(cpu_core_map, base +i)); 541 cpumask_set_cpu(cpu, cpu_core_mask(base + i));
528 cpu_set(base + i, per_cpu(cpu_core_map, cpu)); 542 cpumask_set_cpu(base + i, cpu_core_mask(cpu));
529 } 543 }
530 l2_cache = cpu_to_l2cache(cpu); 544 l2_cache = cpu_to_l2cache(cpu);
531 for_each_online_cpu(i) { 545 for_each_online_cpu(i) {
@@ -533,8 +547,8 @@ int __devinit start_secondary(void *unused)
533 if (!np) 547 if (!np)
534 continue; 548 continue;
535 if (np == l2_cache) { 549 if (np == l2_cache) {
536 cpu_set(cpu, per_cpu(cpu_core_map, i)); 550 cpumask_set_cpu(cpu, cpu_core_mask(i));
537 cpu_set(i, per_cpu(cpu_core_map, cpu)); 551 cpumask_set_cpu(i, cpu_core_mask(cpu));
538 } 552 }
539 of_node_put(np); 553 of_node_put(np);
540 } 554 }
@@ -554,19 +568,22 @@ int setup_profiling_timer(unsigned int multiplier)
554 568
555void __init smp_cpus_done(unsigned int max_cpus) 569void __init smp_cpus_done(unsigned int max_cpus)
556{ 570{
557 cpumask_t old_mask; 571 cpumask_var_t old_mask;
558 572
559 /* We want the setup_cpu() here to be called from CPU 0, but our 573 /* We want the setup_cpu() here to be called from CPU 0, but our
560 * init thread may have been "borrowed" by another CPU in the meantime 574 * init thread may have been "borrowed" by another CPU in the meantime
561 * so we pin ourselves down to CPU 0 for a short while 575 * so we pin ourselves down to CPU 0 for a short while
562 */ 576 */
563 old_mask = current->cpus_allowed; 577 alloc_cpumask_var(&old_mask, GFP_NOWAIT);
564 set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid)); 578 cpumask_copy(old_mask, &current->cpus_allowed);
579 set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
565 580
566 if (smp_ops && smp_ops->setup_cpu) 581 if (smp_ops && smp_ops->setup_cpu)
567 smp_ops->setup_cpu(boot_cpuid); 582 smp_ops->setup_cpu(boot_cpuid);
568 583
569 set_cpus_allowed(current, old_mask); 584 set_cpus_allowed_ptr(current, old_mask);
585
586 free_cpumask_var(old_mask);
570 587
571 snapshot_timebases(); 588 snapshot_timebases();
572 589
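The old code copied a whole cpumask_t into a local; with large NR_CPUS that is a sizeable stack object, which is exactly what cpumask_var_t exists to avoid. The general lifecycle looks like this (minimal sketch; note the patch itself ignores the allocation result, which is harmless while CONFIG_CPUMASK_OFFSTACK=n, where alloc_cpumask_var() is a no-op that always succeeds):

	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))	/* real allocation only if OFFSTACK=y */
		return -ENOMEM;

	cpumask_copy(mask, &current->cpus_allowed);	/* save current affinity */
	set_cpus_allowed_ptr(current, cpumask_of(0));	/* pin to CPU 0 for illustration */
	/* ... do CPU-pinned work ... */
	set_cpus_allowed_ptr(current, mask);		/* restore */
	free_cpumask_var(mask);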
@@ -591,10 +608,10 @@ int __cpu_disable(void)
591 /* Update sibling maps */ 608 /* Update sibling maps */
592 base = cpu_first_thread_in_core(cpu); 609 base = cpu_first_thread_in_core(cpu);
593 for (i = 0; i < threads_per_core; i++) { 610 for (i = 0; i < threads_per_core; i++) {
594 cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i)); 611 cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
595 cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu)); 612 cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
596 cpu_clear(cpu, per_cpu(cpu_core_map, base +i)); 613 cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
597 cpu_clear(base + i, per_cpu(cpu_core_map, cpu)); 614 cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
598 } 615 }
599 616
600 l2_cache = cpu_to_l2cache(cpu); 617 l2_cache = cpu_to_l2cache(cpu);
@@ -603,8 +620,8 @@ int __cpu_disable(void)
603 if (!np) 620 if (!np)
604 continue; 621 continue;
605 if (np == l2_cache) { 622 if (np == l2_cache) {
606 cpu_clear(cpu, per_cpu(cpu_core_map, i)); 623 cpumask_clear_cpu(cpu, cpu_core_mask(i));
607 cpu_clear(i, per_cpu(cpu_core_map, cpu)); 624 cpumask_clear_cpu(i, cpu_core_mask(cpu));
608 } 625 }
609 of_node_put(np); 626 of_node_put(np);
610 } 627 }
@@ -631,4 +648,10 @@ void cpu_hotplug_driver_unlock()
631{ 648{
632 mutex_unlock(&powerpc_cpu_hotplug_driver_mutex); 649 mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
633} 650}
651
652void cpu_die(void)
653{
654 if (ppc_md.cpu_die)
655 ppc_md.cpu_die();
656}
634#endif 657#endif
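The new cpu_die() is a thin generic wrapper that defers to the machine descriptor, so each platform supplies its own offline sequence. Hypothetical board code (myboard and its body are invented for illustration) would hook it roughly like so:

static void myboard_cpu_die(void)
{
	/* quiesce this CPU until firmware or the hypervisor reclaims it */
	while (1)
		cpu_relax();
}

define_machine(myboard) {
	.name		= "myboard",
	.cpu_die	= myboard_cpu_die,
	/* ... probe, setup_arch, etc. ... */
};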
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index e235e52dc4fe..c0d8c2006bf4 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -35,7 +35,7 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
35#ifdef CONFIG_PPC64 35#ifdef CONFIG_PPC64
36 36
37/* Time in microseconds we delay before sleeping in the idle loop */ 37/* Time in microseconds we delay before sleeping in the idle loop */
38DEFINE_PER_CPU(unsigned long, smt_snooze_delay) = { 100 }; 38DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };
39 39
40static ssize_t store_smt_snooze_delay(struct sys_device *dev, 40static ssize_t store_smt_snooze_delay(struct sys_device *dev,
41 struct sysdev_attribute *attr, 41 struct sysdev_attribute *attr,
@@ -44,9 +44,9 @@ static ssize_t store_smt_snooze_delay(struct sys_device *dev,
44{ 44{
45 struct cpu *cpu = container_of(dev, struct cpu, sysdev); 45 struct cpu *cpu = container_of(dev, struct cpu, sysdev);
46 ssize_t ret; 46 ssize_t ret;
47 unsigned long snooze; 47 long snooze;
48 48
49 ret = sscanf(buf, "%lu", &snooze); 49 ret = sscanf(buf, "%ld", &snooze);
50 if (ret != 1) 50 if (ret != 1)
51 return -EINVAL; 51 return -EINVAL;
52 52
@@ -61,53 +61,23 @@ static ssize_t show_smt_snooze_delay(struct sys_device *dev,
61{ 61{
62 struct cpu *cpu = container_of(dev, struct cpu, sysdev); 62 struct cpu *cpu = container_of(dev, struct cpu, sysdev);
63 63
64 return sprintf(buf, "%lu\n", per_cpu(smt_snooze_delay, cpu->sysdev.id)); 64 return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->sysdev.id));
65} 65}
66 66
67static SYSDEV_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay, 67static SYSDEV_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
68 store_smt_snooze_delay); 68 store_smt_snooze_delay);
69 69
70/* Only parse OF options if the matching cmdline option was not specified */
71static int smt_snooze_cmdline;
72
73static int __init smt_setup(void)
74{
75 struct device_node *options;
76 const unsigned int *val;
77 unsigned int cpu;
78
79 if (!cpu_has_feature(CPU_FTR_SMT))
80 return -ENODEV;
81
82 options = of_find_node_by_path("/options");
83 if (!options)
84 return -ENODEV;
85
86 val = of_get_property(options, "ibm,smt-snooze-delay", NULL);
87 if (!smt_snooze_cmdline && val) {
88 for_each_possible_cpu(cpu)
89 per_cpu(smt_snooze_delay, cpu) = *val;
90 }
91
92 of_node_put(options);
93 return 0;
94}
95__initcall(smt_setup);
96
97static int __init setup_smt_snooze_delay(char *str) 70static int __init setup_smt_snooze_delay(char *str)
98{ 71{
99 unsigned int cpu; 72 unsigned int cpu;
100 int snooze; 73 long snooze;
101 74
102 if (!cpu_has_feature(CPU_FTR_SMT)) 75 if (!cpu_has_feature(CPU_FTR_SMT))
103 return 1; 76 return 1;
104 77
105 smt_snooze_cmdline = 1; 78 snooze = simple_strtol(str, NULL, 10);
106 79 for_each_possible_cpu(cpu)
107 if (get_option(&str, &snooze)) { 80 per_cpu(smt_snooze_delay, cpu) = snooze;
108 for_each_possible_cpu(cpu)
109 per_cpu(smt_snooze_delay, cpu) = snooze;
110 }
111 81
112 return 1; 82 return 1;
113} 83}
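With the /options device-tree initcall gone, smt_snooze_delay is seeded from the command line alone, and the switch to long lets a negative value through, presumably so -1 can mean "never snooze". Assuming the handler is registered as a __setup hook (the registration line is not shown in this hunk), the whole path reduces to:

static int __init setup_smt_snooze_delay_sketch(char *str)
{
	long snooze = simple_strtol(str, NULL, 10);	/* may be negative */
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(smt_snooze_delay, cpu) = snooze;
	return 1;
}
__setup("smt-snooze-delay=", setup_smt_snooze_delay_sketch);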
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 1b16b9a3e49a..0441bbdadbd1 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -532,25 +532,60 @@ void __init iSeries_time_init_early(void)
532} 532}
533#endif /* CONFIG_PPC_ISERIES */ 533#endif /* CONFIG_PPC_ISERIES */
534 534
535#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_PPC32) 535#ifdef CONFIG_PERF_EVENTS
536DEFINE_PER_CPU(u8, perf_event_pending);
537 536
538void set_perf_event_pending(void) 537/*
538 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
539 */
540#ifdef CONFIG_PPC64
541static inline unsigned long test_perf_event_pending(void)
539{ 542{
540 get_cpu_var(perf_event_pending) = 1; 543 unsigned long x;
541 set_dec(1); 544
542 put_cpu_var(perf_event_pending); 545 asm volatile("lbz %0,%1(13)"
546 : "=r" (x)
547 : "i" (offsetof(struct paca_struct, perf_event_pending)));
548 return x;
543} 549}
544 550
551static inline void set_perf_event_pending_flag(void)
552{
553 asm volatile("stb %0,%1(13)" : :
554 "r" (1),
555 "i" (offsetof(struct paca_struct, perf_event_pending)));
556}
557
558static inline void clear_perf_event_pending(void)
559{
560 asm volatile("stb %0,%1(13)" : :
561 "r" (0),
562 "i" (offsetof(struct paca_struct, perf_event_pending)));
563}
564
565#else /* 32-bit */
566
567DEFINE_PER_CPU(u8, perf_event_pending);
568
569#define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1
545#define test_perf_event_pending() __get_cpu_var(perf_event_pending) 570#define test_perf_event_pending() __get_cpu_var(perf_event_pending)
546#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0 571#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0
547 572
548#else /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */ 573#endif /* 32 vs 64 bit */
574
575void set_perf_event_pending(void)
576{
577 preempt_disable();
578 set_perf_event_pending_flag();
579 set_dec(1);
580 preempt_enable();
581}
582
583#else /* CONFIG_PERF_EVENTS */
549 584
550#define test_perf_event_pending() 0 585#define test_perf_event_pending() 0
551#define clear_perf_event_pending() 586#define clear_perf_event_pending()
552 587
553#endif /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */ 588#endif /* CONFIG_PERF_EVENTS */
554 589
555/* 590/*
556 * For iSeries shared processors, we have to let the hypervisor 591 * For iSeries shared processors, we have to let the hypervisor
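On 64-bit the flag lives in the PACA, which is always addressable through r13; the explicit lbz/stb keeps the access tied to the current CPU's r13 even where the compiler might otherwise have cached a paca pointer across a preemption point. In plain C it is roughly equivalent to this sketch, using the register variable that asm/paca.h declares:

register struct paca_struct *local_paca asm("r13");

static inline void set_perf_event_pending_flag_sketch(void)
{
	/* same store the asm version does, minus the r13 guarantee */
	local_paca->perf_event_pending = 1;
}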
@@ -582,10 +617,6 @@ void timer_interrupt(struct pt_regs * regs)
582 set_dec(DECREMENTER_MAX); 617 set_dec(DECREMENTER_MAX);
583 618
584#ifdef CONFIG_PPC32 619#ifdef CONFIG_PPC32
585 if (test_perf_event_pending()) {
586 clear_perf_event_pending();
587 perf_event_do_pending();
588 }
589 if (atomic_read(&ppc_n_lost_interrupts) != 0) 620 if (atomic_read(&ppc_n_lost_interrupts) != 0)
590 do_IRQ(regs); 621 do_IRQ(regs);
591#endif 622#endif
@@ -604,6 +635,11 @@ void timer_interrupt(struct pt_regs * regs)
604 635
605 calculate_steal_time(); 636 calculate_steal_time();
606 637
638 if (test_perf_event_pending()) {
639 clear_perf_event_pending();
640 perf_event_do_pending();
641 }
642
607#ifdef CONFIG_PPC_ISERIES 643#ifdef CONFIG_PPC_ISERIES
608 if (firmware_has_feature(FW_FEATURE_ISERIES)) 644 if (firmware_has_feature(FW_FEATURE_ISERIES))
609 get_lppaca()->int_dword.fields.decr_int = 0; 645 get_lppaca()->int_dword.fields.decr_int = 0;
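The consumer side of the flag now sits in the common path of timer_interrupt(), after calculate_steal_time(), instead of the PPC32-only block, so both word sizes drain pending perf work the same way. The producer is the performance-monitor interrupt, where calling the callbacks directly is unsafe; a sketch of that deferral (handler name invented):

static void pmu_overflow_sketch(struct pt_regs *regs)
{
	/*
	 * Too fragile a context to run perf callbacks here: latch the
	 * per-CPU flag and force a near-immediate decrementer tick.
	 * timer_interrupt() will see test_perf_event_pending(), clear
	 * it, and call perf_event_do_pending() for us.
	 */
	set_perf_event_pending();
}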
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 29d128eb6c43..3031fc712ad0 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -380,6 +380,46 @@ int machine_check_440A(struct pt_regs *regs)
380 } 380 }
381 return 0; 381 return 0;
382} 382}
383
384int machine_check_47x(struct pt_regs *regs)
385{
386 unsigned long reason = get_mc_reason(regs);
387 u32 mcsr;
388
389 printk(KERN_ERR "Machine check in kernel mode.\n");
390 if (reason & ESR_IMCP) {
391 printk(KERN_ERR
392 "Instruction Synchronous Machine Check exception\n");
393 mtspr(SPRN_ESR, reason & ~ESR_IMCP);
394 return 0;
395 }
396 mcsr = mfspr(SPRN_MCSR);
397 if (mcsr & MCSR_IB)
398 printk(KERN_ERR "Instruction Read PLB Error\n");
399 if (mcsr & MCSR_DRB)
400 printk(KERN_ERR "Data Read PLB Error\n");
401 if (mcsr & MCSR_DWB)
402 printk(KERN_ERR "Data Write PLB Error\n");
403 if (mcsr & MCSR_TLBP)
404 printk(KERN_ERR "TLB Parity Error\n");
405 if (mcsr & MCSR_ICP) {
406 flush_instruction_cache();
407 printk(KERN_ERR "I-Cache Parity Error\n");
408 }
409 if (mcsr & MCSR_DCSP)
410 printk(KERN_ERR "D-Cache Search Parity Error\n");
411 if (mcsr & PPC47x_MCSR_GPR)
412 printk(KERN_ERR "GPR Parity Error\n");
413 if (mcsr & PPC47x_MCSR_FPR)
414 printk(KERN_ERR "FPR Parity Error\n");
415 if (mcsr & PPC47x_MCSR_IPR)
416 printk(KERN_ERR "Machine Check exception is imprecise\n");
417
418 /* Clear MCSR */
419 mtspr(SPRN_MCSR, mcsr);
420
421 return 0;
422}
383#elif defined(CONFIG_E500) 423#elif defined(CONFIG_E500)
384int machine_check_e500(struct pt_regs *regs) 424int machine_check_e500(struct pt_regs *regs)
385{ 425{
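machine_check_47x() decodes MCSR with a chain of ifs; the same reporting could be table-driven, which keeps the bit-to-message mapping in one place as more bits get reported. A sketch using the same bits as the patch (MCSR_ICP stays special-cased because of its flush_instruction_cache() side effect):

static const struct {
	u32 mask;
	const char *msg;
} mcsr_msgs[] = {
	{ MCSR_IB,		"Instruction Read PLB Error" },
	{ MCSR_DRB,		"Data Read PLB Error" },
	{ MCSR_DWB,		"Data Write PLB Error" },
	{ MCSR_TLBP,		"TLB Parity Error" },
	{ MCSR_DCSP,		"D-Cache Search Parity Error" },
	{ PPC47x_MCSR_GPR,	"GPR Parity Error" },
	{ PPC47x_MCSR_FPR,	"FPR Parity Error" },
};

	int i;

	for (i = 0; i < ARRAY_SIZE(mcsr_msgs); i++)
		if (mcsr & mcsr_msgs[i].mask)
			printk(KERN_ERR "%s\n", mcsr_msgs[i].msg);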
@@ -815,12 +855,15 @@ void __kprobes program_check_exception(struct pt_regs *regs)
815 return; 855 return;
816 } 856 }
817 if (reason & REASON_TRAP) { 857 if (reason & REASON_TRAP) {
858 /* Debugger is first in line to stop recursive faults in
859 * rcu_lock, notify_die, or atomic_notifier_call_chain */
860 if (debugger_bpt(regs))
861 return;
862
818 /* trap exception */ 863 /* trap exception */
819 if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) 864 if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
820 == NOTIFY_STOP) 865 == NOTIFY_STOP)
821 return; 866 return;
822 if (debugger_bpt(regs))
823 return;
824 867
825 if (!(regs->msr & MSR_PR) && /* not user-mode */ 868 if (!(regs->msr & MSR_PR) && /* not user-mode */
826 report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { 869 report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
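Moving debugger_bpt() ahead of notify_die() matters because the notifier chain goes through rcu_lock and the atomic notifier machinery; if the debugger itself faults, re-entering that path can recurse, whereas the direct call cannot. For reference, a chain consumer registers roughly like this (sketch; handler body invented):

static int bpt_notify_sketch(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	if (action != DIE_BPT)
		return NOTIFY_DONE;
	/* ... examine the regs in the die_args payload ... */
	return NOTIFY_STOP;	/* swallow the trap */
}

static struct notifier_block bpt_nb_sketch = {
	.notifier_call	= bpt_notify_sketch,
};

/* registered once at init time: register_die_notifier(&bpt_nb_sketch); */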
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 4cdd0f6df8bf..00b9436f7652 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -645,8 +645,10 @@ void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
645 found = 1; 645 found = 1;
646 break; 646 break;
647 } 647 }
648 if (!found) 648 if (!found) {
649 spin_unlock_irqrestore(&vio_cmo.lock, flags);
649 return; 650 return;
651 }
650 652
651 /* Increase/decrease in desired device entitlement */ 653 /* Increase/decrease in desired device entitlement */
652 if (desired >= viodev->cmo.desired) { 654 if (desired >= viodev->cmo.desired) {
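The two added lines close a real leak: the early return path exited with vio_cmo.lock still held and interrupts still off. The usual kernel idiom that makes this class of bug harder to write funnels every exit through one unlock label (sketch with invented names):

static DEFINE_SPINLOCK(example_lock);

static void example_set_desired(bool found)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	if (!found)
		goto out;	/* single exit keeps lock/unlock paired */
	/* ... adjust entitlement under the lock ... */
out:
	spin_unlock_irqrestore(&example_lock, flags);
}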
@@ -958,9 +960,12 @@ viodev_cmo_rd_attr(allocated);
958 960
959static ssize_t name_show(struct device *, struct device_attribute *, char *); 961static ssize_t name_show(struct device *, struct device_attribute *, char *);
960static ssize_t devspec_show(struct device *, struct device_attribute *, char *); 962static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
963static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
964 char *buf);
961static struct device_attribute vio_cmo_dev_attrs[] = { 965static struct device_attribute vio_cmo_dev_attrs[] = {
962 __ATTR_RO(name), 966 __ATTR_RO(name),
963 __ATTR_RO(devspec), 967 __ATTR_RO(devspec),
968 __ATTR_RO(modalias),
964 __ATTR(cmo_desired, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH, 969 __ATTR(cmo_desired, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
965 viodev_cmo_desired_show, viodev_cmo_desired_set), 970 viodev_cmo_desired_show, viodev_cmo_desired_set),
966 __ATTR(cmo_entitled, S_IRUGO, viodev_cmo_entitled_show, NULL), 971 __ATTR(cmo_entitled, S_IRUGO, viodev_cmo_entitled_show, NULL),
@@ -1320,9 +1325,27 @@ static ssize_t devspec_show(struct device *dev,
1320 return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none"); 1325 return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none");
1321} 1326}
1322 1327
1328static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
1329 char *buf)
1330{
1331 const struct vio_dev *vio_dev = to_vio_dev(dev);
1332 struct device_node *dn;
1333 const char *cp;
1334
1335 dn = dev->of_node;
1336 if (!dn)
1337 return -ENODEV;
1338 cp = of_get_property(dn, "compatible", NULL);
1339 if (!cp)
1340 return -ENODEV;
1341
1342 return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
1343}
1344
1323static struct device_attribute vio_dev_attrs[] = { 1345static struct device_attribute vio_dev_attrs[] = {
1324 __ATTR_RO(name), 1346 __ATTR_RO(name),
1325 __ATTR_RO(devspec), 1347 __ATTR_RO(devspec),
1348 __ATTR_RO(modalias),
1326 __ATTR_NULL 1349 __ATTR_NULL
1327}; 1350};
1328 1351
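The exported string, vio:T<type>S<compat>, matches the alias format modpost generates from a driver's vio ID table, which is what lets udev modprobe the right module from the uevent. A driver-side table of this era looks something like the following (the network/IBM,l-lan pair follows the ibmveth shape, shown here only as an example):

static struct vio_device_id example_ids[] __devinitdata = {
	{ "network", "IBM,l-lan" },
	{ "", "" },	/* terminator */
};
MODULE_DEVICE_TABLE(vio, example_ids);

Reading the new attribute for such a device would then print something like vio:TnetworkSIBM,l-lan.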
@@ -1365,6 +1388,7 @@ static struct bus_type vio_bus_type = {
1365 .match = vio_bus_match, 1388 .match = vio_bus_match,
1366 .probe = vio_bus_probe, 1389 .probe = vio_bus_probe,
1367 .remove = vio_bus_remove, 1390 .remove = vio_bus_remove,
1391 .pm = GENERIC_SUBSYS_PM_OPS,
1368}; 1392};
1369 1393
1370/** 1394/**
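Wiring .pm to GENERIC_SUBSYS_PM_OPS hands the bus the generic subsystem dev_pm_ops table, which (if I read the PM core of this vintage correctly) simply forwards each callback to the driver's own dev_pm_ops, or compiles away to NULL without PM support configured. A vio driver opting in would then carry something like this sketch (names invented):

static int example_vio_suspend(struct device *dev)
{
	/* quiesce the virtual adapter */
	return 0;
}

static int example_vio_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops example_vio_pm_ops = {
	.suspend	= example_vio_suspend,
	.resume		= example_vio_resume,
};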