Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c       | 190
-rw-r--r--  arch/powerpc/kernel/cpu_setup_power7.S  |  22
-rw-r--r--  arch/powerpc/kernel/cpu_setup_ppc970.S  |  26
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S    | 228
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S    |   8
-rw-r--r--  arch/powerpc/kernel/idle_power7.S       |   2
-rw-r--r--  arch/powerpc/kernel/paca.c              |   2
-rw-r--r--  arch/powerpc/kernel/process.c           |   4
-rw-r--r--  arch/powerpc/kernel/setup-common.c      |   3
-rw-r--r--  arch/powerpc/kernel/setup_64.c          |   3
-rw-r--r--  arch/powerpc/kernel/smp.c               |   1
-rw-r--r--  arch/powerpc/kernel/traps.c             |   5
12 files changed, 338 insertions(+), 156 deletions(-)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 36e1c8a29be8..54b935f2f5de 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -128,6 +128,7 @@ int main(void)
 	DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
 	/* paca */
 	DEFINE(PACA_SIZE, sizeof(struct paca_struct));
+	DEFINE(PACA_LOCK_TOKEN, offsetof(struct paca_struct, lock_token));
 	DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
 	DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
 	DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
@@ -187,7 +188,9 @@ int main(void)
 	DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
 	DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
 	DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
+	DEFINE(LPPACA_PMCINUSE, offsetof(struct lppaca, pmcregs_in_use));
 	DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx));
+	DEFINE(LPPACA_YIELDCOUNT, offsetof(struct lppaca, yield_count));
 	DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx));
 #endif /* CONFIG_PPC_STD_MMU_64 */
 	DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
@@ -198,11 +201,6 @@ int main(void)
 	DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
 	DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
 	DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-	DEFINE(PACA_KVM_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
-	DEFINE(SVCPU_SLB, offsetof(struct kvmppc_book3s_shadow_vcpu, slb));
-	DEFINE(SVCPU_SLB_MAX, offsetof(struct kvmppc_book3s_shadow_vcpu, slb_max));
-#endif
 #endif /* CONFIG_PPC64 */
 
 	/* RTAS */
@@ -397,67 +395,160 @@ int main(void)
 	DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
 	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
 	DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
+	DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
+	DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fpscr));
+#ifdef CONFIG_ALTIVEC
+	DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr));
+	DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vscr));
+#endif
+#ifdef CONFIG_VSX
+	DEFINE(VCPU_VSRS, offsetof(struct kvm_vcpu, arch.vsr));
+#endif
+	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
+	DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
+	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
+	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
+	DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+	DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr));
+	DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0));
+	DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1));
+	DEFINE(VCPU_SPRG0, offsetof(struct kvm_vcpu, arch.shregs.sprg0));
+	DEFINE(VCPU_SPRG1, offsetof(struct kvm_vcpu, arch.shregs.sprg1));
+	DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2));
+	DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3));
+#endif
 	DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
 	DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
 	DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
 	DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
 	DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
+	DEFINE(VCPU_SHADOW_PID1, offsetof(struct kvm_vcpu, arch.shadow_pid1));
 	DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
 	DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
+	DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
 
 	/* book3s */
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+	DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
+	DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
+	DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
+	DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
+	DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
+	DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
+	DEFINE(KVM_ONLINE_CPUS, offsetof(struct kvm, online_vcpus.counter));
+	DEFINE(KVM_LAST_VCPU, offsetof(struct kvm, arch.last_vcpu));
+	DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
+	DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor));
+	DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
+	DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
+#endif
 #ifdef CONFIG_PPC_BOOK3S
+	DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
+	DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
 	DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
 	DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
-	DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
+	DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
+	DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
+	DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr));
+	DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr));
+	DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor));
+	DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl));
+	DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr));
 	DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
 	DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
 	DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
 	DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
 	DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
+	DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec));
+	DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires));
+	DEFINE(VCPU_PENDING_EXC, offsetof(struct kvm_vcpu, arch.pending_exceptions));
+	DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa));
+	DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
+	DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
+	DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
+	DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
+	DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
+	DEFINE(VCPU_LAST_CPU, offsetof(struct kvm_vcpu, arch.last_cpu));
+	DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
+	DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
+	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
+	DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
+	DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
+	DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
+	DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
+	DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
 	DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
 			   offsetof(struct kvmppc_vcpu_book3s, vcpu));
-	DEFINE(SVCPU_CR, offsetof(struct kvmppc_book3s_shadow_vcpu, cr));
-	DEFINE(SVCPU_XER, offsetof(struct kvmppc_book3s_shadow_vcpu, xer));
-	DEFINE(SVCPU_CTR, offsetof(struct kvmppc_book3s_shadow_vcpu, ctr));
-	DEFINE(SVCPU_LR, offsetof(struct kvmppc_book3s_shadow_vcpu, lr));
-	DEFINE(SVCPU_PC, offsetof(struct kvmppc_book3s_shadow_vcpu, pc));
-	DEFINE(SVCPU_R0, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[0]));
-	DEFINE(SVCPU_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[1]));
-	DEFINE(SVCPU_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[2]));
-	DEFINE(SVCPU_R3, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[3]));
-	DEFINE(SVCPU_R4, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[4]));
-	DEFINE(SVCPU_R5, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[5]));
-	DEFINE(SVCPU_R6, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[6]));
-	DEFINE(SVCPU_R7, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[7]));
-	DEFINE(SVCPU_R8, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[8]));
-	DEFINE(SVCPU_R9, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[9]));
-	DEFINE(SVCPU_R10, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[10]));
-	DEFINE(SVCPU_R11, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[11]));
-	DEFINE(SVCPU_R12, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[12]));
-	DEFINE(SVCPU_R13, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[13]));
-	DEFINE(SVCPU_HOST_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r1));
-	DEFINE(SVCPU_HOST_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r2));
-	DEFINE(SVCPU_VMHANDLER, offsetof(struct kvmppc_book3s_shadow_vcpu,
-					 vmhandler));
-	DEFINE(SVCPU_SCRATCH0, offsetof(struct kvmppc_book3s_shadow_vcpu,
-					scratch0));
-	DEFINE(SVCPU_SCRATCH1, offsetof(struct kvmppc_book3s_shadow_vcpu,
-					scratch1));
-	DEFINE(SVCPU_IN_GUEST, offsetof(struct kvmppc_book3s_shadow_vcpu,
-					in_guest));
-	DEFINE(SVCPU_FAULT_DSISR, offsetof(struct kvmppc_book3s_shadow_vcpu,
-					   fault_dsisr));
-	DEFINE(SVCPU_FAULT_DAR, offsetof(struct kvmppc_book3s_shadow_vcpu,
-					fault_dar));
-	DEFINE(SVCPU_LAST_INST, offsetof(struct kvmppc_book3s_shadow_vcpu,
-					 last_inst));
-	DEFINE(SVCPU_SHADOW_SRR1, offsetof(struct kvmppc_book3s_shadow_vcpu,
-					   shadow_srr1));
+	DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
+	DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
+	DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_KVM_BOOK3S_PR
+# define SVCPU_FIELD(x, f)	DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
+#else
+# define SVCPU_FIELD(x, f)
+#endif
+# define HSTATE_FIELD(x, f)	DEFINE(x, offsetof(struct paca_struct, kvm_hstate.f))
+#else	/* 32-bit */
+# define SVCPU_FIELD(x, f)	DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, f))
+# define HSTATE_FIELD(x, f)	DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, hstate.f))
+#endif
+
+	SVCPU_FIELD(SVCPU_CR, cr);
+	SVCPU_FIELD(SVCPU_XER, xer);
+	SVCPU_FIELD(SVCPU_CTR, ctr);
+	SVCPU_FIELD(SVCPU_LR, lr);
+	SVCPU_FIELD(SVCPU_PC, pc);
+	SVCPU_FIELD(SVCPU_R0, gpr[0]);
+	SVCPU_FIELD(SVCPU_R1, gpr[1]);
+	SVCPU_FIELD(SVCPU_R2, gpr[2]);
+	SVCPU_FIELD(SVCPU_R3, gpr[3]);
+	SVCPU_FIELD(SVCPU_R4, gpr[4]);
+	SVCPU_FIELD(SVCPU_R5, gpr[5]);
+	SVCPU_FIELD(SVCPU_R6, gpr[6]);
+	SVCPU_FIELD(SVCPU_R7, gpr[7]);
+	SVCPU_FIELD(SVCPU_R8, gpr[8]);
+	SVCPU_FIELD(SVCPU_R9, gpr[9]);
+	SVCPU_FIELD(SVCPU_R10, gpr[10]);
+	SVCPU_FIELD(SVCPU_R11, gpr[11]);
+	SVCPU_FIELD(SVCPU_R12, gpr[12]);
+	SVCPU_FIELD(SVCPU_R13, gpr[13]);
+	SVCPU_FIELD(SVCPU_FAULT_DSISR, fault_dsisr);
+	SVCPU_FIELD(SVCPU_FAULT_DAR, fault_dar);
+	SVCPU_FIELD(SVCPU_LAST_INST, last_inst);
+	SVCPU_FIELD(SVCPU_SHADOW_SRR1, shadow_srr1);
 #ifdef CONFIG_PPC_BOOK3S_32
-	DEFINE(SVCPU_SR, offsetof(struct kvmppc_book3s_shadow_vcpu, sr));
+	SVCPU_FIELD(SVCPU_SR, sr);
 #endif
-#else
+#ifdef CONFIG_PPC64
+	SVCPU_FIELD(SVCPU_SLB, slb);
+	SVCPU_FIELD(SVCPU_SLB_MAX, slb_max);
+#endif
+
+	HSTATE_FIELD(HSTATE_HOST_R1, host_r1);
+	HSTATE_FIELD(HSTATE_HOST_R2, host_r2);
+	HSTATE_FIELD(HSTATE_HOST_MSR, host_msr);
+	HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
+	HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
+	HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
+	HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
+
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+	HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
+	HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
+	HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
+	HSTATE_FIELD(HSTATE_MMCR, host_mmcr);
+	HSTATE_FIELD(HSTATE_PMC, host_pmc);
+	HSTATE_FIELD(HSTATE_PURR, host_purr);
+	HSTATE_FIELD(HSTATE_SPURR, host_spurr);
+	HSTATE_FIELD(HSTATE_DSCR, host_dscr);
+	HSTATE_FIELD(HSTATE_DABR, dabr);
+	HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
+
+#else /* CONFIG_PPC_BOOK3S */
 	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
 	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
 	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
@@ -467,7 +558,7 @@ int main(void)
 	DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
 	DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
 #endif /* CONFIG_PPC_BOOK3S */
-#endif
+#endif /* CONFIG_KVM */
 
 #ifdef CONFIG_KVM_GUEST
 	DEFINE(KVM_MAGIC_SCRATCH1, offsetof(struct kvm_vcpu_arch_shared,
@@ -497,6 +588,13 @@ int main(void)
 	DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
 #endif
 
+#if defined(CONFIG_KVM) && defined(CONFIG_SPE)
+	DEFINE(VCPU_EVR, offsetof(struct kvm_vcpu, arch.evr[0]));
+	DEFINE(VCPU_ACC, offsetof(struct kvm_vcpu, arch.acc));
+	DEFINE(VCPU_SPEFSCR, offsetof(struct kvm_vcpu, arch.spefscr));
+	DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr));
+#endif
+
 #ifdef CONFIG_KVM_EXIT_TIMING
 	DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
 						arch.timing_exit.tv32.tbu));
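
All of the DEFINE() lines above feed the asm-offsets machinery: asm-offsets.c is compiled but never linked, each DEFINE() plants a marker in the generated assembly, and the build scrapes those markers into a generated asm-offsets.h so that the assembly files changed below can use names such as HSTATE_IN_GUEST or VCPU_GPRS as immediate offsets. A minimal sketch of the mechanism, assuming the helper still has roughly the shape it has in include/linux/kbuild.h (the numeric offset shown is invented for illustration):

/* Sketch of the kbuild helper behind every DEFINE() above (illustrative). */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

/*
 * Compiling
 *	DEFINE(HSTATE_IN_GUEST, offsetof(struct paca_struct, kvm_hstate.in_guest));
 * emits an assembly line such as
 *	->HSTATE_IN_GUEST 1234 offsetof(struct paca_struct, kvm_hstate.in_guest)
 * (1234 is a made-up offset), which a build script turns into
 *	#define HSTATE_IN_GUEST 1234
 * in the generated asm-offsets.h for use by exceptions-64s.S and friends.
 */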
diff --git a/arch/powerpc/kernel/cpu_setup_power7.S b/arch/powerpc/kernel/cpu_setup_power7.S
index 4f9a93fcfe07..76797c5105d6 100644
--- a/arch/powerpc/kernel/cpu_setup_power7.S
+++ b/arch/powerpc/kernel/cpu_setup_power7.S
@@ -45,12 +45,12 @@ _GLOBAL(__restore_cpu_power7)
 	blr
 
 __init_hvmode_206:
-	/* Disable CPU_FTR_HVMODE_206 and exit if MSR:HV is not set */
+	/* Disable CPU_FTR_HVMODE and exit if MSR:HV is not set */
 	mfmsr	r3
 	rldicl.	r0,r3,4,63
 	bnelr
 	ld	r5,CPU_SPEC_FEATURES(r4)
-	LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE_206)
+	LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE)
 	xor	r5,r5,r6
 	std	r5,CPU_SPEC_FEATURES(r4)
 	blr
@@ -61,19 +61,23 @@ __init_LPCR:
 	 * LPES = 0b01 (HSRR0/1 used for 0x500)
 	 * PECE = 0b111
 	 * DPFD = 4
+	 * HDICE = 0
+	 * VC = 0b100 (VPM0=1, VPM1=0, ISL=0)
+	 * VRMASD = 0b10000 (L=1, LP=00)
 	 *
 	 * Other bits untouched for now
 	 */
 	mfspr	r3,SPRN_LPCR
-	ori	r3,r3,(LPCR_LPES0|LPCR_LPES1)
-	xori	r3,r3, LPCR_LPES0
+	li	r5,1
+	rldimi	r3,r5, LPCR_LPES_SH, 64-LPCR_LPES_SH-2
 	ori	r3,r3,(LPCR_PECE0|LPCR_PECE1|LPCR_PECE2)
-	li	r5,7
-	sldi	r5,r5,LPCR_DPFD_SH
-	andc	r3,r3,r5
 	li	r5,4
-	sldi	r5,r5,LPCR_DPFD_SH
-	or	r3,r3,r5
+	rldimi	r3,r5, LPCR_DPFD_SH, 64-LPCR_DPFD_SH-3
+	clrrdi	r3,r3,1			/* clear HDICE */
+	li	r5,4
+	rldimi	r3,r5, LPCR_VC_SH, 0
+	li	r5,0x10
+	rldimi	r3,r5, LPCR_VRMASD_SH, 64-LPCR_VRMASD_SH-5
 	mtspr	SPRN_LPCR,r3
 	isync
 	blr
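
The rewritten __init_LPCR replaces the old ori/xori/andc bit twiddling with rldimi, which rotates a source register and inserts it into the destination under a mask, so one instruction replaces a whole clear-then-set sequence. In C terms, each "li r5,val" / "rldimi r3,r5,sh,64-sh-width" pair behaves like the illustrative helper below (the helper itself is not kernel code):

/* C picture of "li r5,val; rldimi r3,r5,sh,64-sh-width":
 * replace the width-bit field at bit position sh in reg with val. */
static inline unsigned long insert_field(unsigned long reg, unsigned int sh,
					 unsigned int width, unsigned long val)
{
	unsigned long mask = ((1UL << width) - 1) << sh;

	return (reg & ~mask) | ((val << sh) & mask);
}

/* e.g. the DPFD = 4 step above is roughly:
 *	lpcr = insert_field(lpcr, LPCR_DPFD_SH, 3, 4);
 */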
diff --git a/arch/powerpc/kernel/cpu_setup_ppc970.S b/arch/powerpc/kernel/cpu_setup_ppc970.S
index 27f2507279d8..12fac8df01c5 100644
--- a/arch/powerpc/kernel/cpu_setup_ppc970.S
+++ b/arch/powerpc/kernel/cpu_setup_ppc970.S
@@ -76,7 +76,7 @@ _GLOBAL(__setup_cpu_ppc970)
 	/* Do nothing if not running in HV mode */
 	mfmsr	r0
 	rldicl.	r0,r0,4,63
-	beqlr
+	beq	no_hv_mode
 
 	mfspr	r0,SPRN_HID0
 	li	r11,5			/* clear DOZE and SLEEP */
@@ -90,7 +90,7 @@ _GLOBAL(__setup_cpu_ppc970MP)
 	/* Do nothing if not running in HV mode */
 	mfmsr	r0
 	rldicl.	r0,r0,4,63
-	beqlr
+	beq	no_hv_mode
 
 	mfspr	r0,SPRN_HID0
 	li	r11,0x15		/* clear DOZE and SLEEP */
@@ -109,6 +109,14 @@ load_hids:
 	sync
 	isync
 
+	/* Try to set LPES = 01 in HID4 */
+	mfspr	r0,SPRN_HID4
+	clrldi	r0,r0,1			/* clear LPES0 */
+	ori	r0,r0,HID4_LPES1	/* set LPES1 */
+	sync
+	mtspr	SPRN_HID4,r0
+	isync
+
 	/* Save away cpu state */
 	LOAD_REG_ADDR(r5,cpu_state_storage)
 
@@ -117,11 +125,21 @@ load_hids:
 	std	r3,CS_HID0(r5)
 	mfspr	r3,SPRN_HID1
 	std	r3,CS_HID1(r5)
-	mfspr	r3,SPRN_HID4
-	std	r3,CS_HID4(r5)
+	mfspr	r4,SPRN_HID4
+	std	r4,CS_HID4(r5)
 	mfspr	r3,SPRN_HID5
 	std	r3,CS_HID5(r5)
 
+	/* See if we successfully set LPES1 to 1; if not we are in Apple mode */
+	andi.	r4,r4,HID4_LPES1
+	bnelr
+
+no_hv_mode:
+	/* Disable CPU_FTR_HVMODE and exit, since we don't have HV mode */
+	ld	r5,CPU_SPEC_FEATURES(r4)
+	LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE)
+	andc	r5,r5,r6
+	std	r5,CPU_SPEC_FEATURES(r4)
 	blr
 
 /* Called with no MMU context (typically MSR:IR/DR off) to
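
The new no_hv_mode path is the assembly form of clearing one feature bit in the cpu_spec whose address arrives in r4; with the bit gone, BEGIN_FTR_SECTION blocks guarded by CPU_FTR_HVMODE get patched out at boot. As a one-line C sketch (spec stands for the struct cpu_spec that r4 points at):

/* C equivalent of the ld/andc/std sequence at no_hv_mode (sketch). */
spec->cpu_features &= ~CPU_FTR_HVMODE;	/* not running with hypervisor privileges */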
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index a85f4874cba7..41b02c792aa3 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -40,7 +40,6 @@ __start_interrupts:
 	.globl system_reset_pSeries;
 system_reset_pSeries:
 	HMT_MEDIUM;
-	DO_KVM	0x100;
 	SET_SCRATCH0(r13)
 #ifdef CONFIG_PPC_P7_NAP
 BEGIN_FTR_SECTION
@@ -50,82 +49,73 @@ BEGIN_FTR_SECTION
 	 * state loss at this time.
 	 */
 	mfspr	r13,SPRN_SRR1
-	rlwinm	r13,r13,47-31,30,31
-	cmpwi	cr0,r13,1
-	bne	1f
-	b	.power7_wakeup_noloss
-1:	cmpwi	cr0,r13,2
-	bne	1f
-	b	.power7_wakeup_loss
+	rlwinm.	r13,r13,47-31,30,31
+	beq	9f
+
+	/* waking up from powersave (nap) state */
+	cmpwi	cr1,r13,2
 	/* Total loss of HV state is fatal, we could try to use the
 	 * PIR to locate a PACA, then use an emergency stack etc...
 	 * but for now, let's just stay stuck here
 	 */
-1:	cmpwi	cr0,r13,3
-	beq	.
-END_FTR_SECTION_IFSET(CPU_FTR_HVMODE_206)
+	bgt	cr1,.
+	GET_PACA(r13)
+
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+	lbz	r0,PACAPROCSTART(r13)
+	cmpwi	r0,0x80
+	bne	1f
+	li	r0,0
+	stb	r0,PACAPROCSTART(r13)
+	b	kvm_start_guest
+1:
+#endif
+
+	beq	cr1,2f
+	b	.power7_wakeup_noloss
+2:	b	.power7_wakeup_loss
+9:
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 #endif /* CONFIG_PPC_P7_NAP */
-	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD)
+	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
+				 NOTEST, 0x100)
 
 	. = 0x200
-_machine_check_pSeries:
-	HMT_MEDIUM
-	DO_KVM	0x200
-	SET_SCRATCH0(r13)
-	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD)
+machine_check_pSeries_1:
+	/* This is moved out of line as it can be patched by FW, but
+	 * some code path might still want to branch into the original
+	 * vector
+	 */
+	b	machine_check_pSeries
 
 	. = 0x300
 	.globl data_access_pSeries
 data_access_pSeries:
 	HMT_MEDIUM
-	DO_KVM	0x300
 	SET_SCRATCH0(r13)
+#ifndef CONFIG_POWER4_ONLY
 BEGIN_FTR_SECTION
-	GET_PACA(r13)
-	std	r9,PACA_EXSLB+EX_R9(r13)
-	std	r10,PACA_EXSLB+EX_R10(r13)
-	mfspr	r10,SPRN_DAR
-	mfspr	r9,SPRN_DSISR
-	srdi	r10,r10,60
-	rlwimi	r10,r9,16,0x20
-	mfcr	r9
-	cmpwi	r10,0x2c
-	beq	do_stab_bolted_pSeries
-	ld	r10,PACA_EXSLB+EX_R10(r13)
-	std	r11,PACA_EXGEN+EX_R11(r13)
-	ld	r11,PACA_EXSLB+EX_R9(r13)
-	std	r12,PACA_EXGEN+EX_R12(r13)
-	GET_SCRATCH0(r12)
-	std	r10,PACA_EXGEN+EX_R10(r13)
-	std	r11,PACA_EXGEN+EX_R9(r13)
-	std	r12,PACA_EXGEN+EX_R13(r13)
-	EXCEPTION_PROLOG_PSERIES_1(data_access_common, EXC_STD)
-FTR_SECTION_ELSE
-	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD)
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB)
+	b	data_access_check_stab
+data_access_not_stab:
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
+#endif
+	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
+				 KVMTEST_PR, 0x300)
 
 	. = 0x380
 	.globl data_access_slb_pSeries
 data_access_slb_pSeries:
 	HMT_MEDIUM
-	DO_KVM	0x380
 	SET_SCRATCH0(r13)
-	GET_PACA(r13)
+	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
 	std	r3,PACA_EXSLB+EX_R3(r13)
 	mfspr	r3,SPRN_DAR
-	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
-	mfcr	r9
 #ifdef __DISABLED__
 	/* Keep that around for when we re-implement dynamic VSIDs */
 	cmpdi	r3,0
 	bge	slb_miss_user_pseries
 #endif /* __DISABLED__ */
-	std	r10,PACA_EXSLB+EX_R10(r13)
-	std	r11,PACA_EXSLB+EX_R11(r13)
-	std	r12,PACA_EXSLB+EX_R12(r13)
-	GET_SCRATCH0(r10)
-	std	r10,PACA_EXSLB+EX_R13(r13)
-	mfspr	r12,SPRN_SRR1		/* and SRR1 */
+	mfspr	r12,SPRN_SRR1
 #ifndef CONFIG_RELOCATABLE
 	b	.slb_miss_realmode
 #else
@@ -147,24 +137,16 @@ data_access_slb_pSeries:
 	.globl instruction_access_slb_pSeries
 instruction_access_slb_pSeries:
 	HMT_MEDIUM
-	DO_KVM	0x480
 	SET_SCRATCH0(r13)
-	GET_PACA(r13)
+	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
 	std	r3,PACA_EXSLB+EX_R3(r13)
 	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
-	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
-	mfcr	r9
 #ifdef __DISABLED__
 	/* Keep that around for when we re-implement dynamic VSIDs */
 	cmpdi	r3,0
 	bge	slb_miss_user_pseries
 #endif /* __DISABLED__ */
-	std	r10,PACA_EXSLB+EX_R10(r13)
-	std	r11,PACA_EXSLB+EX_R11(r13)
-	std	r12,PACA_EXSLB+EX_R12(r13)
-	GET_SCRATCH0(r10)
-	std	r10,PACA_EXSLB+EX_R13(r13)
-	mfspr	r12,SPRN_SRR1		/* and SRR1 */
+	mfspr	r12,SPRN_SRR1
 #ifndef CONFIG_RELOCATABLE
 	b	.slb_miss_realmode
 #else
@@ -184,26 +166,46 @@ instruction_access_slb_pSeries:
 hardware_interrupt_pSeries:
 hardware_interrupt_hv:
 	BEGIN_FTR_SECTION
-		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD)
+		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
+					    EXC_HV, SOFTEN_TEST_HV)
+		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
 	FTR_SECTION_ELSE
-		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV)
-	ALT_FTR_SECTION_END_IFCLR(CPU_FTR_HVMODE_206)
+		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
+					    EXC_STD, SOFTEN_TEST_HV_201)
+		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
+	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 
 	STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
+
 	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
+
 	STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
 
 	MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
-	MASKABLE_EXCEPTION_HV(0x980, 0x980, decrementer)
+	MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)
 
 	STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
+
 	STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
 
 	. = 0xc00
 	.globl	system_call_pSeries
 system_call_pSeries:
 	HMT_MEDIUM
-	DO_KVM	0xc00
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+	SET_SCRATCH0(r13)
+	GET_PACA(r13)
+	std	r9,PACA_EXGEN+EX_R9(r13)
+	std	r10,PACA_EXGEN+EX_R10(r13)
+	mfcr	r9
+	KVMTEST(0xc00)
+	GET_SCRATCH0(r13)
+#endif
 BEGIN_FTR_SECTION
 	cmpdi	r0,0x1ebe
 	beq-	1f
@@ -220,6 +222,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
 	rfid
 	b	.	/* prevent speculative execution */
 
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
+
 /* Fast LE/BE switch system call */
 1:	mfspr	r12,SPRN_SRR1
 	xori	r12,r12,MSR_LE
@@ -228,6 +232,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
 	b	.
 
 	STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
 
 	/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
 	 * out of line to handle them
@@ -262,30 +267,93 @@ vsx_unavailable_pSeries_1:
 
 #ifdef CONFIG_CBE_RAS
 	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
+	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
 #endif /* CONFIG_CBE_RAS */
+
 	STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
+	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
+
 #ifdef CONFIG_CBE_RAS
 	STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
+	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
 #endif /* CONFIG_CBE_RAS */
+
 	STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
+
 #ifdef CONFIG_CBE_RAS
 	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
+	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
 #endif /* CONFIG_CBE_RAS */
 
 	. = 0x3000
 
 /*** Out of line interrupts support ***/
 
+	/* moved from 0x200 */
+machine_check_pSeries:
+	.globl machine_check_fwnmi
+machine_check_fwnmi:
+	HMT_MEDIUM
+	SET_SCRATCH0(r13)		/* save r13 */
+	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common,
+				 EXC_STD, KVMTEST, 0x200)
+	KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
+
+#ifndef CONFIG_POWER4_ONLY
+	/* moved from 0x300 */
+data_access_check_stab:
+	GET_PACA(r13)
+	std	r9,PACA_EXSLB+EX_R9(r13)
+	std	r10,PACA_EXSLB+EX_R10(r13)
+	mfspr	r10,SPRN_DAR
+	mfspr	r9,SPRN_DSISR
+	srdi	r10,r10,60
+	rlwimi	r10,r9,16,0x20
+#ifdef CONFIG_KVM_BOOK3S_PR
+	lbz	r9,HSTATE_IN_GUEST(r13)
+	rlwimi	r10,r9,8,0x300
+#endif
+	mfcr	r9
+	cmpwi	r10,0x2c
+	beq	do_stab_bolted_pSeries
+	mtcrf	0x80,r9
+	ld	r9,PACA_EXSLB+EX_R9(r13)
+	ld	r10,PACA_EXSLB+EX_R10(r13)
+	b	data_access_not_stab
+do_stab_bolted_pSeries:
+	std	r11,PACA_EXSLB+EX_R11(r13)
+	std	r12,PACA_EXSLB+EX_R12(r13)
+	GET_SCRATCH0(r10)
+	std	r10,PACA_EXSLB+EX_R13(r13)
+	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
+#endif /* CONFIG_POWER4_ONLY */
+
+	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x300)
+	KVM_HANDLER_PR_SKIP(PACA_EXSLB, EXC_STD, 0x380)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
+	KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
+	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
+
+	.align	7
 	/* moved from 0xe00 */
-	STD_EXCEPTION_HV(., 0xe00, h_data_storage)
-	STD_EXCEPTION_HV(., 0xe20, h_instr_storage)
-	STD_EXCEPTION_HV(., 0xe40, emulation_assist)
-	STD_EXCEPTION_HV(., 0xe60, hmi_exception) /* need to flush cache ? */
+	STD_EXCEPTION_HV(., 0xe02, h_data_storage)
+	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
+	STD_EXCEPTION_HV(., 0xe22, h_instr_storage)
+	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
+	STD_EXCEPTION_HV(., 0xe42, emulation_assist)
+	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
+	STD_EXCEPTION_HV(., 0xe62, hmi_exception) /* need to flush cache ? */
+	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
 
 	/* moved from 0xf00 */
 	STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
 	STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
 	STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
 
 /*
  * An interrupt came in while soft-disabled; clear EE in SRR1,
@@ -317,14 +385,6 @@ masked_Hinterrupt:
 	hrfid
 	b	.
 
-	.align	7
-do_stab_bolted_pSeries:
-	std	r11,PACA_EXSLB+EX_R11(r13)
-	std	r12,PACA_EXSLB+EX_R12(r13)
-	GET_SCRATCH0(r10)
-	std	r10,PACA_EXSLB+EX_R13(r13)
-	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
-
 #ifdef CONFIG_PPC_PSERIES
 /*
  * Vectors for the FWNMI option.  Share common code.
@@ -334,14 +394,8 @@ do_stab_bolted_pSeries:
 system_reset_fwnmi:
 	HMT_MEDIUM
 	SET_SCRATCH0(r13)		/* save r13 */
-	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD)
-
-	.globl machine_check_fwnmi
-	.align 7
-machine_check_fwnmi:
-	HMT_MEDIUM
-	SET_SCRATCH0(r13)		/* save r13 */
-	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD)
+	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
+				 NOTEST, 0x100)
 
 #endif /* CONFIG_PPC_PSERIES */
 
@@ -376,7 +430,11 @@ slb_miss_user_pseries:
 /* KVM's trampoline code needs to be close to the interrupt handlers */
 
 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#ifdef CONFIG_KVM_BOOK3S_PR
 #include "../kvm/book3s_rmhandlers.S"
+#else
+#include "../kvm/book3s_hv_rmhandlers.S"
+#endif
 #endif
 
 	.align	7
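
The pattern repeated throughout this file -- a KVMTEST variant in each vector's prolog plus an out-of-line KVM_HANDLER per trap number -- implements one idea: on every exception, decide whether the CPU was executing a guest and, if so, divert to KVM before the regular kernel path runs. A C-style sketch of that control flow (the two handler names are invented; the real test is a few assembly instructions on the PACA):

/* Illustrative control flow of the KVMTEST/KVM_HANDLER split (sketch). */
void exception_entry(unsigned int trap)
{
	/* KVMTEST reads the HSTATE_IN_GUEST byte that the asm-offsets
	 * change above exposes from paca->kvm_hstate. */
	if (get_paca()->kvm_hstate.in_guest)
		kvm_vector_handler(trap);	/* out-of-line KVM_HANDLER */
	else
		kernel_vector_handler(trap);	/* normal EXCEPTION_PROLOG path */
}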
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 5ecf54cfa7d4..fe37dd0dfd17 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -656,7 +656,7 @@ load_up_spe:
 	cmpi	0,r4,0
 	beq	1f
 	addi	r4,r4,THREAD	/* want THREAD of last_task_used_spe */
-	SAVE_32EVRS(0,r10,r4)
+	SAVE_32EVRS(0,r10,r4,THREAD_EVR0)
 	evxor	evr10, evr10, evr10	/* clear out evr10 */
 	evmwumiaa evr10, evr10, evr10	/* evr10 <- ACC = 0 * 0 + ACC */
 	li	r5,THREAD_ACC
@@ -676,7 +676,7 @@ load_up_spe:
 	stw	r4,THREAD_USED_SPE(r5)
 	evlddx	evr4,r10,r5
 	evmra	evr4,evr4
-	REST_32EVRS(0,r10,r5)
+	REST_32EVRS(0,r10,r5,THREAD_EVR0)
 #ifndef CONFIG_SMP
 	subi	r4,r5,THREAD
 	stw	r4,last_task_used_spe@l(r3)
@@ -787,13 +787,11 @@ _GLOBAL(giveup_spe)
 	addi	r3,r3,THREAD		/* want THREAD of task */
 	lwz	r5,PT_REGS(r3)
 	cmpi	0,r5,0
-	SAVE_32EVRS(0, r4, r3)
+	SAVE_32EVRS(0, r4, r3, THREAD_EVR0)
 	evxor	evr6, evr6, evr6	/* clear out evr6 */
 	evmwumiaa evr6, evr6, evr6	/* evr6 <- ACC = 0 * 0 + ACC */
 	li	r4,THREAD_ACC
 	evstddx	evr6, r4, r3		/* save off accumulator */
-	mfspr	r6,SPRN_SPEFSCR
-	stw	r6,THREAD_SPEFSCR(r3)	/* save spefscr register value */
 	beq	1f
 	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 	lis	r3,MSR_SPE@h
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index f8f0bc7f1d4f..3a70845a51c7 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -73,7 +73,6 @@ _GLOBAL(power7_idle)
 	b	.
 
 _GLOBAL(power7_wakeup_loss)
-	GET_PACA(r13)
 	ld	r1,PACAR1(r13)
 	REST_NVGPRS(r1)
 	REST_GPR(2, r1)
@@ -87,7 +86,6 @@ _GLOBAL(power7_wakeup_loss)
 	rfid
 
 _GLOBAL(power7_wakeup_noloss)
-	GET_PACA(r13)
 	ld	r1,PACAR1(r13)
 	ld	r4,_MSR(r1)
 	ld	r5,_NIP(r1)
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index efeb88184182..0a5a899846bb 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -167,7 +167,7 @@ void setup_paca(struct paca_struct *new_paca)
 	 * if we do a GET_PACA() before the feature fixups have been
 	 * applied
 	 */
-	if (cpu_has_feature(CPU_FTR_HVMODE_206))
+	if (cpu_has_feature(CPU_FTR_HVMODE))
 		mtspr(SPRN_SPRG_HPACA, local_paca);
 #endif
 	mtspr(SPRN_SPRG_PACA, local_paca);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 91e52df3d81d..ec2d0edeb134 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -96,6 +96,7 @@ void flush_fp_to_thread(struct task_struct *tsk)
 		preempt_enable();
 	}
 }
+EXPORT_SYMBOL_GPL(flush_fp_to_thread);
 
 void enable_kernel_fp(void)
 {
@@ -145,6 +146,7 @@ void flush_altivec_to_thread(struct task_struct *tsk)
 		preempt_enable();
 	}
 }
+EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_VSX
@@ -186,6 +188,7 @@ void flush_vsx_to_thread(struct task_struct *tsk)
 		preempt_enable();
 	}
 }
+EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
 #endif /* CONFIG_VSX */
 
 #ifdef CONFIG_SPE
@@ -213,6 +216,7 @@ void flush_spe_to_thread(struct task_struct *tsk)
 #ifdef CONFIG_SMP
 		BUG_ON(tsk != current);
 #endif
+		tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
 		giveup_spe(tsk);
 	}
 	preempt_enable();
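
These EXPORT_SYMBOL_GPL additions let modular code -- in practice the new KVM support -- force the current task's live register state back into its thread_struct before lending the FP/Altivec/VSX units to a guest. A hypothetical module-side caller (the function and its context are invented for illustration):

/* Hypothetical user of the newly exported flush helpers. */
static void save_host_math_state(void)
{
	/* Spill any live FP/VMX/VSX state into current->thread so the
	 * guest may clobber the register files. */
	flush_fp_to_thread(current);
#ifdef CONFIG_ALTIVEC
	flush_altivec_to_thread(current);
#endif
#ifdef CONFIG_VSX
	flush_vsx_to_thread(current);
#endif
}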
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 79fca2651b65..22051ef04bd9 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -375,6 +375,9 @@ void __init check_for_initrd(void)
 
 int threads_per_core, threads_shift;
 cpumask_t threads_core_mask;
+EXPORT_SYMBOL_GPL(threads_per_core);
+EXPORT_SYMBOL_GPL(threads_shift);
+EXPORT_SYMBOL_GPL(threads_core_mask);
 
 static void __init cpu_init_thread_core_maps(int tpc)
 {
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index a88bf2713d41..532054f24ecb 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -63,6 +63,7 @@
 #include <asm/kexec.h>
 #include <asm/mmu_context.h>
 #include <asm/code-patching.h>
+#include <asm/kvm_ppc.h>
 
 #include "setup.h"
 
@@ -580,6 +581,8 @@ void __init setup_arch(char **cmdline_p)
 	/* Initialize the MMU context management stuff */
 	mmu_context_init();
 
+	kvm_rma_init();
+
 	ppc64_boot_msg(0x15, "Setup Done");
 }
 
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8ebc6700b98d..09a85a9045d6 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -243,6 +243,7 @@ void smp_send_reschedule(int cpu)
 	if (likely(smp_ops))
 		smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
 }
+EXPORT_SYMBOL_GPL(smp_send_reschedule);
 
 void arch_send_call_function_single_ipi(int cpu)
 {
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 1a0141426cda..f19d9777d3c1 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1387,10 +1387,7 @@ void SPEFloatingPointException(struct pt_regs *regs)
 	int code = 0;
 	int err;
 
-	preempt_disable();
-	if (regs->msr & MSR_SPE)
-		giveup_spe(current);
-	preempt_enable();
+	flush_spe_to_thread(current);
 
 	spefscr = current->thread.spefscr;
 	fpexc_mode = current->thread.fpexc_mode;