author		Michael Neuling <mikey@neuling.org>	2014-01-08 05:25:21 -0500
committer	Alexander Graf <agraf@suse.de>		2014-01-27 10:01:00 -0500
commit		b005255e12a311d2c87ea70a7c7b192b2187c22c (patch)
tree		739892c411d395a7e4181bc2dc58c54d851d595f
parent		e0b7ec058c0eb7ba8d5d937d81de2bd16db6970e (diff)
KVM: PPC: Book3S HV: Context-switch new POWER8 SPRs
This adds fields to the struct kvm_vcpu_arch to store the new
guest-accessible SPRs on POWER8, adds code to the get/set_one_reg
functions to allow userspace to access this state, and adds code to
the guest entry and exit paths to context-switch these SPRs between host
and guest.
Note that DPDES (Directed Privileged Doorbell Exception State) is
shared between threads on a core; hence we store it in struct
kvmppc_vcore and have the master thread save and restore it.
Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
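
For context, the new state is exposed to userspace through the existing ONE_REG
interface. The sketch below is not part of this patch; it is a minimal
illustration of how userspace might read one of the new registers (WORT, whose
register ID is added in the uapi header below), assuming an already-created
vCPU file descriptor and omitting error handling:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>	/* pulls in asm/kvm.h for KVM_REG_PPC_WORT */

	/* Hypothetical helper: vcpu_fd is an existing KVM vCPU fd. */
	static int read_wort(int vcpu_fd, uint64_t *wort)
	{
		struct kvm_one_reg reg = {
			.id   = KVM_REG_PPC_WORT,	/* 64-bit reg ID added by this patch */
			.addr = (uintptr_t)wort,	/* buffer KVM copies the value into */
		};

		return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	}

KVM_SET_ONE_REG works the same way in the other direction; note that
kvmppc_set_one_reg_hv() below masks DAWRX_HYP out of DAWRX and clears the
privilege field of a CIABR value that would match on hypervisor state.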
-rw-r--r--	arch/powerpc/include/asm/kvm_host.h	 25
-rw-r--r--	arch/powerpc/include/asm/reg.h		 17
-rw-r--r--	arch/powerpc/include/uapi/asm/kvm.h	  1
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c	 23
-rw-r--r--	arch/powerpc/kvm/book3s_hv.c		153
-rw-r--r--	arch/powerpc/kvm/book3s_hv_rmhandlers.S	145
6 files changed, 361 insertions(+), 3 deletions(-)
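
Because DPDES is stored in struct kvmppc_vcore rather than per vCPU, the
KVM_REG_PPC_DPDES register is effectively per virtual core: the get/set_one_reg
code below reads and writes vcpu->arch.vcore->dpdes, so a value written through
one vCPU is what the sibling vCPUs of that core read back. A hedged userspace
sketch (not from this patch; vcpu_fd is assumed to belong to the core of
interest):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/*
	 * Illustrative only: writing DPDES through any vCPU of a virtual
	 * core updates the single per-core value kept in kvmppc_vcore.
	 */
	static int set_core_dpdes(int vcpu_fd, uint64_t val)
	{
		struct kvm_one_reg reg = {
			.id   = KVM_REG_PPC_DPDES,
			.addr = (uintptr_t)&val,
		};

		return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
	}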
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index b850544dbc3f..81c92d1d7978 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -304,6 +304,7 @@ struct kvmppc_vcore {
 	ulong lpcr;
 	u32 arch_compat;
 	ulong pcr;
+	ulong dpdes;		/* doorbell state (POWER8) */
 };
 
 #define VCORE_ENTRY_COUNT(vc)	((vc)->entry_exit_count & 0xff)
@@ -448,6 +449,7 @@ struct kvm_vcpu_arch {
 	ulong pc;
 	ulong ctr;
 	ulong lr;
+	ulong tar;
 
 	ulong xer;
 	u32 cr;
@@ -457,13 +459,32 @@ struct kvm_vcpu_arch {
 	ulong guest_owned_ext;
 	ulong purr;
 	ulong spurr;
+	ulong ic;
+	ulong vtb;
 	ulong dscr;
 	ulong amr;
 	ulong uamor;
+	ulong iamr;
 	u32 ctrl;
 	ulong dabr;
+	ulong dawr;
+	ulong dawrx;
+	ulong ciabr;
 	ulong cfar;
 	ulong ppr;
+	ulong pspb;
+	ulong fscr;
+	ulong tfhar;
+	ulong tfiar;
+	ulong texasr;
+	ulong ebbhr;
+	ulong ebbrr;
+	ulong bescr;
+	ulong csigr;
+	ulong tacr;
+	ulong tcscr;
+	ulong acop;
+	ulong wort;
 	ulong shadow_srr1;
 #endif
 	u32 vrsave; /* also USPRG0 */
@@ -498,10 +519,12 @@ struct kvm_vcpu_arch {
 	u32 ccr1;
 	u32 dbsr;
 
-	u64 mmcr[3];
+	u64 mmcr[5];
 	u32 pmc[8];
+	u32 spmc[2];
 	u64 siar;
 	u64 sdar;
+	u64 sier;
 
 #ifdef CONFIG_KVM_EXIT_TIMING
 	struct mutex exit_timing_lock;
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 5c45787d551e..2f41e6475648 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -223,6 +223,11 @@
 #define   CTRL_TE	0x00c00000	/* thread enable */
 #define   CTRL_RUNLATCH	0x1
 #define SPRN_DAWR	0xB4
+#define SPRN_CIABR	0xBB
+#define   CIABR_PRIV		0x3
+#define   CIABR_PRIV_USER	1
+#define   CIABR_PRIV_SUPER	2
+#define   CIABR_PRIV_HYPER	3
 #define SPRN_DAWRX	0xBC
 #define   DAWRX_USER	(1UL << 0)
 #define   DAWRX_KERNEL	(1UL << 1)
@@ -260,6 +265,8 @@
 #define SPRN_HRMOR	0x139	/* Real mode offset register */
 #define SPRN_HSRR0	0x13A	/* Hypervisor Save/Restore 0 */
 #define SPRN_HSRR1	0x13B	/* Hypervisor Save/Restore 1 */
+#define SPRN_IC		0x350	/* Virtual Instruction Count */
+#define SPRN_VTB	0x351	/* Virtual Time Base */
 /* HFSCR and FSCR bit numbers are the same */
 #define FSCR_TAR_LG	8	/* Enable Target Address Register */
 #define FSCR_EBB_LG	7	/* Enable Event Based Branching */
@@ -368,6 +375,8 @@
 #define   DER_EBRKE	0x00000002	/* External Breakpoint Interrupt */
 #define   DER_DPIE	0x00000001	/* Dev. Port Nonmaskable Request */
 #define SPRN_DMISS	0x3D0		/* Data TLB Miss Register */
+#define SPRN_DHDES	0x0B1		/* Directed Hyp. Doorbell Exc. State */
+#define SPRN_DPDES	0x0B0		/* Directed Priv. Doorbell Exc. State */
 #define SPRN_EAR	0x11A		/* External Address Register */
 #define SPRN_HASH1	0x3D2		/* Primary Hash Address Register */
 #define SPRN_HASH2	0x3D3		/* Secondary Hash Address Resgister */
@@ -427,6 +436,7 @@
 #define SPRN_IABR	0x3F2	/* Instruction Address Breakpoint Register */
 #define SPRN_IABR2	0x3FA	/* 83xx */
 #define SPRN_IBCR	0x135	/* 83xx Insn Breakpoint Control Reg */
+#define SPRN_IAMR	0x03D	/* Instr. Authority Mask Reg */
 #define SPRN_HID4	0x3F4	/* 970 HID4 */
 #define  HID4_LPES0	 (1ul << (63-0)) /* LPAR env. sel. bit 0 */
 #define	 HID4_RMLS2_SH	 (63 - 2)	/* Real mode limit bottom 2 bits */
@@ -541,6 +551,7 @@
 #define SPRN_PIR	0x3FF	/* Processor Identification Register */
 #endif
 #define SPRN_TIR	0x1BE	/* Thread Identification Register */
+#define SPRN_PSPB	0x09F	/* Problem State Priority Boost reg */
 #define SPRN_PTEHI	0x3D5	/* 981 7450 PTE HI word (S/W TLB load) */
 #define SPRN_PTELO	0x3D6	/* 982 7450 PTE LO word (S/W TLB load) */
 #define SPRN_PURR	0x135	/* Processor Utilization of Resources Reg */
@@ -682,6 +693,7 @@
 #define SPRN_EBBHR	804	/* Event based branch handler register */
 #define SPRN_EBBRR	805	/* Event based branch return register */
 #define SPRN_BESCR	806	/* Branch event status and control register */
+#define SPRN_WORT	895	/* Workload optimization register - thread */
 
 #define SPRN_PMC1	787
 #define SPRN_PMC2	788
@@ -698,6 +710,11 @@
 #define   SIER_SIHV		0x1000000	/* Sampled MSR_HV */
 #define   SIER_SIAR_VALID	0x0400000	/* SIAR contents valid */
 #define   SIER_SDAR_VALID	0x0200000	/* SDAR contents valid */
+#define SPRN_TACR	888
+#define SPRN_TCSCR	889
+#define SPRN_CSIGR	890
+#define SPRN_SPMC1	892
+#define SPRN_SPMC2	893
 
 /* When EBB is enabled, some of MMCR0/MMCR2/SIER are user accessible */
 #define MMCR0_USER_MASK	(MMCR0_FC | MMCR0_PMXE | MMCR0_PMAO)
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 6836ec79a830..a586fb9b77bd 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -545,6 +545,7 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_TCSCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1)
 #define KVM_REG_PPC_PID		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2)
 #define KVM_REG_PPC_ACOP	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3)
+#define KVM_REG_PPC_WORT	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb4)
 
 #define KVM_REG_PPC_VRSAVE	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
 #define KVM_REG_PPC_LPCR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 332ae66883e4..043900abbbb0 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -432,6 +432,7 @@ int main(void)
 	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
 	DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
 	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
+	DEFINE(VCPU_TAR, offsetof(struct kvm_vcpu, arch.tar));
 	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
 	DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -484,11 +485,17 @@ int main(void)
 	DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
 	DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
 	DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
+	DEFINE(VCPU_IC, offsetof(struct kvm_vcpu, arch.ic));
+	DEFINE(VCPU_VTB, offsetof(struct kvm_vcpu, arch.vtb));
 	DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr));
 	DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr));
 	DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor));
+	DEFINE(VCPU_IAMR, offsetof(struct kvm_vcpu, arch.iamr));
 	DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl));
 	DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr));
+	DEFINE(VCPU_DAWR, offsetof(struct kvm_vcpu, arch.dawr));
+	DEFINE(VCPU_DAWRX, offsetof(struct kvm_vcpu, arch.dawrx));
+	DEFINE(VCPU_CIABR, offsetof(struct kvm_vcpu, arch.ciabr));
 	DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
 	DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec));
 	DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires));
@@ -497,8 +504,10 @@ int main(void)
 	DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
 	DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
 	DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
+	DEFINE(VCPU_SPMC, offsetof(struct kvm_vcpu, arch.spmc));
 	DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar));
 	DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar));
+	DEFINE(VCPU_SIER, offsetof(struct kvm_vcpu, arch.sier));
 	DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
 	DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
 	DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
@@ -508,6 +517,19 @@ int main(void)
 	DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
 	DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
 	DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
+	DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr));
+	DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb));
+	DEFINE(VCPU_TFHAR, offsetof(struct kvm_vcpu, arch.tfhar));
+	DEFINE(VCPU_TFIAR, offsetof(struct kvm_vcpu, arch.tfiar));
+	DEFINE(VCPU_TEXASR, offsetof(struct kvm_vcpu, arch.texasr));
+	DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr));
+	DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr));
+	DEFINE(VCPU_BESCR, offsetof(struct kvm_vcpu, arch.bescr));
+	DEFINE(VCPU_CSIGR, offsetof(struct kvm_vcpu, arch.csigr));
+	DEFINE(VCPU_TACR, offsetof(struct kvm_vcpu, arch.tacr));
+	DEFINE(VCPU_TCSCR, offsetof(struct kvm_vcpu, arch.tcscr));
+	DEFINE(VCPU_ACOP, offsetof(struct kvm_vcpu, arch.acop));
+	DEFINE(VCPU_WORT, offsetof(struct kvm_vcpu, arch.wort));
 	DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
 	DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
 	DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
@@ -517,6 +539,7 @@ int main(void)
 	DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset));
 	DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr));
 	DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr));
+	DEFINE(VCORE_DPDES, offsetof(struct kvmppc_vcore, dpdes));
 	DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
 	DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
 	DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 7da53cd215db..5b08ddf91d2d 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -800,7 +800,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_UAMOR:
 		*val = get_reg_val(id, vcpu->arch.uamor);
 		break;
-	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
+	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
 		i = id - KVM_REG_PPC_MMCR0;
 		*val = get_reg_val(id, vcpu->arch.mmcr[i]);
 		break;
@@ -808,12 +808,85 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 		i = id - KVM_REG_PPC_PMC1;
 		*val = get_reg_val(id, vcpu->arch.pmc[i]);
 		break;
+	case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
+		i = id - KVM_REG_PPC_SPMC1;
+		*val = get_reg_val(id, vcpu->arch.spmc[i]);
+		break;
 	case KVM_REG_PPC_SIAR:
 		*val = get_reg_val(id, vcpu->arch.siar);
 		break;
 	case KVM_REG_PPC_SDAR:
 		*val = get_reg_val(id, vcpu->arch.sdar);
 		break;
+	case KVM_REG_PPC_SIER:
+		*val = get_reg_val(id, vcpu->arch.sier);
+		break;
+	case KVM_REG_PPC_IAMR:
+		*val = get_reg_val(id, vcpu->arch.iamr);
+		break;
+	case KVM_REG_PPC_TFHAR:
+		*val = get_reg_val(id, vcpu->arch.tfhar);
+		break;
+	case KVM_REG_PPC_TFIAR:
+		*val = get_reg_val(id, vcpu->arch.tfiar);
+		break;
+	case KVM_REG_PPC_TEXASR:
+		*val = get_reg_val(id, vcpu->arch.texasr);
+		break;
+	case KVM_REG_PPC_FSCR:
+		*val = get_reg_val(id, vcpu->arch.fscr);
+		break;
+	case KVM_REG_PPC_PSPB:
+		*val = get_reg_val(id, vcpu->arch.pspb);
+		break;
+	case KVM_REG_PPC_EBBHR:
+		*val = get_reg_val(id, vcpu->arch.ebbhr);
+		break;
+	case KVM_REG_PPC_EBBRR:
+		*val = get_reg_val(id, vcpu->arch.ebbrr);
+		break;
+	case KVM_REG_PPC_BESCR:
+		*val = get_reg_val(id, vcpu->arch.bescr);
+		break;
+	case KVM_REG_PPC_TAR:
+		*val = get_reg_val(id, vcpu->arch.tar);
+		break;
+	case KVM_REG_PPC_DPDES:
+		*val = get_reg_val(id, vcpu->arch.vcore->dpdes);
+		break;
+	case KVM_REG_PPC_DAWR:
+		*val = get_reg_val(id, vcpu->arch.dawr);
+		break;
+	case KVM_REG_PPC_DAWRX:
+		*val = get_reg_val(id, vcpu->arch.dawrx);
+		break;
+	case KVM_REG_PPC_CIABR:
+		*val = get_reg_val(id, vcpu->arch.ciabr);
+		break;
+	case KVM_REG_PPC_IC:
+		*val = get_reg_val(id, vcpu->arch.ic);
+		break;
+	case KVM_REG_PPC_VTB:
+		*val = get_reg_val(id, vcpu->arch.vtb);
+		break;
+	case KVM_REG_PPC_CSIGR:
+		*val = get_reg_val(id, vcpu->arch.csigr);
+		break;
+	case KVM_REG_PPC_TACR:
+		*val = get_reg_val(id, vcpu->arch.tacr);
+		break;
+	case KVM_REG_PPC_TCSCR:
+		*val = get_reg_val(id, vcpu->arch.tcscr);
+		break;
+	case KVM_REG_PPC_PID:
+		*val = get_reg_val(id, vcpu->arch.pid);
+		break;
+	case KVM_REG_PPC_ACOP:
+		*val = get_reg_val(id, vcpu->arch.acop);
+		break;
+	case KVM_REG_PPC_WORT:
+		*val = get_reg_val(id, vcpu->arch.wort);
+		break;
 	case KVM_REG_PPC_VPA_ADDR:
 		spin_lock(&vcpu->arch.vpa_update_lock);
 		*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
@@ -882,7 +955,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_UAMOR:
 		vcpu->arch.uamor = set_reg_val(id, *val);
 		break;
-	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
+	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
 		i = id - KVM_REG_PPC_MMCR0;
 		vcpu->arch.mmcr[i] = set_reg_val(id, *val);
 		break;
@@ -890,12 +963,88 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 		i = id - KVM_REG_PPC_PMC1;
 		vcpu->arch.pmc[i] = set_reg_val(id, *val);
 		break;
+	case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
+		i = id - KVM_REG_PPC_SPMC1;
+		vcpu->arch.spmc[i] = set_reg_val(id, *val);
+		break;
 	case KVM_REG_PPC_SIAR:
 		vcpu->arch.siar = set_reg_val(id, *val);
 		break;
 	case KVM_REG_PPC_SDAR:
 		vcpu->arch.sdar = set_reg_val(id, *val);
 		break;
+	case KVM_REG_PPC_SIER:
+		vcpu->arch.sier = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_IAMR:
+		vcpu->arch.iamr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TFHAR:
+		vcpu->arch.tfhar = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TFIAR:
+		vcpu->arch.tfiar = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TEXASR:
+		vcpu->arch.texasr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_FSCR:
+		vcpu->arch.fscr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_PSPB:
+		vcpu->arch.pspb = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_EBBHR:
+		vcpu->arch.ebbhr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_EBBRR:
+		vcpu->arch.ebbrr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_BESCR:
+		vcpu->arch.bescr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TAR:
+		vcpu->arch.tar = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_DPDES:
+		vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_DAWR:
+		vcpu->arch.dawr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_DAWRX:
+		vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP;
+		break;
+	case KVM_REG_PPC_CIABR:
+		vcpu->arch.ciabr = set_reg_val(id, *val);
+		/* Don't allow setting breakpoints in hypervisor code */
+		if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
+			vcpu->arch.ciabr &= ~CIABR_PRIV;	/* disable */
+		break;
+	case KVM_REG_PPC_IC:
+		vcpu->arch.ic = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_VTB:
+		vcpu->arch.vtb = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_CSIGR:
+		vcpu->arch.csigr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TACR:
+		vcpu->arch.tacr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TCSCR:
+		vcpu->arch.tcscr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_PID:
+		vcpu->arch.pid = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_ACOP:
+		vcpu->arch.acop = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_WORT:
+		vcpu->arch.wort = set_reg_val(id, *val);
+		break;
 	case KVM_REG_PPC_VPA_ADDR:
 		addr = set_reg_val(id, *val);
 		r = -EINVAL;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 8bbe91bdb6da..691dd1ef555b 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -460,6 +460,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	beq	38f
 	mtspr	SPRN_PCR, r7
 38:
+
+BEGIN_FTR_SECTION
+	/* DPDES is shared between threads */
+	ld	r8, VCORE_DPDES(r5)
+	mtspr	SPRN_DPDES, r8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
 	li	r0,1
 	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
 	b	10f
@@ -659,6 +666,18 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	mtspr	SPRN_MMCRA, r6
 	mtspr	SPRN_SIAR, r7
 	mtspr	SPRN_SDAR, r8
+BEGIN_FTR_SECTION
+	ld	r5, VCPU_MMCR + 24(r4)
+	ld	r6, VCPU_SIER(r4)
+	lwz	r7, VCPU_PMC + 24(r4)
+	lwz	r8, VCPU_PMC + 28(r4)
+	ld	r9, VCPU_MMCR + 32(r4)
+	mtspr	SPRN_MMCR2, r5
+	mtspr	SPRN_SIER, r6
+	mtspr	SPRN_SPMC1, r7
+	mtspr	SPRN_SPMC2, r8
+	mtspr	SPRN_MMCRS, r9
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	mtspr	SPRN_MMCR0, r3
 	isync
 
@@ -690,6 +709,61 @@ BEGIN_FTR_SECTION
 	mtspr	SPRN_DSCR, r5
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
+BEGIN_FTR_SECTION
+	/* Skip next section on POWER7 or PPC970 */
+	b	8f
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
+	mfmsr	r8
+	li	r0, 1
+	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+	mtmsrd	r8
+
+	/* Load up POWER8-specific registers */
+	ld	r5, VCPU_IAMR(r4)
+	lwz	r6, VCPU_PSPB(r4)
+	ld	r7, VCPU_FSCR(r4)
+	mtspr	SPRN_IAMR, r5
+	mtspr	SPRN_PSPB, r6
+	mtspr	SPRN_FSCR, r7
+	ld	r5, VCPU_DAWR(r4)
+	ld	r6, VCPU_DAWRX(r4)
+	ld	r7, VCPU_CIABR(r4)
+	ld	r8, VCPU_TAR(r4)
+	mtspr	SPRN_DAWR, r5
+	mtspr	SPRN_DAWRX, r6
+	mtspr	SPRN_CIABR, r7
+	mtspr	SPRN_TAR, r8
+	ld	r5, VCPU_IC(r4)
+	ld	r6, VCPU_VTB(r4)
+	mtspr	SPRN_IC, r5
+	mtspr	SPRN_VTB, r6
+	ld	r5, VCPU_TFHAR(r4)
+	ld	r6, VCPU_TFIAR(r4)
+	ld	r7, VCPU_TEXASR(r4)
+	ld	r8, VCPU_EBBHR(r4)
+	mtspr	SPRN_TFHAR, r5
+	mtspr	SPRN_TFIAR, r6
+	mtspr	SPRN_TEXASR, r7
+	mtspr	SPRN_EBBHR, r8
+	ld	r5, VCPU_EBBRR(r4)
+	ld	r6, VCPU_BESCR(r4)
+	ld	r7, VCPU_CSIGR(r4)
+	ld	r8, VCPU_TACR(r4)
+	mtspr	SPRN_EBBRR, r5
+	mtspr	SPRN_BESCR, r6
+	mtspr	SPRN_CSIGR, r7
+	mtspr	SPRN_TACR, r8
+	ld	r5, VCPU_TCSCR(r4)
+	ld	r6, VCPU_ACOP(r4)
+	lwz	r7, VCPU_GUEST_PID(r4)
+	ld	r8, VCPU_WORT(r4)
+	mtspr	SPRN_TCSCR, r5
+	mtspr	SPRN_ACOP, r6
+	mtspr	SPRN_PID, r7
+	mtspr	SPRN_WORT, r8
+8:
+
 	/*
 	 * Set the decrementer to the guest decrementer.
 	 */
@@ -1081,6 +1155,54 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
 	add	r5,r5,r6
 	std	r5,VCPU_DEC_EXPIRES(r9)
 
+BEGIN_FTR_SECTION
+	b	8f
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
+	mfmsr	r8
+	li	r0, 1
+	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+	mtmsrd	r8
+
+	/* Save POWER8-specific registers */
+	mfspr	r5, SPRN_IAMR
+	mfspr	r6, SPRN_PSPB
+	mfspr	r7, SPRN_FSCR
+	std	r5, VCPU_IAMR(r9)
+	stw	r6, VCPU_PSPB(r9)
+	std	r7, VCPU_FSCR(r9)
+	mfspr	r5, SPRN_IC
+	mfspr	r6, SPRN_VTB
+	mfspr	r7, SPRN_TAR
+	std	r5, VCPU_IC(r9)
+	std	r6, VCPU_VTB(r9)
+	std	r7, VCPU_TAR(r9)
+	mfspr	r5, SPRN_TFHAR
+	mfspr	r6, SPRN_TFIAR
+	mfspr	r7, SPRN_TEXASR
+	mfspr	r8, SPRN_EBBHR
+	std	r5, VCPU_TFHAR(r9)
+	std	r6, VCPU_TFIAR(r9)
+	std	r7, VCPU_TEXASR(r9)
+	std	r8, VCPU_EBBHR(r9)
+	mfspr	r5, SPRN_EBBRR
+	mfspr	r6, SPRN_BESCR
+	mfspr	r7, SPRN_CSIGR
+	mfspr	r8, SPRN_TACR
+	std	r5, VCPU_EBBRR(r9)
+	std	r6, VCPU_BESCR(r9)
+	std	r7, VCPU_CSIGR(r9)
+	std	r8, VCPU_TACR(r9)
+	mfspr	r5, SPRN_TCSCR
+	mfspr	r6, SPRN_ACOP
+	mfspr	r7, SPRN_PID
+	mfspr	r8, SPRN_WORT
+	std	r5, VCPU_TCSCR(r9)
+	std	r6, VCPU_ACOP(r9)
+	stw	r7, VCPU_GUEST_PID(r9)
+	std	r8, VCPU_WORT(r9)
+8:
+
 	/* Save and reset AMR and UAMOR before turning on the MMU */
 BEGIN_FTR_SECTION
 	mfspr	r5,SPRN_AMR
@@ -1190,6 +1312,20 @@ BEGIN_FTR_SECTION
 	stw	r10, VCPU_PMC + 24(r9)
 	stw	r11, VCPU_PMC + 28(r9)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+BEGIN_FTR_SECTION
+	mfspr	r4, SPRN_MMCR2
+	mfspr	r5, SPRN_SIER
+	mfspr	r6, SPRN_SPMC1
+	mfspr	r7, SPRN_SPMC2
+	mfspr	r8, SPRN_MMCRS
+	std	r4, VCPU_MMCR + 24(r9)
+	std	r5, VCPU_SIER(r9)
+	stw	r6, VCPU_PMC + 24(r9)
+	stw	r7, VCPU_PMC + 28(r9)
+	std	r8, VCPU_MMCR + 32(r9)
+	lis	r4, 0x8000
+	mtspr	SPRN_MMCRS, r4
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 22:
 	/* Clear out SLB */
 	li	r5,0
@@ -1290,6 +1426,15 @@ secondary_too_late:
 	mtspr	SPRN_LPID,r7
 	isync
 
+BEGIN_FTR_SECTION
+	/* DPDES is shared between threads */
+	mfspr	r7, SPRN_DPDES
+	std	r7, VCORE_DPDES(r5)
+	/* clear DPDES so we don't get guest doorbells in the host */
+	li	r8, 0
+	mtspr	SPRN_DPDES, r8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
 	/* Subtract timebase offset from timebase */
 	ld	r8,VCORE_TB_OFFSET(r5)
 	cmpdi	r8,0