 40 files changed, 2313 insertions(+), 1233 deletions(-)
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index aa9e785c59c2..7841b8a60657 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -134,7 +134,13 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);
 void pnv_power9_force_smt4_catch(void);
 void pnv_power9_force_smt4_release(void);
 
+/* Transaction memory related */
 void tm_enable(void);
 void tm_disable(void);
 void tm_abort(uint8_t cause);
+
+struct kvm_vcpu;
+void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
+void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
+
 #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index e7377b73cfec..1f345a0b6ba2 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -104,6 +104,7 @@ struct kvmppc_vcore {
     ulong vtb; /* virtual timebase */
     ulong conferring_threads;
     unsigned int halt_poll_ns;
+    atomic_t online_count;
 };
 
 struct kvmppc_vcpu_book3s {
@@ -209,6 +210,7 @@ extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
 extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
         unsigned int vec);
 extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
+extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
 extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
         bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
@@ -256,6 +258,21 @@ extern int kvmppc_hcall_impl_pr(unsigned long cmd);
 extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
 extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
 extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
+void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
+void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
+void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
+#else
+static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
+static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
+static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
+static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
+#endif
+
+void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
+
 extern int kvm_irq_bypass;
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
@@ -274,12 +291,12 @@ static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
-    vcpu->arch.gpr[num] = val;
+    vcpu->arch.regs.gpr[num] = val;
 }
 
 static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
 {
-    return vcpu->arch.gpr[num];
+    return vcpu->arch.regs.gpr[num];
 }
 
 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
@@ -294,42 +311,42 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 
 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
 {
-    vcpu->arch.xer = val;
+    vcpu->arch.regs.xer = val;
 }
 
 static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
 {
-    return vcpu->arch.xer;
+    return vcpu->arch.regs.xer;
 }
 
 static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
 {
-    vcpu->arch.ctr = val;
+    vcpu->arch.regs.ctr = val;
 }
 
 static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
 {
-    return vcpu->arch.ctr;
+    return vcpu->arch.regs.ctr;
 }
 
 static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
 {
-    vcpu->arch.lr = val;
+    vcpu->arch.regs.link = val;
 }
 
 static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
 {
-    return vcpu->arch.lr;
+    return vcpu->arch.regs.link;
 }
 
 static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
 {
-    vcpu->arch.pc = val;
+    vcpu->arch.regs.nip = val;
 }
 
 static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
 {
-    return vcpu->arch.pc;
+    return vcpu->arch.regs.nip;
 }
 
 static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
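The accessor hunks above are the crux of this file's change: the loose `pc`/`ctr`/`lr`/`xer` fields become `nip`, `ctr`, `link` and `xer` inside an embedded `struct pt_regs`, while callers keep using the unchanged kvmppc_{get,set}_*() helpers. A minimal standalone sketch of that pattern, using simplified stand-in types rather than the kernel's:

```c
#include <stdio.h>

/* Simplified stand-ins for the kernel types touched above. */
struct pt_regs_like {
    unsigned long gpr[32];
    unsigned long nip;          /* was vcpu->arch.pc */
    unsigned long link;         /* was vcpu->arch.lr */
    unsigned long ctr;
    unsigned long xer;
};

struct vcpu_arch_like {
    struct pt_regs_like regs;   /* replaces the loose fields */
};

/* The accessors keep their old names, so callers are unaffected. */
static inline void set_lr(struct vcpu_arch_like *a, unsigned long val)
{
    a->regs.link = val;
}

static inline unsigned long get_lr(struct vcpu_arch_like *a)
{
    return a->regs.link;
}

int main(void)
{
    struct vcpu_arch_like a = { 0 };

    set_lr(&a, 0x1234);
    printf("LR = 0x%lx\n", get_lr(&a));  /* callers never touch .regs */
    return 0;
}
```

Because every access goes through the helpers, only the helper bodies and the asm-offsets entries later in this diff need to know about the new layout.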
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index c424e44f4c00..dc435a5af7d6 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -483,15 +483,15 @@ static inline u64 sanitize_msr(u64 msr)
 static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
 {
     vcpu->arch.cr = vcpu->arch.cr_tm;
-    vcpu->arch.xer = vcpu->arch.xer_tm;
-    vcpu->arch.lr = vcpu->arch.lr_tm;
-    vcpu->arch.ctr = vcpu->arch.ctr_tm;
+    vcpu->arch.regs.xer = vcpu->arch.xer_tm;
+    vcpu->arch.regs.link = vcpu->arch.lr_tm;
+    vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
     vcpu->arch.amr = vcpu->arch.amr_tm;
     vcpu->arch.ppr = vcpu->arch.ppr_tm;
     vcpu->arch.dscr = vcpu->arch.dscr_tm;
     vcpu->arch.tar = vcpu->arch.tar_tm;
-    memcpy(vcpu->arch.gpr, vcpu->arch.gpr_tm,
-           sizeof(vcpu->arch.gpr));
+    memcpy(vcpu->arch.regs.gpr, vcpu->arch.gpr_tm,
+           sizeof(vcpu->arch.regs.gpr));
     vcpu->arch.fp = vcpu->arch.fp_tm;
     vcpu->arch.vr = vcpu->arch.vr_tm;
     vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
@@ -500,15 +500,15 @@ static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
 static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
 {
     vcpu->arch.cr_tm = vcpu->arch.cr;
-    vcpu->arch.xer_tm = vcpu->arch.xer;
-    vcpu->arch.lr_tm = vcpu->arch.lr;
-    vcpu->arch.ctr_tm = vcpu->arch.ctr;
+    vcpu->arch.xer_tm = vcpu->arch.regs.xer;
+    vcpu->arch.lr_tm = vcpu->arch.regs.link;
+    vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
     vcpu->arch.amr_tm = vcpu->arch.amr;
     vcpu->arch.ppr_tm = vcpu->arch.ppr;
     vcpu->arch.dscr_tm = vcpu->arch.dscr;
     vcpu->arch.tar_tm = vcpu->arch.tar;
-    memcpy(vcpu->arch.gpr_tm, vcpu->arch.gpr,
-           sizeof(vcpu->arch.gpr));
+    memcpy(vcpu->arch.gpr_tm, vcpu->arch.regs.gpr,
+           sizeof(vcpu->arch.regs.gpr));
     vcpu->arch.fp_tm = vcpu->arch.fp;
     vcpu->arch.vr_tm = vcpu->arch.vr;
     vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index bc6e29e4dfd4..d513e3ed1c65 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -36,12 +36,12 @@
 
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
-    vcpu->arch.gpr[num] = val;
+    vcpu->arch.regs.gpr[num] = val;
 }
 
 static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
 {
-    return vcpu->arch.gpr[num];
+    return vcpu->arch.regs.gpr[num];
 }
 
 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
@@ -56,12 +56,12 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 
 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
 {
-    vcpu->arch.xer = val;
+    vcpu->arch.regs.xer = val;
 }
 
 static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
 {
-    return vcpu->arch.xer;
+    return vcpu->arch.regs.xer;
 }
 
 static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
@@ -72,32 +72,32 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
 
 static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
 {
-    vcpu->arch.ctr = val;
+    vcpu->arch.regs.ctr = val;
 }
 
 static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
 {
-    return vcpu->arch.ctr;
+    return vcpu->arch.regs.ctr;
 }
 
 static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
 {
-    vcpu->arch.lr = val;
+    vcpu->arch.regs.link = val;
 }
 
 static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
 {
-    return vcpu->arch.lr;
+    return vcpu->arch.regs.link;
 }
 
 static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
 {
-    vcpu->arch.pc = val;
+    vcpu->arch.regs.nip = val;
 }
 
 static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
 {
-    return vcpu->arch.pc;
+    return vcpu->arch.regs.nip;
 }
 
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 17498e9a26e4..fa4efa7e88f7 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -269,7 +269,6 @@ struct kvm_arch {
     unsigned long host_lpcr;
     unsigned long sdr1;
     unsigned long host_sdr1;
-    int tlbie_lock;
     unsigned long lpcr;
     unsigned long vrma_slb_v;
     int mmu_ready;
@@ -454,6 +453,12 @@ struct mmio_hpte_cache {
 #define KVMPPC_VSX_COPY_WORD 1
 #define KVMPPC_VSX_COPY_DWORD 2
 #define KVMPPC_VSX_COPY_DWORD_LOAD_DUMP 3
+#define KVMPPC_VSX_COPY_WORD_LOAD_DUMP 4
+
+#define KVMPPC_VMX_COPY_BYTE 8
+#define KVMPPC_VMX_COPY_HWORD 9
+#define KVMPPC_VMX_COPY_WORD 10
+#define KVMPPC_VMX_COPY_DWORD 11
 
 struct openpic;
 
@@ -486,7 +491,7 @@ struct kvm_vcpu_arch {
     struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
 #endif
 
-    ulong gpr[32];
+    struct pt_regs regs;
 
     struct thread_fp_state fp;
 
@@ -521,14 +526,10 @@ struct kvm_vcpu_arch {
     u32 qpr[32];
 #endif
 
-    ulong pc;
-    ulong ctr;
-    ulong lr;
 #ifdef CONFIG_PPC_BOOK3S
     ulong tar;
 #endif
 
-    ulong xer;
     u32 cr;
 
 #ifdef CONFIG_PPC_BOOK3S
@@ -626,7 +627,6 @@ struct kvm_vcpu_arch {
 
     struct thread_vr_state vr_tm;
     u32 vrsave_tm; /* also USPRG0 */
-
 #endif
 
 #ifdef CONFIG_KVM_EXIT_TIMING
@@ -681,16 +681,17 @@ struct kvm_vcpu_arch {
      * Number of simulations for vsx.
      * If we use 2*8bytes to simulate 1*16bytes,
      * then the number should be 2 and
-     * mmio_vsx_copy_type=KVMPPC_VSX_COPY_DWORD.
+     * mmio_copy_type=KVMPPC_VSX_COPY_DWORD.
      * If we use 4*4bytes to simulate 1*16bytes,
      * the number should be 4 and
      * mmio_vsx_copy_type=KVMPPC_VSX_COPY_WORD.
      */
     u8 mmio_vsx_copy_nums;
     u8 mmio_vsx_offset;
-    u8 mmio_vsx_copy_type;
     u8 mmio_vsx_tx_sx_enabled;
     u8 mmio_vmx_copy_nums;
+    u8 mmio_vmx_offset;
+    u8 mmio_copy_type;
     u8 osi_needed;
     u8 osi_enabled;
     u8 papr_enabled;
@@ -772,6 +773,8 @@ struct kvm_vcpu_arch {
     u64 busy_preempt;
 
     u32 emul_inst;
+
+    u32 online;
 #endif
 
 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
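The comment rewritten in the last hunk describes how a single 16-byte VSX/VMX register access is emulated as several narrower MMIO operations: `mmio_*_copy_nums` holds the count and the copy type selects the chunk width. A self-contained illustration of that splitting (names here are illustrative, not the kernel's):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* One 16-byte register access emulated as N smaller MMIO operations. */
enum copy_type { COPY_DWORD, COPY_WORD };

static void emulate_16byte_load(const uint8_t src[16], enum copy_type t)
{
    int n = (t == COPY_DWORD) ? 2 : 4;  /* plays the mmio_*_copy_nums role */
    int step = 16 / n;

    for (int i = 0; i < n; i++) {
        uint64_t chunk = 0;

        memcpy(&chunk, src + i * step, step);
        printf("MMIO op %d/%d: %d bytes = 0x%llx\n",
               i + 1, n, step, (unsigned long long)chunk);
    }
}

int main(void)
{
    uint8_t reg[16];

    for (int i = 0; i < 16; i++)
        reg[i] = (uint8_t)i;

    emulate_16byte_load(reg, COPY_DWORD);   /* 2 x 8 bytes */
    emulate_16byte_load(reg, COPY_WORD);    /* 4 x 4 bytes */
    return 0;
}
```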
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index abe7032cdb54..e991821dd7fa 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -52,7 +52,7 @@ enum emulation_result {
     EMULATE_EXIT_USER, /* emulation requires exit to user-space */
 };
 
-enum instruction_type {
+enum instruction_fetch_type {
     INST_GENERIC,
     INST_SC, /* system call */
 };
@@ -81,10 +81,10 @@ extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
 extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
         unsigned int rt, unsigned int bytes,
         int is_default_endian, int mmio_sign_extend);
-extern int kvmppc_handle_load128_by2x64(struct kvm_run *run,
-        struct kvm_vcpu *vcpu, unsigned int rt, int is_default_endian);
-extern int kvmppc_handle_store128_by2x64(struct kvm_run *run,
-        struct kvm_vcpu *vcpu, unsigned int rs, int is_default_endian);
+extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+        unsigned int rt, unsigned int bytes, int is_default_endian);
+extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+        unsigned int rs, unsigned int bytes, int is_default_endian);
 extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
         u64 val, unsigned int bytes,
         int is_default_endian);
@@ -93,7 +93,7 @@ extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
         int is_default_endian);
 
 extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
-        enum instruction_type type, u32 *inst);
+        enum instruction_fetch_type type, u32 *inst);
 
 extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
         bool data);
@@ -265,6 +265,8 @@ union kvmppc_one_reg {
     vector128 vval;
     u64 vsxval[2];
     u32 vsx32val[4];
+    u16 vsx16val[8];
+    u8 vsx8val[16];
     struct {
         u64 addr;
         u64 length;
@@ -324,13 +326,14 @@ struct kvmppc_ops {
     int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
     int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
             unsigned long flags);
+    void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
 };
 
 extern struct kvmppc_ops *kvmppc_hv_ops;
 extern struct kvmppc_ops *kvmppc_pr_ops;
 
 static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
-        enum instruction_type type, u32 *inst)
+        enum instruction_fetch_type type, u32 *inst)
 {
     int ret = EMULATE_DONE;
     u32 fetched_inst;
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 75c5b2cd9d66..562568414cf4 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -385,6 +385,7 @@
 #define SPRN_PSSCR 0x357 /* Processor Stop Status and Control Register (ISA 3.0) */
 #define SPRN_PSSCR_PR 0x337 /* PSSCR ISA 3.0, privileged mode access */
 #define SPRN_PMCR 0x374 /* Power Management Control Register */
+#define SPRN_RWMR 0x375 /* Region-Weighting Mode Register */
 
 /* HFSCR and FSCR bit numbers are the same */
 #define FSCR_SCV_LG 12 /* Enable System Call Vectored */
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 833ed9a16adf..1b32b56a03d3 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -633,6 +633,7 @@ struct kvm_ppc_cpu_char {
 #define KVM_REG_PPC_PSSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd)
 
 #define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe)
+#define KVM_REG_PPC_ONLINE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf)
 
 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
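The new `KVM_REG_PPC_ONLINE` register pairs with the `u32 online` field and the `online_count` atomic added earlier; userspace reports a vCPU's online state through the standard one-reg interface. A sketch of how a VMM might set it, assuming a kernel whose uapi headers include this patch (fd setup and error handling elided):

```c
#include <linux/kvm.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#ifndef KVM_REG_PPC_ONLINE      /* value taken from the hunk above */
#define KVM_REG_PPC_ONLINE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf)
#endif

/* Hypothetical helper: tell KVM a vCPU is online. The vcpu_fd comes
 * from a prior KVM_CREATE_VCPU ioctl. */
static int set_vcpu_online(int vcpu_fd, uint32_t online)
{
    struct kvm_one_reg reg;

    memset(&reg, 0, sizeof(reg));
    reg.id = KVM_REG_PPC_ONLINE;        /* a U32-sized one-reg */
    reg.addr = (uintptr_t)&online;
    return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}
```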
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 9fc9e0977009..0a0544335950 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -426,20 +426,20 @@ int main(void)
     OFFSET(VCPU_HOST_STACK, kvm_vcpu, arch.host_stack);
     OFFSET(VCPU_HOST_PID, kvm_vcpu, arch.host_pid);
     OFFSET(VCPU_GUEST_PID, kvm_vcpu, arch.pid);
-    OFFSET(VCPU_GPRS, kvm_vcpu, arch.gpr);
+    OFFSET(VCPU_GPRS, kvm_vcpu, arch.regs.gpr);
     OFFSET(VCPU_VRSAVE, kvm_vcpu, arch.vrsave);
     OFFSET(VCPU_FPRS, kvm_vcpu, arch.fp.fpr);
 #ifdef CONFIG_ALTIVEC
     OFFSET(VCPU_VRS, kvm_vcpu, arch.vr.vr);
 #endif
-    OFFSET(VCPU_XER, kvm_vcpu, arch.xer);
-    OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr);
-    OFFSET(VCPU_LR, kvm_vcpu, arch.lr);
+    OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
+    OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
+    OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
 #ifdef CONFIG_PPC_BOOK3S
     OFFSET(VCPU_TAR, kvm_vcpu, arch.tar);
 #endif
     OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
-    OFFSET(VCPU_PC, kvm_vcpu, arch.pc);
+    OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
     OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr);
     OFFSET(VCPU_SRR0, kvm_vcpu, arch.shregs.srr0);
@@ -696,10 +696,10 @@ int main(void)
 
 #else /* CONFIG_PPC_BOOK3S */
     OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
-    OFFSET(VCPU_XER, kvm_vcpu, arch.xer);
-    OFFSET(VCPU_LR, kvm_vcpu, arch.lr);
-    OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr);
-    OFFSET(VCPU_PC, kvm_vcpu, arch.pc);
+    OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
+    OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
+    OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
+    OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
     OFFSET(VCPU_SPRG9, kvm_vcpu, arch.sprg9);
     OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst);
     OFFSET(VCPU_FAULT_DEAR, kvm_vcpu, arch.fault_dear);
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 4b19da8c87ae..f872c04bb5b1 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -63,6 +63,9 @@ kvm-pr-y := \
 	book3s_64_mmu.o \
 	book3s_32_mmu.o
 
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
+	tm.o
+
 ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
 	book3s_rmhandlers.o
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 97d4a112648f..edaf4720d156 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -134,7 +134,7 @@ void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
 {
     kvmppc_unfixup_split_real(vcpu);
     kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
-    kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
+    kvmppc_set_srr1(vcpu, (kvmppc_get_msr(vcpu) & ~0x783f0000ul) | flags);
     kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
     vcpu->arch.mmu.reset_msr(vcpu);
 }
@@ -256,18 +256,15 @@ void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
 {
     kvmppc_set_dar(vcpu, dar);
     kvmppc_set_dsisr(vcpu, flags);
-    kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
+    kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
 }
-EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage); /* used by kvm_hv */
+EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);
 
 void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
 {
-    u64 msr = kvmppc_get_msr(vcpu);
-    msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
-    msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
-    kvmppc_set_msr_fast(vcpu, msr);
-    kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
+    kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, flags);
 }
+EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);
 
 static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
         unsigned int priority)
@@ -450,8 +447,8 @@ int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
     return r;
 }
 
-int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
-        u32 *inst)
+int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
+        enum instruction_fetch_type type, u32 *inst)
 {
     ulong pc = kvmppc_get_pc(vcpu);
     int r;
@@ -509,8 +506,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
     int i;
 
-    vcpu_load(vcpu);
-
     regs->pc = kvmppc_get_pc(vcpu);
     regs->cr = kvmppc_get_cr(vcpu);
     regs->ctr = kvmppc_get_ctr(vcpu);
@@ -532,7 +527,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
     for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
         regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
 
-    vcpu_put(vcpu);
     return 0;
 }
 
@@ -540,8 +534,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
     int i;
 
-    vcpu_load(vcpu);
-
     kvmppc_set_pc(vcpu, regs->pc);
     kvmppc_set_cr(vcpu, regs->cr);
     kvmppc_set_ctr(vcpu, regs->ctr);
@@ -562,7 +554,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
     for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
         kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
 
-    vcpu_put(vcpu);
     return 0;
 }
 
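A note on the `~0x783f0000ul` mask in the kvmppc_inject_interrupt() hunk: those appear to be the SRR1 bit positions that interrupts overwrite with status information (bits 33:36 and 42:47 in IBM numbering, if I map the mask correctly; treat that reading as an assumption), so they are cleared from the guest MSR image before the new `flags` are OR-ed in. A tiny arithmetic check of the masking, with made-up values:

```c
#include <stdio.h>

int main(void)
{
    /* Illustrative values only; not taken from a real guest. */
    unsigned long msr   = 0x900000000000f032UL;  /* guest MSR image */
    unsigned long flags = 0x0000000008000000UL;  /* e.g. an ISI status bit */

    /* Mirror of the hunk: clear the interrupt-specific SRR1 bits,
     * then fold in the new status flags. */
    unsigned long srr1 = (msr & ~0x783f0000UL) | flags;

    printf("srr1 = 0x%016lx\n", srr1);
    return 0;
}
```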
diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h
index 4ad5e287b8bc..14ef03501d21 100644
--- a/arch/powerpc/kvm/book3s.h
+++ b/arch/powerpc/kvm/book3s.h
@@ -31,4 +31,10 @@ extern int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu,
 extern int kvmppc_book3s_init_pr(void);
 extern void kvmppc_book3s_exit_pr(void);
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+extern void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val);
+#else
+static inline void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val) {}
+#endif
+
 #endif
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 1992676c7a94..45c8ea4a0487 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -52,7 +52,7 @@
 static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
 {
 #ifdef DEBUG_MMU_PTE_IP
-    return vcpu->arch.pc == DEBUG_MMU_PTE_IP;
+    return vcpu->arch.regs.nip == DEBUG_MMU_PTE_IP;
 #else
     return true;
 #endif
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index a93d719edc90..cf9d686e8162 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -38,7 +38,16 @@
 
 static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
 {
-    kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
+    unsigned long msr = vcpu->arch.intr_msr;
+    unsigned long cur_msr = kvmppc_get_msr(vcpu);
+
+    /* If transactional, change to suspend mode on IRQ delivery */
+    if (MSR_TM_TRANSACTIONAL(cur_msr))
+        msr |= MSR_TS_S;
+    else
+        msr |= cur_msr & MSR_TS_MASK;
+
+    kvmppc_set_msr(vcpu, msr);
 }
 
 static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
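The reset_msr change above encodes a transactional-memory rule: if the guest was in transactional state when the interrupt is delivered, the new MSR must show suspended state; any other TS value carries over unchanged. A compact sketch of just that bit manipulation (the MSR_TS_* values mirror arch/powerpc/include/asm/reg.h as I recall them; treat them as assumptions):

```c
#include <stdio.h>

/* MSR transaction-state (TS) field; values restated from memory. */
#define MSR_TS_S    (1UL << 33)     /* suspended */
#define MSR_TS_T    (1UL << 34)     /* transactional */
#define MSR_TS_MASK (MSR_TS_T | MSR_TS_S)
#define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T)

/* Mirrors the hunk: transactional guests are demoted to suspended
 * state on interrupt delivery; other TS values carry over. */
static unsigned long reset_msr_ts(unsigned long intr_msr,
                                  unsigned long cur_msr)
{
    unsigned long msr = intr_msr;

    if (MSR_TM_TRANSACTIONAL(cur_msr))
        msr |= MSR_TS_S;
    else
        msr |= cur_msr & MSR_TS_MASK;
    return msr;
}

int main(void)
{
    printf("from T: 0x%lx\n", reset_msr_ts(0x1000UL, MSR_TS_T));
    printf("from S: 0x%lx\n", reset_msr_ts(0x1000UL, MSR_TS_S));
    return 0;
}
```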
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 1b3fcafc685e..7f3a8cf5d66f 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -272,6 +272,9 @@ int kvmppc_mmu_hv_init(void)
     if (!cpu_has_feature(CPU_FTR_HVMODE))
         return -EINVAL;
 
+    if (!mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE))
+        return -EINVAL;
+
     /* POWER7 has 10-bit LPIDs (12-bit in POWER8) */
     host_lpid = mfspr(SPRN_LPID);
     rsvd_lpid = LPID_RSVD;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 481da8f93fa4..176f911ee983 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -139,44 +139,24 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
     return 0;
 }
 
-#ifdef CONFIG_PPC_64K_PAGES
-#define MMU_BASE_PSIZE MMU_PAGE_64K
-#else
-#define MMU_BASE_PSIZE MMU_PAGE_4K
-#endif
-
 static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
         unsigned int pshift)
 {
-    int psize = MMU_BASE_PSIZE;
+    unsigned long psize = PAGE_SIZE;
 
-    if (pshift >= PUD_SHIFT)
-        psize = MMU_PAGE_1G;
-    else if (pshift >= PMD_SHIFT)
-        psize = MMU_PAGE_2M;
-    addr &= ~0xfffUL;
-    addr |= mmu_psize_defs[psize].ap << 5;
-    asm volatile("ptesync": : :"memory");
-    asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
-            : : "r" (addr), "r" (kvm->arch.lpid) : "memory");
-    if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG))
-        asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
-                : : "r" (addr), "r" (kvm->arch.lpid) : "memory");
-    asm volatile("eieio ; tlbsync ; ptesync": : :"memory");
+    if (pshift)
+        psize = 1UL << pshift;
+
+    addr &= ~(psize - 1);
+    radix__flush_tlb_lpid_page(kvm->arch.lpid, addr, psize);
 }
 
-static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned long addr)
+static void kvmppc_radix_flush_pwc(struct kvm *kvm)
 {
-    unsigned long rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
-
-    asm volatile("ptesync": : :"memory");
-    /* RIC=1 PRS=0 R=1 IS=2 */
-    asm volatile(PPC_TLBIE_5(%0, %1, 1, 0, 1)
-            : : "r" (rb), "r" (kvm->arch.lpid) : "memory");
-    asm volatile("eieio ; tlbsync ; ptesync": : :"memory");
+    radix__flush_pwc_lpid(kvm->arch.lpid);
 }
 
-unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
+static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
         unsigned long clr, unsigned long set,
         unsigned long addr, unsigned int shift)
 {
@@ -228,6 +208,167 @@ static void kvmppc_pmd_free(pmd_t *pmdp)
     kmem_cache_free(kvm_pmd_cache, pmdp);
 }
 
+static void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte,
+        unsigned long gpa, unsigned int shift)
+
+{
+    unsigned long page_size = 1ul << shift;
+    unsigned long old;
+
+    old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);
+    kvmppc_radix_tlbie_page(kvm, gpa, shift);
+    if (old & _PAGE_DIRTY) {
+        unsigned long gfn = gpa >> PAGE_SHIFT;
+        struct kvm_memory_slot *memslot;
+
+        memslot = gfn_to_memslot(kvm, gfn);
+        if (memslot && memslot->dirty_bitmap)
+            kvmppc_update_dirty_map(memslot, gfn, page_size);
+    }
+}
+
+/*
+ * kvmppc_free_p?d are used to free existing page tables, and recursively
+ * descend and clear and free children.
+ * Callers are responsible for flushing the PWC.
+ *
+ * When page tables are being unmapped/freed as part of page fault path
+ * (full == false), ptes are not expected. There is code to unmap them
+ * and emit a warning if encountered, but there may already be data
+ * corruption due to the unexpected mappings.
+ */
+static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full)
+{
+    if (full) {
+        memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
+    } else {
+        pte_t *p = pte;
+        unsigned long it;
+
+        for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
+            if (pte_val(*p) == 0)
+                continue;
+            WARN_ON_ONCE(1);
+            kvmppc_unmap_pte(kvm, p,
+                    pte_pfn(*p) << PAGE_SHIFT,
+                    PAGE_SHIFT);
+        }
+    }
+
+    kvmppc_pte_free(pte);
+}
+
+static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full)
+{
+    unsigned long im;
+    pmd_t *p = pmd;
+
+    for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
+        if (!pmd_present(*p))
+            continue;
+        if (pmd_is_leaf(*p)) {
+            if (full) {
+                pmd_clear(p);
+            } else {
+                WARN_ON_ONCE(1);
+                kvmppc_unmap_pte(kvm, (pte_t *)p,
+                        pte_pfn(*(pte_t *)p) << PAGE_SHIFT,
+                        PMD_SHIFT);
+            }
+        } else {
+            pte_t *pte;
+
+            pte = pte_offset_map(p, 0);
+            kvmppc_unmap_free_pte(kvm, pte, full);
+            pmd_clear(p);
+        }
+    }
+    kvmppc_pmd_free(pmd);
+}
+
+static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud)
+{
+    unsigned long iu;
+    pud_t *p = pud;
+
+    for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
+        if (!pud_present(*p))
+            continue;
+        if (pud_huge(*p)) {
+            pud_clear(p);
+        } else {
+            pmd_t *pmd;
+
+            pmd = pmd_offset(p, 0);
+            kvmppc_unmap_free_pmd(kvm, pmd, true);
+            pud_clear(p);
+        }
+    }
+    pud_free(kvm->mm, pud);
+}
+
+void kvmppc_free_radix(struct kvm *kvm)
+{
+    unsigned long ig;
+    pgd_t *pgd;
+
+    if (!kvm->arch.pgtable)
+        return;
+    pgd = kvm->arch.pgtable;
+    for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
+        pud_t *pud;
+
+        if (!pgd_present(*pgd))
+            continue;
+        pud = pud_offset(pgd, 0);
+        kvmppc_unmap_free_pud(kvm, pud);
+        pgd_clear(pgd);
+    }
+    pgd_free(kvm->mm, kvm->arch.pgtable);
+    kvm->arch.pgtable = NULL;
+}
+
+static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
+        unsigned long gpa)
+{
+    pte_t *pte = pte_offset_kernel(pmd, 0);
+
+    /*
+     * Clearing the pmd entry then flushing the PWC ensures that the pte
+     * page no longer be cached by the MMU, so can be freed without
+     * flushing the PWC again.
+     */
+    pmd_clear(pmd);
+    kvmppc_radix_flush_pwc(kvm);
+
+    kvmppc_unmap_free_pte(kvm, pte, false);
+}
+
+static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
+        unsigned long gpa)
+{
+    pmd_t *pmd = pmd_offset(pud, 0);
+
+    /*
+     * Clearing the pud entry then flushing the PWC ensures that the pmd
+     * page and any children pte pages will no longer be cached by the MMU,
+     * so can be freed without flushing the PWC again.
+     */
+    pud_clear(pud);
+    kvmppc_radix_flush_pwc(kvm);
+
+    kvmppc_unmap_free_pmd(kvm, pmd, false);
+}
+
+/*
+ * There are a number of bits which may differ between different faults to
+ * the same partition scope entry. RC bits, in the course of cleaning and
+ * aging. And the write bit can change, either the access could have been
+ * upgraded, or a read fault could happen concurrently with a write fault
+ * that sets those bits first.
+ */
+#define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED))
+
 static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
         unsigned int level, unsigned long mmu_seq)
 {
@@ -235,7 +376,6 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
     pud_t *pud, *new_pud = NULL;
     pmd_t *pmd, *new_pmd = NULL;
     pte_t *ptep, *new_ptep = NULL;
-    unsigned long old;
     int ret;
 
     /* Traverse the guest's 2nd-level tree, allocate new levels needed */
@@ -273,42 +413,39 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
     if (pud_huge(*pud)) {
         unsigned long hgpa = gpa & PUD_MASK;
 
+        /* Check if we raced and someone else has set the same thing */
+        if (level == 2) {
+            if (pud_raw(*pud) == pte_raw(pte)) {
+                ret = 0;
+                goto out_unlock;
+            }
+            /* Valid 1GB page here already, add our extra bits */
+            WARN_ON_ONCE((pud_val(*pud) ^ pte_val(pte)) &
+                    PTE_BITS_MUST_MATCH);
+            kvmppc_radix_update_pte(kvm, (pte_t *)pud,
+                    0, pte_val(pte), hgpa, PUD_SHIFT);
+            ret = 0;
+            goto out_unlock;
+        }
         /*
          * If we raced with another CPU which has just put
          * a 1GB pte in after we saw a pmd page, try again.
          */
-        if (level <= 1 && !new_pmd) {
+        if (!new_pmd) {
             ret = -EAGAIN;
             goto out_unlock;
         }
-        /* Check if we raced and someone else has set the same thing */
-        if (level == 2 && pud_raw(*pud) == pte_raw(pte)) {
-            ret = 0;
-            goto out_unlock;
-        }
         /* Valid 1GB page here already, remove it */
-        old = kvmppc_radix_update_pte(kvm, (pte_t *)pud,
-                ~0UL, 0, hgpa, PUD_SHIFT);
-        kvmppc_radix_tlbie_page(kvm, hgpa, PUD_SHIFT);
-        if (old & _PAGE_DIRTY) {
-            unsigned long gfn = hgpa >> PAGE_SHIFT;
-            struct kvm_memory_slot *memslot;
-            memslot = gfn_to_memslot(kvm, gfn);
-            if (memslot && memslot->dirty_bitmap)
-                kvmppc_update_dirty_map(memslot,
-                        gfn, PUD_SIZE);
-        }
+        kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT);
     }
     if (level == 2) {
         if (!pud_none(*pud)) {
             /*
              * There's a page table page here, but we wanted to
              * install a large page, so remove and free the page
-             * table page. new_pmd will be NULL since level == 2.
+             * table page.
             */
-            new_pmd = pmd_offset(pud, 0);
-            pud_clear(pud);
-            kvmppc_radix_flush_pwc(kvm, gpa);
+            kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa);
         }
         kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
         ret = 0;
@@ -324,42 +461,40 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
     if (pmd_is_leaf(*pmd)) {
         unsigned long lgpa = gpa & PMD_MASK;
 
+        /* Check if we raced and someone else has set the same thing */
+        if (level == 1) {
+            if (pmd_raw(*pmd) == pte_raw(pte)) {
+                ret = 0;
+                goto out_unlock;
+            }
+            /* Valid 2MB page here already, add our extra bits */
+            WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) &
+                    PTE_BITS_MUST_MATCH);
+            kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
+                    0, pte_val(pte), lgpa, PMD_SHIFT);
+            ret = 0;
+            goto out_unlock;
+        }
+
         /*
          * If we raced with another CPU which has just put
          * a 2MB pte in after we saw a pte page, try again.
         */
-        if (level == 0 && !new_ptep) {
+        if (!new_ptep) {
             ret = -EAGAIN;
             goto out_unlock;
         }
-        /* Check if we raced and someone else has set the same thing */
-        if (level == 1 && pmd_raw(*pmd) == pte_raw(pte)) {
-            ret = 0;
-            goto out_unlock;
-        }
         /* Valid 2MB page here already, remove it */
-        old = kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
-                ~0UL, 0, lgpa, PMD_SHIFT);
-        kvmppc_radix_tlbie_page(kvm, lgpa, PMD_SHIFT);
-        if (old & _PAGE_DIRTY) {
-            unsigned long gfn = lgpa >> PAGE_SHIFT;
-            struct kvm_memory_slot *memslot;
-            memslot = gfn_to_memslot(kvm, gfn);
-            if (memslot && memslot->dirty_bitmap)
-                kvmppc_update_dirty_map(memslot,
-                        gfn, PMD_SIZE);
-        }
+        kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT);
     }
     if (level == 1) {
         if (!pmd_none(*pmd)) {
             /*
              * There's a page table page here, but we wanted to
              * install a large page, so remove and free the page
-             * table page. new_ptep will be NULL since level == 1.
+             * table page.
             */
-            new_ptep = pte_offset_kernel(pmd, 0);
-            pmd_clear(pmd);
-            kvmppc_radix_flush_pwc(kvm, gpa);
+            kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa);
         }
         kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
         ret = 0;
@@ -378,12 +513,12 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
         ret = 0;
         goto out_unlock;
     }
-    /* PTE was previously valid, so invalidate it */
-    old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT,
-            0, gpa, 0);
-    kvmppc_radix_tlbie_page(kvm, gpa, 0);
-    if (old & _PAGE_DIRTY)
-        mark_page_dirty(kvm, gpa >> PAGE_SHIFT);
+    /* Valid page here already, add our extra bits */
+    WARN_ON_ONCE((pte_val(*ptep) ^ pte_val(pte)) &
+            PTE_BITS_MUST_MATCH);
+    kvmppc_radix_update_pte(kvm, ptep, 0, pte_val(pte), gpa, 0);
+    ret = 0;
+    goto out_unlock;
     }
     kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
     ret = 0;
@@ -565,9 +700,13 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
         unsigned long mask = (1ul << shift) - PAGE_SIZE;
         pte = __pte(pte_val(pte) | (hva & mask));
     }
-    if (!(writing || upgrade_write))
-        pte = __pte(pte_val(pte) & ~ _PAGE_WRITE);
-    pte = __pte(pte_val(pte) | _PAGE_EXEC);
+    pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
+    if (writing || upgrade_write) {
+        if (pte_val(pte) & _PAGE_WRITE)
+            pte = __pte(pte_val(pte) | _PAGE_DIRTY);
+    } else {
+        pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
+    }
 }
 
 /* Allocate space in the tree and write the PTE */
@@ -734,51 +873,6 @@ int kvmppc_init_vm_radix(struct kvm *kvm)
     return 0;
 }
 
-void kvmppc_free_radix(struct kvm *kvm)
-{
-    unsigned long ig, iu, im;
-    pte_t *pte;
-    pmd_t *pmd;
-    pud_t *pud;
-    pgd_t *pgd;
-
-    if (!kvm->arch.pgtable)
-        return;
-    pgd = kvm->arch.pgtable;
-    for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
-        if (!pgd_present(*pgd))
-            continue;
-        pud = pud_offset(pgd, 0);
-        for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++pud) {
-            if (!pud_present(*pud))
-                continue;
-            if (pud_huge(*pud)) {
-                pud_clear(pud);
-                continue;
-            }
-            pmd = pmd_offset(pud, 0);
-            for (im = 0; im < PTRS_PER_PMD; ++im, ++pmd) {
-                if (pmd_is_leaf(*pmd)) {
-                    pmd_clear(pmd);
-                    continue;
-                }
-                if (!pmd_present(*pmd))
-                    continue;
-                pte = pte_offset_map(pmd, 0);
-                memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
-                kvmppc_pte_free(pte);
-                pmd_clear(pmd);
-            }
-            kvmppc_pmd_free(pmd_offset(pud, 0));
-            pud_clear(pud);
-        }
-        pud_free(kvm->mm, pud_offset(pgd, 0));
-        pgd_clear(pgd);
-    }
-    pgd_free(kvm->mm, kvm->arch.pgtable);
-    kvm->arch.pgtable = NULL;
-}
-
 static void pte_ctor(void *addr)
 {
     memset(addr, 0, RADIX_PTE_TABLE_SIZE);
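Worth noting on the rewritten kvmppc_radix_tlbie_page(): the ap-field encoding and the raw tlbie sequences (including the CPU_FTR_P9_TLBIE_BUG double-flush workaround) move behind the common radix__flush_tlb_lpid_page() helper, so the KVM side only computes the page size and aligns the address. A trivial standalone check of that alignment step (the 4K PAGE_SIZE here is an assumption):

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL    /* assumed 4K base pages */

/* The alignment step of the rewritten kvmppc_radix_tlbie_page():
 * pshift == 0 means a base page; otherwise the page is 1 << pshift bytes. */
static unsigned long align_for_flush(unsigned long addr, unsigned int pshift)
{
    unsigned long psize = pshift ? (1UL << pshift) : PAGE_SIZE;

    return addr & ~(psize - 1);
}

int main(void)
{
    printf("2MB page:  0x%lx\n", align_for_flush(0x12345678UL, 21));
    printf("base page: 0x%lx\n", align_for_flush(0x12345678UL, 0));
    return 0;
}
```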
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 4dffa611376d..d066e37551ec 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c | |||
| @@ -176,14 +176,12 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, | |||
| 176 | 176 | ||
| 177 | if (!tbltmp) | 177 | if (!tbltmp) |
| 178 | continue; | 178 | continue; |
| 179 | /* | 179 | /* Make sure hardware table parameters are compatible */ |
| 180 | * Make sure hardware table parameters are exactly the same; | 180 | if ((tbltmp->it_page_shift <= stt->page_shift) && |
| 181 | * this is used in the TCE handlers where boundary checks | 181 | (tbltmp->it_offset << tbltmp->it_page_shift == |
| 182 | * use only the first attached table. | 182 | stt->offset << stt->page_shift) && |
| 183 | */ | 183 | (tbltmp->it_size << tbltmp->it_page_shift == |
| 184 | if ((tbltmp->it_page_shift == stt->page_shift) && | 184 | stt->size << stt->page_shift)) { |
| 185 | (tbltmp->it_offset == stt->offset) && | ||
| 186 | (tbltmp->it_size == stt->size)) { | ||
| 187 | /* | 185 | /* |
| 188 | * Reference the table to avoid races with | 186 | * Reference the table to avoid races with |
| 189 | * add/remove DMA windows. | 187 | * add/remove DMA windows. |
| @@ -237,7 +235,7 @@ static void release_spapr_tce_table(struct rcu_head *head) | |||
| 237 | kfree(stt); | 235 | kfree(stt); |
| 238 | } | 236 | } |
| 239 | 237 | ||
| 240 | static int kvm_spapr_tce_fault(struct vm_fault *vmf) | 238 | static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf) |
| 241 | { | 239 | { |
| 242 | struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data; | 240 | struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data; |
| 243 | struct page *page; | 241 | struct page *page; |
| @@ -302,7 +300,8 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, | |||
| 302 | int ret = -ENOMEM; | 300 | int ret = -ENOMEM; |
| 303 | int i; | 301 | int i; |
| 304 | 302 | ||
| 305 | if (!args->size) | 303 | if (!args->size || args->page_shift < 12 || args->page_shift > 34 || |
| 304 | (args->offset + args->size > (ULLONG_MAX >> args->page_shift))) | ||
| 306 | return -EINVAL; | 305 | return -EINVAL; |
| 307 | 306 | ||
| 308 | size = _ALIGN_UP(args->size, PAGE_SIZE >> 3); | 307 | size = _ALIGN_UP(args->size, PAGE_SIZE >> 3); |
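
The new guard above rejects windows whose page size is outside the 4 KB (shift 12) to 16 GB (shift 34) range, or whose end, counted in pages, would overflow a 64-bit bus address once scaled back up. A minimal standalone sketch of the same arithmetic, assuming illustrative values (tce_window_valid() is a hypothetical helper, not part of the patch):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* page_shift spans 4 KB (12) to 16 GB (34); offset/size are in pages */
	static bool tce_window_valid(uint64_t size, uint32_t page_shift,
				     uint64_t offset)
	{
		if (!size || page_shift < 12 || page_shift > 34)
			return false;
		/* the window end, scaled back up, must fit in 64 bits */
		return offset + size <= (UINT64_MAX >> page_shift);
	}

	int main(void)
	{
		printf("%d\n", tce_window_valid(1ULL << 20, 16, 0));	/* 1: ok */
		printf("%d\n", tce_window_valid(1ULL << 20, 35, 0));	/* 0: shift */
		printf("%d\n", tce_window_valid(1ULL << 62, 16, 0));	/* 0: overflow */
		return 0;
	}

Comparing against ULLONG_MAX shifted down, as the hunk does, avoids multiplying offset and size by the page size, which could itself overflow.
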
| @@ -396,7 +395,7 @@ static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm, | |||
| 396 | return H_SUCCESS; | 395 | return H_SUCCESS; |
| 397 | } | 396 | } |
| 398 | 397 | ||
| 399 | static long kvmppc_tce_iommu_unmap(struct kvm *kvm, | 398 | static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm, |
| 400 | struct iommu_table *tbl, unsigned long entry) | 399 | struct iommu_table *tbl, unsigned long entry) |
| 401 | { | 400 | { |
| 402 | enum dma_data_direction dir = DMA_NONE; | 401 | enum dma_data_direction dir = DMA_NONE; |
| @@ -416,7 +415,24 @@ static long kvmppc_tce_iommu_unmap(struct kvm *kvm, | |||
| 416 | return ret; | 415 | return ret; |
| 417 | } | 416 | } |
| 418 | 417 | ||
| 419 | long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl, | 418 | static long kvmppc_tce_iommu_unmap(struct kvm *kvm, |
| 419 | struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl, | ||
| 420 | unsigned long entry) | ||
| 421 | { | ||
| 422 | unsigned long i, ret = H_SUCCESS; | ||
| 423 | unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); | ||
| 424 | unsigned long io_entry = entry * subpages; | ||
| 425 | |||
| 426 | for (i = 0; i < subpages; ++i) { | ||
| 427 | ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i); | ||
| 428 | if (ret != H_SUCCESS) | ||
| 429 | break; | ||
| 430 | } | ||
| 431 | |||
| 432 | return ret; | ||
| 433 | } | ||
| 434 | |||
| 435 | long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, | ||
| 420 | unsigned long entry, unsigned long ua, | 436 | unsigned long entry, unsigned long ua, |
| 421 | enum dma_data_direction dir) | 437 | enum dma_data_direction dir) |
| 422 | { | 438 | { |
| @@ -453,6 +469,27 @@ long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl, | |||
| 453 | return 0; | 469 | return 0; |
| 454 | } | 470 | } |
| 455 | 471 | ||
| 472 | static long kvmppc_tce_iommu_map(struct kvm *kvm, | ||
| 473 | struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl, | ||
| 474 | unsigned long entry, unsigned long ua, | ||
| 475 | enum dma_data_direction dir) | ||
| 476 | { | ||
| 477 | unsigned long i, pgoff, ret = H_SUCCESS; | ||
| 478 | unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); | ||
| 479 | unsigned long io_entry = entry * subpages; | ||
| 480 | |||
| 481 | for (i = 0, pgoff = 0; i < subpages; | ||
| 482 | ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) { | ||
| 483 | |||
| 484 | ret = kvmppc_tce_iommu_do_map(kvm, tbl, | ||
| 485 | io_entry + i, ua + pgoff, dir); | ||
| 486 | if (ret != H_SUCCESS) | ||
| 487 | break; | ||
| 488 | } | ||
| 489 | |||
| 490 | return ret; | ||
| 491 | } | ||
| 492 | |||
| 456 | long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, | 493 | long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, |
| 457 | unsigned long ioba, unsigned long tce) | 494 | unsigned long ioba, unsigned long tce) |
| 458 | { | 495 | { |
| @@ -491,10 +528,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, | |||
| 491 | 528 | ||
| 492 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { | 529 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { |
| 493 | if (dir == DMA_NONE) | 530 | if (dir == DMA_NONE) |
| 494 | ret = kvmppc_tce_iommu_unmap(vcpu->kvm, | 531 | ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt, |
| 495 | stit->tbl, entry); | 532 | stit->tbl, entry); |
| 496 | else | 533 | else |
| 497 | ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl, | 534 | ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl, |
| 498 | entry, ua, dir); | 535 | entry, ua, dir); |
| 499 | 536 | ||
| 500 | if (ret == H_SUCCESS) | 537 | if (ret == H_SUCCESS) |
| @@ -570,7 +607,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, | |||
| 570 | return H_PARAMETER; | 607 | return H_PARAMETER; |
| 571 | 608 | ||
| 572 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { | 609 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { |
| 573 | ret = kvmppc_tce_iommu_map(vcpu->kvm, | 610 | ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, |
| 574 | stit->tbl, entry + i, ua, | 611 | stit->tbl, entry + i, ua, |
| 575 | iommu_tce_direction(tce)); | 612 | iommu_tce_direction(tce)); |
| 576 | 613 | ||
| @@ -615,10 +652,10 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu, | |||
| 615 | return H_PARAMETER; | 652 | return H_PARAMETER; |
| 616 | 653 | ||
| 617 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { | 654 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { |
| 618 | unsigned long entry = ioba >> stit->tbl->it_page_shift; | 655 | unsigned long entry = ioba >> stt->page_shift; |
| 619 | 656 | ||
| 620 | for (i = 0; i < npages; ++i) { | 657 | for (i = 0; i < npages; ++i) { |
| 621 | ret = kvmppc_tce_iommu_unmap(vcpu->kvm, | 658 | ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt, |
| 622 | stit->tbl, entry + i); | 659 | stit->tbl, entry + i); |
| 623 | 660 | ||
| 624 | if (ret == H_SUCCESS) | 661 | if (ret == H_SUCCESS) |
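
The split into *_do_map()/*_do_unmap() plus the looping wrappers above exists because one guest TCE page may now be larger than a hardware IOMMU page; the relaxed attach-time check earlier in this file guarantees it_page_shift <= stt->page_shift. A sketch of the fan-out arithmetic outside the kernel, with illustrative shift values:

	#include <stdio.h>

	int main(void)
	{
		unsigned int guest_shift = 24;	/* stt->page_shift: 16 MB TCE pages */
		unsigned int hw_shift = 16;	/* tbl->it_page_shift: 64 KB pages */
		unsigned long entry = 3;	/* guest TCE index */

		/* one guest entry covers 'subpages' consecutive HW entries */
		unsigned long subpages = 1UL << (guest_shift - hw_shift);
		unsigned long io_entry = entry * subpages;

		for (unsigned long i = 0; i < subpages; ++i) {
			unsigned long pgoff = i << hw_shift;	/* ua advance */

			if (i < 2 || i == subpages - 1)
				printf("hw entry %lu <- ua + 0x%lx\n",
				       io_entry + i, pgoff);
		}
		return 0;
	}

With these values each guest entry expands to 256 hardware entries, which is exactly the loop the new kvmppc_tce_iommu_map() wrapper performs with pgoff stepping by IOMMU_PAGE_SIZE(tbl).
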
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 6651f736a0b1..925fc316a104 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c | |||
| @@ -221,7 +221,7 @@ static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm, | |||
| 221 | return H_SUCCESS; | 221 | return H_SUCCESS; |
| 222 | } | 222 | } |
| 223 | 223 | ||
| 224 | static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm, | 224 | static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm, |
| 225 | struct iommu_table *tbl, unsigned long entry) | 225 | struct iommu_table *tbl, unsigned long entry) |
| 226 | { | 226 | { |
| 227 | enum dma_data_direction dir = DMA_NONE; | 227 | enum dma_data_direction dir = DMA_NONE; |
| @@ -245,7 +245,24 @@ static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm, | |||
| 245 | return ret; | 245 | return ret; |
| 246 | } | 246 | } |
| 247 | 247 | ||
| 248 | static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl, | 248 | static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm, |
| 249 | struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl, | ||
| 250 | unsigned long entry) | ||
| 251 | { | ||
| 252 | unsigned long i, ret = H_SUCCESS; | ||
| 253 | unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); | ||
| 254 | unsigned long io_entry = entry * subpages; | ||
| 255 | |||
| 256 | for (i = 0; i < subpages; ++i) { | ||
| 257 | ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i); | ||
| 258 | if (ret != H_SUCCESS) | ||
| 259 | break; | ||
| 260 | } | ||
| 261 | |||
| 262 | return ret; | ||
| 263 | } | ||
| 264 | |||
| 265 | static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, | ||
| 249 | unsigned long entry, unsigned long ua, | 266 | unsigned long entry, unsigned long ua, |
| 250 | enum dma_data_direction dir) | 267 | enum dma_data_direction dir) |
| 251 | { | 268 | { |
| @@ -290,6 +307,27 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl, | |||
| 290 | return 0; | 307 | return 0; |
| 291 | } | 308 | } |
| 292 | 309 | ||
| 310 | static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, | ||
| 311 | struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl, | ||
| 312 | unsigned long entry, unsigned long ua, | ||
| 313 | enum dma_data_direction dir) | ||
| 314 | { | ||
| 315 | unsigned long i, pgoff, ret = H_SUCCESS; | ||
| 316 | unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); | ||
| 317 | unsigned long io_entry = entry * subpages; | ||
| 318 | |||
| 319 | for (i = 0, pgoff = 0; i < subpages; | ||
| 320 | ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) { | ||
| 321 | |||
| 322 | ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl, | ||
| 323 | io_entry + i, ua + pgoff, dir); | ||
| 324 | if (ret != H_SUCCESS) | ||
| 325 | break; | ||
| 326 | } | ||
| 327 | |||
| 328 | return ret; | ||
| 329 | } | ||
| 330 | |||
| 293 | long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, | 331 | long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, |
| 294 | unsigned long ioba, unsigned long tce) | 332 | unsigned long ioba, unsigned long tce) |
| 295 | { | 333 | { |
| @@ -327,10 +365,10 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, | |||
| 327 | 365 | ||
| 328 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { | 366 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { |
| 329 | if (dir == DMA_NONE) | 367 | if (dir == DMA_NONE) |
| 330 | ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, | 368 | ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt, |
| 331 | stit->tbl, entry); | 369 | stit->tbl, entry); |
| 332 | else | 370 | else |
| 333 | ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, | 371 | ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt, |
| 334 | stit->tbl, entry, ua, dir); | 372 | stit->tbl, entry, ua, dir); |
| 335 | 373 | ||
| 336 | if (ret == H_SUCCESS) | 374 | if (ret == H_SUCCESS) |
| @@ -477,7 +515,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, | |||
| 477 | return H_PARAMETER; | 515 | return H_PARAMETER; |
| 478 | 516 | ||
| 479 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { | 517 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { |
| 480 | ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, | 518 | ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt, |
| 481 | stit->tbl, entry + i, ua, | 519 | stit->tbl, entry + i, ua, |
| 482 | iommu_tce_direction(tce)); | 520 | iommu_tce_direction(tce)); |
| 483 | 521 | ||
| @@ -526,10 +564,10 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu, | |||
| 526 | return H_PARAMETER; | 564 | return H_PARAMETER; |
| 527 | 565 | ||
| 528 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { | 566 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { |
| 529 | unsigned long entry = ioba >> stit->tbl->it_page_shift; | 567 | unsigned long entry = ioba >> stt->page_shift; |
| 530 | 568 | ||
| 531 | for (i = 0; i < npages; ++i) { | 569 | for (i = 0; i < npages; ++i) { |
| 532 | ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, | 570 | ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt, |
| 533 | stit->tbl, entry + i); | 571 | stit->tbl, entry + i); |
| 534 | 572 | ||
| 535 | if (ret == H_SUCCESS) | 573 | if (ret == H_SUCCESS) |
| @@ -571,7 +609,7 @@ long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, | |||
| 571 | page = stt->pages[idx / TCES_PER_PAGE]; | 609 | page = stt->pages[idx / TCES_PER_PAGE]; |
| 572 | tbl = (u64 *)page_address(page); | 610 | tbl = (u64 *)page_address(page); |
| 573 | 611 | ||
| 574 | vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE]; | 612 | vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE]; |
| 575 | 613 | ||
| 576 | return H_SUCCESS; | 614 | return H_SUCCESS; |
| 577 | } | 615 | } |
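
Both the virtual-mode and the real-mode H_STUFF_TCE handlers above now derive the entry index from the guest window's page shift (stt->page_shift) instead of the hardware table's, leaving the subpage expansion to the unmap wrapper. A small sketch of what changes for a given ioba (values illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned long ioba = 0x3000000;	/* 48 MB into the DMA window */
		unsigned int guest_shift = 24;	/* stt->page_shift */
		unsigned int hw_shift = 16;	/* stit->tbl->it_page_shift */

		/* before: indexed by the hardware table's page size */
		printf("hw-indexed entry:    %lu\n", ioba >> hw_shift);	/* 768 */
		/* after: indexed by the guest window's page size; the unmap
		 * wrapper then fans out over the hardware subpages */
		printf("guest-indexed entry: %lu\n", ioba >> guest_shift);	/* 3 */
		return 0;
	}
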
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index 68d68983948e..36b11c5a0dbb 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c | |||
| @@ -23,7 +23,9 @@ | |||
| 23 | #include <asm/reg.h> | 23 | #include <asm/reg.h> |
| 24 | #include <asm/switch_to.h> | 24 | #include <asm/switch_to.h> |
| 25 | #include <asm/time.h> | 25 | #include <asm/time.h> |
| 26 | #include <asm/tm.h> | ||
| 26 | #include "book3s.h" | 27 | #include "book3s.h" |
| 28 | #include <asm/asm-prototypes.h> | ||
| 27 | 29 | ||
| 28 | #define OP_19_XOP_RFID 18 | 30 | #define OP_19_XOP_RFID 18 |
| 29 | #define OP_19_XOP_RFI 50 | 31 | #define OP_19_XOP_RFI 50 |
| @@ -47,6 +49,12 @@ | |||
| 47 | #define OP_31_XOP_EIOIO 854 | 49 | #define OP_31_XOP_EIOIO 854 |
| 48 | #define OP_31_XOP_SLBMFEE 915 | 50 | #define OP_31_XOP_SLBMFEE 915 |
| 49 | 51 | ||
| 52 | #define OP_31_XOP_TBEGIN 654 | ||
| 53 | #define OP_31_XOP_TABORT 910 | ||
| 54 | |||
| 55 | #define OP_31_XOP_TRECLAIM 942 | ||
| 56 | #define OP_31_XOP_TRCHKPT 1006 | ||
| 57 | |||
| 50 | /* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */ | 58 | /* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */ |
| 51 | #define OP_31_XOP_DCBZ 1010 | 59 | #define OP_31_XOP_DCBZ 1010 |
| 52 | 60 | ||
| @@ -87,6 +95,157 @@ static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level) | |||
| 87 | return true; | 95 | return true; |
| 88 | } | 96 | } |
| 89 | 97 | ||
| 98 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 99 | static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu) | ||
| 100 | { | ||
| 101 | memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0], | ||
| 102 | sizeof(vcpu->arch.gpr_tm)); | ||
| 103 | memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp, | ||
| 104 | sizeof(struct thread_fp_state)); | ||
| 105 | memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr, | ||
| 106 | sizeof(struct thread_vr_state)); | ||
| 107 | vcpu->arch.ppr_tm = vcpu->arch.ppr; | ||
| 108 | vcpu->arch.dscr_tm = vcpu->arch.dscr; | ||
| 109 | vcpu->arch.amr_tm = vcpu->arch.amr; | ||
| 110 | vcpu->arch.ctr_tm = vcpu->arch.regs.ctr; | ||
| 111 | vcpu->arch.tar_tm = vcpu->arch.tar; | ||
| 112 | vcpu->arch.lr_tm = vcpu->arch.regs.link; | ||
| 113 | vcpu->arch.cr_tm = vcpu->arch.cr; | ||
| 114 | vcpu->arch.xer_tm = vcpu->arch.regs.xer; | ||
| 115 | vcpu->arch.vrsave_tm = vcpu->arch.vrsave; | ||
| 116 | } | ||
| 117 | |||
| 118 | static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu) | ||
| 119 | { | ||
| 120 | memcpy(&vcpu->arch.regs.gpr[0], &vcpu->arch.gpr_tm[0], | ||
| 121 | sizeof(vcpu->arch.regs.gpr)); | ||
| 122 | memcpy(&vcpu->arch.fp, &vcpu->arch.fp_tm, | ||
| 123 | sizeof(struct thread_fp_state)); | ||
| 124 | memcpy(&vcpu->arch.vr, &vcpu->arch.vr_tm, | ||
| 125 | sizeof(struct thread_vr_state)); | ||
| 126 | vcpu->arch.ppr = vcpu->arch.ppr_tm; | ||
| 127 | vcpu->arch.dscr = vcpu->arch.dscr_tm; | ||
| 128 | vcpu->arch.amr = vcpu->arch.amr_tm; | ||
| 129 | vcpu->arch.regs.ctr = vcpu->arch.ctr_tm; | ||
| 130 | vcpu->arch.tar = vcpu->arch.tar_tm; | ||
| 131 | vcpu->arch.regs.link = vcpu->arch.lr_tm; | ||
| 132 | vcpu->arch.cr = vcpu->arch.cr_tm; | ||
| 133 | vcpu->arch.regs.xer = vcpu->arch.xer_tm; | ||
| 134 | vcpu->arch.vrsave = vcpu->arch.vrsave_tm; | ||
| 135 | } | ||
| 136 | |||
| 137 | static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val) | ||
| 138 | { | ||
| 139 | unsigned long guest_msr = kvmppc_get_msr(vcpu); | ||
| 140 | int fc_val = ra_val ? ra_val : 1; | ||
| 141 | uint64_t texasr; | ||
| 142 | |||
| 143 | /* CR0 = 0 | MSR[TS] | 0 */ | ||
| 144 | vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) | | ||
| 145 | (((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1)) | ||
| 146 | << CR0_SHIFT); | ||
| 147 | |||
| 148 | preempt_disable(); | ||
| 149 | tm_enable(); | ||
| 150 | texasr = mfspr(SPRN_TEXASR); | ||
| 151 | kvmppc_save_tm_pr(vcpu); | ||
| 152 | kvmppc_copyfrom_vcpu_tm(vcpu); | ||
| 153 | |||
| 154 | /* failure recording depends on the Failure Summary bit */ | ||

| 155 | if (!(texasr & TEXASR_FS)) { | ||
| 156 | texasr &= ~TEXASR_FC; | ||
| 157 | texasr |= ((u64)fc_val << TEXASR_FC_LG) | TEXASR_FS; | ||
| 158 | |||
| 159 | texasr &= ~(TEXASR_PR | TEXASR_HV); | ||
| 160 | if (kvmppc_get_msr(vcpu) & MSR_PR) | ||
| 161 | texasr |= TEXASR_PR; | ||
| 162 | |||
| 163 | if (kvmppc_get_msr(vcpu) & MSR_HV) | ||
| 164 | texasr |= TEXASR_HV; | ||
| 165 | |||
| 166 | vcpu->arch.texasr = texasr; | ||
| 167 | vcpu->arch.tfiar = kvmppc_get_pc(vcpu); | ||
| 168 | mtspr(SPRN_TEXASR, texasr); | ||
| 169 | mtspr(SPRN_TFIAR, vcpu->arch.tfiar); | ||
| 170 | } | ||
| 171 | tm_disable(); | ||
| 172 | /* | ||
| 173 | * treclaim needs to exit to non-transactional state. | ||
| 174 | */ | ||
| 175 | guest_msr &= ~(MSR_TS_MASK); | ||
| 176 | kvmppc_set_msr(vcpu, guest_msr); | ||
| 177 | preempt_enable(); | ||
| 178 | |||
| 179 | if (vcpu->arch.shadow_fscr & FSCR_TAR) | ||
| 180 | mtspr(SPRN_TAR, vcpu->arch.tar); | ||
| 181 | } | ||
| 182 | |||
| 183 | static void kvmppc_emulate_trchkpt(struct kvm_vcpu *vcpu) | ||
| 184 | { | ||
| 185 | unsigned long guest_msr = kvmppc_get_msr(vcpu); | ||
| 186 | |||
| 187 | preempt_disable(); | ||
| 188 | /* | ||
| 189 | * We need to flush FP/VEC/VSX to the vcpu save area | ||
| 190 | * before the copy. | ||
| 191 | */ | ||
| 192 | kvmppc_giveup_ext(vcpu, MSR_VSX); | ||
| 193 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); | ||
| 194 | kvmppc_copyto_vcpu_tm(vcpu); | ||
| 195 | kvmppc_save_tm_sprs(vcpu); | ||
| 196 | |||
| 197 | /* | ||
| 198 | * As a result of trecheckpoint, set TS to suspended. | ||
| 199 | */ | ||
| 200 | guest_msr &= ~(MSR_TS_MASK); | ||
| 201 | guest_msr |= MSR_TS_S; | ||
| 202 | kvmppc_set_msr(vcpu, guest_msr); | ||
| 203 | kvmppc_restore_tm_pr(vcpu); | ||
| 204 | preempt_enable(); | ||
| 205 | } | ||
| 206 | |||
| 207 | /* emulate tabort. in guest privileged state */ | ||
| 208 | void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val) | ||
| 209 | { | ||
| 210 | /* currently we only emulate tabort., with no emulation of | ||
| 211 | * other tabort variants, since the kernel makes no use of | ||
| 212 | * them at present. | ||
| 213 | */ | ||
| 214 | unsigned long guest_msr = kvmppc_get_msr(vcpu); | ||
| 215 | uint64_t org_texasr; | ||
| 216 | |||
| 217 | preempt_disable(); | ||
| 218 | tm_enable(); | ||
| 219 | org_texasr = mfspr(SPRN_TEXASR); | ||
| 220 | tm_abort(ra_val); | ||
| 221 | |||
| 222 | /* CR0 = 0 | MSR[TS] | 0 */ | ||
| 223 | vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) | | ||
| 224 | (((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1)) | ||
| 225 | << CR0_SHIFT); | ||
| 226 | |||
| 227 | vcpu->arch.texasr = mfspr(SPRN_TEXASR); | ||
| 228 | /* failure recording depends on the Failure Summary bit, | ||
| 229 | * and tabort is treated as a nop in non-transactional | ||
| 230 | * state. | ||
| 231 | */ | ||
| 232 | if (!(org_texasr & TEXASR_FS) && | ||
| 233 | MSR_TM_ACTIVE(guest_msr)) { | ||
| 234 | vcpu->arch.texasr &= ~(TEXASR_PR | TEXASR_HV); | ||
| 235 | if (guest_msr & MSR_PR) | ||
| 236 | vcpu->arch.texasr |= TEXASR_PR; | ||
| 237 | |||
| 238 | if (guest_msr & MSR_HV) | ||
| 239 | vcpu->arch.texasr |= TEXASR_HV; | ||
| 240 | |||
| 241 | vcpu->arch.tfiar = kvmppc_get_pc(vcpu); | ||
| 242 | } | ||
| 243 | tm_disable(); | ||
| 244 | preempt_enable(); | ||
| 245 | } | ||
| 246 | |||
| 247 | #endif | ||
| 248 | |||
| 90 | int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | 249 | int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 91 | unsigned int inst, int *advance) | 250 | unsigned int inst, int *advance) |
| 92 | { | 251 | { |
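
The "CR0 = 0 | MSR[TS] | 0" comment in the treclaim/tabort emulation above describes how the two-bit transaction-state field is reported in CR field 0. A sketch of that encoding, assuming the usual reg.h bit positions (MSR_TS_S_LG = 33, CR0_SHIFT = 28); constants are written out here for illustration:

	#include <stdint.h>
	#include <stdio.h>

	#define MSR_TS_S_LG	33			/* bit 33 = S, bit 34 = T */
	#define MSR_TS_MASK	(3ULL << MSR_TS_S_LG)
	#define CR0_SHIFT	28
	#define CR0_MASK	0xFUL

	/* "CR0 = 0 | MSR[TS] | 0": the 2-bit TS field lands in bits 2:1 of CR0 */
	static uint32_t cr0_from_ts(uint32_t cr, uint64_t msr)
	{
		return (uint32_t)((cr & ~(CR0_MASK << CR0_SHIFT)) |
				  (((msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
				   << CR0_SHIFT));
	}

	int main(void)
	{
		uint64_t msr_suspended = 1ULL << MSR_TS_S_LG;	/* TS = 0b01 */

		/* prints 2: CR0 = 0b0010 for a suspended transaction */
		printf("CR0 = %u\n", cr0_from_ts(0, msr_suspended) >> CR0_SHIFT);
		return 0;
	}
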
| @@ -117,11 +276,28 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 117 | case 19: | 276 | case 19: |
| 118 | switch (get_xop(inst)) { | 277 | switch (get_xop(inst)) { |
| 119 | case OP_19_XOP_RFID: | 278 | case OP_19_XOP_RFID: |
| 120 | case OP_19_XOP_RFI: | 279 | case OP_19_XOP_RFI: { |
| 280 | unsigned long srr1 = kvmppc_get_srr1(vcpu); | ||
| 281 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 282 | unsigned long cur_msr = kvmppc_get_msr(vcpu); | ||
| 283 | |||
| 284 | /* | ||
| 285 | * Add rules to fit the ISA specification regarding TM | ||
| 286 | * state transitions: in TM-disabled/Suspended state, | ||
| 287 | * a transition to the TM inactive (00) state must be | ||
| 288 | * suppressed. | ||
| 289 | */ | ||
| 290 | if (((cur_msr & MSR_TM) == 0) && | ||
| 291 | ((srr1 & MSR_TM) == 0) && | ||
| 292 | MSR_TM_SUSPENDED(cur_msr) && | ||
| 293 | !MSR_TM_ACTIVE(srr1)) | ||
| 294 | srr1 |= MSR_TS_S; | ||
| 295 | #endif | ||
| 121 | kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu)); | 296 | kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu)); |
| 122 | kvmppc_set_msr(vcpu, kvmppc_get_srr1(vcpu)); | 297 | kvmppc_set_msr(vcpu, srr1); |
| 123 | *advance = 0; | 298 | *advance = 0; |
| 124 | break; | 299 | break; |
| 300 | } | ||
| 125 | 301 | ||
| 126 | default: | 302 | default: |
| 127 | emulated = EMULATE_FAIL; | 303 | emulated = EMULATE_FAIL; |
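
The rfid fix-up above reduces to one predicate: when the TM facility is off in both the current MSR and SRR1, and the vcpu is currently Suspended, an rfid that would land in TM inactive (00) must keep TS at Suspended. A sketch with the relevant MSR macros written out (bit positions as in reg.h, everything else illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define MSR_TM		(1ULL << 32)	/* TM facility available */
	#define MSR_TS_S	(1ULL << 33)	/* TS = Suspended */
	#define MSR_TS_T	(1ULL << 34)	/* TS = Transactional */
	#define MSR_TS_MASK	(MSR_TS_S | MSR_TS_T)
	#define MSR_TM_SUSPENDED(m)	(((m) & MSR_TS_MASK) == MSR_TS_S)
	#define MSR_TM_ACTIVE(m)	(((m) & MSR_TS_MASK) != 0)

	/* suppress the Suspended -> Non-transactional change when TM is off */
	static uint64_t rfid_srr1(uint64_t cur_msr, uint64_t srr1)
	{
		if (((cur_msr & MSR_TM) == 0) && ((srr1 & MSR_TM) == 0) &&
		    MSR_TM_SUSPENDED(cur_msr) && !MSR_TM_ACTIVE(srr1))
			srr1 |= MSR_TS_S;
		return srr1;
	}

	int main(void)
	{
		uint64_t cur = MSR_TS_S;	/* suspended, TM facility disabled */

		printf("%d\n", MSR_TM_SUSPENDED(rfid_srr1(cur, 0)) != 0);	/* 1 */
		return 0;
	}
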
| @@ -304,6 +480,140 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 304 | 480 | ||
| 305 | break; | 481 | break; |
| 306 | } | 482 | } |
| 483 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 484 | case OP_31_XOP_TBEGIN: | ||
| 485 | { | ||
| 486 | if (!cpu_has_feature(CPU_FTR_TM)) | ||
| 487 | break; | ||
| 488 | |||
| 489 | if (!(kvmppc_get_msr(vcpu) & MSR_TM)) { | ||
| 490 | kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG); | ||
| 491 | emulated = EMULATE_AGAIN; | ||
| 492 | break; | ||
| 493 | } | ||
| 494 | |||
| 495 | if (!(kvmppc_get_msr(vcpu) & MSR_PR)) { | ||
| 496 | preempt_disable(); | ||
| 497 | vcpu->arch.cr = (CR0_TBEGIN_FAILURE | | ||
| 498 | (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT))); | ||
| 499 | |||
| 500 | vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT | | ||
| 501 | (((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT)) | ||
| 502 | << TEXASR_FC_LG)); | ||
| 503 | |||
| 504 | if ((inst >> 21) & 0x1) | ||
| 505 | vcpu->arch.texasr |= TEXASR_ROT; | ||
| 506 | |||
| 507 | if (kvmppc_get_msr(vcpu) & MSR_HV) | ||
| 508 | vcpu->arch.texasr |= TEXASR_HV; | ||
| 509 | |||
| 510 | vcpu->arch.tfhar = kvmppc_get_pc(vcpu) + 4; | ||
| 511 | vcpu->arch.tfiar = kvmppc_get_pc(vcpu); | ||
| 512 | |||
| 513 | kvmppc_restore_tm_sprs(vcpu); | ||
| 514 | preempt_enable(); | ||
| 515 | } else | ||
| 516 | emulated = EMULATE_FAIL; | ||
| 517 | break; | ||
| 518 | } | ||
| 519 | case OP_31_XOP_TABORT: | ||
| 520 | { | ||
| 521 | ulong guest_msr = kvmppc_get_msr(vcpu); | ||
| 522 | unsigned long ra_val = 0; | ||
| 523 | |||
| 524 | if (!cpu_has_feature(CPU_FTR_TM)) | ||
| 525 | break; | ||
| 526 | |||
| 527 | if (!(kvmppc_get_msr(vcpu) & MSR_TM)) { | ||
| 528 | kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG); | ||
| 529 | emulated = EMULATE_AGAIN; | ||
| 530 | break; | ||
| 531 | } | ||
| 532 | |||
| 533 | /* only emulate for a privileged guest, since a problem-state | ||
| 534 | * guest can run with TM enabled and we don't expect to | ||
| 535 | * trap here in that case. | ||
| 536 | */ | ||
| 537 | WARN_ON(guest_msr & MSR_PR); | ||
| 538 | |||
| 539 | if (ra) | ||
| 540 | ra_val = kvmppc_get_gpr(vcpu, ra); | ||
| 541 | |||
| 542 | kvmppc_emulate_tabort(vcpu, ra_val); | ||
| 543 | break; | ||
| 544 | } | ||
| 545 | case OP_31_XOP_TRECLAIM: | ||
| 546 | { | ||
| 547 | ulong guest_msr = kvmppc_get_msr(vcpu); | ||
| 548 | unsigned long ra_val = 0; | ||
| 549 | |||
| 550 | if (!cpu_has_feature(CPU_FTR_TM)) | ||
| 551 | break; | ||
| 552 | |||
| 553 | if (!(kvmppc_get_msr(vcpu) & MSR_TM)) { | ||
| 554 | kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG); | ||
| 555 | emulated = EMULATE_AGAIN; | ||
| 556 | break; | ||
| 557 | } | ||
| 558 | |||
| 559 | /* generate interrupts based on priorities */ | ||
| 560 | if (guest_msr & MSR_PR) { | ||
| 561 | /* Privileged Instruction type Program Interrupt */ | ||
| 562 | kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV); | ||
| 563 | emulated = EMULATE_AGAIN; | ||
| 564 | break; | ||
| 565 | } | ||
| 566 | |||
| 567 | if (!MSR_TM_ACTIVE(guest_msr)) { | ||
| 568 | /* TM bad thing interrupt */ | ||
| 569 | kvmppc_core_queue_program(vcpu, SRR1_PROGTM); | ||
| 570 | emulated = EMULATE_AGAIN; | ||
| 571 | break; | ||
| 572 | } | ||
| 573 | |||
| 574 | if (ra) | ||
| 575 | ra_val = kvmppc_get_gpr(vcpu, ra); | ||
| 576 | kvmppc_emulate_treclaim(vcpu, ra_val); | ||
| 577 | break; | ||
| 578 | } | ||
| 579 | case OP_31_XOP_TRCHKPT: | ||
| 580 | { | ||
| 581 | ulong guest_msr = kvmppc_get_msr(vcpu); | ||
| 582 | unsigned long texasr; | ||
| 583 | |||
| 584 | if (!cpu_has_feature(CPU_FTR_TM)) | ||
| 585 | break; | ||
| 586 | |||
| 587 | if (!(kvmppc_get_msr(vcpu) & MSR_TM)) { | ||
| 588 | kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG); | ||
| 589 | emulated = EMULATE_AGAIN; | ||
| 590 | break; | ||
| 591 | } | ||
| 592 | |||
| 593 | /* generate interrupt based on priorities */ | ||
| 594 | if (guest_msr & MSR_PR) { | ||
| 595 | /* Privileged Instruction type Program Interrupt */ | ||
| 596 | kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV); | ||
| 597 | emulated = EMULATE_AGAIN; | ||
| 598 | break; | ||
| 599 | } | ||
| 600 | |||
| 601 | tm_enable(); | ||
| 602 | texasr = mfspr(SPRN_TEXASR); | ||
| 603 | tm_disable(); | ||
| 604 | |||
| 605 | if (MSR_TM_ACTIVE(guest_msr) || | ||
| 606 | !(texasr & (TEXASR_FS))) { | ||
| 607 | /* TM bad thing interrupt */ | ||
| 608 | kvmppc_core_queue_program(vcpu, SRR1_PROGTM); | ||
| 609 | emulated = EMULATE_AGAIN; | ||
| 610 | break; | ||
| 611 | } | ||
| 612 | |||
| 613 | kvmppc_emulate_trchkpt(vcpu); | ||
| 614 | break; | ||
| 615 | } | ||
| 616 | #endif | ||
| 307 | default: | 617 | default: |
| 308 | emulated = EMULATE_FAIL; | 618 | emulated = EMULATE_FAIL; |
| 309 | } | 619 | } |
| @@ -465,13 +775,38 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) | |||
| 465 | break; | 775 | break; |
| 466 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 776 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
| 467 | case SPRN_TFHAR: | 777 | case SPRN_TFHAR: |
| 468 | vcpu->arch.tfhar = spr_val; | ||
| 469 | break; | ||
| 470 | case SPRN_TEXASR: | 778 | case SPRN_TEXASR: |
| 471 | vcpu->arch.texasr = spr_val; | ||
| 472 | break; | ||
| 473 | case SPRN_TFIAR: | 779 | case SPRN_TFIAR: |
| 474 | vcpu->arch.tfiar = spr_val; | 780 | if (!cpu_has_feature(CPU_FTR_TM)) |
| 781 | break; | ||
| 782 | |||
| 783 | if (!(kvmppc_get_msr(vcpu) & MSR_TM)) { | ||
| 784 | kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG); | ||
| 785 | emulated = EMULATE_AGAIN; | ||
| 786 | break; | ||
| 787 | } | ||
| 788 | |||
| 789 | if (MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)) && | ||
| 790 | !((MSR_TM_SUSPENDED(kvmppc_get_msr(vcpu))) && | ||
| 791 | (sprn == SPRN_TFHAR))) { | ||
| 792 | /* it is illegal to mtspr() the TM regs in any | ||
| 793 | * state other than non-transactional, with the | ||
| 794 | * exception of TFHAR in suspended state. | ||
| 795 | */ | ||
| 796 | kvmppc_core_queue_program(vcpu, SRR1_PROGTM); | ||
| 797 | emulated = EMULATE_AGAIN; | ||
| 798 | break; | ||
| 799 | } | ||
| 800 | |||
| 801 | tm_enable(); | ||
| 802 | if (sprn == SPRN_TFHAR) | ||
| 803 | mtspr(SPRN_TFHAR, spr_val); | ||
| 804 | else if (sprn == SPRN_TEXASR) | ||
| 805 | mtspr(SPRN_TEXASR, spr_val); | ||
| 806 | else | ||
| 807 | mtspr(SPRN_TFIAR, spr_val); | ||
| 808 | tm_disable(); | ||
| 809 | |||
| 475 | break; | 810 | break; |
| 476 | #endif | 811 | #endif |
| 477 | #endif | 812 | #endif |
| @@ -618,13 +953,25 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val | |||
| 618 | break; | 953 | break; |
| 619 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 954 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
| 620 | case SPRN_TFHAR: | 955 | case SPRN_TFHAR: |
| 621 | *spr_val = vcpu->arch.tfhar; | ||
| 622 | break; | ||
| 623 | case SPRN_TEXASR: | 956 | case SPRN_TEXASR: |
| 624 | *spr_val = vcpu->arch.texasr; | ||
| 625 | break; | ||
| 626 | case SPRN_TFIAR: | 957 | case SPRN_TFIAR: |
| 627 | *spr_val = vcpu->arch.tfiar; | 958 | if (!cpu_has_feature(CPU_FTR_TM)) |
| 959 | break; | ||
| 960 | |||
| 961 | if (!(kvmppc_get_msr(vcpu) & MSR_TM)) { | ||
| 962 | kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG); | ||
| 963 | emulated = EMULATE_AGAIN; | ||
| 964 | break; | ||
| 965 | } | ||
| 966 | |||
| 967 | tm_enable(); | ||
| 968 | if (sprn == SPRN_TFHAR) | ||
| 969 | *spr_val = mfspr(SPRN_TFHAR); | ||
| 970 | else if (sprn == SPRN_TEXASR) | ||
| 971 | *spr_val = mfspr(SPRN_TEXASR); | ||
| 972 | else if (sprn == SPRN_TFIAR) | ||
| 973 | *spr_val = mfspr(SPRN_TFIAR); | ||
| 974 | tm_disable(); | ||
| 628 | break; | 975 | break; |
| 629 | #endif | 976 | #endif |
| 630 | #endif | 977 | #endif |
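
The mtspr/mfspr cases above share a common gate: without CPU_FTR_TM the access is silently dropped, with MSR.TM clear it raises a TM facility-unavailable interrupt, and an mtspr during an active transaction is a TM bad thing, except writing TFHAR while suspended. A condensed sketch of that decision (the enum and helper names are illustrative, not kernel symbols):

	#include <stdio.h>

	enum tm_spr_action { NOP, FAC_UNAVAIL, TM_BAD_THING, ACCESS_OK };

	/* gate for mtspr to TFHAR/TEXASR/TFIAR, per the case above */
	static enum tm_spr_action mtspr_tm_gate(int cpu_has_tm, int msr_tm,
						int ts_active, int ts_suspended,
						int is_tfhar)
	{
		if (!cpu_has_tm)
			return NOP;		/* silently ignored */
		if (!msr_tm)
			return FAC_UNAVAIL;	/* TM facility interrupt */
		if (ts_active && !(ts_suspended && is_tfhar))
			return TM_BAD_THING;	/* SRR1_PROGTM program intr */
		return ACCESS_OK;		/* forwarded to the real SPR */
	}

	int main(void)
	{
		/* suspended transaction, TFHAR: permitted */
		printf("%d\n", mtspr_tm_gate(1, 1, 1, 1, 1) == ACCESS_OK);
		/* transactional state, TEXASR: TM bad thing */
		printf("%d\n", mtspr_tm_gate(1, 1, 1, 0, 0) == TM_BAD_THING);
		return 0;
	}
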
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 8858ab8b6ca4..de686b340f4a 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c | |||
| @@ -123,6 +123,32 @@ static bool no_mixing_hpt_and_radix; | |||
| 123 | static void kvmppc_end_cede(struct kvm_vcpu *vcpu); | 123 | static void kvmppc_end_cede(struct kvm_vcpu *vcpu); |
| 124 | static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); | 124 | static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); |
| 125 | 125 | ||
| 126 | /* | ||
| 127 | * RWMR values for POWER8. These control the rate at which PURR | ||
| 128 | * and SPURR count and should be set according to the number of | ||
| 129 | * online threads in the vcore being run. | ||
| 130 | */ | ||
| 131 | #define RWMR_RPA_P8_1THREAD 0x164520C62609AECA | ||
| 132 | #define RWMR_RPA_P8_2THREAD 0x7FFF2908450D8DA9 | ||
| 133 | #define RWMR_RPA_P8_3THREAD 0x164520C62609AECA | ||
| 134 | #define RWMR_RPA_P8_4THREAD 0x199A421245058DA9 | ||
| 135 | #define RWMR_RPA_P8_5THREAD 0x164520C62609AECA | ||
| 136 | #define RWMR_RPA_P8_6THREAD 0x164520C62609AECA | ||
| 137 | #define RWMR_RPA_P8_7THREAD 0x164520C62609AECA | ||
| 138 | #define RWMR_RPA_P8_8THREAD 0x164520C62609AECA | ||
| 139 | |||
| 140 | static unsigned long p8_rwmr_values[MAX_SMT_THREADS + 1] = { | ||
| 141 | RWMR_RPA_P8_1THREAD, | ||
| 142 | RWMR_RPA_P8_1THREAD, | ||
| 143 | RWMR_RPA_P8_2THREAD, | ||
| 144 | RWMR_RPA_P8_3THREAD, | ||
| 145 | RWMR_RPA_P8_4THREAD, | ||
| 146 | RWMR_RPA_P8_5THREAD, | ||
| 147 | RWMR_RPA_P8_6THREAD, | ||
| 148 | RWMR_RPA_P8_7THREAD, | ||
| 149 | RWMR_RPA_P8_8THREAD, | ||
| 150 | }; | ||
| 151 | |||
| 126 | static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc, | 152 | static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc, |
| 127 | int *ip) | 153 | int *ip) |
| 128 | { | 154 | { |
| @@ -371,13 +397,13 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu) | |||
| 371 | 397 | ||
| 372 | pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id); | 398 | pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id); |
| 373 | pr_err("pc = %.16lx msr = %.16llx trap = %x\n", | 399 | pr_err("pc = %.16lx msr = %.16llx trap = %x\n", |
| 374 | vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap); | 400 | vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap); |
| 375 | for (r = 0; r < 16; ++r) | 401 | for (r = 0; r < 16; ++r) |
| 376 | pr_err("r%2d = %.16lx r%d = %.16lx\n", | 402 | pr_err("r%2d = %.16lx r%d = %.16lx\n", |
| 377 | r, kvmppc_get_gpr(vcpu, r), | 403 | r, kvmppc_get_gpr(vcpu, r), |
| 378 | r+16, kvmppc_get_gpr(vcpu, r+16)); | 404 | r+16, kvmppc_get_gpr(vcpu, r+16)); |
| 379 | pr_err("ctr = %.16lx lr = %.16lx\n", | 405 | pr_err("ctr = %.16lx lr = %.16lx\n", |
| 380 | vcpu->arch.ctr, vcpu->arch.lr); | 406 | vcpu->arch.regs.ctr, vcpu->arch.regs.link); |
| 381 | pr_err("srr0 = %.16llx srr1 = %.16llx\n", | 407 | pr_err("srr0 = %.16llx srr1 = %.16llx\n", |
| 382 | vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1); | 408 | vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1); |
| 383 | pr_err("sprg0 = %.16llx sprg1 = %.16llx\n", | 409 | pr_err("sprg0 = %.16llx sprg1 = %.16llx\n", |
| @@ -385,7 +411,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu) | |||
| 385 | pr_err("sprg2 = %.16llx sprg3 = %.16llx\n", | 411 | pr_err("sprg2 = %.16llx sprg3 = %.16llx\n", |
| 386 | vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); | 412 | vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); |
| 387 | pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n", | 413 | pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n", |
| 388 | vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr); | 414 | vcpu->arch.cr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr); |
| 389 | pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); | 415 | pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); |
| 390 | pr_err("fault dar = %.16lx dsisr = %.8x\n", | 416 | pr_err("fault dar = %.16lx dsisr = %.8x\n", |
| 391 | vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); | 417 | vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); |
| @@ -1526,6 +1552,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, | |||
| 1526 | *val = get_reg_val(id, vcpu->arch.dec_expires + | 1552 | *val = get_reg_val(id, vcpu->arch.dec_expires + |
| 1527 | vcpu->arch.vcore->tb_offset); | 1553 | vcpu->arch.vcore->tb_offset); |
| 1528 | break; | 1554 | break; |
| 1555 | case KVM_REG_PPC_ONLINE: | ||
| 1556 | *val = get_reg_val(id, vcpu->arch.online); | ||
| 1557 | break; | ||
| 1529 | default: | 1558 | default: |
| 1530 | r = -EINVAL; | 1559 | r = -EINVAL; |
| 1531 | break; | 1560 | break; |
| @@ -1757,6 +1786,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, | |||
| 1757 | vcpu->arch.dec_expires = set_reg_val(id, *val) - | 1786 | vcpu->arch.dec_expires = set_reg_val(id, *val) - |
| 1758 | vcpu->arch.vcore->tb_offset; | 1787 | vcpu->arch.vcore->tb_offset; |
| 1759 | break; | 1788 | break; |
| 1789 | case KVM_REG_PPC_ONLINE: | ||
| 1790 | i = set_reg_val(id, *val); | ||
| 1791 | if (i && !vcpu->arch.online) | ||
| 1792 | atomic_inc(&vcpu->arch.vcore->online_count); | ||
| 1793 | else if (!i && vcpu->arch.online) | ||
| 1794 | atomic_dec(&vcpu->arch.vcore->online_count); | ||
| 1795 | vcpu->arch.online = i; | ||
| 1796 | break; | ||
| 1760 | default: | 1797 | default: |
| 1761 | r = -EINVAL; | 1798 | r = -EINVAL; |
| 1762 | break; | 1799 | break; |
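
The KVM_REG_PPC_ONLINE case above moves the vcore counter only on 0<->1 transitions of the per-vcpu flag, and a later hunk in this file forces the flag on in kvmppc_vcpu_run_hv for old userspace that never sets it. A sketch of the edge-triggered accounting using C11 atomics (structures heavily simplified for illustration):

	#include <stdatomic.h>
	#include <stdio.h>

	struct vcore { atomic_int online_count; };
	struct vcpu  { int online; struct vcore *vcore; };

	/* mirrors the KVM_REG_PPC_ONLINE case: only edges move the counter */
	static void set_online(struct vcpu *vcpu, int i)
	{
		if (i && !vcpu->online)
			atomic_fetch_add(&vcpu->vcore->online_count, 1);
		else if (!i && vcpu->online)
			atomic_fetch_sub(&vcpu->vcore->online_count, 1);
		vcpu->online = i;
	}

	int main(void)
	{
		struct vcore vc = { 0 };
		struct vcpu  v  = { 0, &vc };

		set_online(&v, 1);
		set_online(&v, 1);		/* idempotent: no double count */
		set_online(&v, 0);
		printf("%d\n", atomic_load(&vc.online_count));	/* 0 */
		return 0;
	}
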
| @@ -2850,6 +2887,25 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) | |||
| 2850 | } | 2887 | } |
| 2851 | } | 2888 | } |
| 2852 | 2889 | ||
| 2890 | /* | ||
| 2891 | * On POWER8, set RWMR register. | ||
| 2892 | * Since it only affects PURR and SPURR, it doesn't affect | ||
| 2893 | * the host, so we don't save/restore the host value. | ||
| 2894 | */ | ||
| 2895 | if (is_power8) { | ||
| 2896 | unsigned long rwmr_val = RWMR_RPA_P8_8THREAD; | ||
| 2897 | int n_online = atomic_read(&vc->online_count); | ||
| 2898 | |||
| 2899 | /* | ||
| 2900 | * Use the 8-thread value if we're doing split-core | ||
| 2901 | * or if the vcore's online count looks bogus. | ||
| 2902 | */ | ||
| 2903 | if (split == 1 && threads_per_subcore == MAX_SMT_THREADS && | ||
| 2904 | n_online >= 1 && n_online <= MAX_SMT_THREADS) | ||
| 2905 | rwmr_val = p8_rwmr_values[n_online]; | ||
| 2906 | mtspr(SPRN_RWMR, rwmr_val); | ||
| 2907 | } | ||
| 2908 | |||
| 2853 | /* Start all the threads */ | 2909 | /* Start all the threads */ |
| 2854 | active = 0; | 2910 | active = 0; |
| 2855 | for (sub = 0; sub < core_info.n_subcores; ++sub) { | 2911 | for (sub = 0; sub < core_info.n_subcores; ++sub) { |
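
The selection logic above keeps the conservative 8-thread RWMR value unless the core is unsplit and the online count is in range. A sketch of that lookup with the constants copied from the hunks above (pick_rwmr() is an illustrative name):

	#include <stdio.h>

	#define MAX_SMT_THREADS		8

	#define RWMR_RPA_P8_1THREAD	0x164520C62609AECAUL
	#define RWMR_RPA_P8_2THREAD	0x7FFF2908450D8DA9UL
	#define RWMR_RPA_P8_3THREAD	0x164520C62609AECAUL
	#define RWMR_RPA_P8_4THREAD	0x199A421245058DA9UL
	#define RWMR_RPA_P8_5THREAD	0x164520C62609AECAUL
	#define RWMR_RPA_P8_6THREAD	0x164520C62609AECAUL
	#define RWMR_RPA_P8_7THREAD	0x164520C62609AECAUL
	#define RWMR_RPA_P8_8THREAD	0x164520C62609AECAUL

	/* index 0 is unused in practice; it mirrors the kernel table */
	static const unsigned long p8_rwmr_values[MAX_SMT_THREADS + 1] = {
		RWMR_RPA_P8_1THREAD, RWMR_RPA_P8_1THREAD, RWMR_RPA_P8_2THREAD,
		RWMR_RPA_P8_3THREAD, RWMR_RPA_P8_4THREAD, RWMR_RPA_P8_5THREAD,
		RWMR_RPA_P8_6THREAD, RWMR_RPA_P8_7THREAD, RWMR_RPA_P8_8THREAD,
	};

	static unsigned long pick_rwmr(int split, int threads_per_subcore,
				       int n_online)
	{
		unsigned long rwmr_val = RWMR_RPA_P8_8THREAD;	/* safe default */

		if (split == 1 && threads_per_subcore == MAX_SMT_THREADS &&
		    n_online >= 1 && n_online <= MAX_SMT_THREADS)
			rwmr_val = p8_rwmr_values[n_online];
		return rwmr_val;
	}

	int main(void)
	{
		printf("0x%lx\n", pick_rwmr(1, 8, 2));	/* unsplit, 2 online */
		printf("0x%lx\n", pick_rwmr(2, 8, 2));	/* split-core: default */
		return 0;
	}

Since RWMR only affects PURR/SPURR rates and not the host, the hunk writes the register without saving the old value.
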
| @@ -2902,6 +2958,32 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) | |||
| 2902 | for (sub = 0; sub < core_info.n_subcores; ++sub) | 2958 | for (sub = 0; sub < core_info.n_subcores; ++sub) |
| 2903 | spin_unlock(&core_info.vc[sub]->lock); | 2959 | spin_unlock(&core_info.vc[sub]->lock); |
| 2904 | 2960 | ||
| 2961 | if (kvm_is_radix(vc->kvm)) { | ||
| 2962 | int tmp = pcpu; | ||
| 2963 | |||
| 2964 | /* | ||
| 2965 | * Do we need to flush the process scoped TLB for the LPAR? | ||
| 2966 | * | ||
| 2967 | * On POWER9, individual threads can come in here, but the | ||
| 2968 | * TLB is shared between the 4 threads in a core, hence | ||
| 2969 | * invalidating on one thread invalidates for all. | ||
| 2970 | * Thus we make all 4 threads use the same bit here. | ||
| 2971 | * | ||
| 2972 | * Hash must be flushed in realmode in order to use tlbiel. | ||
| 2973 | */ | ||
| 2974 | mtspr(SPRN_LPID, vc->kvm->arch.lpid); | ||
| 2975 | isync(); | ||
| 2976 | |||
| 2977 | if (cpu_has_feature(CPU_FTR_ARCH_300)) | ||
| 2978 | tmp &= ~0x3UL; | ||
| 2979 | |||
| 2980 | if (cpumask_test_cpu(tmp, &vc->kvm->arch.need_tlb_flush)) { | ||
| 2981 | radix__local_flush_tlb_lpid_guest(vc->kvm->arch.lpid); | ||
| 2982 | /* Clear the bit after the TLB flush */ | ||
| 2983 | cpumask_clear_cpu(tmp, &vc->kvm->arch.need_tlb_flush); | ||
| 2984 | } | ||
| 2985 | } | ||
| 2986 | |||
| 2905 | /* | 2987 | /* |
| 2906 | * Interrupts will be enabled once we get into the guest, | 2988 | * Interrupts will be enabled once we get into the guest, |
| 2907 | * so tell lockdep that we're about to enable interrupts. | 2989 | * so tell lockdep that we're about to enable interrupts. |
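
The "tmp &= ~0x3UL" in the flush logic above makes the four SMT threads of a POWER9 core share one need_tlb_flush bit, since they share the core's TLB. A one-liner sketch of the mapping (CPU numbers illustrative):

	#include <stdio.h>

	int main(void)
	{
		/*
		 * CPU_FTR_ARCH_300 (POWER9): the 4 threads of a core share
		 * one TLB, so they also share one need_tlb_flush bit.
		 */
		for (int cpu = 40; cpu < 48; ++cpu)
			printf("cpu %2d -> flush bit %2d\n", cpu, cpu & ~0x3);
		return 0;
	}
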
| @@ -3356,6 +3438,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
| 3356 | } | 3438 | } |
| 3357 | #endif | 3439 | #endif |
| 3358 | 3440 | ||
| 3441 | /* | ||
| 3442 | * Force online to 1 for the sake of old userspace which doesn't | ||
| 3443 | * set it. | ||
| 3444 | */ | ||
| 3445 | if (!vcpu->arch.online) { | ||
| 3446 | atomic_inc(&vcpu->arch.vcore->online_count); | ||
| 3447 | vcpu->arch.online = 1; | ||
| 3448 | } | ||
| 3449 | |||
| 3359 | kvmppc_core_prepare_to_enter(vcpu); | 3450 | kvmppc_core_prepare_to_enter(vcpu); |
| 3360 | 3451 | ||
| 3361 | /* No need to go into the guest when all we'll do is come back out */ | 3452 | /* No need to go into the guest when all we'll do is come back out */ |
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index de18299f92b7..d4a3f4da409b 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/cma.h> | 18 | #include <linux/cma.h> |
| 19 | #include <linux/bitops.h> | 19 | #include <linux/bitops.h> |
| 20 | 20 | ||
| 21 | #include <asm/asm-prototypes.h> | ||
| 21 | #include <asm/cputable.h> | 22 | #include <asm/cputable.h> |
| 22 | #include <asm/kvm_ppc.h> | 23 | #include <asm/kvm_ppc.h> |
| 23 | #include <asm/kvm_book3s.h> | 24 | #include <asm/kvm_book3s.h> |
| @@ -211,9 +212,9 @@ long kvmppc_h_random(struct kvm_vcpu *vcpu) | |||
| 211 | 212 | ||
| 212 | /* Only need to do the expensive mfmsr() on radix */ | 213 | /* Only need to do the expensive mfmsr() on radix */ |
| 213 | if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR)) | 214 | if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR)) |
| 214 | r = powernv_get_random_long(&vcpu->arch.gpr[4]); | 215 | r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]); |
| 215 | else | 216 | else |
| 216 | r = powernv_get_random_real_mode(&vcpu->arch.gpr[4]); | 217 | r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]); |
| 217 | if (r) | 218 | if (r) |
| 218 | return H_SUCCESS; | 219 | return H_SUCCESS; |
| 219 | 220 | ||
| @@ -562,7 +563,7 @@ unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu) | |||
| 562 | { | 563 | { |
| 563 | if (!kvmppc_xics_enabled(vcpu)) | 564 | if (!kvmppc_xics_enabled(vcpu)) |
| 564 | return H_TOO_HARD; | 565 | return H_TOO_HARD; |
| 565 | vcpu->arch.gpr[5] = get_tb(); | 566 | vcpu->arch.regs.gpr[5] = get_tb(); |
| 566 | if (xive_enabled()) { | 567 | if (xive_enabled()) { |
| 567 | if (is_rm()) | 568 | if (is_rm()) |
| 568 | return xive_rm_h_xirr(vcpu); | 569 | return xive_rm_h_xirr(vcpu); |
| @@ -633,7 +634,19 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr) | |||
| 633 | 634 | ||
| 634 | void kvmppc_bad_interrupt(struct pt_regs *regs) | 635 | void kvmppc_bad_interrupt(struct pt_regs *regs) |
| 635 | { | 636 | { |
| 636 | die("Bad interrupt in KVM entry/exit code", regs, SIGABRT); | 637 | /* |
| 638 | * 100 could happen at any time, 200 can happen due to invalid real | ||
| 639 | * address access for example (or any time due to a hardware problem). | ||
| 640 | */ | ||
| 641 | if (TRAP(regs) == 0x100) { | ||
| 642 | get_paca()->in_nmi++; | ||
| 643 | system_reset_exception(regs); | ||
| 644 | get_paca()->in_nmi--; | ||
| 645 | } else if (TRAP(regs) == 0x200) { | ||
| 646 | machine_check_exception(regs); | ||
| 647 | } else { | ||
| 648 | die("Bad interrupt in KVM entry/exit code", regs, SIGABRT); | ||
| 649 | } | ||
| 637 | panic("Bad KVM trap"); | 650 | panic("Bad KVM trap"); |
| 638 | } | 651 | } |
| 639 | 652 | ||
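
The reworked kvmppc_bad_interrupt() above routes a 0x100 (system reset, which can arrive at any time) to system_reset_exception() inside an in_nmi bracket, a 0x200 to machine_check_exception(), and dies only for anything else; in every case it still panics afterwards. A sketch of the classification (return strings are illustrative):

	#include <stdio.h>

	/* trap vectors from the hunk: 0x100 system reset, 0x200 machine check */
	static const char *bad_interrupt_action(unsigned long trap)
	{
		if (trap == 0x100)
			return "system_reset_exception (in_nmi bracketed)";
		if (trap == 0x200)
			return "machine_check_exception";
		return "die(\"Bad interrupt in KVM entry/exit code\")";
	}

	int main(void)
	{
		printf("%s\n", bad_interrupt_action(0x100));
		printf("%s\n", bad_interrupt_action(0x700));
		return 0;
	}
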
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S index 0e8493033288..82f2ff9410b6 100644 --- a/arch/powerpc/kvm/book3s_hv_interrupts.S +++ b/arch/powerpc/kvm/book3s_hv_interrupts.S | |||
| @@ -137,7 +137,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | |||
| 137 | /* | 137 | /* |
| 138 | * We return here in virtual mode after the guest exits | 138 | * We return here in virtual mode after the guest exits |
| 139 | * with something that we can't handle in real mode. | 139 | * with something that we can't handle in real mode. |
| 140 | * Interrupts are enabled again at this point. | 140 | * Interrupts are still hard-disabled. |
| 141 | */ | 141 | */ |
| 142 | 142 | ||
| 143 | /* | 143 | /* |
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 78e6a392330f..1f22d9e977d4 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c | |||
| @@ -418,7 +418,8 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, | |||
| 418 | long pte_index, unsigned long pteh, unsigned long ptel) | 418 | long pte_index, unsigned long pteh, unsigned long ptel) |
| 419 | { | 419 | { |
| 420 | return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel, | 420 | return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel, |
| 421 | vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]); | 421 | vcpu->arch.pgdir, true, |
| 422 | &vcpu->arch.regs.gpr[4]); | ||
| 422 | } | 423 | } |
| 423 | 424 | ||
| 424 | #ifdef __BIG_ENDIAN__ | 425 | #ifdef __BIG_ENDIAN__ |
| @@ -434,24 +435,6 @@ static inline int is_mmio_hpte(unsigned long v, unsigned long r) | |||
| 434 | (HPTE_R_KEY_HI | HPTE_R_KEY_LO)); | 435 | (HPTE_R_KEY_HI | HPTE_R_KEY_LO)); |
| 435 | } | 436 | } |
| 436 | 437 | ||
| 437 | static inline int try_lock_tlbie(unsigned int *lock) | ||
| 438 | { | ||
| 439 | unsigned int tmp, old; | ||
| 440 | unsigned int token = LOCK_TOKEN; | ||
| 441 | |||
| 442 | asm volatile("1:lwarx %1,0,%2\n" | ||
| 443 | " cmpwi cr0,%1,0\n" | ||
| 444 | " bne 2f\n" | ||
| 445 | " stwcx. %3,0,%2\n" | ||
| 446 | " bne- 1b\n" | ||
| 447 | " isync\n" | ||
| 448 | "2:" | ||
| 449 | : "=&r" (tmp), "=&r" (old) | ||
| 450 | : "r" (lock), "r" (token) | ||
| 451 | : "cc", "memory"); | ||
| 452 | return old == 0; | ||
| 453 | } | ||
| 454 | |||
| 455 | static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues, | 438 | static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues, |
| 456 | long npages, int global, bool need_sync) | 439 | long npages, int global, bool need_sync) |
| 457 | { | 440 | { |
| @@ -463,8 +446,6 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues, | |||
| 463 | * the RS field, this is backwards-compatible with P7 and P8. | 446 | * the RS field, this is backwards-compatible with P7 and P8. |
| 464 | */ | 447 | */ |
| 465 | if (global) { | 448 | if (global) { |
| 466 | while (!try_lock_tlbie(&kvm->arch.tlbie_lock)) | ||
| 467 | cpu_relax(); | ||
| 468 | if (need_sync) | 449 | if (need_sync) |
| 469 | asm volatile("ptesync" : : : "memory"); | 450 | asm volatile("ptesync" : : : "memory"); |
| 470 | for (i = 0; i < npages; ++i) { | 451 | for (i = 0; i < npages; ++i) { |
| @@ -483,7 +464,6 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues, | |||
| 483 | } | 464 | } |
| 484 | 465 | ||
| 485 | asm volatile("eieio; tlbsync; ptesync" : : : "memory"); | 466 | asm volatile("eieio; tlbsync; ptesync" : : : "memory"); |
| 486 | kvm->arch.tlbie_lock = 0; | ||
| 487 | } else { | 467 | } else { |
| 488 | if (need_sync) | 468 | if (need_sync) |
| 489 | asm volatile("ptesync" : : : "memory"); | 469 | asm volatile("ptesync" : : : "memory"); |
| @@ -561,13 +541,13 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags, | |||
| 561 | unsigned long pte_index, unsigned long avpn) | 541 | unsigned long pte_index, unsigned long avpn) |
| 562 | { | 542 | { |
| 563 | return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn, | 543 | return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn, |
| 564 | &vcpu->arch.gpr[4]); | 544 | &vcpu->arch.regs.gpr[4]); |
| 565 | } | 545 | } |
| 566 | 546 | ||
| 567 | long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) | 547 | long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) |
| 568 | { | 548 | { |
| 569 | struct kvm *kvm = vcpu->kvm; | 549 | struct kvm *kvm = vcpu->kvm; |
| 570 | unsigned long *args = &vcpu->arch.gpr[4]; | 550 | unsigned long *args = &vcpu->arch.regs.gpr[4]; |
| 571 | __be64 *hp, *hptes[4]; | 551 | __be64 *hp, *hptes[4]; |
| 572 | unsigned long tlbrb[4]; | 552 | unsigned long tlbrb[4]; |
| 573 | long int i, j, k, n, found, indexes[4]; | 553 | long int i, j, k, n, found, indexes[4]; |
| @@ -787,8 +767,8 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags, | |||
| 787 | r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C)); | 767 | r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C)); |
| 788 | r &= ~HPTE_GR_RESERVED; | 768 | r &= ~HPTE_GR_RESERVED; |
| 789 | } | 769 | } |
| 790 | vcpu->arch.gpr[4 + i * 2] = v; | 770 | vcpu->arch.regs.gpr[4 + i * 2] = v; |
| 791 | vcpu->arch.gpr[5 + i * 2] = r; | 771 | vcpu->arch.regs.gpr[5 + i * 2] = r; |
| 792 | } | 772 | } |
| 793 | return H_SUCCESS; | 773 | return H_SUCCESS; |
| 794 | } | 774 | } |
| @@ -834,7 +814,7 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags, | |||
| 834 | } | 814 | } |
| 835 | } | 815 | } |
| 836 | } | 816 | } |
| 837 | vcpu->arch.gpr[4] = gr; | 817 | vcpu->arch.regs.gpr[4] = gr; |
| 838 | ret = H_SUCCESS; | 818 | ret = H_SUCCESS; |
| 839 | out: | 819 | out: |
| 840 | unlock_hpte(hpte, v & ~HPTE_V_HVLOCK); | 820 | unlock_hpte(hpte, v & ~HPTE_V_HVLOCK); |
| @@ -881,7 +861,7 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags, | |||
| 881 | kvmppc_set_dirty_from_hpte(kvm, v, gr); | 861 | kvmppc_set_dirty_from_hpte(kvm, v, gr); |
| 882 | } | 862 | } |
| 883 | } | 863 | } |
| 884 | vcpu->arch.gpr[4] = gr; | 864 | vcpu->arch.regs.gpr[4] = gr; |
| 885 | ret = H_SUCCESS; | 865 | ret = H_SUCCESS; |
| 886 | out: | 866 | out: |
| 887 | unlock_hpte(hpte, v & ~HPTE_V_HVLOCK); | 867 | unlock_hpte(hpte, v & ~HPTE_V_HVLOCK); |
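
The deleted try_lock_tlbie() above was an inline lwarx/stwcx. test-and-set guarding global tlbie; with the lock gone, do_tlbies() issues the tlbie sequence without serialization. For reference, a C11 rendering of what the dropped asm did, assuming LOCK_TOKEN stands in for the kernel's CPU-tagged token:

	#include <stdatomic.h>
	#include <stdio.h>

	#define LOCK_TOKEN	1	/* kernel uses a per-CPU token */

	/*
	 * Like the lwarx/stwcx. loop: observe the lock, fail immediately if
	 * it is held, otherwise install the token with acquire ordering
	 * (the role of the asm's trailing isync).
	 */
	static int try_lock_tlbie(atomic_uint *lock)
	{
		unsigned int old = 0;

		return atomic_compare_exchange_strong_explicit(lock, &old,
							       LOCK_TOKEN,
							       memory_order_acquire,
							       memory_order_relaxed);
	}

	int main(void)
	{
		atomic_uint tlbie_lock = 0;

		printf("%d\n", try_lock_tlbie(&tlbie_lock));	/* 1: taken */
		printf("%d\n", try_lock_tlbie(&tlbie_lock));	/* 0: held */
		atomic_store(&tlbie_lock, 0);	/* kvm->arch.tlbie_lock = 0 */
		return 0;
	}
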
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c index 2a862618f072..758d1d23215e 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_xics.c +++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c | |||
| @@ -517,7 +517,7 @@ unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu) | |||
| 517 | } while (!icp_rm_try_update(icp, old_state, new_state)); | 517 | } while (!icp_rm_try_update(icp, old_state, new_state)); |
| 518 | 518 | ||
| 519 | /* Return the result in GPR4 */ | 519 | /* Return the result in GPR4 */ |
| 520 | vcpu->arch.gpr[4] = xirr; | 520 | vcpu->arch.regs.gpr[4] = xirr; |
| 521 | 521 | ||
| 522 | return check_too_hard(xics, icp); | 522 | return check_too_hard(xics, icp); |
| 523 | } | 523 | } |
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index b97d261d3b89..153988d878e8 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
| @@ -39,8 +39,6 @@ BEGIN_FTR_SECTION; \ | |||
| 39 | extsw reg, reg; \ | 39 | extsw reg, reg; \ |
| 40 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) | 40 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) |
| 41 | 41 | ||
| 42 | #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM) | ||
| 43 | |||
| 44 | /* Values in HSTATE_NAPPING(r13) */ | 42 | /* Values in HSTATE_NAPPING(r13) */ |
| 45 | #define NAPPING_CEDE 1 | 43 | #define NAPPING_CEDE 1 |
| 46 | #define NAPPING_NOVCPU 2 | 44 | #define NAPPING_NOVCPU 2 |
| @@ -639,6 +637,10 @@ kvmppc_hv_entry: | |||
| 639 | /* Primary thread switches to guest partition. */ | 637 | /* Primary thread switches to guest partition. */ |
| 640 | cmpwi r6,0 | 638 | cmpwi r6,0 |
| 641 | bne 10f | 639 | bne 10f |
| 640 | |||
| 641 | /* Radix has already switched LPID and flushed core TLB */ | ||
| 642 | bne cr7, 22f | ||
| 643 | |||
| 642 | lwz r7,KVM_LPID(r9) | 644 | lwz r7,KVM_LPID(r9) |
| 643 | BEGIN_FTR_SECTION | 645 | BEGIN_FTR_SECTION |
| 644 | ld r6,KVM_SDR1(r9) | 646 | ld r6,KVM_SDR1(r9) |
| @@ -650,7 +652,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) | |||
| 650 | mtspr SPRN_LPID,r7 | 652 | mtspr SPRN_LPID,r7 |
| 651 | isync | 653 | isync |
| 652 | 654 | ||
| 653 | /* See if we need to flush the TLB */ | 655 | /* See if we need to flush the TLB. Hash has to be done in RM */ |
| 654 | lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */ | 656 | lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */ |
| 655 | BEGIN_FTR_SECTION | 657 | BEGIN_FTR_SECTION |
| 656 | /* | 658 | /* |
| @@ -677,15 +679,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | |||
| 677 | li r7,0x800 /* IS field = 0b10 */ | 679 | li r7,0x800 /* IS field = 0b10 */ |
| 678 | ptesync | 680 | ptesync |
| 679 | li r0,0 /* RS for P9 version of tlbiel */ | 681 | li r0,0 /* RS for P9 version of tlbiel */ |
| 680 | bne cr7, 29f | ||
| 681 | 28: tlbiel r7 /* On P9, rs=0, RIC=0, PRS=0, R=0 */ | 682 | 28: tlbiel r7 /* On P9, rs=0, RIC=0, PRS=0, R=0 */ |
| 682 | addi r7,r7,0x1000 | 683 | addi r7,r7,0x1000 |
| 683 | bdnz 28b | 684 | bdnz 28b |
| 684 | b 30f | 685 | ptesync |
| 685 | 29: PPC_TLBIEL(7,0,2,1,1) /* for radix, RIC=2, PRS=1, R=1 */ | ||
| 686 | addi r7,r7,0x1000 | ||
| 687 | bdnz 29b | ||
| 688 | 30: ptesync | ||
| 689 | 23: ldarx r7,0,r6 /* clear the bit after TLB flushed */ | 686 | 23: ldarx r7,0,r6 /* clear the bit after TLB flushed */ |
| 690 | andc r7,r7,r8 | 687 | andc r7,r7,r8 |
| 691 | stdcx. r7,0,r6 | 688 | stdcx. r7,0,r6 |
| @@ -799,7 +796,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) | |||
| 799 | /* | 796 | /* |
| 800 | * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR | 797 | * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR |
| 801 | */ | 798 | */ |
| 802 | bl kvmppc_restore_tm | 799 | mr r3, r4 |
| 800 | ld r4, VCPU_MSR(r3) | ||
| 801 | bl kvmppc_restore_tm_hv | ||
| 802 | ld r4, HSTATE_KVM_VCPU(r13) | ||
| 803 | 91: | 803 | 91: |
| 804 | #endif | 804 | #endif |
| 805 | 805 | ||
| @@ -1783,7 +1783,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) | |||
| 1783 | /* | 1783 | /* |
| 1784 | * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR | 1784 | * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR |
| 1785 | */ | 1785 | */ |
| 1786 | bl kvmppc_save_tm | 1786 | mr r3, r9 |
| 1787 | ld r4, VCPU_MSR(r3) | ||
| 1788 | bl kvmppc_save_tm_hv | ||
| 1789 | ld r9, HSTATE_KVM_VCPU(r13) | ||
| 1787 | 91: | 1790 | 91: |
| 1788 | #endif | 1791 | #endif |
| 1789 | 1792 | ||
| @@ -2686,8 +2689,9 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) | |||
| 2686 | /* | 2689 | /* |
| 2687 | * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR | 2690 | * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR |
| 2688 | */ | 2691 | */ |
| 2689 | ld r9, HSTATE_KVM_VCPU(r13) | 2692 | ld r3, HSTATE_KVM_VCPU(r13) |
| 2690 | bl kvmppc_save_tm | 2693 | ld r4, VCPU_MSR(r3) |
| 2694 | bl kvmppc_save_tm_hv | ||
| 2691 | 91: | 2695 | 91: |
| 2692 | #endif | 2696 | #endif |
| 2693 | 2697 | ||
| @@ -2805,7 +2809,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) | |||
| 2805 | /* | 2809 | /* |
| 2806 | * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR | 2810 | * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR |
| 2807 | */ | 2811 | */ |
| 2808 | bl kvmppc_restore_tm | 2812 | mr r3, r4 |
| 2813 | ld r4, VCPU_MSR(r3) | ||
| 2814 | bl kvmppc_restore_tm_hv | ||
| 2815 | ld r4, HSTATE_KVM_VCPU(r13) | ||
| 2809 | 91: | 2816 | 91: |
| 2810 | #endif | 2817 | #endif |
| 2811 | 2818 | ||
| @@ -3126,11 +3133,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
| 3126 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 3133 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
| 3127 | /* | 3134 | /* |
| 3128 | * Save transactional state and TM-related registers. | 3135 | * Save transactional state and TM-related registers. |
| 3129 | * Called with r9 pointing to the vcpu struct. | 3136 | * Called with r3 pointing to the vcpu struct and r4 containing |
| 3137 | * the guest MSR value. | ||
| 3130 | * This can modify all checkpointed registers, but | 3138 | * This can modify all checkpointed registers, but |
| 3131 | * restores r1, r2 and r9 (vcpu pointer) before exit. | 3139 | * restores r1 and r2 before exit. |
| 3132 | */ | 3140 | */ |
| 3133 | kvmppc_save_tm: | 3141 | kvmppc_save_tm_hv: |
| 3142 | /* See if we need to handle fake suspend mode */ | ||
| 3143 | BEGIN_FTR_SECTION | ||
| 3144 | b __kvmppc_save_tm | ||
| 3145 | END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST) | ||
| 3146 | |||
| 3147 | lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */ | ||
| 3148 | cmpwi r0, 0 | ||
| 3149 | beq __kvmppc_save_tm | ||
| 3150 | |||
| 3151 | /* The following code handles the fake_suspend = 1 case */ | ||
| 3134 | mflr r0 | 3152 | mflr r0 |
| 3135 | std r0, PPC_LR_STKOFF(r1) | 3153 | std r0, PPC_LR_STKOFF(r1) |
| 3136 | stdu r1, -PPC_MIN_STKFRM(r1) | 3154 | stdu r1, -PPC_MIN_STKFRM(r1) |
| @@ -3141,59 +3159,37 @@ kvmppc_save_tm: | |||
| 3141 | rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG | 3159 | rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG |
| 3142 | mtmsrd r8 | 3160 | mtmsrd r8 |
| 3143 | 3161 | ||
| 3144 | ld r5, VCPU_MSR(r9) | ||
| 3145 | rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 | ||
| 3146 | beq 1f /* TM not active in guest. */ | ||
| 3147 | |||
| 3148 | std r1, HSTATE_HOST_R1(r13) | ||
| 3149 | li r3, TM_CAUSE_KVM_RESCHED | ||
| 3150 | |||
| 3151 | BEGIN_FTR_SECTION | ||
| 3152 | lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */ | ||
| 3153 | cmpwi r0, 0 | ||
| 3154 | beq 3f | ||
| 3155 | rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */ | 3162 | rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */ |
| 3156 | beq 4f | 3163 | beq 4f |
| 3157 | BEGIN_FTR_SECTION_NESTED(96) | 3164 | BEGIN_FTR_SECTION |
| 3158 | bl pnv_power9_force_smt4_catch | 3165 | bl pnv_power9_force_smt4_catch |
| 3159 | END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96) | 3166 | END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) |
| 3160 | nop | 3167 | nop |
| 3161 | b 6f | ||
| 3162 | 3: | ||
| 3163 | /* Emulation of the treclaim instruction needs TEXASR before treclaim */ | ||
| 3164 | mfspr r6, SPRN_TEXASR | ||
| 3165 | std r6, VCPU_ORIG_TEXASR(r9) | ||
| 3166 | 6: | ||
| 3167 | END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) | ||
| 3168 | 3168 | ||
| 3169 | /* Clear the MSR RI since r1, r13 are all going to be foobar. */ | 3169 | std r1, HSTATE_HOST_R1(r13) |
| 3170 | |||
| 3171 | /* Clear the MSR RI since r1, r13 may be foobar. */ | ||
| 3170 | li r5, 0 | 3172 | li r5, 0 |
| 3171 | mtmsrd r5, 1 | 3173 | mtmsrd r5, 1 |
| 3172 | 3174 | ||
| 3173 | /* All GPRs are volatile at this point. */ | 3175 | /* We have to treclaim here because that's the only way to do S->N */ |
| 3176 | li r3, TM_CAUSE_KVM_RESCHED | ||
| 3174 | TRECLAIM(R3) | 3177 | TRECLAIM(R3) |
| 3175 | 3178 | ||
| 3176 | /* Temporarily store r13 and r9 so we have some regs to play with */ | ||
| 3177 | SET_SCRATCH0(r13) | ||
| 3178 | GET_PACA(r13) | ||
| 3179 | std r9, PACATMSCRATCH(r13) | ||
| 3180 | |||
| 3181 | /* If doing TM emulation on POWER9 DD2.2, check for fake suspend mode */ | ||
| 3182 | BEGIN_FTR_SECTION | ||
| 3183 | lbz r9, HSTATE_FAKE_SUSPEND(r13) | ||
| 3184 | cmpwi r9, 0 | ||
| 3185 | beq 2f | ||
| 3186 | /* | 3179 | /* |
| 3187 | * We were in fake suspend, so we are not going to save the | 3180 | * We were in fake suspend, so we are not going to save the |
| 3188 | * register state as the guest checkpointed state (since | 3181 | * register state as the guest checkpointed state (since |
| 3189 | * we already have it), therefore we can now use any volatile GPR. | 3182 | * we already have it), therefore we can now use any volatile GPR. |
| 3190 | */ | 3183 | */ |
| 3191 | /* Reload stack pointer and TOC. */ | 3184 | /* Reload PACA pointer, stack pointer and TOC. */ |
| 3185 | GET_PACA(r13) | ||
| 3192 | ld r1, HSTATE_HOST_R1(r13) | 3186 | ld r1, HSTATE_HOST_R1(r13) |
| 3193 | ld r2, PACATOC(r13) | 3187 | ld r2, PACATOC(r13) |
| 3188 | |||
| 3194 | /* Set MSR RI now we have r1 and r13 back. */ | 3189 | /* Set MSR RI now we have r1 and r13 back. */ |
| 3195 | li r5, MSR_RI | 3190 | li r5, MSR_RI |
| 3196 | mtmsrd r5, 1 | 3191 | mtmsrd r5, 1 |
| 3192 | |||
| 3197 | HMT_MEDIUM | 3193 | HMT_MEDIUM |
| 3198 | ld r6, HSTATE_DSCR(r13) | 3194 | ld r6, HSTATE_DSCR(r13) |
| 3199 | mtspr SPRN_DSCR, r6 | 3195 | mtspr SPRN_DSCR, r6 |
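[Editor's note] A hedged C model of the MSR[TS] decode that the rldicl. tests in this hunk (and in the restore path below) implement; the constant mirrors MSR_TS_S_LG from arch/powerpc/include/asm/reg.h, quoted from memory. Rotating the MSR right by MSR_TS_S_LG and keeping the low two bits yields 0 for non-transactional, 1 for suspended and 2 for transactional state:

#include <stdio.h>

#define MSR_TS_S_LG 33                  /* MSR[TS] low bit: suspended */

/* rldicl. r5, r4, 64 - MSR_TS_S_LG, 62  ==  (msr >> 33) & 3 */
static unsigned long msr_ts(unsigned long msr)
{
        return (msr >> MSR_TS_S_LG) & 3;
}

int main(void)
{
        unsigned long msr = 1UL << MSR_TS_S_LG;         /* suspended */
        printf("TS=%lu (0=none, 1=suspended, 2=transactional)\n",
               msr_ts(msr));
        return 0;
}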
| @@ -3208,85 +3204,9 @@ END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96) | |||
| 3208 | li r0, PSSCR_FAKE_SUSPEND | 3204 | li r0, PSSCR_FAKE_SUSPEND |
| 3209 | andc r3, r3, r0 | 3205 | andc r3, r3, r0 |
| 3210 | mtspr SPRN_PSSCR, r3 | 3206 | mtspr SPRN_PSSCR, r3 |
| 3211 | ld r9, HSTATE_KVM_VCPU(r13) | ||
| 3212 | /* Don't save TEXASR, use value from last exit in real suspend state */ | ||
| 3213 | b 11f | ||
| 3214 | 2: | ||
| 3215 | END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) | ||
| 3216 | 3207 | ||
| 3208 | /* Don't save TEXASR, use value from last exit in real suspend state */ | ||
| 3217 | ld r9, HSTATE_KVM_VCPU(r13) | 3209 | ld r9, HSTATE_KVM_VCPU(r13) |
| 3218 | |||
| 3219 | /* Get a few more GPRs free. */ | ||
| 3220 | std r29, VCPU_GPRS_TM(29)(r9) | ||
| 3221 | std r30, VCPU_GPRS_TM(30)(r9) | ||
| 3222 | std r31, VCPU_GPRS_TM(31)(r9) | ||
| 3223 | |||
| 3224 | /* Save away PPR and DSCR soon so we don't run with user values. */ | ||
| 3225 | mfspr r31, SPRN_PPR | ||
| 3226 | HMT_MEDIUM | ||
| 3227 | mfspr r30, SPRN_DSCR | ||
| 3228 | ld r29, HSTATE_DSCR(r13) | ||
| 3229 | mtspr SPRN_DSCR, r29 | ||
| 3230 | |||
| 3231 | /* Save all but r9, r13 & r29-r31 */ | ||
| 3232 | reg = 0 | ||
| 3233 | .rept 29 | ||
| 3234 | .if (reg != 9) && (reg != 13) | ||
| 3235 | std reg, VCPU_GPRS_TM(reg)(r9) | ||
| 3236 | .endif | ||
| 3237 | reg = reg + 1 | ||
| 3238 | .endr | ||
| 3239 | /* ... now save r13 */ | ||
| 3240 | GET_SCRATCH0(r4) | ||
| 3241 | std r4, VCPU_GPRS_TM(13)(r9) | ||
| 3242 | /* ... and save r9 */ | ||
| 3243 | ld r4, PACATMSCRATCH(r13) | ||
| 3244 | std r4, VCPU_GPRS_TM(9)(r9) | ||
| 3245 | |||
| 3246 | /* Reload stack pointer and TOC. */ | ||
| 3247 | ld r1, HSTATE_HOST_R1(r13) | ||
| 3248 | ld r2, PACATOC(r13) | ||
| 3249 | |||
| 3250 | /* Set MSR RI now we have r1 and r13 back. */ | ||
| 3251 | li r5, MSR_RI | ||
| 3252 | mtmsrd r5, 1 | ||
| 3253 | |||
| 3254 | /* Save away checkpointed SPRs. */ | ||
| 3255 | std r31, VCPU_PPR_TM(r9) | ||
| 3256 | std r30, VCPU_DSCR_TM(r9) | ||
| 3257 | mflr r5 | ||
| 3258 | mfcr r6 | ||
| 3259 | mfctr r7 | ||
| 3260 | mfspr r8, SPRN_AMR | ||
| 3261 | mfspr r10, SPRN_TAR | ||
| 3262 | mfxer r11 | ||
| 3263 | std r5, VCPU_LR_TM(r9) | ||
| 3264 | stw r6, VCPU_CR_TM(r9) | ||
| 3265 | std r7, VCPU_CTR_TM(r9) | ||
| 3266 | std r8, VCPU_AMR_TM(r9) | ||
| 3267 | std r10, VCPU_TAR_TM(r9) | ||
| 3268 | std r11, VCPU_XER_TM(r9) | ||
| 3269 | |||
| 3270 | /* Restore r12 as trap number. */ | ||
| 3271 | lwz r12, VCPU_TRAP(r9) | ||
| 3272 | |||
| 3273 | /* Save FP/VSX. */ | ||
| 3274 | addi r3, r9, VCPU_FPRS_TM | ||
| 3275 | bl store_fp_state | ||
| 3276 | addi r3, r9, VCPU_VRS_TM | ||
| 3277 | bl store_vr_state | ||
| 3278 | mfspr r6, SPRN_VRSAVE | ||
| 3279 | stw r6, VCPU_VRSAVE_TM(r9) | ||
| 3280 | 1: | ||
| 3281 | /* | ||
| 3282 | * We need to save these SPRs after the treclaim so that the software | ||
| 3283 | * error code is recorded correctly in the TEXASR. Also the user may | ||
| 3284 | * change these outside of a transaction, so they must always be | ||
| 3285 | * context switched. | ||
| 3286 | */ | ||
| 3287 | mfspr r7, SPRN_TEXASR | ||
| 3288 | std r7, VCPU_TEXASR(r9) | ||
| 3289 | 11: | ||
| 3290 | mfspr r5, SPRN_TFHAR | 3210 | mfspr r5, SPRN_TFHAR |
| 3291 | mfspr r6, SPRN_TFIAR | 3211 | mfspr r6, SPRN_TFIAR |
| 3292 | std r5, VCPU_TFHAR(r9) | 3212 | std r5, VCPU_TFHAR(r9) |
| @@ -3299,149 +3219,63 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) | |||
| 3299 | 3219 | ||
| 3300 | /* | 3220 | /* |
| 3301 | * Restore transactional state and TM-related registers. | 3221 | * Restore transactional state and TM-related registers. |
| 3302 | * Called with r4 pointing to the vcpu struct. | 3222 | * Called with r3 pointing to the vcpu struct |
| 3223 | * and r4 containing the guest MSR value. | ||
| 3303 | * This potentially modifies all checkpointed registers. | 3224 | * This potentially modifies all checkpointed registers. |
| 3304 | * It restores r1, r2, r4 from the PACA. | 3225 | * It restores r1 and r2 from the PACA. |
| 3305 | */ | 3226 | */ |
| 3306 | kvmppc_restore_tm: | 3227 | kvmppc_restore_tm_hv: |
| 3228 | /* | ||
| 3229 | * If we are doing TM emulation for the guest on a POWER9 DD2, | ||
| 3230 | * then we don't actually do a trechkpt -- we either set up | ||
| 3231 | * fake-suspend mode, or emulate a TM rollback. | ||
| 3232 | */ | ||
| 3233 | BEGIN_FTR_SECTION | ||
| 3234 | b __kvmppc_restore_tm | ||
| 3235 | END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST) | ||
| 3307 | mflr r0 | 3236 | mflr r0 |
| 3308 | std r0, PPC_LR_STKOFF(r1) | 3237 | std r0, PPC_LR_STKOFF(r1) |
| 3309 | 3238 | ||
| 3310 | /* Turn on TM/FP/VSX/VMX so we can restore them. */ | 3239 | li r0, 0 |
| 3240 | stb r0, HSTATE_FAKE_SUSPEND(r13) | ||
| 3241 | |||
| 3242 | /* Turn on TM so we can restore TM SPRs */ | ||
| 3311 | mfmsr r5 | 3243 | mfmsr r5 |
| 3312 | li r6, MSR_TM >> 32 | 3244 | li r0, 1 |
| 3313 | sldi r6, r6, 32 | 3245 | rldimi r5, r0, MSR_TM_LG, 63-MSR_TM_LG |
| 3314 | or r5, r5, r6 | ||
| 3315 | ori r5, r5, MSR_FP | ||
| 3316 | oris r5, r5, (MSR_VEC | MSR_VSX)@h | ||
| 3317 | mtmsrd r5 | 3246 | mtmsrd r5 |
| 3318 | 3247 | ||
| 3319 | /* | 3248 | /* |
| 3320 | * The user may change these outside of a transaction, so they must | 3249 | * The user may change these outside of a transaction, so they must |
| 3321 | * always be context switched. | 3250 | * always be context switched. |
| 3322 | */ | 3251 | */ |
| 3323 | ld r5, VCPU_TFHAR(r4) | 3252 | ld r5, VCPU_TFHAR(r3) |
| 3324 | ld r6, VCPU_TFIAR(r4) | 3253 | ld r6, VCPU_TFIAR(r3) |
| 3325 | ld r7, VCPU_TEXASR(r4) | 3254 | ld r7, VCPU_TEXASR(r3) |
| 3326 | mtspr SPRN_TFHAR, r5 | 3255 | mtspr SPRN_TFHAR, r5 |
| 3327 | mtspr SPRN_TFIAR, r6 | 3256 | mtspr SPRN_TFIAR, r6 |
| 3328 | mtspr SPRN_TEXASR, r7 | 3257 | mtspr SPRN_TEXASR, r7 |
| 3329 | 3258 | ||
| 3330 | li r0, 0 | 3259 | rldicl. r5, r4, 64 - MSR_TS_S_LG, 62 |
| 3331 | stb r0, HSTATE_FAKE_SUSPEND(r13) | ||
| 3332 | ld r5, VCPU_MSR(r4) | ||
| 3333 | rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 | ||
| 3334 | beqlr /* TM not active in guest */ | 3260 | beqlr /* TM not active in guest */ |
| 3335 | std r1, HSTATE_HOST_R1(r13) | ||
| 3336 | 3261 | ||
| 3337 | /* Make sure the failure summary is set, otherwise we'll program check | 3262 | /* Make sure the failure summary is set */ |
| 3338 | * when we trechkpt. It's possible that this might not have been set | ||
| 3339 | * on a kvmppc_set_one_reg() call but we shouldn't let this crash the | ||
| 3340 | * host. | ||
| 3341 | */ | ||
| 3342 | oris r7, r7, (TEXASR_FS)@h | 3263 | oris r7, r7, (TEXASR_FS)@h |
| 3343 | mtspr SPRN_TEXASR, r7 | 3264 | mtspr SPRN_TEXASR, r7 |
| 3344 | 3265 | ||
| 3345 | /* | ||
| 3346 | * If we are doing TM emulation for the guest on a POWER9 DD2, | ||
| 3347 | * then we don't actually do a trechkpt -- we either set up | ||
| 3348 | * fake-suspend mode, or emulate a TM rollback. | ||
| 3349 | */ | ||
| 3350 | BEGIN_FTR_SECTION | ||
| 3351 | b .Ldo_tm_fake_load | ||
| 3352 | END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) | ||
| 3353 | |||
| 3354 | /* | ||
| 3355 | * We need to load up the checkpointed state for the guest. | ||
| 3356 | * We need to do this early as it will blow away any GPRs, VSRs and | ||
| 3357 | * some SPRs. | ||
| 3358 | */ | ||
| 3359 | |||
| 3360 | mr r31, r4 | ||
| 3361 | addi r3, r31, VCPU_FPRS_TM | ||
| 3362 | bl load_fp_state | ||
| 3363 | addi r3, r31, VCPU_VRS_TM | ||
| 3364 | bl load_vr_state | ||
| 3365 | mr r4, r31 | ||
| 3366 | lwz r7, VCPU_VRSAVE_TM(r4) | ||
| 3367 | mtspr SPRN_VRSAVE, r7 | ||
| 3368 | |||
| 3369 | ld r5, VCPU_LR_TM(r4) | ||
| 3370 | lwz r6, VCPU_CR_TM(r4) | ||
| 3371 | ld r7, VCPU_CTR_TM(r4) | ||
| 3372 | ld r8, VCPU_AMR_TM(r4) | ||
| 3373 | ld r9, VCPU_TAR_TM(r4) | ||
| 3374 | ld r10, VCPU_XER_TM(r4) | ||
| 3375 | mtlr r5 | ||
| 3376 | mtcr r6 | ||
| 3377 | mtctr r7 | ||
| 3378 | mtspr SPRN_AMR, r8 | ||
| 3379 | mtspr SPRN_TAR, r9 | ||
| 3380 | mtxer r10 | ||
| 3381 | |||
| 3382 | /* | ||
| 3383 | * Load up PPR and DSCR values but don't put them in the actual SPRs | ||
| 3384 | * till the last moment to avoid running with userspace PPR and DSCR for | ||
| 3385 | * too long. | ||
| 3386 | */ | ||
| 3387 | ld r29, VCPU_DSCR_TM(r4) | ||
| 3388 | ld r30, VCPU_PPR_TM(r4) | ||
| 3389 | |||
| 3390 | std r2, PACATMSCRATCH(r13) /* Save TOC */ | ||
| 3391 | |||
| 3392 | /* Clear the MSR RI since r1, r13 are all going to be foobar. */ | ||
| 3393 | li r5, 0 | ||
| 3394 | mtmsrd r5, 1 | ||
| 3395 | |||
| 3396 | /* Load GPRs r0-r28 */ | ||
| 3397 | reg = 0 | ||
| 3398 | .rept 29 | ||
| 3399 | ld reg, VCPU_GPRS_TM(reg)(r31) | ||
| 3400 | reg = reg + 1 | ||
| 3401 | .endr | ||
| 3402 | |||
| 3403 | mtspr SPRN_DSCR, r29 | ||
| 3404 | mtspr SPRN_PPR, r30 | ||
| 3405 | |||
| 3406 | /* Load final GPRs */ | ||
| 3407 | ld 29, VCPU_GPRS_TM(29)(r31) | ||
| 3408 | ld 30, VCPU_GPRS_TM(30)(r31) | ||
| 3409 | ld 31, VCPU_GPRS_TM(31)(r31) | ||
| 3410 | |||
| 3411 | /* TM checkpointed state is now set up. All GPRs are now volatile. */ | ||
| 3412 | TRECHKPT | ||
| 3413 | |||
| 3414 | /* Now let's get back the state we need. */ | ||
| 3415 | HMT_MEDIUM | ||
| 3416 | GET_PACA(r13) | ||
| 3417 | ld r29, HSTATE_DSCR(r13) | ||
| 3418 | mtspr SPRN_DSCR, r29 | ||
| 3419 | ld r4, HSTATE_KVM_VCPU(r13) | ||
| 3420 | ld r1, HSTATE_HOST_R1(r13) | ||
| 3421 | ld r2, PACATMSCRATCH(r13) | ||
| 3422 | |||
| 3423 | /* Set the MSR RI since we have our registers back. */ | ||
| 3424 | li r5, MSR_RI | ||
| 3425 | mtmsrd r5, 1 | ||
| 3426 | 9: | ||
| 3427 | ld r0, PPC_LR_STKOFF(r1) | ||
| 3428 | mtlr r0 | ||
| 3429 | blr | ||
| 3430 | |||
| 3431 | .Ldo_tm_fake_load: | ||
| 3432 | cmpwi r5, 1 /* check for suspended state */ | 3266 | cmpwi r5, 1 /* check for suspended state */ |
| 3433 | bgt 10f | 3267 | bgt 10f |
| 3434 | stb r5, HSTATE_FAKE_SUSPEND(r13) | 3268 | stb r5, HSTATE_FAKE_SUSPEND(r13) |
| 3435 | b 9b /* and return */ | 3269 | b 9f /* and return */ |
| 3436 | 10: stdu r1, -PPC_MIN_STKFRM(r1) | 3270 | 10: stdu r1, -PPC_MIN_STKFRM(r1) |
| 3437 | /* guest is in transactional state, so simulate rollback */ | 3271 | /* guest is in transactional state, so simulate rollback */ |
| 3438 | mr r3, r4 | ||
| 3439 | bl kvmhv_emulate_tm_rollback | 3272 | bl kvmhv_emulate_tm_rollback |
| 3440 | nop | 3273 | nop |
| 3441 | ld r4, HSTATE_KVM_VCPU(r13) /* our vcpu pointer has been trashed */ | ||
| 3442 | addi r1, r1, PPC_MIN_STKFRM | 3274 | addi r1, r1, PPC_MIN_STKFRM |
| 3443 | b 9b | 3275 | 9: ld r0, PPC_LR_STKOFF(r1) |
| 3444 | #endif | 3276 | mtlr r0 |
| 3277 | blr | ||
| 3278 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ | ||
| 3445 | 3279 | ||
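[Editor's note] On P9 TM HV-assist parts, the restore path above reduces to a three-way decision keyed off the MSR[TS] decode sketched earlier: restore the TM SPRs, and then either return, arm fake-suspend, or emulate a rollback. A standalone model of that decision (helper strings are descriptive, not kernel API):

#include <stdio.h>

enum ts_state { TS_NONE = 0, TS_SUSPENDED = 1, TS_TRANSACTIONAL = 2 };

static const char *restore_action(enum ts_state ts)
{
        if (ts == TS_NONE)
                return "restore TM SPRs only, no checkpoint";
        if (ts == TS_SUSPENDED)
                return "set fake-suspend flag, skip trechkpt";
        return "emulate rollback to TFHAR";      /* transactional */
}

int main(void)
{
        for (int ts = TS_NONE; ts <= TS_TRANSACTIONAL; ts++)
                printf("TS=%d -> %s\n", ts, restore_action((enum ts_state)ts));
        return 0;
}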
| 3446 | /* | 3280 | /* |
| 3447 | * We come here if we get any exception or interrupt while we are | 3281 | * We come here if we get any exception or interrupt while we are |
| @@ -3572,6 +3406,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) | |||
| 3572 | bcl 20, 31, .+4 | 3406 | bcl 20, 31, .+4 |
| 3573 | 5: mflr r3 | 3407 | 5: mflr r3 |
| 3574 | addi r3, r3, 9f - 5b | 3408 | addi r3, r3, 9f - 5b |
| 3409 | li r4, -1 | ||
| 3410 | rldimi r3, r4, 62, 0 /* ensure 0xc000000000000000 bits are set */ | ||
| 3575 | ld r4, PACAKMSR(r13) | 3411 | ld r4, PACAKMSR(r13) |
| 3576 | mtspr SPRN_SRR0, r3 | 3412 | mtspr SPRN_SRR0, r3 |
| 3577 | mtspr SPRN_SRR1, r4 | 3413 | mtspr SPRN_SRR1, r4 |
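[Editor's note] The added li/rldimi pair above forces the two most-significant bits of the return address on, turning a truncated real-mode label address into a proper 0xc000... kernel linear address. A hedged C equivalent (the example address is made up):

#include <stdio.h>

int main(void)
{
        unsigned long addr = 0x0123abc0UL;     /* hypothetical label address */
        /* li r4, -1 ; rldimi r3, r4, 62, 0  ==  set the top two bits */
        addr |= 0xc000000000000000UL;
        printf("%#lx\n", addr);                /* prints the fixed-up address */
        return 0;
}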
diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c index bf710ad3a6d7..008285058f9b 100644 --- a/arch/powerpc/kvm/book3s_hv_tm.c +++ b/arch/powerpc/kvm/book3s_hv_tm.c | |||
| @@ -19,7 +19,7 @@ static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause) | |||
| 19 | u64 texasr, tfiar; | 19 | u64 texasr, tfiar; |
| 20 | u64 msr = vcpu->arch.shregs.msr; | 20 | u64 msr = vcpu->arch.shregs.msr; |
| 21 | 21 | ||
| 22 | tfiar = vcpu->arch.pc & ~0x3ull; | 22 | tfiar = vcpu->arch.regs.nip & ~0x3ull; |
| 23 | texasr = (failure_cause << 56) | TEXASR_ABORT | TEXASR_FS | TEXASR_EXACT; | 23 | texasr = (failure_cause << 56) | TEXASR_ABORT | TEXASR_FS | TEXASR_EXACT; |
| 24 | if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) | 24 | if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) |
| 25 | texasr |= TEXASR_SUSP; | 25 | texasr |= TEXASR_SUSP; |
| @@ -57,8 +57,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) | |||
| 57 | (newmsr & MSR_TM))); | 57 | (newmsr & MSR_TM))); |
| 58 | newmsr = sanitize_msr(newmsr); | 58 | newmsr = sanitize_msr(newmsr); |
| 59 | vcpu->arch.shregs.msr = newmsr; | 59 | vcpu->arch.shregs.msr = newmsr; |
| 60 | vcpu->arch.cfar = vcpu->arch.pc - 4; | 60 | vcpu->arch.cfar = vcpu->arch.regs.nip - 4; |
| 61 | vcpu->arch.pc = vcpu->arch.shregs.srr0; | 61 | vcpu->arch.regs.nip = vcpu->arch.shregs.srr0; |
| 62 | return RESUME_GUEST; | 62 | return RESUME_GUEST; |
| 63 | 63 | ||
| 64 | case PPC_INST_RFEBB: | 64 | case PPC_INST_RFEBB: |
| @@ -90,8 +90,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) | |||
| 90 | vcpu->arch.bescr = bescr; | 90 | vcpu->arch.bescr = bescr; |
| 91 | msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; | 91 | msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; |
| 92 | vcpu->arch.shregs.msr = msr; | 92 | vcpu->arch.shregs.msr = msr; |
| 93 | vcpu->arch.cfar = vcpu->arch.pc - 4; | 93 | vcpu->arch.cfar = vcpu->arch.regs.nip - 4; |
| 94 | vcpu->arch.pc = vcpu->arch.ebbrr; | 94 | vcpu->arch.regs.nip = vcpu->arch.ebbrr; |
| 95 | return RESUME_GUEST; | 95 | return RESUME_GUEST; |
| 96 | 96 | ||
| 97 | case PPC_INST_MTMSRD: | 97 | case PPC_INST_MTMSRD: |
diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c index d98ccfd2b88c..b2c7c6fca4f9 100644 --- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c | |||
| @@ -35,8 +35,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu) | |||
| 35 | return 0; | 35 | return 0; |
| 36 | newmsr = sanitize_msr(newmsr); | 36 | newmsr = sanitize_msr(newmsr); |
| 37 | vcpu->arch.shregs.msr = newmsr; | 37 | vcpu->arch.shregs.msr = newmsr; |
| 38 | vcpu->arch.cfar = vcpu->arch.pc - 4; | 38 | vcpu->arch.cfar = vcpu->arch.regs.nip - 4; |
| 39 | vcpu->arch.pc = vcpu->arch.shregs.srr0; | 39 | vcpu->arch.regs.nip = vcpu->arch.shregs.srr0; |
| 40 | return 1; | 40 | return 1; |
| 41 | 41 | ||
| 42 | case PPC_INST_RFEBB: | 42 | case PPC_INST_RFEBB: |
| @@ -58,8 +58,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu) | |||
| 58 | mtspr(SPRN_BESCR, bescr); | 58 | mtspr(SPRN_BESCR, bescr); |
| 59 | msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; | 59 | msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; |
| 60 | vcpu->arch.shregs.msr = msr; | 60 | vcpu->arch.shregs.msr = msr; |
| 61 | vcpu->arch.cfar = vcpu->arch.pc - 4; | 61 | vcpu->arch.cfar = vcpu->arch.regs.nip - 4; |
| 62 | vcpu->arch.pc = mfspr(SPRN_EBBRR); | 62 | vcpu->arch.regs.nip = mfspr(SPRN_EBBRR); |
| 63 | return 1; | 63 | return 1; |
| 64 | 64 | ||
| 65 | case PPC_INST_MTMSRD: | 65 | case PPC_INST_MTMSRD: |
| @@ -103,7 +103,7 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu) | |||
| 103 | void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu) | 103 | void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu) |
| 104 | { | 104 | { |
| 105 | vcpu->arch.shregs.msr &= ~MSR_TS_MASK; /* go to N state */ | 105 | vcpu->arch.shregs.msr &= ~MSR_TS_MASK; /* go to N state */ |
| 106 | vcpu->arch.pc = vcpu->arch.tfhar; | 106 | vcpu->arch.regs.nip = vcpu->arch.tfhar; |
| 107 | copy_from_checkpoint(vcpu); | 107 | copy_from_checkpoint(vcpu); |
| 108 | vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0xa0000000; | 108 | vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0xa0000000; |
| 109 | } | 109 | } |
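[Editor's note] On the emulated rollback above, CR0 is forced to 0b1010, which (per my reading of the Power ISA TM rules) is what a failing tbegin. reports to the software abort handler. A minimal check of that bit arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned int cr = 0x12345678;                  /* sample guest CR */
        /* keep CR1-CR7, force CR0 = 0b1010 */
        cr = (cr & 0x0fffffff) | 0xa0000000;
        printf("CR0=%u%u%u%u\n", (cr >> 31) & 1, (cr >> 30) & 1,
               (cr >> 29) & 1, (cr >> 28) & 1);        /* prints 1010 */
        return 0;
}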
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index d3f304d06adf..c3b8006f0eac 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c | |||
| @@ -42,6 +42,8 @@ | |||
| 42 | #include <linux/highmem.h> | 42 | #include <linux/highmem.h> |
| 43 | #include <linux/module.h> | 43 | #include <linux/module.h> |
| 44 | #include <linux/miscdevice.h> | 44 | #include <linux/miscdevice.h> |
| 45 | #include <asm/asm-prototypes.h> | ||
| 46 | #include <asm/tm.h> | ||
| 45 | 47 | ||
| 46 | #include "book3s.h" | 48 | #include "book3s.h" |
| 47 | 49 | ||
| @@ -53,7 +55,9 @@ | |||
| 53 | 55 | ||
| 54 | static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, | 56 | static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, |
| 55 | ulong msr); | 57 | ulong msr); |
| 56 | static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac); | 58 | #ifdef CONFIG_PPC_BOOK3S_64 |
| 59 | static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac); | ||
| 60 | #endif | ||
| 57 | 61 | ||
| 58 | /* Some compatibility defines */ | 62 | /* Some compatibility defines */ |
| 59 | #ifdef CONFIG_PPC_BOOK3S_32 | 63 | #ifdef CONFIG_PPC_BOOK3S_32 |
| @@ -114,6 +118,8 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) | |||
| 114 | 118 | ||
| 115 | if (kvmppc_is_split_real(vcpu)) | 119 | if (kvmppc_is_split_real(vcpu)) |
| 116 | kvmppc_fixup_split_real(vcpu); | 120 | kvmppc_fixup_split_real(vcpu); |
| 121 | |||
| 122 | kvmppc_restore_tm_pr(vcpu); | ||
| 117 | } | 123 | } |
| 118 | 124 | ||
| 119 | static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) | 125 | static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) |
| @@ -133,6 +139,7 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) | |||
| 133 | 139 | ||
| 134 | kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); | 140 | kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); |
| 135 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); | 141 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); |
| 142 | kvmppc_save_tm_pr(vcpu); | ||
| 136 | 143 | ||
| 137 | /* Enable AIL if supported */ | 144 | /* Enable AIL if supported */ |
| 138 | if (cpu_has_feature(CPU_FTR_HVMODE) && | 145 | if (cpu_has_feature(CPU_FTR_HVMODE) && |
| @@ -147,25 +154,25 @@ void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu) | |||
| 147 | { | 154 | { |
| 148 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); | 155 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
| 149 | 156 | ||
| 150 | svcpu->gpr[0] = vcpu->arch.gpr[0]; | 157 | svcpu->gpr[0] = vcpu->arch.regs.gpr[0]; |
| 151 | svcpu->gpr[1] = vcpu->arch.gpr[1]; | 158 | svcpu->gpr[1] = vcpu->arch.regs.gpr[1]; |
| 152 | svcpu->gpr[2] = vcpu->arch.gpr[2]; | 159 | svcpu->gpr[2] = vcpu->arch.regs.gpr[2]; |
| 153 | svcpu->gpr[3] = vcpu->arch.gpr[3]; | 160 | svcpu->gpr[3] = vcpu->arch.regs.gpr[3]; |
| 154 | svcpu->gpr[4] = vcpu->arch.gpr[4]; | 161 | svcpu->gpr[4] = vcpu->arch.regs.gpr[4]; |
| 155 | svcpu->gpr[5] = vcpu->arch.gpr[5]; | 162 | svcpu->gpr[5] = vcpu->arch.regs.gpr[5]; |
| 156 | svcpu->gpr[6] = vcpu->arch.gpr[6]; | 163 | svcpu->gpr[6] = vcpu->arch.regs.gpr[6]; |
| 157 | svcpu->gpr[7] = vcpu->arch.gpr[7]; | 164 | svcpu->gpr[7] = vcpu->arch.regs.gpr[7]; |
| 158 | svcpu->gpr[8] = vcpu->arch.gpr[8]; | 165 | svcpu->gpr[8] = vcpu->arch.regs.gpr[8]; |
| 159 | svcpu->gpr[9] = vcpu->arch.gpr[9]; | 166 | svcpu->gpr[9] = vcpu->arch.regs.gpr[9]; |
| 160 | svcpu->gpr[10] = vcpu->arch.gpr[10]; | 167 | svcpu->gpr[10] = vcpu->arch.regs.gpr[10]; |
| 161 | svcpu->gpr[11] = vcpu->arch.gpr[11]; | 168 | svcpu->gpr[11] = vcpu->arch.regs.gpr[11]; |
| 162 | svcpu->gpr[12] = vcpu->arch.gpr[12]; | 169 | svcpu->gpr[12] = vcpu->arch.regs.gpr[12]; |
| 163 | svcpu->gpr[13] = vcpu->arch.gpr[13]; | 170 | svcpu->gpr[13] = vcpu->arch.regs.gpr[13]; |
| 164 | svcpu->cr = vcpu->arch.cr; | 171 | svcpu->cr = vcpu->arch.cr; |
| 165 | svcpu->xer = vcpu->arch.xer; | 172 | svcpu->xer = vcpu->arch.regs.xer; |
| 166 | svcpu->ctr = vcpu->arch.ctr; | 173 | svcpu->ctr = vcpu->arch.regs.ctr; |
| 167 | svcpu->lr = vcpu->arch.lr; | 174 | svcpu->lr = vcpu->arch.regs.link; |
| 168 | svcpu->pc = vcpu->arch.pc; | 175 | svcpu->pc = vcpu->arch.regs.nip; |
| 169 | #ifdef CONFIG_PPC_BOOK3S_64 | 176 | #ifdef CONFIG_PPC_BOOK3S_64 |
| 170 | svcpu->shadow_fscr = vcpu->arch.shadow_fscr; | 177 | svcpu->shadow_fscr = vcpu->arch.shadow_fscr; |
| 171 | #endif | 178 | #endif |
| @@ -182,10 +189,45 @@ void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu) | |||
| 182 | svcpu_put(svcpu); | 189 | svcpu_put(svcpu); |
| 183 | } | 190 | } |
| 184 | 191 | ||
| 192 | static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) | ||
| 193 | { | ||
| 194 | ulong guest_msr = kvmppc_get_msr(vcpu); | ||
| 195 | ulong smsr = guest_msr; | ||
| 196 | |||
| 197 | /* Guest MSR values */ | ||
| 198 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 199 | smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE | | ||
| 200 | MSR_TM | MSR_TS_MASK; | ||
| 201 | #else | ||
| 202 | smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE; | ||
| 203 | #endif | ||
| 204 | /* Process MSR values */ | ||
| 205 | smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE; | ||
| 206 | /* External providers the guest reserved */ | ||
| 207 | smsr |= (guest_msr & vcpu->arch.guest_owned_ext); | ||
| 208 | /* 64-bit Process MSR values */ | ||
| 209 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
| 210 | smsr |= MSR_ISF | MSR_HV; | ||
| 211 | #endif | ||
| 212 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 213 | /* | ||
| 214 | * In guest privileged state we want all TM transactions to fail, | ||
| 215 | * so clear the MSR TM bit; every tbegin. executed by the guest | ||
| 216 | * then traps into the host. | ||
| 217 | */ | ||
| 218 | if (!(guest_msr & MSR_PR)) | ||
| 219 | smsr &= ~MSR_TM; | ||
| 220 | #endif | ||
| 221 | vcpu->arch.shadow_msr = smsr; | ||
| 222 | } | ||
| 223 | |||
| 185 | /* Copy data touched by real-mode code from shadow vcpu back to vcpu */ | 224 | /* Copy data touched by real-mode code from shadow vcpu back to vcpu */ |
| 186 | void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu) | 225 | void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu) |
| 187 | { | 226 | { |
| 188 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); | 227 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
| 228 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 229 | ulong old_msr; | ||
| 230 | #endif | ||
| 189 | 231 | ||
| 190 | /* | 232 | /* |
| 191 | * Maybe we were already preempted and synced the svcpu from | 233 | * Maybe we were already preempted and synced the svcpu from |
| @@ -194,25 +236,25 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu) | |||
| 194 | if (!svcpu->in_use) | 236 | if (!svcpu->in_use) |
| 195 | goto out; | 237 | goto out; |
| 196 | 238 | ||
| 197 | vcpu->arch.gpr[0] = svcpu->gpr[0]; | 239 | vcpu->arch.regs.gpr[0] = svcpu->gpr[0]; |
| 198 | vcpu->arch.gpr[1] = svcpu->gpr[1]; | 240 | vcpu->arch.regs.gpr[1] = svcpu->gpr[1]; |
| 199 | vcpu->arch.gpr[2] = svcpu->gpr[2]; | 241 | vcpu->arch.regs.gpr[2] = svcpu->gpr[2]; |
| 200 | vcpu->arch.gpr[3] = svcpu->gpr[3]; | 242 | vcpu->arch.regs.gpr[3] = svcpu->gpr[3]; |
| 201 | vcpu->arch.gpr[4] = svcpu->gpr[4]; | 243 | vcpu->arch.regs.gpr[4] = svcpu->gpr[4]; |
| 202 | vcpu->arch.gpr[5] = svcpu->gpr[5]; | 244 | vcpu->arch.regs.gpr[5] = svcpu->gpr[5]; |
| 203 | vcpu->arch.gpr[6] = svcpu->gpr[6]; | 245 | vcpu->arch.regs.gpr[6] = svcpu->gpr[6]; |
| 204 | vcpu->arch.gpr[7] = svcpu->gpr[7]; | 246 | vcpu->arch.regs.gpr[7] = svcpu->gpr[7]; |
| 205 | vcpu->arch.gpr[8] = svcpu->gpr[8]; | 247 | vcpu->arch.regs.gpr[8] = svcpu->gpr[8]; |
| 206 | vcpu->arch.gpr[9] = svcpu->gpr[9]; | 248 | vcpu->arch.regs.gpr[9] = svcpu->gpr[9]; |
| 207 | vcpu->arch.gpr[10] = svcpu->gpr[10]; | 249 | vcpu->arch.regs.gpr[10] = svcpu->gpr[10]; |
| 208 | vcpu->arch.gpr[11] = svcpu->gpr[11]; | 250 | vcpu->arch.regs.gpr[11] = svcpu->gpr[11]; |
| 209 | vcpu->arch.gpr[12] = svcpu->gpr[12]; | 251 | vcpu->arch.regs.gpr[12] = svcpu->gpr[12]; |
| 210 | vcpu->arch.gpr[13] = svcpu->gpr[13]; | 252 | vcpu->arch.regs.gpr[13] = svcpu->gpr[13]; |
| 211 | vcpu->arch.cr = svcpu->cr; | 253 | vcpu->arch.cr = svcpu->cr; |
| 212 | vcpu->arch.xer = svcpu->xer; | 254 | vcpu->arch.regs.xer = svcpu->xer; |
| 213 | vcpu->arch.ctr = svcpu->ctr; | 255 | vcpu->arch.regs.ctr = svcpu->ctr; |
| 214 | vcpu->arch.lr = svcpu->lr; | 256 | vcpu->arch.regs.link = svcpu->lr; |
| 215 | vcpu->arch.pc = svcpu->pc; | 257 | vcpu->arch.regs.nip = svcpu->pc; |
| 216 | vcpu->arch.shadow_srr1 = svcpu->shadow_srr1; | 258 | vcpu->arch.shadow_srr1 = svcpu->shadow_srr1; |
| 217 | vcpu->arch.fault_dar = svcpu->fault_dar; | 259 | vcpu->arch.fault_dar = svcpu->fault_dar; |
| 218 | vcpu->arch.fault_dsisr = svcpu->fault_dsisr; | 260 | vcpu->arch.fault_dsisr = svcpu->fault_dsisr; |
| @@ -228,12 +270,116 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu) | |||
| 228 | to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb; | 270 | to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb; |
| 229 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) | 271 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) |
| 230 | vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic; | 272 | vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic; |
| 273 | |||
| 274 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 275 | /* | ||
| 276 | * Unlike other MSR bits, MSR[TS]bits can be changed at guest without | ||
| 277 | * notifying host: | ||
| 278 | * modified by unprivileged instructions like "tbegin"/"tend"/ | ||
| 279 | * "tresume"/"tsuspend" in PR KVM guest. | ||
| 280 | * | ||
| 281 | * It is necessary to sync here to calculate a correct shadow_msr. | ||
| 282 | * | ||
| 283 | * privileged guest's tbegin will be failed at present. So we | ||
| 284 | * only take care of problem state guest. | ||
| 285 | */ | ||
| 286 | old_msr = kvmppc_get_msr(vcpu); | ||
| 287 | if (unlikely((old_msr & MSR_PR) && | ||
| 288 | (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) != | ||
| 289 | (old_msr & (MSR_TS_MASK)))) { | ||
| 290 | old_msr &= ~(MSR_TS_MASK); | ||
| 291 | old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)); | ||
| 292 | kvmppc_set_msr_fast(vcpu, old_msr); | ||
| 293 | kvmppc_recalc_shadow_msr(vcpu); | ||
| 294 | } | ||
| 295 | #endif | ||
| 296 | |||
| 231 | svcpu->in_use = false; | 297 | svcpu->in_use = false; |
| 232 | 298 | ||
| 233 | out: | 299 | out: |
| 234 | svcpu_put(svcpu); | 300 | svcpu_put(svcpu); |
| 235 | } | 301 | } |
| 236 | 302 | ||
| 303 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 304 | void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) | ||
| 305 | { | ||
| 306 | tm_enable(); | ||
| 307 | vcpu->arch.tfhar = mfspr(SPRN_TFHAR); | ||
| 308 | vcpu->arch.texasr = mfspr(SPRN_TEXASR); | ||
| 309 | vcpu->arch.tfiar = mfspr(SPRN_TFIAR); | ||
| 310 | tm_disable(); | ||
| 311 | } | ||
| 312 | |||
| 313 | void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) | ||
| 314 | { | ||
| 315 | tm_enable(); | ||
| 316 | mtspr(SPRN_TFHAR, vcpu->arch.tfhar); | ||
| 317 | mtspr(SPRN_TEXASR, vcpu->arch.texasr); | ||
| 318 | mtspr(SPRN_TFIAR, vcpu->arch.tfiar); | ||
| 319 | tm_disable(); | ||
| 320 | } | ||
| 321 | |||
| 322 | /* Load up the math (FP/VEC/VSX) bits that are enabled in the MSR from | ||
| 323 | * kvmppc_get_msr() but not yet enabled in hardware. | ||
| 324 | */ | ||
| 325 | static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu) | ||
| 326 | { | ||
| 327 | ulong exit_nr; | ||
| 328 | ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) & | ||
| 329 | (MSR_FP | MSR_VEC | MSR_VSX); | ||
| 330 | |||
| 331 | if (!ext_diff) | ||
| 332 | return; | ||
| 333 | |||
| 334 | if (ext_diff == MSR_FP) | ||
| 335 | exit_nr = BOOK3S_INTERRUPT_FP_UNAVAIL; | ||
| 336 | else if (ext_diff == MSR_VEC) | ||
| 337 | exit_nr = BOOK3S_INTERRUPT_ALTIVEC; | ||
| 338 | else | ||
| 339 | exit_nr = BOOK3S_INTERRUPT_VSX; | ||
| 340 | |||
| 341 | kvmppc_handle_ext(vcpu, exit_nr, ext_diff); | ||
| 342 | } | ||
| 343 | |||
| 344 | void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) | ||
| 345 | { | ||
| 346 | if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) { | ||
| 347 | kvmppc_save_tm_sprs(vcpu); | ||
| 348 | return; | ||
| 349 | } | ||
| 350 | |||
| 351 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); | ||
| 352 | kvmppc_giveup_ext(vcpu, MSR_VSX); | ||
| 353 | |||
| 354 | preempt_disable(); | ||
| 355 | _kvmppc_save_tm_pr(vcpu, mfmsr()); | ||
| 356 | preempt_enable(); | ||
| 357 | } | ||
| 358 | |||
| 359 | void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) | ||
| 360 | { | ||
| 361 | if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) { | ||
| 362 | kvmppc_restore_tm_sprs(vcpu); | ||
| 363 | if (kvmppc_get_msr(vcpu) & MSR_TM) { | ||
| 364 | kvmppc_handle_lost_math_exts(vcpu); | ||
| 365 | if (vcpu->arch.fscr & FSCR_TAR) | ||
| 366 | kvmppc_handle_fac(vcpu, FSCR_TAR_LG); | ||
| 367 | } | ||
| 368 | return; | ||
| 369 | } | ||
| 370 | |||
| 371 | preempt_disable(); | ||
| 372 | _kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu)); | ||
| 373 | preempt_enable(); | ||
| 374 | |||
| 375 | if (kvmppc_get_msr(vcpu) & MSR_TM) { | ||
| 376 | kvmppc_handle_lost_math_exts(vcpu); | ||
| 377 | if (vcpu->arch.fscr & FSCR_TAR) | ||
| 378 | kvmppc_handle_fac(vcpu, FSCR_TAR_LG); | ||
| 379 | } | ||
| 380 | } | ||
| 381 | #endif | ||
| 382 | |||
| 237 | static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) | 383 | static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) |
| 238 | { | 384 | { |
| 239 | int r = 1; /* Indicate we want to get back into the guest */ | 385 | int r = 1; /* Indicate we want to get back into the guest */ |
| @@ -306,32 +452,29 @@ static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte) | |||
| 306 | 452 | ||
| 307 | /*****************************************/ | 453 | /*****************************************/ |
| 308 | 454 | ||
| 309 | static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) | ||
| 310 | { | ||
| 311 | ulong guest_msr = kvmppc_get_msr(vcpu); | ||
| 312 | ulong smsr = guest_msr; | ||
| 313 | |||
| 314 | /* Guest MSR values */ | ||
| 315 | smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE; | ||
| 316 | /* Process MSR values */ | ||
| 317 | smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE; | ||
| 318 | /* External providers the guest reserved */ | ||
| 319 | smsr |= (guest_msr & vcpu->arch.guest_owned_ext); | ||
| 320 | /* 64-bit Process MSR values */ | ||
| 321 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
| 322 | smsr |= MSR_ISF | MSR_HV; | ||
| 323 | #endif | ||
| 324 | vcpu->arch.shadow_msr = smsr; | ||
| 325 | } | ||
| 326 | |||
| 327 | static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) | 455 | static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) |
| 328 | { | 456 | { |
| 329 | ulong old_msr = kvmppc_get_msr(vcpu); | 457 | ulong old_msr; |
| 458 | |||
| 459 | /* For PAPR guest, make sure MSR reflects guest mode */ | ||
| 460 | if (vcpu->arch.papr_enabled) | ||
| 461 | msr = (msr & ~MSR_HV) | MSR_ME; | ||
| 330 | 462 | ||
| 331 | #ifdef EXIT_DEBUG | 463 | #ifdef EXIT_DEBUG |
| 332 | printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr); | 464 | printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr); |
| 333 | #endif | 465 | #endif |
| 334 | 466 | ||
| 467 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 468 | /* We should never let the guest MSR reach TS=10 (transactional) | ||
| 469 | * with PR=0, since we always fail transactions in guest | ||
| 470 | * privileged state. | ||
| 471 | */ | ||
| 472 | if (!(msr & MSR_PR) && MSR_TM_TRANSACTIONAL(msr)) | ||
| 473 | kvmppc_emulate_tabort(vcpu, | ||
| 474 | TM_CAUSE_KVM_FAC_UNAV | TM_CAUSE_PERSISTENT); | ||
| 475 | #endif | ||
| 476 | |||
| 477 | old_msr = kvmppc_get_msr(vcpu); | ||
| 335 | msr &= to_book3s(vcpu)->msr_mask; | 478 | msr &= to_book3s(vcpu)->msr_mask; |
| 336 | kvmppc_set_msr_fast(vcpu, msr); | 479 | kvmppc_set_msr_fast(vcpu, msr); |
| 337 | kvmppc_recalc_shadow_msr(vcpu); | 480 | kvmppc_recalc_shadow_msr(vcpu); |
| @@ -387,6 +530,11 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) | |||
| 387 | /* Preload FPU if it's enabled */ | 530 | /* Preload FPU if it's enabled */ |
| 388 | if (kvmppc_get_msr(vcpu) & MSR_FP) | 531 | if (kvmppc_get_msr(vcpu) & MSR_FP) |
| 389 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); | 532 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); |
| 533 | |||
| 534 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 535 | if (kvmppc_get_msr(vcpu) & MSR_TM) | ||
| 536 | kvmppc_handle_lost_math_exts(vcpu); | ||
| 537 | #endif | ||
| 390 | } | 538 | } |
| 391 | 539 | ||
| 392 | void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr) | 540 | void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr) |
| @@ -584,24 +732,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 584 | pte.may_execute = !data; | 732 | pte.may_execute = !data; |
| 585 | } | 733 | } |
| 586 | 734 | ||
| 587 | if (page_found == -ENOENT) { | 735 | if (page_found == -ENOENT || page_found == -EPERM) { |
| 588 | /* Page not found in guest PTE entries */ | 736 | /* Page not found in guest PTE entries, or protection fault */ |
| 589 | u64 ssrr1 = vcpu->arch.shadow_srr1; | 737 | u64 flags; |
| 590 | u64 msr = kvmppc_get_msr(vcpu); | 738 | |
| 591 | kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); | 739 | if (page_found == -EPERM) |
| 592 | kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr); | 740 | flags = DSISR_PROTFAULT; |
| 593 | kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL)); | 741 | else |
| 594 | kvmppc_book3s_queue_irqprio(vcpu, vec); | 742 | flags = DSISR_NOHPTE; |
| 595 | } else if (page_found == -EPERM) { | 743 | if (data) { |
| 596 | /* Storage protection */ | 744 | flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE; |
| 597 | u32 dsisr = vcpu->arch.fault_dsisr; | 745 | kvmppc_core_queue_data_storage(vcpu, eaddr, flags); |
| 598 | u64 ssrr1 = vcpu->arch.shadow_srr1; | 746 | } else { |
| 599 | u64 msr = kvmppc_get_msr(vcpu); | 747 | kvmppc_core_queue_inst_storage(vcpu, flags); |
| 600 | kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); | 748 | } |
| 601 | dsisr = (dsisr & ~DSISR_NOHPTE) | DSISR_PROTFAULT; | ||
| 602 | kvmppc_set_dsisr(vcpu, dsisr); | ||
| 603 | kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL)); | ||
| 604 | kvmppc_book3s_queue_irqprio(vcpu, vec); | ||
| 605 | } else if (page_found == -EINVAL) { | 749 | } else if (page_found == -EINVAL) { |
| 606 | /* Page not found in guest SLB */ | 750 | /* Page not found in guest SLB */ |
| 607 | kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); | 751 | kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); |
| @@ -683,7 +827,7 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) | |||
| 683 | } | 827 | } |
| 684 | 828 | ||
| 685 | /* Give up facility (TAR / EBB / DSCR) */ | 829 | /* Give up facility (TAR / EBB / DSCR) */ |
| 686 | static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac) | 830 | void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac) |
| 687 | { | 831 | { |
| 688 | #ifdef CONFIG_PPC_BOOK3S_64 | 832 | #ifdef CONFIG_PPC_BOOK3S_64 |
| 689 | if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) { | 833 | if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) { |
| @@ -802,7 +946,7 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu) | |||
| 802 | 946 | ||
| 803 | #ifdef CONFIG_PPC_BOOK3S_64 | 947 | #ifdef CONFIG_PPC_BOOK3S_64 |
| 804 | 948 | ||
| 805 | static void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac) | 949 | void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac) |
| 806 | { | 950 | { |
| 807 | /* Inject the Interrupt Cause field and trigger a guest interrupt */ | 951 | /* Inject the Interrupt Cause field and trigger a guest interrupt */ |
| 808 | vcpu->arch.fscr &= ~(0xffULL << 56); | 952 | vcpu->arch.fscr &= ~(0xffULL << 56); |
| @@ -864,6 +1008,18 @@ static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac) | |||
| 864 | break; | 1008 | break; |
| 865 | } | 1009 | } |
| 866 | 1010 | ||
| 1011 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 1012 | /* Since we disable MSR_TM in privileged state, an mfspr of a TM SPR | ||
| 1013 | * can trigger a TM facility-unavailable interrupt. In that case the | ||
| 1014 | * emulation is handled by kvmppc_emulate_fac(), which eventually | ||
| 1015 | * invokes kvmppc_emulate_mfspr(). But note the mfspr's RT can name a | ||
| 1016 | * non-volatile register, so we need to restore those NV regs to | ||
| 1017 | * reflect the update. | ||
| 1018 | */ | ||
| 1019 | if ((fac == FSCR_TM_LG) && !(kvmppc_get_msr(vcpu) & MSR_PR)) | ||
| 1020 | return RESUME_GUEST_NV; | ||
| 1021 | #endif | ||
| 1022 | |||
| 867 | return RESUME_GUEST; | 1023 | return RESUME_GUEST; |
| 868 | } | 1024 | } |
| 869 | 1025 | ||
| @@ -872,7 +1028,12 @@ void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr) | |||
| 872 | if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) { | 1028 | if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) { |
| 873 | /* TAR got dropped, drop it in shadow too */ | 1029 | /* TAR got dropped, drop it in shadow too */ |
| 874 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); | 1030 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); |
| 1031 | } else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) { | ||
| 1032 | vcpu->arch.fscr = fscr; | ||
| 1033 | kvmppc_handle_fac(vcpu, FSCR_TAR_LG); | ||
| 1034 | return; | ||
| 875 | } | 1035 | } |
| 1036 | |||
| 876 | vcpu->arch.fscr = fscr; | 1037 | vcpu->arch.fscr = fscr; |
| 877 | } | 1038 | } |
| 878 | #endif | 1039 | #endif |
| @@ -1017,10 +1178,8 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 1017 | kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL); | 1178 | kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL); |
| 1018 | r = RESUME_GUEST; | 1179 | r = RESUME_GUEST; |
| 1019 | } else { | 1180 | } else { |
| 1020 | u64 msr = kvmppc_get_msr(vcpu); | 1181 | kvmppc_core_queue_inst_storage(vcpu, |
| 1021 | msr |= shadow_srr1 & 0x58000000; | 1182 | shadow_srr1 & 0x58000000); |
| 1022 | kvmppc_set_msr_fast(vcpu, msr); | ||
| 1023 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); | ||
| 1024 | r = RESUME_GUEST; | 1183 | r = RESUME_GUEST; |
| 1025 | } | 1184 | } |
| 1026 | break; | 1185 | break; |
| @@ -1059,9 +1218,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 1059 | r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); | 1218 | r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); |
| 1060 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | 1219 | srcu_read_unlock(&vcpu->kvm->srcu, idx); |
| 1061 | } else { | 1220 | } else { |
| 1062 | kvmppc_set_dar(vcpu, dar); | 1221 | kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr); |
| 1063 | kvmppc_set_dsisr(vcpu, fault_dsisr); | ||
| 1064 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); | ||
| 1065 | r = RESUME_GUEST; | 1222 | r = RESUME_GUEST; |
| 1066 | } | 1223 | } |
| 1067 | break; | 1224 | break; |
| @@ -1092,10 +1249,13 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 1092 | case BOOK3S_INTERRUPT_EXTERNAL: | 1249 | case BOOK3S_INTERRUPT_EXTERNAL: |
| 1093 | case BOOK3S_INTERRUPT_EXTERNAL_LEVEL: | 1250 | case BOOK3S_INTERRUPT_EXTERNAL_LEVEL: |
| 1094 | case BOOK3S_INTERRUPT_EXTERNAL_HV: | 1251 | case BOOK3S_INTERRUPT_EXTERNAL_HV: |
| 1252 | case BOOK3S_INTERRUPT_H_VIRT: | ||
| 1095 | vcpu->stat.ext_intr_exits++; | 1253 | vcpu->stat.ext_intr_exits++; |
| 1096 | r = RESUME_GUEST; | 1254 | r = RESUME_GUEST; |
| 1097 | break; | 1255 | break; |
| 1256 | case BOOK3S_INTERRUPT_HMI: | ||
| 1098 | case BOOK3S_INTERRUPT_PERFMON: | 1257 | case BOOK3S_INTERRUPT_PERFMON: |
| 1258 | case BOOK3S_INTERRUPT_SYSTEM_RESET: | ||
| 1099 | r = RESUME_GUEST; | 1259 | r = RESUME_GUEST; |
| 1100 | break; | 1260 | break; |
| 1101 | case BOOK3S_INTERRUPT_PROGRAM: | 1261 | case BOOK3S_INTERRUPT_PROGRAM: |
| @@ -1225,8 +1385,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 1225 | } | 1385 | } |
| 1226 | #ifdef CONFIG_PPC_BOOK3S_64 | 1386 | #ifdef CONFIG_PPC_BOOK3S_64 |
| 1227 | case BOOK3S_INTERRUPT_FAC_UNAVAIL: | 1387 | case BOOK3S_INTERRUPT_FAC_UNAVAIL: |
| 1228 | kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56); | 1388 | r = kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56); |
| 1229 | r = RESUME_GUEST; | ||
| 1230 | break; | 1389 | break; |
| 1231 | #endif | 1390 | #endif |
| 1232 | case BOOK3S_INTERRUPT_MACHINE_CHECK: | 1391 | case BOOK3S_INTERRUPT_MACHINE_CHECK: |
| @@ -1379,6 +1538,73 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, | |||
| 1379 | else | 1538 | else |
| 1380 | *val = get_reg_val(id, 0); | 1539 | *val = get_reg_val(id, 0); |
| 1381 | break; | 1540 | break; |
| 1541 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 1542 | case KVM_REG_PPC_TFHAR: | ||
| 1543 | *val = get_reg_val(id, vcpu->arch.tfhar); | ||
| 1544 | break; | ||
| 1545 | case KVM_REG_PPC_TFIAR: | ||
| 1546 | *val = get_reg_val(id, vcpu->arch.tfiar); | ||
| 1547 | break; | ||
| 1548 | case KVM_REG_PPC_TEXASR: | ||
| 1549 | *val = get_reg_val(id, vcpu->arch.texasr); | ||
| 1550 | break; | ||
| 1551 | case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: | ||
| 1552 | *val = get_reg_val(id, | ||
| 1553 | vcpu->arch.gpr_tm[id-KVM_REG_PPC_TM_GPR0]); | ||
| 1554 | break; | ||
| 1555 | case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63: | ||
| 1556 | { | ||
| 1557 | int i, j; | ||
| 1558 | |||
| 1559 | i = id - KVM_REG_PPC_TM_VSR0; | ||
| 1560 | if (i < 32) | ||
| 1561 | for (j = 0; j < TS_FPRWIDTH; j++) | ||
| 1562 | val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; | ||
| 1563 | else { | ||
| 1564 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | ||
| 1565 | val->vval = vcpu->arch.vr_tm.vr[i-32]; | ||
| 1566 | else | ||
| 1567 | r = -ENXIO; | ||
| 1568 | } | ||
| 1569 | break; | ||
| 1570 | } | ||
| 1571 | case KVM_REG_PPC_TM_CR: | ||
| 1572 | *val = get_reg_val(id, vcpu->arch.cr_tm); | ||
| 1573 | break; | ||
| 1574 | case KVM_REG_PPC_TM_XER: | ||
| 1575 | *val = get_reg_val(id, vcpu->arch.xer_tm); | ||
| 1576 | break; | ||
| 1577 | case KVM_REG_PPC_TM_LR: | ||
| 1578 | *val = get_reg_val(id, vcpu->arch.lr_tm); | ||
| 1579 | break; | ||
| 1580 | case KVM_REG_PPC_TM_CTR: | ||
| 1581 | *val = get_reg_val(id, vcpu->arch.ctr_tm); | ||
| 1582 | break; | ||
| 1583 | case KVM_REG_PPC_TM_FPSCR: | ||
| 1584 | *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); | ||
| 1585 | break; | ||
| 1586 | case KVM_REG_PPC_TM_AMR: | ||
| 1587 | *val = get_reg_val(id, vcpu->arch.amr_tm); | ||
| 1588 | break; | ||
| 1589 | case KVM_REG_PPC_TM_PPR: | ||
| 1590 | *val = get_reg_val(id, vcpu->arch.ppr_tm); | ||
| 1591 | break; | ||
| 1592 | case KVM_REG_PPC_TM_VRSAVE: | ||
| 1593 | *val = get_reg_val(id, vcpu->arch.vrsave_tm); | ||
| 1594 | break; | ||
| 1595 | case KVM_REG_PPC_TM_VSCR: | ||
| 1596 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | ||
| 1597 | *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); | ||
| 1598 | else | ||
| 1599 | r = -ENXIO; | ||
| 1600 | break; | ||
| 1601 | case KVM_REG_PPC_TM_DSCR: | ||
| 1602 | *val = get_reg_val(id, vcpu->arch.dscr_tm); | ||
| 1603 | break; | ||
| 1604 | case KVM_REG_PPC_TM_TAR: | ||
| 1605 | *val = get_reg_val(id, vcpu->arch.tar_tm); | ||
| 1606 | break; | ||
| 1607 | #endif | ||
| 1382 | default: | 1608 | default: |
| 1383 | r = -EINVAL; | 1609 | r = -EINVAL; |
| 1384 | break; | 1610 | break; |
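[Editor's note] With the TM registers exposed through the one-reg interface above, userspace can read them via the standard KVM_GET_ONE_REG ioctl. A hedged sketch (vcpu_fd is assumed to come from KVM_CREATE_VCPU elsewhere, and the build needs the powerpc uapi headers for KVM_REG_PPC_TFHAR):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read the checkpointed TFHAR from a vcpu; returns the ioctl result. */
static int read_tfhar(int vcpu_fd, __u64 *tfhar)
{
        struct kvm_one_reg reg = {
                .id   = KVM_REG_PPC_TFHAR,
                .addr = (__u64)(unsigned long)tfhar,
        };
        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}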
| @@ -1412,6 +1638,72 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, | |||
| 1412 | case KVM_REG_PPC_LPCR_64: | 1638 | case KVM_REG_PPC_LPCR_64: |
| 1413 | kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val)); | 1639 | kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val)); |
| 1414 | break; | 1640 | break; |
| 1641 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 1642 | case KVM_REG_PPC_TFHAR: | ||
| 1643 | vcpu->arch.tfhar = set_reg_val(id, *val); | ||
| 1644 | break; | ||
| 1645 | case KVM_REG_PPC_TFIAR: | ||
| 1646 | vcpu->arch.tfiar = set_reg_val(id, *val); | ||
| 1647 | break; | ||
| 1648 | case KVM_REG_PPC_TEXASR: | ||
| 1649 | vcpu->arch.texasr = set_reg_val(id, *val); | ||
| 1650 | break; | ||
| 1651 | case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: | ||
| 1652 | vcpu->arch.gpr_tm[id - KVM_REG_PPC_TM_GPR0] = | ||
| 1653 | set_reg_val(id, *val); | ||
| 1654 | break; | ||
| 1655 | case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63: | ||
| 1656 | { | ||
| 1657 | int i, j; | ||
| 1658 | |||
| 1659 | i = id - KVM_REG_PPC_TM_VSR0; | ||
| 1660 | if (i < 32) | ||
| 1661 | for (j = 0; j < TS_FPRWIDTH; j++) | ||
| 1662 | vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; | ||
| 1663 | else | ||
| 1664 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | ||
| 1665 | vcpu->arch.vr_tm.vr[i-32] = val->vval; | ||
| 1666 | else | ||
| 1667 | r = -ENXIO; | ||
| 1668 | break; | ||
| 1669 | } | ||
| 1670 | case KVM_REG_PPC_TM_CR: | ||
| 1671 | vcpu->arch.cr_tm = set_reg_val(id, *val); | ||
| 1672 | break; | ||
| 1673 | case KVM_REG_PPC_TM_XER: | ||
| 1674 | vcpu->arch.xer_tm = set_reg_val(id, *val); | ||
| 1675 | break; | ||
| 1676 | case KVM_REG_PPC_TM_LR: | ||
| 1677 | vcpu->arch.lr_tm = set_reg_val(id, *val); | ||
| 1678 | break; | ||
| 1679 | case KVM_REG_PPC_TM_CTR: | ||
| 1680 | vcpu->arch.ctr_tm = set_reg_val(id, *val); | ||
| 1681 | break; | ||
| 1682 | case KVM_REG_PPC_TM_FPSCR: | ||
| 1683 | vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); | ||
| 1684 | break; | ||
| 1685 | case KVM_REG_PPC_TM_AMR: | ||
| 1686 | vcpu->arch.amr_tm = set_reg_val(id, *val); | ||
| 1687 | break; | ||
| 1688 | case KVM_REG_PPC_TM_PPR: | ||
| 1689 | vcpu->arch.ppr_tm = set_reg_val(id, *val); | ||
| 1690 | break; | ||
| 1691 | case KVM_REG_PPC_TM_VRSAVE: | ||
| 1692 | vcpu->arch.vrsave_tm = set_reg_val(id, *val); | ||
| 1693 | break; | ||
| 1694 | case KVM_REG_PPC_TM_VSCR: | ||
| 1695 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | ||
| 1696 | vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val); | ||
| 1697 | else | ||
| 1698 | r = -ENXIO; | ||
| 1699 | break; | ||
| 1700 | case KVM_REG_PPC_TM_DSCR: | ||
| 1701 | vcpu->arch.dscr_tm = set_reg_val(id, *val); | ||
| 1702 | break; | ||
| 1703 | case KVM_REG_PPC_TM_TAR: | ||
| 1704 | vcpu->arch.tar_tm = set_reg_val(id, *val); | ||
| 1705 | break; | ||
| 1706 | #endif | ||
| 1415 | default: | 1707 | default: |
| 1416 | r = -EINVAL; | 1708 | r = -EINVAL; |
| 1417 | break; | 1709 | break; |
| @@ -1687,6 +1979,17 @@ static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, | |||
| 1687 | 1979 | ||
| 1688 | return 0; | 1980 | return 0; |
| 1689 | } | 1981 | } |
| 1982 | |||
| 1983 | static int kvm_configure_mmu_pr(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg) | ||
| 1984 | { | ||
| 1985 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) | ||
| 1986 | return -ENODEV; | ||
| 1987 | /* Require flags and process table base and size to all be zero. */ | ||
| 1988 | if (cfg->flags || cfg->process_table) | ||
| 1989 | return -EINVAL; | ||
| 1990 | return 0; | ||
| 1991 | } | ||
| 1992 | |||
| 1690 | #else | 1993 | #else |
| 1691 | static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, | 1994 | static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, |
| 1692 | struct kvm_ppc_smmu_info *info) | 1995 | struct kvm_ppc_smmu_info *info) |
| @@ -1735,9 +2038,12 @@ static void kvmppc_core_destroy_vm_pr(struct kvm *kvm) | |||
| 1735 | static int kvmppc_core_check_processor_compat_pr(void) | 2038 | static int kvmppc_core_check_processor_compat_pr(void) |
| 1736 | { | 2039 | { |
| 1737 | /* | 2040 | /* |
| 1738 | * Disable KVM for Power9 until the required bits are merged. | 2041 | * PR KVM can work on POWER9 inside a guest partition |
| 2042 | * running in HPT mode. It can't work if we are using | ||
| 2043 | * radix translation (because radix provides no way for | ||
| 2044 | * a process to have unique translations in quadrant 3). | ||
| 1739 | */ | 2045 | */ |
| 1740 | if (cpu_has_feature(CPU_FTR_ARCH_300)) | 2046 | if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled()) |
| 1741 | return -EIO; | 2047 | return -EIO; |
| 1742 | return 0; | 2048 | return 0; |
| 1743 | } | 2049 | } |
| @@ -1781,7 +2087,9 @@ static struct kvmppc_ops kvm_ops_pr = { | |||
| 1781 | .arch_vm_ioctl = kvm_arch_vm_ioctl_pr, | 2087 | .arch_vm_ioctl = kvm_arch_vm_ioctl_pr, |
| 1782 | #ifdef CONFIG_PPC_BOOK3S_64 | 2088 | #ifdef CONFIG_PPC_BOOK3S_64 |
| 1783 | .hcall_implemented = kvmppc_hcall_impl_pr, | 2089 | .hcall_implemented = kvmppc_hcall_impl_pr, |
| 2090 | .configure_mmu = kvm_configure_mmu_pr, | ||
| 1784 | #endif | 2091 | #endif |
| 2092 | .giveup_ext = kvmppc_giveup_ext, | ||
| 1785 | }; | 2093 | }; |
| 1786 | 2094 | ||
| 1787 | 2095 | ||
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S index 93a180ceefad..98ccc7ec5d48 100644 --- a/arch/powerpc/kvm/book3s_segment.S +++ b/arch/powerpc/kvm/book3s_segment.S | |||
| @@ -383,6 +383,19 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |||
| 383 | */ | 383 | */ |
| 384 | 384 | ||
| 385 | PPC_LL r6, HSTATE_HOST_MSR(r13) | 385 | PPC_LL r6, HSTATE_HOST_MSR(r13) |
| 386 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 387 | /* | ||
| 388 | * We don't want this rfi to change the MSR[TS] bits. | ||
| 389 | * The actual TM handling logic runs in the host, with | ||
| 390 | * DR/IR recovered, after HSTATE_VMHANDLER. Because MSR_TM | ||
| 391 | * can be enabled in HOST_MSR, rfid would not suppress the | ||
| 392 | * TS change, which could raise an exception. Manually copy | ||
| 393 | * the current TS bits into the target MSR to keep them unchanged. | ||
| 394 | */ | ||
| 395 | mfmsr r7 | ||
| 396 | rldicl r7, r7, 64 - MSR_TS_S_LG, 62 | ||
| 397 | rldimi r6, r7, MSR_TS_S_LG, 63 - MSR_TS_T_LG | ||
| 398 | #endif | ||
| 386 | PPC_LL r8, HSTATE_VMHANDLER(r13) | 399 | PPC_LL r8, HSTATE_VMHANDLER(r13) |
| 387 | 400 | ||
| 388 | #ifdef CONFIG_PPC64 | 401 | #ifdef CONFIG_PPC64 |
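[Editor's note] The mfmsr/rldicl/rldimi sequence added above splices the current MSR[TS] field into the MSR that rfid will install, so the TS state cannot change across the return. The equivalent C bit-copy, as a hedged model (mask per reg.h, quoted from memory):

#include <stdio.h>

#define MSR_TS_MASK (3UL << 33)         /* MSR[TS] field */

int main(void)
{
        unsigned long cur_msr  = 1UL << 33;     /* say: suspended now */
        unsigned long host_msr = 0;             /* MSR rfid will set */

        /* mfmsr/rldicl/rldimi  ==  copy the TS field across unchanged */
        host_msr = (host_msr & ~MSR_TS_MASK) | (cur_msr & MSR_TS_MASK);
        printf("TS after rfid: %lu\n", (host_msr >> 33) & 3);
        return 0;
}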
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c index 99c3620b40d9..6e41ba7ec8f4 100644 --- a/arch/powerpc/kvm/book3s_xive_template.c +++ b/arch/powerpc/kvm/book3s_xive_template.c | |||
| @@ -334,7 +334,7 @@ X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu) | |||
| 334 | */ | 334 | */ |
| 335 | 335 | ||
| 336 | /* Return interrupt and old CPPR in GPR4 */ | 336 | /* Return interrupt and old CPPR in GPR4 */ |
| 337 | vcpu->arch.gpr[4] = hirq | (old_cppr << 24); | 337 | vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24); |
| 338 | 338 | ||
| 339 | return H_SUCCESS; | 339 | return H_SUCCESS; |
| 340 | } | 340 | } |
| @@ -369,7 +369,7 @@ X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long | |||
| 369 | hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll); | 369 | hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll); |
| 370 | 370 | ||
| 371 | /* Return interrupt and old CPPR in GPR4 */ | 371 | /* Return interrupt and old CPPR in GPR4 */ |
| 372 | vcpu->arch.gpr[4] = hirq | (xc->cppr << 24); | 372 | vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24); |
| 373 | 373 | ||
| 374 | return H_SUCCESS; | 374 | return H_SUCCESS; |
| 375 | } | 375 | } |
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 876d4f294fdd..a9ca016da670 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c | |||
| @@ -77,8 +77,10 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu) | |||
| 77 | { | 77 | { |
| 78 | int i; | 78 | int i; |
| 79 | 79 | ||
| 80 | printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr); | 80 | printk("pc: %08lx msr: %08llx\n", vcpu->arch.regs.nip, |
| 81 | printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr); | 81 | vcpu->arch.shared->msr); |
| 82 | printk("lr: %08lx ctr: %08lx\n", vcpu->arch.regs.link, | ||
| 83 | vcpu->arch.regs.ctr); | ||
| 82 | printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0, | 84 | printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0, |
| 83 | vcpu->arch.shared->srr1); | 85 | vcpu->arch.shared->srr1); |
| 84 | 86 | ||
| @@ -491,24 +493,25 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | |||
| 491 | if (allowed) { | 493 | if (allowed) { |
| 492 | switch (int_class) { | 494 | switch (int_class) { |
| 493 | case INT_CLASS_NONCRIT: | 495 | case INT_CLASS_NONCRIT: |
| 494 | set_guest_srr(vcpu, vcpu->arch.pc, | 496 | set_guest_srr(vcpu, vcpu->arch.regs.nip, |
| 495 | vcpu->arch.shared->msr); | 497 | vcpu->arch.shared->msr); |
| 496 | break; | 498 | break; |
| 497 | case INT_CLASS_CRIT: | 499 | case INT_CLASS_CRIT: |
| 498 | set_guest_csrr(vcpu, vcpu->arch.pc, | 500 | set_guest_csrr(vcpu, vcpu->arch.regs.nip, |
| 499 | vcpu->arch.shared->msr); | 501 | vcpu->arch.shared->msr); |
| 500 | break; | 502 | break; |
| 501 | case INT_CLASS_DBG: | 503 | case INT_CLASS_DBG: |
| 502 | set_guest_dsrr(vcpu, vcpu->arch.pc, | 504 | set_guest_dsrr(vcpu, vcpu->arch.regs.nip, |
| 503 | vcpu->arch.shared->msr); | 505 | vcpu->arch.shared->msr); |
| 504 | break; | 506 | break; |
| 505 | case INT_CLASS_MC: | 507 | case INT_CLASS_MC: |
| 506 | set_guest_mcsrr(vcpu, vcpu->arch.pc, | 508 | set_guest_mcsrr(vcpu, vcpu->arch.regs.nip, |
| 507 | vcpu->arch.shared->msr); | 509 | vcpu->arch.shared->msr); |
| 508 | break; | 510 | break; |
| 509 | } | 511 | } |
| 510 | 512 | ||
| 511 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; | 513 | vcpu->arch.regs.nip = vcpu->arch.ivpr | |
| 514 | vcpu->arch.ivor[priority]; | ||
| 512 | if (update_esr == true) | 515 | if (update_esr == true) |
| 513 | kvmppc_set_esr(vcpu, vcpu->arch.queued_esr); | 516 | kvmppc_set_esr(vcpu, vcpu->arch.queued_esr); |
| 514 | if (update_dear == true) | 517 | if (update_dear == true) |
| @@ -826,7 +829,7 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
| 826 | 829 | ||
| 827 | case EMULATE_FAIL: | 830 | case EMULATE_FAIL: |
| 828 | printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", | 831 | printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", |
| 829 | __func__, vcpu->arch.pc, vcpu->arch.last_inst); | 832 | __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst); |
| 830 | /* For debugging, encode the failing instruction and | 833 | /* For debugging, encode the failing instruction and |
| 831 | * report it to userspace. */ | 834 | * report it to userspace. */ |
| 832 | run->hw.hardware_exit_reason = ~0ULL << 32; | 835 | run->hw.hardware_exit_reason = ~0ULL << 32; |
| @@ -875,7 +878,7 @@ static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
| 875 | */ | 878 | */ |
| 876 | vcpu->arch.dbsr = 0; | 879 | vcpu->arch.dbsr = 0; |
| 877 | run->debug.arch.status = 0; | 880 | run->debug.arch.status = 0; |
| 878 | run->debug.arch.address = vcpu->arch.pc; | 881 | run->debug.arch.address = vcpu->arch.regs.nip; |
| 879 | 882 | ||
| 880 | if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) { | 883 | if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) { |
| 881 | run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT; | 884 | run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT; |
| @@ -971,7 +974,7 @@ static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 971 | 974 | ||
| 972 | case EMULATE_FAIL: | 975 | case EMULATE_FAIL: |
| 973 | pr_debug("%s: load instruction from guest address %lx failed\n", | 976 | pr_debug("%s: load instruction from guest address %lx failed\n", |
| 974 | __func__, vcpu->arch.pc); | 977 | __func__, vcpu->arch.regs.nip); |
| 975 | /* For debugging, encode the failing instruction and | 978 | /* For debugging, encode the failing instruction and |
| 976 | * report it to userspace. */ | 979 | * report it to userspace. */ |
| 977 | run->hw.hardware_exit_reason = ~0ULL << 32; | 980 | run->hw.hardware_exit_reason = ~0ULL << 32; |
| @@ -1169,7 +1172,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 1169 | case BOOKE_INTERRUPT_SPE_FP_DATA: | 1172 | case BOOKE_INTERRUPT_SPE_FP_DATA: |
| 1170 | case BOOKE_INTERRUPT_SPE_FP_ROUND: | 1173 | case BOOKE_INTERRUPT_SPE_FP_ROUND: |
| 1171 | printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n", | 1174 | printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n", |
| 1172 | __func__, exit_nr, vcpu->arch.pc); | 1175 | __func__, exit_nr, vcpu->arch.regs.nip); |
| 1173 | run->hw.hardware_exit_reason = exit_nr; | 1176 | run->hw.hardware_exit_reason = exit_nr; |
| 1174 | r = RESUME_HOST; | 1177 | r = RESUME_HOST; |
| 1175 | break; | 1178 | break; |
| @@ -1299,7 +1302,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 1299 | } | 1302 | } |
| 1300 | 1303 | ||
| 1301 | case BOOKE_INTERRUPT_ITLB_MISS: { | 1304 | case BOOKE_INTERRUPT_ITLB_MISS: { |
| 1302 | unsigned long eaddr = vcpu->arch.pc; | 1305 | unsigned long eaddr = vcpu->arch.regs.nip; |
| 1303 | gpa_t gpaddr; | 1306 | gpa_t gpaddr; |
| 1304 | gfn_t gfn; | 1307 | gfn_t gfn; |
| 1305 | int gtlb_index; | 1308 | int gtlb_index; |
| @@ -1391,7 +1394,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
| 1391 | int i; | 1394 | int i; |
| 1392 | int r; | 1395 | int r; |
| 1393 | 1396 | ||
| 1394 | vcpu->arch.pc = 0; | 1397 | vcpu->arch.regs.nip = 0; |
| 1395 | vcpu->arch.shared->pir = vcpu->vcpu_id; | 1398 | vcpu->arch.shared->pir = vcpu->vcpu_id; |
| 1396 | kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ | 1399 | kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ |
| 1397 | kvmppc_set_msr(vcpu, 0); | 1400 | kvmppc_set_msr(vcpu, 0); |
| @@ -1440,10 +1443,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
| 1440 | 1443 | ||
| 1441 | vcpu_load(vcpu); | 1444 | vcpu_load(vcpu); |
| 1442 | 1445 | ||
| 1443 | regs->pc = vcpu->arch.pc; | 1446 | regs->pc = vcpu->arch.regs.nip; |
| 1444 | regs->cr = kvmppc_get_cr(vcpu); | 1447 | regs->cr = kvmppc_get_cr(vcpu); |
| 1445 | regs->ctr = vcpu->arch.ctr; | 1448 | regs->ctr = vcpu->arch.regs.ctr; |
| 1446 | regs->lr = vcpu->arch.lr; | 1449 | regs->lr = vcpu->arch.regs.link; |
| 1447 | regs->xer = kvmppc_get_xer(vcpu); | 1450 | regs->xer = kvmppc_get_xer(vcpu); |
| 1448 | regs->msr = vcpu->arch.shared->msr; | 1451 | regs->msr = vcpu->arch.shared->msr; |
| 1449 | regs->srr0 = kvmppc_get_srr0(vcpu); | 1452 | regs->srr0 = kvmppc_get_srr0(vcpu); |
| @@ -1471,10 +1474,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
| 1471 | 1474 | ||
| 1472 | vcpu_load(vcpu); | 1475 | vcpu_load(vcpu); |
| 1473 | 1476 | ||
| 1474 | vcpu->arch.pc = regs->pc; | 1477 | vcpu->arch.regs.nip = regs->pc; |
| 1475 | kvmppc_set_cr(vcpu, regs->cr); | 1478 | kvmppc_set_cr(vcpu, regs->cr); |
| 1476 | vcpu->arch.ctr = regs->ctr; | 1479 | vcpu->arch.regs.ctr = regs->ctr; |
| 1477 | vcpu->arch.lr = regs->lr; | 1480 | vcpu->arch.regs.link = regs->lr; |
| 1478 | kvmppc_set_xer(vcpu, regs->xer); | 1481 | kvmppc_set_xer(vcpu, regs->xer); |
| 1479 | kvmppc_set_msr(vcpu, regs->msr); | 1482 | kvmppc_set_msr(vcpu, regs->msr); |
| 1480 | kvmppc_set_srr0(vcpu, regs->srr0); | 1483 | kvmppc_set_srr0(vcpu, regs->srr0); |
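The pc/ctr/lr renames in this hunk and the ones above are all fallout from one structural change: the booke guest state now lives in a struct pt_regs embedded in vcpu->arch, so it can be handed straight to the generic instruction analyser. A minimal sketch of the mapping, assuming the asm/ptrace.h field names (the wrapper struct name here is hypothetical, not patch code):

	#include <asm/ptrace.h>

	struct booke_state_sketch {
		struct pt_regs regs;	/* regs.nip replaces arch.pc,
					 * regs.link replaces arch.lr,
					 * regs.ctr replaces arch.ctr,
					 * regs.gpr[] replaces arch.gpr[] */
	};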
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c index a82f64502de1..d23e582f0fee 100644 --- a/arch/powerpc/kvm/booke_emulate.c +++ b/arch/powerpc/kvm/booke_emulate.c | |||
| @@ -34,19 +34,19 @@ | |||
| 34 | 34 | ||
| 35 | static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) | 35 | static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) |
| 36 | { | 36 | { |
| 37 | vcpu->arch.pc = vcpu->arch.shared->srr0; | 37 | vcpu->arch.regs.nip = vcpu->arch.shared->srr0; |
| 38 | kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1); | 38 | kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1); |
| 39 | } | 39 | } |
| 40 | 40 | ||
| 41 | static void kvmppc_emul_rfdi(struct kvm_vcpu *vcpu) | 41 | static void kvmppc_emul_rfdi(struct kvm_vcpu *vcpu) |
| 42 | { | 42 | { |
| 43 | vcpu->arch.pc = vcpu->arch.dsrr0; | 43 | vcpu->arch.regs.nip = vcpu->arch.dsrr0; |
| 44 | kvmppc_set_msr(vcpu, vcpu->arch.dsrr1); | 44 | kvmppc_set_msr(vcpu, vcpu->arch.dsrr1); |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu) | 47 | static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu) |
| 48 | { | 48 | { |
| 49 | vcpu->arch.pc = vcpu->arch.csrr0; | 49 | vcpu->arch.regs.nip = vcpu->arch.csrr0; |
| 50 | kvmppc_set_msr(vcpu, vcpu->arch.csrr1); | 50 | kvmppc_set_msr(vcpu, vcpu->arch.csrr1); |
| 51 | } | 51 | } |
| 52 | 52 | ||
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index 990db69a1d0b..3f8189eb56ed 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c | |||
| @@ -53,7 +53,7 @@ static int dbell2prio(ulong param) | |||
| 53 | 53 | ||
| 54 | static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb) | 54 | static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb) |
| 55 | { | 55 | { |
| 56 | ulong param = vcpu->arch.gpr[rb]; | 56 | ulong param = vcpu->arch.regs.gpr[rb]; |
| 57 | int prio = dbell2prio(param); | 57 | int prio = dbell2prio(param); |
| 58 | 58 | ||
| 59 | if (prio < 0) | 59 | if (prio < 0) |
| @@ -65,7 +65,7 @@ static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb) | |||
| 65 | 65 | ||
| 66 | static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb) | 66 | static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb) |
| 67 | { | 67 | { |
| 68 | ulong param = vcpu->arch.gpr[rb]; | 68 | ulong param = vcpu->arch.regs.gpr[rb]; |
| 69 | int prio = dbell2prio(rb); | 69 | int prio = dbell2prio(rb); |
| 70 | int pir = param & PPC_DBELL_PIR_MASK; | 70 | int pir = param & PPC_DBELL_PIR_MASK; |
| 71 | int i; | 71 | int i; |
| @@ -94,7 +94,7 @@ static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 94 | switch (get_oc(inst)) { | 94 | switch (get_oc(inst)) { |
| 95 | case EHPRIV_OC_DEBUG: | 95 | case EHPRIV_OC_DEBUG: |
| 96 | run->exit_reason = KVM_EXIT_DEBUG; | 96 | run->exit_reason = KVM_EXIT_DEBUG; |
| 97 | run->debug.arch.address = vcpu->arch.pc; | 97 | run->debug.arch.address = vcpu->arch.regs.nip; |
| 98 | run->debug.arch.status = 0; | 98 | run->debug.arch.status = 0; |
| 99 | kvmppc_account_exit(vcpu, DEBUG_EXITS); | 99 | kvmppc_account_exit(vcpu, DEBUG_EXITS); |
| 100 | emulated = EMULATE_EXIT_USER; | 100 | emulated = EMULATE_EXIT_USER; |
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c index ddbf8f0284c0..24296f4cadc6 100644 --- a/arch/powerpc/kvm/e500_mmu.c +++ b/arch/powerpc/kvm/e500_mmu.c | |||
| @@ -513,7 +513,7 @@ void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu) | |||
| 513 | { | 513 | { |
| 514 | unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); | 514 | unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); |
| 515 | 515 | ||
| 516 | kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as); | 516 | kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.regs.nip, as); |
| 517 | } | 517 | } |
| 518 | 518 | ||
| 519 | void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu) | 519 | void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu) |
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index c878b4ffb86f..8f2985e46f6f 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c | |||
| @@ -625,8 +625,8 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, | |||
| 625 | } | 625 | } |
| 626 | 626 | ||
| 627 | #ifdef CONFIG_KVM_BOOKE_HV | 627 | #ifdef CONFIG_KVM_BOOKE_HV |
| 628 | int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type, | 628 | int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, |
| 629 | u32 *instr) | 629 | enum instruction_fetch_type type, u32 *instr) |
| 630 | { | 630 | { |
| 631 | gva_t geaddr; | 631 | gva_t geaddr; |
| 632 | hpa_t addr; | 632 | hpa_t addr; |
| @@ -715,8 +715,8 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type, | |||
| 715 | return EMULATE_DONE; | 715 | return EMULATE_DONE; |
| 716 | } | 716 | } |
| 717 | #else | 717 | #else |
| 718 | int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type, | 718 | int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, |
| 719 | u32 *instr) | 719 | enum instruction_fetch_type type, u32 *instr) |
| 720 | { | 720 | { |
| 721 | return EMULATE_AGAIN; | 721 | return EMULATE_AGAIN; |
| 722 | } | 722 | } |
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c index a382e15135e6..afde788be141 100644 --- a/arch/powerpc/kvm/emulate_loadstore.c +++ b/arch/powerpc/kvm/emulate_loadstore.c | |||
| @@ -31,6 +31,7 @@ | |||
| 31 | #include <asm/kvm_ppc.h> | 31 | #include <asm/kvm_ppc.h> |
| 32 | #include <asm/disassemble.h> | 32 | #include <asm/disassemble.h> |
| 33 | #include <asm/ppc-opcode.h> | 33 | #include <asm/ppc-opcode.h> |
| 34 | #include <asm/sstep.h> | ||
| 34 | #include "timing.h" | 35 | #include "timing.h" |
| 35 | #include "trace.h" | 36 | #include "trace.h" |
| 36 | 37 | ||
| @@ -84,8 +85,9 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) | |||
| 84 | struct kvm_run *run = vcpu->run; | 85 | struct kvm_run *run = vcpu->run; |
| 85 | u32 inst; | 86 | u32 inst; |
| 86 | int ra, rs, rt; | 87 | int ra, rs, rt; |
| 87 | enum emulation_result emulated; | 88 | enum emulation_result emulated = EMULATE_FAIL; |
| 88 | int advance = 1; | 89 | int advance = 1; |
| 90 | struct instruction_op op; | ||
| 89 | 91 | ||
| 90 | /* this default type might be overwritten by subcategories */ | 92 | /* this default type might be overwritten by subcategories */ |
| 91 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); | 93 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); |
| @@ -107,580 +109,276 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) | |||
| 107 | vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst); | 109 | vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst); |
| 108 | vcpu->arch.mmio_vsx_copy_nums = 0; | 110 | vcpu->arch.mmio_vsx_copy_nums = 0; |
| 109 | vcpu->arch.mmio_vsx_offset = 0; | 111 | vcpu->arch.mmio_vsx_offset = 0; |
| 110 | vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE; | 112 | vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE; |
| 111 | vcpu->arch.mmio_sp64_extend = 0; | 113 | vcpu->arch.mmio_sp64_extend = 0; |
| 112 | vcpu->arch.mmio_sign_extend = 0; | 114 | vcpu->arch.mmio_sign_extend = 0; |
| 113 | vcpu->arch.mmio_vmx_copy_nums = 0; | 115 | vcpu->arch.mmio_vmx_copy_nums = 0; |
| 116 | vcpu->arch.mmio_vmx_offset = 0; | ||
| 117 | vcpu->arch.mmio_host_swabbed = 0; | ||
| 114 | 118 | ||
| 115 | switch (get_op(inst)) { | 119 | emulated = EMULATE_FAIL; |
| 116 | case 31: | 120 | vcpu->arch.regs.msr = vcpu->arch.shared->msr; |
| 117 | switch (get_xop(inst)) { | 121 | vcpu->arch.regs.ccr = vcpu->arch.cr; |
| 118 | case OP_31_XOP_LWZX: | 122 | if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) { |
| 119 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | 123 | int type = op.type & INSTR_TYPE_MASK; |
| 120 | break; | 124 | int size = GETSIZE(op.type); |
| 121 | |||
| 122 | case OP_31_XOP_LWZUX: | ||
| 123 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | ||
| 124 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 125 | break; | ||
| 126 | 125 | ||
| 127 | case OP_31_XOP_LBZX: | 126 | switch (type) { |
| 128 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 127 | case LOAD: { |
| 129 | break; | 128 | int instr_byte_swap = op.type & BYTEREV; |
| 130 | |||
| 131 | case OP_31_XOP_LBZUX: | ||
| 132 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | ||
| 133 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 134 | break; | ||
| 135 | 129 | ||
| 136 | case OP_31_XOP_STDX: | 130 | if (op.type & SIGNEXT) |
| 137 | emulated = kvmppc_handle_store(run, vcpu, | 131 | emulated = kvmppc_handle_loads(run, vcpu, |
| 138 | kvmppc_get_gpr(vcpu, rs), 8, 1); | 132 | op.reg, size, !instr_byte_swap); |
| 139 | break; | 133 | else |
| 134 | emulated = kvmppc_handle_load(run, vcpu, | ||
| 135 | op.reg, size, !instr_byte_swap); | ||
| 140 | 136 | ||
| 141 | case OP_31_XOP_STDUX: | 137 | if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) |
| 142 | emulated = kvmppc_handle_store(run, vcpu, | 138 | kvmppc_set_gpr(vcpu, op.update_reg, op.ea); |
| 143 | kvmppc_get_gpr(vcpu, rs), 8, 1); | ||
| 144 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 145 | break; | ||
| 146 | 139 | ||
| 147 | case OP_31_XOP_STWX: | ||
| 148 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 149 | kvmppc_get_gpr(vcpu, rs), 4, 1); | ||
| 150 | break; | ||
| 151 | |||
| 152 | case OP_31_XOP_STWUX: | ||
| 153 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 154 | kvmppc_get_gpr(vcpu, rs), 4, 1); | ||
| 155 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 156 | break; | ||
| 157 | |||
| 158 | case OP_31_XOP_STBX: | ||
| 159 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 160 | kvmppc_get_gpr(vcpu, rs), 1, 1); | ||
| 161 | break; | ||
| 162 | |||
| 163 | case OP_31_XOP_STBUX: | ||
| 164 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 165 | kvmppc_get_gpr(vcpu, rs), 1, 1); | ||
| 166 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 167 | break; | ||
| 168 | |||
| 169 | case OP_31_XOP_LHAX: | ||
| 170 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | ||
| 171 | break; | ||
| 172 | |||
| 173 | case OP_31_XOP_LHAUX: | ||
| 174 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | ||
| 175 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 176 | break; | ||
| 177 | |||
| 178 | case OP_31_XOP_LHZX: | ||
| 179 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | ||
| 180 | break; | ||
| 181 | |||
| 182 | case OP_31_XOP_LHZUX: | ||
| 183 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | ||
| 184 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 185 | break; | ||
| 186 | |||
| 187 | case OP_31_XOP_STHX: | ||
| 188 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 189 | kvmppc_get_gpr(vcpu, rs), 2, 1); | ||
| 190 | break; | ||
| 191 | |||
| 192 | case OP_31_XOP_STHUX: | ||
| 193 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 194 | kvmppc_get_gpr(vcpu, rs), 2, 1); | ||
| 195 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 196 | break; | ||
| 197 | |||
| 198 | case OP_31_XOP_DCBST: | ||
| 199 | case OP_31_XOP_DCBF: | ||
| 200 | case OP_31_XOP_DCBI: | ||
| 201 | /* Do nothing. The guest is performing dcbi because | ||
| 202 | * hardware DMA is not snooped by the dcache, but | ||
| 203 | * emulated DMA either goes through the dcache as | ||
| 204 | * normal writes, or the host kernel has handled dcache | ||
| 205 | * coherence. */ | ||
| 206 | break; | ||
| 207 | |||
| 208 | case OP_31_XOP_LWBRX: | ||
| 209 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); | ||
| 210 | break; | ||
| 211 | |||
| 212 | case OP_31_XOP_STWBRX: | ||
| 213 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 214 | kvmppc_get_gpr(vcpu, rs), 4, 0); | ||
| 215 | break; | 140 | break; |
| 216 | 141 | } | |
| 217 | case OP_31_XOP_LHBRX: | ||
| 218 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); | ||
| 219 | break; | ||
| 220 | |||
| 221 | case OP_31_XOP_STHBRX: | ||
| 222 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 223 | kvmppc_get_gpr(vcpu, rs), 2, 0); | ||
| 224 | break; | ||
| 225 | |||
| 226 | case OP_31_XOP_LDBRX: | ||
| 227 | emulated = kvmppc_handle_load(run, vcpu, rt, 8, 0); | ||
| 228 | break; | ||
| 229 | |||
| 230 | case OP_31_XOP_STDBRX: | ||
| 231 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 232 | kvmppc_get_gpr(vcpu, rs), 8, 0); | ||
| 233 | break; | ||
| 234 | |||
| 235 | case OP_31_XOP_LDX: | ||
| 236 | emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); | ||
| 237 | break; | ||
| 238 | |||
| 239 | case OP_31_XOP_LDUX: | ||
| 240 | emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); | ||
| 241 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 242 | break; | ||
| 243 | |||
| 244 | case OP_31_XOP_LWAX: | ||
| 245 | emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1); | ||
| 246 | break; | ||
| 247 | |||
| 248 | case OP_31_XOP_LWAUX: | ||
| 249 | emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1); | ||
| 250 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 251 | break; | ||
| 252 | |||
| 253 | #ifdef CONFIG_PPC_FPU | 142 | #ifdef CONFIG_PPC_FPU |
| 254 | case OP_31_XOP_LFSX: | 143 | case LOAD_FP: |
| 255 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 256 | return EMULATE_DONE; | ||
| 257 | vcpu->arch.mmio_sp64_extend = 1; | ||
| 258 | emulated = kvmppc_handle_load(run, vcpu, | ||
| 259 | KVM_MMIO_REG_FPR|rt, 4, 1); | ||
| 260 | break; | ||
| 261 | |||
| 262 | case OP_31_XOP_LFSUX: | ||
| 263 | if (kvmppc_check_fp_disabled(vcpu)) | 144 | if (kvmppc_check_fp_disabled(vcpu)) |
| 264 | return EMULATE_DONE; | 145 | return EMULATE_DONE; |
| 265 | vcpu->arch.mmio_sp64_extend = 1; | ||
| 266 | emulated = kvmppc_handle_load(run, vcpu, | ||
| 267 | KVM_MMIO_REG_FPR|rt, 4, 1); | ||
| 268 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 269 | break; | ||
| 270 | 146 | ||
| 271 | case OP_31_XOP_LFDX: | 147 | if (op.type & FPCONV) |
| 272 | if (kvmppc_check_fp_disabled(vcpu)) | 148 | vcpu->arch.mmio_sp64_extend = 1; |
| 273 | return EMULATE_DONE; | ||
| 274 | emulated = kvmppc_handle_load(run, vcpu, | ||
| 275 | KVM_MMIO_REG_FPR|rt, 8, 1); | ||
| 276 | break; | ||
| 277 | 149 | ||
| 278 | case OP_31_XOP_LFDUX: | 150 | if (op.type & SIGNEXT) |
| 279 | if (kvmppc_check_fp_disabled(vcpu)) | 151 | emulated = kvmppc_handle_loads(run, vcpu, |
| 280 | return EMULATE_DONE; | 152 | KVM_MMIO_REG_FPR|op.reg, size, 1); |
| 281 | emulated = kvmppc_handle_load(run, vcpu, | 153 | else |
| 282 | KVM_MMIO_REG_FPR|rt, 8, 1); | 154 | emulated = kvmppc_handle_load(run, vcpu, |
| 283 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | 155 | KVM_MMIO_REG_FPR|op.reg, size, 1); |
| 284 | break; | ||
| 285 | |||
| 286 | case OP_31_XOP_LFIWAX: | ||
| 287 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 288 | return EMULATE_DONE; | ||
| 289 | emulated = kvmppc_handle_loads(run, vcpu, | ||
| 290 | KVM_MMIO_REG_FPR|rt, 4, 1); | ||
| 291 | break; | ||
| 292 | 156 | ||
| 293 | case OP_31_XOP_LFIWZX: | 157 | if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) |
| 294 | if (kvmppc_check_fp_disabled(vcpu)) | 158 | kvmppc_set_gpr(vcpu, op.update_reg, op.ea); |
| 295 | return EMULATE_DONE; | ||
| 296 | emulated = kvmppc_handle_load(run, vcpu, | ||
| 297 | KVM_MMIO_REG_FPR|rt, 4, 1); | ||
| 298 | break; | ||
| 299 | 159 | ||
| 300 | case OP_31_XOP_STFSX: | ||
| 301 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 302 | return EMULATE_DONE; | ||
| 303 | vcpu->arch.mmio_sp64_extend = 1; | ||
| 304 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 305 | VCPU_FPR(vcpu, rs), 4, 1); | ||
| 306 | break; | 160 | break; |
| 307 | 161 | #endif | |
| 308 | case OP_31_XOP_STFSUX: | 162 | #ifdef CONFIG_ALTIVEC |
| 309 | if (kvmppc_check_fp_disabled(vcpu)) | 163 | case LOAD_VMX: |
| 310 | return EMULATE_DONE; | 164 | if (kvmppc_check_altivec_disabled(vcpu)) |
| 311 | vcpu->arch.mmio_sp64_extend = 1; | ||
| 312 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 313 | VCPU_FPR(vcpu, rs), 4, 1); | ||
| 314 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 315 | break; | ||
| 316 | |||
| 317 | case OP_31_XOP_STFDX: | ||
| 318 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 319 | return EMULATE_DONE; | ||
| 320 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 321 | VCPU_FPR(vcpu, rs), 8, 1); | ||
| 322 | break; | ||
| 323 | |||
| 324 | case OP_31_XOP_STFDUX: | ||
| 325 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 326 | return EMULATE_DONE; | 165 | return EMULATE_DONE; |
| 327 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 328 | VCPU_FPR(vcpu, rs), 8, 1); | ||
| 329 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 330 | break; | ||
| 331 | 166 | ||
| 332 | case OP_31_XOP_STFIWX: | 167 | /* Hardware enforces alignment of VMX accesses */ |
| 333 | if (kvmppc_check_fp_disabled(vcpu)) | 168 | vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1); |
| 334 | return EMULATE_DONE; | 169 | vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1); |
| 335 | emulated = kvmppc_handle_store(run, vcpu, | 170 | |
| 336 | VCPU_FPR(vcpu, rs), 4, 1); | 171 | if (size == 16) { /* lvx */ |
| 172 | vcpu->arch.mmio_copy_type = | ||
| 173 | KVMPPC_VMX_COPY_DWORD; | ||
| 174 | } else if (size == 4) { /* lvewx */ | ||
| 175 | vcpu->arch.mmio_copy_type = | ||
| 176 | KVMPPC_VMX_COPY_WORD; | ||
| 177 | } else if (size == 2) { /* lvehx */ | ||
| 178 | vcpu->arch.mmio_copy_type = | ||
| 179 | KVMPPC_VMX_COPY_HWORD; | ||
| 180 | } else if (size == 1) { /* lvebx */ | ||
| 181 | vcpu->arch.mmio_copy_type = | ||
| 182 | KVMPPC_VMX_COPY_BYTE; | ||
| 183 | } else | ||
| 184 | break; | ||
| 185 | |||
| 186 | vcpu->arch.mmio_vmx_offset = | ||
| 187 | (vcpu->arch.vaddr_accessed & 0xf)/size; | ||
| 188 | |||
| 189 | if (size == 16) { | ||
| 190 | vcpu->arch.mmio_vmx_copy_nums = 2; | ||
| 191 | emulated = kvmppc_handle_vmx_load(run, | ||
| 192 | vcpu, KVM_MMIO_REG_VMX|op.reg, | ||
| 193 | 8, 1); | ||
| 194 | } else { | ||
| 195 | vcpu->arch.mmio_vmx_copy_nums = 1; | ||
| 196 | emulated = kvmppc_handle_vmx_load(run, vcpu, | ||
| 197 | KVM_MMIO_REG_VMX|op.reg, | ||
| 198 | size, 1); | ||
| 199 | } | ||
| 337 | break; | 200 | break; |
| 338 | #endif | 201 | #endif |
| 339 | |||
| 340 | #ifdef CONFIG_VSX | 202 | #ifdef CONFIG_VSX |
| 341 | case OP_31_XOP_LXSDX: | 203 | case LOAD_VSX: { |
| 342 | if (kvmppc_check_vsx_disabled(vcpu)) | 204 | int io_size_each; |
| 343 | return EMULATE_DONE; | 205 | |
| 344 | vcpu->arch.mmio_vsx_copy_nums = 1; | 206 | if (op.vsx_flags & VSX_CHECK_VEC) { |
| 345 | vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; | 207 | if (kvmppc_check_altivec_disabled(vcpu)) |
| 346 | emulated = kvmppc_handle_vsx_load(run, vcpu, | 208 | return EMULATE_DONE; |
| 347 | KVM_MMIO_REG_VSX|rt, 8, 1, 0); | 209 | } else { |
| 348 | break; | 210 | if (kvmppc_check_vsx_disabled(vcpu)) |
| 349 | 211 | return EMULATE_DONE; | |
| 350 | case OP_31_XOP_LXSSPX: | 212 | } |
| 351 | if (kvmppc_check_vsx_disabled(vcpu)) | 213 | |
| 352 | return EMULATE_DONE; | 214 | if (op.vsx_flags & VSX_FPCONV) |
| 353 | vcpu->arch.mmio_vsx_copy_nums = 1; | 215 | vcpu->arch.mmio_sp64_extend = 1; |
| 354 | vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; | 216 | |
| 355 | vcpu->arch.mmio_sp64_extend = 1; | 217 | if (op.element_size == 8) { |
| 356 | emulated = kvmppc_handle_vsx_load(run, vcpu, | 218 | if (op.vsx_flags & VSX_SPLAT) |
| 357 | KVM_MMIO_REG_VSX|rt, 4, 1, 0); | 219 | vcpu->arch.mmio_copy_type = |
| 358 | break; | 220 | KVMPPC_VSX_COPY_DWORD_LOAD_DUMP; |
| 221 | else | ||
| 222 | vcpu->arch.mmio_copy_type = | ||
| 223 | KVMPPC_VSX_COPY_DWORD; | ||
| 224 | } else if (op.element_size == 4) { | ||
| 225 | if (op.vsx_flags & VSX_SPLAT) | ||
| 226 | vcpu->arch.mmio_copy_type = | ||
| 227 | KVMPPC_VSX_COPY_WORD_LOAD_DUMP; | ||
| 228 | else | ||
| 229 | vcpu->arch.mmio_copy_type = | ||
| 230 | KVMPPC_VSX_COPY_WORD; | ||
| 231 | } else | ||
| 232 | break; | ||
| 233 | |||
| 234 | if (size < op.element_size) { | ||
| 235 | /* precision conversion case: lxsspx, etc */ | ||
| 236 | vcpu->arch.mmio_vsx_copy_nums = 1; | ||
| 237 | io_size_each = size; | ||
| 238 | } else { /* lxvw4x, lxvd2x, etc */ | ||
| 239 | vcpu->arch.mmio_vsx_copy_nums = | ||
| 240 | size/op.element_size; | ||
| 241 | io_size_each = op.element_size; | ||
| 242 | } | ||
| 359 | 243 | ||
| 360 | case OP_31_XOP_LXSIWAX: | ||
| 361 | if (kvmppc_check_vsx_disabled(vcpu)) | ||
| 362 | return EMULATE_DONE; | ||
| 363 | vcpu->arch.mmio_vsx_copy_nums = 1; | ||
| 364 | vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; | ||
| 365 | emulated = kvmppc_handle_vsx_load(run, vcpu, | 244 | emulated = kvmppc_handle_vsx_load(run, vcpu, |
| 366 | KVM_MMIO_REG_VSX|rt, 4, 1, 1); | 245 | KVM_MMIO_REG_VSX | (op.reg & 0x1f), |
| 367 | break; | 246 | io_size_each, 1, op.type & SIGNEXT); |
| 368 | |||
| 369 | case OP_31_XOP_LXSIWZX: | ||
| 370 | if (kvmppc_check_vsx_disabled(vcpu)) | ||
| 371 | return EMULATE_DONE; | ||
| 372 | vcpu->arch.mmio_vsx_copy_nums = 1; | ||
| 373 | vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; | ||
| 374 | emulated = kvmppc_handle_vsx_load(run, vcpu, | ||
| 375 | KVM_MMIO_REG_VSX|rt, 4, 1, 0); | ||
| 376 | break; | 247 | break; |
| 248 | } | ||
| 249 | #endif | ||
| 250 | case STORE: | ||
| 251 | /* If byte reversal is needed, op.val has already been | ||
| 252 | * reversed by analyse_instr(). | ||
| 253 | */ | ||
| 254 | emulated = kvmppc_handle_store(run, vcpu, op.val, | ||
| 255 | size, 1); | ||
| 377 | 256 | ||
| 378 | case OP_31_XOP_LXVD2X: | 257 | if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) |
| 379 | /* | 258 | kvmppc_set_gpr(vcpu, op.update_reg, op.ea); |
| 380 | * In this case, the official load/store process is like this: | ||
| 381 | * Step1, exit from vm by page fault isr, then kvm save vsr. | ||
| 382 | * Please see guest_exit_cont->store_fp_state->SAVE_32VSRS | ||
| 383 | * as reference. | ||
| 384 | * | ||
| 385 | * Step2, copy data between memory and VCPU | ||
| 386 | * Notice: for LXVD2X/STXVD2X/LXVW4X/STXVW4X, we use | ||
| 387 | * 2copies*8bytes or 4copies*4bytes | ||
| 388 | * to simulate one copy of 16bytes. | ||
| 389 | * Also there is an endianness issue here; we should note the | ||
| 390 | * layout of memory. | ||
| 391 | * Please see the LXVD2X_ROT/STXVD2X_ROT macros for more reference. | ||
| 392 | * If host is little-endian, kvm will call XXSWAPD for | ||
| 393 | * LXVD2X_ROT/STXVD2X_ROT. | ||
| 394 | * So, if the host is little-endian, | ||
| 395 | * the positions in memory should be swapped. | ||
| 396 | * | ||
| 397 | * Step3, return to guest, kvm reset register. | ||
| 398 | * Please see kvmppc_hv_entry->load_fp_state->REST_32VSRS | ||
| 399 | * as reference. | ||
| 400 | */ | ||
| 401 | if (kvmppc_check_vsx_disabled(vcpu)) | ||
| 402 | return EMULATE_DONE; | ||
| 403 | vcpu->arch.mmio_vsx_copy_nums = 2; | ||
| 404 | vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; | ||
| 405 | emulated = kvmppc_handle_vsx_load(run, vcpu, | ||
| 406 | KVM_MMIO_REG_VSX|rt, 8, 1, 0); | ||
| 407 | break; | ||
| 408 | 259 | ||
| 409 | case OP_31_XOP_LXVW4X: | ||
| 410 | if (kvmppc_check_vsx_disabled(vcpu)) | ||
| 411 | return EMULATE_DONE; | ||
| 412 | vcpu->arch.mmio_vsx_copy_nums = 4; | ||
| 413 | vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD; | ||
| 414 | emulated = kvmppc_handle_vsx_load(run, vcpu, | ||
| 415 | KVM_MMIO_REG_VSX|rt, 4, 1, 0); | ||
| 416 | break; | 260 | break; |
| 417 | 261 | #ifdef CONFIG_PPC_FPU | |
| 418 | case OP_31_XOP_LXVDSX: | 262 | case STORE_FP: |
| 419 | if (kvmppc_check_vsx_disabled(vcpu)) | 263 | if (kvmppc_check_fp_disabled(vcpu)) |
| 420 | return EMULATE_DONE; | 264 | return EMULATE_DONE; |
| 421 | vcpu->arch.mmio_vsx_copy_nums = 1; | ||
| 422 | vcpu->arch.mmio_vsx_copy_type = | ||
| 423 | KVMPPC_VSX_COPY_DWORD_LOAD_DUMP; | ||
| 424 | emulated = kvmppc_handle_vsx_load(run, vcpu, | ||
| 425 | KVM_MMIO_REG_VSX|rt, 8, 1, 0); | ||
| 426 | break; | ||
| 427 | 265 | ||
| 428 | case OP_31_XOP_STXSDX: | 266 | /* The FP registers need to be flushed so that |
| 429 | if (kvmppc_check_vsx_disabled(vcpu)) | 267 | * kvmppc_handle_store() can read actual FP vals |
| 430 | return EMULATE_DONE; | 268 | * from vcpu->arch. |
| 431 | vcpu->arch.mmio_vsx_copy_nums = 1; | 269 | */ |
| 432 | vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; | 270 | if (vcpu->kvm->arch.kvm_ops->giveup_ext) |
| 433 | emulated = kvmppc_handle_vsx_store(run, vcpu, | 271 | vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, |
| 434 | rs, 8, 1); | 272 | MSR_FP); |
| 435 | break; | ||
| 436 | 273 | ||
| 437 | case OP_31_XOP_STXSSPX: | 274 | if (op.type & FPCONV) |
| 438 | if (kvmppc_check_vsx_disabled(vcpu)) | 275 | vcpu->arch.mmio_sp64_extend = 1; |
| 439 | return EMULATE_DONE; | ||
| 440 | vcpu->arch.mmio_vsx_copy_nums = 1; | ||
| 441 | vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; | ||
| 442 | vcpu->arch.mmio_sp64_extend = 1; | ||
| 443 | emulated = kvmppc_handle_vsx_store(run, vcpu, | ||
| 444 | rs, 4, 1); | ||
| 445 | break; | ||
| 446 | 276 | ||
| 447 | case OP_31_XOP_STXSIWX: | 277 | emulated = kvmppc_handle_store(run, vcpu, |
| 448 | if (kvmppc_check_vsx_disabled(vcpu)) | 278 | VCPU_FPR(vcpu, op.reg), size, 1); |
| 449 | return EMULATE_DONE; | ||
| 450 | vcpu->arch.mmio_vsx_offset = 1; | ||
| 451 | vcpu->arch.mmio_vsx_copy_nums = 1; | ||
| 452 | vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD; | ||
| 453 | emulated = kvmppc_handle_vsx_store(run, vcpu, | ||
| 454 | rs, 4, 1); | ||
| 455 | break; | ||
| 456 | 279 | ||
| 457 | case OP_31_XOP_STXVD2X: | 280 | if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) |
| 458 | if (kvmppc_check_vsx_disabled(vcpu)) | 281 | kvmppc_set_gpr(vcpu, op.update_reg, op.ea); |
| 459 | return EMULATE_DONE; | ||
| 460 | vcpu->arch.mmio_vsx_copy_nums = 2; | ||
| 461 | vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; | ||
| 462 | emulated = kvmppc_handle_vsx_store(run, vcpu, | ||
| 463 | rs, 8, 1); | ||
| 464 | break; | ||
| 465 | 282 | ||
| 466 | case OP_31_XOP_STXVW4X: | ||
| 467 | if (kvmppc_check_vsx_disabled(vcpu)) | ||
| 468 | return EMULATE_DONE; | ||
| 469 | vcpu->arch.mmio_vsx_copy_nums = 4; | ||
| 470 | vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD; | ||
| 471 | emulated = kvmppc_handle_vsx_store(run, vcpu, | ||
| 472 | rs, 4, 1); | ||
| 473 | break; | 283 | break; |
| 474 | #endif /* CONFIG_VSX */ | 284 | #endif |
| 475 | |||
| 476 | #ifdef CONFIG_ALTIVEC | 285 | #ifdef CONFIG_ALTIVEC |
| 477 | case OP_31_XOP_LVX: | 286 | case STORE_VMX: |
| 478 | if (kvmppc_check_altivec_disabled(vcpu)) | 287 | if (kvmppc_check_altivec_disabled(vcpu)) |
| 479 | return EMULATE_DONE; | 288 | return EMULATE_DONE; |
| 480 | vcpu->arch.vaddr_accessed &= ~0xFULL; | ||
| 481 | vcpu->arch.paddr_accessed &= ~0xFULL; | ||
| 482 | vcpu->arch.mmio_vmx_copy_nums = 2; | ||
| 483 | emulated = kvmppc_handle_load128_by2x64(run, vcpu, | ||
| 484 | KVM_MMIO_REG_VMX|rt, 1); | ||
| 485 | break; | ||
| 486 | 289 | ||
| 487 | case OP_31_XOP_STVX: | 290 | /* Hardware enforces alignment of VMX accesses. */ |
| 488 | if (kvmppc_check_altivec_disabled(vcpu)) | 291 | vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1); |
| 489 | return EMULATE_DONE; | 292 | vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1); |
| 490 | vcpu->arch.vaddr_accessed &= ~0xFULL; | 293 | |
| 491 | vcpu->arch.paddr_accessed &= ~0xFULL; | 294 | if (vcpu->kvm->arch.kvm_ops->giveup_ext) |
| 492 | vcpu->arch.mmio_vmx_copy_nums = 2; | 295 | vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, |
| 493 | emulated = kvmppc_handle_store128_by2x64(run, vcpu, | 296 | MSR_VEC); |
| 494 | rs, 1); | 297 | if (size == 16) { /* stvx */ |
| 495 | break; | 298 | vcpu->arch.mmio_copy_type = |
| 496 | #endif /* CONFIG_ALTIVEC */ | 299 | KVMPPC_VMX_COPY_DWORD; |
| 300 | } else if (size == 4) { /* stvewx */ | ||
| 301 | vcpu->arch.mmio_copy_type = | ||
| 302 | KVMPPC_VMX_COPY_WORD; | ||
| 303 | } else if (size == 2) { /* stvehx */ | ||
| 304 | vcpu->arch.mmio_copy_type = | ||
| 305 | KVMPPC_VMX_COPY_HWORD; | ||
| 306 | } else if (size == 1) { /* stvebx */ | ||
| 307 | vcpu->arch.mmio_copy_type = | ||
| 308 | KVMPPC_VMX_COPY_BYTE; | ||
| 309 | } else | ||
| 310 | break; | ||
| 311 | |||
| 312 | vcpu->arch.mmio_vmx_offset = | ||
| 313 | (vcpu->arch.vaddr_accessed & 0xf)/size; | ||
| 314 | |||
| 315 | if (size == 16) { | ||
| 316 | vcpu->arch.mmio_vmx_copy_nums = 2; | ||
| 317 | emulated = kvmppc_handle_vmx_store(run, | ||
| 318 | vcpu, op.reg, 8, 1); | ||
| 319 | } else { | ||
| 320 | vcpu->arch.mmio_vmx_copy_nums = 1; | ||
| 321 | emulated = kvmppc_handle_vmx_store(run, | ||
| 322 | vcpu, op.reg, size, 1); | ||
| 323 | } | ||
| 497 | 324 | ||
| 498 | default: | ||
| 499 | emulated = EMULATE_FAIL; | ||
| 500 | break; | 325 | break; |
| 501 | } | ||
| 502 | break; | ||
| 503 | |||
| 504 | case OP_LWZ: | ||
| 505 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | ||
| 506 | break; | ||
| 507 | |||
| 508 | #ifdef CONFIG_PPC_FPU | ||
| 509 | case OP_STFS: | ||
| 510 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 511 | return EMULATE_DONE; | ||
| 512 | vcpu->arch.mmio_sp64_extend = 1; | ||
| 513 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 514 | VCPU_FPR(vcpu, rs), | ||
| 515 | 4, 1); | ||
| 516 | break; | ||
| 517 | |||
| 518 | case OP_STFSU: | ||
| 519 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 520 | return EMULATE_DONE; | ||
| 521 | vcpu->arch.mmio_sp64_extend = 1; | ||
| 522 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 523 | VCPU_FPR(vcpu, rs), | ||
| 524 | 4, 1); | ||
| 525 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 526 | break; | ||
| 527 | |||
| 528 | case OP_STFD: | ||
| 529 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 530 | return EMULATE_DONE; | ||
| 531 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 532 | VCPU_FPR(vcpu, rs), | ||
| 533 | 8, 1); | ||
| 534 | break; | ||
| 535 | |||
| 536 | case OP_STFDU: | ||
| 537 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 538 | return EMULATE_DONE; | ||
| 539 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 540 | VCPU_FPR(vcpu, rs), | ||
| 541 | 8, 1); | ||
| 542 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 543 | break; | ||
| 544 | #endif | 326 | #endif |
| 327 | #ifdef CONFIG_VSX | ||
| 328 | case STORE_VSX: { | ||
| 329 | int io_size_each; | ||
| 330 | |||
| 331 | if (op.vsx_flags & VSX_CHECK_VEC) { | ||
| 332 | if (kvmppc_check_altivec_disabled(vcpu)) | ||
| 333 | return EMULATE_DONE; | ||
| 334 | } else { | ||
| 335 | if (kvmppc_check_vsx_disabled(vcpu)) | ||
| 336 | return EMULATE_DONE; | ||
| 337 | } | ||
| 338 | |||
| 339 | if (vcpu->kvm->arch.kvm_ops->giveup_ext) | ||
| 340 | vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, | ||
| 341 | MSR_VSX); | ||
| 342 | |||
| 343 | if (op.vsx_flags & VSX_FPCONV) | ||
| 344 | vcpu->arch.mmio_sp64_extend = 1; | ||
| 345 | |||
| 346 | if (op.element_size == 8) | ||
| 347 | vcpu->arch.mmio_copy_type = | ||
| 348 | KVMPPC_VSX_COPY_DWORD; | ||
| 349 | else if (op.element_size == 4) | ||
| 350 | vcpu->arch.mmio_copy_type = | ||
| 351 | KVMPPC_VSX_COPY_WORD; | ||
| 352 | else | ||
| 353 | break; | ||
| 354 | |||
| 355 | if (size < op.element_size) { | ||
| 356 | /* precision conversion case, like stxsspx */ | ||
| 357 | vcpu->arch.mmio_vsx_copy_nums = 1; | ||
| 358 | io_size_each = size; | ||
| 359 | } else { /* stxvw4x, stxvd2x, etc */ | ||
| 360 | vcpu->arch.mmio_vsx_copy_nums = | ||
| 361 | size/op.element_size; | ||
| 362 | io_size_each = op.element_size; | ||
| 363 | } | ||
| 545 | 364 | ||
| 546 | case OP_LD: | 365 | emulated = kvmppc_handle_vsx_store(run, vcpu, |
| 547 | rt = get_rt(inst); | 366 | op.reg & 0x1f, io_size_each, 1); |
| 548 | switch (inst & 3) { | ||
| 549 | case 0: /* ld */ | ||
| 550 | emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); | ||
| 551 | break; | ||
| 552 | case 1: /* ldu */ | ||
| 553 | emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); | ||
| 554 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 555 | break; | ||
| 556 | case 2: /* lwa */ | ||
| 557 | emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1); | ||
| 558 | break; | 367 | break; |
| 559 | default: | ||
| 560 | emulated = EMULATE_FAIL; | ||
| 561 | } | 368 | } |
| 562 | break; | 369 | #endif |
| 563 | 370 | case CACHEOP: | |
| 564 | case OP_LWZU: | 371 | /* Do nothing. The guest is performing dcbi because |
| 565 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | 372 | * hardware DMA is not snooped by the dcache, but |
| 566 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | 373 | * emulated DMA either goes through the dcache as |
| 567 | break; | 374 | * normal writes, or the host kernel has handled dcache |
| 568 | 375 | * coherence. | |
| 569 | case OP_LBZ: | 376 | */ |
| 570 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 377 | emulated = EMULATE_DONE; |
| 571 | break; | ||
| 572 | |||
| 573 | case OP_LBZU: | ||
| 574 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | ||
| 575 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 576 | break; | ||
| 577 | |||
| 578 | case OP_STW: | ||
| 579 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 580 | kvmppc_get_gpr(vcpu, rs), | ||
| 581 | 4, 1); | ||
| 582 | break; | ||
| 583 | |||
| 584 | case OP_STD: | ||
| 585 | rs = get_rs(inst); | ||
| 586 | switch (inst & 3) { | ||
| 587 | case 0: /* std */ | ||
| 588 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 589 | kvmppc_get_gpr(vcpu, rs), 8, 1); | ||
| 590 | break; | ||
| 591 | case 1: /* stdu */ | ||
| 592 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 593 | kvmppc_get_gpr(vcpu, rs), 8, 1); | ||
| 594 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 595 | break; | 378 | break; |
| 596 | default: | 379 | default: |
| 597 | emulated = EMULATE_FAIL; | 380 | break; |
| 598 | } | 381 | } |
| 599 | break; | ||
| 600 | |||
| 601 | case OP_STWU: | ||
| 602 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 603 | kvmppc_get_gpr(vcpu, rs), 4, 1); | ||
| 604 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 605 | break; | ||
| 606 | |||
| 607 | case OP_STB: | ||
| 608 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 609 | kvmppc_get_gpr(vcpu, rs), 1, 1); | ||
| 610 | break; | ||
| 611 | |||
| 612 | case OP_STBU: | ||
| 613 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 614 | kvmppc_get_gpr(vcpu, rs), 1, 1); | ||
| 615 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 616 | break; | ||
| 617 | |||
| 618 | case OP_LHZ: | ||
| 619 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | ||
| 620 | break; | ||
| 621 | |||
| 622 | case OP_LHZU: | ||
| 623 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | ||
| 624 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 625 | break; | ||
| 626 | |||
| 627 | case OP_LHA: | ||
| 628 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | ||
| 629 | break; | ||
| 630 | |||
| 631 | case OP_LHAU: | ||
| 632 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | ||
| 633 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 634 | break; | ||
| 635 | |||
| 636 | case OP_STH: | ||
| 637 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 638 | kvmppc_get_gpr(vcpu, rs), 2, 1); | ||
| 639 | break; | ||
| 640 | |||
| 641 | case OP_STHU: | ||
| 642 | emulated = kvmppc_handle_store(run, vcpu, | ||
| 643 | kvmppc_get_gpr(vcpu, rs), 2, 1); | ||
| 644 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 645 | break; | ||
| 646 | |||
| 647 | #ifdef CONFIG_PPC_FPU | ||
| 648 | case OP_LFS: | ||
| 649 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 650 | return EMULATE_DONE; | ||
| 651 | vcpu->arch.mmio_sp64_extend = 1; | ||
| 652 | emulated = kvmppc_handle_load(run, vcpu, | ||
| 653 | KVM_MMIO_REG_FPR|rt, 4, 1); | ||
| 654 | break; | ||
| 655 | |||
| 656 | case OP_LFSU: | ||
| 657 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 658 | return EMULATE_DONE; | ||
| 659 | vcpu->arch.mmio_sp64_extend = 1; | ||
| 660 | emulated = kvmppc_handle_load(run, vcpu, | ||
| 661 | KVM_MMIO_REG_FPR|rt, 4, 1); | ||
| 662 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 663 | break; | ||
| 664 | |||
| 665 | case OP_LFD: | ||
| 666 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 667 | return EMULATE_DONE; | ||
| 668 | emulated = kvmppc_handle_load(run, vcpu, | ||
| 669 | KVM_MMIO_REG_FPR|rt, 8, 1); | ||
| 670 | break; | ||
| 671 | |||
| 672 | case OP_LFDU: | ||
| 673 | if (kvmppc_check_fp_disabled(vcpu)) | ||
| 674 | return EMULATE_DONE; | ||
| 675 | emulated = kvmppc_handle_load(run, vcpu, | ||
| 676 | KVM_MMIO_REG_FPR|rt, 8, 1); | ||
| 677 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); | ||
| 678 | break; | ||
| 679 | #endif | ||
| 680 | |||
| 681 | default: | ||
| 682 | emulated = EMULATE_FAIL; | ||
| 683 | break; | ||
| 684 | } | 382 | } |
| 685 | 383 | ||
| 686 | if (emulated == EMULATE_FAIL) { | 384 | if (emulated == EMULATE_FAIL) { |
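All of the per-opcode cases deleted above are subsumed by analyse_instr() from the generic sstep code. A worked example of the contract the new dispatch relies on, with illustrative values (flag spellings per asm/sstep.h):

	struct instruction_op op;

	/* "lwzu r5, 8(r4)" encodes as 0x84a40008; with r4 = 0x1000: */
	if (analyse_instr(&op, &vcpu->arch.regs, 0x84a40008) == 0) {
		/* op.type = LOAD | UPDATE, GETSIZE(op.type) == 4,
		 * op.reg = 5, op.update_reg = 4, op.ea = 0x1008.
		 * The LOAD case then issues a 4-byte MMIO load into r5
		 * and, on success, writes op.ea back into r4 -- the same
		 * behaviour as the old OP_LWZU/OP_31_XOP_LWZUX cases. */
	}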
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 3764d000872e..0e8c20c5eaac 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c | |||
| @@ -648,9 +648,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
| 648 | #endif | 648 | #endif |
| 649 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 649 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
| 650 | case KVM_CAP_PPC_HTM: | 650 | case KVM_CAP_PPC_HTM: |
| 651 | r = hv_enabled && | 651 | r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) || |
| 652 | (!!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) || | 652 | (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)); |
| 653 | cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)); | ||
| 654 | break; | 653 | break; |
| 655 | #endif | 654 | #endif |
| 656 | default: | 655 | default: |
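The reordered KVM_CAP_PPC_HTM test means the capability is now reported whenever the host CPU advertises HTM to userspace, not only under HV KVM; the HV-assist clause remains for POWER9. A sketch of how userspace would observe the difference, using the standard KVM_CHECK_EXTENSION ioctl (error handling trimmed):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int have_guest_htm(void)
	{
		int fd = open("/dev/kvm", O_RDWR);
		int r = (fd >= 0) ? ioctl(fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_HTM) : 0;

		if (fd >= 0)
			close(fd);
		return r > 0;	/* now also > 0 on PR KVM hosts with PPC_FEATURE2_HTM */
	}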
| @@ -907,6 +906,26 @@ static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu, | |||
| 907 | } | 906 | } |
| 908 | } | 907 | } |
| 909 | 908 | ||
| 909 | static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu, | ||
| 910 | u32 gpr) | ||
| 911 | { | ||
| 912 | union kvmppc_one_reg val; | ||
| 913 | int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; | ||
| 914 | |||
| 915 | if (vcpu->arch.mmio_vsx_tx_sx_enabled) { | ||
| 916 | val.vsx32val[0] = gpr; | ||
| 917 | val.vsx32val[1] = gpr; | ||
| 918 | val.vsx32val[2] = gpr; | ||
| 919 | val.vsx32val[3] = gpr; | ||
| 920 | VCPU_VSX_VR(vcpu, index) = val.vval; | ||
| 921 | } else { | ||
| 922 | val.vsx32val[0] = gpr; | ||
| 923 | val.vsx32val[1] = gpr; | ||
| 924 | VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0]; | ||
| 925 | VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0]; | ||
| 926 | } | ||
| 927 | } | ||
| 928 | |||
| 910 | static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu, | 929 | static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu, |
| 911 | u32 gpr32) | 930 | u32 gpr32) |
| 912 | { | 931 | { |
| @@ -933,30 +952,110 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu, | |||
| 933 | #endif /* CONFIG_VSX */ | 952 | #endif /* CONFIG_VSX */ |
| 934 | 953 | ||
| 935 | #ifdef CONFIG_ALTIVEC | 954 | #ifdef CONFIG_ALTIVEC |
| 955 | static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu, | ||
| 956 | int index, int element_size) | ||
| 957 | { | ||
| 958 | int offset; | ||
| 959 | int elts = sizeof(vector128)/element_size; | ||
| 960 | |||
| 961 | if ((index < 0) || (index >= elts)) | ||
| 962 | return -1; | ||
| 963 | |||
| 964 | if (kvmppc_need_byteswap(vcpu)) | ||
| 965 | offset = elts - index - 1; | ||
| 966 | else | ||
| 967 | offset = index; | ||
| 968 | |||
| 969 | return offset; | ||
| 970 | } | ||
| 971 | |||
| 972 | static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu, | ||
| 973 | int index) | ||
| 974 | { | ||
| 975 | return kvmppc_get_vmx_offset_generic(vcpu, index, 8); | ||
| 976 | } | ||
| 977 | |||
| 978 | static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu, | ||
| 979 | int index) | ||
| 980 | { | ||
| 981 | return kvmppc_get_vmx_offset_generic(vcpu, index, 4); | ||
| 982 | } | ||
| 983 | |||
| 984 | static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu, | ||
| 985 | int index) | ||
| 986 | { | ||
| 987 | return kvmppc_get_vmx_offset_generic(vcpu, index, 2); | ||
| 988 | } | ||
| 989 | |||
| 990 | static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu, | ||
| 991 | int index) | ||
| 992 | { | ||
| 993 | return kvmppc_get_vmx_offset_generic(vcpu, index, 1); | ||
| 994 | } | ||
| 995 | |||
| 996 | |||
| 936 | static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu, | 997 | static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu, |
| 937 | u64 gpr) | 998 | u64 gpr) |
| 938 | { | 999 | { |
| 1000 | union kvmppc_one_reg val; | ||
| 1001 | int offset = kvmppc_get_vmx_dword_offset(vcpu, | ||
| 1002 | vcpu->arch.mmio_vmx_offset); | ||
| 939 | int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; | 1003 | int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; |
| 940 | u32 hi, lo; | ||
| 941 | u32 di; | ||
| 942 | 1004 | ||
| 943 | #ifdef __BIG_ENDIAN | 1005 | if (offset == -1) |
| 944 | hi = gpr >> 32; | 1006 | return; |
| 945 | lo = gpr & 0xffffffff; | 1007 | |
| 946 | #else | 1008 | val.vval = VCPU_VSX_VR(vcpu, index); |
| 947 | lo = gpr >> 32; | 1009 | val.vsxval[offset] = gpr; |
| 948 | hi = gpr & 0xffffffff; | 1010 | VCPU_VSX_VR(vcpu, index) = val.vval; |
| 949 | #endif | 1011 | } |
| 1012 | |||
| 1013 | static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu, | ||
| 1014 | u32 gpr32) | ||
| 1015 | { | ||
| 1016 | union kvmppc_one_reg val; | ||
| 1017 | int offset = kvmppc_get_vmx_word_offset(vcpu, | ||
| 1018 | vcpu->arch.mmio_vmx_offset); | ||
| 1019 | int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; | ||
| 950 | 1020 | ||
| 951 | di = 2 - vcpu->arch.mmio_vmx_copy_nums; /* doubleword index */ | 1021 | if (offset == -1) |
| 952 | if (di > 1) | ||
| 953 | return; | 1022 | return; |
| 954 | 1023 | ||
| 955 | if (vcpu->arch.mmio_host_swabbed) | 1024 | val.vval = VCPU_VSX_VR(vcpu, index); |
| 956 | di = 1 - di; | 1025 | val.vsx32val[offset] = gpr32; |
| 1026 | VCPU_VSX_VR(vcpu, index) = val.vval; | ||
| 1027 | } | ||
| 1028 | |||
| 1029 | static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu, | ||
| 1030 | u16 gpr16) | ||
| 1031 | { | ||
| 1032 | union kvmppc_one_reg val; | ||
| 1033 | int offset = kvmppc_get_vmx_hword_offset(vcpu, | ||
| 1034 | vcpu->arch.mmio_vmx_offset); | ||
| 1035 | int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; | ||
| 1036 | |||
| 1037 | if (offset == -1) | ||
| 1038 | return; | ||
| 1039 | |||
| 1040 | val.vval = VCPU_VSX_VR(vcpu, index); | ||
| 1041 | val.vsx16val[offset] = gpr16; | ||
| 1042 | VCPU_VSX_VR(vcpu, index) = val.vval; | ||
| 1043 | } | ||
| 1044 | |||
| 1045 | static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu, | ||
| 1046 | u8 gpr8) | ||
| 1047 | { | ||
| 1048 | union kvmppc_one_reg val; | ||
| 1049 | int offset = kvmppc_get_vmx_byte_offset(vcpu, | ||
| 1050 | vcpu->arch.mmio_vmx_offset); | ||
| 1051 | int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; | ||
| 1052 | |||
| 1053 | if (offset == -1) | ||
| 1054 | return; | ||
| 957 | 1055 | ||
| 958 | VCPU_VSX_VR(vcpu, index).u[di * 2] = hi; | 1056 | val.vval = VCPU_VSX_VR(vcpu, index); |
| 959 | VCPU_VSX_VR(vcpu, index).u[di * 2 + 1] = lo; | 1057 | val.vsx8val[offset] = gpr8; |
| 1058 | VCPU_VSX_VR(vcpu, index) = val.vval; | ||
| 960 | } | 1059 | } |
| 961 | #endif /* CONFIG_ALTIVEC */ | 1060 | #endif /* CONFIG_ALTIVEC */ |
| 962 | 1061 | ||
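The offset helpers above fold the guest/host endianness difference into a single element index. A worked example under stated assumptions (sizeof(vector128) == 16; the helper below is an illustrative standalone restatement, not patch code):

	static int sketch_vmx_offset(int index, int element_size, int byteswap)
	{
		int elts = 16 / element_size;	/* sizeof(vector128) / element_size */

		if (index < 0 || index >= elts)
			return -1;
		return byteswap ? elts - index - 1 : index;
	}

	/* element_size = 4 (lvewx): sketch_vmx_offset(1, 4, 0) == 1 and
	 * sketch_vmx_offset(1, 4, 1) == 2, so vsx32val[offset] names the
	 * same architectural word regardless of host memory layout. */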
| @@ -1041,6 +1140,9 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, | |||
| 1041 | kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); | 1140 | kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); |
| 1042 | break; | 1141 | break; |
| 1043 | case KVM_MMIO_REG_FPR: | 1142 | case KVM_MMIO_REG_FPR: |
| 1143 | if (vcpu->kvm->arch.kvm_ops->giveup_ext) | ||
| 1144 | vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP); | ||
| 1145 | |||
| 1044 | VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; | 1146 | VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; |
| 1045 | break; | 1147 | break; |
| 1046 | #ifdef CONFIG_PPC_BOOK3S | 1148 | #ifdef CONFIG_PPC_BOOK3S |
| @@ -1054,18 +1156,36 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, | |||
| 1054 | #endif | 1156 | #endif |
| 1055 | #ifdef CONFIG_VSX | 1157 | #ifdef CONFIG_VSX |
| 1056 | case KVM_MMIO_REG_VSX: | 1158 | case KVM_MMIO_REG_VSX: |
| 1057 | if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_DWORD) | 1159 | if (vcpu->kvm->arch.kvm_ops->giveup_ext) |
| 1160 | vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX); | ||
| 1161 | |||
| 1162 | if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD) | ||
| 1058 | kvmppc_set_vsr_dword(vcpu, gpr); | 1163 | kvmppc_set_vsr_dword(vcpu, gpr); |
| 1059 | else if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_WORD) | 1164 | else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD) |
| 1060 | kvmppc_set_vsr_word(vcpu, gpr); | 1165 | kvmppc_set_vsr_word(vcpu, gpr); |
| 1061 | else if (vcpu->arch.mmio_vsx_copy_type == | 1166 | else if (vcpu->arch.mmio_copy_type == |
| 1062 | KVMPPC_VSX_COPY_DWORD_LOAD_DUMP) | 1167 | KVMPPC_VSX_COPY_DWORD_LOAD_DUMP) |
| 1063 | kvmppc_set_vsr_dword_dump(vcpu, gpr); | 1168 | kvmppc_set_vsr_dword_dump(vcpu, gpr); |
| 1169 | else if (vcpu->arch.mmio_copy_type == | ||
| 1170 | KVMPPC_VSX_COPY_WORD_LOAD_DUMP) | ||
| 1171 | kvmppc_set_vsr_word_dump(vcpu, gpr); | ||
| 1064 | break; | 1172 | break; |
| 1065 | #endif | 1173 | #endif |
| 1066 | #ifdef CONFIG_ALTIVEC | 1174 | #ifdef CONFIG_ALTIVEC |
| 1067 | case KVM_MMIO_REG_VMX: | 1175 | case KVM_MMIO_REG_VMX: |
| 1068 | kvmppc_set_vmx_dword(vcpu, gpr); | 1176 | if (vcpu->kvm->arch.kvm_ops->giveup_ext) |
| 1177 | vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC); | ||
| 1178 | |||
| 1179 | if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD) | ||
| 1180 | kvmppc_set_vmx_dword(vcpu, gpr); | ||
| 1181 | else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD) | ||
| 1182 | kvmppc_set_vmx_word(vcpu, gpr); | ||
| 1183 | else if (vcpu->arch.mmio_copy_type == | ||
| 1184 | KVMPPC_VMX_COPY_HWORD) | ||
| 1185 | kvmppc_set_vmx_hword(vcpu, gpr); | ||
| 1186 | else if (vcpu->arch.mmio_copy_type == | ||
| 1187 | KVMPPC_VMX_COPY_BYTE) | ||
| 1188 | kvmppc_set_vmx_byte(vcpu, gpr); | ||
| 1069 | break; | 1189 | break; |
| 1070 | #endif | 1190 | #endif |
| 1071 | default: | 1191 | default: |
| @@ -1228,7 +1348,7 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val) | |||
| 1228 | u32 dword_offset, word_offset; | 1348 | u32 dword_offset, word_offset; |
| 1229 | union kvmppc_one_reg reg; | 1349 | union kvmppc_one_reg reg; |
| 1230 | int vsx_offset = 0; | 1350 | int vsx_offset = 0; |
| 1231 | int copy_type = vcpu->arch.mmio_vsx_copy_type; | 1351 | int copy_type = vcpu->arch.mmio_copy_type; |
| 1232 | int result = 0; | 1352 | int result = 0; |
| 1233 | 1353 | ||
| 1234 | switch (copy_type) { | 1354 | switch (copy_type) { |
| @@ -1344,14 +1464,16 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu, | |||
| 1344 | #endif /* CONFIG_VSX */ | 1464 | #endif /* CONFIG_VSX */ |
| 1345 | 1465 | ||
| 1346 | #ifdef CONFIG_ALTIVEC | 1466 | #ifdef CONFIG_ALTIVEC |
| 1347 | /* handle quadword load access in two halves */ | 1467 | int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 1348 | int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu, | 1468 | unsigned int rt, unsigned int bytes, int is_default_endian) |
| 1349 | unsigned int rt, int is_default_endian) | ||
| 1350 | { | 1469 | { |
| 1351 | enum emulation_result emulated = EMULATE_DONE; | 1470 | enum emulation_result emulated = EMULATE_DONE; |
| 1352 | 1471 | ||
| 1472 | if (vcpu->arch.mmio_vmx_copy_nums > 2) | ||
| 1473 | return EMULATE_FAIL; | ||
| 1474 | |||
| 1353 | while (vcpu->arch.mmio_vmx_copy_nums) { | 1475 | while (vcpu->arch.mmio_vmx_copy_nums) { |
| 1354 | emulated = __kvmppc_handle_load(run, vcpu, rt, 8, | 1476 | emulated = __kvmppc_handle_load(run, vcpu, rt, bytes, |
| 1355 | is_default_endian, 0); | 1477 | is_default_endian, 0); |
| 1356 | 1478 | ||
| 1357 | if (emulated != EMULATE_DONE) | 1479 | if (emulated != EMULATE_DONE) |
| @@ -1359,55 +1481,127 @@ int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 1359 | 1481 | ||
| 1360 | vcpu->arch.paddr_accessed += run->mmio.len; | 1482 | vcpu->arch.paddr_accessed += run->mmio.len; |
| 1361 | vcpu->arch.mmio_vmx_copy_nums--; | 1483 | vcpu->arch.mmio_vmx_copy_nums--; |
| 1484 | vcpu->arch.mmio_vmx_offset++; | ||
| 1362 | } | 1485 | } |
| 1363 | 1486 | ||
| 1364 | return emulated; | 1487 | return emulated; |
| 1365 | } | 1488 | } |
| 1366 | 1489 | ||
| 1367 | static inline int kvmppc_get_vmx_data(struct kvm_vcpu *vcpu, int rs, u64 *val) | 1490 | int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) |
| 1368 | { | 1491 | { |
| 1369 | vector128 vrs = VCPU_VSX_VR(vcpu, rs); | 1492 | union kvmppc_one_reg reg; |
| 1370 | u32 di; | 1493 | int vmx_offset = 0; |
| 1371 | u64 w0, w1; | 1494 | int result = 0; |
| 1372 | 1495 | ||
| 1373 | di = 2 - vcpu->arch.mmio_vmx_copy_nums; /* doubleword index */ | 1496 | vmx_offset = |
| 1374 | if (di > 1) | 1497 | kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset); |
| 1498 | |||
| 1499 | if (vmx_offset == -1) | ||
| 1375 | return -1; | 1500 | return -1; |
| 1376 | 1501 | ||
| 1377 | if (vcpu->arch.mmio_host_swabbed) | 1502 | reg.vval = VCPU_VSX_VR(vcpu, index); |
| 1378 | di = 1 - di; | 1503 | *val = reg.vsxval[vmx_offset]; |
| 1379 | 1504 | ||
| 1380 | w0 = vrs.u[di * 2]; | 1505 | return result; |
| 1381 | w1 = vrs.u[di * 2 + 1]; | 1506 | } |
| 1382 | 1507 | ||
| 1383 | #ifdef __BIG_ENDIAN | 1508 | int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) |
| 1384 | *val = (w0 << 32) | w1; | 1509 | { |
| 1385 | #else | 1510 | union kvmppc_one_reg reg; |
| 1386 | *val = (w1 << 32) | w0; | 1511 | int vmx_offset = 0; |
| 1387 | #endif | 1512 | int result = 0; |
| 1388 | return 0; | 1513 | |
| 1514 | vmx_offset = | ||
| 1515 | kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset); | ||
| 1516 | |||
| 1517 | if (vmx_offset == -1) | ||
| 1518 | return -1; | ||
| 1519 | |||
| 1520 | reg.vval = VCPU_VSX_VR(vcpu, index); | ||
| 1521 | *val = reg.vsx32val[vmx_offset]; | ||
| 1522 | |||
| 1523 | return result; | ||
| 1524 | } | ||
| 1525 | |||
| 1526 | int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val) | ||
| 1527 | { | ||
| 1528 | union kvmppc_one_reg reg; | ||
| 1529 | int vmx_offset = 0; | ||
| 1530 | int result = 0; | ||
| 1531 | |||
| 1532 | vmx_offset = | ||
| 1533 | kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset); | ||
| 1534 | |||
| 1535 | if (vmx_offset == -1) | ||
| 1536 | return -1; | ||
| 1537 | |||
| 1538 | reg.vval = VCPU_VSX_VR(vcpu, index); | ||
| 1539 | *val = reg.vsx16val[vmx_offset]; | ||
| 1540 | |||
| 1541 | return result; | ||
| 1389 | } | 1542 | } |
| 1390 | 1543 | ||
| 1391 | /* handle quadword store in two halves */ | 1544 | int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val) |
| 1392 | int kvmppc_handle_store128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu, | 1545 | { |
| 1393 | unsigned int rs, int is_default_endian) | 1546 | union kvmppc_one_reg reg; |
| 1547 | int vmx_offset = 0; | ||
| 1548 | int result = 0; | ||
| 1549 | |||
| 1550 | vmx_offset = | ||
| 1551 | kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset); | ||
| 1552 | |||
| 1553 | if (vmx_offset == -1) | ||
| 1554 | return -1; | ||
| 1555 | |||
| 1556 | reg.vval = VCPU_VSX_VR(vcpu, index); | ||
| 1557 | *val = reg.vsx8val[vmx_offset]; | ||
| 1558 | |||
| 1559 | return result; | ||
| 1560 | } | ||
| 1561 | |||
| 1562 | int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, | ||
| 1563 | unsigned int rs, unsigned int bytes, int is_default_endian) | ||
| 1394 | { | 1564 | { |
| 1395 | u64 val = 0; | 1565 | u64 val = 0; |
| 1566 | unsigned int index = rs & KVM_MMIO_REG_MASK; | ||
| 1396 | enum emulation_result emulated = EMULATE_DONE; | 1567 | enum emulation_result emulated = EMULATE_DONE; |
| 1397 | 1568 | ||
| 1569 | if (vcpu->arch.mmio_vmx_copy_nums > 2) | ||
| 1570 | return EMULATE_FAIL; | ||
| 1571 | |||
| 1398 | vcpu->arch.io_gpr = rs; | 1572 | vcpu->arch.io_gpr = rs; |
| 1399 | 1573 | ||
| 1400 | while (vcpu->arch.mmio_vmx_copy_nums) { | 1574 | while (vcpu->arch.mmio_vmx_copy_nums) { |
| 1401 | if (kvmppc_get_vmx_data(vcpu, rs, &val) == -1) | 1575 | switch (vcpu->arch.mmio_copy_type) { |
| 1576 | case KVMPPC_VMX_COPY_DWORD: | ||
| 1577 | if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1) | ||
| 1578 | return EMULATE_FAIL; | ||
| 1579 | |||
| 1580 | break; | ||
| 1581 | case KVMPPC_VMX_COPY_WORD: | ||
| 1582 | if (kvmppc_get_vmx_word(vcpu, index, &val) == -1) | ||
| 1583 | return EMULATE_FAIL; | ||
| 1584 | break; | ||
| 1585 | case KVMPPC_VMX_COPY_HWORD: | ||
| 1586 | if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1) | ||
| 1587 | return EMULATE_FAIL; | ||
| 1588 | break; | ||
| 1589 | case KVMPPC_VMX_COPY_BYTE: | ||
| 1590 | if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1) | ||
| 1591 | return EMULATE_FAIL; | ||
| 1592 | break; | ||
| 1593 | default: | ||
| 1402 | return EMULATE_FAIL; | 1594 | return EMULATE_FAIL; |
| 1595 | } | ||
| 1403 | 1596 | ||
| 1404 | emulated = kvmppc_handle_store(run, vcpu, val, 8, | 1597 | emulated = kvmppc_handle_store(run, vcpu, val, bytes, |
| 1405 | is_default_endian); | 1598 | is_default_endian); |
| 1406 | if (emulated != EMULATE_DONE) | 1599 | if (emulated != EMULATE_DONE) |
| 1407 | break; | 1600 | break; |
| 1408 | 1601 | ||
| 1409 | vcpu->arch.paddr_accessed += run->mmio.len; | 1602 | vcpu->arch.paddr_accessed += run->mmio.len; |
| 1410 | vcpu->arch.mmio_vmx_copy_nums--; | 1603 | vcpu->arch.mmio_vmx_copy_nums--; |
| 1604 | vcpu->arch.mmio_vmx_offset++; | ||
| 1411 | } | 1605 | } |
| 1412 | 1606 | ||
| 1413 | return emulated; | 1607 | return emulated; |
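Taken together, the load and store paths above replay one vector access as a series of smaller MMIO transactions, stepping mmio_vmx_offset after each completed slice. A worked example (illustrative) for a 16-byte lvx to an MMIO address:

	/* size == 16, so the emulate_loadstore path sets:
	 *   mmio_copy_type     = KVMPPC_VMX_COPY_DWORD
	 *   mmio_vmx_copy_nums = 2     -> two 8-byte exits to userspace
	 *   mmio_vmx_offset    = 0     -> 1 after the first completion
	 * Each completed 8-byte read lands in vsxval[offset] of the target
	 * VR via kvmppc_set_vmx_dword(), with the offset helper absorbing
	 * any guest/host endianness difference.  A 1-byte lvebx instead
	 * takes a single pass with mmio_vmx_copy_nums = 1. */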
| @@ -1422,11 +1616,11 @@ static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu, | |||
| 1422 | vcpu->arch.paddr_accessed += run->mmio.len; | 1616 | vcpu->arch.paddr_accessed += run->mmio.len; |
| 1423 | 1617 | ||
| 1424 | if (!vcpu->mmio_is_write) { | 1618 | if (!vcpu->mmio_is_write) { |
| 1425 | emulated = kvmppc_handle_load128_by2x64(run, vcpu, | 1619 | emulated = kvmppc_handle_vmx_load(run, vcpu, |
| 1426 | vcpu->arch.io_gpr, 1); | 1620 | vcpu->arch.io_gpr, run->mmio.len, 1); |
| 1427 | } else { | 1621 | } else { |
| 1428 | emulated = kvmppc_handle_store128_by2x64(run, vcpu, | 1622 | emulated = kvmppc_handle_vmx_store(run, vcpu, |
| 1429 | vcpu->arch.io_gpr, 1); | 1623 | vcpu->arch.io_gpr, run->mmio.len, 1); |
| 1430 | } | 1624 | } |
| 1431 | 1625 | ||
| 1432 | switch (emulated) { | 1626 | switch (emulated) { |
| @@ -1570,8 +1764,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
| 1570 | } | 1764 | } |
| 1571 | #endif | 1765 | #endif |
| 1572 | #ifdef CONFIG_ALTIVEC | 1766 | #ifdef CONFIG_ALTIVEC |
| 1573 | if (vcpu->arch.mmio_vmx_copy_nums > 0) | 1767 | if (vcpu->arch.mmio_vmx_copy_nums > 0) { |
| 1574 | vcpu->arch.mmio_vmx_copy_nums--; | 1768 | vcpu->arch.mmio_vmx_copy_nums--; |
| 1769 | vcpu->arch.mmio_vmx_offset++; | ||
| 1770 | } | ||
| 1575 | 1771 | ||
| 1576 | if (vcpu->arch.mmio_vmx_copy_nums > 0) { | 1772 | if (vcpu->arch.mmio_vmx_copy_nums > 0) { |
| 1577 | r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run); | 1773 | r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run); |
| @@ -1784,16 +1980,16 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
| 1784 | void __user *argp = (void __user *)arg; | 1980 | void __user *argp = (void __user *)arg; |
| 1785 | long r; | 1981 | long r; |
| 1786 | 1982 | ||
| 1787 | vcpu_load(vcpu); | ||
| 1788 | |||
| 1789 | switch (ioctl) { | 1983 | switch (ioctl) { |
| 1790 | case KVM_ENABLE_CAP: | 1984 | case KVM_ENABLE_CAP: |
| 1791 | { | 1985 | { |
| 1792 | struct kvm_enable_cap cap; | 1986 | struct kvm_enable_cap cap; |
| 1793 | r = -EFAULT; | 1987 | r = -EFAULT; |
| 1988 | vcpu_load(vcpu); | ||
| 1794 | if (copy_from_user(&cap, argp, sizeof(cap))) | 1989 | if (copy_from_user(&cap, argp, sizeof(cap))) |
| 1795 | goto out; | 1990 | goto out; |
| 1796 | r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); | 1991 | r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); |
| 1992 | vcpu_put(vcpu); | ||
| 1797 | break; | 1993 | break; |
| 1798 | } | 1994 | } |
| 1799 | 1995 | ||
| @@ -1815,9 +2011,11 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
| 1815 | case KVM_DIRTY_TLB: { | 2011 | case KVM_DIRTY_TLB: { |
| 1816 | struct kvm_dirty_tlb dirty; | 2012 | struct kvm_dirty_tlb dirty; |
| 1817 | r = -EFAULT; | 2013 | r = -EFAULT; |
| 2014 | vcpu_load(vcpu); | ||
| 1818 | if (copy_from_user(&dirty, argp, sizeof(dirty))) | 2015 | if (copy_from_user(&dirty, argp, sizeof(dirty))) |
| 1819 | goto out; | 2016 | goto out; |
| 1820 | r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); | 2017 | r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); |
| 2018 | vcpu_put(vcpu); | ||
| 1821 | break; | 2019 | break; |
| 1822 | } | 2020 | } |
| 1823 | #endif | 2021 | #endif |
| @@ -1826,7 +2024,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
| 1826 | } | 2024 | } |
| 1827 | 2025 | ||
| 1828 | out: | 2026 | out: |
| 1829 | vcpu_put(vcpu); | ||
| 1830 | return r; | 2027 | return r; |
| 1831 | } | 2028 | } |
| 1832 | 2029 | ||
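The kvm_arch_vcpu_ioctl() change narrows vcpu_load()/vcpu_put() from the whole ioctl down to just the cases that need the vCPU loaded. Note that in the hunks above the copy_from_user() failure path jumps to `out` after vcpu_load() without a matching vcpu_put(); a defensive ordering, sketched here with hypothetical names rather than as the patch's actual code, copies from user space before taking the vCPU:

    case KVM_SOME_CAP: {                      /* hypothetical case */
        struct some_arg a;
        r = -EFAULT;
        if (copy_from_user(&a, argp, sizeof(a)))
            break;                            /* vCPU never loaded */
        vcpu_load(vcpu);                      /* scope the load tightly */
        r = handle_some_cap(vcpu, &a);        /* hypothetical handler */
        vcpu_put(vcpu);
        break;
    }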
diff --git a/arch/powerpc/kvm/tm.S b/arch/powerpc/kvm/tm.S new file mode 100644 index 000000000000..90e330f21356 --- /dev/null +++ b/arch/powerpc/kvm/tm.S | |||
| @@ -0,0 +1,384 @@ | |||
| 1 | /* | ||
| 2 | * This program is free software; you can redistribute it and/or modify | ||
| 3 | * it under the terms of the GNU General Public License, version 2, as | ||
| 4 | * published by the Free Software Foundation. | ||
| 5 | * | ||
| 6 | * This program is distributed in the hope that it will be useful, | ||
| 7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 9 | * GNU General Public License for more details. | ||
| 10 | * | ||
| 11 | * Derived from book3s_hv_rmhandlers.S, which is: | ||
| 12 | * | ||
| 13 | * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> | ||
| 14 | * | ||
| 15 | */ | ||
| 16 | |||
| 17 | #include <asm/reg.h> | ||
| 18 | #include <asm/ppc_asm.h> | ||
| 19 | #include <asm/asm-offsets.h> | ||
| 20 | #include <asm/export.h> | ||
| 21 | #include <asm/tm.h> | ||
| 22 | #include <asm/cputable.h> | ||
| 23 | |||
| 24 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 25 | #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM) | ||
| 26 | |||
| 27 | /* | ||
| 28 | * Save transactional state and TM-related registers. | ||
| 29 | * Called with: | ||
| 30 | * - r3 pointing to the vcpu struct | ||
| 31 | * - r4 pointing to the MSR with current TS bits: | ||
| 32 | * (for HV KVM, it is VCPU_MSR; for PR KVM, it is the host MSR). | ||
| 33 | * This can modify all checkpointed registers, but | ||
| 34 | * restores r1, r2 before exit. | ||
| 35 | */ | ||
| 36 | _GLOBAL(__kvmppc_save_tm) | ||
| 37 | mflr r0 | ||
| 38 | std r0, PPC_LR_STKOFF(r1) | ||
| 39 | |||
| 40 | /* Turn on TM. */ | ||
| 41 | mfmsr r8 | ||
| 42 | li r0, 1 | ||
| 43 | rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG | ||
| 44 | ori r8, r8, MSR_FP | ||
| 45 | oris r8, r8, (MSR_VEC | MSR_VSX)@h | ||
| 46 | mtmsrd r8 | ||
| 47 | |||
| 48 | rldicl. r4, r4, 64 - MSR_TS_S_LG, 62 | ||
| 49 | beq 1f /* TM not active in guest. */ | ||
| 50 | |||
| 51 | std r1, HSTATE_SCRATCH2(r13) | ||
| 52 | std r3, HSTATE_SCRATCH1(r13) | ||
| 53 | |||
| 54 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE | ||
| 55 | BEGIN_FTR_SECTION | ||
| 56 | /* Emulating the treclaim instruction needs the TEXASR value from before it */ | ||
| 57 | mfspr r6, SPRN_TEXASR | ||
| 58 | std r6, VCPU_ORIG_TEXASR(r3) | ||
| 59 | END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) | ||
| 60 | #endif | ||
| 61 | |||
| 62 | /* Clear the MSR RI since r1, r13 are all going to be clobbered. */ | ||
| 63 | li r5, 0 | ||
| 64 | mtmsrd r5, 1 | ||
| 65 | |||
| 66 | li r3, TM_CAUSE_KVM_RESCHED | ||
| 67 | |||
| 68 | /* All GPRs are volatile at this point. */ | ||
| 69 | TRECLAIM(R3) | ||
| 70 | |||
| 71 | /* Temporarily store r13 and r9 so we have some regs to play with */ | ||
| 72 | SET_SCRATCH0(r13) | ||
| 73 | GET_PACA(r13) | ||
| 74 | std r9, PACATMSCRATCH(r13) | ||
| 75 | ld r9, HSTATE_SCRATCH1(r13) | ||
| 76 | |||
| 77 | /* Get a few more GPRs free. */ | ||
| 78 | std r29, VCPU_GPRS_TM(29)(r9) | ||
| 79 | std r30, VCPU_GPRS_TM(30)(r9) | ||
| 80 | std r31, VCPU_GPRS_TM(31)(r9) | ||
| 81 | |||
| 82 | /* Save away PPR and DSCR soon so we don't run with user values. */ | ||
| 83 | mfspr r31, SPRN_PPR | ||
| 84 | HMT_MEDIUM | ||
| 85 | mfspr r30, SPRN_DSCR | ||
| 86 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE | ||
| 87 | ld r29, HSTATE_DSCR(r13) | ||
| 88 | mtspr SPRN_DSCR, r29 | ||
| 89 | #endif | ||
| 90 | |||
| 91 | /* Save all but r9, r13 & r29-r31 */ | ||
| 92 | reg = 0 | ||
| 93 | .rept 29 | ||
| 94 | .if (reg != 9) && (reg != 13) | ||
| 95 | std reg, VCPU_GPRS_TM(reg)(r9) | ||
| 96 | .endif | ||
| 97 | reg = reg + 1 | ||
| 98 | .endr | ||
| 99 | /* ... now save r13 */ | ||
| 100 | GET_SCRATCH0(r4) | ||
| 101 | std r4, VCPU_GPRS_TM(13)(r9) | ||
| 102 | /* ... and save r9 */ | ||
| 103 | ld r4, PACATMSCRATCH(r13) | ||
| 104 | std r4, VCPU_GPRS_TM(9)(r9) | ||
| 105 | |||
| 106 | /* Reload stack pointer and TOC. */ | ||
| 107 | ld r1, HSTATE_SCRATCH2(r13) | ||
| 108 | ld r2, PACATOC(r13) | ||
| 109 | |||
| 110 | /* Set MSR RI now that we have r1 and r13 back. */ | ||
| 111 | li r5, MSR_RI | ||
| 112 | mtmsrd r5, 1 | ||
| 113 | |||
| 114 | /* Save away checkpointed SPRs. */ | ||
| 115 | std r31, VCPU_PPR_TM(r9) | ||
| 116 | std r30, VCPU_DSCR_TM(r9) | ||
| 117 | mflr r5 | ||
| 118 | mfcr r6 | ||
| 119 | mfctr r7 | ||
| 120 | mfspr r8, SPRN_AMR | ||
| 121 | mfspr r10, SPRN_TAR | ||
| 122 | mfxer r11 | ||
| 123 | std r5, VCPU_LR_TM(r9) | ||
| 124 | stw r6, VCPU_CR_TM(r9) | ||
| 125 | std r7, VCPU_CTR_TM(r9) | ||
| 126 | std r8, VCPU_AMR_TM(r9) | ||
| 127 | std r10, VCPU_TAR_TM(r9) | ||
| 128 | std r11, VCPU_XER_TM(r9) | ||
| 129 | |||
| 130 | /* Restore r12 as trap number. */ | ||
| 131 | lwz r12, VCPU_TRAP(r9) | ||
| 132 | |||
| 133 | /* Save FP/VSX. */ | ||
| 134 | addi r3, r9, VCPU_FPRS_TM | ||
| 135 | bl store_fp_state | ||
| 136 | addi r3, r9, VCPU_VRS_TM | ||
| 137 | bl store_vr_state | ||
| 138 | mfspr r6, SPRN_VRSAVE | ||
| 139 | stw r6, VCPU_VRSAVE_TM(r9) | ||
| 140 | 1: | ||
| 141 | /* | ||
| 142 | * We need to save these SPRs after the treclaim so that the software | ||
| 143 | * error code is recorded correctly in the TEXASR. Also the user may | ||
| 144 | * change these outside of a transaction, so they must always be | ||
| 145 | * context switched. | ||
| 146 | */ | ||
| 147 | mfspr r7, SPRN_TEXASR | ||
| 148 | std r7, VCPU_TEXASR(r9) | ||
| 149 | 11: | ||
| 150 | mfspr r5, SPRN_TFHAR | ||
| 151 | mfspr r6, SPRN_TFIAR | ||
| 152 | std r5, VCPU_TFHAR(r9) | ||
| 153 | std r6, VCPU_TFIAR(r9) | ||
| 154 | |||
| 155 | ld r0, PPC_LR_STKOFF(r1) | ||
| 156 | mtlr r0 | ||
| 157 | blr | ||
| 158 | |||
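Both entry points start by enabling MSR[TM] plus the FP/VMX/VSX bits before touching checkpointed state; the rldimi/ori/oris sequence at the top of __kvmppc_save_tm (and the li/sldi/or variant in the restore path below) is plain bit-setting. A C rendering, with bit positions copied from asm/reg.h to the best of my knowledge:

    #include <stdint.h>

    #define MSR_TM_LG 32                  /* MSR[TM] bit position */
    #define MSR_FP    (1ULL << 13)        /* floating point available */
    #define MSR_VSX   (1ULL << 23)        /* VSX available */
    #define MSR_VEC   (1ULL << 25)        /* VMX available */

    /* what "Turn on TM" amounts to before the mtmsrd writeback */
    static uint64_t enable_tm_and_math(uint64_t msr)
    {
        return msr | (1ULL << MSR_TM_LG) | MSR_FP | MSR_VEC | MSR_VSX;
    }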
| 159 | /* | ||
| 160 | * _kvmppc_save_tm_pr() is a wrapper around __kvmppc_save_tm(), so that it can | ||
| 161 | * be invoked from C code by PR KVM only. | ||
| 162 | */ | ||
| 163 | _GLOBAL(_kvmppc_save_tm_pr) | ||
| 164 | mflr r5 | ||
| 165 | std r5, PPC_LR_STKOFF(r1) | ||
| 166 | stdu r1, -SWITCH_FRAME_SIZE(r1) | ||
| 167 | SAVE_NVGPRS(r1) | ||
| 168 | |||
| 169 | /* Save the MSR, since its TM/math bits might be | ||
| 170 | * changed by __kvmppc_save_tm(). | ||
| 171 | */ | ||
| 172 | mfmsr r5 | ||
| 173 | SAVE_GPR(5, r1) | ||
| 174 | |||
| 175 | /* also save DSCR/CR/TAR so they can be restored later */ | ||
| 176 | mfspr r6, SPRN_DSCR | ||
| 177 | SAVE_GPR(6, r1) | ||
| 178 | |||
| 179 | mfcr r7 | ||
| 180 | stw r7, _CCR(r1) | ||
| 181 | |||
| 182 | mfspr r8, SPRN_TAR | ||
| 183 | SAVE_GPR(8, r1) | ||
| 184 | |||
| 185 | bl __kvmppc_save_tm | ||
| 186 | |||
| 187 | REST_GPR(8, r1) | ||
| 188 | mtspr SPRN_TAR, r8 | ||
| 189 | |||
| 190 | ld r7, _CCR(r1) | ||
| 191 | mtcr r7 | ||
| 192 | |||
| 193 | REST_GPR(6, r1) | ||
| 194 | mtspr SPRN_DSCR, r6 | ||
| 195 | |||
| 196 | /* preserve the current MSR's TS bits */ | ||
| 197 | REST_GPR(5, r1) | ||
| 198 | mfmsr r6 | ||
| 199 | rldicl r6, r6, 64 - MSR_TS_S_LG, 62 | ||
| 200 | rldimi r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG | ||
| 201 | mtmsrd r5 | ||
| 202 | |||
| 203 | REST_NVGPRS(r1) | ||
| 204 | addi r1, r1, SWITCH_FRAME_SIZE | ||
| 205 | ld r5, PPC_LR_STKOFF(r1) | ||
| 206 | mtlr r5 | ||
| 207 | blr | ||
| 208 | |||
| 209 | EXPORT_SYMBOL_GPL(_kvmppc_save_tm_pr); | ||
| 210 | |||
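The wrapper that just ended preserves the caller's transaction-state (TS) field across its final mtmsrd: rldicl extracts the two TS bits from the live MSR and rldimi splices them into the saved MSR image. The same operation in C, taking MSR_TS_S_LG = 33 from asm/reg.h as an assumption:

    #include <stdint.h>

    #define MSR_TS_S_LG 33   /* low bit of the 2-bit TS field */

    /* splice the live TS bits into a saved MSR image */
    static uint64_t merge_ts_bits(uint64_t saved_msr, uint64_t live_msr)
    {
        uint64_t ts = (live_msr >> MSR_TS_S_LG) & 0x3;  /* rldicl */
        saved_msr &= ~(0x3ULL << MSR_TS_S_LG);
        return saved_msr | (ts << MSR_TS_S_LG);         /* rldimi */
    }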
| 211 | /* | ||
| 212 | * Restore transactional state and TM-related registers. | ||
| 213 | * Called with: | ||
| 214 | * - r3 pointing to the vcpu struct. | ||
| 215 | * - r4 is the guest MSR with desired TS bits: | ||
| 216 | * For HV KVM, it is VCPU_MSR | ||
| 217 | * For PR KVM, it is provided by the caller | ||
| 218 | * This potentially modifies all checkpointed registers. | ||
| 219 | * It restores r1, r2 from the PACA. | ||
| 220 | */ | ||
| 221 | _GLOBAL(__kvmppc_restore_tm) | ||
| 222 | mflr r0 | ||
| 223 | std r0, PPC_LR_STKOFF(r1) | ||
| 224 | |||
| 225 | /* Turn on TM/FP/VSX/VMX so we can restore them. */ | ||
| 226 | mfmsr r5 | ||
| 227 | li r6, MSR_TM >> 32 | ||
| 228 | sldi r6, r6, 32 | ||
| 229 | or r5, r5, r6 | ||
| 230 | ori r5, r5, MSR_FP | ||
| 231 | oris r5, r5, (MSR_VEC | MSR_VSX)@h | ||
| 232 | mtmsrd r5 | ||
| 233 | |||
| 234 | /* | ||
| 235 | * The user may change these outside of a transaction, so they must | ||
| 236 | * always be context switched. | ||
| 237 | */ | ||
| 238 | ld r5, VCPU_TFHAR(r3) | ||
| 239 | ld r6, VCPU_TFIAR(r3) | ||
| 240 | ld r7, VCPU_TEXASR(r3) | ||
| 241 | mtspr SPRN_TFHAR, r5 | ||
| 242 | mtspr SPRN_TFIAR, r6 | ||
| 243 | mtspr SPRN_TEXASR, r7 | ||
| 244 | |||
| 245 | mr r5, r4 | ||
| 246 | rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 | ||
| 247 | beqlr /* TM not active in guest */ | ||
| 248 | std r1, HSTATE_SCRATCH2(r13) | ||
| 249 | |||
| 250 | /* Make sure the failure summary is set; otherwise we'll take a | ||
| 251 | * program check when we trechkpt. It might not have been set by a | ||
| 252 | * kvmppc_set_one_reg() call, but we shouldn't let that crash the | ||
| 253 | * host. | ||
| 254 | */ | ||
| 255 | oris r7, r7, (TEXASR_FS)@h | ||
| 256 | mtspr SPRN_TEXASR, r7 | ||
| 257 | |||
| 258 | /* | ||
| 259 | * Load up the checkpointed state for the guest. This must be done | ||
| 260 | * early, as it will blow away the GPRs, VSRs and | ||
| 261 | * some SPRs. | ||
| 262 | */ | ||
| 263 | |||
| 264 | mr r31, r3 | ||
| 265 | addi r3, r31, VCPU_FPRS_TM | ||
| 266 | bl load_fp_state | ||
| 267 | addi r3, r31, VCPU_VRS_TM | ||
| 268 | bl load_vr_state | ||
| 269 | mr r3, r31 | ||
| 270 | lwz r7, VCPU_VRSAVE_TM(r3) | ||
| 271 | mtspr SPRN_VRSAVE, r7 | ||
| 272 | |||
| 273 | ld r5, VCPU_LR_TM(r3) | ||
| 274 | lwz r6, VCPU_CR_TM(r3) | ||
| 275 | ld r7, VCPU_CTR_TM(r3) | ||
| 276 | ld r8, VCPU_AMR_TM(r3) | ||
| 277 | ld r9, VCPU_TAR_TM(r3) | ||
| 278 | ld r10, VCPU_XER_TM(r3) | ||
| 279 | mtlr r5 | ||
| 280 | mtcr r6 | ||
| 281 | mtctr r7 | ||
| 282 | mtspr SPRN_AMR, r8 | ||
| 283 | mtspr SPRN_TAR, r9 | ||
| 284 | mtxer r10 | ||
| 285 | |||
| 286 | /* | ||
| 287 | * Load up PPR and DSCR values but don't put them in the actual SPRs | ||
| 288 | * till the last moment to avoid running with userspace PPR and DSCR for | ||
| 289 | * too long. | ||
| 290 | */ | ||
| 291 | ld r29, VCPU_DSCR_TM(r3) | ||
| 292 | ld r30, VCPU_PPR_TM(r3) | ||
| 293 | |||
| 294 | std r2, PACATMSCRATCH(r13) /* Save TOC */ | ||
| 295 | |||
| 296 | /* Clear the MSR RI since r1, r13 are all going to be clobbered. */ | ||
| 297 | li r5, 0 | ||
| 298 | mtmsrd r5, 1 | ||
| 299 | |||
| 300 | /* Load GPRs r0-r28 */ | ||
| 301 | reg = 0 | ||
| 302 | .rept 29 | ||
| 303 | ld reg, VCPU_GPRS_TM(reg)(r31) | ||
| 304 | reg = reg + 1 | ||
| 305 | .endr | ||
| 306 | |||
| 307 | mtspr SPRN_DSCR, r29 | ||
| 308 | mtspr SPRN_PPR, r30 | ||
| 309 | |||
| 310 | /* Load final GPRs */ | ||
| 311 | ld r29, VCPU_GPRS_TM(29)(r31) | ||
| 312 | ld r30, VCPU_GPRS_TM(30)(r31) | ||
| 313 | ld r31, VCPU_GPRS_TM(31)(r31) | ||
| 314 | |||
| 315 | /* TM checkpointed state is now setup. All GPRs are now volatile. */ | ||
| 316 | TRECHKPT | ||
| 317 | |||
| 318 | /* Now let's get back the state we need. */ | ||
| 319 | HMT_MEDIUM | ||
| 320 | GET_PACA(r13) | ||
| 321 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE | ||
| 322 | ld r29, HSTATE_DSCR(r13) | ||
| 323 | mtspr SPRN_DSCR, r29 | ||
| 324 | #endif | ||
| 325 | ld r1, HSTATE_SCRATCH2(r13) | ||
| 326 | ld r2, PACATMSCRATCH(r13) | ||
| 327 | |||
| 328 | /* Set the MSR RI since we have our registers back. */ | ||
| 329 | li r5, MSR_RI | ||
| 330 | mtmsrd r5, 1 | ||
| 331 | ld r0, PPC_LR_STKOFF(r1) | ||
| 332 | mtlr r0 | ||
| 333 | blr | ||
| 334 | |||
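For C-side callers, the kvmppc_save_tm_pr()/kvmppc_restore_tm_pr() prototypes this series adds suggest a gate that only takes the expensive trechkpt path when the guest MSR actually carries a live transaction. A plausible kernel-context sketch, not the verbatim implementation:

    /* skip the checkpointed-state reload when no transaction is
     * active; the lightweight path restores TFHAR/TFIAR/TEXASR only */
    void restore_tm_pr_sketch(struct kvm_vcpu *vcpu)
    {
        u64 guest_msr = kvmppc_get_msr(vcpu);

        if (!MSR_TM_ACTIVE(guest_msr)) {
            kvmppc_restore_tm_sprs(vcpu);
            return;
        }

        _kvmppc_restore_tm_pr(vcpu, guest_msr);
    }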
| 335 | /* | ||
| 336 | * _kvmppc_restore_tm_pr() is a wrapper around __kvmppc_restore_tm(), so that it | ||
| 337 | * can be invoked from C code by PR KVM only. | ||
| 338 | */ | ||
| 339 | _GLOBAL(_kvmppc_restore_tm_pr) | ||
| 340 | mflr r5 | ||
| 341 | std r5, PPC_LR_STKOFF(r1) | ||
| 342 | stdu r1, -SWITCH_FRAME_SIZE(r1) | ||
| 343 | SAVE_NVGPRS(r1) | ||
| 344 | |||
| 345 | /* Save the MSR, since its TM/math bits might be changed */ | ||
| 346 | mfmsr r5 | ||
| 347 | SAVE_GPR(5, r1) | ||
| 348 | |||
| 349 | /* also save DSCR/CR/TAR so they can be restored later */ | ||
| 350 | mfspr r6, SPRN_DSCR | ||
| 351 | SAVE_GPR(6, r1) | ||
| 352 | |||
| 353 | mfcr r7 | ||
| 354 | stw r7, _CCR(r1) | ||
| 355 | |||
| 356 | mfspr r8, SPRN_TAR | ||
| 357 | SAVE_GPR(8, r1) | ||
| 358 | |||
| 359 | bl __kvmppc_restore_tm | ||
| 360 | |||
| 361 | REST_GPR(8, r1) | ||
| 362 | mtspr SPRN_TAR, r8 | ||
| 363 | |||
| 364 | ld r7, _CCR(r1) | ||
| 365 | mtcr r7 | ||
| 366 | |||
| 367 | REST_GPR(6, r1) | ||
| 368 | mtspr SPRN_DSCR, r6 | ||
| 369 | |||
| 370 | /* preserve the current MSR's TS bits */ | ||
| 371 | REST_GPR(5, r1) | ||
| 372 | mfmsr r6 | ||
| 373 | rldicl r6, r6, 64 - MSR_TS_S_LG, 62 | ||
| 374 | rldimi r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG | ||
| 375 | mtmsrd r5 | ||
| 376 | |||
| 377 | REST_NVGPRS(r1) | ||
| 378 | addi r1, r1, SWITCH_FRAME_SIZE | ||
| 379 | ld r5, PPC_LR_STKOFF(r1) | ||
| 380 | mtlr r5 | ||
| 381 | blr | ||
| 382 | |||
| 383 | EXPORT_SYMBOL_GPL(_kvmppc_restore_tm_pr); | ||
| 384 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ | ||
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index d0dd35d582da..559a12b6184d 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -4429,16 +4429,14 @@ static int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) | |||
| 4429 | goto out_vmcs; | 4429 | goto out_vmcs; |
| 4430 | memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE); | 4430 | memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE); |
| 4431 | 4431 | ||
| 4432 | #if IS_ENABLED(CONFIG_HYPERV) | 4432 | if (IS_ENABLED(CONFIG_HYPERV) && |
| 4433 | if (static_branch_unlikely(&enable_evmcs) && | 4433 | static_branch_unlikely(&enable_evmcs) && |
| 4434 | (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) { | 4434 | (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) { |
| 4435 | struct hv_enlightened_vmcs *evmcs = | 4435 | struct hv_enlightened_vmcs *evmcs = |
| 4436 | (struct hv_enlightened_vmcs *)loaded_vmcs->vmcs; | 4436 | (struct hv_enlightened_vmcs *)loaded_vmcs->vmcs; |
| 4437 | 4437 | ||
| 4438 | evmcs->hv_enlightenments_control.msr_bitmap = 1; | 4438 | evmcs->hv_enlightenments_control.msr_bitmap = 1; |
| 4439 | } | 4439 | } |
| 4440 | #endif | ||
| 4441 | |||
| 4442 | } | 4440 | } |
| 4443 | return 0; | 4441 | return 0; |
| 4444 | 4442 | ||
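The vmx.c hunk replaces a preprocessor #if with IS_ENABLED(), so the Hyper-V branch is parsed and type-checked on every configuration and removed by dead-code elimination rather than by the preprocessor. A toy user-space analogue of the pattern; the IS_ENABLED stand-in below is a simplification of the kernel macro:

    #include <stdio.h>

    #define CONFIG_FOO 0
    #define IS_ENABLED(cfg) (cfg)   /* simplified stand-in */

    int main(void)
    {
        /* unlike "#if CONFIG_FOO", this body must always compile,
         * catching bit-rot in disabled configurations */
        if (IS_ENABLED(CONFIG_FOO))
            printf("feature-specific path\n");
        return 0;
    }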
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6bcecc325e7e..0046aa70205a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
| @@ -8567,7 +8567,7 @@ int kvm_arch_hardware_setup(void) | |||
| 8567 | /* | 8567 | /* |
| 8568 | * Make sure the user can only configure tsc_khz values that | 8568 | * Make sure the user can only configure tsc_khz values that |
| 8569 | * fit into a signed integer. | 8569 | * fit into a signed integer. |
| 8570 | * A min value is not calculated needed because it will always | 8570 | * A min value is not calculated because it will always |
| 8571 | * be 1 on all machines. | 8571 | * be 1 on all machines. |
| 8572 | */ | 8572 | */ |
| 8573 | u64 max = min(0x7fffffffULL, | 8573 | u64 max = min(0x7fffffffULL, |

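The final x86.c hunk is truncated mid-expression here, but the comment pins down the intent: the user-configurable tsc_khz ceiling must fit in a signed 32-bit integer, and no floor is computed because it is always 1. A toy clamp under that reading; hw_bound stands in for the elided second operand of min():

    #include <stdint.h>

    /* cap a hardware-derived tsc_khz bound at INT32_MAX */
    static uint64_t max_tsc_khz(uint64_t hw_bound)
    {
        return hw_bound < 0x7fffffffULL ? hw_bound : 0x7fffffffULL;
    }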