author     Linus Torvalds <torvalds@linux-foundation.org>  2014-01-31 11:37:32 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-01-31 11:37:32 -0500
commit     e2a0f813e0d53014b78aae76f0359c8a41f05eeb (patch)
tree       08cbd30d7e407e8d1009338aeda56e895afb6d9d
parent     e30b82bbe098d9514ed0e9b5ec372daf7429e0f7 (diff)
parent     b73117c49364551ff789db7c424a115ac5b77850 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull more KVM updates from Paolo Bonzini:
 "Second batch of KVM updates. Some minor x86 fixes, two s390 guest
  features that need some handling in the host, and all the PPC changes.
  The PPC changes include support for little-endian guests and
  enablement for new POWER8 features"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (45 commits)
  x86, kvm: correctly access the KVM_CPUID_FEATURES leaf at 0x40000101
  x86, kvm: cache the base of the KVM cpuid leaves
  kvm: x86: move KVM_CAP_HYPERV_TIME outside #ifdef
  KVM: PPC: Book3S PR: Cope with doorbell interrupts
  KVM: PPC: Book3S HV: Add software abort codes for transactional memory
  KVM: PPC: Book3S HV: Add new state for transactional memory
  powerpc/Kconfig: Make TM select VSX and VMX
  KVM: PPC: Book3S HV: Basic little-endian guest support
  KVM: PPC: Book3S HV: Add support for DABRX register on POWER7
  KVM: PPC: Book3S HV: Prepare for host using hypervisor doorbells
  KVM: PPC: Book3S HV: Handle new LPCR bits on POWER8
  KVM: PPC: Book3S HV: Handle guest using doorbells for IPIs
  KVM: PPC: Book3S HV: Consolidate code that checks reason for wake from nap
  KVM: PPC: Book3S HV: Implement architecture compatibility modes for POWER8
  KVM: PPC: Book3S HV: Add handler for HV facility unavailable
  KVM: PPC: Book3S HV: Flush the correct number of TLB sets on POWER8
  KVM: PPC: Book3S HV: Context-switch new POWER8 SPRs
  KVM: PPC: Book3S HV: Align physical and virtual CPU thread numbers
  KVM: PPC: Book3S HV: Don't set DABR on POWER8
  kvm/ppc: IRQ disabling cleanup
  ...
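As an illustration of the newly documented KVM_REG_PPC_DABRX one-reg id (see the api.txt and uapi/asm/kvm.h hunks below), a minimal userspace sketch for reading it through the generic KVM_GET_ONE_REG ioctl could look like the following; the vcpu_fd is assumed to be an already-created KVM vCPU file descriptor, and this snippet is illustrative only, not part of this merge:

    /* Hypothetical helper: read the DABRX value of a vCPU. */
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>   /* pulls in asm/kvm.h with KVM_REG_PPC_DABRX on powerpc */

    static int read_dabrx(int vcpu_fd, uint32_t *dabrx)
    {
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_PPC_DABRX,     /* 32-bit one-reg id added by this series */
                    .addr = (uintptr_t)dabrx,      /* kernel copies the register value here */
            };

            return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }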
-rw-r--r--  Documentation/virtual/kvm/api.txt | 1
-rw-r--r--  arch/powerpc/Kconfig | 2
-rw-r--r--  arch/powerpc/include/asm/epapr_hcalls.h | 111
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h | 3
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h | 27
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h | 1
-rw-r--r--  arch/powerpc/include/asm/kvm_booke.h | 6
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 61
-rw-r--r--  arch/powerpc/include/asm/kvm_para.h | 80
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h | 13
-rw-r--r--  arch/powerpc/include/asm/pgtable.h | 21
-rw-r--r--  arch/powerpc/include/asm/reg.h | 43
-rw-r--r--  arch/powerpc/include/asm/switch_to.h | 2
-rw-r--r--  arch/powerpc/include/uapi/asm/kvm.h | 3
-rw-r--r--  arch/powerpc/include/uapi/asm/tm.h | 2
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 50
-rw-r--r--  arch/powerpc/kernel/kvm.c | 41
-rw-r--r--  arch/powerpc/kvm/44x.c | 4
-rw-r--r--  arch/powerpc/kvm/book3s.c | 46
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu_host.c | 5
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c | 4
-rw-r--r--  arch/powerpc/kvm/book3s_exports.c | 4
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 319
-rw-r--r--  arch/powerpc/kvm/book3s_hv_interrupts.S | 8
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c | 8
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 1191
-rw-r--r--  arch/powerpc/kvm/book3s_paired_singles.c | 169
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c | 155
-rw-r--r--  arch/powerpc/kvm/book3s_rmhandlers.S | 47
-rw-r--r--  arch/powerpc/kvm/book3s_segment.S | 2
-rw-r--r--  arch/powerpc/kvm/book3s_xics.c | 4
-rw-r--r--  arch/powerpc/kvm/booke.c | 44
-rw-r--r--  arch/powerpc/kvm/booke.h | 5
-rw-r--r--  arch/powerpc/kvm/bookehv_interrupts.S | 11
-rw-r--r--  arch/powerpc/kvm/e500.c | 4
-rw-r--r--  arch/powerpc/kvm/e500.h | 8
-rw-r--r--  arch/powerpc/kvm/e500_mmu.c | 2
-rw-r--r--  arch/powerpc/kvm/e500_mmu_host.c | 59
-rw-r--r--  arch/powerpc/kvm/e500mc.c | 4
-rw-r--r--  arch/powerpc/kvm/emulate.c | 1
-rw-r--r--  arch/powerpc/kvm/mpic.c | 1
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 58
-rw-r--r--  arch/s390/include/asm/kvm_host.h | 15
-rw-r--r--  arch/s390/kvm/intercept.c | 11
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 17
-rw-r--r--  arch/s390/kvm/kvm-s390.h | 6
-rw-r--r--  arch/x86/include/asm/kvm_para.h | 33
-rw-r--r--  arch/x86/kernel/kvm.c | 32
-rw-r--r--  arch/x86/kvm/cpuid.h | 8
-rw-r--r--  arch/x86/kvm/lapic.h | 2
-rw-r--r--  arch/x86/kvm/vmx.c | 9
-rw-r--r--  arch/x86/kvm/x86.c | 41
-rw-r--r--  drivers/s390/kvm/virtio_ccw.c | 11
53 files changed, 1704 insertions, 1111 deletions
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 366bf4b47ef4..6cd63a9010fb 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1838,6 +1838,7 @@ registers, find a list below:
 PPC   | KVM_REG_PPC_LPCR | 64
 PPC   | KVM_REG_PPC_PPR | 64
 PPC   | KVM_REG_PPC_ARCH_COMPAT | 32
+PPC   | KVM_REG_PPC_DABRX | 32
 PPC   | KVM_REG_PPC_TM_GPR0 | 64
 ...
 PPC   | KVM_REG_PPC_TM_GPR31 | 64
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a5e5d2ec380b..957bf344c0f5 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -342,6 +342,8 @@ config PPC_TRANSACTIONAL_MEM
         bool "Transactional Memory support for POWERPC"
         depends on PPC_BOOK3S_64
         depends on SMP
+        select ALTIVEC
+        select VSX
         default n
         ---help---
           Support user-mode Transactional Memory on POWERPC.
diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h
index 86b0ac79990c..334459ad145b 100644
--- a/arch/powerpc/include/asm/epapr_hcalls.h
+++ b/arch/powerpc/include/asm/epapr_hcalls.h
@@ -460,5 +460,116 @@ static inline unsigned int ev_idle(void)
 
         return r3;
 }
+
+#ifdef CONFIG_EPAPR_PARAVIRT
+static inline unsigned long epapr_hypercall(unsigned long *in,
+                                            unsigned long *out,
+                                            unsigned long nr)
+{
+        unsigned long register r0 asm("r0");
+        unsigned long register r3 asm("r3") = in[0];
+        unsigned long register r4 asm("r4") = in[1];
+        unsigned long register r5 asm("r5") = in[2];
+        unsigned long register r6 asm("r6") = in[3];
+        unsigned long register r7 asm("r7") = in[4];
+        unsigned long register r8 asm("r8") = in[5];
+        unsigned long register r9 asm("r9") = in[6];
+        unsigned long register r10 asm("r10") = in[7];
+        unsigned long register r11 asm("r11") = nr;
+        unsigned long register r12 asm("r12");
+
+        asm volatile("bl        epapr_hypercall_start"
+                     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
+                       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
+                       "=r"(r12)
+                     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
+                       "r"(r9), "r"(r10), "r"(r11)
+                     : "memory", "cc", "xer", "ctr", "lr");
+
+        out[0] = r4;
+        out[1] = r5;
+        out[2] = r6;
+        out[3] = r7;
+        out[4] = r8;
+        out[5] = r9;
+        out[6] = r10;
+        out[7] = r11;
+
+        return r3;
+}
+#else
+static unsigned long epapr_hypercall(unsigned long *in,
+                                     unsigned long *out,
+                                     unsigned long nr)
+{
+        return EV_UNIMPLEMENTED;
+}
+#endif
+
+static inline long epapr_hypercall0_1(unsigned int nr, unsigned long *r2)
+{
+        unsigned long in[8];
+        unsigned long out[8];
+        unsigned long r;
+
+        r = epapr_hypercall(in, out, nr);
+        *r2 = out[0];
+
+        return r;
+}
+
+static inline long epapr_hypercall0(unsigned int nr)
+{
+        unsigned long in[8];
+        unsigned long out[8];
+
+        return epapr_hypercall(in, out, nr);
+}
+
+static inline long epapr_hypercall1(unsigned int nr, unsigned long p1)
+{
+        unsigned long in[8];
+        unsigned long out[8];
+
+        in[0] = p1;
+        return epapr_hypercall(in, out, nr);
+}
+
+static inline long epapr_hypercall2(unsigned int nr, unsigned long p1,
+                                    unsigned long p2)
+{
+        unsigned long in[8];
+        unsigned long out[8];
+
+        in[0] = p1;
+        in[1] = p2;
+        return epapr_hypercall(in, out, nr);
+}
+
+static inline long epapr_hypercall3(unsigned int nr, unsigned long p1,
+                                    unsigned long p2, unsigned long p3)
+{
+        unsigned long in[8];
+        unsigned long out[8];
+
+        in[0] = p1;
+        in[1] = p2;
+        in[2] = p3;
+        return epapr_hypercall(in, out, nr);
+}
+
+static inline long epapr_hypercall4(unsigned int nr, unsigned long p1,
+                                    unsigned long p2, unsigned long p3,
+                                    unsigned long p4)
+{
+        unsigned long in[8];
+        unsigned long out[8];
+
+        in[0] = p1;
+        in[1] = p2;
+        in[2] = p3;
+        in[3] = p4;
+        return epapr_hypercall(in, out, nr);
+}
 #endif /* !__ASSEMBLY__ */
 #endif /* _EPAPR_HCALLS_H */
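For reference, a minimal usage sketch of the epapr_hypercall*() helpers added above; it mirrors the pattern the kvm_para.h hunk below adopts in-tree, and the surrounding caller context is assumed rather than taken from this diff:

    /* Hedged example: query the KVM paravirt feature bitmap via ePAPR. */
    static inline unsigned int example_query_kvm_features(void)
    {
            unsigned long r;

            /* KVM_HCALL_TOKEN() turns the KVM hcall number into an ePAPR token. */
            if (epapr_hypercall0_1(KVM_HCALL_TOKEN(KVM_HC_FEATURES), &r))
                    return 0;

            return r;
    }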
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 1503d8c7c41b..19eb74a95b59 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -92,14 +92,17 @@
 #define BOOK3S_INTERRUPT_FP_UNAVAIL     0x800
 #define BOOK3S_INTERRUPT_DECREMENTER    0x900
 #define BOOK3S_INTERRUPT_HV_DECREMENTER 0x980
+#define BOOK3S_INTERRUPT_DOORBELL       0xa00
 #define BOOK3S_INTERRUPT_SYSCALL        0xc00
 #define BOOK3S_INTERRUPT_TRACE          0xd00
 #define BOOK3S_INTERRUPT_H_DATA_STORAGE 0xe00
 #define BOOK3S_INTERRUPT_H_INST_STORAGE 0xe20
 #define BOOK3S_INTERRUPT_H_EMUL_ASSIST  0xe40
+#define BOOK3S_INTERRUPT_H_DOORBELL     0xe80
 #define BOOK3S_INTERRUPT_PERFMON        0xf00
 #define BOOK3S_INTERRUPT_ALTIVEC        0xf20
 #define BOOK3S_INTERRUPT_VSX            0xf40
+#define BOOK3S_INTERRUPT_H_FAC_UNAVAIL  0xf80
 
 #define BOOK3S_IRQPRIO_SYSTEM_RESET     0
 #define BOOK3S_IRQPRIO_DATA_SEGMENT     1
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index bc23b1ba7980..83851aabfdc8 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -186,9 +186,6 @@ extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
 
 extern void kvmppc_entry_trampoline(void);
 extern void kvmppc_hv_entry_trampoline(void);
-extern void kvmppc_load_up_fpu(void);
-extern void kvmppc_load_up_altivec(void);
-extern void kvmppc_load_up_vsx(void);
 extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
 extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
 extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
@@ -271,16 +268,25 @@ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
         return vcpu->arch.pc;
 }
 
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
 {
-        ulong pc = kvmppc_get_pc(vcpu);
+        return (vcpu->arch.shared->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
+}
 
+static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc)
+{
         /* Load the instruction manually if it failed to do so in the
          * exit path */
         if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
                 kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
 
-        return vcpu->arch.last_inst;
+        return kvmppc_need_byteswap(vcpu) ? swab32(vcpu->arch.last_inst) :
+                vcpu->arch.last_inst;
+}
+
+static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+{
+        return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu));
 }
 
 /*
@@ -290,14 +296,7 @@ static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
  */
 static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
 {
-        ulong pc = kvmppc_get_pc(vcpu) - 4;
-
-        /* Load the instruction manually if it failed to do so in the
-         * exit path */
-        if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
-                kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
-
-        return vcpu->arch.last_inst;
+        return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu) - 4);
 }
 
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 192917d2239c..f3a91dc02c98 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -88,6 +88,7 @@ struct kvmppc_host_state {
         u8 hwthread_req;
         u8 hwthread_state;
         u8 host_ipi;
+        u8 ptid;
         struct kvm_vcpu *kvm_vcpu;
         struct kvmppc_vcore *kvm_vcore;
         unsigned long xics_phys;
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index dd8f61510dfd..80d46b5a7efb 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -63,6 +63,12 @@ static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
         return vcpu->arch.xer;
 }
 
+static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
+{
+        /* XXX Would need to check TLB entry */
+        return false;
+}
+
 static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
 {
         return vcpu->arch.last_inst;
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 237d1d25b448..1eaea2dea174 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -288,6 +288,7 @@ struct kvmppc_vcore {
         int n_woken;
         int nap_count;
         int napping_threads;
+        int first_vcpuid;
         u16 pcpu;
         u16 last_cpu;
         u8 vcore_state;
@@ -298,10 +299,12 @@ struct kvmppc_vcore {
         u64 stolen_tb;
         u64 preempt_tb;
         struct kvm_vcpu *runner;
+        struct kvm *kvm;
         u64 tb_offset;          /* guest timebase - host timebase */
         ulong lpcr;
         u32 arch_compat;
         ulong pcr;
+        ulong dpdes;            /* doorbell state (POWER8) */
 };
 
 #define VCORE_ENTRY_COUNT(vc)   ((vc)->entry_exit_count & 0xff)
@@ -410,8 +413,7 @@ struct kvm_vcpu_arch {
 
         ulong gpr[32];
 
-        u64 fpr[32];
-        u64 fpscr;
+        struct thread_fp_state fp;
 
 #ifdef CONFIG_SPE
         ulong evr[32];
@@ -420,12 +422,7 @@ struct kvm_vcpu_arch {
         u64 acc;
 #endif
 #ifdef CONFIG_ALTIVEC
-        vector128 vr[32];
-        vector128 vscr;
-#endif
-
-#ifdef CONFIG_VSX
-        u64 vsr[64];
+        struct thread_vr_state vr;
 #endif
 
 #ifdef CONFIG_KVM_BOOKE_HV
@@ -452,6 +449,7 @@ struct kvm_vcpu_arch {
         ulong pc;
         ulong ctr;
         ulong lr;
+        ulong tar;
 
         ulong xer;
         u32 cr;
@@ -461,13 +459,30 @@ struct kvm_vcpu_arch {
         ulong guest_owned_ext;
         ulong purr;
         ulong spurr;
+        ulong ic;
+        ulong vtb;
         ulong dscr;
         ulong amr;
         ulong uamor;
+        ulong iamr;
         u32 ctrl;
+        u32 dabrx;
         ulong dabr;
+        ulong dawr;
+        ulong dawrx;
+        ulong ciabr;
         ulong cfar;
         ulong ppr;
+        ulong pspb;
+        ulong fscr;
+        ulong ebbhr;
+        ulong ebbrr;
+        ulong bescr;
+        ulong csigr;
+        ulong tacr;
+        ulong tcscr;
+        ulong acop;
+        ulong wort;
         ulong shadow_srr1;
 #endif
         u32 vrsave; /* also USPRG0 */
@@ -502,10 +517,33 @@ struct kvm_vcpu_arch {
         u32 ccr1;
         u32 dbsr;
 
-        u64 mmcr[3];
+        u64 mmcr[5];
         u32 pmc[8];
+        u32 spmc[2];
         u64 siar;
         u64 sdar;
+        u64 sier;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+        u64 tfhar;
+        u64 texasr;
+        u64 tfiar;
+
+        u32 cr_tm;
+        u64 lr_tm;
+        u64 ctr_tm;
+        u64 amr_tm;
+        u64 ppr_tm;
+        u64 dscr_tm;
+        u64 tar_tm;
+
+        ulong gpr_tm[32];
+
+        struct thread_fp_state fp_tm;
+
+        struct thread_vr_state vr_tm;
+        u32 vrsave_tm; /* also USPRG0 */
+
+#endif
 
 #ifdef CONFIG_KVM_EXIT_TIMING
         struct mutex exit_timing_lock;
@@ -546,6 +584,7 @@ struct kvm_vcpu_arch {
 #endif
         gpa_t paddr_accessed;
         gva_t vaddr_accessed;
+        pgd_t *pgdir;
 
         u8 io_gpr; /* GPR used as IO source/target */
         u8 mmio_is_bigendian;
@@ -603,7 +642,6 @@ struct kvm_vcpu_arch {
         struct list_head run_list;
         struct task_struct *run_task;
         struct kvm_run *kvm_run;
-        pgd_t *pgdir;
 
         spinlock_t vpa_update_lock;
         struct kvmppc_vpa vpa;
@@ -616,9 +654,12 @@ struct kvm_vcpu_arch {
         spinlock_t tbacct_lock;
         u64 busy_stolen;
         u64 busy_preempt;
+        unsigned long intr_msr;
 #endif
 };
 
+#define VCPU_FPR(vcpu, i)       (vcpu)->arch.fp.fpr[i][TS_FPROFFSET]
+
 /* Values for vcpu->arch.state */
 #define KVMPPC_VCPU_NOTREADY            0
 #define KVMPPC_VCPU_RUNNABLE            1
diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h
index 2b119654b4c1..336a91acb8b1 100644
--- a/arch/powerpc/include/asm/kvm_para.h
+++ b/arch/powerpc/include/asm/kvm_para.h
@@ -39,10 +39,6 @@ static inline int kvm_para_available(void)
         return 1;
 }
 
-extern unsigned long kvm_hypercall(unsigned long *in,
-                                   unsigned long *out,
-                                   unsigned long nr);
-
 #else
 
 static inline int kvm_para_available(void)
@@ -50,82 +46,8 @@ static inline int kvm_para_available(void)
         return 0;
 }
 
-static unsigned long kvm_hypercall(unsigned long *in,
-                                   unsigned long *out,
-                                   unsigned long nr)
-{
-        return EV_UNIMPLEMENTED;
-}
-
 #endif
 
-static inline long kvm_hypercall0_1(unsigned int nr, unsigned long *r2)
-{
-        unsigned long in[8];
-        unsigned long out[8];
-        unsigned long r;
-
-        r = kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
-        *r2 = out[0];
-
-        return r;
-}
-
-static inline long kvm_hypercall0(unsigned int nr)
-{
-        unsigned long in[8];
-        unsigned long out[8];
-
-        return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
-}
-
-static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
-{
-        unsigned long in[8];
-        unsigned long out[8];
-
-        in[0] = p1;
-        return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
-}
-
-static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
-                                  unsigned long p2)
-{
-        unsigned long in[8];
-        unsigned long out[8];
-
-        in[0] = p1;
-        in[1] = p2;
-        return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
-}
-
-static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
-                                  unsigned long p2, unsigned long p3)
-{
-        unsigned long in[8];
-        unsigned long out[8];
-
-        in[0] = p1;
-        in[1] = p2;
-        in[2] = p3;
-        return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
-}
-
-static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
-                                  unsigned long p2, unsigned long p3,
-                                  unsigned long p4)
-{
-        unsigned long in[8];
-        unsigned long out[8];
-
-        in[0] = p1;
-        in[1] = p2;
-        in[2] = p3;
-        in[3] = p4;
-        return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
-}
-
-
 static inline unsigned int kvm_arch_para_features(void)
 {
         unsigned long r;
@@ -133,7 +55,7 @@ static inline unsigned int kvm_arch_para_features(void)
         if (!kvm_para_available())
                 return 0;
 
-        if(kvm_hypercall0_1(KVM_HC_FEATURES, &r))
+        if(epapr_hypercall0_1(KVM_HCALL_TOKEN(KVM_HC_FEATURES), &r))
                 return 0;
 
         return r;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index c8317fbf92c4..fcd53f0d34ba 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -54,12 +54,13 @@ extern void kvmppc_handler_highmem(void);
 extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
 extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                               unsigned int rt, unsigned int bytes,
-                              int is_bigendian);
+                              int is_default_endian);
 extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                unsigned int rt, unsigned int bytes,
-                               int is_bigendian);
+                               int is_default_endian);
 extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                               u64 val, unsigned int bytes, int is_bigendian);
+                               u64 val, unsigned int bytes,
+                               int is_default_endian);
 
 extern int kvmppc_emulate_instruction(struct kvm_run *run,
                                       struct kvm_vcpu *vcpu);
@@ -455,6 +456,12 @@ static inline void kvmppc_fix_ee_before_entry(void)
         trace_hardirqs_on();
 
 #ifdef CONFIG_PPC64
+        /*
+         * To avoid races, the caller must have gone directly from having
+         * interrupts fully-enabled to hard-disabled.
+         */
+        WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);
+
         /* Only need to enable IRQs by hard enabling them after this */
         local_paca->irq_happened = 0;
         local_paca->soft_enabled = 1;
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index b999ca318985..f83b6f3e1b39 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -287,6 +287,27 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 #endif
 pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
                                  unsigned *shift);
+
+static inline pte_t *lookup_linux_ptep(pgd_t *pgdir, unsigned long hva,
+                                       unsigned long *pte_sizep)
+{
+        pte_t *ptep;
+        unsigned long ps = *pte_sizep;
+        unsigned int shift;
+
+        ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
+        if (!ptep)
+                return NULL;
+        if (shift)
+                *pte_sizep = 1ul << shift;
+        else
+                *pte_sizep = PAGE_SIZE;
+
+        if (ps > *pte_sizep)
+                return NULL;
+
+        return ptep;
+}
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 62b114e079cf..90c06ec6eff5 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -223,17 +223,26 @@
 #define   CTRL_TE       0x00c00000      /* thread enable */
 #define   CTRL_RUNLATCH 0x1
 #define SPRN_DAWR       0xB4
+#define SPRN_CIABR      0xBB
+#define   CIABR_PRIV            0x3
+#define   CIABR_PRIV_USER       1
+#define   CIABR_PRIV_SUPER      2
+#define   CIABR_PRIV_HYPER      3
 #define SPRN_DAWRX      0xBC
-#define   DAWRX_USER    (1UL << 0)
-#define   DAWRX_KERNEL  (1UL << 1)
-#define   DAWRX_HYP     (1UL << 2)
+#define   DAWRX_USER    __MASK(0)
+#define   DAWRX_KERNEL  __MASK(1)
+#define   DAWRX_HYP     __MASK(2)
+#define   DAWRX_WTI     __MASK(3)
+#define   DAWRX_WT      __MASK(4)
+#define   DAWRX_DR      __MASK(5)
+#define   DAWRX_DW      __MASK(6)
 #define SPRN_DABR       0x3F5   /* Data Address Breakpoint Register */
 #define SPRN_DABR2      0x13D   /* e300 */
 #define SPRN_DABRX      0x3F7   /* Data Address Breakpoint Register Extension */
-#define   DABRX_USER    (1UL << 0)
-#define   DABRX_KERNEL  (1UL << 1)
-#define   DABRX_HYP     (1UL << 2)
-#define   DABRX_BTI     (1UL << 3)
+#define   DABRX_USER    __MASK(0)
+#define   DABRX_KERNEL  __MASK(1)
+#define   DABRX_HYP     __MASK(2)
+#define   DABRX_BTI     __MASK(3)
 #define   DABRX_ALL     (DABRX_BTI | DABRX_HYP | DABRX_KERNEL | DABRX_USER)
 #define SPRN_DAR        0x013   /* Data Address Register */
 #define SPRN_DBCR       0x136   /* e300 Data Breakpoint Control Reg */
@@ -260,6 +269,8 @@
 #define SPRN_HRMOR      0x139   /* Real mode offset register */
 #define SPRN_HSRR0      0x13A   /* Hypervisor Save/Restore 0 */
 #define SPRN_HSRR1      0x13B   /* Hypervisor Save/Restore 1 */
+#define SPRN_IC         0x350   /* Virtual Instruction Count */
+#define SPRN_VTB        0x351   /* Virtual Time Base */
 /* HFSCR and FSCR bit numbers are the same */
 #define FSCR_TAR_LG     8       /* Enable Target Address Register */
 #define FSCR_EBB_LG     7       /* Enable Event Based Branching */
@@ -298,9 +309,13 @@
 #define   LPCR_RMLS    0x1C000000      /* impl dependent rmo limit sel */
 #define   LPCR_RMLS_SH  (63-37)
 #define   LPCR_ILE     0x02000000      /* !HV irqs set MSR:LE */
+#define   LPCR_AIL     0x01800000      /* Alternate interrupt location */
 #define   LPCR_AIL_0   0x00000000      /* MMU off exception offset 0x0 */
 #define   LPCR_AIL_3   0x01800000      /* MMU on exception offset 0xc00...4xxx */
-#define   LPCR_PECE    0x00007000      /* powersave exit cause enable */
+#define   LPCR_ONL     0x00040000      /* online - PURR/SPURR count */
+#define   LPCR_PECE    0x0001f000      /* powersave exit cause enable */
+#define   LPCR_PECEDP  0x00010000      /* directed priv dbells cause exit */
+#define   LPCR_PECEDH  0x00008000      /* directed hyp dbells cause exit */
 #define   LPCR_PECE0   0x00004000      /* ext. exceptions can cause exit */
 #define   LPCR_PECE1   0x00002000      /* decrementer can cause exit */
 #define   LPCR_PECE2   0x00001000      /* machine check etc can cause exit */
@@ -322,6 +337,8 @@
 #define SPRN_PCR        0x152   /* Processor compatibility register */
 #define   PCR_VEC_DIS   (1ul << (63-0)) /* Vec. disable (bit NA since POWER8) */
 #define   PCR_VSX_DIS   (1ul << (63-1)) /* VSX disable (bit NA since POWER8) */
+#define   PCR_TM_DIS    (1ul << (63-2)) /* Trans. memory disable (POWER8) */
+#define   PCR_ARCH_206  0x4             /* Architecture 2.06 */
 #define   PCR_ARCH_205  0x2             /* Architecture 2.05 */
 #define SPRN_HEIR       0x153   /* Hypervisor Emulated Instruction Register */
 #define SPRN_TLBINDEXR  0x154   /* P7 TLB control register */
@@ -368,6 +385,8 @@
 #define   DER_EBRKE     0x00000002      /* External Breakpoint Interrupt */
 #define   DER_DPIE      0x00000001      /* Dev. Port Nonmaskable Request */
 #define SPRN_DMISS      0x3D0           /* Data TLB Miss Register */
+#define SPRN_DHDES      0x0B1           /* Directed Hyp. Doorbell Exc. State */
+#define SPRN_DPDES      0x0B0           /* Directed Priv. Doorbell Exc. State */
 #define SPRN_EAR        0x11A           /* External Address Register */
 #define SPRN_HASH1      0x3D2           /* Primary Hash Address Register */
 #define SPRN_HASH2      0x3D3           /* Secondary Hash Address Resgister */
@@ -427,6 +446,7 @@
 #define SPRN_IABR       0x3F2   /* Instruction Address Breakpoint Register */
 #define SPRN_IABR2      0x3FA           /* 83xx */
 #define SPRN_IBCR       0x135           /* 83xx Insn Breakpoint Control Reg */
+#define SPRN_IAMR       0x03D   /* Instr. Authority Mask Reg */
 #define SPRN_HID4       0x3F4           /* 970 HID4 */
 #define  HID4_LPES0     (1ul << (63-0)) /* LPAR env. sel. bit 0 */
 #define  HID4_RMLS2_SH  (63 - 2)        /* Real mode limit bottom 2 bits */
@@ -541,6 +561,7 @@
 #define SPRN_PIR        0x3FF   /* Processor Identification Register */
 #endif
 #define SPRN_TIR        0x1BE   /* Thread Identification Register */
+#define SPRN_PSPB       0x09F   /* Problem State Priority Boost reg */
 #define SPRN_PTEHI      0x3D5   /* 981 7450 PTE HI word (S/W TLB load) */
 #define SPRN_PTELO      0x3D6   /* 982 7450 PTE LO word (S/W TLB load) */
 #define SPRN_PURR       0x135   /* Processor Utilization of Resources Reg */
@@ -682,6 +703,7 @@
 #define SPRN_EBBHR      804     /* Event based branch handler register */
 #define SPRN_EBBRR      805     /* Event based branch return register */
 #define SPRN_BESCR      806     /* Branch event status and control register */
+#define SPRN_WORT       895     /* Workload optimization register - thread */
 
 #define SPRN_PMC1       787
 #define SPRN_PMC2       788
@@ -698,6 +720,11 @@
 #define   SIER_SIHV     0x1000000       /* Sampled MSR_HV */
 #define   SIER_SIAR_VALID       0x0400000       /* SIAR contents valid */
 #define   SIER_SDAR_VALID       0x0200000       /* SDAR contents valid */
+#define SPRN_TACR       888
+#define SPRN_TCSCR      889
+#define SPRN_CSIGR      890
+#define SPRN_SPMC1      892
+#define SPRN_SPMC2      893
 
 /* When EBB is enabled, some of MMCR0/MMCR2/SIER are user accessible */
 #define MMCR0_USER_MASK (MMCR0_FC | MMCR0_PMXE | MMCR0_PMAO)
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index aace90547614..0e83e7d8c73f 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -25,10 +25,8 @@ static inline void save_tar(struct thread_struct *prev)
 static inline void save_tar(struct thread_struct *prev) {}
 #endif
 
-extern void load_up_fpu(void);
 extern void enable_kernel_fp(void);
 extern void enable_kernel_altivec(void);
-extern void load_up_altivec(struct task_struct *);
 extern int emulate_altivec(struct pt_regs *);
 extern void __giveup_vsx(struct task_struct *);
 extern void giveup_vsx(struct task_struct *);
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 6836ec79a830..a6665be4f3ab 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -545,6 +545,7 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_TCSCR       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1)
 #define KVM_REG_PPC_PID         (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2)
 #define KVM_REG_PPC_ACOP        (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3)
+#define KVM_REG_PPC_WORT        (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb4)
 
 #define KVM_REG_PPC_VRSAVE      (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
 #define KVM_REG_PPC_LPCR        (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
@@ -553,6 +554,8 @@ struct kvm_get_htab_header {
 /* Architecture compatibility level */
 #define KVM_REG_PPC_ARCH_COMPAT (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7)
 
+#define KVM_REG_PPC_DABRX       (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb8)
+
 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
  */
diff --git a/arch/powerpc/include/uapi/asm/tm.h b/arch/powerpc/include/uapi/asm/tm.h
index 85059a00f560..5d836b7c1176 100644
--- a/arch/powerpc/include/uapi/asm/tm.h
+++ b/arch/powerpc/include/uapi/asm/tm.h
@@ -6,6 +6,8 @@
  * the failure is persistent.  PAPR saves 0xff-0xe0 for the hypervisor.
  */
 #define TM_CAUSE_PERSISTENT     0x01
+#define TM_CAUSE_KVM_RESCHED    0xe0  /* From PAPR */
+#define TM_CAUSE_KVM_FAC_UNAV   0xe2  /* From PAPR */
 #define TM_CAUSE_RESCHED        0xde
 #define TM_CAUSE_TLBI           0xdc
 #define TM_CAUSE_FAC_UNAV       0xda
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 8d1d94d9c649..b5aacf72ae6f 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -438,18 +438,14 @@ int main(void)
         DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid));
         DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
         DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
-        DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
-        DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fpscr));
+        DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fp.fpr));
 #ifdef CONFIG_ALTIVEC
-        DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr));
-        DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vscr));
-#endif
-#ifdef CONFIG_VSX
-        DEFINE(VCPU_VSRS, offsetof(struct kvm_vcpu, arch.vsr));
+        DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr.vr));
 #endif
         DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
         DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
         DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
+        DEFINE(VCPU_TAR, offsetof(struct kvm_vcpu, arch.tar));
         DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
         DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -497,16 +493,24 @@ int main(void)
         DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
         DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
         DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
+        DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
 #endif
 #ifdef CONFIG_PPC_BOOK3S
         DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
         DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
         DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
+        DEFINE(VCPU_IC, offsetof(struct kvm_vcpu, arch.ic));
+        DEFINE(VCPU_VTB, offsetof(struct kvm_vcpu, arch.vtb));
         DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr));
         DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr));
         DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor));
+        DEFINE(VCPU_IAMR, offsetof(struct kvm_vcpu, arch.iamr));
         DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl));
         DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr));
+        DEFINE(VCPU_DABRX, offsetof(struct kvm_vcpu, arch.dabrx));
+        DEFINE(VCPU_DAWR, offsetof(struct kvm_vcpu, arch.dawr));
+        DEFINE(VCPU_DAWRX, offsetof(struct kvm_vcpu, arch.dawrx));
+        DEFINE(VCPU_CIABR, offsetof(struct kvm_vcpu, arch.ciabr));
         DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
         DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec));
         DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires));
@@ -515,8 +519,10 @@ int main(void)
         DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
         DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
         DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
+        DEFINE(VCPU_SPMC, offsetof(struct kvm_vcpu, arch.spmc));
         DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar));
         DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar));
+        DEFINE(VCPU_SIER, offsetof(struct kvm_vcpu, arch.sier));
         DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
         DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
         DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
@@ -524,20 +530,47 @@ int main(void)
         DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
         DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
         DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
-        DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
         DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
         DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
+        DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr));
+        DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb));
+        DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr));
+        DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr));
+        DEFINE(VCPU_BESCR, offsetof(struct kvm_vcpu, arch.bescr));
+        DEFINE(VCPU_CSIGR, offsetof(struct kvm_vcpu, arch.csigr));
+        DEFINE(VCPU_TACR, offsetof(struct kvm_vcpu, arch.tacr));
+        DEFINE(VCPU_TCSCR, offsetof(struct kvm_vcpu, arch.tcscr));
+        DEFINE(VCPU_ACOP, offsetof(struct kvm_vcpu, arch.acop));
+        DEFINE(VCPU_WORT, offsetof(struct kvm_vcpu, arch.wort));
         DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
         DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
         DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
         DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
         DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
+        DEFINE(VCORE_KVM, offsetof(struct kvmppc_vcore, kvm));
         DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset));
         DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr));
         DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr));
+        DEFINE(VCORE_DPDES, offsetof(struct kvmppc_vcore, dpdes));
         DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
         DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
         DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+        DEFINE(VCPU_TFHAR, offsetof(struct kvm_vcpu, arch.tfhar));
+        DEFINE(VCPU_TFIAR, offsetof(struct kvm_vcpu, arch.tfiar));
+        DEFINE(VCPU_TEXASR, offsetof(struct kvm_vcpu, arch.texasr));
+        DEFINE(VCPU_GPR_TM, offsetof(struct kvm_vcpu, arch.gpr_tm));
+        DEFINE(VCPU_FPRS_TM, offsetof(struct kvm_vcpu, arch.fp_tm.fpr));
+        DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr));
+        DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm));
+        DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm));
+        DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm));
+        DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm));
+        DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm));
+        DEFINE(VCPU_PPR_TM, offsetof(struct kvm_vcpu, arch.ppr_tm));
+        DEFINE(VCPU_DSCR_TM, offsetof(struct kvm_vcpu, arch.dscr_tm));
+        DEFINE(VCPU_TAR_TM, offsetof(struct kvm_vcpu, arch.tar_tm));
+#endif
 
 #ifdef CONFIG_PPC_BOOK3S_64
 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
@@ -602,6 +635,7 @@ int main(void)
         HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
         HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
         HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
+        HSTATE_FIELD(HSTATE_PTID, ptid);
         HSTATE_FIELD(HSTATE_MMCR, host_mmcr);
         HSTATE_FIELD(HSTATE_PMC, host_pmc);
         HSTATE_FIELD(HSTATE_PURR, host_purr);
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index db28032e320e..6a0175297b0d 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -413,13 +413,13 @@ static void kvm_map_magic_page(void *data)
 {
         u32 *features = data;
 
-        ulong in[8];
+        ulong in[8] = {0};
         ulong out[8];
 
         in[0] = KVM_MAGIC_PAGE;
         in[1] = KVM_MAGIC_PAGE;
 
-        kvm_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));
+        epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));
 
         *features = out[0];
 }
@@ -711,43 +711,6 @@ static void kvm_use_magic_page(void)
                       kvm_patching_worked ? "worked" : "failed");
 }
 
-unsigned long kvm_hypercall(unsigned long *in,
-                            unsigned long *out,
-                            unsigned long nr)
-{
-        unsigned long register r0 asm("r0");
-        unsigned long register r3 asm("r3") = in[0];
-        unsigned long register r4 asm("r4") = in[1];
-        unsigned long register r5 asm("r5") = in[2];
-        unsigned long register r6 asm("r6") = in[3];
-        unsigned long register r7 asm("r7") = in[4];
-        unsigned long register r8 asm("r8") = in[5];
-        unsigned long register r9 asm("r9") = in[6];
-        unsigned long register r10 asm("r10") = in[7];
-        unsigned long register r11 asm("r11") = nr;
-        unsigned long register r12 asm("r12");
-
-        asm volatile("bl        epapr_hypercall_start"
-                     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
-                       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
-                       "=r"(r12)
-                     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
-                       "r"(r9), "r"(r10), "r"(r11)
-                     : "memory", "cc", "xer", "ctr", "lr");
-
-        out[0] = r4;
-        out[1] = r5;
-        out[2] = r6;
-        out[3] = r7;
-        out[4] = r8;
-        out[5] = r9;
-        out[6] = r10;
-        out[7] = r11;
-
-        return r3;
-}
-EXPORT_SYMBOL_GPL(kvm_hypercall);
-
 static __init void kvm_free_tmp(void)
 {
         free_reserved_area(&kvm_tmp[kvm_tmp_index],
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 93221e87b911..9cb4b0a36031 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -21,6 +21,8 @@
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/export.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
 
 #include <asm/reg.h>
 #include <asm/cputable.h>
@@ -231,3 +233,5 @@ static void __exit kvmppc_44x_exit(void)
 
 module_init(kvmppc_44x_init);
 module_exit(kvmppc_44x_exit);
+MODULE_ALIAS_MISCDEV(KVM_MINOR);
+MODULE_ALIAS("devname:kvm");
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 8912608b7e1b..94e597e6f15c 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -18,6 +18,8 @@
 #include <linux/err.h>
 #include <linux/export.h>
 #include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
 
 #include <asm/reg.h>
 #include <asm/cputable.h>
@@ -575,10 +577,10 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
                 break;
         case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
                 i = reg->id - KVM_REG_PPC_FPR0;
-                val = get_reg_val(reg->id, vcpu->arch.fpr[i]);
+                val = get_reg_val(reg->id, VCPU_FPR(vcpu, i));
                 break;
         case KVM_REG_PPC_FPSCR:
-                val = get_reg_val(reg->id, vcpu->arch.fpscr);
+                val = get_reg_val(reg->id, vcpu->arch.fp.fpscr);
                 break;
 #ifdef CONFIG_ALTIVEC
         case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
@@ -586,19 +588,30 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
                         r = -ENXIO;
                         break;
                 }
-                val.vval = vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0];
+                val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
                 break;
         case KVM_REG_PPC_VSCR:
                 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                         r = -ENXIO;
                         break;
                 }
-                val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]);
+                val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
                 break;
         case KVM_REG_PPC_VRSAVE:
                 val = get_reg_val(reg->id, vcpu->arch.vrsave);
                 break;
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_VSX
+        case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
+                if (cpu_has_feature(CPU_FTR_VSX)) {
+                        long int i = reg->id - KVM_REG_PPC_VSR0;
+                        val.vsxval[0] = vcpu->arch.fp.fpr[i][0];
+                        val.vsxval[1] = vcpu->arch.fp.fpr[i][1];
+                } else {
+                        r = -ENXIO;
+                }
+                break;
+#endif /* CONFIG_VSX */
         case KVM_REG_PPC_DEBUG_INST: {
                 u32 opcode = INS_TW;
                 r = copy_to_user((u32 __user *)(long)reg->addr,
@@ -654,10 +667,10 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
                 break;
         case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
                 i = reg->id - KVM_REG_PPC_FPR0;
-                vcpu->arch.fpr[i] = set_reg_val(reg->id, val);
+                VCPU_FPR(vcpu, i) = set_reg_val(reg->id, val);
                 break;
         case KVM_REG_PPC_FPSCR:
-                vcpu->arch.fpscr = set_reg_val(reg->id, val);
+                vcpu->arch.fp.fpscr = set_reg_val(reg->id, val);
                 break;
 #ifdef CONFIG_ALTIVEC
         case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
@@ -665,14 +678,14 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
                         r = -ENXIO;
                         break;
                 }
-                vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
+                vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
                 break;
         case KVM_REG_PPC_VSCR:
                 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                         r = -ENXIO;
                         break;
                 }
-                vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val);
+                vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
                 break;
         case KVM_REG_PPC_VRSAVE:
                 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
@@ -682,6 +695,17 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
                 vcpu->arch.vrsave = set_reg_val(reg->id, val);
                 break;
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_VSX
+        case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
+                if (cpu_has_feature(CPU_FTR_VSX)) {
+                        long int i = reg->id - KVM_REG_PPC_VSR0;
+                        vcpu->arch.fp.fpr[i][0] = val.vsxval[0];
+                        vcpu->arch.fp.fpr[i][1] = val.vsxval[1];
+                } else {
+                        r = -ENXIO;
+                }
+                break;
+#endif /* CONFIG_VSX */
 #ifdef CONFIG_KVM_XICS
         case KVM_REG_PPC_ICP_STATE:
                 if (!vcpu->arch.icp) {
@@ -879,3 +903,9 @@ static void kvmppc_book3s_exit(void)
 
 module_init(kvmppc_book3s_init);
 module_exit(kvmppc_book3s_exit);
+
+/* On 32bit this is our one and only kernel module */
+#ifdef CONFIG_KVM_BOOK3S_32
+MODULE_ALIAS_MISCDEV(KVM_MINOR);
+MODULE_ALIAS("devname:kvm");
+#endif
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 3a0abd2e5a15..5fac89dfe4cd 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -243,6 +243,11 @@ next_pteg:
         /* Now tell our Shadow PTE code about the new page */
 
         pte = kvmppc_mmu_hpte_cache_next(vcpu);
+        if (!pte) {
+                kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
+                r = -EAGAIN;
+                goto out;
+        }
 
         dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
                     orig_pte->may_write ? 'w' : '-',
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index c5d148434c08..303ece75b8e4 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -262,7 +262,7 @@ int kvmppc_mmu_hv_init(void)
262 262
263static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu) 263static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
264{ 264{
265 kvmppc_set_msr(vcpu, MSR_SF | MSR_ME); 265 kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
266} 266}
267 267
268/* 268/*
@@ -562,7 +562,7 @@ static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
562 * we just return and retry the instruction. 562 * we just return and retry the instruction.
563 */ 563 */
564 564
565 if (instruction_is_store(vcpu->arch.last_inst) != !!is_store) 565 if (instruction_is_store(kvmppc_get_last_inst(vcpu)) != !!is_store)
566 return RESUME_GUEST; 566 return RESUME_GUEST;
567 567
568 /* 568 /*
diff --git a/arch/powerpc/kvm/book3s_exports.c b/arch/powerpc/kvm/book3s_exports.c
index 852989a9bad3..20d4ea8e656d 100644
--- a/arch/powerpc/kvm/book3s_exports.c
+++ b/arch/powerpc/kvm/book3s_exports.c
@@ -25,9 +25,5 @@ EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline);
25#endif 25#endif
26#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 26#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
27EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline); 27EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline);
28EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
29#ifdef CONFIG_ALTIVEC
30EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec);
31#endif
32#endif 28#endif
33 29
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 3818bd95327c..17fc9496b6ac 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -31,6 +31,7 @@
31#include <linux/spinlock.h> 31#include <linux/spinlock.h>
32#include <linux/page-flags.h> 32#include <linux/page-flags.h>
33#include <linux/srcu.h> 33#include <linux/srcu.h>
34#include <linux/miscdevice.h>
34 35
35#include <asm/reg.h> 36#include <asm/reg.h>
36#include <asm/cputable.h> 37#include <asm/cputable.h>
@@ -85,10 +86,13 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
85 86
86 /* CPU points to the first thread of the core */ 87 /* CPU points to the first thread of the core */
87 if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) { 88 if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
89#ifdef CONFIG_KVM_XICS
88 int real_cpu = cpu + vcpu->arch.ptid; 90 int real_cpu = cpu + vcpu->arch.ptid;
89 if (paca[real_cpu].kvm_hstate.xics_phys) 91 if (paca[real_cpu].kvm_hstate.xics_phys)
90 xics_wake_cpu(real_cpu); 92 xics_wake_cpu(real_cpu);
91 else if (cpu_online(cpu)) 93 else
94#endif
95 if (cpu_online(cpu))
92 smp_send_reschedule(cpu); 96 smp_send_reschedule(cpu);
93 } 97 }
94 put_cpu(); 98 put_cpu();
@@ -182,14 +186,28 @@ int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
182 186
183 switch (arch_compat) { 187 switch (arch_compat) {
184 case PVR_ARCH_205: 188 case PVR_ARCH_205:
185 pcr = PCR_ARCH_205; 189 /*
190 * If an arch bit is set in PCR, all the defined
191 * higher-order arch bits also have to be set.
192 */
193 pcr = PCR_ARCH_206 | PCR_ARCH_205;
186 break; 194 break;
187 case PVR_ARCH_206: 195 case PVR_ARCH_206:
188 case PVR_ARCH_206p: 196 case PVR_ARCH_206p:
197 pcr = PCR_ARCH_206;
198 break;
199 case PVR_ARCH_207:
189 break; 200 break;
190 default: 201 default:
191 return -EINVAL; 202 return -EINVAL;
192 } 203 }
204
205 if (!cpu_has_feature(CPU_FTR_ARCH_207S)) {
206 /* POWER7 can't emulate POWER8 */
207 if (!(pcr & PCR_ARCH_206))
208 return -EINVAL;
209 pcr &= ~PCR_ARCH_206;
210 }
193 } 211 }
194 212
195 spin_lock(&vc->lock); 213 spin_lock(&vc->lock);
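
The compatibility-mode rule in the new comment is easier to follow with the whole decision gathered in one place. A kernel-style restatement of the hunk (illustrative only; it reuses the kernel's PVR_ARCH_ and PCR_ARCH_ constants and is not additional code in the patch):

static int compat_to_pcr(u32 arch_compat, bool host_is_power8, unsigned long *pcr)
{
	switch (arch_compat) {
	case PVR_ARCH_205:
		/* a set arch bit implies every defined higher-order bit */
		*pcr = PCR_ARCH_206 | PCR_ARCH_205;
		break;
	case PVR_ARCH_206:
	case PVR_ARCH_206p:
		*pcr = PCR_ARCH_206;
		break;
	case PVR_ARCH_207:
		*pcr = 0;		/* native mode for a POWER8 host */
		break;
	default:
		return -EINVAL;
	}

	if (!host_is_power8) {
		/* POWER7 can't emulate POWER8, and 2.06 is what a POWER7
		 * does natively, so that bit is dropped rather than set. */
		if (!(*pcr & PCR_ARCH_206))
			return -EINVAL;
		*pcr &= ~PCR_ARCH_206;
	}
	return 0;
}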
@@ -637,6 +655,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
637 r = RESUME_GUEST; 655 r = RESUME_GUEST;
638 break; 656 break;
639 case BOOK3S_INTERRUPT_EXTERNAL: 657 case BOOK3S_INTERRUPT_EXTERNAL:
658 case BOOK3S_INTERRUPT_H_DOORBELL:
640 vcpu->stat.ext_intr_exits++; 659 vcpu->stat.ext_intr_exits++;
641 r = RESUME_GUEST; 660 r = RESUME_GUEST;
642 break; 661 break;
@@ -673,12 +692,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
673 /* hcall - punt to userspace */ 692 /* hcall - punt to userspace */
674 int i; 693 int i;
675 694
676 if (vcpu->arch.shregs.msr & MSR_PR) { 695 /* hypercall with MSR_PR has already been handled in rmode,
677 /* sc 1 from userspace - reflect to guest syscall */ 696 * and never reaches here.
678 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL); 697 */
679 r = RESUME_GUEST; 698
680 break;
681 }
682 run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3); 699 run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
683 for (i = 0; i < 9; ++i) 700 for (i = 0; i < 9; ++i)
684 run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i); 701 run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
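
When an hcall is punted like this, the vcpu ioctl returns to userspace with exit_reason KVM_EXIT_PAPR_HCALL and the fields filled in here. A minimal userspace handler might look roughly like this (a sketch; the papr_hcall layout is taken from the UAPI kvm_run union, and writing 0 to ret reports H_SUCCESS back to the guest's r3 on re-entry):

#include <linux/kvm.h>

static void handle_papr_hcall(struct kvm_run *run)
{
	if (run->exit_reason != KVM_EXIT_PAPR_HCALL)
		return;

	__u64 nr    = run->papr_hcall.nr;	/* hypercall number (guest r3) */
	__u64 *args = run->papr_hcall.args;	/* the nine arguments (r4..r12) */

	/* ... emulate the hypercall here ... */
	(void)nr;
	(void)args;

	run->papr_hcall.ret = 0;		/* H_SUCCESS */
}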
@@ -708,7 +725,16 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
708 * we don't emulate any guest instructions at this stage. 725 * we don't emulate any guest instructions at this stage.
709 */ 726 */
710 case BOOK3S_INTERRUPT_H_EMUL_ASSIST: 727 case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
711 kvmppc_core_queue_program(vcpu, 0x80000); 728 kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
729 r = RESUME_GUEST;
730 break;
731 /*
732 * This occurs if the guest (kernel or userspace), does something that
733 * is prohibited by HFSCR. We just generate a program interrupt to
734 * the guest.
735 */
736 case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
737 kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
712 r = RESUME_GUEST; 738 r = RESUME_GUEST;
713 break; 739 break;
714 default: 740 default:
@@ -766,10 +792,34 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
766 792
767 spin_lock(&vc->lock); 793 spin_lock(&vc->lock);
768 /* 794 /*
795 * If ILE (interrupt little-endian) has changed, update the
796 * MSR_LE bit in the intr_msr for each vcpu in this vcore.
797 */
798 if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
799 struct kvm *kvm = vcpu->kvm;
800 struct kvm_vcpu *vcpu;
801 int i;
802
803 mutex_lock(&kvm->lock);
804 kvm_for_each_vcpu(i, vcpu, kvm) {
805 if (vcpu->arch.vcore != vc)
806 continue;
807 if (new_lpcr & LPCR_ILE)
808 vcpu->arch.intr_msr |= MSR_LE;
809 else
810 vcpu->arch.intr_msr &= ~MSR_LE;
811 }
812 mutex_unlock(&kvm->lock);
813 }
814
815 /*
769 * Userspace can only modify DPFD (default prefetch depth), 816 * Userspace can only modify DPFD (default prefetch depth),
770 * ILE (interrupt little-endian) and TC (translation control). 817 * ILE (interrupt little-endian) and TC (translation control).
818 * On POWER8 userspace can also modify AIL (alt. interrupt loc.)
771 */ 819 */
772 mask = LPCR_DPFD | LPCR_ILE | LPCR_TC; 820 mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
821 if (cpu_has_feature(CPU_FTR_ARCH_207S))
822 mask |= LPCR_AIL;
773 vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); 823 vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
774 spin_unlock(&vc->lock); 824 spin_unlock(&vc->lock);
775} 825}
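
Put together, the LPCR bits that a one_reg user can now influence are the following (kernel-style sketch mirroring the hunk, not extra code in the patch):

u64 mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;	/* POWER7 and later */
if (cpu_has_feature(CPU_FTR_ARCH_207S))
	mask |= LPCR_AIL;			/* POWER8 only: alternate interrupt location */
vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);

The ILE bit is also mirrored into each vcpu's intr_msr as MSR_LE, which is what kvmppc_mmu_book3s_64_hv_reset_msr() now loads, so synthesized interrupts enter the guest in the endianness it asked for.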
@@ -787,6 +837,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
787 case KVM_REG_PPC_DABR: 837 case KVM_REG_PPC_DABR:
788 *val = get_reg_val(id, vcpu->arch.dabr); 838 *val = get_reg_val(id, vcpu->arch.dabr);
789 break; 839 break;
840 case KVM_REG_PPC_DABRX:
841 *val = get_reg_val(id, vcpu->arch.dabrx);
842 break;
790 case KVM_REG_PPC_DSCR: 843 case KVM_REG_PPC_DSCR:
791 *val = get_reg_val(id, vcpu->arch.dscr); 844 *val = get_reg_val(id, vcpu->arch.dscr);
792 break; 845 break;
@@ -802,7 +855,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
802 case KVM_REG_PPC_UAMOR: 855 case KVM_REG_PPC_UAMOR:
803 *val = get_reg_val(id, vcpu->arch.uamor); 856 *val = get_reg_val(id, vcpu->arch.uamor);
804 break; 857 break;
805 case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA: 858 case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
806 i = id - KVM_REG_PPC_MMCR0; 859 i = id - KVM_REG_PPC_MMCR0;
807 *val = get_reg_val(id, vcpu->arch.mmcr[i]); 860 *val = get_reg_val(id, vcpu->arch.mmcr[i]);
808 break; 861 break;
@@ -810,33 +863,87 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
810 i = id - KVM_REG_PPC_PMC1; 863 i = id - KVM_REG_PPC_PMC1;
811 *val = get_reg_val(id, vcpu->arch.pmc[i]); 864 *val = get_reg_val(id, vcpu->arch.pmc[i]);
812 break; 865 break;
866 case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
867 i = id - KVM_REG_PPC_SPMC1;
868 *val = get_reg_val(id, vcpu->arch.spmc[i]);
869 break;
813 case KVM_REG_PPC_SIAR: 870 case KVM_REG_PPC_SIAR:
814 *val = get_reg_val(id, vcpu->arch.siar); 871 *val = get_reg_val(id, vcpu->arch.siar);
815 break; 872 break;
816 case KVM_REG_PPC_SDAR: 873 case KVM_REG_PPC_SDAR:
817 *val = get_reg_val(id, vcpu->arch.sdar); 874 *val = get_reg_val(id, vcpu->arch.sdar);
818 break; 875 break;
819#ifdef CONFIG_VSX 876 case KVM_REG_PPC_SIER:
820 case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: 877 *val = get_reg_val(id, vcpu->arch.sier);
821 if (cpu_has_feature(CPU_FTR_VSX)) {
822 /* VSX => FP reg i is stored in arch.vsr[2*i] */
823 long int i = id - KVM_REG_PPC_FPR0;
824 *val = get_reg_val(id, vcpu->arch.vsr[2 * i]);
825 } else {
826 /* let generic code handle it */
827 r = -EINVAL;
828 }
829 break; 878 break;
830 case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: 879 case KVM_REG_PPC_IAMR:
831 if (cpu_has_feature(CPU_FTR_VSX)) { 880 *val = get_reg_val(id, vcpu->arch.iamr);
832 long int i = id - KVM_REG_PPC_VSR0; 881 break;
833 val->vsxval[0] = vcpu->arch.vsr[2 * i]; 882#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
834 val->vsxval[1] = vcpu->arch.vsr[2 * i + 1]; 883 case KVM_REG_PPC_TFHAR:
835 } else { 884 *val = get_reg_val(id, vcpu->arch.tfhar);
836 r = -ENXIO; 885 break;
837 } 886 case KVM_REG_PPC_TFIAR:
887 *val = get_reg_val(id, vcpu->arch.tfiar);
888 break;
889 case KVM_REG_PPC_TEXASR:
890 *val = get_reg_val(id, vcpu->arch.texasr);
891 break;
892#endif
893 case KVM_REG_PPC_FSCR:
894 *val = get_reg_val(id, vcpu->arch.fscr);
895 break;
896 case KVM_REG_PPC_PSPB:
897 *val = get_reg_val(id, vcpu->arch.pspb);
898 break;
899 case KVM_REG_PPC_EBBHR:
900 *val = get_reg_val(id, vcpu->arch.ebbhr);
901 break;
902 case KVM_REG_PPC_EBBRR:
903 *val = get_reg_val(id, vcpu->arch.ebbrr);
904 break;
905 case KVM_REG_PPC_BESCR:
906 *val = get_reg_val(id, vcpu->arch.bescr);
907 break;
908 case KVM_REG_PPC_TAR:
909 *val = get_reg_val(id, vcpu->arch.tar);
910 break;
911 case KVM_REG_PPC_DPDES:
912 *val = get_reg_val(id, vcpu->arch.vcore->dpdes);
913 break;
914 case KVM_REG_PPC_DAWR:
915 *val = get_reg_val(id, vcpu->arch.dawr);
916 break;
917 case KVM_REG_PPC_DAWRX:
918 *val = get_reg_val(id, vcpu->arch.dawrx);
919 break;
920 case KVM_REG_PPC_CIABR:
921 *val = get_reg_val(id, vcpu->arch.ciabr);
922 break;
923 case KVM_REG_PPC_IC:
924 *val = get_reg_val(id, vcpu->arch.ic);
925 break;
926 case KVM_REG_PPC_VTB:
927 *val = get_reg_val(id, vcpu->arch.vtb);
928 break;
929 case KVM_REG_PPC_CSIGR:
930 *val = get_reg_val(id, vcpu->arch.csigr);
931 break;
932 case KVM_REG_PPC_TACR:
933 *val = get_reg_val(id, vcpu->arch.tacr);
934 break;
935 case KVM_REG_PPC_TCSCR:
936 *val = get_reg_val(id, vcpu->arch.tcscr);
937 break;
938 case KVM_REG_PPC_PID:
939 *val = get_reg_val(id, vcpu->arch.pid);
940 break;
941 case KVM_REG_PPC_ACOP:
942 *val = get_reg_val(id, vcpu->arch.acop);
943 break;
944 case KVM_REG_PPC_WORT:
945 *val = get_reg_val(id, vcpu->arch.wort);
838 break; 946 break;
839#endif /* CONFIG_VSX */
840 case KVM_REG_PPC_VPA_ADDR: 947 case KVM_REG_PPC_VPA_ADDR:
841 spin_lock(&vcpu->arch.vpa_update_lock); 948 spin_lock(&vcpu->arch.vpa_update_lock);
842 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); 949 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
@@ -890,6 +997,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
890 case KVM_REG_PPC_DABR: 997 case KVM_REG_PPC_DABR:
891 vcpu->arch.dabr = set_reg_val(id, *val); 998 vcpu->arch.dabr = set_reg_val(id, *val);
892 break; 999 break;
1000 case KVM_REG_PPC_DABRX:
1001 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
1002 break;
893 case KVM_REG_PPC_DSCR: 1003 case KVM_REG_PPC_DSCR:
894 vcpu->arch.dscr = set_reg_val(id, *val); 1004 vcpu->arch.dscr = set_reg_val(id, *val);
895 break; 1005 break;
@@ -905,7 +1015,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
905 case KVM_REG_PPC_UAMOR: 1015 case KVM_REG_PPC_UAMOR:
906 vcpu->arch.uamor = set_reg_val(id, *val); 1016 vcpu->arch.uamor = set_reg_val(id, *val);
907 break; 1017 break;
908 case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA: 1018 case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
909 i = id - KVM_REG_PPC_MMCR0; 1019 i = id - KVM_REG_PPC_MMCR0;
910 vcpu->arch.mmcr[i] = set_reg_val(id, *val); 1020 vcpu->arch.mmcr[i] = set_reg_val(id, *val);
911 break; 1021 break;
@@ -913,33 +1023,90 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
913 i = id - KVM_REG_PPC_PMC1; 1023 i = id - KVM_REG_PPC_PMC1;
914 vcpu->arch.pmc[i] = set_reg_val(id, *val); 1024 vcpu->arch.pmc[i] = set_reg_val(id, *val);
915 break; 1025 break;
1026 case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
1027 i = id - KVM_REG_PPC_SPMC1;
1028 vcpu->arch.spmc[i] = set_reg_val(id, *val);
1029 break;
916 case KVM_REG_PPC_SIAR: 1030 case KVM_REG_PPC_SIAR:
917 vcpu->arch.siar = set_reg_val(id, *val); 1031 vcpu->arch.siar = set_reg_val(id, *val);
918 break; 1032 break;
919 case KVM_REG_PPC_SDAR: 1033 case KVM_REG_PPC_SDAR:
920 vcpu->arch.sdar = set_reg_val(id, *val); 1034 vcpu->arch.sdar = set_reg_val(id, *val);
921 break; 1035 break;
922#ifdef CONFIG_VSX 1036 case KVM_REG_PPC_SIER:
923 case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: 1037 vcpu->arch.sier = set_reg_val(id, *val);
924 if (cpu_has_feature(CPU_FTR_VSX)) {
925 /* VSX => FP reg i is stored in arch.vsr[2*i] */
926 long int i = id - KVM_REG_PPC_FPR0;
927 vcpu->arch.vsr[2 * i] = set_reg_val(id, *val);
928 } else {
929 /* let generic code handle it */
930 r = -EINVAL;
931 }
932 break; 1038 break;
933 case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: 1039 case KVM_REG_PPC_IAMR:
934 if (cpu_has_feature(CPU_FTR_VSX)) { 1040 vcpu->arch.iamr = set_reg_val(id, *val);
935 long int i = id - KVM_REG_PPC_VSR0; 1041 break;
936 vcpu->arch.vsr[2 * i] = val->vsxval[0]; 1042#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
937 vcpu->arch.vsr[2 * i + 1] = val->vsxval[1]; 1043 case KVM_REG_PPC_TFHAR:
938 } else { 1044 vcpu->arch.tfhar = set_reg_val(id, *val);
939 r = -ENXIO; 1045 break;
940 } 1046 case KVM_REG_PPC_TFIAR:
1047 vcpu->arch.tfiar = set_reg_val(id, *val);
1048 break;
1049 case KVM_REG_PPC_TEXASR:
1050 vcpu->arch.texasr = set_reg_val(id, *val);
1051 break;
1052#endif
1053 case KVM_REG_PPC_FSCR:
1054 vcpu->arch.fscr = set_reg_val(id, *val);
1055 break;
1056 case KVM_REG_PPC_PSPB:
1057 vcpu->arch.pspb = set_reg_val(id, *val);
1058 break;
1059 case KVM_REG_PPC_EBBHR:
1060 vcpu->arch.ebbhr = set_reg_val(id, *val);
1061 break;
1062 case KVM_REG_PPC_EBBRR:
1063 vcpu->arch.ebbrr = set_reg_val(id, *val);
1064 break;
1065 case KVM_REG_PPC_BESCR:
1066 vcpu->arch.bescr = set_reg_val(id, *val);
1067 break;
1068 case KVM_REG_PPC_TAR:
1069 vcpu->arch.tar = set_reg_val(id, *val);
1070 break;
1071 case KVM_REG_PPC_DPDES:
1072 vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
1073 break;
1074 case KVM_REG_PPC_DAWR:
1075 vcpu->arch.dawr = set_reg_val(id, *val);
1076 break;
1077 case KVM_REG_PPC_DAWRX:
1078 vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP;
1079 break;
1080 case KVM_REG_PPC_CIABR:
1081 vcpu->arch.ciabr = set_reg_val(id, *val);
1082 /* Don't allow setting breakpoints in hypervisor code */
1083 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
1084 vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */
1085 break;
1086 case KVM_REG_PPC_IC:
1087 vcpu->arch.ic = set_reg_val(id, *val);
1088 break;
1089 case KVM_REG_PPC_VTB:
1090 vcpu->arch.vtb = set_reg_val(id, *val);
1091 break;
1092 case KVM_REG_PPC_CSIGR:
1093 vcpu->arch.csigr = set_reg_val(id, *val);
1094 break;
1095 case KVM_REG_PPC_TACR:
1096 vcpu->arch.tacr = set_reg_val(id, *val);
1097 break;
1098 case KVM_REG_PPC_TCSCR:
1099 vcpu->arch.tcscr = set_reg_val(id, *val);
1100 break;
1101 case KVM_REG_PPC_PID:
1102 vcpu->arch.pid = set_reg_val(id, *val);
1103 break;
1104 case KVM_REG_PPC_ACOP:
1105 vcpu->arch.acop = set_reg_val(id, *val);
1106 break;
1107 case KVM_REG_PPC_WORT:
1108 vcpu->arch.wort = set_reg_val(id, *val);
941 break; 1109 break;
942#endif /* CONFIG_VSX */
943 case KVM_REG_PPC_VPA_ADDR: 1110 case KVM_REG_PPC_VPA_ADDR:
944 addr = set_reg_val(id, *val); 1111 addr = set_reg_val(id, *val);
945 r = -EINVAL; 1112 r = -EINVAL;
@@ -1017,6 +1184,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
1017 spin_lock_init(&vcpu->arch.vpa_update_lock); 1184 spin_lock_init(&vcpu->arch.vpa_update_lock);
1018 spin_lock_init(&vcpu->arch.tbacct_lock); 1185 spin_lock_init(&vcpu->arch.tbacct_lock);
1019 vcpu->arch.busy_preempt = TB_NIL; 1186 vcpu->arch.busy_preempt = TB_NIL;
1187 vcpu->arch.intr_msr = MSR_SF | MSR_ME;
1020 1188
1021 kvmppc_mmu_book3s_hv_init(vcpu); 1189 kvmppc_mmu_book3s_hv_init(vcpu);
1022 1190
@@ -1034,6 +1202,8 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
1034 init_waitqueue_head(&vcore->wq); 1202 init_waitqueue_head(&vcore->wq);
1035 vcore->preempt_tb = TB_NIL; 1203 vcore->preempt_tb = TB_NIL;
1036 vcore->lpcr = kvm->arch.lpcr; 1204 vcore->lpcr = kvm->arch.lpcr;
1205 vcore->first_vcpuid = core * threads_per_core;
1206 vcore->kvm = kvm;
1037 } 1207 }
1038 kvm->arch.vcores[core] = vcore; 1208 kvm->arch.vcores[core] = vcore;
1039 kvm->arch.online_vcores++; 1209 kvm->arch.online_vcores++;
@@ -1047,6 +1217,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
1047 ++vcore->num_threads; 1217 ++vcore->num_threads;
1048 spin_unlock(&vcore->lock); 1218 spin_unlock(&vcore->lock);
1049 vcpu->arch.vcore = vcore; 1219 vcpu->arch.vcore = vcore;
1220 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
1050 1221
1051 vcpu->arch.cpu_type = KVM_CPU_3S_64; 1222 vcpu->arch.cpu_type = KVM_CPU_3S_64;
1052 kvmppc_sanity_check(vcpu); 1223 kvmppc_sanity_check(vcpu);
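
With the physical thread ID now fixed at vcpu creation time, the mapping from vcpu id to vcore and thread is a simple calculation. A worked example (standalone; the vcpu id is made up, threads_per_core is 8 on POWER8):

#include <stdio.h>

int main(void)
{
	int threads_per_core = 8, vcpu_id = 11;		/* example values */

	int core         = vcpu_id / threads_per_core;	/* virtual core 1 */
	int first_vcpuid = core * threads_per_core;	/* 8, stored in the vcore */
	int ptid         = vcpu_id - first_vcpuid;	/* thread 3 of that vcore */

	printf("vcpu %d -> vcore %d, ptid %d\n", vcpu_id, core, ptid);
	return 0;
}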
@@ -1110,7 +1281,7 @@ static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
1110 } 1281 }
1111} 1282}
1112 1283
1113extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); 1284extern void __kvmppc_vcore_entry(void);
1114 1285
1115static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, 1286static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
1116 struct kvm_vcpu *vcpu) 1287 struct kvm_vcpu *vcpu)
@@ -1184,13 +1355,16 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
1184 tpaca = &paca[cpu]; 1355 tpaca = &paca[cpu];
1185 tpaca->kvm_hstate.kvm_vcpu = vcpu; 1356 tpaca->kvm_hstate.kvm_vcpu = vcpu;
1186 tpaca->kvm_hstate.kvm_vcore = vc; 1357 tpaca->kvm_hstate.kvm_vcore = vc;
1187 tpaca->kvm_hstate.napping = 0; 1358 tpaca->kvm_hstate.ptid = vcpu->arch.ptid;
1188 vcpu->cpu = vc->pcpu; 1359 vcpu->cpu = vc->pcpu;
1189 smp_wmb(); 1360 smp_wmb();
1190#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP) 1361#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
1191 if (vcpu->arch.ptid) { 1362 if (cpu != smp_processor_id()) {
1363#ifdef CONFIG_KVM_XICS
1192 xics_wake_cpu(cpu); 1364 xics_wake_cpu(cpu);
1193 ++vc->n_woken; 1365#endif
1366 if (vcpu->arch.ptid)
1367 ++vc->n_woken;
1194 } 1368 }
1195#endif 1369#endif
1196} 1370}
@@ -1247,10 +1421,10 @@ static int on_primary_thread(void)
1247 */ 1421 */
1248static void kvmppc_run_core(struct kvmppc_vcore *vc) 1422static void kvmppc_run_core(struct kvmppc_vcore *vc)
1249{ 1423{
1250 struct kvm_vcpu *vcpu, *vcpu0, *vnext; 1424 struct kvm_vcpu *vcpu, *vnext;
1251 long ret; 1425 long ret;
1252 u64 now; 1426 u64 now;
1253 int ptid, i, need_vpa_update; 1427 int i, need_vpa_update;
1254 int srcu_idx; 1428 int srcu_idx;
1255 struct kvm_vcpu *vcpus_to_update[threads_per_core]; 1429 struct kvm_vcpu *vcpus_to_update[threads_per_core];
1256 1430
@@ -1288,25 +1462,6 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
1288 } 1462 }
1289 1463
1290 /* 1464 /*
1291 * Assign physical thread IDs, first to non-ceded vcpus
1292 * and then to ceded ones.
1293 */
1294 ptid = 0;
1295 vcpu0 = NULL;
1296 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
1297 if (!vcpu->arch.ceded) {
1298 if (!ptid)
1299 vcpu0 = vcpu;
1300 vcpu->arch.ptid = ptid++;
1301 }
1302 }
1303 if (!vcpu0)
1304 goto out; /* nothing to run; should never happen */
1305 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
1306 if (vcpu->arch.ceded)
1307 vcpu->arch.ptid = ptid++;
1308
1309 /*
1310 * Make sure we are running on thread 0, and that 1465 * Make sure we are running on thread 0, and that
1311 * secondary threads are offline. 1466 * secondary threads are offline.
1312 */ 1467 */
@@ -1322,15 +1477,19 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
1322 kvmppc_create_dtl_entry(vcpu, vc); 1477 kvmppc_create_dtl_entry(vcpu, vc);
1323 } 1478 }
1324 1479
1480 /* Set this explicitly in case thread 0 doesn't have a vcpu */
1481 get_paca()->kvm_hstate.kvm_vcore = vc;
1482 get_paca()->kvm_hstate.ptid = 0;
1483
1325 vc->vcore_state = VCORE_RUNNING; 1484 vc->vcore_state = VCORE_RUNNING;
1326 preempt_disable(); 1485 preempt_disable();
1327 spin_unlock(&vc->lock); 1486 spin_unlock(&vc->lock);
1328 1487
1329 kvm_guest_enter(); 1488 kvm_guest_enter();
1330 1489
1331 srcu_idx = srcu_read_lock(&vcpu0->kvm->srcu); 1490 srcu_idx = srcu_read_lock(&vc->kvm->srcu);
1332 1491
1333 __kvmppc_vcore_entry(NULL, vcpu0); 1492 __kvmppc_vcore_entry();
1334 1493
1335 spin_lock(&vc->lock); 1494 spin_lock(&vc->lock);
1336 /* disable sending of IPIs on virtual external irqs */ 1495 /* disable sending of IPIs on virtual external irqs */
@@ -1345,7 +1504,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
1345 vc->vcore_state = VCORE_EXITING; 1504 vc->vcore_state = VCORE_EXITING;
1346 spin_unlock(&vc->lock); 1505 spin_unlock(&vc->lock);
1347 1506
1348 srcu_read_unlock(&vcpu0->kvm->srcu, srcu_idx); 1507 srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
1349 1508
1350 /* make sure updates to secondary vcpu structs are visible now */ 1509 /* make sure updates to secondary vcpu structs are visible now */
1351 smp_mb(); 1510 smp_mb();
@@ -1453,7 +1612,6 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1453 if (!signal_pending(current)) { 1612 if (!signal_pending(current)) {
1454 if (vc->vcore_state == VCORE_RUNNING && 1613 if (vc->vcore_state == VCORE_RUNNING &&
1455 VCORE_EXIT_COUNT(vc) == 0) { 1614 VCORE_EXIT_COUNT(vc) == 0) {
1456 vcpu->arch.ptid = vc->n_runnable - 1;
1457 kvmppc_create_dtl_entry(vcpu, vc); 1615 kvmppc_create_dtl_entry(vcpu, vc);
1458 kvmppc_start_thread(vcpu); 1616 kvmppc_start_thread(vcpu);
1459 } else if (vc->vcore_state == VCORE_SLEEPING) { 1617 } else if (vc->vcore_state == VCORE_SLEEPING) {
@@ -2048,6 +2206,9 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
2048 LPCR_VPM0 | LPCR_VPM1; 2206 LPCR_VPM0 | LPCR_VPM1;
2049 kvm->arch.vrma_slb_v = SLB_VSID_B_1T | 2207 kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
2050 (VRMA_VSID << SLB_VSID_SHIFT_1T); 2208 (VRMA_VSID << SLB_VSID_SHIFT_1T);
2209 /* On POWER8 turn on online bit to enable PURR/SPURR */
2210 if (cpu_has_feature(CPU_FTR_ARCH_207S))
2211 lpcr |= LPCR_ONL;
2051 } 2212 }
2052 kvm->arch.lpcr = lpcr; 2213 kvm->arch.lpcr = lpcr;
2053 2214
@@ -2222,3 +2383,5 @@ static void kvmppc_book3s_exit_hv(void)
2222module_init(kvmppc_book3s_init_hv); 2383module_init(kvmppc_book3s_init_hv);
2223module_exit(kvmppc_book3s_exit_hv); 2384module_exit(kvmppc_book3s_exit_hv);
2224MODULE_LICENSE("GPL"); 2385MODULE_LICENSE("GPL");
2386MODULE_ALIAS_MISCDEV(KVM_MINOR);
2387MODULE_ALIAS("devname:kvm");
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 928142c64cb0..e873796b1a29 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -35,7 +35,7 @@
35 ****************************************************************************/ 35 ****************************************************************************/
36 36
37/* Registers: 37/* Registers:
38 * r4: vcpu pointer 38 * none
39 */ 39 */
40_GLOBAL(__kvmppc_vcore_entry) 40_GLOBAL(__kvmppc_vcore_entry)
41 41
@@ -57,9 +57,11 @@ BEGIN_FTR_SECTION
57 std r3, HSTATE_DSCR(r13) 57 std r3, HSTATE_DSCR(r13)
58END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 58END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
59 59
60BEGIN_FTR_SECTION
60 /* Save host DABR */ 61 /* Save host DABR */
61 mfspr r3, SPRN_DABR 62 mfspr r3, SPRN_DABR
62 std r3, HSTATE_DABR(r13) 63 std r3, HSTATE_DABR(r13)
64END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
63 65
64 /* Hard-disable interrupts */ 66 /* Hard-disable interrupts */
65 mfmsr r10 67 mfmsr r10
@@ -69,7 +71,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
69 mtmsrd r10,1 71 mtmsrd r10,1
70 72
71 /* Save host PMU registers */ 73 /* Save host PMU registers */
72 /* R4 is live here (vcpu pointer) but not r3 or r5 */
73 li r3, 1 74 li r3, 1
74 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ 75 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
75 mfspr r7, SPRN_MMCR0 /* save MMCR0 */ 76 mfspr r7, SPRN_MMCR0 /* save MMCR0 */
@@ -134,16 +135,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
134 * enters the guest with interrupts enabled. 135 * enters the guest with interrupts enabled.
135 */ 136 */
136BEGIN_FTR_SECTION 137BEGIN_FTR_SECTION
138 ld r4, HSTATE_KVM_VCPU(r13)
137 ld r0, VCPU_PENDING_EXC(r4) 139 ld r0, VCPU_PENDING_EXC(r4)
138 li r7, (1 << BOOK3S_IRQPRIO_EXTERNAL) 140 li r7, (1 << BOOK3S_IRQPRIO_EXTERNAL)
139 oris r7, r7, (1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h 141 oris r7, r7, (1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
140 and. r0, r0, r7 142 and. r0, r0, r7
141 beq 32f 143 beq 32f
142 mr r31, r4
143 lhz r3, PACAPACAINDEX(r13) 144 lhz r3, PACAPACAINDEX(r13)
144 bl smp_send_reschedule 145 bl smp_send_reschedule
145 nop 146 nop
146 mr r4, r31
14732: 14732:
148END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 148END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
149#endif /* CONFIG_SMP */ 149#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 8689e2e30857..37fb3caa4c80 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -134,7 +134,7 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
134 unlock_rmap(rmap); 134 unlock_rmap(rmap);
135} 135}
136 136
137static pte_t lookup_linux_pte(pgd_t *pgdir, unsigned long hva, 137static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva,
138 int writing, unsigned long *pte_sizep) 138 int writing, unsigned long *pte_sizep)
139{ 139{
140 pte_t *ptep; 140 pte_t *ptep;
@@ -232,7 +232,8 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
232 232
233 /* Look up the Linux PTE for the backing page */ 233 /* Look up the Linux PTE for the backing page */
234 pte_size = psize; 234 pte_size = psize;
235 pte = lookup_linux_pte(pgdir, hva, writing, &pte_size); 235 pte = lookup_linux_pte_and_update(pgdir, hva, writing,
236 &pte_size);
236 if (pte_present(pte)) { 237 if (pte_present(pte)) {
237 if (writing && !pte_write(pte)) 238 if (writing && !pte_write(pte))
238 /* make the actual HPTE be read-only */ 239 /* make the actual HPTE be read-only */
@@ -672,7 +673,8 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
672 memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn); 673 memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
673 if (memslot) { 674 if (memslot) {
674 hva = __gfn_to_hva_memslot(memslot, gfn); 675 hva = __gfn_to_hva_memslot(memslot, gfn);
675 pte = lookup_linux_pte(pgdir, hva, 1, &psize); 676 pte = lookup_linux_pte_and_update(pgdir, hva,
677 1, &psize);
676 if (pte_present(pte) && !pte_write(pte)) 678 if (pte_present(pte) && !pte_write(pte))
677 r = hpte_make_readonly(r); 679 r = hpte_make_readonly(r);
678 } 680 }
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index be4fa04a37c9..e66d4ec04d95 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -33,6 +33,10 @@
33#error Need to fix lppaca and SLB shadow accesses in little endian mode 33#error Need to fix lppaca and SLB shadow accesses in little endian mode
34#endif 34#endif
35 35
36/* Values in HSTATE_NAPPING(r13) */
37#define NAPPING_CEDE 1
38#define NAPPING_NOVCPU 2
39
36/* 40/*
37 * Call kvmppc_hv_entry in real mode. 41 * Call kvmppc_hv_entry in real mode.
38 * Must be called with interrupts hard-disabled. 42 * Must be called with interrupts hard-disabled.
@@ -57,29 +61,23 @@ _GLOBAL(kvmppc_hv_entry_trampoline)
57 RFI 61 RFI
58 62
59kvmppc_call_hv_entry: 63kvmppc_call_hv_entry:
64 ld r4, HSTATE_KVM_VCPU(r13)
60 bl kvmppc_hv_entry 65 bl kvmppc_hv_entry
61 66
62 /* Back from guest - restore host state and return to caller */ 67 /* Back from guest - restore host state and return to caller */
63 68
69BEGIN_FTR_SECTION
64 /* Restore host DABR and DABRX */ 70 /* Restore host DABR and DABRX */
65 ld r5,HSTATE_DABR(r13) 71 ld r5,HSTATE_DABR(r13)
66 li r6,7 72 li r6,7
67 mtspr SPRN_DABR,r5 73 mtspr SPRN_DABR,r5
68 mtspr SPRN_DABRX,r6 74 mtspr SPRN_DABRX,r6
75END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
69 76
70 /* Restore SPRG3 */ 77 /* Restore SPRG3 */
71 ld r3,PACA_SPRG3(r13) 78 ld r3,PACA_SPRG3(r13)
72 mtspr SPRN_SPRG3,r3 79 mtspr SPRN_SPRG3,r3
73 80
74 /*
75 * Reload DEC. HDEC interrupts were disabled when
76 * we reloaded the host's LPCR value.
77 */
78 ld r3, HSTATE_DECEXP(r13)
79 mftb r4
80 subf r4, r4, r3
81 mtspr SPRN_DEC, r4
82
83 /* Reload the host's PMU registers */ 81 /* Reload the host's PMU registers */
84 ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */ 82 ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
85 lbz r4, LPPACA_PMCINUSE(r3) 83 lbz r4, LPPACA_PMCINUSE(r3)
@@ -115,6 +113,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
11523: 11323:
116 114
117 /* 115 /*
116 * Reload DEC. HDEC interrupts were disabled when
117 * we reloaded the host's LPCR value.
118 */
119 ld r3, HSTATE_DECEXP(r13)
120 mftb r4
121 subf r4, r4, r3
122 mtspr SPRN_DEC, r4
123
124 /*
118 * For external and machine check interrupts, we need 125 * For external and machine check interrupts, we need
119 * to call the Linux handler to process the interrupt. 126 * to call the Linux handler to process the interrupt.
120 * We do that by jumping to absolute address 0x500 for 127 * We do that by jumping to absolute address 0x500 for
@@ -153,15 +160,75 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
153 160
15413: b machine_check_fwnmi 16113: b machine_check_fwnmi
155 162
163kvmppc_primary_no_guest:
164 /* We handle this much like a ceded vcpu */
165 /* set our bit in napping_threads */
166 ld r5, HSTATE_KVM_VCORE(r13)
167 lbz r7, HSTATE_PTID(r13)
168 li r0, 1
169 sld r0, r0, r7
170 addi r6, r5, VCORE_NAPPING_THREADS
1711: lwarx r3, 0, r6
172 or r3, r3, r0
173 stwcx. r3, 0, r6
174 bne 1b
175 /* order napping_threads update vs testing entry_exit_count */
176 isync
177 li r12, 0
178 lwz r7, VCORE_ENTRY_EXIT(r5)
179 cmpwi r7, 0x100
180 bge kvm_novcpu_exit /* another thread already exiting */
181 li r3, NAPPING_NOVCPU
182 stb r3, HSTATE_NAPPING(r13)
183 li r3, 1
184 stb r3, HSTATE_HWTHREAD_REQ(r13)
185
186 b kvm_do_nap
187
188kvm_novcpu_wakeup:
189 ld r1, HSTATE_HOST_R1(r13)
190 ld r5, HSTATE_KVM_VCORE(r13)
191 li r0, 0
192 stb r0, HSTATE_NAPPING(r13)
193 stb r0, HSTATE_HWTHREAD_REQ(r13)
194
195 /* check the wake reason */
196 bl kvmppc_check_wake_reason
197
198 /* see if any other thread is already exiting */
199 lwz r0, VCORE_ENTRY_EXIT(r5)
200 cmpwi r0, 0x100
201 bge kvm_novcpu_exit
202
203 /* clear our bit in napping_threads */
204 lbz r7, HSTATE_PTID(r13)
205 li r0, 1
206 sld r0, r0, r7
207 addi r6, r5, VCORE_NAPPING_THREADS
2084: lwarx r7, 0, r6
209 andc r7, r7, r0
210 stwcx. r7, 0, r6
211 bne 4b
212
213 /* See if the wake reason means we need to exit */
214 cmpdi r3, 0
215 bge kvm_novcpu_exit
216
217 /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
218 ld r4, HSTATE_KVM_VCPU(r13)
219 cmpdi r4, 0
220 bne kvmppc_got_guest
221
222kvm_novcpu_exit:
223 b hdec_soon
224
156/* 225/*
157 * We come in here when wakened from nap mode on a secondary hw thread. 226 * We come in here when wakened from nap mode.
158 * Relocation is off and most register values are lost. 227 * Relocation is off and most register values are lost.
159 * r13 points to the PACA. 228 * r13 points to the PACA.
160 */ 229 */
161 .globl kvm_start_guest 230 .globl kvm_start_guest
162kvm_start_guest: 231kvm_start_guest:
163 ld r1,PACAEMERGSP(r13)
164 subi r1,r1,STACK_FRAME_OVERHEAD
165 ld r2,PACATOC(r13) 232 ld r2,PACATOC(r13)
166 233
167 li r0,KVM_HWTHREAD_IN_KVM 234 li r0,KVM_HWTHREAD_IN_KVM
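
The lwarx/stwcx. loops in kvmppc_primary_no_guest and kvm_novcpu_wakeup just set and clear this thread's bit in vcore->napping_threads atomically; after setting its bit the thread re-checks entry_exit_count (the 0x100 comparison) so it never naps once another thread has started exiting. The same bit manipulation in portable C, purely as an illustration (the real code has to stay in assembly because it runs in real mode):

#include <stdatomic.h>

static void note_napping(_Atomic unsigned int *napping_threads, int ptid)
{
	atomic_fetch_or(napping_threads, 1u << ptid);		/* the lwarx/or/stwcx. loop */
}

static void clear_napping(_Atomic unsigned int *napping_threads, int ptid)
{
	atomic_fetch_and(napping_threads, ~(1u << ptid));	/* the lwarx/andc/stwcx. loop */
}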
@@ -173,8 +240,13 @@ kvm_start_guest:
173 240
174 /* were we napping due to cede? */ 241 /* were we napping due to cede? */
175 lbz r0,HSTATE_NAPPING(r13) 242 lbz r0,HSTATE_NAPPING(r13)
176 cmpwi r0,0 243 cmpwi r0,NAPPING_CEDE
177 bne kvm_end_cede 244 beq kvm_end_cede
245 cmpwi r0,NAPPING_NOVCPU
246 beq kvm_novcpu_wakeup
247
248 ld r1,PACAEMERGSP(r13)
249 subi r1,r1,STACK_FRAME_OVERHEAD
178 250
179 /* 251 /*
180 * We weren't napping due to cede, so this must be a secondary 252 * We weren't napping due to cede, so this must be a secondary
@@ -184,40 +256,22 @@ kvm_start_guest:
184 */ 256 */
185 257
186 /* Check the wake reason in SRR1 to see why we got here */ 258 /* Check the wake reason in SRR1 to see why we got here */
187 mfspr r3,SPRN_SRR1 259 bl kvmppc_check_wake_reason
188 rlwinm r3,r3,44-31,0x7 /* extract wake reason field */ 260 cmpdi r3, 0
189 cmpwi r3,4 /* was it an external interrupt? */ 261 bge kvm_no_guest
190 bne 27f /* if not */
191 ld r5,HSTATE_XICS_PHYS(r13)
192 li r7,XICS_XIRR /* if it was an external interrupt, */
193 lwzcix r8,r5,r7 /* get and ack the interrupt */
194 sync
195 clrldi. r9,r8,40 /* get interrupt source ID. */
196 beq 28f /* none there? */
197 cmpwi r9,XICS_IPI /* was it an IPI? */
198 bne 29f
199 li r0,0xff
200 li r6,XICS_MFRR
201 stbcix r0,r5,r6 /* clear IPI */
202 stwcix r8,r5,r7 /* EOI the interrupt */
203 sync /* order loading of vcpu after that */
204 262
205 /* get vcpu pointer, NULL if we have no vcpu to run */ 263 /* get vcpu pointer, NULL if we have no vcpu to run */
206 ld r4,HSTATE_KVM_VCPU(r13) 264 ld r4,HSTATE_KVM_VCPU(r13)
207 cmpdi r4,0 265 cmpdi r4,0
208 /* if we have no vcpu to run, go back to sleep */ 266 /* if we have no vcpu to run, go back to sleep */
209 beq kvm_no_guest 267 beq kvm_no_guest
210 b 30f
211 268
21227: /* XXX should handle hypervisor maintenance interrupts etc. here */ 269 /* Set HSTATE_DSCR(r13) to something sensible */
213 b kvm_no_guest 270 LOAD_REG_ADDR(r6, dscr_default)
21428: /* SRR1 said external but ICP said nope?? */ 271 ld r6, 0(r6)
215 b kvm_no_guest 272 std r6, HSTATE_DSCR(r13)
21629: /* External non-IPI interrupt to offline secondary thread? help?? */
217 stw r8,HSTATE_SAVED_XIRR(r13)
218 b kvm_no_guest
219 273
22030: bl kvmppc_hv_entry 274 bl kvmppc_hv_entry
221 275
222 /* Back from the guest, go back to nap */ 276 /* Back from the guest, go back to nap */
223 /* Clear our vcpu pointer so we don't come back in early */ 277 /* Clear our vcpu pointer so we don't come back in early */
@@ -229,18 +283,6 @@ kvm_start_guest:
229 * visible we could be given another vcpu. 283 * visible we could be given another vcpu.
230 */ 284 */
231 lwsync 285 lwsync
232 /* Clear any pending IPI - we're an offline thread */
233 ld r5, HSTATE_XICS_PHYS(r13)
234 li r7, XICS_XIRR
235 lwzcix r3, r5, r7 /* ack any pending interrupt */
236 rlwinm. r0, r3, 0, 0xffffff /* any pending? */
237 beq 37f
238 sync
239 li r0, 0xff
240 li r6, XICS_MFRR
241 stbcix r0, r5, r6 /* clear the IPI */
242 stwcix r3, r5, r7 /* EOI it */
24337: sync
244 286
245 /* increment the nap count and then go to nap mode */ 287 /* increment the nap count and then go to nap mode */
246 ld r4, HSTATE_KVM_VCORE(r13) 288 ld r4, HSTATE_KVM_VCORE(r13)
@@ -253,6 +295,7 @@ kvm_start_guest:
253kvm_no_guest: 295kvm_no_guest:
254 li r0, KVM_HWTHREAD_IN_NAP 296 li r0, KVM_HWTHREAD_IN_NAP
255 stb r0, HSTATE_HWTHREAD_STATE(r13) 297 stb r0, HSTATE_HWTHREAD_STATE(r13)
298kvm_do_nap:
256 li r3, LPCR_PECE0 299 li r3, LPCR_PECE0
257 mfspr r4, SPRN_LPCR 300 mfspr r4, SPRN_LPCR
258 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 301 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
@@ -277,7 +320,7 @@ kvmppc_hv_entry:
277 320
278 /* Required state: 321 /* Required state:
279 * 322 *
280 * R4 = vcpu pointer 323 * R4 = vcpu pointer (or NULL)
281 * MSR = ~IR|DR 324 * MSR = ~IR|DR
282 * R13 = PACA 325 * R13 = PACA
283 * R1 = host R1 326 * R1 = host R1
@@ -287,122 +330,12 @@ kvmppc_hv_entry:
287 std r0, PPC_LR_STKOFF(r1) 330 std r0, PPC_LR_STKOFF(r1)
288 stdu r1, -112(r1) 331 stdu r1, -112(r1)
289 332
290 /* Set partition DABR */
291 /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
292 li r5,3
293 ld r6,VCPU_DABR(r4)
294 mtspr SPRN_DABRX,r5
295 mtspr SPRN_DABR,r6
296BEGIN_FTR_SECTION
297 isync
298END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
299
300 /* Load guest PMU registers */
301 /* R4 is live here (vcpu pointer) */
302 li r3, 1
303 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
304 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
305 isync
306 lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
307 lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
308 lwz r6, VCPU_PMC + 8(r4)
309 lwz r7, VCPU_PMC + 12(r4)
310 lwz r8, VCPU_PMC + 16(r4)
311 lwz r9, VCPU_PMC + 20(r4)
312BEGIN_FTR_SECTION
313 lwz r10, VCPU_PMC + 24(r4)
314 lwz r11, VCPU_PMC + 28(r4)
315END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
316 mtspr SPRN_PMC1, r3
317 mtspr SPRN_PMC2, r5
318 mtspr SPRN_PMC3, r6
319 mtspr SPRN_PMC4, r7
320 mtspr SPRN_PMC5, r8
321 mtspr SPRN_PMC6, r9
322BEGIN_FTR_SECTION
323 mtspr SPRN_PMC7, r10
324 mtspr SPRN_PMC8, r11
325END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
326 ld r3, VCPU_MMCR(r4)
327 ld r5, VCPU_MMCR + 8(r4)
328 ld r6, VCPU_MMCR + 16(r4)
329 ld r7, VCPU_SIAR(r4)
330 ld r8, VCPU_SDAR(r4)
331 mtspr SPRN_MMCR1, r5
332 mtspr SPRN_MMCRA, r6
333 mtspr SPRN_SIAR, r7
334 mtspr SPRN_SDAR, r8
335 mtspr SPRN_MMCR0, r3
336 isync
337
338 /* Load up FP, VMX and VSX registers */
339 bl kvmppc_load_fp
340
341 ld r14, VCPU_GPR(R14)(r4)
342 ld r15, VCPU_GPR(R15)(r4)
343 ld r16, VCPU_GPR(R16)(r4)
344 ld r17, VCPU_GPR(R17)(r4)
345 ld r18, VCPU_GPR(R18)(r4)
346 ld r19, VCPU_GPR(R19)(r4)
347 ld r20, VCPU_GPR(R20)(r4)
348 ld r21, VCPU_GPR(R21)(r4)
349 ld r22, VCPU_GPR(R22)(r4)
350 ld r23, VCPU_GPR(R23)(r4)
351 ld r24, VCPU_GPR(R24)(r4)
352 ld r25, VCPU_GPR(R25)(r4)
353 ld r26, VCPU_GPR(R26)(r4)
354 ld r27, VCPU_GPR(R27)(r4)
355 ld r28, VCPU_GPR(R28)(r4)
356 ld r29, VCPU_GPR(R29)(r4)
357 ld r30, VCPU_GPR(R30)(r4)
358 ld r31, VCPU_GPR(R31)(r4)
359
360BEGIN_FTR_SECTION
361 /* Switch DSCR to guest value */
362 ld r5, VCPU_DSCR(r4)
363 mtspr SPRN_DSCR, r5
364END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
365
366 /*
367 * Set the decrementer to the guest decrementer.
368 */
369 ld r8,VCPU_DEC_EXPIRES(r4)
370 mftb r7
371 subf r3,r7,r8
372 mtspr SPRN_DEC,r3
373 stw r3,VCPU_DEC(r4)
374
375 ld r5, VCPU_SPRG0(r4)
376 ld r6, VCPU_SPRG1(r4)
377 ld r7, VCPU_SPRG2(r4)
378 ld r8, VCPU_SPRG3(r4)
379 mtspr SPRN_SPRG0, r5
380 mtspr SPRN_SPRG1, r6
381 mtspr SPRN_SPRG2, r7
382 mtspr SPRN_SPRG3, r8
383
384 /* Save R1 in the PACA */ 333 /* Save R1 in the PACA */
385 std r1, HSTATE_HOST_R1(r13) 334 std r1, HSTATE_HOST_R1(r13)
386 335
387 /* Load up DAR and DSISR */
388 ld r5, VCPU_DAR(r4)
389 lwz r6, VCPU_DSISR(r4)
390 mtspr SPRN_DAR, r5
391 mtspr SPRN_DSISR, r6
392
393 li r6, KVM_GUEST_MODE_HOST_HV 336 li r6, KVM_GUEST_MODE_HOST_HV
394 stb r6, HSTATE_IN_GUEST(r13) 337 stb r6, HSTATE_IN_GUEST(r13)
395 338
396BEGIN_FTR_SECTION
397 /* Restore AMR and UAMOR, set AMOR to all 1s */
398 ld r5,VCPU_AMR(r4)
399 ld r6,VCPU_UAMOR(r4)
400 li r7,-1
401 mtspr SPRN_AMR,r5
402 mtspr SPRN_UAMOR,r6
403 mtspr SPRN_AMOR,r7
404END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
405
406 /* Clear out SLB */ 339 /* Clear out SLB */
407 li r6,0 340 li r6,0
408 slbmte r6,r6 341 slbmte r6,r6
@@ -428,8 +361,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
428 bne 21b 361 bne 21b
429 362
430 /* Primary thread switches to guest partition. */ 363 /* Primary thread switches to guest partition. */
431 ld r9,VCPU_KVM(r4) /* pointer to struct kvm */ 364 ld r9,VCORE_KVM(r5) /* pointer to struct kvm */
432 lwz r6,VCPU_PTID(r4) 365 lbz r6,HSTATE_PTID(r13)
433 cmpwi r6,0 366 cmpwi r6,0
434 bne 20f 367 bne 20f
435 ld r6,KVM_SDR1(r9) 368 ld r6,KVM_SDR1(r9)
@@ -457,7 +390,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
457 andc r7,r7,r0 390 andc r7,r7,r0
458 stdcx. r7,0,r6 391 stdcx. r7,0,r6
459 bne 23b 392 bne 23b
460 li r6,128 /* and flush the TLB */ 393 /* Flush the TLB of any entries for this LPID */
394 /* use arch 2.07S as a proxy for POWER8 */
395BEGIN_FTR_SECTION
396 li r6,512 /* POWER8 has 512 sets */
397FTR_SECTION_ELSE
398 li r6,128 /* POWER7 has 128 sets */
399ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
461 mtctr r6 400 mtctr r6
462 li r7,0x800 /* IS field = 0b10 */ 401 li r7,0x800 /* IS field = 0b10 */
463 ptesync 402 ptesync
@@ -487,6 +426,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
487 beq 38f 426 beq 38f
488 mtspr SPRN_PCR, r7 427 mtspr SPRN_PCR, r7
48938: 42838:
429
430BEGIN_FTR_SECTION
431 /* DPDES is shared between threads */
432 ld r8, VCORE_DPDES(r5)
433 mtspr SPRN_DPDES, r8
434END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
435
490 li r0,1 436 li r0,1
491 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */ 437 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
492 b 10f 438 b 10f
@@ -503,32 +449,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
503 mtspr SPRN_RMOR,r8 449 mtspr SPRN_RMOR,r8
504 isync 450 isync
505 451
506 /* Increment yield count if they have a VPA */
507 ld r3, VCPU_VPA(r4)
508 cmpdi r3, 0
509 beq 25f
510 lwz r5, LPPACA_YIELDCOUNT(r3)
511 addi r5, r5, 1
512 stw r5, LPPACA_YIELDCOUNT(r3)
513 li r6, 1
514 stb r6, VCPU_VPA_DIRTY(r4)
51525:
516 /* Check if HDEC expires soon */ 452 /* Check if HDEC expires soon */
517 mfspr r3,SPRN_HDEC 453 mfspr r3,SPRN_HDEC
518 cmpwi r3,10 454 cmpwi r3,512 /* 1 microsecond */
519 li r12,BOOK3S_INTERRUPT_HV_DECREMENTER 455 li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
520 mr r9,r4
521 blt hdec_soon 456 blt hdec_soon
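
The new threshold is calibrated in real time rather than in arbitrary ticks: the timebase, and hence HDEC, advances at 512 MHz on these machines, so 512 ticks is one microsecond, where the old value of 10 was only about 20 ns. A quick standalone check:

#include <stdio.h>

int main(void)
{
	double tb_hz = 512e6;	/* timebase frequency on POWER7/POWER8 */

	printf("512 ticks = %.1f ns\n", 512.0 / tb_hz * 1e9);	/* 1000.0 ns */
	printf("10 ticks  = %.1f ns\n",  10.0 / tb_hz * 1e9);	/* ~19.5 ns */
	return 0;
}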
522
523 /* Save purr/spurr */
524 mfspr r5,SPRN_PURR
525 mfspr r6,SPRN_SPURR
526 std r5,HSTATE_PURR(r13)
527 std r6,HSTATE_SPURR(r13)
528 ld r7,VCPU_PURR(r4)
529 ld r8,VCPU_SPURR(r4)
530 mtspr SPRN_PURR,r7
531 mtspr SPRN_SPURR,r8
532 b 31f 457 b 31f
533 458
534 /* 459 /*
@@ -539,7 +464,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
539 * We also have to invalidate the TLB since its 464 * We also have to invalidate the TLB since its
540 * entries aren't tagged with the LPID. 465 * entries aren't tagged with the LPID.
541 */ 466 */
54230: ld r9,VCPU_KVM(r4) /* pointer to struct kvm */ 46730: ld r5,HSTATE_KVM_VCORE(r13)
468 ld r9,VCORE_KVM(r5) /* pointer to struct kvm */
543 469
544 /* first take native_tlbie_lock */ 470 /* first take native_tlbie_lock */
545 .section ".toc","aw" 471 .section ".toc","aw"
@@ -604,7 +530,6 @@ toc_tlbie_lock:
604 mfspr r3,SPRN_HDEC 530 mfspr r3,SPRN_HDEC
605 cmpwi r3,10 531 cmpwi r3,10
606 li r12,BOOK3S_INTERRUPT_HV_DECREMENTER 532 li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
607 mr r9,r4
608 blt hdec_soon 533 blt hdec_soon
609 534
610 /* Enable HDEC interrupts */ 535 /* Enable HDEC interrupts */
@@ -619,9 +544,14 @@ toc_tlbie_lock:
619 mfspr r0,SPRN_HID0 544 mfspr r0,SPRN_HID0
620 mfspr r0,SPRN_HID0 545 mfspr r0,SPRN_HID0
621 mfspr r0,SPRN_HID0 546 mfspr r0,SPRN_HID0
54731:
548 /* Do we have a guest vcpu to run? */
549 cmpdi r4, 0
550 beq kvmppc_primary_no_guest
551kvmppc_got_guest:
622 552
623 /* Load up guest SLB entries */ 553 /* Load up guest SLB entries */
62431: lwz r5,VCPU_SLB_MAX(r4) 554 lwz r5,VCPU_SLB_MAX(r4)
625 cmpwi r5,0 555 cmpwi r5,0
626 beq 9f 556 beq 9f
627 mtctr r5 557 mtctr r5
@@ -632,6 +562,209 @@ toc_tlbie_lock:
632 addi r6,r6,VCPU_SLB_SIZE 562 addi r6,r6,VCPU_SLB_SIZE
633 bdnz 1b 563 bdnz 1b
6349: 5649:
565 /* Increment yield count if they have a VPA */
566 ld r3, VCPU_VPA(r4)
567 cmpdi r3, 0
568 beq 25f
569 lwz r5, LPPACA_YIELDCOUNT(r3)
570 addi r5, r5, 1
571 stw r5, LPPACA_YIELDCOUNT(r3)
572 li r6, 1
573 stb r6, VCPU_VPA_DIRTY(r4)
57425:
575
576BEGIN_FTR_SECTION
577 /* Save purr/spurr */
578 mfspr r5,SPRN_PURR
579 mfspr r6,SPRN_SPURR
580 std r5,HSTATE_PURR(r13)
581 std r6,HSTATE_SPURR(r13)
582 ld r7,VCPU_PURR(r4)
583 ld r8,VCPU_SPURR(r4)
584 mtspr SPRN_PURR,r7
585 mtspr SPRN_SPURR,r8
586END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
587
588BEGIN_FTR_SECTION
589 /* Set partition DABR */
590 /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
591 lwz r5,VCPU_DABRX(r4)
592 ld r6,VCPU_DABR(r4)
593 mtspr SPRN_DABRX,r5
594 mtspr SPRN_DABR,r6
595 BEGIN_FTR_SECTION_NESTED(89)
596 isync
597 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
598END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
599
600 /* Load guest PMU registers */
601 /* R4 is live here (vcpu pointer) */
602 li r3, 1
603 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
604 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
605 isync
606 lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
607 lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
608 lwz r6, VCPU_PMC + 8(r4)
609 lwz r7, VCPU_PMC + 12(r4)
610 lwz r8, VCPU_PMC + 16(r4)
611 lwz r9, VCPU_PMC + 20(r4)
612BEGIN_FTR_SECTION
613 lwz r10, VCPU_PMC + 24(r4)
614 lwz r11, VCPU_PMC + 28(r4)
615END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
616 mtspr SPRN_PMC1, r3
617 mtspr SPRN_PMC2, r5
618 mtspr SPRN_PMC3, r6
619 mtspr SPRN_PMC4, r7
620 mtspr SPRN_PMC5, r8
621 mtspr SPRN_PMC6, r9
622BEGIN_FTR_SECTION
623 mtspr SPRN_PMC7, r10
624 mtspr SPRN_PMC8, r11
625END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
626 ld r3, VCPU_MMCR(r4)
627 ld r5, VCPU_MMCR + 8(r4)
628 ld r6, VCPU_MMCR + 16(r4)
629 ld r7, VCPU_SIAR(r4)
630 ld r8, VCPU_SDAR(r4)
631 mtspr SPRN_MMCR1, r5
632 mtspr SPRN_MMCRA, r6
633 mtspr SPRN_SIAR, r7
634 mtspr SPRN_SDAR, r8
635BEGIN_FTR_SECTION
636 ld r5, VCPU_MMCR + 24(r4)
637 ld r6, VCPU_SIER(r4)
638 lwz r7, VCPU_PMC + 24(r4)
639 lwz r8, VCPU_PMC + 28(r4)
640 ld r9, VCPU_MMCR + 32(r4)
641 mtspr SPRN_MMCR2, r5
642 mtspr SPRN_SIER, r6
643 mtspr SPRN_SPMC1, r7
644 mtspr SPRN_SPMC2, r8
645 mtspr SPRN_MMCRS, r9
646END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
647 mtspr SPRN_MMCR0, r3
648 isync
649
650 /* Load up FP, VMX and VSX registers */
651 bl kvmppc_load_fp
652
653 ld r14, VCPU_GPR(R14)(r4)
654 ld r15, VCPU_GPR(R15)(r4)
655 ld r16, VCPU_GPR(R16)(r4)
656 ld r17, VCPU_GPR(R17)(r4)
657 ld r18, VCPU_GPR(R18)(r4)
658 ld r19, VCPU_GPR(R19)(r4)
659 ld r20, VCPU_GPR(R20)(r4)
660 ld r21, VCPU_GPR(R21)(r4)
661 ld r22, VCPU_GPR(R22)(r4)
662 ld r23, VCPU_GPR(R23)(r4)
663 ld r24, VCPU_GPR(R24)(r4)
664 ld r25, VCPU_GPR(R25)(r4)
665 ld r26, VCPU_GPR(R26)(r4)
666 ld r27, VCPU_GPR(R27)(r4)
667 ld r28, VCPU_GPR(R28)(r4)
668 ld r29, VCPU_GPR(R29)(r4)
669 ld r30, VCPU_GPR(R30)(r4)
670 ld r31, VCPU_GPR(R31)(r4)
671
672BEGIN_FTR_SECTION
673 /* Switch DSCR to guest value */
674 ld r5, VCPU_DSCR(r4)
675 mtspr SPRN_DSCR, r5
676END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
677
678BEGIN_FTR_SECTION
679 /* Skip next section on POWER7 or PPC970 */
680 b 8f
681END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
682 /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
683 mfmsr r8
684 li r0, 1
685 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
686 mtmsrd r8
687
688 /* Load up POWER8-specific registers */
689 ld r5, VCPU_IAMR(r4)
690 lwz r6, VCPU_PSPB(r4)
691 ld r7, VCPU_FSCR(r4)
692 mtspr SPRN_IAMR, r5
693 mtspr SPRN_PSPB, r6
694 mtspr SPRN_FSCR, r7
695 ld r5, VCPU_DAWR(r4)
696 ld r6, VCPU_DAWRX(r4)
697 ld r7, VCPU_CIABR(r4)
698 ld r8, VCPU_TAR(r4)
699 mtspr SPRN_DAWR, r5
700 mtspr SPRN_DAWRX, r6
701 mtspr SPRN_CIABR, r7
702 mtspr SPRN_TAR, r8
703 ld r5, VCPU_IC(r4)
704 ld r6, VCPU_VTB(r4)
705 mtspr SPRN_IC, r5
706 mtspr SPRN_VTB, r6
707#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
708 ld r5, VCPU_TFHAR(r4)
709 ld r6, VCPU_TFIAR(r4)
710 ld r7, VCPU_TEXASR(r4)
711 mtspr SPRN_TFHAR, r5
712 mtspr SPRN_TFIAR, r6
713 mtspr SPRN_TEXASR, r7
714#endif
715 ld r8, VCPU_EBBHR(r4)
716 mtspr SPRN_EBBHR, r8
717 ld r5, VCPU_EBBRR(r4)
718 ld r6, VCPU_BESCR(r4)
719 ld r7, VCPU_CSIGR(r4)
720 ld r8, VCPU_TACR(r4)
721 mtspr SPRN_EBBRR, r5
722 mtspr SPRN_BESCR, r6
723 mtspr SPRN_CSIGR, r7
724 mtspr SPRN_TACR, r8
725 ld r5, VCPU_TCSCR(r4)
726 ld r6, VCPU_ACOP(r4)
727 lwz r7, VCPU_GUEST_PID(r4)
728 ld r8, VCPU_WORT(r4)
729 mtspr SPRN_TCSCR, r5
730 mtspr SPRN_ACOP, r6
731 mtspr SPRN_PID, r7
732 mtspr SPRN_WORT, r8
7338:
734
735 /*
736 * Set the decrementer to the guest decrementer.
737 */
738 ld r8,VCPU_DEC_EXPIRES(r4)
739 mftb r7
740 subf r3,r7,r8
741 mtspr SPRN_DEC,r3
742 stw r3,VCPU_DEC(r4)
743
744 ld r5, VCPU_SPRG0(r4)
745 ld r6, VCPU_SPRG1(r4)
746 ld r7, VCPU_SPRG2(r4)
747 ld r8, VCPU_SPRG3(r4)
748 mtspr SPRN_SPRG0, r5
749 mtspr SPRN_SPRG1, r6
750 mtspr SPRN_SPRG2, r7
751 mtspr SPRN_SPRG3, r8
752
753 /* Load up DAR and DSISR */
754 ld r5, VCPU_DAR(r4)
755 lwz r6, VCPU_DSISR(r4)
756 mtspr SPRN_DAR, r5
757 mtspr SPRN_DSISR, r6
758
759BEGIN_FTR_SECTION
760 /* Restore AMR and UAMOR, set AMOR to all 1s */
761 ld r5,VCPU_AMR(r4)
762 ld r6,VCPU_UAMOR(r4)
763 li r7,-1
764 mtspr SPRN_AMR,r5
765 mtspr SPRN_UAMOR,r6
766 mtspr SPRN_AMOR,r7
767END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
635 768
636 /* Restore state of CTRL run bit; assume 1 on entry */ 769 /* Restore state of CTRL run bit; assume 1 on entry */
637 lwz r5,VCPU_CTRL(r4) 770 lwz r5,VCPU_CTRL(r4)
@@ -647,48 +780,53 @@ toc_tlbie_lock:
647 mtctr r6 780 mtctr r6
648 mtxer r7 781 mtxer r7
649 782
783kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */
650 ld r10, VCPU_PC(r4) 784 ld r10, VCPU_PC(r4)
651 ld r11, VCPU_MSR(r4) 785 ld r11, VCPU_MSR(r4)
652kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */
653 ld r6, VCPU_SRR0(r4) 786 ld r6, VCPU_SRR0(r4)
654 ld r7, VCPU_SRR1(r4) 787 ld r7, VCPU_SRR1(r4)
788 mtspr SPRN_SRR0, r6
789 mtspr SPRN_SRR1, r7
655 790
791deliver_guest_interrupt:
656 /* r11 = vcpu->arch.msr & ~MSR_HV */ 792 /* r11 = vcpu->arch.msr & ~MSR_HV */
657 rldicl r11, r11, 63 - MSR_HV_LG, 1 793 rldicl r11, r11, 63 - MSR_HV_LG, 1
658 rotldi r11, r11, 1 + MSR_HV_LG 794 rotldi r11, r11, 1 + MSR_HV_LG
659 ori r11, r11, MSR_ME 795 ori r11, r11, MSR_ME
660 796
661 /* Check if we can deliver an external or decrementer interrupt now */ 797 /* Check if we can deliver an external or decrementer interrupt now */
662 ld r0,VCPU_PENDING_EXC(r4) 798 ld r0, VCPU_PENDING_EXC(r4)
663 lis r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h 799 rldicl r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
664 and r0,r0,r8 800 cmpdi cr1, r0, 0
665 cmpdi cr1,r0,0 801 andi. r8, r11, MSR_EE
666 andi. r0,r11,MSR_EE
667 beq cr1,11f
668BEGIN_FTR_SECTION 802BEGIN_FTR_SECTION
669 mfspr r8,SPRN_LPCR 803 mfspr r8, SPRN_LPCR
670 ori r8,r8,LPCR_MER 804 /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
671 mtspr SPRN_LPCR,r8 805 rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
806 mtspr SPRN_LPCR, r8
672 isync 807 isync
673END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 808END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
674 beq 5f 809 beq 5f
675 li r0,BOOK3S_INTERRUPT_EXTERNAL 810 li r0, BOOK3S_INTERRUPT_EXTERNAL
67612: mr r6,r10 811 bne cr1, 12f
677 mr r10,r0 812 mfspr r0, SPRN_DEC
678 mr r7,r11 813 cmpwi r0, 0
679 li r11,(MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ 814 li r0, BOOK3S_INTERRUPT_DECREMENTER
680 rotldi r11,r11,63 815 bge 5f
681 b 5f
68211: beq 5f
683 mfspr r0,SPRN_DEC
684 cmpwi r0,0
685 li r0,BOOK3S_INTERRUPT_DECREMENTER
686 blt 12b
687 816
688 /* Move SRR0 and SRR1 into the respective regs */ 81712: mtspr SPRN_SRR0, r10
6895: mtspr SPRN_SRR0, r6 818 mr r10,r0
690 mtspr SPRN_SRR1, r7 819 mtspr SPRN_SRR1, r11
820 ld r11, VCPU_INTR_MSR(r4)
8215:
691 822
823/*
824 * Required state:
825 * R4 = vcpu
826 * R10: value for HSRR0
827 * R11: value for HSRR1
828 * R13 = PACA
829 */
692fast_guest_return: 830fast_guest_return:
693 li r0,0 831 li r0,0
694 stb r0,VCPU_CEDED(r4) /* cancel cede */ 832 stb r0,VCPU_CEDED(r4) /* cancel cede */
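
In C terms, the rldicl/rldimi pair in the new deliver_guest_interrupt path above amounts to the following (kernel-style restatement, assuming the usual definition of LPCR_MER as the single bit at LPCR_MER_SH):

/* Is a level-triggered external interrupt pending for the guest? */
int ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL_LEVEL) & 1;

/* Mirror that bit into LPCR[MER]; with MER set the guest takes an external
 * interrupt as soon as it sets MSR_EE, even though we enter it with the
 * interrupt not yet delivered. */
lpcr = (lpcr & ~LPCR_MER) | ((unsigned long)ext << LPCR_MER_SH);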
@@ -868,39 +1006,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
868 /* External interrupt, first check for host_ipi. If this is 1006 /* External interrupt, first check for host_ipi. If this is
869 * set, we know the host wants us out so let's do it now 1007 * set, we know the host wants us out so let's do it now
870 */ 1008 */
871do_ext_interrupt:
872 bl kvmppc_read_intr 1009 bl kvmppc_read_intr
873 cmpdi r3, 0 1010 cmpdi r3, 0
874 bgt ext_interrupt_to_host 1011 bgt ext_interrupt_to_host
875 1012
876 /* Allright, looks like an IPI for the guest, we need to set MER */
877 /* Check if any CPU is heading out to the host, if so head out too */ 1013 /* Check if any CPU is heading out to the host, if so head out too */
878 ld r5, HSTATE_KVM_VCORE(r13) 1014 ld r5, HSTATE_KVM_VCORE(r13)
879 lwz r0, VCORE_ENTRY_EXIT(r5) 1015 lwz r0, VCORE_ENTRY_EXIT(r5)
880 cmpwi r0, 0x100 1016 cmpwi r0, 0x100
881 bge ext_interrupt_to_host 1017 bge ext_interrupt_to_host
882 1018
883 /* See if there is a pending interrupt for the guest */ 1019 /* Return to guest after delivering any pending interrupt */
884 mfspr r8, SPRN_LPCR 1020 mr r4, r9
885 ld r0, VCPU_PENDING_EXC(r9) 1021 b deliver_guest_interrupt
886 /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
887 rldicl. r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
888 rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
889 beq 2f
890
891 /* And if the guest EE is set, we can deliver immediately, else
892 * we return to the guest with MER set
893 */
894 andi. r0, r11, MSR_EE
895 beq 2f
896 mtspr SPRN_SRR0, r10
897 mtspr SPRN_SRR1, r11
898 li r10, BOOK3S_INTERRUPT_EXTERNAL
899 li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
900 rotldi r11, r11, 63
9012: mr r4, r9
902 mtspr SPRN_LPCR, r8
903 b fast_guest_return
904 1022
905ext_interrupt_to_host: 1023ext_interrupt_to_host:
906 1024
@@ -975,13 +1093,194 @@ BEGIN_FTR_SECTION
975 mtspr SPRN_SPURR,r4 1093 mtspr SPRN_SPURR,r4
976END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201) 1094END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
977 1095
1096 /* Save DEC */
1097 mfspr r5,SPRN_DEC
1098 mftb r6
1099 extsw r5,r5
1100 add r5,r5,r6
1101 std r5,VCPU_DEC_EXPIRES(r9)
1102
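
The extsw in the new DEC save matters because DEC is a 32-bit register that may already have counted below zero by the time we get here; sign-extending it before adding the timebase keeps the stored expiry slightly in the past instead of pushing it about 8 seconds (2^32 ticks) into the future. A standalone illustration with made-up numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t dec = (uint32_t)-100;	/* DEC underflowed 100 ticks ago */
	uint64_t tb  = 1000000;		/* example timebase reading */

	int64_t  with_extsw    = (int64_t)(int32_t)dec + (int64_t)tb;	/* 999900 */
	uint64_t without_extsw = (uint64_t)dec + tb;			/* 4295967196 */

	printf("dec_expires: %lld (signed) vs %llu (unsigned)\n",
	       (long long)with_extsw, (unsigned long long)without_extsw);
	return 0;
}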
1103BEGIN_FTR_SECTION
1104 b 8f
1105END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
1106 /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
1107 mfmsr r8
1108 li r0, 1
1109 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
1110 mtmsrd r8
1111
1112 /* Save POWER8-specific registers */
1113 mfspr r5, SPRN_IAMR
1114 mfspr r6, SPRN_PSPB
1115 mfspr r7, SPRN_FSCR
1116 std r5, VCPU_IAMR(r9)
1117 stw r6, VCPU_PSPB(r9)
1118 std r7, VCPU_FSCR(r9)
1119 mfspr r5, SPRN_IC
1120 mfspr r6, SPRN_VTB
1121 mfspr r7, SPRN_TAR
1122 std r5, VCPU_IC(r9)
1123 std r6, VCPU_VTB(r9)
1124 std r7, VCPU_TAR(r9)
1125#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1126 mfspr r5, SPRN_TFHAR
1127 mfspr r6, SPRN_TFIAR
1128 mfspr r7, SPRN_TEXASR
1129 std r5, VCPU_TFHAR(r9)
1130 std r6, VCPU_TFIAR(r9)
1131 std r7, VCPU_TEXASR(r9)
1132#endif
1133 mfspr r8, SPRN_EBBHR
1134 std r8, VCPU_EBBHR(r9)
1135 mfspr r5, SPRN_EBBRR
1136 mfspr r6, SPRN_BESCR
1137 mfspr r7, SPRN_CSIGR
1138 mfspr r8, SPRN_TACR
1139 std r5, VCPU_EBBRR(r9)
1140 std r6, VCPU_BESCR(r9)
1141 std r7, VCPU_CSIGR(r9)
1142 std r8, VCPU_TACR(r9)
1143 mfspr r5, SPRN_TCSCR
1144 mfspr r6, SPRN_ACOP
1145 mfspr r7, SPRN_PID
1146 mfspr r8, SPRN_WORT
1147 std r5, VCPU_TCSCR(r9)
1148 std r6, VCPU_ACOP(r9)
1149 stw r7, VCPU_GUEST_PID(r9)
1150 std r8, VCPU_WORT(r9)
11518:
1152
1153 /* Save and reset AMR and UAMOR before turning on the MMU */
1154BEGIN_FTR_SECTION
1155 mfspr r5,SPRN_AMR
1156 mfspr r6,SPRN_UAMOR
1157 std r5,VCPU_AMR(r9)
1158 std r6,VCPU_UAMOR(r9)
1159 li r6,0
1160 mtspr SPRN_AMR,r6
1161END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1162
1163 /* Switch DSCR back to host value */
1164BEGIN_FTR_SECTION
1165 mfspr r8, SPRN_DSCR
1166 ld r7, HSTATE_DSCR(r13)
1167 std r8, VCPU_DSCR(r9)
1168 mtspr SPRN_DSCR, r7
1169END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1170
1171 /* Save non-volatile GPRs */
1172 std r14, VCPU_GPR(R14)(r9)
1173 std r15, VCPU_GPR(R15)(r9)
1174 std r16, VCPU_GPR(R16)(r9)
1175 std r17, VCPU_GPR(R17)(r9)
1176 std r18, VCPU_GPR(R18)(r9)
1177 std r19, VCPU_GPR(R19)(r9)
1178 std r20, VCPU_GPR(R20)(r9)
1179 std r21, VCPU_GPR(R21)(r9)
1180 std r22, VCPU_GPR(R22)(r9)
1181 std r23, VCPU_GPR(R23)(r9)
1182 std r24, VCPU_GPR(R24)(r9)
1183 std r25, VCPU_GPR(R25)(r9)
1184 std r26, VCPU_GPR(R26)(r9)
1185 std r27, VCPU_GPR(R27)(r9)
1186 std r28, VCPU_GPR(R28)(r9)
1187 std r29, VCPU_GPR(R29)(r9)
1188 std r30, VCPU_GPR(R30)(r9)
1189 std r31, VCPU_GPR(R31)(r9)
1190
1191 /* Save SPRGs */
1192 mfspr r3, SPRN_SPRG0
1193 mfspr r4, SPRN_SPRG1
1194 mfspr r5, SPRN_SPRG2
1195 mfspr r6, SPRN_SPRG3
1196 std r3, VCPU_SPRG0(r9)
1197 std r4, VCPU_SPRG1(r9)
1198 std r5, VCPU_SPRG2(r9)
1199 std r6, VCPU_SPRG3(r9)
1200
1201 /* save FP state */
1202 mr r3, r9
1203 bl kvmppc_save_fp
1204
1205 /* Increment yield count if they have a VPA */
1206 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
1207 cmpdi r8, 0
1208 beq 25f
1209 lwz r3, LPPACA_YIELDCOUNT(r8)
1210 addi r3, r3, 1
1211 stw r3, LPPACA_YIELDCOUNT(r8)
1212 li r3, 1
1213 stb r3, VCPU_VPA_DIRTY(r9)
121425:
1215 /* Save PMU registers if requested */
1216 /* r8 and cr0.eq are live here */
1217 li r3, 1
1218 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
1219 mfspr r4, SPRN_MMCR0 /* save MMCR0 */
1220 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
1221 mfspr r6, SPRN_MMCRA
1222BEGIN_FTR_SECTION
1223 /* On P7, clear MMCRA in order to disable SDAR updates */
1224 li r7, 0
1225 mtspr SPRN_MMCRA, r7
1226END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1227 isync
1228 beq 21f /* if no VPA, save PMU stuff anyway */
1229 lbz r7, LPPACA_PMCINUSE(r8)
1230 cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
1231 bne 21f
1232 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
1233 b 22f
123421: mfspr r5, SPRN_MMCR1
1235 mfspr r7, SPRN_SIAR
1236 mfspr r8, SPRN_SDAR
1237 std r4, VCPU_MMCR(r9)
1238 std r5, VCPU_MMCR + 8(r9)
1239 std r6, VCPU_MMCR + 16(r9)
1240 std r7, VCPU_SIAR(r9)
1241 std r8, VCPU_SDAR(r9)
1242 mfspr r3, SPRN_PMC1
1243 mfspr r4, SPRN_PMC2
1244 mfspr r5, SPRN_PMC3
1245 mfspr r6, SPRN_PMC4
1246 mfspr r7, SPRN_PMC5
1247 mfspr r8, SPRN_PMC6
1248BEGIN_FTR_SECTION
1249 mfspr r10, SPRN_PMC7
1250 mfspr r11, SPRN_PMC8
1251END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1252 stw r3, VCPU_PMC(r9)
1253 stw r4, VCPU_PMC + 4(r9)
1254 stw r5, VCPU_PMC + 8(r9)
1255 stw r6, VCPU_PMC + 12(r9)
1256 stw r7, VCPU_PMC + 16(r9)
1257 stw r8, VCPU_PMC + 20(r9)
1258BEGIN_FTR_SECTION
1259 stw r10, VCPU_PMC + 24(r9)
1260 stw r11, VCPU_PMC + 28(r9)
1261END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1262BEGIN_FTR_SECTION
1263 mfspr r4, SPRN_MMCR2
1264 mfspr r5, SPRN_SIER
1265 mfspr r6, SPRN_SPMC1
1266 mfspr r7, SPRN_SPMC2
1267 mfspr r8, SPRN_MMCRS
1268 std r4, VCPU_MMCR + 24(r9)
1269 std r5, VCPU_SIER(r9)
1270 stw r6, VCPU_PMC + 24(r9)
1271 stw r7, VCPU_PMC + 28(r9)
1272 std r8, VCPU_MMCR + 32(r9)
1273 lis r4, 0x8000
1274 mtspr SPRN_MMCRS, r4
1275END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
127622:
978 /* Clear out SLB */ 1277 /* Clear out SLB */
979 li r5,0 1278 li r5,0
980 slbmte r5,r5 1279 slbmte r5,r5
981 slbia 1280 slbia
982 ptesync 1281 ptesync
983 1282
984hdec_soon: /* r9 = vcpu, r12 = trap, r13 = paca */ 1283hdec_soon: /* r12 = trap, r13 = paca */
985BEGIN_FTR_SECTION 1284BEGIN_FTR_SECTION
986 b 32f 1285 b 32f
987END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 1286END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
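Near the top of the added block, the guest decrementer is not stored raw: it
is sign-extended and added to the current timebase, so VCPU_DEC_EXPIRES holds
an absolute expiry timestamp rather than a remaining count. A small C sketch
of that conversion (types chosen to mirror the signed 32-bit DEC and the
64-bit timebase; illustrative only):

#include <stdint.h>

/* extsw r5,r5 ; add r5,r5,r6 in the assembly above */
static uint64_t dec_expires(int32_t dec, uint64_t tb)
{
        return tb + (int64_t)dec;       /* sign-extend, then add mod 2^64 */
}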
@@ -1014,8 +1313,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1014 */ 1313 */
1015 cmpwi r3,0x100 /* Are we the first here? */ 1314 cmpwi r3,0x100 /* Are we the first here? */
1016 bge 43f 1315 bge 43f
1017 cmpwi r3,1 /* Are any other threads in the guest? */
1018 ble 43f
1019 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER 1316 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
1020 beq 40f 1317 beq 40f
1021 li r0,0 1318 li r0,0
@@ -1026,7 +1323,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1026 * doesn't wake CPUs up from nap. 1323 * doesn't wake CPUs up from nap.
1027 */ 1324 */
1028 lwz r3,VCORE_NAPPING_THREADS(r5) 1325 lwz r3,VCORE_NAPPING_THREADS(r5)
1029 lwz r4,VCPU_PTID(r9) 1326 lbz r4,HSTATE_PTID(r13)
1030 li r0,1 1327 li r0,1
1031 sld r0,r0,r4 1328 sld r0,r0,r4
1032 andc. r3,r3,r0 /* no sense IPI'ing ourselves */ 1329 andc. r3,r3,r0 /* no sense IPI'ing ourselves */
@@ -1045,10 +1342,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1045 addi r6,r6,PACA_SIZE 1342 addi r6,r6,PACA_SIZE
1046 bne 42b 1343 bne 42b
1047 1344
1345secondary_too_late:
1048 /* Secondary threads wait for primary to do partition switch */ 1346 /* Secondary threads wait for primary to do partition switch */
104943: ld r4,VCPU_KVM(r9) /* pointer to struct kvm */ 134743: ld r5,HSTATE_KVM_VCORE(r13)
1050 ld r5,HSTATE_KVM_VCORE(r13) 1348 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1051 lwz r3,VCPU_PTID(r9) 1349 lbz r3,HSTATE_PTID(r13)
1052 cmpwi r3,0 1350 cmpwi r3,0
1053 beq 15f 1351 beq 15f
1054 HMT_LOW 1352 HMT_LOW
@@ -1076,6 +1374,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1076 mtspr SPRN_LPID,r7 1374 mtspr SPRN_LPID,r7
1077 isync 1375 isync
1078 1376
1377BEGIN_FTR_SECTION
1378 /* DPDES is shared between threads */
1379 mfspr r7, SPRN_DPDES
1380 std r7, VCORE_DPDES(r5)
1381 /* clear DPDES so we don't get guest doorbells in the host */
1382 li r8, 0
1383 mtspr SPRN_DPDES, r8
1384END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1385
1079 /* Subtract timebase offset from timebase */ 1386 /* Subtract timebase offset from timebase */
1080 ld r8,VCORE_TB_OFFSET(r5) 1387 ld r8,VCORE_TB_OFFSET(r5)
1081 cmpdi r8,0 1388 cmpdi r8,0
@@ -1113,7 +1420,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1113 * We have to lock against concurrent tlbies, and 1420 * We have to lock against concurrent tlbies, and
1114 * we have to flush the whole TLB. 1421 * we have to flush the whole TLB.
1115 */ 1422 */
111632: ld r4,VCPU_KVM(r9) /* pointer to struct kvm */ 142332: ld r5,HSTATE_KVM_VCORE(r13)
1424 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1117 1425
1118 /* Take the guest's tlbie_lock */ 1426 /* Take the guest's tlbie_lock */
1119#ifdef __BIG_ENDIAN__ 1427#ifdef __BIG_ENDIAN__
@@ -1203,6 +1511,56 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1203 add r5,r5,r6 1511 add r5,r5,r6
1204 std r5,VCPU_DEC_EXPIRES(r9) 1512 std r5,VCPU_DEC_EXPIRES(r9)
1205 1513
1514BEGIN_FTR_SECTION
1515 b 8f
1516END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
1517 /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
1518 mfmsr r8
1519 li r0, 1
1520 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
1521 mtmsrd r8
1522
1523 /* Save POWER8-specific registers */
1524 mfspr r5, SPRN_IAMR
1525 mfspr r6, SPRN_PSPB
1526 mfspr r7, SPRN_FSCR
1527 std r5, VCPU_IAMR(r9)
1528 stw r6, VCPU_PSPB(r9)
1529 std r7, VCPU_FSCR(r9)
1530 mfspr r5, SPRN_IC
1531 mfspr r6, SPRN_VTB
1532 mfspr r7, SPRN_TAR
1533 std r5, VCPU_IC(r9)
1534 std r6, VCPU_VTB(r9)
1535 std r7, VCPU_TAR(r9)
1536#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1537 mfspr r5, SPRN_TFHAR
1538 mfspr r6, SPRN_TFIAR
1539 mfspr r7, SPRN_TEXASR
1540 std r5, VCPU_TFHAR(r9)
1541 std r6, VCPU_TFIAR(r9)
1542 std r7, VCPU_TEXASR(r9)
1543#endif
1544 mfspr r8, SPRN_EBBHR
1545 std r8, VCPU_EBBHR(r9)
1546 mfspr r5, SPRN_EBBRR
1547 mfspr r6, SPRN_BESCR
1548 mfspr r7, SPRN_CSIGR
1549 mfspr r8, SPRN_TACR
1550 std r5, VCPU_EBBRR(r9)
1551 std r6, VCPU_BESCR(r9)
1552 std r7, VCPU_CSIGR(r9)
1553 std r8, VCPU_TACR(r9)
1554 mfspr r5, SPRN_TCSCR
1555 mfspr r6, SPRN_ACOP
1556 mfspr r7, SPRN_PID
1557 mfspr r8, SPRN_WORT
1558 std r5, VCPU_TCSCR(r9)
1559 std r6, VCPU_ACOP(r9)
1560 stw r7, VCPU_GUEST_PID(r9)
1561 std r8, VCPU_WORT(r9)
15628:
1563
1206 /* Save and reset AMR and UAMOR before turning on the MMU */ 1564 /* Save and reset AMR and UAMOR before turning on the MMU */
1207BEGIN_FTR_SECTION 1565BEGIN_FTR_SECTION
1208 mfspr r5,SPRN_AMR 1566 mfspr r5,SPRN_AMR
@@ -1217,130 +1575,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1217 li r0, KVM_GUEST_MODE_NONE 1575 li r0, KVM_GUEST_MODE_NONE
1218 stb r0, HSTATE_IN_GUEST(r13) 1576 stb r0, HSTATE_IN_GUEST(r13)
1219 1577
1220 /* Switch DSCR back to host value */
1221BEGIN_FTR_SECTION
1222 mfspr r8, SPRN_DSCR
1223 ld r7, HSTATE_DSCR(r13)
1224 std r8, VCPU_DSCR(r9)
1225 mtspr SPRN_DSCR, r7
1226END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1227
1228 /* Save non-volatile GPRs */
1229 std r14, VCPU_GPR(R14)(r9)
1230 std r15, VCPU_GPR(R15)(r9)
1231 std r16, VCPU_GPR(R16)(r9)
1232 std r17, VCPU_GPR(R17)(r9)
1233 std r18, VCPU_GPR(R18)(r9)
1234 std r19, VCPU_GPR(R19)(r9)
1235 std r20, VCPU_GPR(R20)(r9)
1236 std r21, VCPU_GPR(R21)(r9)
1237 std r22, VCPU_GPR(R22)(r9)
1238 std r23, VCPU_GPR(R23)(r9)
1239 std r24, VCPU_GPR(R24)(r9)
1240 std r25, VCPU_GPR(R25)(r9)
1241 std r26, VCPU_GPR(R26)(r9)
1242 std r27, VCPU_GPR(R27)(r9)
1243 std r28, VCPU_GPR(R28)(r9)
1244 std r29, VCPU_GPR(R29)(r9)
1245 std r30, VCPU_GPR(R30)(r9)
1246 std r31, VCPU_GPR(R31)(r9)
1247
1248 /* Save SPRGs */
1249 mfspr r3, SPRN_SPRG0
1250 mfspr r4, SPRN_SPRG1
1251 mfspr r5, SPRN_SPRG2
1252 mfspr r6, SPRN_SPRG3
1253 std r3, VCPU_SPRG0(r9)
1254 std r4, VCPU_SPRG1(r9)
1255 std r5, VCPU_SPRG2(r9)
1256 std r6, VCPU_SPRG3(r9)
1257
1258 /* save FP state */
1259 mr r3, r9
1260 bl .kvmppc_save_fp
1261
1262 /* Increment yield count if they have a VPA */
1263 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
1264 cmpdi r8, 0
1265 beq 25f
1266 lwz r3, LPPACA_YIELDCOUNT(r8)
1267 addi r3, r3, 1
1268 stw r3, LPPACA_YIELDCOUNT(r8)
1269 li r3, 1
1270 stb r3, VCPU_VPA_DIRTY(r9)
127125:
1272 /* Save PMU registers if requested */
1273 /* r8 and cr0.eq are live here */
1274 li r3, 1
1275 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
1276 mfspr r4, SPRN_MMCR0 /* save MMCR0 */
1277 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
1278 mfspr r6, SPRN_MMCRA
1279BEGIN_FTR_SECTION
1280 /* On P7, clear MMCRA in order to disable SDAR updates */
1281 li r7, 0
1282 mtspr SPRN_MMCRA, r7
1283END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1284 isync
1285 beq 21f /* if no VPA, save PMU stuff anyway */
1286 lbz r7, LPPACA_PMCINUSE(r8)
1287 cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
1288 bne 21f
1289 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
1290 b 22f
129121: mfspr r5, SPRN_MMCR1
1292 mfspr r7, SPRN_SIAR
1293 mfspr r8, SPRN_SDAR
1294 std r4, VCPU_MMCR(r9)
1295 std r5, VCPU_MMCR + 8(r9)
1296 std r6, VCPU_MMCR + 16(r9)
1297 std r7, VCPU_SIAR(r9)
1298 std r8, VCPU_SDAR(r9)
1299 mfspr r3, SPRN_PMC1
1300 mfspr r4, SPRN_PMC2
1301 mfspr r5, SPRN_PMC3
1302 mfspr r6, SPRN_PMC4
1303 mfspr r7, SPRN_PMC5
1304 mfspr r8, SPRN_PMC6
1305BEGIN_FTR_SECTION
1306 mfspr r10, SPRN_PMC7
1307 mfspr r11, SPRN_PMC8
1308END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1309 stw r3, VCPU_PMC(r9)
1310 stw r4, VCPU_PMC + 4(r9)
1311 stw r5, VCPU_PMC + 8(r9)
1312 stw r6, VCPU_PMC + 12(r9)
1313 stw r7, VCPU_PMC + 16(r9)
1314 stw r8, VCPU_PMC + 20(r9)
1315BEGIN_FTR_SECTION
1316 stw r10, VCPU_PMC + 24(r9)
1317 stw r11, VCPU_PMC + 28(r9)
1318END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
131922:
1320 ld r0, 112+PPC_LR_STKOFF(r1) 1578 ld r0, 112+PPC_LR_STKOFF(r1)
1321 addi r1, r1, 112 1579 addi r1, r1, 112
1322 mtlr r0 1580 mtlr r0
1323 blr 1581 blr
1324secondary_too_late:
1325 ld r5,HSTATE_KVM_VCORE(r13)
1326 HMT_LOW
132713: lbz r3,VCORE_IN_GUEST(r5)
1328 cmpwi r3,0
1329 bne 13b
1330 HMT_MEDIUM
1331 li r0, KVM_GUEST_MODE_NONE
1332 stb r0, HSTATE_IN_GUEST(r13)
1333 ld r11,PACA_SLBSHADOWPTR(r13)
1334
1335 .rept SLB_NUM_BOLTED
1336 ld r5,SLBSHADOW_SAVEAREA(r11)
1337 ld r6,SLBSHADOW_SAVEAREA+8(r11)
1338 andis. r7,r5,SLB_ESID_V@h
1339 beq 1f
1340 slbmte r6,r5
13411: addi r11,r11,16
1342 .endr
1343 b 22b
1344 1582
1345/* 1583/*
1346 * Check whether an HDSI is an HPTE not found fault or something else. 1584 * Check whether an HDSI is an HPTE not found fault or something else.
@@ -1386,8 +1624,7 @@ kvmppc_hdsi:
1386 mtspr SPRN_SRR0, r10 1624 mtspr SPRN_SRR0, r10
1387 mtspr SPRN_SRR1, r11 1625 mtspr SPRN_SRR1, r11
1388 li r10, BOOK3S_INTERRUPT_DATA_STORAGE 1626 li r10, BOOK3S_INTERRUPT_DATA_STORAGE
1389 li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ 1627 ld r11, VCPU_INTR_MSR(r9)
1390 rotldi r11, r11, 63
1391fast_interrupt_c_return: 1628fast_interrupt_c_return:
13926: ld r7, VCPU_CTR(r9) 16296: ld r7, VCPU_CTR(r9)
1393 lwz r8, VCPU_XER(r9) 1630 lwz r8, VCPU_XER(r9)
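This hunk (and the matching ones in kvmppc_hisi, the machine-check path and
the new sc_1_fast_return below) replaces the synthesized MSR constant with a
per-vcpu value loaded via VCPU_INTR_MSR, which is what little-endian guest
support needs: the MSR used for interrupt delivery is no longer fixed. The
sketch below only illustrates the idea; the field it models and the
little-endian condition are assumptions based on the diff, not quoted code.

#include <stdint.h>

#define MSR_SF  (1ULL << 63)
#define MSR_ME  (1ULL << 12)
#define MSR_LE  (1ULL << 0)

/* What the old li/rotldi pair produced, extended with the guest's
 * endianness; the real value is precomputed per vcpu and read back
 * through VCPU_INTR_MSR(r9). */
static uint64_t guest_intr_msr(int guest_is_little_endian)
{
        uint64_t msr = MSR_SF | MSR_ME;

        if (guest_is_little_endian)
                msr |= MSR_LE;
        return msr;
}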
@@ -1456,8 +1693,7 @@ kvmppc_hisi:
14561: mtspr SPRN_SRR0, r10 16931: mtspr SPRN_SRR0, r10
1457 mtspr SPRN_SRR1, r11 1694 mtspr SPRN_SRR1, r11
1458 li r10, BOOK3S_INTERRUPT_INST_STORAGE 1695 li r10, BOOK3S_INTERRUPT_INST_STORAGE
1459 li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ 1696 ld r11, VCPU_INTR_MSR(r9)
1460 rotldi r11, r11, 63
1461 b fast_interrupt_c_return 1697 b fast_interrupt_c_return
1462 1698
14633: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */ 16993: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
@@ -1474,7 +1710,8 @@ kvmppc_hisi:
1474hcall_try_real_mode: 1710hcall_try_real_mode:
1475 ld r3,VCPU_GPR(R3)(r9) 1711 ld r3,VCPU_GPR(R3)(r9)
1476 andi. r0,r11,MSR_PR 1712 andi. r0,r11,MSR_PR
1477 bne guest_exit_cont 1713 /* sc 1 from userspace - reflect to guest syscall */
1714 bne sc_1_fast_return
1478 clrrdi r3,r3,2 1715 clrrdi r3,r3,2
1479 cmpldi r3,hcall_real_table_end - hcall_real_table 1716 cmpldi r3,hcall_real_table_end - hcall_real_table
1480 bge guest_exit_cont 1717 bge guest_exit_cont
@@ -1495,6 +1732,14 @@ hcall_try_real_mode:
1495 ld r11,VCPU_MSR(r4) 1732 ld r11,VCPU_MSR(r4)
1496 b fast_guest_return 1733 b fast_guest_return
1497 1734
1735sc_1_fast_return:
1736 mtspr SPRN_SRR0,r10
1737 mtspr SPRN_SRR1,r11
1738 li r10, BOOK3S_INTERRUPT_SYSCALL
1739 ld r11, VCPU_INTR_MSR(r9)
1740 mr r4,r9
1741 b fast_guest_return
1742
1498 /* We've attempted a real mode hcall, but it's punted it back 1743 /* We've attempted a real mode hcall, but it's punted it back
1499 * to userspace. We need to restore some clobbered volatiles 1744 * to userspace. We need to restore some clobbered volatiles
1500 * before resuming the pass-it-to-qemu path */ 1745 * before resuming the pass-it-to-qemu path */
@@ -1588,14 +1833,34 @@ hcall_real_table:
1588 .long 0 /* 0x11c */ 1833 .long 0 /* 0x11c */
1589 .long 0 /* 0x120 */ 1834 .long 0 /* 0x120 */
1590 .long .kvmppc_h_bulk_remove - hcall_real_table 1835 .long .kvmppc_h_bulk_remove - hcall_real_table
1836 .long 0 /* 0x128 */
1837 .long 0 /* 0x12c */
1838 .long 0 /* 0x130 */
1839 .long .kvmppc_h_set_xdabr - hcall_real_table
1591hcall_real_table_end: 1840hcall_real_table_end:
1592 1841
1593ignore_hdec: 1842ignore_hdec:
1594 mr r4,r9 1843 mr r4,r9
1595 b fast_guest_return 1844 b fast_guest_return
1596 1845
1846_GLOBAL(kvmppc_h_set_xdabr)
1847 andi. r0, r5, DABRX_USER | DABRX_KERNEL
1848 beq 6f
1849 li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
1850 andc. r0, r5, r0
1851 beq 3f
18526: li r3, H_PARAMETER
1853 blr
1854
1597_GLOBAL(kvmppc_h_set_dabr) 1855_GLOBAL(kvmppc_h_set_dabr)
1856 li r5, DABRX_USER | DABRX_KERNEL
18573:
1858BEGIN_FTR_SECTION
1859 b 2f
1860END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1598 std r4,VCPU_DABR(r3) 1861 std r4,VCPU_DABR(r3)
1862 stw r5, VCPU_DABRX(r3)
1863 mtspr SPRN_DABRX, r5
1599 /* Work around P7 bug where DABR can get corrupted on mtspr */ 1864 /* Work around P7 bug where DABR can get corrupted on mtspr */
16001: mtspr SPRN_DABR,r4 18651: mtspr SPRN_DABR,r4
1601 mfspr r5, SPRN_DABR 1866 mfspr r5, SPRN_DABR
@@ -1605,6 +1870,17 @@ _GLOBAL(kvmppc_h_set_dabr)
1605 li r3,0 1870 li r3,0
1606 blr 1871 blr
1607 1872
1873 /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
18742: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
1875 rlwimi r5, r4, 1, DAWRX_WT
1876 clrrdi r4, r4, 3
1877 std r4, VCPU_DAWR(r3)
1878 std r5, VCPU_DAWRX(r3)
1879 mtspr SPRN_DAWR, r4
1880 mtspr SPRN_DAWRX, r5
1881 li r3, 0
1882 blr
1883
1608_GLOBAL(kvmppc_h_cede) 1884_GLOBAL(kvmppc_h_cede)
1609 ori r11,r11,MSR_EE 1885 ori r11,r11,MSR_EE
1610 std r11,VCPU_MSR(r3) 1886 std r11,VCPU_MSR(r3)
@@ -1628,7 +1904,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
1628 * up to the host. 1904 * up to the host.
1629 */ 1905 */
1630 ld r5,HSTATE_KVM_VCORE(r13) 1906 ld r5,HSTATE_KVM_VCORE(r13)
1631 lwz r6,VCPU_PTID(r3) 1907 lbz r6,HSTATE_PTID(r13)
1632 lwz r8,VCORE_ENTRY_EXIT(r5) 1908 lwz r8,VCORE_ENTRY_EXIT(r5)
1633 clrldi r8,r8,56 1909 clrldi r8,r8,56
1634 li r0,1 1910 li r0,1
@@ -1643,9 +1919,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
1643 bne 31b 1919 bne 31b
1644 /* order napping_threads update vs testing entry_exit_count */ 1920 /* order napping_threads update vs testing entry_exit_count */
1645 isync 1921 isync
1646 li r0,1 1922 li r0,NAPPING_CEDE
1647 stb r0,HSTATE_NAPPING(r13) 1923 stb r0,HSTATE_NAPPING(r13)
1648 mr r4,r3
1649 lwz r7,VCORE_ENTRY_EXIT(r5) 1924 lwz r7,VCORE_ENTRY_EXIT(r5)
1650 cmpwi r7,0x100 1925 cmpwi r7,0x100
1651 bge 33f /* another thread already exiting */ 1926 bge 33f /* another thread already exiting */
@@ -1677,16 +1952,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
1677 std r31, VCPU_GPR(R31)(r3) 1952 std r31, VCPU_GPR(R31)(r3)
1678 1953
1679 /* save FP state */ 1954 /* save FP state */
1680 bl .kvmppc_save_fp 1955 bl kvmppc_save_fp
1681 1956
1682 /* 1957 /*
1683 * Take a nap until a decrementer or external interrupt occurs, 1958 * Take a nap until a decrementer or external or doorbell interrupt
1684 * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR 1959 * occurs, with PECE1, PECE0 and PECEDP set in LPCR
1685 */ 1960 */
1686 li r0,1 1961 li r0,1
1687 stb r0,HSTATE_HWTHREAD_REQ(r13) 1962 stb r0,HSTATE_HWTHREAD_REQ(r13)
1688 mfspr r5,SPRN_LPCR 1963 mfspr r5,SPRN_LPCR
1689 ori r5,r5,LPCR_PECE0 | LPCR_PECE1 1964 ori r5,r5,LPCR_PECE0 | LPCR_PECE1
1965BEGIN_FTR_SECTION
1966 oris r5,r5,LPCR_PECEDP@h
1967END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1690 mtspr SPRN_LPCR,r5 1968 mtspr SPRN_LPCR,r5
1691 isync 1969 isync
1692 li r0, 0 1970 li r0, 0
@@ -1698,6 +1976,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
1698 nap 1976 nap
1699 b . 1977 b .
1700 1978
197933: mr r4, r3
1980 li r3, 0
1981 li r12, 0
1982 b 34f
1983
1701kvm_end_cede: 1984kvm_end_cede:
1702 /* get vcpu pointer */ 1985 /* get vcpu pointer */
1703 ld r4, HSTATE_KVM_VCPU(r13) 1986 ld r4, HSTATE_KVM_VCPU(r13)
@@ -1727,12 +2010,15 @@ kvm_end_cede:
1727 ld r29, VCPU_GPR(R29)(r4) 2010 ld r29, VCPU_GPR(R29)(r4)
1728 ld r30, VCPU_GPR(R30)(r4) 2011 ld r30, VCPU_GPR(R30)(r4)
1729 ld r31, VCPU_GPR(R31)(r4) 2012 ld r31, VCPU_GPR(R31)(r4)
2013
2014 /* Check the wake reason in SRR1 to see why we got here */
2015 bl kvmppc_check_wake_reason
1730 2016
1731 /* clear our bit in vcore->napping_threads */ 2017 /* clear our bit in vcore->napping_threads */
173233: ld r5,HSTATE_KVM_VCORE(r13) 201834: ld r5,HSTATE_KVM_VCORE(r13)
1733 lwz r3,VCPU_PTID(r4) 2019 lbz r7,HSTATE_PTID(r13)
1734 li r0,1 2020 li r0,1
1735 sld r0,r0,r3 2021 sld r0,r0,r7
1736 addi r6,r5,VCORE_NAPPING_THREADS 2022 addi r6,r5,VCORE_NAPPING_THREADS
173732: lwarx r7,0,r6 202332: lwarx r7,0,r6
1738 andc r7,r7,r0 2024 andc r7,r7,r0
@@ -1741,23 +2027,18 @@ kvm_end_cede:
1741 li r0,0 2027 li r0,0
1742 stb r0,HSTATE_NAPPING(r13) 2028 stb r0,HSTATE_NAPPING(r13)
1743 2029
1744 /* Check the wake reason in SRR1 to see why we got here */ 2030 /* See if the wake reason means we need to exit */
1745 mfspr r3, SPRN_SRR1 2031 stw r12, VCPU_TRAP(r4)
1746 rlwinm r3, r3, 44-31, 0x7 /* extract wake reason field */
1747 cmpwi r3, 4 /* was it an external interrupt? */
1748 li r12, BOOK3S_INTERRUPT_EXTERNAL
1749 mr r9, r4 2032 mr r9, r4
1750 ld r10, VCPU_PC(r9) 2033 cmpdi r3, 0
1751 ld r11, VCPU_MSR(r9) 2034 bgt guest_exit_cont
1752 beq do_ext_interrupt /* if so */
1753 2035
1754 /* see if any other thread is already exiting */ 2036 /* see if any other thread is already exiting */
1755 lwz r0,VCORE_ENTRY_EXIT(r5) 2037 lwz r0,VCORE_ENTRY_EXIT(r5)
1756 cmpwi r0,0x100 2038 cmpwi r0,0x100
1757 blt kvmppc_cede_reentry /* if not go back to guest */ 2039 bge guest_exit_cont
1758 2040
1759 /* some threads are exiting, so go to the guest exit path */ 2041 b kvmppc_cede_reentry /* if not go back to guest */
1760 b hcall_real_fallback
1761 2042
1762 /* cede when already previously prodded case */ 2043 /* cede when already previously prodded case */
1763kvm_cede_prodded: 2044kvm_cede_prodded:
@@ -1783,11 +2064,48 @@ machine_check_realmode:
1783 beq mc_cont 2064 beq mc_cont
1784 /* If not, deliver a machine check. SRR0/1 are already set */ 2065 /* If not, deliver a machine check. SRR0/1 are already set */
1785 li r10, BOOK3S_INTERRUPT_MACHINE_CHECK 2066 li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
1786 li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ 2067 ld r11, VCPU_INTR_MSR(r9)
1787 rotldi r11, r11, 63
1788 b fast_interrupt_c_return 2068 b fast_interrupt_c_return
1789 2069
1790/* 2070/*
2071 * Check the reason we woke from nap, and take appropriate action.
2072 * Returns:
2073 * 0 if nothing needs to be done
2074 * 1 if something happened that needs to be handled by the host
2075 * -1 if there was a guest wakeup (IPI)
2076 *
2077 * Also sets r12 to the interrupt vector for any interrupt that needs
2078 * to be handled now by the host (0x500 for external interrupt), or zero.
2079 */
2080kvmppc_check_wake_reason:
2081 mfspr r6, SPRN_SRR1
2082BEGIN_FTR_SECTION
2083 rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */
2084FTR_SECTION_ELSE
2085 rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */
2086ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
2087 cmpwi r6, 8 /* was it an external interrupt? */
2088 li r12, BOOK3S_INTERRUPT_EXTERNAL
2089 beq kvmppc_read_intr /* if so, see what it was */
2090 li r3, 0
2091 li r12, 0
2092 cmpwi r6, 6 /* was it the decrementer? */
2093 beq 0f
2094BEGIN_FTR_SECTION
2095 cmpwi r6, 5 /* privileged doorbell? */
2096 beq 0f
2097 cmpwi r6, 3 /* hypervisor doorbell? */
2098 beq 3f
2099END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2100 li r3, 1 /* anything else, return 1 */
21010: blr
2102
2103 /* hypervisor doorbell */
21043: li r12, BOOK3S_INTERRUPT_H_DOORBELL
2105 li r3, 1
2106 blr
2107
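For readers less used to the rlwinm idiom: the wake-reason field sits in SRR1
bits 42:45 (IBM numbering), so the rotate-and-mask above is equivalent to
shifting right by 18 and masking (0xf on POWER8, 0xe for the narrower POWER7
field). A C sketch of the decode, following the return convention and vector
numbers (0x500 external, 0xe80 hypervisor doorbell) from the comment block;
it is illustrative only and does not model the call out to kvmppc_read_intr,
which is what can yield the -1 (guest IPI) case.

#include <stdint.h>

int check_wake_reason(uint64_t srr1, int is_power8, int *trap)
{
        unsigned int reason = (srr1 >> 18) & (is_power8 ? 0xf : 0xe);

        *trap = 0;
        switch (reason) {
        case 8:                         /* external interrupt */
                *trap = 0x500;          /* BOOK3S_INTERRUPT_EXTERNAL */
                return 1;               /* real code branches to kvmppc_read_intr here */
        case 6:                         /* decrementer */
                return 0;
        case 5:                         /* privileged doorbell, POWER8 only */
                return is_power8 ? 0 : 1;
        case 3:                         /* hypervisor doorbell */
                if (is_power8) {
                        *trap = 0xe80;  /* BOOK3S_INTERRUPT_H_DOORBELL */
                        return 1;
                }
                /* fall through */
        default:
                return 1;               /* anything else goes to the host */
        }
}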
2108/*
1791 * Determine what sort of external interrupt is pending (if any). 2109 * Determine what sort of external interrupt is pending (if any).
1792 * Returns: 2110 * Returns:
1793 * 0 if no interrupt is pending 2111 * 0 if no interrupt is pending
@@ -1818,7 +2136,6 @@ kvmppc_read_intr:
1818 * interrupts directly to the guest 2136 * interrupts directly to the guest
1819 */ 2137 */
1820 cmpwi r3, XICS_IPI /* if there is, is it an IPI? */ 2138 cmpwi r3, XICS_IPI /* if there is, is it an IPI? */
1821 li r3, 1
1822 bne 42f 2139 bne 42f
1823 2140
1824 /* It's an IPI, clear the MFRR and EOI it */ 2141 /* It's an IPI, clear the MFRR and EOI it */
@@ -1844,19 +2161,25 @@ kvmppc_read_intr:
1844 * before exit, it will be picked up by the host ICP driver 2161 * before exit, it will be picked up by the host ICP driver
1845 */ 2162 */
1846 stw r0, HSTATE_SAVED_XIRR(r13) 2163 stw r0, HSTATE_SAVED_XIRR(r13)
2164 li r3, 1
1847 b 1b 2165 b 1b
1848 2166
184943: /* We raced with the host, we need to resend that IPI, bummer */ 216743: /* We raced with the host, we need to resend that IPI, bummer */
1850 li r0, IPI_PRIORITY 2168 li r0, IPI_PRIORITY
1851 stbcix r0, r6, r8 /* set the IPI */ 2169 stbcix r0, r6, r8 /* set the IPI */
1852 sync 2170 sync
2171 li r3, 1
1853 b 1b 2172 b 1b
1854 2173
1855/* 2174/*
1856 * Save away FP, VMX and VSX registers. 2175 * Save away FP, VMX and VSX registers.
1857 * r3 = vcpu pointer 2176 * r3 = vcpu pointer
2177 * N.B. r30 and r31 are volatile across this function,
2178 * thus it is not callable from C.
1858 */ 2179 */
1859_GLOBAL(kvmppc_save_fp) 2180kvmppc_save_fp:
2181 mflr r30
2182 mr r31,r3
1860 mfmsr r5 2183 mfmsr r5
1861 ori r8,r5,MSR_FP 2184 ori r8,r5,MSR_FP
1862#ifdef CONFIG_ALTIVEC 2185#ifdef CONFIG_ALTIVEC
@@ -1871,42 +2194,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
1871#endif 2194#endif
1872 mtmsrd r8 2195 mtmsrd r8
1873 isync 2196 isync
1874#ifdef CONFIG_VSX 2197 addi r3,r3,VCPU_FPRS
1875BEGIN_FTR_SECTION 2198 bl .store_fp_state
1876 reg = 0
1877 .rept 32
1878 li r6,reg*16+VCPU_VSRS
1879 STXVD2X(reg,R6,R3)
1880 reg = reg + 1
1881 .endr
1882FTR_SECTION_ELSE
1883#endif
1884 reg = 0
1885 .rept 32
1886 stfd reg,reg*8+VCPU_FPRS(r3)
1887 reg = reg + 1
1888 .endr
1889#ifdef CONFIG_VSX
1890ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
1891#endif
1892 mffs fr0
1893 stfd fr0,VCPU_FPSCR(r3)
1894
1895#ifdef CONFIG_ALTIVEC 2199#ifdef CONFIG_ALTIVEC
1896BEGIN_FTR_SECTION 2200BEGIN_FTR_SECTION
1897 reg = 0 2201 addi r3,r31,VCPU_VRS
1898 .rept 32 2202 bl .store_vr_state
1899 li r6,reg*16+VCPU_VRS
1900 stvx reg,r6,r3
1901 reg = reg + 1
1902 .endr
1903 mfvscr vr0
1904 li r6,VCPU_VSCR
1905 stvx vr0,r6,r3
1906END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2203END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1907#endif 2204#endif
1908 mfspr r6,SPRN_VRSAVE 2205 mfspr r6,SPRN_VRSAVE
1909 stw r6,VCPU_VRSAVE(r3) 2206 stw r6,VCPU_VRSAVE(r3)
2207 mtlr r30
1910 mtmsrd r5 2208 mtmsrd r5
1911 isync 2209 isync
1912 blr 2210 blr
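kvmppc_save_fp (and kvmppc_load_fp below) now hand the per-register loops to
the common FP/VMX helpers instead of open-coding STXVD2X/stfd/stvx sequences.
For orientation, their presumed prototypes as declared in asm/switch_to.h in
this kernel series; treat the exact signatures as an assumption rather than
something quoted from the diff.

struct thread_fp_state;
struct thread_vr_state;

extern void store_fp_state(struct thread_fp_state *fp);
extern void load_fp_state(struct thread_fp_state *fp);
extern void store_vr_state(struct thread_vr_state *vr);
extern void load_vr_state(struct thread_vr_state *vr);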
@@ -1914,9 +2212,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1914/* 2212/*
1915 * Load up FP, VMX and VSX registers 2213 * Load up FP, VMX and VSX registers
1916 * r4 = vcpu pointer 2214 * r4 = vcpu pointer
2215 * N.B. r30 and r31 are volatile across this function,
2216 * thus it is not callable from C.
1917 */ 2217 */
1918 .globl kvmppc_load_fp
1919kvmppc_load_fp: 2218kvmppc_load_fp:
2219 mflr r30
2220 mr r31,r4
1920 mfmsr r9 2221 mfmsr r9
1921 ori r8,r9,MSR_FP 2222 ori r8,r9,MSR_FP
1922#ifdef CONFIG_ALTIVEC 2223#ifdef CONFIG_ALTIVEC
@@ -1931,42 +2232,18 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
1931#endif 2232#endif
1932 mtmsrd r8 2233 mtmsrd r8
1933 isync 2234 isync
1934 lfd fr0,VCPU_FPSCR(r4) 2235 addi r3,r4,VCPU_FPRS
1935 MTFSF_L(fr0) 2236 bl .load_fp_state
1936#ifdef CONFIG_VSX
1937BEGIN_FTR_SECTION
1938 reg = 0
1939 .rept 32
1940 li r7,reg*16+VCPU_VSRS
1941 LXVD2X(reg,R7,R4)
1942 reg = reg + 1
1943 .endr
1944FTR_SECTION_ELSE
1945#endif
1946 reg = 0
1947 .rept 32
1948 lfd reg,reg*8+VCPU_FPRS(r4)
1949 reg = reg + 1
1950 .endr
1951#ifdef CONFIG_VSX
1952ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
1953#endif
1954
1955#ifdef CONFIG_ALTIVEC 2237#ifdef CONFIG_ALTIVEC
1956BEGIN_FTR_SECTION 2238BEGIN_FTR_SECTION
1957 li r7,VCPU_VSCR 2239 addi r3,r31,VCPU_VRS
1958 lvx vr0,r7,r4 2240 bl .load_vr_state
1959 mtvscr vr0
1960 reg = 0
1961 .rept 32
1962 li r7,reg*16+VCPU_VRS
1963 lvx reg,r7,r4
1964 reg = reg + 1
1965 .endr
1966END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2241END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1967#endif 2242#endif
1968 lwz r7,VCPU_VRSAVE(r4) 2243 lwz r7,VCPU_VRSAVE(r4)
1969 mtspr SPRN_VRSAVE,r7 2244 mtspr SPRN_VRSAVE,r7
2245 mtlr r30
2246 mr r4,r31
1970 blr 2247 blr
1971 2248
1972/* 2249/*
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index a59a25a13218..c1abd95063f4 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -160,7 +160,7 @@
160 160
161static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt) 161static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
162{ 162{
163 kvm_cvt_df(&vcpu->arch.fpr[rt], &vcpu->arch.qpr[rt]); 163 kvm_cvt_df(&VCPU_FPR(vcpu, rt), &vcpu->arch.qpr[rt]);
164} 164}
165 165
166static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store) 166static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
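The rest of this file is converted from vcpu->arch.fpr[i] and
vcpu->arch.fpscr to the VCPU_FPR() accessor and vcpu->arch.fp.fpscr. The
accessor's definition is not shown in the diff; it presumably expands along
the lines of the sketch below, with TS_FPROFFSET selecting the FP doubleword
of the combined FP/VSX register array (both names are assumptions here).

#define TS_FPROFFSET    0
#define VCPU_FPR(vcpu, i)       ((vcpu)->arch.fp.fpr[i][TS_FPROFFSET])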
@@ -207,11 +207,11 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
207 /* put in registers */ 207 /* put in registers */
208 switch (ls_type) { 208 switch (ls_type) {
209 case FPU_LS_SINGLE: 209 case FPU_LS_SINGLE:
210 kvm_cvt_fd((u32*)tmp, &vcpu->arch.fpr[rs]); 210 kvm_cvt_fd((u32*)tmp, &VCPU_FPR(vcpu, rs));
211 vcpu->arch.qpr[rs] = *((u32*)tmp); 211 vcpu->arch.qpr[rs] = *((u32*)tmp);
212 break; 212 break;
213 case FPU_LS_DOUBLE: 213 case FPU_LS_DOUBLE:
214 vcpu->arch.fpr[rs] = *((u64*)tmp); 214 VCPU_FPR(vcpu, rs) = *((u64*)tmp);
215 break; 215 break;
216 } 216 }
217 217
@@ -233,18 +233,18 @@ static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
233 233
234 switch (ls_type) { 234 switch (ls_type) {
235 case FPU_LS_SINGLE: 235 case FPU_LS_SINGLE:
236 kvm_cvt_df(&vcpu->arch.fpr[rs], (u32*)tmp); 236 kvm_cvt_df(&VCPU_FPR(vcpu, rs), (u32*)tmp);
237 val = *((u32*)tmp); 237 val = *((u32*)tmp);
238 len = sizeof(u32); 238 len = sizeof(u32);
239 break; 239 break;
240 case FPU_LS_SINGLE_LOW: 240 case FPU_LS_SINGLE_LOW:
241 *((u32*)tmp) = vcpu->arch.fpr[rs]; 241 *((u32*)tmp) = VCPU_FPR(vcpu, rs);
242 val = vcpu->arch.fpr[rs] & 0xffffffff; 242 val = VCPU_FPR(vcpu, rs) & 0xffffffff;
243 len = sizeof(u32); 243 len = sizeof(u32);
244 break; 244 break;
245 case FPU_LS_DOUBLE: 245 case FPU_LS_DOUBLE:
246 *((u64*)tmp) = vcpu->arch.fpr[rs]; 246 *((u64*)tmp) = VCPU_FPR(vcpu, rs);
247 val = vcpu->arch.fpr[rs]; 247 val = VCPU_FPR(vcpu, rs);
248 len = sizeof(u64); 248 len = sizeof(u64);
249 break; 249 break;
250 default: 250 default:
@@ -301,7 +301,7 @@ static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
301 emulated = EMULATE_DONE; 301 emulated = EMULATE_DONE;
302 302
303 /* put in registers */ 303 /* put in registers */
304 kvm_cvt_fd(&tmp[0], &vcpu->arch.fpr[rs]); 304 kvm_cvt_fd(&tmp[0], &VCPU_FPR(vcpu, rs));
305 vcpu->arch.qpr[rs] = tmp[1]; 305 vcpu->arch.qpr[rs] = tmp[1];
306 306
307 dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0], 307 dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0],
@@ -319,7 +319,7 @@ static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
319 u32 tmp[2]; 319 u32 tmp[2];
320 int len = w ? sizeof(u32) : sizeof(u64); 320 int len = w ? sizeof(u32) : sizeof(u64);
321 321
322 kvm_cvt_df(&vcpu->arch.fpr[rs], &tmp[0]); 322 kvm_cvt_df(&VCPU_FPR(vcpu, rs), &tmp[0]);
323 tmp[1] = vcpu->arch.qpr[rs]; 323 tmp[1] = vcpu->arch.qpr[rs];
324 324
325 r = kvmppc_st(vcpu, &addr, len, tmp, true); 325 r = kvmppc_st(vcpu, &addr, len, tmp, true);
@@ -512,7 +512,6 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
512 u32 *src2, u32 *src3)) 512 u32 *src2, u32 *src3))
513{ 513{
514 u32 *qpr = vcpu->arch.qpr; 514 u32 *qpr = vcpu->arch.qpr;
515 u64 *fpr = vcpu->arch.fpr;
516 u32 ps0_out; 515 u32 ps0_out;
517 u32 ps0_in1, ps0_in2, ps0_in3; 516 u32 ps0_in1, ps0_in2, ps0_in3;
518 u32 ps1_in1, ps1_in2, ps1_in3; 517 u32 ps1_in1, ps1_in2, ps1_in3;
@@ -521,20 +520,20 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
521 WARN_ON(rc); 520 WARN_ON(rc);
522 521
523 /* PS0 */ 522 /* PS0 */
524 kvm_cvt_df(&fpr[reg_in1], &ps0_in1); 523 kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1);
525 kvm_cvt_df(&fpr[reg_in2], &ps0_in2); 524 kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2);
526 kvm_cvt_df(&fpr[reg_in3], &ps0_in3); 525 kvm_cvt_df(&VCPU_FPR(vcpu, reg_in3), &ps0_in3);
527 526
528 if (scalar & SCALAR_LOW) 527 if (scalar & SCALAR_LOW)
529 ps0_in2 = qpr[reg_in2]; 528 ps0_in2 = qpr[reg_in2];
530 529
531 func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3); 530 func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);
532 531
533 dprintk(KERN_INFO "PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n", 532 dprintk(KERN_INFO "PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
534 ps0_in1, ps0_in2, ps0_in3, ps0_out); 533 ps0_in1, ps0_in2, ps0_in3, ps0_out);
535 534
536 if (!(scalar & SCALAR_NO_PS0)) 535 if (!(scalar & SCALAR_NO_PS0))
537 kvm_cvt_fd(&ps0_out, &fpr[reg_out]); 536 kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
538 537
539 /* PS1 */ 538 /* PS1 */
540 ps1_in1 = qpr[reg_in1]; 539 ps1_in1 = qpr[reg_in1];
@@ -545,7 +544,7 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
545 ps1_in2 = ps0_in2; 544 ps1_in2 = ps0_in2;
546 545
547 if (!(scalar & SCALAR_NO_PS1)) 546 if (!(scalar & SCALAR_NO_PS1))
548 func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3); 547 func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);
549 548
550 dprintk(KERN_INFO "PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n", 549 dprintk(KERN_INFO "PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
551 ps1_in1, ps1_in2, ps1_in3, qpr[reg_out]); 550 ps1_in1, ps1_in2, ps1_in3, qpr[reg_out]);
@@ -561,7 +560,6 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
561 u32 *src2)) 560 u32 *src2))
562{ 561{
563 u32 *qpr = vcpu->arch.qpr; 562 u32 *qpr = vcpu->arch.qpr;
564 u64 *fpr = vcpu->arch.fpr;
565 u32 ps0_out; 563 u32 ps0_out;
566 u32 ps0_in1, ps0_in2; 564 u32 ps0_in1, ps0_in2;
567 u32 ps1_out; 565 u32 ps1_out;
@@ -571,20 +569,20 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
571 WARN_ON(rc); 569 WARN_ON(rc);
572 570
573 /* PS0 */ 571 /* PS0 */
574 kvm_cvt_df(&fpr[reg_in1], &ps0_in1); 572 kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1);
575 573
576 if (scalar & SCALAR_LOW) 574 if (scalar & SCALAR_LOW)
577 ps0_in2 = qpr[reg_in2]; 575 ps0_in2 = qpr[reg_in2];
578 else 576 else
579 kvm_cvt_df(&fpr[reg_in2], &ps0_in2); 577 kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2);
580 578
581 func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2); 579 func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2);
582 580
583 if (!(scalar & SCALAR_NO_PS0)) { 581 if (!(scalar & SCALAR_NO_PS0)) {
584 dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n", 582 dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n",
585 ps0_in1, ps0_in2, ps0_out); 583 ps0_in1, ps0_in2, ps0_out);
586 584
587 kvm_cvt_fd(&ps0_out, &fpr[reg_out]); 585 kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
588 } 586 }
589 587
590 /* PS1 */ 588 /* PS1 */
@@ -594,7 +592,7 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
594 if (scalar & SCALAR_HIGH) 592 if (scalar & SCALAR_HIGH)
595 ps1_in2 = ps0_in2; 593 ps1_in2 = ps0_in2;
596 594
597 func(&vcpu->arch.fpscr, &ps1_out, &ps1_in1, &ps1_in2); 595 func(&vcpu->arch.fp.fpscr, &ps1_out, &ps1_in1, &ps1_in2);
598 596
599 if (!(scalar & SCALAR_NO_PS1)) { 597 if (!(scalar & SCALAR_NO_PS1)) {
600 qpr[reg_out] = ps1_out; 598 qpr[reg_out] = ps1_out;
@@ -612,7 +610,6 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
612 u32 *dst, u32 *src1)) 610 u32 *dst, u32 *src1))
613{ 611{
614 u32 *qpr = vcpu->arch.qpr; 612 u32 *qpr = vcpu->arch.qpr;
615 u64 *fpr = vcpu->arch.fpr;
616 u32 ps0_out, ps0_in; 613 u32 ps0_out, ps0_in;
617 u32 ps1_in; 614 u32 ps1_in;
618 615
@@ -620,17 +617,17 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
620 WARN_ON(rc); 617 WARN_ON(rc);
621 618
622 /* PS0 */ 619 /* PS0 */
623 kvm_cvt_df(&fpr[reg_in], &ps0_in); 620 kvm_cvt_df(&VCPU_FPR(vcpu, reg_in), &ps0_in);
624 func(&vcpu->arch.fpscr, &ps0_out, &ps0_in); 621 func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in);
625 622
626 dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n", 623 dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n",
627 ps0_in, ps0_out); 624 ps0_in, ps0_out);
628 625
629 kvm_cvt_fd(&ps0_out, &fpr[reg_out]); 626 kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
630 627
631 /* PS1 */ 628 /* PS1 */
632 ps1_in = qpr[reg_in]; 629 ps1_in = qpr[reg_in];
633 func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in); 630 func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in);
634 631
635 dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n", 632 dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n",
636 ps1_in, qpr[reg_out]); 633 ps1_in, qpr[reg_out]);
@@ -649,10 +646,10 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
649 int ax_rc = inst_get_field(inst, 21, 25); 646 int ax_rc = inst_get_field(inst, 21, 25);
650 short full_d = inst_get_field(inst, 16, 31); 647 short full_d = inst_get_field(inst, 16, 31);
651 648
652 u64 *fpr_d = &vcpu->arch.fpr[ax_rd]; 649 u64 *fpr_d = &VCPU_FPR(vcpu, ax_rd);
653 u64 *fpr_a = &vcpu->arch.fpr[ax_ra]; 650 u64 *fpr_a = &VCPU_FPR(vcpu, ax_ra);
654 u64 *fpr_b = &vcpu->arch.fpr[ax_rb]; 651 u64 *fpr_b = &VCPU_FPR(vcpu, ax_rb);
655 u64 *fpr_c = &vcpu->arch.fpr[ax_rc]; 652 u64 *fpr_c = &VCPU_FPR(vcpu, ax_rc);
656 653
657 bool rcomp = (inst & 1) ? true : false; 654 bool rcomp = (inst & 1) ? true : false;
658 u32 cr = kvmppc_get_cr(vcpu); 655 u32 cr = kvmppc_get_cr(vcpu);
@@ -674,11 +671,11 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
674 /* Do we need to clear FE0 / FE1 here? Don't think so. */ 671 /* Do we need to clear FE0 / FE1 here? Don't think so. */
675 672
676#ifdef DEBUG 673#ifdef DEBUG
677 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) { 674 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) {
678 u32 f; 675 u32 f;
679 kvm_cvt_df(&vcpu->arch.fpr[i], &f); 676 kvm_cvt_df(&VCPU_FPR(vcpu, i), &f);
680 dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n", 677 dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n",
681 i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]); 678 i, f, VCPU_FPR(vcpu, i), i, vcpu->arch.qpr[i]);
682 } 679 }
683#endif 680#endif
684 681
@@ -764,8 +761,8 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
764 break; 761 break;
765 } 762 }
766 case OP_4X_PS_NEG: 763 case OP_4X_PS_NEG:
767 vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb]; 764 VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
768 vcpu->arch.fpr[ax_rd] ^= 0x8000000000000000ULL; 765 VCPU_FPR(vcpu, ax_rd) ^= 0x8000000000000000ULL;
769 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; 766 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
770 vcpu->arch.qpr[ax_rd] ^= 0x80000000; 767 vcpu->arch.qpr[ax_rd] ^= 0x80000000;
771 break; 768 break;
@@ -775,7 +772,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
775 break; 772 break;
776 case OP_4X_PS_MR: 773 case OP_4X_PS_MR:
777 WARN_ON(rcomp); 774 WARN_ON(rcomp);
778 vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb]; 775 VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
779 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; 776 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
780 break; 777 break;
781 case OP_4X_PS_CMPO1: 778 case OP_4X_PS_CMPO1:
@@ -784,44 +781,44 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
784 break; 781 break;
785 case OP_4X_PS_NABS: 782 case OP_4X_PS_NABS:
786 WARN_ON(rcomp); 783 WARN_ON(rcomp);
787 vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb]; 784 VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
788 vcpu->arch.fpr[ax_rd] |= 0x8000000000000000ULL; 785 VCPU_FPR(vcpu, ax_rd) |= 0x8000000000000000ULL;
789 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; 786 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
790 vcpu->arch.qpr[ax_rd] |= 0x80000000; 787 vcpu->arch.qpr[ax_rd] |= 0x80000000;
791 break; 788 break;
792 case OP_4X_PS_ABS: 789 case OP_4X_PS_ABS:
793 WARN_ON(rcomp); 790 WARN_ON(rcomp);
794 vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb]; 791 VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
795 vcpu->arch.fpr[ax_rd] &= ~0x8000000000000000ULL; 792 VCPU_FPR(vcpu, ax_rd) &= ~0x8000000000000000ULL;
796 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; 793 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
797 vcpu->arch.qpr[ax_rd] &= ~0x80000000; 794 vcpu->arch.qpr[ax_rd] &= ~0x80000000;
798 break; 795 break;
799 case OP_4X_PS_MERGE00: 796 case OP_4X_PS_MERGE00:
800 WARN_ON(rcomp); 797 WARN_ON(rcomp);
801 vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra]; 798 VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra);
802 /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */ 799 /* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */
803 kvm_cvt_df(&vcpu->arch.fpr[ax_rb], 800 kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb),
804 &vcpu->arch.qpr[ax_rd]); 801 &vcpu->arch.qpr[ax_rd]);
805 break; 802 break;
806 case OP_4X_PS_MERGE01: 803 case OP_4X_PS_MERGE01:
807 WARN_ON(rcomp); 804 WARN_ON(rcomp);
808 vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra]; 805 VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra);
809 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; 806 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
810 break; 807 break;
811 case OP_4X_PS_MERGE10: 808 case OP_4X_PS_MERGE10:
812 WARN_ON(rcomp); 809 WARN_ON(rcomp);
813 /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */ 810 /* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */
814 kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], 811 kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
815 &vcpu->arch.fpr[ax_rd]); 812 &VCPU_FPR(vcpu, ax_rd));
816 /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */ 813 /* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */
817 kvm_cvt_df(&vcpu->arch.fpr[ax_rb], 814 kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb),
818 &vcpu->arch.qpr[ax_rd]); 815 &vcpu->arch.qpr[ax_rd]);
819 break; 816 break;
820 case OP_4X_PS_MERGE11: 817 case OP_4X_PS_MERGE11:
821 WARN_ON(rcomp); 818 WARN_ON(rcomp);
822 /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */ 819 /* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */
823 kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], 820 kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
824 &vcpu->arch.fpr[ax_rd]); 821 &VCPU_FPR(vcpu, ax_rd));
825 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; 822 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
826 break; 823 break;
827 } 824 }
@@ -856,7 +853,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
856 case OP_4A_PS_SUM1: 853 case OP_4A_PS_SUM1:
857 emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, 854 emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
858 ax_rb, ax_ra, SCALAR_NO_PS0 | SCALAR_HIGH, fps_fadds); 855 ax_rb, ax_ra, SCALAR_NO_PS0 | SCALAR_HIGH, fps_fadds);
859 vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rc]; 856 VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rc);
860 break; 857 break;
861 case OP_4A_PS_SUM0: 858 case OP_4A_PS_SUM0:
862 emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, 859 emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
@@ -1106,45 +1103,45 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
1106 case 59: 1103 case 59:
1107 switch (inst_get_field(inst, 21, 30)) { 1104 switch (inst_get_field(inst, 21, 30)) {
1108 case OP_59_FADDS: 1105 case OP_59_FADDS:
1109 fpd_fadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); 1106 fpd_fadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1110 kvmppc_sync_qpr(vcpu, ax_rd); 1107 kvmppc_sync_qpr(vcpu, ax_rd);
1111 break; 1108 break;
1112 case OP_59_FSUBS: 1109 case OP_59_FSUBS:
1113 fpd_fsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); 1110 fpd_fsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1114 kvmppc_sync_qpr(vcpu, ax_rd); 1111 kvmppc_sync_qpr(vcpu, ax_rd);
1115 break; 1112 break;
1116 case OP_59_FDIVS: 1113 case OP_59_FDIVS:
1117 fpd_fdivs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); 1114 fpd_fdivs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1118 kvmppc_sync_qpr(vcpu, ax_rd); 1115 kvmppc_sync_qpr(vcpu, ax_rd);
1119 break; 1116 break;
1120 case OP_59_FRES: 1117 case OP_59_FRES:
1121 fpd_fres(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); 1118 fpd_fres(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
1122 kvmppc_sync_qpr(vcpu, ax_rd); 1119 kvmppc_sync_qpr(vcpu, ax_rd);
1123 break; 1120 break;
1124 case OP_59_FRSQRTES: 1121 case OP_59_FRSQRTES:
1125 fpd_frsqrtes(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); 1122 fpd_frsqrtes(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
1126 kvmppc_sync_qpr(vcpu, ax_rd); 1123 kvmppc_sync_qpr(vcpu, ax_rd);
1127 break; 1124 break;
1128 } 1125 }
1129 switch (inst_get_field(inst, 26, 30)) { 1126 switch (inst_get_field(inst, 26, 30)) {
1130 case OP_59_FMULS: 1127 case OP_59_FMULS:
1131 fpd_fmuls(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c); 1128 fpd_fmuls(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c);
1132 kvmppc_sync_qpr(vcpu, ax_rd); 1129 kvmppc_sync_qpr(vcpu, ax_rd);
1133 break; 1130 break;
1134 case OP_59_FMSUBS: 1131 case OP_59_FMSUBS:
1135 fpd_fmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1132 fpd_fmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1136 kvmppc_sync_qpr(vcpu, ax_rd); 1133 kvmppc_sync_qpr(vcpu, ax_rd);
1137 break; 1134 break;
1138 case OP_59_FMADDS: 1135 case OP_59_FMADDS:
1139 fpd_fmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1136 fpd_fmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1140 kvmppc_sync_qpr(vcpu, ax_rd); 1137 kvmppc_sync_qpr(vcpu, ax_rd);
1141 break; 1138 break;
1142 case OP_59_FNMSUBS: 1139 case OP_59_FNMSUBS:
1143 fpd_fnmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1140 fpd_fnmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1144 kvmppc_sync_qpr(vcpu, ax_rd); 1141 kvmppc_sync_qpr(vcpu, ax_rd);
1145 break; 1142 break;
1146 case OP_59_FNMADDS: 1143 case OP_59_FNMADDS:
1147 fpd_fnmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1144 fpd_fnmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1148 kvmppc_sync_qpr(vcpu, ax_rd); 1145 kvmppc_sync_qpr(vcpu, ax_rd);
1149 break; 1146 break;
1150 } 1147 }
@@ -1159,12 +1156,12 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
1159 break; 1156 break;
1160 case OP_63_MFFS: 1157 case OP_63_MFFS:
1161 /* XXX missing CR */ 1158 /* XXX missing CR */
1162 *fpr_d = vcpu->arch.fpscr; 1159 *fpr_d = vcpu->arch.fp.fpscr;
1163 break; 1160 break;
1164 case OP_63_MTFSF: 1161 case OP_63_MTFSF:
1165 /* XXX missing fm bits */ 1162 /* XXX missing fm bits */
1166 /* XXX missing CR */ 1163 /* XXX missing CR */
1167 vcpu->arch.fpscr = *fpr_b; 1164 vcpu->arch.fp.fpscr = *fpr_b;
1168 break; 1165 break;
1169 case OP_63_FCMPU: 1166 case OP_63_FCMPU:
1170 { 1167 {
@@ -1172,7 +1169,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
1172 u32 cr0_mask = 0xf0000000; 1169 u32 cr0_mask = 0xf0000000;
1173 u32 cr_shift = inst_get_field(inst, 6, 8) * 4; 1170 u32 cr_shift = inst_get_field(inst, 6, 8) * 4;
1174 1171
1175 fpd_fcmpu(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b); 1172 fpd_fcmpu(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b);
1176 cr &= ~(cr0_mask >> cr_shift); 1173 cr &= ~(cr0_mask >> cr_shift);
1177 cr |= (cr & cr0_mask) >> cr_shift; 1174 cr |= (cr & cr0_mask) >> cr_shift;
1178 break; 1175 break;
@@ -1183,40 +1180,40 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
1183 u32 cr0_mask = 0xf0000000; 1180 u32 cr0_mask = 0xf0000000;
1184 u32 cr_shift = inst_get_field(inst, 6, 8) * 4; 1181 u32 cr_shift = inst_get_field(inst, 6, 8) * 4;
1185 1182
1186 fpd_fcmpo(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b); 1183 fpd_fcmpo(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b);
1187 cr &= ~(cr0_mask >> cr_shift); 1184 cr &= ~(cr0_mask >> cr_shift);
1188 cr |= (cr & cr0_mask) >> cr_shift; 1185 cr |= (cr & cr0_mask) >> cr_shift;
1189 break; 1186 break;
1190 } 1187 }
1191 case OP_63_FNEG: 1188 case OP_63_FNEG:
1192 fpd_fneg(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); 1189 fpd_fneg(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
1193 break; 1190 break;
1194 case OP_63_FMR: 1191 case OP_63_FMR:
1195 *fpr_d = *fpr_b; 1192 *fpr_d = *fpr_b;
1196 break; 1193 break;
1197 case OP_63_FABS: 1194 case OP_63_FABS:
1198 fpd_fabs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); 1195 fpd_fabs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
1199 break; 1196 break;
1200 case OP_63_FCPSGN: 1197 case OP_63_FCPSGN:
1201 fpd_fcpsgn(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); 1198 fpd_fcpsgn(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1202 break; 1199 break;
1203 case OP_63_FDIV: 1200 case OP_63_FDIV:
1204 fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); 1201 fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1205 break; 1202 break;
1206 case OP_63_FADD: 1203 case OP_63_FADD:
1207 fpd_fadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); 1204 fpd_fadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1208 break; 1205 break;
1209 case OP_63_FSUB: 1206 case OP_63_FSUB:
1210 fpd_fsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); 1207 fpd_fsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1211 break; 1208 break;
1212 case OP_63_FCTIW: 1209 case OP_63_FCTIW:
1213 fpd_fctiw(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); 1210 fpd_fctiw(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
1214 break; 1211 break;
1215 case OP_63_FCTIWZ: 1212 case OP_63_FCTIWZ:
1216 fpd_fctiwz(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); 1213 fpd_fctiwz(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
1217 break; 1214 break;
1218 case OP_63_FRSP: 1215 case OP_63_FRSP:
1219 fpd_frsp(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); 1216 fpd_frsp(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
1220 kvmppc_sync_qpr(vcpu, ax_rd); 1217 kvmppc_sync_qpr(vcpu, ax_rd);
1221 break; 1218 break;
1222 case OP_63_FRSQRTE: 1219 case OP_63_FRSQRTE:
@@ -1224,39 +1221,39 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
1224 double one = 1.0f; 1221 double one = 1.0f;
1225 1222
1226 /* fD = sqrt(fB) */ 1223 /* fD = sqrt(fB) */
1227 fpd_fsqrt(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); 1224 fpd_fsqrt(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
1228 /* fD = 1.0f / fD */ 1225 /* fD = 1.0f / fD */
1229 fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, (u64*)&one, fpr_d); 1226 fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, (u64*)&one, fpr_d);
1230 break; 1227 break;
1231 } 1228 }
1232 } 1229 }
1233 switch (inst_get_field(inst, 26, 30)) { 1230 switch (inst_get_field(inst, 26, 30)) {
1234 case OP_63_FMUL: 1231 case OP_63_FMUL:
1235 fpd_fmul(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c); 1232 fpd_fmul(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c);
1236 break; 1233 break;
1237 case OP_63_FSEL: 1234 case OP_63_FSEL:
1238 fpd_fsel(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1235 fpd_fsel(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1239 break; 1236 break;
1240 case OP_63_FMSUB: 1237 case OP_63_FMSUB:
1241 fpd_fmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1238 fpd_fmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1242 break; 1239 break;
1243 case OP_63_FMADD: 1240 case OP_63_FMADD:
1244 fpd_fmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1241 fpd_fmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1245 break; 1242 break;
1246 case OP_63_FNMSUB: 1243 case OP_63_FNMSUB:
1247 fpd_fnmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1244 fpd_fnmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1248 break; 1245 break;
1249 case OP_63_FNMADD: 1246 case OP_63_FNMADD:
1250 fpd_fnmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); 1247 fpd_fnmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1251 break; 1248 break;
1252 } 1249 }
1253 break; 1250 break;
1254 } 1251 }
1255 1252
1256#ifdef DEBUG 1253#ifdef DEBUG
1257 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) { 1254 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) {
1258 u32 f; 1255 u32 f;
1259 kvm_cvt_df(&vcpu->arch.fpr[i], &f); 1256 kvm_cvt_df(&VCPU_FPR(vcpu, i), &f);
1260 dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f); 1257 dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f);
1261 } 1258 }
1262#endif 1259#endif
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 5b9e9063cfaf..c5c052a9729c 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -41,6 +41,7 @@
41#include <linux/vmalloc.h> 41#include <linux/vmalloc.h>
42#include <linux/highmem.h> 42#include <linux/highmem.h>
43#include <linux/module.h> 43#include <linux/module.h>
44#include <linux/miscdevice.h>
44 45
45#include "book3s.h" 46#include "book3s.h"
46 47
@@ -566,12 +567,6 @@ static inline int get_fpr_index(int i)
566void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) 567void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
567{ 568{
568 struct thread_struct *t = &current->thread; 569 struct thread_struct *t = &current->thread;
569 u64 *vcpu_fpr = vcpu->arch.fpr;
570#ifdef CONFIG_VSX
571 u64 *vcpu_vsx = vcpu->arch.vsr;
572#endif
573 u64 *thread_fpr = &t->fp_state.fpr[0][0];
574 int i;
575 570
576 /* 571 /*
577 * VSX instructions can access FP and vector registers, so if 572 * VSX instructions can access FP and vector registers, so if
@@ -594,26 +589,16 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
594 * both the traditional FP registers and the added VSX 589 * both the traditional FP registers and the added VSX
595 * registers into thread.fp_state.fpr[]. 590 * registers into thread.fp_state.fpr[].
596 */ 591 */
597 if (current->thread.regs->msr & MSR_FP) 592 if (t->regs->msr & MSR_FP)
598 giveup_fpu(current); 593 giveup_fpu(current);
599 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) 594 t->fp_save_area = NULL;
600 vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
601
602 vcpu->arch.fpscr = t->fp_state.fpscr;
603
604#ifdef CONFIG_VSX
605 if (cpu_has_feature(CPU_FTR_VSX))
606 for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
607 vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
608#endif
609 } 595 }
610 596
611#ifdef CONFIG_ALTIVEC 597#ifdef CONFIG_ALTIVEC
612 if (msr & MSR_VEC) { 598 if (msr & MSR_VEC) {
613 if (current->thread.regs->msr & MSR_VEC) 599 if (current->thread.regs->msr & MSR_VEC)
614 giveup_altivec(current); 600 giveup_altivec(current);
615 memcpy(vcpu->arch.vr, t->vr_state.vr, sizeof(vcpu->arch.vr)); 601 t->vr_save_area = NULL;
616 vcpu->arch.vscr = t->vr_state.vscr;
617 } 602 }
618#endif 603#endif
619 604
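The copy loops deleted above become unnecessary because of a save-area
redirection in thread_struct: when fp_save_area / vr_save_area are non-NULL,
giveup_fpu() and giveup_altivec() deposit the live registers there rather
than into thread.fp_state / thread.vr_state, so pointing them at
vcpu->arch.fp and vcpu->arch.vr makes the guest state land in the vcpu
directly. The fragment below only illustrates the assumed shape of those
fields, not the real thread_struct layout.

struct thread_fp_state;
struct thread_vr_state;

struct thread_struct_fragment {                 /* illustrative only */
        struct thread_fp_state *fp_save_area;   /* NULL: save to thread.fp_state */
        struct thread_vr_state *vr_save_area;   /* NULL: save to thread.vr_state */
};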
@@ -661,12 +646,6 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
661 ulong msr) 646 ulong msr)
662{ 647{
663 struct thread_struct *t = &current->thread; 648 struct thread_struct *t = &current->thread;
664 u64 *vcpu_fpr = vcpu->arch.fpr;
665#ifdef CONFIG_VSX
666 u64 *vcpu_vsx = vcpu->arch.vsr;
667#endif
668 u64 *thread_fpr = &t->fp_state.fpr[0][0];
669 int i;
670 649
671 /* When we have paired singles, we emulate in software */ 650 /* When we have paired singles, we emulate in software */
672 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) 651 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
@@ -704,27 +683,20 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
704#endif 683#endif
705 684
706 if (msr & MSR_FP) { 685 if (msr & MSR_FP) {
707 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) 686 enable_kernel_fp();
708 thread_fpr[get_fpr_index(i)] = vcpu_fpr[i]; 687 load_fp_state(&vcpu->arch.fp);
709#ifdef CONFIG_VSX 688 t->fp_save_area = &vcpu->arch.fp;
710 for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
711 thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
712#endif
713 t->fp_state.fpscr = vcpu->arch.fpscr;
714 t->fpexc_mode = 0;
715 kvmppc_load_up_fpu();
716 } 689 }
717 690
718 if (msr & MSR_VEC) { 691 if (msr & MSR_VEC) {
719#ifdef CONFIG_ALTIVEC 692#ifdef CONFIG_ALTIVEC
720 memcpy(t->vr_state.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr)); 693 enable_kernel_altivec();
721 t->vr_state.vscr = vcpu->arch.vscr; 694 load_vr_state(&vcpu->arch.vr);
722 t->vrsave = -1; 695 t->vr_save_area = &vcpu->arch.vr;
723 kvmppc_load_up_altivec();
724#endif 696#endif
725 } 697 }
726 698
727 current->thread.regs->msr |= msr; 699 t->regs->msr |= msr;
728 vcpu->arch.guest_owned_ext |= msr; 700 vcpu->arch.guest_owned_ext |= msr;
729 kvmppc_recalc_shadow_msr(vcpu); 701 kvmppc_recalc_shadow_msr(vcpu);
730 702
@@ -743,11 +715,15 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
743 if (!lost_ext) 715 if (!lost_ext)
744 return; 716 return;
745 717
746 if (lost_ext & MSR_FP) 718 if (lost_ext & MSR_FP) {
747 kvmppc_load_up_fpu(); 719 enable_kernel_fp();
720 load_fp_state(&vcpu->arch.fp);
721 }
748#ifdef CONFIG_ALTIVEC 722#ifdef CONFIG_ALTIVEC
749 if (lost_ext & MSR_VEC) 723 if (lost_ext & MSR_VEC) {
750 kvmppc_load_up_altivec(); 724 enable_kernel_altivec();
725 load_vr_state(&vcpu->arch.vr);
726 }
751#endif 727#endif
752 current->thread.regs->msr |= lost_ext; 728 current->thread.regs->msr |= lost_ext;
753} 729}
@@ -873,6 +849,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
873 /* We're good on these - the host merely wanted to get our attention */ 849 /* We're good on these - the host merely wanted to get our attention */
874 case BOOK3S_INTERRUPT_DECREMENTER: 850 case BOOK3S_INTERRUPT_DECREMENTER:
875 case BOOK3S_INTERRUPT_HV_DECREMENTER: 851 case BOOK3S_INTERRUPT_HV_DECREMENTER:
852 case BOOK3S_INTERRUPT_DOORBELL:
876 vcpu->stat.dec_exits++; 853 vcpu->stat.dec_exits++;
877 r = RESUME_GUEST; 854 r = RESUME_GUEST;
878 break; 855 break;
@@ -1045,14 +1022,14 @@ program_interrupt:
1045 * and if we really did time things so badly, then we just exit 1022 * and if we really did time things so badly, then we just exit
1046 * again due to a host external interrupt. 1023 * again due to a host external interrupt.
1047 */ 1024 */
1048 local_irq_disable();
1049 s = kvmppc_prepare_to_enter(vcpu); 1025 s = kvmppc_prepare_to_enter(vcpu);
1050 if (s <= 0) { 1026 if (s <= 0)
1051 local_irq_enable();
1052 r = s; 1027 r = s;
1053 } else { 1028 else {
1029 /* interrupts now hard-disabled */
1054 kvmppc_fix_ee_before_entry(); 1030 kvmppc_fix_ee_before_entry();
1055 } 1031 }
1032
1056 kvmppc_handle_lost_ext(vcpu); 1033 kvmppc_handle_lost_ext(vcpu);
1057 } 1034 }
1058 1035
@@ -1133,19 +1110,6 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
1133 case KVM_REG_PPC_HIOR: 1110 case KVM_REG_PPC_HIOR:
1134 *val = get_reg_val(id, to_book3s(vcpu)->hior); 1111 *val = get_reg_val(id, to_book3s(vcpu)->hior);
1135 break; 1112 break;
1136#ifdef CONFIG_VSX
1137 case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
1138 long int i = id - KVM_REG_PPC_VSR0;
1139
1140 if (!cpu_has_feature(CPU_FTR_VSX)) {
1141 r = -ENXIO;
1142 break;
1143 }
1144 val->vsxval[0] = vcpu->arch.fpr[i];
1145 val->vsxval[1] = vcpu->arch.vsr[i];
1146 break;
1147 }
1148#endif /* CONFIG_VSX */
1149 default: 1113 default:
1150 r = -EINVAL; 1114 r = -EINVAL;
1151 break; 1115 break;
@@ -1164,19 +1128,6 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
1164 to_book3s(vcpu)->hior = set_reg_val(id, *val); 1128 to_book3s(vcpu)->hior = set_reg_val(id, *val);
1165 to_book3s(vcpu)->hior_explicit = true; 1129 to_book3s(vcpu)->hior_explicit = true;
1166 break; 1130 break;
1167#ifdef CONFIG_VSX
1168 case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
1169 long int i = id - KVM_REG_PPC_VSR0;
1170
1171 if (!cpu_has_feature(CPU_FTR_VSX)) {
1172 r = -ENXIO;
1173 break;
1174 }
1175 vcpu->arch.fpr[i] = val->vsxval[0];
1176 vcpu->arch.vsr[i] = val->vsxval[1];
1177 break;
1178 }
1179#endif /* CONFIG_VSX */
1180 default: 1131 default:
1181 r = -EINVAL; 1132 r = -EINVAL;
1182 break; 1133 break;
@@ -1274,17 +1225,9 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
1274static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) 1225static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1275{ 1226{
1276 int ret; 1227 int ret;
1277 struct thread_fp_state fp;
1278 int fpexc_mode;
1279#ifdef CONFIG_ALTIVEC 1228#ifdef CONFIG_ALTIVEC
1280 struct thread_vr_state vr;
1281 unsigned long uninitialized_var(vrsave); 1229 unsigned long uninitialized_var(vrsave);
1282 int used_vr;
1283#endif 1230#endif
1284#ifdef CONFIG_VSX
1285 int used_vsr;
1286#endif
1287 ulong ext_msr;
1288 1231
1289 /* Check if we can run the vcpu at all */ 1232 /* Check if we can run the vcpu at all */
1290 if (!vcpu->arch.sane) { 1233 if (!vcpu->arch.sane) {
@@ -1299,40 +1242,27 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1299 * really did time things so badly, then we just exit again due to 1242 * really did time things so badly, then we just exit again due to
1300 * a host external interrupt. 1243 * a host external interrupt.
1301 */ 1244 */
1302 local_irq_disable();
1303 ret = kvmppc_prepare_to_enter(vcpu); 1245 ret = kvmppc_prepare_to_enter(vcpu);
1304 if (ret <= 0) { 1246 if (ret <= 0)
1305 local_irq_enable();
1306 goto out; 1247 goto out;
1307 } 1248 /* interrupts now hard-disabled */
1308 1249
1309 /* Save FPU state in stack */ 1250 /* Save FPU state in thread_struct */
1310 if (current->thread.regs->msr & MSR_FP) 1251 if (current->thread.regs->msr & MSR_FP)
1311 giveup_fpu(current); 1252 giveup_fpu(current);
1312 fp = current->thread.fp_state;
1313 fpexc_mode = current->thread.fpexc_mode;
1314 1253
1315#ifdef CONFIG_ALTIVEC 1254#ifdef CONFIG_ALTIVEC
1316 /* Save Altivec state in stack */ 1255 /* Save Altivec state in thread_struct */
1317 used_vr = current->thread.used_vr; 1256 if (current->thread.regs->msr & MSR_VEC)
1318 if (used_vr) { 1257 giveup_altivec(current);
1319 if (current->thread.regs->msr & MSR_VEC)
1320 giveup_altivec(current);
1321 vr = current->thread.vr_state;
1322 vrsave = current->thread.vrsave;
1323 }
1324#endif 1258#endif
1325 1259
1326#ifdef CONFIG_VSX 1260#ifdef CONFIG_VSX
1327 /* Save VSX state in stack */ 1261 /* Save VSX state in thread_struct */
1328 used_vsr = current->thread.used_vsr; 1262 if (current->thread.regs->msr & MSR_VSX)
1329 if (used_vsr && (current->thread.regs->msr & MSR_VSX))
1330 __giveup_vsx(current); 1263 __giveup_vsx(current);
1331#endif 1264#endif
1332 1265
1333 /* Remember the MSR with disabled extensions */
1334 ext_msr = current->thread.regs->msr;
1335
1336 /* Preload FPU if it's enabled */ 1266 /* Preload FPU if it's enabled */
1337 if (vcpu->arch.shared->msr & MSR_FP) 1267 if (vcpu->arch.shared->msr & MSR_FP)
1338 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); 1268 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
@@ -1347,25 +1277,6 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1347 /* Make sure we save the guest FPU/Altivec/VSX state */ 1277 /* Make sure we save the guest FPU/Altivec/VSX state */
1348 kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); 1278 kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
1349 1279
1350 current->thread.regs->msr = ext_msr;
1351
1352 /* Restore FPU/VSX state from stack */
1353 current->thread.fp_state = fp;
1354 current->thread.fpexc_mode = fpexc_mode;
1355
1356#ifdef CONFIG_ALTIVEC
1357 /* Restore Altivec state from stack */
1358 if (used_vr && current->thread.used_vr) {
1359 current->thread.vr_state = vr;
1360 current->thread.vrsave = vrsave;
1361 }
1362 current->thread.used_vr = used_vr;
1363#endif
1364
1365#ifdef CONFIG_VSX
1366 current->thread.used_vsr = used_vsr;
1367#endif
1368
1369out: 1280out:
1370 vcpu->mode = OUTSIDE_GUEST_MODE; 1281 vcpu->mode = OUTSIDE_GUEST_MODE;
1371 return ret; 1282 return ret;
@@ -1606,4 +1517,6 @@ module_init(kvmppc_book3s_init_pr);
1606module_exit(kvmppc_book3s_exit_pr); 1517module_exit(kvmppc_book3s_exit_pr);
1607 1518
1608MODULE_LICENSE("GPL"); 1519MODULE_LICENSE("GPL");
1520MODULE_ALIAS_MISCDEV(KVM_MINOR);
1521MODULE_ALIAS("devname:kvm");
1609#endif 1522#endif
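
With the PR-KVM changes above, the guest FPU/Altivec register file lives in vcpu->arch.fp / vcpu->arch.vr, and the thread's fp/vr save-area pointers tell giveup_fpu()/giveup_altivec() where to flush the registers, so kvmppc_vcpu_run_pr() no longer snapshots and restores the host thread state by hand. A minimal userspace sketch of that save-area handoff (types and helpers here are illustrative, not the kernel's):

    #include <stdio.h>
    #include <string.h>

    /* Toy stand-ins for the kernel's thread FP state handling. */
    struct fp_state { double fpr[4]; };

    struct thread {
        struct fp_state live;        /* what is "in the registers" */
        struct fp_state *save_area;  /* where giveup() writes it back */
    };

    static void load_fp_state(struct thread *t, struct fp_state *src)
    {
        t->live = *src;              /* registers now hold src's values */
    }

    static void giveup_fpu(struct thread *t)
    {
        if (t->save_area)
            *t->save_area = t->live; /* flush straight into the owner */
    }

    int main(void)
    {
        struct fp_state guest = { { 1.0, 2.0, 3.0, 4.0 } };
        struct thread t;

        memset(&t, 0, sizeof(t));

        /* Guest takes the FPU: load its state, point save_area at it. */
        load_fp_state(&t, &guest);
        t.save_area = &guest;

        t.live.fpr[0] = 42.0;        /* guest modifies f0 while running */
        giveup_fpu(&t);              /* host reclaims the FPU */
        t.save_area = NULL;

        printf("guest f0 after giveup: %g\n", guest.fpr[0]);
        return 0;
    }

Because the save area is the vcpu struct itself, reclaiming the unit already leaves the guest registers in the right place.
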
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index c3c5231adade..9eec675220e6 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -162,51 +162,4 @@ _GLOBAL(kvmppc_entry_trampoline)
162 mtsrr1 r6 162 mtsrr1 r6
163 RFI 163 RFI
164 164
165#if defined(CONFIG_PPC_BOOK3S_32)
166#define STACK_LR INT_FRAME_SIZE+4
167
168/* load_up_xxx have to run with MSR_DR=0 on Book3S_32 */
169#define MSR_EXT_START \
170 PPC_STL r20, _NIP(r1); \
171 mfmsr r20; \
172 LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE); \
173 andc r3,r20,r3; /* Disable DR,EE */ \
174 mtmsr r3; \
175 sync
176
177#define MSR_EXT_END \
178 mtmsr r20; /* Enable DR,EE */ \
179 sync; \
180 PPC_LL r20, _NIP(r1)
181
182#elif defined(CONFIG_PPC_BOOK3S_64)
183#define STACK_LR _LINK
184#define MSR_EXT_START
185#define MSR_EXT_END
186#endif
187
188/*
189 * Activate current's external feature (FPU/Altivec/VSX)
190 */
191#define define_load_up(what) \
192 \
193_GLOBAL(kvmppc_load_up_ ## what); \
194 PPC_STLU r1, -INT_FRAME_SIZE(r1); \
195 mflr r3; \
196 PPC_STL r3, STACK_LR(r1); \
197 MSR_EXT_START; \
198 \
199 bl FUNC(load_up_ ## what); \
200 \
201 MSR_EXT_END; \
202 PPC_LL r3, STACK_LR(r1); \
203 mtlr r3; \
204 addi r1, r1, INT_FRAME_SIZE; \
205 blr
206
207define_load_up(fpu)
208#ifdef CONFIG_ALTIVEC
209define_load_up(altivec)
210#endif
211
212#include "book3s_segment.S" 165#include "book3s_segment.S"
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index bc50c97751d3..1e0cc2adfd40 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -361,6 +361,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
361 beqa BOOK3S_INTERRUPT_DECREMENTER 361 beqa BOOK3S_INTERRUPT_DECREMENTER
362 cmpwi r12, BOOK3S_INTERRUPT_PERFMON 362 cmpwi r12, BOOK3S_INTERRUPT_PERFMON
363 beqa BOOK3S_INTERRUPT_PERFMON 363 beqa BOOK3S_INTERRUPT_PERFMON
364 cmpwi r12, BOOK3S_INTERRUPT_DOORBELL
365 beqa BOOK3S_INTERRUPT_DOORBELL
364 366
365 RFI 367 RFI
366kvmppc_handler_trampoline_exit_end: 368kvmppc_handler_trampoline_exit_end:
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 02a17dcf1610..d1acd32a64c0 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -1246,8 +1246,10 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
1246 kvm->arch.xics = xics; 1246 kvm->arch.xics = xics;
1247 mutex_unlock(&kvm->lock); 1247 mutex_unlock(&kvm->lock);
1248 1248
1249 if (ret) 1249 if (ret) {
1250 kfree(xics);
1250 return ret; 1251 return ret;
1252 }
1251 1253
1252 xics_debugfs_init(xics); 1254 xics_debugfs_init(xics);
1253 1255
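
The kvmppc_xics_create() change above is a leak fix: the freshly allocated xics state is now released when the later setup step fails instead of being dropped. The generic shape of that error path, with a stand-in register step:

    #include <stdlib.h>
    #include <errno.h>

    /* Illustrative create/register pattern; register_dev() stands in for
     * whatever step can fail after the allocation. */
    struct dev_state { int id; };

    static int register_dev(struct dev_state *s) { (void)s; return -EEXIST; }

    static int dev_create(struct dev_state **out)
    {
        struct dev_state *s = calloc(1, sizeof(*s));
        int ret;

        if (!s)
            return -ENOMEM;

        ret = register_dev(s);
        if (ret) {
            free(s);     /* without this, a failed create leaks s */
            return ret;
        }

        *out = s;
        return 0;
    }

    int main(void)
    {
        struct dev_state *s = NULL;
        return dev_create(&s) ? 0 : 1;
    }
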
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 0591e05db74b..ab62109fdfa3 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -643,7 +643,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
643 local_irq_enable(); 643 local_irq_enable();
644 kvm_vcpu_block(vcpu); 644 kvm_vcpu_block(vcpu);
645 clear_bit(KVM_REQ_UNHALT, &vcpu->requests); 645 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
646 local_irq_disable(); 646 hard_irq_disable();
647 647
648 kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); 648 kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
649 r = 1; 649 r = 1;
@@ -682,34 +682,22 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
682{ 682{
683 int ret, s; 683 int ret, s;
684 struct debug_reg debug; 684 struct debug_reg debug;
685#ifdef CONFIG_PPC_FPU
686 struct thread_fp_state fp;
687 int fpexc_mode;
688#endif
689 685
690 if (!vcpu->arch.sane) { 686 if (!vcpu->arch.sane) {
691 kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 687 kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
692 return -EINVAL; 688 return -EINVAL;
693 } 689 }
694 690
695 local_irq_disable();
696 s = kvmppc_prepare_to_enter(vcpu); 691 s = kvmppc_prepare_to_enter(vcpu);
697 if (s <= 0) { 692 if (s <= 0) {
698 local_irq_enable();
699 ret = s; 693 ret = s;
700 goto out; 694 goto out;
701 } 695 }
696 /* interrupts now hard-disabled */
702 697
703#ifdef CONFIG_PPC_FPU 698#ifdef CONFIG_PPC_FPU
704 /* Save userspace FPU state in stack */ 699 /* Save userspace FPU state in stack */
705 enable_kernel_fp(); 700 enable_kernel_fp();
706 fp = current->thread.fp_state;
707 fpexc_mode = current->thread.fpexc_mode;
708
709 /* Restore guest FPU state to thread */
710 memcpy(current->thread.fp_state.fpr, vcpu->arch.fpr,
711 sizeof(vcpu->arch.fpr));
712 current->thread.fp_state.fpscr = vcpu->arch.fpscr;
713 701
714 /* 702 /*
715 * Since we can't trap on MSR_FP in GS-mode, we consider the guest 703 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
@@ -728,6 +716,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
728 debug = current->thread.debug; 716 debug = current->thread.debug;
729 current->thread.debug = vcpu->arch.shadow_dbg_reg; 717 current->thread.debug = vcpu->arch.shadow_dbg_reg;
730 718
719 vcpu->arch.pgdir = current->mm->pgd;
731 kvmppc_fix_ee_before_entry(); 720 kvmppc_fix_ee_before_entry();
732 721
733 ret = __kvmppc_vcpu_run(kvm_run, vcpu); 722 ret = __kvmppc_vcpu_run(kvm_run, vcpu);
@@ -743,15 +732,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
743 kvmppc_save_guest_fp(vcpu); 732 kvmppc_save_guest_fp(vcpu);
744 733
745 vcpu->fpu_active = 0; 734 vcpu->fpu_active = 0;
746
747 /* Save guest FPU state from thread */
748 memcpy(vcpu->arch.fpr, current->thread.fp_state.fpr,
749 sizeof(vcpu->arch.fpr));
750 vcpu->arch.fpscr = current->thread.fp_state.fpscr;
751
752 /* Restore userspace FPU state from stack */
753 current->thread.fp_state = fp;
754 current->thread.fpexc_mode = fpexc_mode;
755#endif 735#endif
756 736
757out: 737out:
@@ -898,17 +878,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
898 int s; 878 int s;
899 int idx; 879 int idx;
900 880
901#ifdef CONFIG_PPC64
902 WARN_ON(local_paca->irq_happened != 0);
903#endif
904
905 /*
906 * We enter with interrupts disabled in hardware, but
907 * we need to call hard_irq_disable anyway to ensure that
908 * the software state is kept in sync.
909 */
910 hard_irq_disable();
911
912 /* update before a new last_exit_type is rewritten */ 881 /* update before a new last_exit_type is rewritten */
913 kvmppc_update_timing_stats(vcpu); 882 kvmppc_update_timing_stats(vcpu);
914 883
@@ -1217,12 +1186,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
1217 * aren't already exiting to userspace for some other reason. 1186 * aren't already exiting to userspace for some other reason.
1218 */ 1187 */
1219 if (!(r & RESUME_HOST)) { 1188 if (!(r & RESUME_HOST)) {
1220 local_irq_disable();
1221 s = kvmppc_prepare_to_enter(vcpu); 1189 s = kvmppc_prepare_to_enter(vcpu);
1222 if (s <= 0) { 1190 if (s <= 0)
1223 local_irq_enable();
1224 r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); 1191 r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
1225 } else { 1192 else {
1193 /* interrupts now hard-disabled */
1226 kvmppc_fix_ee_before_entry(); 1194 kvmppc_fix_ee_before_entry();
1227 } 1195 }
1228 } 1196 }
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 09bfd9bc7cf8..b632cd35919b 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -136,7 +136,9 @@ static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
136{ 136{
137#ifdef CONFIG_PPC_FPU 137#ifdef CONFIG_PPC_FPU
138 if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) { 138 if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) {
139 load_up_fpu(); 139 enable_kernel_fp();
140 load_fp_state(&vcpu->arch.fp);
141 current->thread.fp_save_area = &vcpu->arch.fp;
140 current->thread.regs->msr |= MSR_FP; 142 current->thread.regs->msr |= MSR_FP;
141 } 143 }
142#endif 144#endif
@@ -151,6 +153,7 @@ static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
151#ifdef CONFIG_PPC_FPU 153#ifdef CONFIG_PPC_FPU
152 if (vcpu->fpu_active && (current->thread.regs->msr & MSR_FP)) 154 if (vcpu->fpu_active && (current->thread.regs->msr & MSR_FP))
153 giveup_fpu(current); 155 giveup_fpu(current);
156 current->thread.fp_save_area = NULL;
154#endif 157#endif
155} 158}
156 159
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index a0d6929d8678..e4185f6b3309 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -33,6 +33,8 @@
33 33
34#ifdef CONFIG_64BIT 34#ifdef CONFIG_64BIT
35#include <asm/exception-64e.h> 35#include <asm/exception-64e.h>
36#include <asm/hw_irq.h>
37#include <asm/irqflags.h>
36#else 38#else
37#include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */ 39#include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */
38#endif 40#endif
@@ -467,6 +469,15 @@ _GLOBAL(kvmppc_resume_host)
467 mtspr SPRN_EPCR, r3 469 mtspr SPRN_EPCR, r3
468 isync 470 isync
469 471
472#ifdef CONFIG_64BIT
473 /*
474 * We enter with interrupts disabled in hardware, but
475 * we need to call RECONCILE_IRQ_STATE to ensure
476 * that the software state is kept in sync.
477 */
478 RECONCILE_IRQ_STATE(r3,r5)
479#endif
480
470 /* Switch to kernel stack and jump to handler. */ 481 /* Switch to kernel stack and jump to handler. */
471 PPC_LL r3, HOST_RUN(r1) 482 PPC_LL r3, HOST_RUN(r1)
472 mr r5, r14 /* intno */ 483 mr r5, r14 /* intno */
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 497b142f651c..2e02ed849f36 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -16,6 +16,8 @@
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/err.h> 17#include <linux/err.h>
18#include <linux/export.h> 18#include <linux/export.h>
19#include <linux/module.h>
20#include <linux/miscdevice.h>
19 21
20#include <asm/reg.h> 22#include <asm/reg.h>
21#include <asm/cputable.h> 23#include <asm/cputable.h>
@@ -573,3 +575,5 @@ static void __exit kvmppc_e500_exit(void)
573 575
574module_init(kvmppc_e500_init); 576module_init(kvmppc_e500_init);
575module_exit(kvmppc_e500_exit); 577module_exit(kvmppc_e500_exit);
578MODULE_ALIAS_MISCDEV(KVM_MINOR);
579MODULE_ALIAS("devname:kvm");
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index 4fd9650eb018..a326178bdea5 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -31,11 +31,13 @@ enum vcpu_ftr {
31#define E500_TLB_NUM 2 31#define E500_TLB_NUM 2
32 32
33/* entry is mapped somewhere in host TLB */ 33/* entry is mapped somewhere in host TLB */
34#define E500_TLB_VALID (1 << 0) 34#define E500_TLB_VALID (1 << 31)
35/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */ 35/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
36#define E500_TLB_BITMAP (1 << 1) 36#define E500_TLB_BITMAP (1 << 30)
37/* TLB1 entry is mapped by host TLB0 */ 37/* TLB1 entry is mapped by host TLB0 */
38#define E500_TLB_TLB0 (1 << 2) 38#define E500_TLB_TLB0 (1 << 29)
39/* bits [6-5] MAS2_X1 and MAS2_X0 and [4-0] bits for WIMGE */
40#define E500_TLB_MAS2_ATTR (0x7f)
39 41
40struct tlbe_ref { 42struct tlbe_ref {
41 pfn_t pfn; /* valid only for TLB0, except briefly */ 43 pfn_t pfn; /* valid only for TLB0, except briefly */
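
Moving E500_TLB_VALID/BITMAP/TLB0 to the top bits frees the low seven bits of ref->flags for the MAS2 attribute bits (X1/X0 and WIMGE), which is how the ref can carry the attributes and the shadow-TLB setup can mask them back out. A small check of that packing using the values from the hunk above:

    #include <assert.h>
    #include <stdio.h>
    #include <stdint.h>

    /* Status bits live in the top of the word, the low 7 bits carry the
     * MAS2 attributes (bits 6-5: X1, X0; bits 4-0: WIMGE). */
    #define E500_TLB_VALID      (1u << 31)
    #define E500_TLB_BITMAP     (1u << 30)
    #define E500_TLB_TLB0       (1u << 29)
    #define E500_TLB_MAS2_ATTR  0x7fu

    int main(void)
    {
        uint32_t wimg = 0x0a;                     /* say, I and G set */
        uint32_t flags = E500_TLB_VALID | wimg;   /* as in ref setup */

        assert((flags & E500_TLB_MAS2_ATTR) == wimg);
        assert(flags & E500_TLB_VALID);
        assert(!(flags & (E500_TLB_BITMAP | E500_TLB_TLB0)));

        printf("flags=%#x attr=%#x\n", flags, flags & E500_TLB_MAS2_ATTR);
        return 0;
    }
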
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
index ebca6b88ea5e..50860e919cb8 100644
--- a/arch/powerpc/kvm/e500_mmu.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -127,7 +127,7 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
127} 127}
128 128
129static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, 129static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
130 unsigned int eaddr, int as) 130 gva_t eaddr, int as)
131{ 131{
132 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 132 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
133 unsigned int victim, tsized; 133 unsigned int victim, tsized;
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index ecf2247b13be..dd2cc03f406f 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -65,15 +65,6 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
65 return mas3; 65 return mas3;
66} 66}
67 67
68static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
69{
70#ifdef CONFIG_SMP
71 return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
72#else
73 return mas2 & MAS2_ATTRIB_MASK;
74#endif
75}
76
77/* 68/*
78 * writing shadow tlb entry to host TLB 69 * writing shadow tlb entry to host TLB
79 */ 70 */
@@ -231,15 +222,15 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
231 ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID); 222 ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
232 } 223 }
233 224
234 /* Already invalidated in between */ 225 /*
235 if (!(ref->flags & E500_TLB_VALID)) 226 * If TLB entry is still valid then it's a TLB0 entry, and thus
236 return; 227 * backed by at most one host tlbe per shadow pid
237 228 */
238 /* Guest tlbe is backed by at most one host tlbe per shadow pid. */ 229 if (ref->flags & E500_TLB_VALID)
239 kvmppc_e500_tlbil_one(vcpu_e500, gtlbe); 230 kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
240 231
241 /* Mark the TLB as not backed by the host anymore */ 232 /* Mark the TLB as not backed by the host anymore */
242 ref->flags &= ~E500_TLB_VALID; 233 ref->flags = 0;
243} 234}
244 235
245static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe) 236static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
@@ -249,10 +240,13 @@ static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
249 240
250static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, 241static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
251 struct kvm_book3e_206_tlb_entry *gtlbe, 242 struct kvm_book3e_206_tlb_entry *gtlbe,
252 pfn_t pfn) 243 pfn_t pfn, unsigned int wimg)
253{ 244{
254 ref->pfn = pfn; 245 ref->pfn = pfn;
255 ref->flags |= E500_TLB_VALID; 246 ref->flags = E500_TLB_VALID;
247
248 /* Use guest supplied MAS2_G and MAS2_E */
249 ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;
256 250
257 /* Mark the page accessed */ 251 /* Mark the page accessed */
258 kvm_set_pfn_accessed(pfn); 252 kvm_set_pfn_accessed(pfn);
@@ -316,8 +310,7 @@ static void kvmppc_e500_setup_stlbe(
316 310
317 /* Force IPROT=0 for all guest mappings. */ 311 /* Force IPROT=0 for all guest mappings. */
318 stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID; 312 stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
319 stlbe->mas2 = (gvaddr & MAS2_EPN) | 313 stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
320 e500_shadow_mas2_attrib(gtlbe->mas2, pr);
321 stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | 314 stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
322 e500_shadow_mas3_attrib(gtlbe->mas7_3, pr); 315 e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
323 316
@@ -339,6 +332,10 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
339 int ret = 0; 332 int ret = 0;
340 unsigned long mmu_seq; 333 unsigned long mmu_seq;
341 struct kvm *kvm = vcpu_e500->vcpu.kvm; 334 struct kvm *kvm = vcpu_e500->vcpu.kvm;
335 unsigned long tsize_pages = 0;
336 pte_t *ptep;
337 unsigned int wimg = 0;
338 pgd_t *pgdir;
342 339
343 /* used to check for invalidations in progress */ 340 /* used to check for invalidations in progress */
344 mmu_seq = kvm->mmu_notifier_seq; 341 mmu_seq = kvm->mmu_notifier_seq;
@@ -405,7 +402,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
405 */ 402 */
406 403
407 for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) { 404 for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
408 unsigned long gfn_start, gfn_end, tsize_pages; 405 unsigned long gfn_start, gfn_end;
409 tsize_pages = 1 << (tsize - 2); 406 tsize_pages = 1 << (tsize - 2);
410 407
411 gfn_start = gfn & ~(tsize_pages - 1); 408 gfn_start = gfn & ~(tsize_pages - 1);
@@ -447,11 +444,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
447 } 444 }
448 445
449 if (likely(!pfnmap)) { 446 if (likely(!pfnmap)) {
450 unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT); 447 tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
451 pfn = gfn_to_pfn_memslot(slot, gfn); 448 pfn = gfn_to_pfn_memslot(slot, gfn);
452 if (is_error_noslot_pfn(pfn)) { 449 if (is_error_noslot_pfn(pfn)) {
453 printk(KERN_ERR "Couldn't get real page for gfn %lx!\n", 450 if (printk_ratelimit())
454 (long)gfn); 451 pr_err("%s: real page not found for gfn %lx\n",
452 __func__, (long)gfn);
455 return -EINVAL; 453 return -EINVAL;
456 } 454 }
457 455
@@ -466,7 +464,18 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
466 goto out; 464 goto out;
467 } 465 }
468 466
469 kvmppc_e500_ref_setup(ref, gtlbe, pfn); 467
468 pgdir = vcpu_e500->vcpu.arch.pgdir;
469 ptep = lookup_linux_ptep(pgdir, hva, &tsize_pages);
470 if (pte_present(*ptep))
471 wimg = (*ptep >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
472 else {
473 if (printk_ratelimit())
474 pr_err("%s: pte not present: gfn %lx, pfn %lx\n",
475 __func__, (long)gfn, pfn);
476 return -EINVAL;
477 }
478 kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
470 479
471 kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, 480 kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
472 ref, gvaddr, stlbe); 481 ref, gvaddr, stlbe);
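
With the change above, the shadow TLB's cache attributes come from the host Linux PTE (shifted down by PTE_WIMGE_SHIFT) and are merged with the guest-supplied G and E bits, rather than being hard-coded by e500_shadow_mas2_attrib(). A sketch of that derivation; the shift and masks below are local stand-ins, not the kernel's definitions:

    #include <stdio.h>
    #include <stdint.h>

    #define DEMO_PTE_WIMGE_SHIFT 7
    #define DEMO_WIMGE_MASK      0x1fu   /* W I M G E */
    #define DEMO_MAS2_G          0x2u
    #define DEMO_MAS2_E          0x1u

    /* Attributes for the shadow entry: WIMG from the host mapping,
     * G and E as the guest asked for them. */
    static uint32_t shadow_mas2_attr(uint64_t host_pte, uint32_t guest_mas2)
    {
        uint32_t wimg = (host_pte >> DEMO_PTE_WIMGE_SHIFT) & DEMO_WIMGE_MASK;

        return (guest_mas2 & (DEMO_MAS2_G | DEMO_MAS2_E)) | wimg;
    }

    int main(void)
    {
        uint64_t pte = (uint64_t)0x0a << DEMO_PTE_WIMGE_SHIFT; /* I + G */

        printf("attr=%#x\n", shadow_mas2_attr(pte, DEMO_MAS2_E));
        return 0;
    }
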
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 4132cd2fc171..17e456279224 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -16,6 +16,8 @@
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/err.h> 17#include <linux/err.h>
18#include <linux/export.h> 18#include <linux/export.h>
19#include <linux/miscdevice.h>
20#include <linux/module.h>
19 21
20#include <asm/reg.h> 22#include <asm/reg.h>
21#include <asm/cputable.h> 23#include <asm/cputable.h>
@@ -391,3 +393,5 @@ static void __exit kvmppc_e500mc_exit(void)
391 393
392module_init(kvmppc_e500mc_init); 394module_init(kvmppc_e500mc_init);
393module_exit(kvmppc_e500mc_exit); 395module_exit(kvmppc_e500mc_exit);
396MODULE_ALIAS_MISCDEV(KVM_MINOR);
397MODULE_ALIAS("devname:kvm");
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 2f9a0873b44f..c2b887be2c29 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -219,7 +219,6 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
219 * lmw 219 * lmw
220 * stmw 220 * stmw
221 * 221 *
222 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
223 */ 222 */
224/* XXX Should probably auto-generate instruction decoding for a particular core 223/* XXX Should probably auto-generate instruction decoding for a particular core
225 * from opcode tables in the future. */ 224 * from opcode tables in the future. */
diff --git a/arch/powerpc/kvm/mpic.c b/arch/powerpc/kvm/mpic.c
index 2861ae9eaae6..efbd9962a209 100644
--- a/arch/powerpc/kvm/mpic.c
+++ b/arch/powerpc/kvm/mpic.c
@@ -1635,6 +1635,7 @@ static void mpic_destroy(struct kvm_device *dev)
1635 1635
1636 dev->kvm->arch.mpic = NULL; 1636 dev->kvm->arch.mpic = NULL;
1637 kfree(opp); 1637 kfree(opp);
1638 kfree(dev);
1638} 1639}
1639 1640
1640static int mpic_set_default_irq_routing(struct openpic *opp) 1641static int mpic_set_default_irq_routing(struct openpic *opp)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 9ae97686e9f4..3cf541a53e2a 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -68,14 +68,16 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
68 */ 68 */
69int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) 69int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
70{ 70{
71 int r = 1; 71 int r;
72
73 WARN_ON(irqs_disabled());
74 hard_irq_disable();
72 75
73 WARN_ON_ONCE(!irqs_disabled());
74 while (true) { 76 while (true) {
75 if (need_resched()) { 77 if (need_resched()) {
76 local_irq_enable(); 78 local_irq_enable();
77 cond_resched(); 79 cond_resched();
78 local_irq_disable(); 80 hard_irq_disable();
79 continue; 81 continue;
80 } 82 }
81 83
@@ -101,7 +103,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
101 local_irq_enable(); 103 local_irq_enable();
102 trace_kvm_check_requests(vcpu); 104 trace_kvm_check_requests(vcpu);
103 r = kvmppc_core_check_requests(vcpu); 105 r = kvmppc_core_check_requests(vcpu);
104 local_irq_disable(); 106 hard_irq_disable();
105 if (r > 0) 107 if (r > 0)
106 continue; 108 continue;
107 break; 109 break;
@@ -113,22 +115,12 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
113 continue; 115 continue;
114 } 116 }
115 117
116#ifdef CONFIG_PPC64
117 /* lazy EE magic */
118 hard_irq_disable();
119 if (lazy_irq_pending()) {
120 /* Got an interrupt in between, try again */
121 local_irq_enable();
122 local_irq_disable();
123 kvm_guest_exit();
124 continue;
125 }
126#endif
127
128 kvm_guest_enter(); 118 kvm_guest_enter();
129 break; 119 return 1;
130 } 120 }
131 121
122 /* return to host */
123 local_irq_enable();
132 return r; 124 return r;
133} 125}
134EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter); 126EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
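
kvmppc_prepare_to_enter() now owns the interrupt state itself: it hard-disables interrupts on entry, and callers rely on the contract that a return value of 1 means "enter the guest with interrupts still hard-disabled" while anything else returns with interrupts enabled again. A rough control-flow sketch of that contract, with stubs standing in for the resched/signal/request checks (not the kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    static bool irqs_off;

    static void hard_irq_disable(void) { irqs_off = true; }
    static void local_irq_enable(void) { irqs_off = false; }

    static bool need_resched_once = true;
    static bool need_resched(void)
    {
        bool r = need_resched_once;
        need_resched_once = false;
        return r;
    }
    static bool signal_pending(void) { return false; }
    static int  check_requests(void) { return 0; }

    /* Returns 1 with "interrupts" disabled, <= 0 with them enabled again. */
    static int prepare_to_enter(void)
    {
        int r = 0;

        hard_irq_disable();

        while (true) {
            if (need_resched()) {
                local_irq_enable();
                /* cond_resched() would run here */
                hard_irq_disable();
                continue;
            }
            if (signal_pending()) {
                r = -1;              /* bail back to the host */
                break;
            }
            r = check_requests();
            if (r > 0)
                continue;
            if (r < 0)
                break;
            return 1;                /* enter guest, IRQs stay disabled */
        }

        local_irq_enable();          /* return to host */
        return r;
    }

    int main(void)
    {
        int r = prepare_to_enter();

        printf("r=%d irqs_off=%d\n", r, irqs_off);
        return 0;
    }
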
@@ -656,14 +648,14 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
656 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); 648 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
657 break; 649 break;
658 case KVM_MMIO_REG_FPR: 650 case KVM_MMIO_REG_FPR:
659 vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; 651 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
660 break; 652 break;
661#ifdef CONFIG_PPC_BOOK3S 653#ifdef CONFIG_PPC_BOOK3S
662 case KVM_MMIO_REG_QPR: 654 case KVM_MMIO_REG_QPR:
663 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; 655 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
664 break; 656 break;
665 case KVM_MMIO_REG_FQPR: 657 case KVM_MMIO_REG_FQPR:
666 vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; 658 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
667 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; 659 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
668 break; 660 break;
669#endif 661#endif
@@ -673,9 +665,19 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
673} 665}
674 666
675int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 667int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
676 unsigned int rt, unsigned int bytes, int is_bigendian) 668 unsigned int rt, unsigned int bytes,
669 int is_default_endian)
677{ 670{
678 int idx, ret; 671 int idx, ret;
672 int is_bigendian;
673
674 if (kvmppc_need_byteswap(vcpu)) {
675 /* Default endianness is "little endian". */
676 is_bigendian = !is_default_endian;
677 } else {
678 /* Default endianness is "big endian". */
679 is_bigendian = is_default_endian;
680 }
679 681
680 if (bytes > sizeof(run->mmio.data)) { 682 if (bytes > sizeof(run->mmio.data)) {
681 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, 683 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
@@ -711,21 +713,31 @@ EXPORT_SYMBOL_GPL(kvmppc_handle_load);
711 713
712/* Same as above, but sign extends */ 714/* Same as above, but sign extends */
713int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, 715int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
714 unsigned int rt, unsigned int bytes, int is_bigendian) 716 unsigned int rt, unsigned int bytes,
717 int is_default_endian)
715{ 718{
716 int r; 719 int r;
717 720
718 vcpu->arch.mmio_sign_extend = 1; 721 vcpu->arch.mmio_sign_extend = 1;
719 r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian); 722 r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);
720 723
721 return r; 724 return r;
722} 725}
723 726
724int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 727int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
725 u64 val, unsigned int bytes, int is_bigendian) 728 u64 val, unsigned int bytes, int is_default_endian)
726{ 729{
727 void *data = run->mmio.data; 730 void *data = run->mmio.data;
728 int idx, ret; 731 int idx, ret;
732 int is_bigendian;
733
734 if (kvmppc_need_byteswap(vcpu)) {
735 /* Default endianness is "little endian". */
736 is_bigendian = !is_default_endian;
737 } else {
738 /* Default endianness is "big endian". */
739 is_bigendian = is_default_endian;
740 }
729 741
730 if (bytes > sizeof(run->mmio.data)) { 742 if (bytes > sizeof(run->mmio.data)) {
731 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, 743 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
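
kvmppc_handle_load()/store() now take is_default_endian and derive the actual byte order per access: when kvmppc_need_byteswap() says the vcpu's current mode is byte-swapped relative to the default, the effective endianness is flipped. A small sketch of that selection plus a byte-order-aware MMIO store (the helper names are ours):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool effective_bigendian(bool need_byteswap, bool is_default_endian)
    {
        return need_byteswap ? !is_default_endian : is_default_endian;
    }

    /* Store 'val' into an MMIO buffer in the chosen byte order. */
    static void mmio_store32(uint8_t *buf, uint32_t val, bool bigendian)
    {
        for (int i = 0; i < 4; i++) {
            int shift = bigendian ? (3 - i) * 8 : i * 8;

            buf[i] = (val >> shift) & 0xff;
        }
    }

    int main(void)
    {
        uint8_t buf[4];

        /* byte-swapped vcpu, default order requested: comes out LE */
        mmio_store32(buf, 0x11223344, effective_bigendian(true, true));
        printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
        return 0;
    }
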
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index d5bc3750616e..eef3dd3fd9a9 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -106,9 +106,22 @@ struct kvm_s390_sie_block {
106 __u64 gbea; /* 0x0180 */ 106 __u64 gbea; /* 0x0180 */
107 __u8 reserved188[24]; /* 0x0188 */ 107 __u8 reserved188[24]; /* 0x0188 */
108 __u32 fac; /* 0x01a0 */ 108 __u32 fac; /* 0x01a0 */
109 __u8 reserved1a4[92]; /* 0x01a4 */ 109 __u8 reserved1a4[68]; /* 0x01a4 */
110 __u64 itdba; /* 0x01e8 */
111 __u8 reserved1f0[16]; /* 0x01f0 */
110} __attribute__((packed)); 112} __attribute__((packed));
111 113
114struct kvm_s390_itdb {
115 __u8 data[256];
116} __packed;
117
118struct sie_page {
119 struct kvm_s390_sie_block sie_block;
120 __u8 reserved200[1024]; /* 0x0200 */
121 struct kvm_s390_itdb itdb; /* 0x0600 */
122 __u8 reserved700[2304]; /* 0x0700 */
123} __packed;
124
112struct kvm_vcpu_stat { 125struct kvm_vcpu_stat {
113 u32 exit_userspace; 126 u32 exit_userspace;
114 u32 exit_null; 127 u32 exit_null;
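
The new sie_page layout packs the SIE control block, the ITDB at offset 0x600 and padding into exactly one 4K page, which is why the vcpu setup later in this series can point sie_block->itdba at &sie_page->itdb. A compile-time check of those offsets, with a simplified stand-in for the real sie_block:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* The real sie_block has many fields; here it is just padded to its
     * 0x200 size so the page offsets line up. */
    struct demo_sie_block { uint8_t pad[0x200]; };
    struct demo_itdb      { uint8_t data[256]; };

    struct demo_sie_page {
        struct demo_sie_block sie_block;   /* 0x000 */
        uint8_t reserved200[1024];         /* 0x200 */
        struct demo_itdb itdb;             /* 0x600 */
        uint8_t reserved700[2304];         /* 0x700 */
    };

    _Static_assert(offsetof(struct demo_sie_page, itdb) == 0x600,
                   "ITDB must sit at offset 0x600 in the page");
    _Static_assert(sizeof(struct demo_sie_page) == 4096,
                   "sie_page must fill exactly one 4K page");

    int main(void)
    {
        printf("itdb offset: %#zx\n", offsetof(struct demo_sie_page, itdb));
        return 0;
    }
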
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 5ddbbde6f65c..eeb1ac7d8fa4 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -112,6 +112,17 @@ static int handle_instruction(struct kvm_vcpu *vcpu)
112static int handle_prog(struct kvm_vcpu *vcpu) 112static int handle_prog(struct kvm_vcpu *vcpu)
113{ 113{
114 vcpu->stat.exit_program_interruption++; 114 vcpu->stat.exit_program_interruption++;
115
116 /* Restore ITDB to Program-Interruption TDB in guest memory */
117 if (IS_TE_ENABLED(vcpu) &&
118 !(current->thread.per_flags & PER_FLAG_NO_TE) &&
119 IS_ITDB_VALID(vcpu)) {
120 copy_to_guest(vcpu, TDB_ADDR, vcpu->arch.sie_block->itdba,
121 sizeof(struct kvm_s390_itdb));
122 memset((void *) vcpu->arch.sie_block->itdba, 0,
123 sizeof(struct kvm_s390_itdb));
124 }
125
115 trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc); 126 trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
116 return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc); 127 return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc);
117} 128}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 7635c00a1479..e0676f390d57 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -395,6 +395,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
395 CPUSTAT_STOPPED | 395 CPUSTAT_STOPPED |
396 CPUSTAT_GED); 396 CPUSTAT_GED);
397 vcpu->arch.sie_block->ecb = 6; 397 vcpu->arch.sie_block->ecb = 6;
398 if (test_vfacility(50) && test_vfacility(73))
399 vcpu->arch.sie_block->ecb |= 0x10;
400
398 vcpu->arch.sie_block->ecb2 = 8; 401 vcpu->arch.sie_block->ecb2 = 8;
399 vcpu->arch.sie_block->eca = 0xC1002001U; 402 vcpu->arch.sie_block->eca = 0xC1002001U;
400 vcpu->arch.sie_block->fac = (int) (long) vfacilities; 403 vcpu->arch.sie_block->fac = (int) (long) vfacilities;
@@ -411,6 +414,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
411 unsigned int id) 414 unsigned int id)
412{ 415{
413 struct kvm_vcpu *vcpu; 416 struct kvm_vcpu *vcpu;
417 struct sie_page *sie_page;
414 int rc = -EINVAL; 418 int rc = -EINVAL;
415 419
416 if (id >= KVM_MAX_VCPUS) 420 if (id >= KVM_MAX_VCPUS)
@@ -422,12 +426,13 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
422 if (!vcpu) 426 if (!vcpu)
423 goto out; 427 goto out;
424 428
425 vcpu->arch.sie_block = (struct kvm_s390_sie_block *) 429 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
426 get_zeroed_page(GFP_KERNEL); 430 if (!sie_page)
427
428 if (!vcpu->arch.sie_block)
429 goto out_free_cpu; 431 goto out_free_cpu;
430 432
433 vcpu->arch.sie_block = &sie_page->sie_block;
434 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
435
431 vcpu->arch.sie_block->icpua = id; 436 vcpu->arch.sie_block->icpua = id;
432 if (!kvm_is_ucontrol(kvm)) { 437 if (!kvm_is_ucontrol(kvm)) {
433 if (!kvm->arch.sca) { 438 if (!kvm->arch.sca) {
@@ -1182,8 +1187,8 @@ static int __init kvm_s390_init(void)
1182 return -ENOMEM; 1187 return -ENOMEM;
1183 } 1188 }
1184 memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16); 1189 memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
1185 vfacilities[0] &= 0xff82fff3f47c0000UL; 1190 vfacilities[0] &= 0xff82fff3f4fc2000UL;
1186 vfacilities[1] &= 0x001c000000000000UL; 1191 vfacilities[1] &= 0x005c000000000000UL;
1187 return 0; 1192 return 0;
1188} 1193}
1189 1194
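
The ecb |= 0x10 (transactional-execution) bit is only set when facilities 50 and 73 are offered, and the widened vfacilities masks are what expose those bits to the guest. Facility numbers count from the most significant bit of each doubleword, so the new mask values can be checked like this (the test helper is illustrative, not the kernel's test_vfacility()):

    #include <assert.h>
    #include <stdio.h>
    #include <stdint.h>

    /* Facility nr lives in word nr/64 at bit 63 - (nr % 64). */
    static int test_vfacility_demo(const uint64_t *fac, unsigned int nr)
    {
        return (fac[nr / 64] >> (63 - (nr % 64))) & 1;
    }

    int main(void)
    {
        /* The widened masks from the hunk above. */
        uint64_t vfac[2] = { 0xff82fff3f4fc2000ULL, 0x005c000000000000ULL };

        assert(test_vfacility_demo(vfac, 50));   /* checked before ecb |= 0x10 */
        assert(test_vfacility_demo(vfac, 73));

        printf("TE enabled: %d\n",
               test_vfacility_demo(vfac, 50) && test_vfacility_demo(vfac, 73));
        return 0;
    }
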
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 095cf51b16ec..f9559b0bd620 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -26,6 +26,12 @@ extern unsigned long *vfacilities;
26 26
27int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu); 27int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
28 28
29/* Transactional Memory Execution related macros */
30#define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10))
31#define TDB_ADDR 0x1800UL
32#define TDB_FORMAT1 1
33#define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))
34
29#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\ 35#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
30do { \ 36do { \
31 debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \ 37 debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 1df115909758..c7678e43465b 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -85,28 +85,9 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
85 return ret; 85 return ret;
86} 86}
87 87
88static inline uint32_t kvm_cpuid_base(void)
89{
90 if (boot_cpu_data.cpuid_level < 0)
91 return 0; /* So we don't blow up on old processors */
92
93 if (cpu_has_hypervisor)
94 return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
95
96 return 0;
97}
98
99static inline bool kvm_para_available(void)
100{
101 return kvm_cpuid_base() != 0;
102}
103
104static inline unsigned int kvm_arch_para_features(void)
105{
106 return cpuid_eax(KVM_CPUID_FEATURES);
107}
108
109#ifdef CONFIG_KVM_GUEST 88#ifdef CONFIG_KVM_GUEST
89bool kvm_para_available(void);
90unsigned int kvm_arch_para_features(void);
110void __init kvm_guest_init(void); 91void __init kvm_guest_init(void);
111void kvm_async_pf_task_wait(u32 token); 92void kvm_async_pf_task_wait(u32 token);
112void kvm_async_pf_task_wake(u32 token); 93void kvm_async_pf_task_wake(u32 token);
@@ -126,6 +107,16 @@ static inline void kvm_spinlock_init(void)
126#define kvm_async_pf_task_wait(T) do {} while(0) 107#define kvm_async_pf_task_wait(T) do {} while(0)
127#define kvm_async_pf_task_wake(T) do {} while(0) 108#define kvm_async_pf_task_wake(T) do {} while(0)
128 109
110static inline bool kvm_para_available(void)
111{
112 return 0;
113}
114
115static inline unsigned int kvm_arch_para_features(void)
116{
117 return 0;
118}
119
129static inline u32 kvm_read_and_reset_pf_reason(void) 120static inline u32 kvm_read_and_reset_pf_reason(void)
130{ 121{
131 return 0; 122 return 0;
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index cd1b362e4a23..713f1b3bad52 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -500,6 +500,38 @@ void __init kvm_guest_init(void)
500#endif 500#endif
501} 501}
502 502
503static noinline uint32_t __kvm_cpuid_base(void)
504{
505 if (boot_cpu_data.cpuid_level < 0)
506 return 0; /* So we don't blow up on old processors */
507
508 if (cpu_has_hypervisor)
509 return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
510
511 return 0;
512}
513
514static inline uint32_t kvm_cpuid_base(void)
515{
516 static int kvm_cpuid_base = -1;
517
518 if (kvm_cpuid_base == -1)
519 kvm_cpuid_base = __kvm_cpuid_base();
520
521 return kvm_cpuid_base;
522}
523
524bool kvm_para_available(void)
525{
526 return kvm_cpuid_base() != 0;
527}
528EXPORT_SYMBOL_GPL(kvm_para_available);
529
530unsigned int kvm_arch_para_features(void)
531{
532 return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
533}
534
503static uint32_t __init kvm_detect(void) 535static uint32_t __init kvm_detect(void)
504{ 536{
505 return kvm_cpuid_base(); 537 return kvm_cpuid_base();
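
kvm_cpuid_base() now probes once and caches the result, and kvm_arch_para_features() ORs that base into KVM_CPUID_FEATURES so the features leaf follows wherever the KVM leaves were found. A sketch of the pattern, assuming the usual KVM_CPUID_FEATURES value of 0x40000001 and a made-up probe:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_KVM_CPUID_FEATURES 0x40000001u

    static uint32_t probe_cpuid_base(void)
    {
        /* Pretend the KVM leaves were found at the second hypervisor slot. */
        return 0x40000100;
    }

    static uint32_t cpuid_base(void)
    {
        static int64_t cached = -1;      /* probe only once */

        if (cached == -1)
            cached = probe_cpuid_base();
        return (uint32_t)cached;
    }

    int main(void)
    {
        uint32_t features_leaf = cpuid_base() | DEMO_KVM_CPUID_FEATURES;

        /* 0x40000100 | 0x40000001 == 0x40000101 */
        printf("features leaf: %#x\n", features_leaf);
        return 0;
    }
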
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index f1e4895174b2..a2a1bb7ed8c1 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -72,4 +72,12 @@ static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
72 return best && (best->ecx & bit(X86_FEATURE_PCID)); 72 return best && (best->ecx & bit(X86_FEATURE_PCID));
73} 73}
74 74
75static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
76{
77 struct kvm_cpuid_entry2 *best;
78
79 best = kvm_find_cpuid_entry(vcpu, 1, 0);
80 return best && (best->ecx & bit(X86_FEATURE_X2APIC));
81}
82
75#endif 83#endif
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index c8b0d0d2da5c..6a11845fd8b9 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -65,7 +65,7 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
65 struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map); 65 struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map);
66 66
67u64 kvm_get_apic_base(struct kvm_vcpu *vcpu); 67u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
68void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data); 68int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
69void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu, 69void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
70 struct kvm_lapic_state *s); 70 struct kvm_lapic_state *s);
71int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu); 71int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 5c8879127cfa..a06f101ef64b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4392,7 +4392,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
4392static void vmx_vcpu_reset(struct kvm_vcpu *vcpu) 4392static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
4393{ 4393{
4394 struct vcpu_vmx *vmx = to_vmx(vcpu); 4394 struct vcpu_vmx *vmx = to_vmx(vcpu);
4395 u64 msr; 4395 struct msr_data apic_base_msr;
4396 4396
4397 vmx->rmode.vm86_active = 0; 4397 vmx->rmode.vm86_active = 0;
4398 4398
@@ -4400,10 +4400,11 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
4400 4400
4401 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); 4401 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
4402 kvm_set_cr8(&vmx->vcpu, 0); 4402 kvm_set_cr8(&vmx->vcpu, 0);
4403 msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE; 4403 apic_base_msr.data = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
4404 if (kvm_vcpu_is_bsp(&vmx->vcpu)) 4404 if (kvm_vcpu_is_bsp(&vmx->vcpu))
4405 msr |= MSR_IA32_APICBASE_BSP; 4405 apic_base_msr.data |= MSR_IA32_APICBASE_BSP;
4406 kvm_set_apic_base(&vmx->vcpu, msr); 4406 apic_base_msr.host_initiated = true;
4407 kvm_set_apic_base(&vmx->vcpu, &apic_base_msr);
4407 4408
4408 vmx_segment_cache_clear(vmx); 4409 vmx_segment_cache_clear(vmx);
4409 4410
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0c76f7cfdb32..39c28f09dfd5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -257,10 +257,26 @@ u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
257} 257}
258EXPORT_SYMBOL_GPL(kvm_get_apic_base); 258EXPORT_SYMBOL_GPL(kvm_get_apic_base);
259 259
260void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data) 260int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
261{ 261{
262 /* TODO: reserve bits check */ 262 u64 old_state = vcpu->arch.apic_base &
263 kvm_lapic_set_base(vcpu, data); 263 (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
264 u64 new_state = msr_info->data &
265 (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
266 u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) |
267 0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE);
268
269 if (!msr_info->host_initiated &&
270 ((msr_info->data & reserved_bits) != 0 ||
271 new_state == X2APIC_ENABLE ||
272 (new_state == MSR_IA32_APICBASE_ENABLE &&
273 old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) ||
274 (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) &&
275 old_state == 0)))
276 return 1;
277
278 kvm_lapic_set_base(vcpu, msr_info->data);
279 return 0;
264} 280}
265EXPORT_SYMBOL_GPL(kvm_set_apic_base); 281EXPORT_SYMBOL_GPL(kvm_set_apic_base);
266 282
@@ -1840,6 +1856,7 @@ static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1840 if (__copy_to_user((void __user *)addr, instructions, 4)) 1856 if (__copy_to_user((void __user *)addr, instructions, 4))
1841 return 1; 1857 return 1;
1842 kvm->arch.hv_hypercall = data; 1858 kvm->arch.hv_hypercall = data;
1859 mark_page_dirty(kvm, gfn);
1843 break; 1860 break;
1844 } 1861 }
1845 case HV_X64_MSR_REFERENCE_TSC: { 1862 case HV_X64_MSR_REFERENCE_TSC: {
@@ -1868,19 +1885,21 @@ static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1868{ 1885{
1869 switch (msr) { 1886 switch (msr) {
1870 case HV_X64_MSR_APIC_ASSIST_PAGE: { 1887 case HV_X64_MSR_APIC_ASSIST_PAGE: {
1888 u64 gfn;
1871 unsigned long addr; 1889 unsigned long addr;
1872 1890
1873 if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) { 1891 if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
1874 vcpu->arch.hv_vapic = data; 1892 vcpu->arch.hv_vapic = data;
1875 break; 1893 break;
1876 } 1894 }
1877 addr = gfn_to_hva(vcpu->kvm, data >> 1895 gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
1878 HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT); 1896 addr = gfn_to_hva(vcpu->kvm, gfn);
1879 if (kvm_is_error_hva(addr)) 1897 if (kvm_is_error_hva(addr))
1880 return 1; 1898 return 1;
1881 if (__clear_user((void __user *)addr, PAGE_SIZE)) 1899 if (__clear_user((void __user *)addr, PAGE_SIZE))
1882 return 1; 1900 return 1;
1883 vcpu->arch.hv_vapic = data; 1901 vcpu->arch.hv_vapic = data;
1902 mark_page_dirty(vcpu->kvm, gfn);
1884 break; 1903 break;
1885 } 1904 }
1886 case HV_X64_MSR_EOI: 1905 case HV_X64_MSR_EOI:
@@ -2006,8 +2025,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2006 case 0x200 ... 0x2ff: 2025 case 0x200 ... 0x2ff:
2007 return set_msr_mtrr(vcpu, msr, data); 2026 return set_msr_mtrr(vcpu, msr, data);
2008 case MSR_IA32_APICBASE: 2027 case MSR_IA32_APICBASE:
2009 kvm_set_apic_base(vcpu, data); 2028 return kvm_set_apic_base(vcpu, msr_info);
2010 break;
2011 case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: 2029 case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
2012 return kvm_x2apic_msr_write(vcpu, msr, data); 2030 return kvm_x2apic_msr_write(vcpu, msr, data);
2013 case MSR_IA32_TSCDEADLINE: 2031 case MSR_IA32_TSCDEADLINE:
@@ -2598,10 +2616,10 @@ int kvm_dev_ioctl_check_extension(long ext)
2598 case KVM_CAP_GET_TSC_KHZ: 2616 case KVM_CAP_GET_TSC_KHZ:
2599 case KVM_CAP_KVMCLOCK_CTRL: 2617 case KVM_CAP_KVMCLOCK_CTRL:
2600 case KVM_CAP_READONLY_MEM: 2618 case KVM_CAP_READONLY_MEM:
2619 case KVM_CAP_HYPERV_TIME:
2601#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT 2620#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
2602 case KVM_CAP_ASSIGN_DEV_IRQ: 2621 case KVM_CAP_ASSIGN_DEV_IRQ:
2603 case KVM_CAP_PCI_2_3: 2622 case KVM_CAP_PCI_2_3:
2604 case KVM_CAP_HYPERV_TIME:
2605#endif 2623#endif
2606 r = 1; 2624 r = 1;
2607 break; 2625 break;
@@ -6409,6 +6427,7 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);
6409int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 6427int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
6410 struct kvm_sregs *sregs) 6428 struct kvm_sregs *sregs)
6411{ 6429{
6430 struct msr_data apic_base_msr;
6412 int mmu_reset_needed = 0; 6431 int mmu_reset_needed = 0;
6413 int pending_vec, max_bits, idx; 6432 int pending_vec, max_bits, idx;
6414 struct desc_ptr dt; 6433 struct desc_ptr dt;
@@ -6432,7 +6451,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
6432 6451
6433 mmu_reset_needed |= vcpu->arch.efer != sregs->efer; 6452 mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
6434 kvm_x86_ops->set_efer(vcpu, sregs->efer); 6453 kvm_x86_ops->set_efer(vcpu, sregs->efer);
6435 kvm_set_apic_base(vcpu, sregs->apic_base); 6454 apic_base_msr.data = sregs->apic_base;
6455 apic_base_msr.host_initiated = true;
6456 kvm_set_apic_base(vcpu, &apic_base_msr);
6436 6457
6437 mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; 6458 mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
6438 kvm_x86_ops->set_cr0(vcpu, sregs->cr0); 6459 kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
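
kvm_set_apic_base() now validates guest writes instead of accepting anything: reserved bits must be clear and only legal xAPIC/x2APIC mode transitions are allowed. A sketch of just the mode-transition rules from the hunk above, using the usual IA32_APIC_BASE bit positions (bit 11 enable, bit 10 x2APIC); the reserved-bit and host_initiated handling is left out:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define APICBASE_ENABLE  (1u << 11)
    #define X2APIC_ENABLE    (1u << 10)
    #define MODE_MASK        (APICBASE_ENABLE | X2APIC_ENABLE)

    static bool apic_mode_change_ok(uint64_t old_base, uint64_t new_base)
    {
        uint32_t old_state = old_base & MODE_MASK;
        uint32_t new_state = new_base & MODE_MASK;

        if (new_state == X2APIC_ENABLE)              /* x2APIC w/o enable */
            return false;
        if (new_state == APICBASE_ENABLE &&
            old_state == (APICBASE_ENABLE | X2APIC_ENABLE))
            return false;                            /* x2APIC -> xAPIC */
        if (new_state == (APICBASE_ENABLE | X2APIC_ENABLE) && old_state == 0)
            return false;                            /* disabled -> x2APIC */
        return true;
    }

    int main(void)
    {
        printf("xAPIC -> x2APIC: %d\n",
               apic_mode_change_ok(APICBASE_ENABLE,
                                   APICBASE_ENABLE | X2APIC_ENABLE));
        printf("x2APIC -> xAPIC: %d\n",
               apic_mode_change_ok(APICBASE_ENABLE | X2APIC_ENABLE,
                                   APICBASE_ENABLE));
        return 0;
    }
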
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index d6297176ab85..0fc584832001 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -642,8 +642,15 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
642 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) { 642 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
643 /* OK */ 643 /* OK */
644 } 644 }
645 if (irb_is_error(irb)) 645 if (irb_is_error(irb)) {
646 vcdev->err = -EIO; /* XXX - use real error */ 646 /* Command reject? */
647 if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
648 (irb->ecw[0] & SNS0_CMD_REJECT))
649 vcdev->err = -EOPNOTSUPP;
650 else
651 /* Map everything else to -EIO. */
652 vcdev->err = -EIO;
653 }
647 if (vcdev->curr_io & activity) { 654 if (vcdev->curr_io & activity) {
648 switch (activity) { 655 switch (activity) {
649 case VIRTIO_CCW_DOING_READ_FEAT: 656 case VIRTIO_CCW_DOING_READ_FEAT:
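
The interrupt handler change above distinguishes a command reject (unit check with the command-reject sense bit) from other channel errors, so callers see -EOPNOTSUPP rather than a blanket -EIO. A sketch of that mapping with stand-in flag values:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Local stand-ins for DEV_STAT_UNIT_CHECK and SNS0_CMD_REJECT. */
    #define DEMO_UNIT_CHECK  0x02
    #define DEMO_CMD_REJECT  0x80

    static int map_irb_error(bool is_error, unsigned char dstat,
                             unsigned char sense0)
    {
        if (!is_error)
            return 0;
        if ((dstat & DEMO_UNIT_CHECK) && (sense0 & DEMO_CMD_REJECT))
            return -EOPNOTSUPP;   /* device rejected the channel command */
        return -EIO;              /* anything else stays a generic error */
    }

    int main(void)
    {
        printf("%d %d\n",
               map_irb_error(true, DEMO_UNIT_CHECK, DEMO_CMD_REJECT),
               map_irb_error(true, 0, 0));
        return 0;
    }
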