author     Linus Torvalds <torvalds@linux-foundation.org>   2011-05-23 11:42:08 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-05-23 11:42:08 -0400
commit     f4b10bc60a310916bab5413f821b99ef845cac17 (patch)
tree       904532e8cd93b88261f21427c4ec4917d4b3e79d /arch/powerpc/kvm
parent     53ee7569ce8beb3fd3fc0817116c29298d72353f (diff)
parent     c8cfbb555eb3632bf3dcbe1a591c1f4d0c28681c (diff)
Merge branch 'kvm-updates/2.6.40' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/2.6.40' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (131 commits)
  KVM: MMU: Use ptep_user for cmpxchg_gpte()
  KVM: Fix kvm mmu_notifier initialization order
  KVM: Add documentation for KVM_CAP_NR_VCPUS
  KVM: make guest mode entry to be rcu quiescent state
  KVM: x86 emulator: Make jmp far emulation into a separate function
  KVM: x86 emulator: Rename emulate_grpX() to em_grpX()
  KVM: x86 emulator: Remove unused arg from emulate_pop()
  KVM: x86 emulator: Remove unused arg from writeback()
  KVM: x86 emulator: Remove unused arg from read_descriptor()
  KVM: x86 emulator: Remove unused arg from seg_override()
  KVM: Validate userspace_addr of memslot when registered
  KVM: MMU: Clean up gpte reading with copy_from_user()
  KVM: PPC: booke: add sregs support
  KVM: PPC: booke: save/restore VRSAVE (a.k.a. USPRG0)
  KVM: PPC: use ticks, not usecs, for exit timing
  KVM: PPC: fix exit accounting for SPRs, tlbwe, tlbsx
  KVM: PPC: e500: emulate SVR
  KVM: VMX: Cache vmcs segment fields
  KVM: x86 emulator: consolidate segment accessors
  KVM: VMX: Avoid reading %rip unnecessarily when handling exceptions
  ...
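
The PPC booke portion of this pull wires guest special registers into the generic KVM_GET_SREGS/KVM_SET_SREGS vcpu ioctls, advertised through the new KVM_CAP_PPC_BOOKE_SREGS capability (see the booke.c and powerpc.c hunks below). A minimal, hypothetical userspace sketch of driving that path to inject a decrementer interrupt, assuming /dev/kvm and a vcpu fd have already been created the usual way, and defining TSR_DIS locally since it lives in asm/reg_booke.h rather than the exported KVM headers:

/* Hypothetical sketch, not part of this merge: exercise the new booke
 * sregs interface from userspace.  Error handling is trimmed. */
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define TSR_DIS 0x08000000      /* decrementer interrupt status, from asm/reg_booke.h */

static int inject_guest_dec(int kvm_fd, int vcpu_fd)
{
        struct kvm_sregs sregs;

        /* New capability added by this series on booke hosts. */
        if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_BOOKE_SREGS) <= 0)
                return -1;

        /* Fetch current state; fills pvr plus the u.e (booke) block. */
        if (ioctl(vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
                return -1;

        /* Per the FIXME in set_sregs_base(), only the TSR[DIS] + TCR[DIE]
         * combination is acted on today: it queues a decrementer interrupt. */
        sregs.u.e.update_special = KVM_SREGS_E_UPDATE_TSR;
        sregs.u.e.tsr |= TSR_DIS;

        return ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
}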
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--   arch/powerpc/kvm/44x.c                 10
-rw-r--r--   arch/powerpc/kvm/44x_emulate.c          2
-rw-r--r--   arch/powerpc/kvm/booke.c              154
-rw-r--r--   arch/powerpc/kvm/booke_interrupts.S     1
-rw-r--r--   arch/powerpc/kvm/e500.c                76
-rw-r--r--   arch/powerpc/kvm/e500_emulate.c         7
-rw-r--r--   arch/powerpc/kvm/e500_tlb.c            13
-rw-r--r--   arch/powerpc/kvm/emulate.c             15
-rw-r--r--   arch/powerpc/kvm/powerpc.c             21
-rw-r--r--   arch/powerpc/kvm/timing.c              31
10 files changed, 309 insertions, 21 deletions
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 74d0e7421143..da3a1225c0ac 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -107,6 +107,16 @@ int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
         return 0;
 }
 
+void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+        kvmppc_get_sregs_ivor(vcpu, sregs);
+}
+
+int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+        return kvmppc_set_sregs_ivor(vcpu, sregs);
+}
+
 struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 {
         struct kvmppc_vcpu_44x *vcpu_44x;
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
index 65ea083a5b27..549bb2c9a47a 100644
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -158,7 +158,6 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
                 emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
         }
 
-        kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
         return emulated;
 }
 
@@ -179,7 +178,6 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
                 emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
         }
 
-        kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
         return emulated;
 }
 
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index ef76acb455c3..8462b3a1c1c7 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -569,6 +569,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         kvmppc_set_msr(vcpu, regs->msr);
         vcpu->arch.shared->srr0 = regs->srr0;
         vcpu->arch.shared->srr1 = regs->srr1;
+        kvmppc_set_pid(vcpu, regs->pid);
         vcpu->arch.shared->sprg0 = regs->sprg0;
         vcpu->arch.shared->sprg1 = regs->sprg1;
         vcpu->arch.shared->sprg2 = regs->sprg2;
@@ -584,16 +585,165 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         return 0;
 }
 
+static void get_sregs_base(struct kvm_vcpu *vcpu,
+                           struct kvm_sregs *sregs)
+{
+        u64 tb = get_tb();
+
+        sregs->u.e.features |= KVM_SREGS_E_BASE;
+
+        sregs->u.e.csrr0 = vcpu->arch.csrr0;
+        sregs->u.e.csrr1 = vcpu->arch.csrr1;
+        sregs->u.e.mcsr = vcpu->arch.mcsr;
+        sregs->u.e.esr = vcpu->arch.esr;
+        sregs->u.e.dear = vcpu->arch.shared->dar;
+        sregs->u.e.tsr = vcpu->arch.tsr;
+        sregs->u.e.tcr = vcpu->arch.tcr;
+        sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
+        sregs->u.e.tb = tb;
+        sregs->u.e.vrsave = vcpu->arch.vrsave;
+}
+
+static int set_sregs_base(struct kvm_vcpu *vcpu,
+                          struct kvm_sregs *sregs)
+{
+        if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
+                return 0;
+
+        vcpu->arch.csrr0 = sregs->u.e.csrr0;
+        vcpu->arch.csrr1 = sregs->u.e.csrr1;
+        vcpu->arch.mcsr = sregs->u.e.mcsr;
+        vcpu->arch.esr = sregs->u.e.esr;
+        vcpu->arch.shared->dar = sregs->u.e.dear;
+        vcpu->arch.vrsave = sregs->u.e.vrsave;
+        vcpu->arch.tcr = sregs->u.e.tcr;
+
+        if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC)
+                vcpu->arch.dec = sregs->u.e.dec;
+
+        kvmppc_emulate_dec(vcpu);
+
+        if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
+                /*
+                 * FIXME: existing KVM timer handling is incomplete.
+                 * TSR cannot be read by the guest, and its value in
+                 * vcpu->arch is always zero. For now, just handle
+                 * the case where the caller is trying to inject a
+                 * decrementer interrupt.
+                 */
+
+                if ((sregs->u.e.tsr & TSR_DIS) &&
+                    (vcpu->arch.tcr & TCR_DIE))
+                        kvmppc_core_queue_dec(vcpu);
+        }
+
+        return 0;
+}
+
+static void get_sregs_arch206(struct kvm_vcpu *vcpu,
+                              struct kvm_sregs *sregs)
+{
+        sregs->u.e.features |= KVM_SREGS_E_ARCH206;
+
+        sregs->u.e.pir = 0;
+        sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
+        sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
+        sregs->u.e.decar = vcpu->arch.decar;
+        sregs->u.e.ivpr = vcpu->arch.ivpr;
+}
+
+static int set_sregs_arch206(struct kvm_vcpu *vcpu,
+                             struct kvm_sregs *sregs)
+{
+        if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
+                return 0;
+
+        if (sregs->u.e.pir != 0)
+                return -EINVAL;
+
+        vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
+        vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
+        vcpu->arch.decar = sregs->u.e.decar;
+        vcpu->arch.ivpr = sregs->u.e.ivpr;
+
+        return 0;
+}
+
+void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+        sregs->u.e.features |= KVM_SREGS_E_IVOR;
+
+        sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
+        sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
+        sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
+        sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
+        sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
+        sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
+        sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
+        sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
+        sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
+        sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
+        sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
+        sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
+        sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
+        sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
+        sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
+        sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
+}
+
+int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+        if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
+                return 0;
+
+        vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
+        vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];
+
+        return 0;
+}
+
 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                   struct kvm_sregs *sregs)
 {
-        return -ENOTSUPP;
+        sregs->pvr = vcpu->arch.pvr;
+
+        get_sregs_base(vcpu, sregs);
+        get_sregs_arch206(vcpu, sregs);
+        kvmppc_core_get_sregs(vcpu, sregs);
+        return 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                   struct kvm_sregs *sregs)
 {
-        return -ENOTSUPP;
+        int ret;
+
+        if (vcpu->arch.pvr != sregs->pvr)
+                return -EINVAL;
+
+        ret = set_sregs_base(vcpu, sregs);
+        if (ret < 0)
+                return ret;
+
+        ret = set_sregs_arch206(vcpu, sregs);
+        if (ret < 0)
+                return ret;
+
+        return kvmppc_core_set_sregs(vcpu, sregs);
 }
 
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 1cc471faac2d..b58ccae95904 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -380,7 +380,6 @@ lightweight_exit:
          * because host interrupt handlers would get confused. */
         lwz     r1, VCPU_GPR(r1)(r4)
 
-        /* XXX handle USPRG0 */
         /* Host interrupt handlers may have clobbered these guest-readable
          * SPRGs, so we need to reload them here with the guest's values. */
         lwz     r3, VCPU_SPRG4(r4)
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index e3768ee9b595..318dbc61ba44 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -63,6 +63,7 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
 
         /* Registers init */
         vcpu->arch.pvr = mfspr(SPRN_PVR);
+        vcpu_e500->svr = mfspr(SPRN_SVR);
 
         /* Since booke kvm only support one core, update all vcpus' PIR to 0 */
         vcpu->vcpu_id = 0;
@@ -96,6 +97,81 @@ int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
         return 0;
 }
 
+void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
+        sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_SPE |
+                               KVM_SREGS_E_PM;
+        sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL;
+
+        sregs->u.e.impl.fsl.features = 0;
+        sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
+        sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
+        sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;
+
+        sregs->u.e.mas0 = vcpu_e500->mas0;
+        sregs->u.e.mas1 = vcpu_e500->mas1;
+        sregs->u.e.mas2 = vcpu_e500->mas2;
+        sregs->u.e.mas7_3 = ((u64)vcpu_e500->mas7 << 32) | vcpu_e500->mas3;
+        sregs->u.e.mas4 = vcpu_e500->mas4;
+        sregs->u.e.mas6 = vcpu_e500->mas6;
+
+        sregs->u.e.mmucfg = mfspr(SPRN_MMUCFG);
+        sregs->u.e.tlbcfg[0] = vcpu_e500->tlb0cfg;
+        sregs->u.e.tlbcfg[1] = vcpu_e500->tlb1cfg;
+        sregs->u.e.tlbcfg[2] = 0;
+        sregs->u.e.tlbcfg[3] = 0;
+
+        sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
+        sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
+        sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
+        sregs->u.e.ivor_high[3] =
+                vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
+
+        kvmppc_get_sregs_ivor(vcpu, sregs);
+}
+
+int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
+        if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
+                vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
+                vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
+                vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
+        }
+
+        if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
+                vcpu_e500->mas0 = sregs->u.e.mas0;
+                vcpu_e500->mas1 = sregs->u.e.mas1;
+                vcpu_e500->mas2 = sregs->u.e.mas2;
+                vcpu_e500->mas7 = sregs->u.e.mas7_3 >> 32;
+                vcpu_e500->mas3 = (u32)sregs->u.e.mas7_3;
+                vcpu_e500->mas4 = sregs->u.e.mas4;
+                vcpu_e500->mas6 = sregs->u.e.mas6;
+        }
+
+        if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
+                return 0;
+
+        if (sregs->u.e.features & KVM_SREGS_E_SPE) {
+                vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] =
+                        sregs->u.e.ivor_high[0];
+                vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] =
+                        sregs->u.e.ivor_high[1];
+                vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] =
+                        sregs->u.e.ivor_high[2];
+        }
+
+        if (sregs->u.e.features & KVM_SREGS_E_PM) {
+                vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] =
+                        sregs->u.e.ivor_high[3];
+        }
+
+        return kvmppc_set_sregs_ivor(vcpu, sregs);
+}
+
 struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 {
         struct kvmppc_vcpu_e500 *vcpu_e500;
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 8e3edfbc9634..69cd665a0caf 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
  *
  * Author: Yu Liu, <yu.liu@freescale.com>
  *
@@ -78,8 +78,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 
         switch (sprn) {
         case SPRN_PID:
-                vcpu_e500->pid[0] = vcpu->arch.shadow_pid =
-                        vcpu->arch.pid = spr_val;
+                kvmppc_set_pid(vcpu, spr_val);
                 break;
         case SPRN_PID1:
                 vcpu_e500->pid[1] = spr_val; break;
@@ -175,6 +174,8 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
                 kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break;
         case SPRN_HID1:
                 kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break;
+        case SPRN_SVR:
+                kvmppc_set_gpr(vcpu, rt, vcpu_e500->svr); break;
 
         case SPRN_MMUCSR0:
                 kvmppc_set_gpr(vcpu, rt, 0); break;
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index d6d6d47a75a9..b18fe353397d 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
  *
  * Author: Yu Liu, yu.liu@freescale.com
  *
@@ -24,6 +24,7 @@
 #include "../mm/mmu_decl.h"
 #include "e500_tlb.h"
 #include "trace.h"
+#include "timing.h"
 
 #define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1)
 
@@ -506,6 +507,7 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
                 vcpu_e500->mas7 = 0;
         }
 
+        kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
         return EMULATE_DONE;
 }
 
@@ -571,6 +573,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
                 write_host_tlbe(vcpu_e500, stlbsel, sesel);
         }
 
+        kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
         return EMULATE_DONE;
 }
 
@@ -672,6 +675,14 @@ int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
         return -1;
 }
 
+void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
+{
+        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
+        vcpu_e500->pid[0] = vcpu->arch.shadow_pid =
+                vcpu->arch.pid = pid;
+}
+
 void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
         struct tlbe *tlbe;
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index c64fd2909bb2..141dce3c6810 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -114,6 +114,12 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
         }
 }
 
+u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
+{
+        u64 jd = tb - vcpu->arch.dec_jiffies;
+        return vcpu->arch.dec - jd;
+}
+
 /* XXX to do:
  * lhax
  * lhaux
@@ -279,11 +285,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
                         case SPRN_DEC:
                         {
-                                u64 jd = get_tb() - vcpu->arch.dec_jiffies;
-                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd);
-                                pr_debug("mfDEC: %x - %llx = %lx\n",
-                                         vcpu->arch.dec, jd,
-                                         kvmppc_get_gpr(vcpu, rt));
+                                kvmppc_set_gpr(vcpu, rt,
+                                               kvmppc_get_dec(vcpu, get_tb()));
                                 break;
                         }
                         default:
@@ -294,6 +297,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                                 }
                                 break;
                         }
+                        kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
                         break;
 
                 case OP_31_XOP_STHX:
@@ -363,6 +367,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                                         printk("mtspr: unknown spr %x\n", sprn);
                                 break;
                         }
+                        kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
                         break;
 
                 case OP_31_XOP_DCBI:
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 99758460efde..616dd516ca1f 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -175,7 +175,11 @@ int kvm_dev_ioctl_check_extension(long ext)
         int r;
 
         switch (ext) {
+#ifdef CONFIG_BOOKE
+        case KVM_CAP_PPC_BOOKE_SREGS:
+#else
         case KVM_CAP_PPC_SEGSTATE:
+#endif
         case KVM_CAP_PPC_PAIRED_SINGLES:
         case KVM_CAP_PPC_UNSET_IRQ:
         case KVM_CAP_PPC_IRQ_LEVEL:
@@ -284,6 +288,10 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
         tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
         vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
 
+#ifdef CONFIG_KVM_EXIT_TIMING
+        mutex_init(&vcpu->arch.exit_timing_lock);
+#endif
+
         return 0;
 }
 
@@ -294,12 +302,25 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+#ifdef CONFIG_BOOKE
+        /*
+         * vrsave (formerly usprg0) isn't used by Linux, but may
+         * be used by the guest.
+         *
+         * On non-booke this is associated with Altivec and
+         * is handled by code in book3s.c.
+         */
+        mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
+#endif
         kvmppc_core_vcpu_load(vcpu, cpu);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
         kvmppc_core_vcpu_put(vcpu);
+#ifdef CONFIG_BOOKE
+        vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
+#endif
 }
 
 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
index a021f5827a33..319177df9587 100644
--- a/arch/powerpc/kvm/timing.c
+++ b/arch/powerpc/kvm/timing.c
@@ -34,8 +34,8 @@ void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
 {
         int i;
 
-        /* pause guest execution to avoid concurrent updates */
-        mutex_lock(&vcpu->mutex);
+        /* Take a lock to avoid concurrent updates */
+        mutex_lock(&vcpu->arch.exit_timing_lock);
 
         vcpu->arch.last_exit_type = 0xDEAD;
         for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
@@ -49,7 +49,7 @@ void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
         vcpu->arch.timing_exit.tv64 = 0;
         vcpu->arch.timing_last_enter.tv64 = 0;
 
-        mutex_unlock(&vcpu->mutex);
+        mutex_unlock(&vcpu->arch.exit_timing_lock);
 }
 
 static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
@@ -65,6 +65,8 @@ static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
                 return;
         }
 
+        mutex_lock(&vcpu->arch.exit_timing_lock);
+
         vcpu->arch.timing_count_type[type]++;
 
         /* sum */
@@ -93,6 +95,8 @@ static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
                 vcpu->arch.timing_min_duration[type] = duration;
         if (unlikely(duration > vcpu->arch.timing_max_duration[type]))
                 vcpu->arch.timing_max_duration[type] = duration;
+
+        mutex_unlock(&vcpu->arch.exit_timing_lock);
 }
 
 void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu)
@@ -147,17 +151,30 @@ static int kvmppc_exit_timing_show(struct seq_file *m, void *private)
 {
         struct kvm_vcpu *vcpu = m->private;
         int i;
+        u64 min, max, sum, sum_quad;
 
         seq_printf(m, "%s", "type count min max sum sum_squared\n");
 
+
         for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
+
+                min = vcpu->arch.timing_min_duration[i];
+                do_div(min, tb_ticks_per_usec);
+                max = vcpu->arch.timing_max_duration[i];
+                do_div(max, tb_ticks_per_usec);
+                sum = vcpu->arch.timing_sum_duration[i];
+                do_div(sum, tb_ticks_per_usec);
+                sum_quad = vcpu->arch.timing_sum_quad_duration[i];
+                do_div(sum_quad, tb_ticks_per_usec);
+
                 seq_printf(m, "%12s %10d %10lld %10lld %20lld %20lld\n",
                            kvm_exit_names[i],
                            vcpu->arch.timing_count_type[i],
-                           vcpu->arch.timing_min_duration[i],
-                           vcpu->arch.timing_max_duration[i],
-                           vcpu->arch.timing_sum_duration[i],
-                           vcpu->arch.timing_sum_quad_duration[i]);
+                           min,
+                           max,
+                           sum,
+                           sum_quad);
+
         }
         return 0;
 }