path: root/arch/powerpc/kvm
author     Alexander Graf <agraf@suse.de>                      2009-11-29 22:02:02 -0500
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2009-12-08 00:02:50 -0500
commit     e15a113700324f7fdcee95589875daed2b98a2fe (patch)
tree       f2a528fb83219a748c86d0c03efd54c6980165ab /arch/powerpc/kvm
parent     c0cefebc0b6ae1bc4c92672223a54e1ee96ea7f0 (diff)
powerpc/kvm: Sync guest visible MMU state
Currently userspace has no way to find out which virtual address space the guest is in and to resolve addresses. While that is a big problem for migration, it is also unpleasant when debugging, as gdb and the monitor don't work on virtual addresses.

This patch exports enough of the MMU segment state to userspace to make debugging work, and thereby also lays the groundwork for migration.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
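The exported state travels through the existing KVM_GET_SREGS/KVM_SET_SREGS vcpu ioctls. As a hedged illustration only (not part of this patch; the vcpu file descriptor plumbing, error handling, and the PowerPC <linux/kvm.h> headers from this series are assumed), a debugger front end could pull the segment state like this:

/* Hedged userspace sketch, not from this patch: dump the guest MMU segment
 * state that kvm_arch_vcpu_ioctl_get_sregs() now fills in. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void dump_book3s_64_segments(int vcpu_fd)
{
	struct kvm_sregs sregs;
	int i;

	if (ioctl(vcpu_fd, KVM_GET_SREGS, &sregs) < 0) {
		perror("KVM_GET_SREGS");
		return;
	}

	printf("sdr1 = 0x%llx\n", (unsigned long long)sregs.u.s.sdr1);
	/* For an SLB-based (Book3S-64) guest; a 32-bit guest would read
	 * u.s.ppc32.sr[] and u.s.ppc32.ibat[]/dbat[] instead. */
	for (i = 0; i < 64; i++)
		printf("slb[%2d]: slbe=0x%llx slbv=0x%llx\n", i,
		       (unsigned long long)sregs.u.s.ppc64.slb[i].slbe,
		       (unsigned long long)sregs.u.s.ppc64.slb[i].slbv);
}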
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s.c             49
-rw-r--r--  arch/powerpc/kvm/book3s_64_emulate.c  38
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c       2
-rw-r--r--  arch/powerpc/kvm/powerpc.c             3
4 files changed, 77 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 42037d46a41..3e294bd9b8c 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -281,6 +281,7 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
 
 void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
 {
+	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
 	vcpu->arch.pvr = pvr;
 	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
 		kvmppc_mmu_book3s_64_init(vcpu);
@@ -762,14 +763,62 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                   struct kvm_sregs *sregs)
 {
+	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+	int i;
+
 	sregs->pvr = vcpu->arch.pvr;
+
+	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
+	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
+		for (i = 0; i < 64; i++) {
+			sregs->u.s.ppc64.slb[i].slbe = vcpu3s->slb[i].orige | i;
+			sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv;
+		}
+	} else {
+		for (i = 0; i < 16; i++) {
+			sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw;
+			sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw;
+		}
+		for (i = 0; i < 8; i++) {
+			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
+			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
+		}
+	}
 	return 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                   struct kvm_sregs *sregs)
 {
+	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+	int i;
+
 	kvmppc_set_pvr(vcpu, sregs->pvr);
+
+	vcpu3s->sdr1 = sregs->u.s.sdr1;
+	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
+		for (i = 0; i < 64; i++) {
+			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
+					      sregs->u.s.ppc64.slb[i].slbe);
+		}
+	} else {
+		for (i = 0; i < 16; i++) {
+			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
+		}
+		for (i = 0; i < 8; i++) {
+			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
+				       (u32)sregs->u.s.ppc32.ibat[i]);
+			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
+				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
+			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
+				       (u32)sregs->u.s.ppc32.dbat[i]);
+			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
+				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
+		}
+	}
+
+	/* Flush the MMU after messing with the segments */
+	kvmppc_mmu_pte_flush(vcpu, 0, 0);
 	return 0;
 }
 
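For the migration case the commit message mentions, the same two ioctls form a get/set round trip. A minimal hedged sketch, assuming the vcpu file descriptors and whatever transport carries the structure between hosts (not part of this patch):

/* Hedged sketch, not from this patch: save segment state on the source vcpu
 * and restore it on the destination, where KVM_SET_SREGS lands in
 * kvm_arch_vcpu_ioctl_set_sregs() above, re-installing the SLB entries
 * (or SRs and BATs) and flushing the shadow MMU. */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int copy_segment_state(int src_vcpu_fd, int dst_vcpu_fd)
{
	struct kvm_sregs sregs;

	if (ioctl(src_vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
		return -1;
	return ioctl(dst_vcpu_fd, KVM_SET_SREGS, &sregs);
}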
diff --git a/arch/powerpc/kvm/book3s_64_emulate.c b/arch/powerpc/kvm/book3s_64_emulate.c
index c343e67306e..1027eac6d47 100644
--- a/arch/powerpc/kvm/book3s_64_emulate.c
+++ b/arch/powerpc/kvm/book3s_64_emulate.c
@@ -185,7 +185,27 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	return emulated;
 }
 
-static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u64 val)
+void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
+		    u32 val)
+{
+	if (upper) {
+		/* Upper BAT */
+		u32 bl = (val >> 2) & 0x7ff;
+		bat->bepi_mask = (~bl << 17);
+		bat->bepi = val & 0xfffe0000;
+		bat->vs = (val & 2) ? 1 : 0;
+		bat->vp = (val & 1) ? 1 : 0;
+		bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
+	} else {
+		/* Lower BAT */
+		bat->brpn = val & 0xfffe0000;
+		bat->wimg = (val >> 3) & 0xf;
+		bat->pp = val & 3;
+		bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
+	}
+}
+
+static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val)
 {
 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 	struct kvmppc_bat *bat;
@@ -207,19 +227,7 @@ static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u64 val)
 		BUG();
 	}
 
-	if (!(sprn % 2)) {
-		/* Upper BAT */
-		u32 bl = (val >> 2) & 0x7ff;
-		bat->bepi_mask = (~bl << 17);
-		bat->bepi = val & 0xfffe0000;
-		bat->vs = (val & 2) ? 1 : 0;
-		bat->vp = (val & 1) ? 1 : 0;
-	} else {
-		/* Lower BAT */
-		bat->brpn = val & 0xfffe0000;
-		bat->wimg = (val >> 3) & 0xf;
-		bat->pp = val & 3;
-	}
+	kvmppc_set_bat(vcpu, bat, !(sprn % 2), val);
 }
 
 int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
@@ -243,7 +251,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 	case SPRN_IBAT4U ... SPRN_IBAT7L:
 	case SPRN_DBAT0U ... SPRN_DBAT3L:
 	case SPRN_DBAT4U ... SPRN_DBAT7L:
-		kvmppc_write_bat(vcpu, sprn, vcpu->arch.gpr[rs]);
+		kvmppc_write_bat(vcpu, sprn, (u32)vcpu->arch.gpr[rs]);
 		/* BAT writes happen so rarely that we're ok to flush
 		 * everything here */
 		kvmppc_mmu_pte_flush(vcpu, 0, 0);
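The new kvmppc_set_bat() helper above packs the architected BATU/BATL fields; the inverse can be handy when sanity-checking values carried in kvm_sregs. A hedged illustration only, with the struct and function names invented for the example, using the same masks and shifts as the kernel code:

/* Hedged illustration, not from this patch: decode an upper BAT word the
 * way kvmppc_set_bat() interprets it when packing. */
struct bat_upper_fields {
	unsigned int bepi;	/* block effective page index, val & 0xfffe0000 */
	unsigned int bl;	/* block length encoding, (val >> 2) & 0x7ff */
	int vs;			/* supervisor-state valid bit */
	int vp;			/* problem-state valid bit */
};

static void decode_bat_upper(unsigned int val, struct bat_upper_fields *f)
{
	f->bepi = val & 0xfffe0000;
	f->bl   = (val >> 2) & 0x7ff;
	f->vs   = (val & 2) ? 1 : 0;
	f->vp   = (val & 1) ? 1 : 0;
}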
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index a31f9c677d2..5598f88f142 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -473,4 +473,6 @@ void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
 	mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid;
 	mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp;
 	mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32;
+
+	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
 }
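The flag set here pairs with the clear in kvmppc_set_pvr() above: BOOK3S_HFLAG_SLB ends up meaning "this vcpu uses the 64-bit, SLB-based MMU", which is what the get/set_sregs paths branch on. A hedged one-liner (the helper name is invented; it assumes the kernel-internal kvm headers) just to make the convention explicit:

/* Hedged illustration, not from this patch: the convention established by
 * kvmppc_set_pvr() (clears the flag) and kvmppc_mmu_book3s_64_init() (sets it). */
static inline bool kvmppc_vcpu_uses_slb(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) != 0;
}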
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 692c3709011..d82551efbfb 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -144,6 +144,9 @@ int kvm_dev_ioctl_check_extension(long ext)
 	int r;
 
 	switch (ext) {
+	case KVM_CAP_PPC_SEGSTATE:
+		r = 1;
+		break;
 	case KVM_CAP_COALESCED_MMIO:
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
 		break;
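Userspace should probe the new capability before relying on the extended sregs contents. A hedged sketch (assumes <linux/kvm.h> from a tree containing this series, which is what defines KVM_CAP_PPC_SEGSTATE):

/* Hedged userspace sketch, not from this patch: KVM_CHECK_EXTENSION on the
 * /dev/kvm fd reports whether this kernel exposes the segment state. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}
	if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SEGSTATE) > 0)
		printf("segment state is available via KVM_GET_SREGS\n");
	else
		printf("kernel predates this patch; no segment state\n");
	return 0;
}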