aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAlexander Graf <agraf@suse.de>2009-11-29 22:02:02 -0500
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2009-12-08 00:02:50 -0500
commite15a113700324f7fdcee95589875daed2b98a2fe (patch)
treef2a528fb83219a748c86d0c03efd54c6980165ab
parentc0cefebc0b6ae1bc4c92672223a54e1ee96ea7f0 (diff)
powerpc/kvm: Sync guest visible MMU state
Currently userspace has no chance to find out which virtual address space we're in and resolve addresses. While that is a big problem for migration, it's also unpleasant when debugging, as gdb and the monitor don't work on virtual addresses. This patch exports enough of the MMU segment state to userspace to make debugging work and thus also includes the groundwork for migration. Signed-off-by: Alexander Graf <agraf@suse.de> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-rw-r--r--arch/powerpc/include/asm/kvm.h18
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h1
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h3
-rw-r--r--arch/powerpc/kvm/book3s.c49
-rw-r--r--arch/powerpc/kvm/book3s_64_emulate.c38
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu.c2
-rw-r--r--arch/powerpc/kvm/powerpc.c3
-rw-r--r--include/linux/kvm.h3
8 files changed, 101 insertions, 16 deletions
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index c9ca97f43bc1..81f3b0b5601e 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -47,7 +47,23 @@ struct kvm_regs {
47 47
48struct kvm_sregs { 48struct kvm_sregs {
49 __u32 pvr; 49 __u32 pvr;
50 char pad[1020]; 50 union {
51 struct {
52 __u64 sdr1;
53 struct {
54 struct {
55 __u64 slbe;
56 __u64 slbv;
57 } slb[64];
58 } ppc64;
59 struct {
60 __u32 sr[16];
61 __u64 ibat[8];
62 __u64 dbat[8];
63 } ppc32;
64 } s;
65 __u8 pad[1020];
66 } u;
51}; 67};
52 68
53struct kvm_fpu { 69struct kvm_fpu {
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 19ddb352fd0f..af2abe74f544 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -87,6 +87,7 @@
87#define BOOK3S_IRQPRIO_MAX 16 87#define BOOK3S_IRQPRIO_MAX 16
88 88
89#define BOOK3S_HFLAG_DCBZ32 0x1 89#define BOOK3S_HFLAG_DCBZ32 0x1
90#define BOOK3S_HFLAG_SLB 0x2
90 91
91#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */ 92#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */
92#define RESUME_FLAG_HOST (1<<1) /* Resume host? */ 93#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index c6011336371e..74b7369770d0 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -46,6 +46,7 @@ struct kvmppc_sr {
46}; 46};
47 47
48struct kvmppc_bat { 48struct kvmppc_bat {
49 u64 raw;
49 u32 bepi; 50 u32 bepi;
50 u32 bepi_mask; 51 u32 bepi_mask;
51 bool vs; 52 bool vs;
@@ -113,6 +114,8 @@ extern struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, boo
113extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr, bool data); 114extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr, bool data);
114extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr); 115extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr);
115extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec); 116extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
117extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
118 bool upper, u32 val);
116 119
117extern u32 kvmppc_trampoline_lowmem; 120extern u32 kvmppc_trampoline_lowmem;
118extern u32 kvmppc_trampoline_enter; 121extern u32 kvmppc_trampoline_enter;
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 42037d46a416..3e294bd9b8c6 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -281,6 +281,7 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
281 281
282void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) 282void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
283{ 283{
284 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
284 vcpu->arch.pvr = pvr; 285 vcpu->arch.pvr = pvr;
285 if ((pvr >= 0x330000) && (pvr < 0x70330000)) { 286 if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
286 kvmppc_mmu_book3s_64_init(vcpu); 287 kvmppc_mmu_book3s_64_init(vcpu);
@@ -762,14 +763,62 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
762int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 763int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
763 struct kvm_sregs *sregs) 764 struct kvm_sregs *sregs)
764{ 765{
766 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
767 int i;
768
765 sregs->pvr = vcpu->arch.pvr; 769 sregs->pvr = vcpu->arch.pvr;
770
771 sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
772 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
773 for (i = 0; i < 64; i++) {
774 sregs->u.s.ppc64.slb[i].slbe = vcpu3s->slb[i].orige | i;
775 sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv;
776 }
777 } else {
778 for (i = 0; i < 16; i++) {
779 sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw;
780 sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw;
781 }
782 for (i = 0; i < 8; i++) {
783 sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
784 sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
785 }
786 }
766 return 0; 787 return 0;
767} 788}
768 789
769int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 790int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
770 struct kvm_sregs *sregs) 791 struct kvm_sregs *sregs)
771{ 792{
793 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
794 int i;
795
772 kvmppc_set_pvr(vcpu, sregs->pvr); 796 kvmppc_set_pvr(vcpu, sregs->pvr);
797
798 vcpu3s->sdr1 = sregs->u.s.sdr1;
799 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
800 for (i = 0; i < 64; i++) {
801 vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
802 sregs->u.s.ppc64.slb[i].slbe);
803 }
804 } else {
805 for (i = 0; i < 16; i++) {
806 vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
807 }
808 for (i = 0; i < 8; i++) {
809 kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
810 (u32)sregs->u.s.ppc32.ibat[i]);
811 kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
812 (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
813 kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
814 (u32)sregs->u.s.ppc32.dbat[i]);
815 kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
816 (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
817 }
818 }
819
820 /* Flush the MMU after messing with the segments */
821 kvmppc_mmu_pte_flush(vcpu, 0, 0);
773 return 0; 822 return 0;
774} 823}
775 824
diff --git a/arch/powerpc/kvm/book3s_64_emulate.c b/arch/powerpc/kvm/book3s_64_emulate.c
index c343e67306e0..1027eac6d474 100644
--- a/arch/powerpc/kvm/book3s_64_emulate.c
+++ b/arch/powerpc/kvm/book3s_64_emulate.c
@@ -185,7 +185,27 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
185 return emulated; 185 return emulated;
186} 186}
187 187
188static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u64 val) 188void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
189 u32 val)
190{
191 if (upper) {
192 /* Upper BAT */
193 u32 bl = (val >> 2) & 0x7ff;
194 bat->bepi_mask = (~bl << 17);
195 bat->bepi = val & 0xfffe0000;
196 bat->vs = (val & 2) ? 1 : 0;
197 bat->vp = (val & 1) ? 1 : 0;
198 bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
199 } else {
200 /* Lower BAT */
201 bat->brpn = val & 0xfffe0000;
202 bat->wimg = (val >> 3) & 0xf;
203 bat->pp = val & 3;
204 bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
205 }
206}
207
208static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val)
189{ 209{
190 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); 210 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
191 struct kvmppc_bat *bat; 211 struct kvmppc_bat *bat;
@@ -207,19 +227,7 @@ static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u64 val)
207 BUG(); 227 BUG();
208 } 228 }
209 229
210 if (!(sprn % 2)) { 230 kvmppc_set_bat(vcpu, bat, !(sprn % 2), val);
211 /* Upper BAT */
212 u32 bl = (val >> 2) & 0x7ff;
213 bat->bepi_mask = (~bl << 17);
214 bat->bepi = val & 0xfffe0000;
215 bat->vs = (val & 2) ? 1 : 0;
216 bat->vp = (val & 1) ? 1 : 0;
217 } else {
218 /* Lower BAT */
219 bat->brpn = val & 0xfffe0000;
220 bat->wimg = (val >> 3) & 0xf;
221 bat->pp = val & 3;
222 }
223} 231}
224 232
225int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) 233int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
@@ -243,7 +251,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
243 case SPRN_IBAT4U ... SPRN_IBAT7L: 251 case SPRN_IBAT4U ... SPRN_IBAT7L:
244 case SPRN_DBAT0U ... SPRN_DBAT3L: 252 case SPRN_DBAT0U ... SPRN_DBAT3L:
245 case SPRN_DBAT4U ... SPRN_DBAT7L: 253 case SPRN_DBAT4U ... SPRN_DBAT7L:
246 kvmppc_write_bat(vcpu, sprn, vcpu->arch.gpr[rs]); 254 kvmppc_write_bat(vcpu, sprn, (u32)vcpu->arch.gpr[rs]);
247 /* BAT writes happen so rarely that we're ok to flush 255 /* BAT writes happen so rarely that we're ok to flush
248 * everything here */ 256 * everything here */
249 kvmppc_mmu_pte_flush(vcpu, 0, 0); 257 kvmppc_mmu_pte_flush(vcpu, 0, 0);
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index a31f9c677d23..5598f88f142e 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -473,4 +473,6 @@ void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
473 mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid; 473 mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid;
474 mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp; 474 mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp;
475 mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32; 475 mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32;
476
477 vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
476} 478}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 692c3709011e..d82551efbfbf 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -144,6 +144,9 @@ int kvm_dev_ioctl_check_extension(long ext)
144 int r; 144 int r;
145 145
146 switch (ext) { 146 switch (ext) {
147 case KVM_CAP_PPC_SEGSTATE:
148 r = 1;
149 break;
147 case KVM_CAP_COALESCED_MMIO: 150 case KVM_CAP_COALESCED_MMIO:
148 r = KVM_COALESCED_MMIO_PAGE_OFFSET; 151 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
149 break; 152 break;
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index f8f8900fc5ec..caf6173bd2e8 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -436,6 +436,9 @@ struct kvm_ioeventfd {
436#endif 436#endif
437#define KVM_CAP_IOEVENTFD 36 437#define KVM_CAP_IOEVENTFD 36
438#define KVM_CAP_SET_IDENTITY_MAP_ADDR 37 438#define KVM_CAP_SET_IDENTITY_MAP_ADDR 37
439/* KVM upstream has more features, but we synched this number.
440 Linux, please remove this comment on rebase. */
441#define KVM_CAP_PPC_SEGSTATE 43
439 442
440#ifdef KVM_CAP_IRQ_ROUTING 443#ifdef KVM_CAP_IRQ_ROUTING
441 444