 arch/powerpc/include/asm/kvm_book3s.h |  8
 arch/powerpc/include/asm/kvm_booke.h  |  6
 arch/powerpc/include/asm/kvm_ppc.h    |  7
 arch/powerpc/kvm/book3s_64_mmu_hv.c   |  2
 arch/powerpc/kvm/emulate.c            |  1
 arch/powerpc/kvm/powerpc.c            | 28
 6 files changed, 42 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index f8b23201c105..1e9c26f45d18 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -264,6 +264,11 @@ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
 	return vcpu->arch.pc;
 }
 
+static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
+{
+	return (vcpu->arch.shared->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
+}
+
 static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc)
 {
 	/* Load the instruction manually if it failed to do so in the
@@ -271,7 +276,8 @@ static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc)
 	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
 		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
 
-	return vcpu->arch.last_inst;
+	return kvmppc_need_byteswap(vcpu) ? swab32(vcpu->arch.last_inst) :
+		vcpu->arch.last_inst;
 }
 
 static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
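
The check above compares the guest's MSR[LE] against the kernel's own byte order, so kvmppc_get_last_inst_internal() only swaps the cached instruction image when the two disagree. Here is a minimal user-space sketch of the same decision, with swab32 modeled by __builtin_bswap32 and the MSR bits reduced to plain booleans; the names below are illustrative, not kernel API:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the MSR_LE tests; not the kernel macros. */
static bool guest_msr_le;            /* guest MSR[LE] */
static const bool kernel_le = false; /* assume a big-endian host kernel */

static bool need_byteswap(void)
{
	/* Swap only when guest and kernel byte orders disagree. */
	return guest_msr_le != kernel_le;
}

static uint32_t fixup_last_inst(uint32_t last_inst)
{
	return need_byteswap() ? __builtin_bswap32(last_inst) : last_inst;
}

int main(void)
{
	/* "stw r0,8(r1)" as written by a little-endian guest arrives
	 * byte-reversed through a big-endian fetch path. */
	guest_msr_le = true;
	assert(fixup_last_inst(0x08000190) == 0x90010008);

	/* Same guest and kernel byte order: the image is left alone. */
	guest_msr_le = false;
	assert(fixup_last_inst(0x90010008) == 0x90010008);
	return 0;
}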
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index dd8f61510dfd..80d46b5a7efb 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -63,6 +63,12 @@ static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
 	return vcpu->arch.xer;
 }
 
+static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
+{
+	/* XXX Would need to check TLB entry */
+	return false;
+}
+
 static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.last_inst;
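
The Book E stub returns false because byte order there is a page attribute rather than an MSR bit: each TLB entry carries a little-endian flag (the E bit in MAS2 on e500-class cores). A hypothetical sketch of what the XXX comment asks for, assuming a big-endian host kernel; the helper name is made up and the lookup of the matching TLB entry is left out:

#include <stdbool.h>
#include <stdint.h>

#define MAS2_E 0x00000001	/* Book E TLB: little-endian page attribute */

/* Hypothetical: decide byteswap from the MAS2 word of the TLB entry
 * that maps the faulting address, assuming a big-endian host kernel. */
static bool need_byteswap_from_tlb(uint32_t mas2)
{
	return (mas2 & MAS2_E) != 0;
}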
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index c8317fbf92c4..629277df4798 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -54,12 +54,13 @@ extern void kvmppc_handler_highmem(void);
 extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
 extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                               unsigned int rt, unsigned int bytes,
-                              int is_bigendian);
+                              int is_default_endian);
 extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                unsigned int rt, unsigned int bytes,
-                               int is_bigendian);
+                               int is_default_endian);
 extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                               u64 val, unsigned int bytes, int is_bigendian);
+                               u64 val, unsigned int bytes,
+                               int is_default_endian);
 
 extern int kvmppc_emulate_instruction(struct kvm_run *run,
                                       struct kvm_vcpu *vcpu);
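
The rename matters: callers that emulate byte-reversed accesses (lwbrx and friends) pass 0 to ask for the reverse of whatever the guest currently considers default, so the old name stopped being accurate once the default can be little endian. The combined effect is an XOR of the two conditions; a free-standing sketch of the truth table, not the kernel code:

#include <assert.h>
#include <stdbool.h>

/* is_bigendian = is_default_endian XOR need_byteswap: once the guest's
 * default byte order differs from the kernel's, "default" and "big
 * endian" part ways. Illustrative helper, not kernel API. */
static bool mmio_is_bigendian(bool need_byteswap, bool is_default_endian)
{
	return need_byteswap ? !is_default_endian : is_default_endian;
}

int main(void)
{
	/* BE guest on a BE kernel: default means big endian. */
	assert(mmio_is_bigendian(false, true) == true);
	/* LE guest on a BE kernel: default means little endian. */
	assert(mmio_is_bigendian(true, true) == false);
	/* Byte-reversed access (e.g. lwbrx) in an LE guest: big endian. */
	assert(mmio_is_bigendian(true, false) == true);
	assert(mmio_is_bigendian(false, false) == false);
	return 0;
}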
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index f3ff587a8b7d..efb8aa544876 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -558,7 +558,7 @@ static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 * we just return and retry the instruction.
 	 */
 
-	if (instruction_is_store(vcpu->arch.last_inst) != !!is_store)
+	if (instruction_is_store(kvmppc_get_last_inst(vcpu)) != !!is_store)
 		return RESUME_GUEST;
 
 	/*
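
This call site is why the swap has to happen before decode: instruction_is_store() keys off fixed bit positions in the instruction word, and those bits land in different bytes depending on how the image was fetched. A small self-contained demonstration; the opcode extraction below is generic PowerPC bit slicing, not the kernel's actual predicate:

#include <assert.h>
#include <stdint.h>

/* Major opcode = top 6 bits of the instruction word. */
static unsigned major_opcode(uint32_t inst)
{
	return inst >> 26;
}

int main(void)
{
	uint32_t stw = 0x90010008;	/* stw r0,8(r1): major opcode 36 */

	/* Decoded as fetched from a same-endian guest: correct. */
	assert(major_opcode(stw) == 36);

	/* Decoded without swapping a cross-endian image: garbage. */
	assert(major_opcode(__builtin_bswap32(stw)) != 36);
	return 0;
}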
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 2f9a0873b44f..c2b887be2c29 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -219,7 +219,6 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
  * lmw
  * stmw
  *
- * XXX is_bigendian should depend on MMU mapping or MSR[LE]
  */
 /* XXX Should probably auto-generate instruction decoding for a particular core
  * from opcode tables in the future. */
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 7ca9e0a80499..026dfaaa4772 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -673,9 +673,19 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
 }
 
 int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
-		       unsigned int rt, unsigned int bytes, int is_bigendian)
+		       unsigned int rt, unsigned int bytes,
+		       int is_default_endian)
 {
 	int idx, ret;
+	int is_bigendian;
+
+	if (kvmppc_need_byteswap(vcpu)) {
+		/* Default endianness is "little endian". */
+		is_bigendian = !is_default_endian;
+	} else {
+		/* Default endianness is "big endian". */
+		is_bigendian = is_default_endian;
+	}
 
 	if (bytes > sizeof(run->mmio.data)) {
 		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
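
Once computed, is_bigendian steers how the bytes in run->mmio.data are reassembled into a register value when the load completes. A hedged sketch of that reassembly, independent of the kernel's actual completion path:

#include <assert.h>
#include <stdint.h>

/* Assemble an MMIO result from its byte image. Sketch only: assumes
 * the emulator stored the bytes exactly as the device bus carried them. */
static uint64_t mmio_load_value(const uint8_t *data, unsigned bytes,
				int is_bigendian)
{
	uint64_t val = 0;
	unsigned i;

	for (i = 0; i < bytes; i++) {
		unsigned shift = is_bigendian ? 8 * (bytes - 1 - i) : 8 * i;
		val |= (uint64_t)data[i] << shift;
	}
	return val;
}

int main(void)
{
	const uint8_t data[4] = { 0x12, 0x34, 0x56, 0x78 };

	assert(mmio_load_value(data, 4, 1) == 0x12345678);
	assert(mmio_load_value(data, 4, 0) == 0x78563412);
	return 0;
}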
@@ -711,21 +721,31 @@ EXPORT_SYMBOL_GPL(kvmppc_handle_load);
 
 /* Same as above, but sign extends */
 int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
-			unsigned int rt, unsigned int bytes, int is_bigendian)
+			unsigned int rt, unsigned int bytes,
+			int is_default_endian)
 {
 	int r;
 
 	vcpu->arch.mmio_sign_extend = 1;
-	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
+	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);
 
 	return r;
 }
 
 int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
-		       u64 val, unsigned int bytes, int is_bigendian)
+		       u64 val, unsigned int bytes, int is_default_endian)
 {
 	void *data = run->mmio.data;
 	int idx, ret;
+	int is_bigendian;
+
+	if (kvmppc_need_byteswap(vcpu)) {
+		/* Default endianness is "little endian". */
+		is_bigendian = !is_default_endian;
+	} else {
+		/* Default endianness is "big endian". */
+		is_bigendian = is_default_endian;
+	}
 
 	if (bytes > sizeof(run->mmio.data)) {
 		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
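
The if/else block is now duplicated verbatim in kvmppc_handle_load() and kvmppc_handle_store(). One obvious follow-up, sketched here as a suggestion rather than as anything in this patch, would be to hoist it into a shared helper next to kvmppc_need_byteswap():

/* Suggested consolidation (not part of this patch): map the caller's
 * "default endian" request to a concrete byte order in one place. */
static inline int kvmppc_is_bigendian(struct kvm_vcpu *vcpu,
				      int is_default_endian)
{
	/* When the guest's default byte order differs from the kernel's,
	 * "default" flips meaning relative to "big endian". */
	return kvmppc_need_byteswap(vcpu) ? !is_default_endian
					  : is_default_endian;
}

Each handler would then open with a single line, int is_bigendian = kvmppc_is_bigendian(vcpu, is_default_endian);, instead of repeating the commented branch.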