 arch/powerpc/include/asm/kvm_host.h |  2 +-
 arch/powerpc/kvm/powerpc.c          | 38 ++++++++++++++++++--------------------
 2 files changed, 19 insertions(+), 21 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 8ef05121d3cd..c610961720c7 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -585,7 +585,7 @@ struct kvm_vcpu_arch {
 	pgd_t *pgdir;
 
 	u8 io_gpr; /* GPR used as IO source/target */
-	u8 mmio_is_bigendian;
+	u8 mmio_host_swabbed;
 	u8 mmio_sign_extend;
 	u8 osi_needed;
 	u8 osi_enabled;
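
A reading of the rename above (my gloss, not the commit message): the old field name described the byte order of the MMIO data, which is only an accurate description on a big-endian host, while the new name records what the completion path actually tests, namely whether the host byte-swapped the data in run->mmio.data. In powerpc.c below the flag is derived from kvmppc_need_byteswap(vcpu) and is_default_endian; logically it is kvmppc_need_byteswap(vcpu) XOR !is_default_endian, spelled out as an if/else because both inputs are plain ints (hence the "Pity C doesn't have a logical XOR operator" comment the patch adds). A minimal userspace sketch of that derivation, with an invented host_swabbed_demo() helper and a truth-table main(), purely for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the two inputs used in kvmppc_handle_load/store:
 *   need_byteswap     ~ kvmppc_need_byteswap(vcpu)
 *   is_default_endian ~ the is_default_endian argument
 */
static bool host_swabbed_demo(bool need_byteswap, bool is_default_endian)
{
	/* Same two-branch structure as the patch; on proper booleans this is
	 * equivalent to need_byteswap ^ !is_default_endian. */
	if (need_byteswap)
		return is_default_endian;
	else
		return !is_default_endian;
}

int main(void)
{
	for (int nb = 0; nb <= 1; nb++)
		for (int de = 0; de <= 1; de++)
			printf("need_byteswap=%d is_default_endian=%d -> host_swabbed=%d\n",
			       nb, de, (int)host_swabbed_demo(nb, de));
	return 0;
}
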
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 27c0face86f4..41c5f8f8a20d 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -720,7 +720,7 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
 		return;
 	}
 
-	if (vcpu->arch.mmio_is_bigendian) {
+	if (!vcpu->arch.mmio_host_swabbed) {
 		switch (run->mmio.len) {
 		case 8: gpr = *(u64 *)run->mmio.data; break;
 		case 4: gpr = *(u32 *)run->mmio.data; break;
@@ -728,10 +728,10 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
 		case 1: gpr = *(u8 *)run->mmio.data; break;
 		}
 	} else {
-		/* Convert BE data from userland back to LE. */
 		switch (run->mmio.len) {
-		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
-		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
+		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
+		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
+		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
 		case 1: gpr = *(u8 *)run->mmio.data; break;
 		}
 	}
@@ -780,14 +780,13 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		       int is_default_endian)
 {
 	int idx, ret;
-	int is_bigendian;
+	bool host_swabbed;
 
+	/* Pity C doesn't have a logical XOR operator */
 	if (kvmppc_need_byteswap(vcpu)) {
-		/* Default endianness is "little endian". */
-		is_bigendian = !is_default_endian;
+		host_swabbed = is_default_endian;
 	} else {
-		/* Default endianness is "big endian". */
-		is_bigendian = is_default_endian;
+		host_swabbed = !is_default_endian;
 	}
 
 	if (bytes > sizeof(run->mmio.data)) {
@@ -800,7 +799,7 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	run->mmio.is_write = 0;
 
 	vcpu->arch.io_gpr = rt;
-	vcpu->arch.mmio_is_bigendian = is_bigendian;
+	vcpu->arch.mmio_host_swabbed = host_swabbed;
 	vcpu->mmio_needed = 1;
 	vcpu->mmio_is_write = 0;
 	vcpu->arch.mmio_sign_extend = 0;
@@ -840,14 +839,13 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 {
 	void *data = run->mmio.data;
 	int idx, ret;
-	int is_bigendian;
+	bool host_swabbed;
 
+	/* Pity C doesn't have a logical XOR operator */
 	if (kvmppc_need_byteswap(vcpu)) {
-		/* Default endianness is "little endian". */
-		is_bigendian = !is_default_endian;
+		host_swabbed = is_default_endian;
 	} else {
-		/* Default endianness is "big endian". */
-		is_bigendian = is_default_endian;
+		host_swabbed = !is_default_endian;
 	}
 
 	if (bytes > sizeof(run->mmio.data)) {
@@ -862,7 +860,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	vcpu->mmio_is_write = 1;
 
 	/* Store the value at the lowest bytes in 'data'. */
-	if (is_bigendian) {
+	if (!host_swabbed) {
 		switch (bytes) {
 		case 8: *(u64 *)data = val; break;
 		case 4: *(u32 *)data = val; break;
@@ -870,11 +868,11 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		case 1: *(u8 *)data = val; break;
 		}
 	} else {
-		/* Store LE value into 'data'. */
 		switch (bytes) {
-		case 4: st_le32(data, val); break;
-		case 2: st_le16(data, val); break;
+		case 8: *(u64 *)data = swab64(val); break;
+		case 4: *(u32 *)data = swab32(val); break;
+		case 2: *(u16 *)data = swab16(val); break;
 		case 1: *(u8 *)data = val; break;
 		}
 	}
 
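
The load and store hunks also drop the old ld_le32/ld_le16 and st_le32/st_le16 helpers in favour of swab64/swab32/swab16, which additionally covers the 8-byte case that the byte-reversed paths previously left unhandled. Below is a self-contained userspace sketch of the patched store path, with __builtin_bswapNN standing in for the kernel's swabNN and an invented demo_store() helper; it restructures the switch slightly (swap first, then one memcpy per width) and is an illustration only, not the kernel code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the intent of the patched kvmppc_handle_store() body: store 'val'
 * into 'data' as-is when the host did not swap, byte-reversed otherwise. */
static void demo_store(uint8_t *data, uint64_t val, unsigned int bytes,
		       int host_swabbed)
{
	uint64_t v64 = val;
	uint32_t v32 = (uint32_t)val;
	uint16_t v16 = (uint16_t)val;

	if (host_swabbed) {
		v64 = __builtin_bswap64(v64);
		v32 = __builtin_bswap32(v32);
		v16 = __builtin_bswap16(v16);
	}

	switch (bytes) {
	case 8: memcpy(data, &v64, 8); break;
	case 4: memcpy(data, &v32, 4); break;
	case 2: memcpy(data, &v16, 2); break;
	case 1: data[0] = (uint8_t)val; break;
	}
}

int main(void)
{
	uint8_t buf[8] = { 0 };

	demo_store(buf, 0x1122334455667788ULL, 8, 1);	/* swapped 8-byte store */
	for (int i = 0; i < 8; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}
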