author		Alexander Graf <agraf@suse.de>		2014-06-11 04:16:06 -0400
committer	Alexander Graf <agraf@suse.de>		2014-07-28 09:22:22 -0400
commit		6f22bd3265fb542acb2697026b953ec07298242d (patch)
tree		857f31bd9f100282b0e833b26a5d45bbc23a3a5d /arch/powerpc/kvm/book3s_64_mmu_hv.c
parent		8f6822c4b9fac6e47414d2f1e11dbabda9bc2163 (diff)
KVM: PPC: Book3S HV: Make HTAB code LE host aware
When running on an LE host, all data structures are kept in little endian
byte order. However, the HTAB still needs to be maintained in big endian
byte order, so every time we access the HTAB we need to make sure we do so
in the right byte order. Fix up all accesses to byte swap manually.
Signed-off-by: Alexander Graf <agraf@suse.de>
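
To make the conversion pattern concrete, here is a minimal sketch (not part of the patch itself) of the discipline the diff below applies throughout: HPTE pointers become __be64 *, every load goes through be64_to_cpu(), every store goes through cpu_to_be64(), and a read-modify-write of a single flag swaps the constant once instead of swapping the entry twice. HPTE_V_HVLOCK and HPTE_V_ABSENT are the kernel's existing HPTE flag definitions; the helper names here are illustrative only.

/* Sketch, assuming kernel context (<linux/types.h>, <asm/byteorder.h>). */
static inline unsigned long hpte_load_v(__be64 *hptep)
{
	/* swap on load: work on a CPU-byte-order copy of the BE entry */
	return be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
}

static inline void hpte_store_v(__be64 *hptep, unsigned long v)
{
	/* swap on store: the in-memory table entry stays big endian */
	hptep[0] = cpu_to_be64(v);
}

static inline void hpte_mark_absent(__be64 *hptep)
{
	/*
	 * For a single-flag update it is cheaper to byte swap the
	 * constant once than to swap the entry twice, hence forms like
	 * hptep[0] |= cpu_to_be64(HPTE_V_ABSENT) in the diff below.
	 */
	hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
}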
Diffstat (limited to 'arch/powerpc/kvm/book3s_64_mmu_hv.c')
-rw-r--r--	arch/powerpc/kvm/book3s_64_mmu_hv.c	128
1 file changed, 67 insertions(+), 61 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 80561074078d..2d154d9319b3 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -450,7 +450,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	unsigned long slb_v;
 	unsigned long pp, key;
 	unsigned long v, gr;
-	unsigned long *hptep;
+	__be64 *hptep;
 	int index;
 	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
 
@@ -473,13 +473,13 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 		preempt_enable();
 		return -ENOENT;
 	}
-	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
-	v = hptep[0] & ~HPTE_V_HVLOCK;
+	hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
+	v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
 	gr = kvm->arch.revmap[index].guest_rpte;
 
 	/* Unlock the HPTE */
 	asm volatile("lwsync" : : : "memory");
-	hptep[0] = v;
+	hptep[0] = cpu_to_be64(v);
 	preempt_enable();
 
 	gpte->eaddr = eaddr;
@@ -583,7 +583,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				unsigned long ea, unsigned long dsisr)
 {
 	struct kvm *kvm = vcpu->kvm;
-	unsigned long *hptep, hpte[3], r;
+	unsigned long hpte[3], r;
+	__be64 *hptep;
 	unsigned long mmu_seq, psize, pte_size;
 	unsigned long gpa_base, gfn_base;
 	unsigned long gpa, gfn, hva, pfn;
@@ -606,16 +607,16 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	if (ea != vcpu->arch.pgfault_addr)
 		return RESUME_GUEST;
 	index = vcpu->arch.pgfault_index;
-	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
+	hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
 	rev = &kvm->arch.revmap[index];
 	preempt_disable();
 	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 		cpu_relax();
-	hpte[0] = hptep[0] & ~HPTE_V_HVLOCK;
-	hpte[1] = hptep[1];
+	hpte[0] = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
+	hpte[1] = be64_to_cpu(hptep[1]);
 	hpte[2] = r = rev->guest_rpte;
 	asm volatile("lwsync" : : : "memory");
-	hptep[0] = hpte[0];
+	hptep[0] = cpu_to_be64(hpte[0]);
 	preempt_enable();
 
 	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
@@ -731,8 +732,9 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	preempt_disable();
 	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 		cpu_relax();
-	if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] ||
-	    rev->guest_rpte != hpte[2])
+	if ((be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK) != hpte[0] ||
+	    be64_to_cpu(hptep[1]) != hpte[1] ||
+	    rev->guest_rpte != hpte[2])
 		/* HPTE has been changed under us; let the guest retry */
 		goto out_unlock;
 	hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
@@ -752,20 +754,20 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
 	r &= rcbits | ~(HPTE_R_R | HPTE_R_C);
 
-	if (hptep[0] & HPTE_V_VALID) {
+	if (be64_to_cpu(hptep[0]) & HPTE_V_VALID) {
 		/* HPTE was previously valid, so we need to invalidate it */
 		unlock_rmap(rmap);
-		hptep[0] |= HPTE_V_ABSENT;
+		hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
 		kvmppc_invalidate_hpte(kvm, hptep, index);
 		/* don't lose previous R and C bits */
-		r |= hptep[1] & (HPTE_R_R | HPTE_R_C);
+		r |= be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
 	} else {
 		kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
 	}
 
-	hptep[1] = r;
+	hptep[1] = cpu_to_be64(r);
 	eieio();
-	hptep[0] = hpte[0];
+	hptep[0] = cpu_to_be64(hpte[0]);
 	asm volatile("ptesync" : : : "memory");
 	preempt_enable();
 	if (page && hpte_is_writable(r))
@@ -784,7 +786,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	return ret;
 
  out_unlock:
-	hptep[0] &= ~HPTE_V_HVLOCK;
+	hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
 	preempt_enable();
 	goto out_put;
 }
@@ -860,7 +862,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 {
 	struct revmap_entry *rev = kvm->arch.revmap;
 	unsigned long h, i, j;
-	unsigned long *hptep;
+	__be64 *hptep;
 	unsigned long ptel, psize, rcbits;
 
 	for (;;) {
@@ -876,11 +878,11 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 		 * rmap chain lock.
 		 */
 		i = *rmapp & KVMPPC_RMAP_INDEX;
-		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
+		hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
 		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
 			/* unlock rmap before spinning on the HPTE lock */
 			unlock_rmap(rmapp);
-			while (hptep[0] & HPTE_V_HVLOCK)
+			while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
 				cpu_relax();
 			continue;
 		}
@@ -899,14 +901,14 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
 		/* Now check and modify the HPTE */
 		ptel = rev[i].guest_rpte;
-		psize = hpte_page_size(hptep[0], ptel);
-		if ((hptep[0] & HPTE_V_VALID) &&
+		psize = hpte_page_size(be64_to_cpu(hptep[0]), ptel);
+		if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
 		    hpte_rpn(ptel, psize) == gfn) {
 			if (kvm->arch.using_mmu_notifiers)
-				hptep[0] |= HPTE_V_ABSENT;
+				hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
 			kvmppc_invalidate_hpte(kvm, hptep, i);
 			/* Harvest R and C */
-			rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
+			rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
 			*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
 			if (rcbits & ~rev[i].guest_rpte) {
 				rev[i].guest_rpte = ptel | rcbits;
@@ -914,7 +916,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 			}
 		}
 		unlock_rmap(rmapp);
-		hptep[0] &= ~HPTE_V_HVLOCK;
+		hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
 	}
 	return 0;
 }
@@ -961,7 +963,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 {
 	struct revmap_entry *rev = kvm->arch.revmap;
 	unsigned long head, i, j;
-	unsigned long *hptep;
+	__be64 *hptep;
 	int ret = 0;
 
  retry:
@@ -977,23 +979,24 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
 	i = head = *rmapp & KVMPPC_RMAP_INDEX;
 	do {
-		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
+		hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
 		j = rev[i].forw;
 
 		/* If this HPTE isn't referenced, ignore it */
-		if (!(hptep[1] & HPTE_R_R))
+		if (!(be64_to_cpu(hptep[1]) & HPTE_R_R))
 			continue;
 
 		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
 			/* unlock rmap before spinning on the HPTE lock */
 			unlock_rmap(rmapp);
-			while (hptep[0] & HPTE_V_HVLOCK)
+			while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
 				cpu_relax();
 			goto retry;
 		}
 
 		/* Now check and modify the HPTE */
-		if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) {
+		if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
+		    (be64_to_cpu(hptep[1]) & HPTE_R_R)) {
 			kvmppc_clear_ref_hpte(kvm, hptep, i);
 			if (!(rev[i].guest_rpte & HPTE_R_R)) {
 				rev[i].guest_rpte |= HPTE_R_R;
@@ -1001,7 +1004,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 			}
 			ret = 1;
 		}
-		hptep[0] &= ~HPTE_V_HVLOCK;
+		hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
 	} while ((i = j) != head);
 
 	unlock_rmap(rmapp);
@@ -1035,7 +1038,7 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 		do {
 			hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
 			j = rev[i].forw;
-			if (hp[1] & HPTE_R_R)
+			if (be64_to_cpu(hp[1]) & HPTE_R_R)
 				goto out;
 		} while ((i = j) != head);
 	}
@@ -1075,7 +1078,7 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
 	unsigned long head, i, j;
 	unsigned long n;
 	unsigned long v, r;
-	unsigned long *hptep;
+	__be64 *hptep;
 	int npages_dirty = 0;
 
  retry:
@@ -1091,7 +1094,8 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
 
 	i = head = *rmapp & KVMPPC_RMAP_INDEX;
 	do {
-		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
+		unsigned long hptep1;
+		hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
 		j = rev[i].forw;
 
 		/*
@@ -1108,29 +1112,30 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
 		 * Otherwise we need to do the tlbie even if C==0 in
 		 * order to pick up any delayed writeback of C.
 		 */
-		if (!(hptep[1] & HPTE_R_C) &&
-		    (!hpte_is_writable(hptep[1]) || vcpus_running(kvm)))
+		hptep1 = be64_to_cpu(hptep[1]);
+		if (!(hptep1 & HPTE_R_C) &&
+		    (!hpte_is_writable(hptep1) || vcpus_running(kvm)))
 			continue;
 
 		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
 			/* unlock rmap before spinning on the HPTE lock */
 			unlock_rmap(rmapp);
-			while (hptep[0] & HPTE_V_HVLOCK)
+			while (hptep[0] & cpu_to_be64(HPTE_V_HVLOCK))
 				cpu_relax();
 			goto retry;
 		}
 
 		/* Now check and modify the HPTE */
-		if (!(hptep[0] & HPTE_V_VALID))
+		if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID)))
 			continue;
 
 		/* need to make it temporarily absent so C is stable */
-		hptep[0] |= HPTE_V_ABSENT;
+		hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
 		kvmppc_invalidate_hpte(kvm, hptep, i);
-		v = hptep[0];
-		r = hptep[1];
+		v = be64_to_cpu(hptep[0]);
+		r = be64_to_cpu(hptep[1]);
 		if (r & HPTE_R_C) {
-			hptep[1] = r & ~HPTE_R_C;
+			hptep[1] = cpu_to_be64(r & ~HPTE_R_C);
 			if (!(rev[i].guest_rpte & HPTE_R_C)) {
 				rev[i].guest_rpte |= HPTE_R_C;
 				note_hpte_modification(kvm, &rev[i]);
@@ -1143,7 +1148,7 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
 		}
 		v &= ~(HPTE_V_ABSENT | HPTE_V_HVLOCK);
 		v |= HPTE_V_VALID;
-		hptep[0] = v;
+		hptep[0] = cpu_to_be64(v);
 	} while ((i = j) != head);
 
 	unlock_rmap(rmapp);
@@ -1307,7 +1312,7 @@ struct kvm_htab_ctx {
  * Returns 1 if this HPT entry has been modified or has pending
  * R/C bit changes.
  */
-static int hpte_dirty(struct revmap_entry *revp, unsigned long *hptp)
+static int hpte_dirty(struct revmap_entry *revp, __be64 *hptp)
 {
 	unsigned long rcbits_unset;
 
@@ -1316,13 +1321,14 @@ static int hpte_dirty(struct revmap_entry *revp, unsigned long *hptp)
 
 	/* Also need to consider changes in reference and changed bits */
 	rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
-	if ((hptp[0] & HPTE_V_VALID) && (hptp[1] & rcbits_unset))
+	if ((be64_to_cpu(hptp[0]) & HPTE_V_VALID) &&
+	    (be64_to_cpu(hptp[1]) & rcbits_unset))
 		return 1;
 
 	return 0;
 }
 
-static long record_hpte(unsigned long flags, unsigned long *hptp,
+static long record_hpte(unsigned long flags, __be64 *hptp,
 			unsigned long *hpte, struct revmap_entry *revp,
 			int want_valid, int first_pass)
 {
@@ -1337,10 +1343,10 @@ static long record_hpte(unsigned long flags, unsigned long *hptp,
 		return 0;
 
 	valid = 0;
-	if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) {
+	if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) {
 		valid = 1;
 		if ((flags & KVM_GET_HTAB_BOLTED_ONLY) &&
-		    !(hptp[0] & HPTE_V_BOLTED))
+		    !(be64_to_cpu(hptp[0]) & HPTE_V_BOLTED))
 			valid = 0;
 	}
 	if (valid != want_valid)
@@ -1352,7 +1358,7 @@ static long record_hpte(unsigned long flags, unsigned long *hptp,
 		preempt_disable();
 		while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
 			cpu_relax();
-		v = hptp[0];
+		v = be64_to_cpu(hptp[0]);
 
 		/* re-evaluate valid and dirty from synchronized HPTE value */
 		valid = !!(v & HPTE_V_VALID);
@@ -1360,9 +1366,9 @@ static long record_hpte(unsigned long flags, unsigned long *hptp,
 
 		/* Harvest R and C into guest view if necessary */
 		rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
-		if (valid && (rcbits_unset & hptp[1])) {
-			revp->guest_rpte |= (hptp[1] & (HPTE_R_R | HPTE_R_C)) |
-				HPTE_GR_MODIFIED;
+		if (valid && (rcbits_unset & be64_to_cpu(hptp[1]))) {
+			revp->guest_rpte |= (be64_to_cpu(hptp[1]) &
+				(HPTE_R_R | HPTE_R_C)) | HPTE_GR_MODIFIED;
 			dirty = 1;
 		}
 
@@ -1381,13 +1387,13 @@ static long record_hpte(unsigned long flags, unsigned long *hptp,
 			revp->guest_rpte = r;
 		}
 		asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
-		hptp[0] &= ~HPTE_V_HVLOCK;
+		hptp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
 		preempt_enable();
 		if (!(valid == want_valid && (first_pass || dirty)))
 			ok = 0;
 	}
-	hpte[0] = v;
-	hpte[1] = r;
+	hpte[0] = cpu_to_be64(v);
+	hpte[1] = cpu_to_be64(r);
 	return ok;
 }
 
@@ -1397,7 +1403,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
 	struct kvm_htab_ctx *ctx = file->private_data;
 	struct kvm *kvm = ctx->kvm;
 	struct kvm_get_htab_header hdr;
-	unsigned long *hptp;
+	__be64 *hptp;
 	struct revmap_entry *revp;
 	unsigned long i, nb, nw;
 	unsigned long __user *lbuf;
@@ -1413,7 +1419,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
 	flags = ctx->flags;
 
 	i = ctx->index;
-	hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
+	hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
 	revp = kvm->arch.revmap + i;
 	lbuf = (unsigned long __user *)buf;
 
@@ -1497,7 +1503,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 	unsigned long i, j;
 	unsigned long v, r;
 	unsigned long __user *lbuf;
-	unsigned long *hptp;
+	__be64 *hptp;
 	unsigned long tmp[2];
 	ssize_t nb;
 	long int err, ret;
@@ -1539,7 +1545,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 		    i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte)
 			break;
 
-		hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
+		hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
 		lbuf = (unsigned long __user *)buf;
 		for (j = 0; j < hdr.n_valid; ++j) {
 			err = -EFAULT;
@@ -1551,7 +1557,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 			lbuf += 2;
 			nb += HPTE_SIZE;
 
-			if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT))
+			if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))
 				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
 			err = -EIO;
 			ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r,
@@ -1577,7 +1583,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 		}
 
 		for (j = 0; j < hdr.n_invalid; ++j) {
-			if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT))
+			if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))
 				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
 			++i;
 			hptp += 2;