aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--arch/powerpc/kvm/e500_tlb.c38
1 file changed, 7 insertions, 31 deletions
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index 3777167e5f31..48d1a4f1f5ff 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -878,8 +878,8 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea)
878int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) 878int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
879{ 879{
880 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 880 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
881 struct kvm_book3e_206_tlb_entry *gtlbe, stlbe; 881 struct kvm_book3e_206_tlb_entry *gtlbe;
882 int tlbsel, esel, stlbsel, sesel; 882 int tlbsel, esel;
883 int recal = 0; 883 int recal = 0;
884 884
885 tlbsel = get_tlb_tlbsel(vcpu); 885 tlbsel = get_tlb_tlbsel(vcpu);
@@ -917,40 +917,16 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
917 917
918 /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ 918 /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
919 if (tlbe_is_host_safe(vcpu, gtlbe)) { 919 if (tlbe_is_host_safe(vcpu, gtlbe)) {
920 u64 eaddr; 920 u64 eaddr = get_tlb_eaddr(gtlbe);
921 u64 raddr; 921 u64 raddr = get_tlb_raddr(gtlbe);
922 922
923 switch (tlbsel) { 923 if (tlbsel == 0) {
924 case 0:
925 /* TLB0 */
926 gtlbe->mas1 &= ~MAS1_TSIZE(~0); 924 gtlbe->mas1 &= ~MAS1_TSIZE(~0);
927 gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K); 925 gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
928
929 stlbsel = 0;
930 kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
931 sesel = 0; /* unused */
932
933 break;
934
935 case 1:
936 /* TLB1 */
937 eaddr = get_tlb_eaddr(gtlbe);
938 raddr = get_tlb_raddr(gtlbe);
939
940 /* Create a 4KB mapping on the host.
941 * If the guest wanted a large page,
942 * only the first 4KB is mapped here and the rest
943 * are mapped on the fly. */
944 stlbsel = 1;
945 sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
946 raddr >> PAGE_SHIFT, gtlbe, &stlbe, esel);
947 break;
948
949 default:
950 BUG();
951 } 926 }
952 927
953 write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel); 928 /* Premap the faulting page */
929 kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
954 } 930 }
955 931
956 kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS); 932 kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);