author     Alexander Graf <agraf@suse.de>    2013-01-17 13:23:28 -0500
committer  Alexander Graf <agraf@suse.de>    2013-01-24 13:23:31 -0500
commit     9d98b3ff949dab3bafa2c50856ce9e1f88497f9a
tree       c0fd24212abc99378d6c8bb8331cc23bee1318a8 /arch/powerpc
parent     2c378fd779d2b37aed64cb44caa607707edc51d3
KVM: PPC: e500: Call kvmppc_mmu_map for initial mapping
When emulating tlbwe, we want to automatically map the entry that just got
written in our shadow TLB map, because chances are quite high that it's
going to be used very soon.

Today this happens explicitly, duplicating all the logic that is in
kvmppc_mmu_map() already. Just call that one instead.

Signed-off-by: Alexander Graf <agraf@suse.de>
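The new call (see the bottom of the diff below) packs the guest TLB coordinates (tlbsel, esel) into a single index via index_of(), which kvmppc_mmu_map() later unpacks on the other side. A minimal sketch of that round trip, assuming the helpers follow the shift-and-mask shape used in arch/powerpc/kvm/e500.h (paraphrased, not the verbatim kernel macros — check the tree for the exact definitions):

#include <stdio.h>

/* Hypothetical paraphrase of the index helpers: tlbsel in the high
 * bits, esel in the low 16 bits. Not the verbatim kernel macros. */
#define index_of(tlbsel, esel)  (((tlbsel) << 16) | ((esel) & 0xFFFF))
#define tlbsel_of(index)        ((index) >> 16)
#define esel_of(index)          ((index) & 0xFFFF)

int main(void)
{
        unsigned int index = index_of(1, 42);

        /* Round-trips back to tlbsel=1 esel=42 */
        printf("tlbsel=%u esel=%u\n", tlbsel_of(index), esel_of(index));
        return 0;
}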
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kvm/e500_tlb.c | 38
1 file changed, 7 insertions(+), 31 deletions(-)
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index 3777167e5f31..48d1a4f1f5ff 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -878,8 +878,8 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea)
 int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-	struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
-	int tlbsel, esel, stlbsel, sesel;
+	struct kvm_book3e_206_tlb_entry *gtlbe;
+	int tlbsel, esel;
 	int recal = 0;
 
 	tlbsel = get_tlb_tlbsel(vcpu);
@@ -917,40 +917,16 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 
 	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
 	if (tlbe_is_host_safe(vcpu, gtlbe)) {
-		u64 eaddr;
-		u64 raddr;
+		u64 eaddr = get_tlb_eaddr(gtlbe);
+		u64 raddr = get_tlb_raddr(gtlbe);
 
-		switch (tlbsel) {
-		case 0:
-			/* TLB0 */
+		if (tlbsel == 0) {
 			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
 			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
-
-			stlbsel = 0;
-			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
-			sesel = 0; /* unused */
-
-			break;
-
-		case 1:
-			/* TLB1 */
-			eaddr = get_tlb_eaddr(gtlbe);
-			raddr = get_tlb_raddr(gtlbe);
-
-			/* Create a 4KB mapping on the host.
-			 * If the guest wanted a large page,
-			 * only the first 4KB is mapped here and the rest
-			 * are mapped on the fly. */
-			stlbsel = 1;
-			sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
-					raddr >> PAGE_SHIFT, gtlbe, &stlbe, esel);
-			break;
-
-		default:
-			BUG();
-		}
+		}
 
-		write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
+		/* Premap the faulting page */
+		kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
 	}
 
 	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
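For reference, the dispatch that kvmppc_mmu_map() centralizes mirrors the code this patch deletes: TLB0 entries go through kvmppc_e500_tlb0_map(), TLB1 entries get an eager 4KB host mapping via kvmppc_e500_tlb1_map() (a large guest page faults in the rest on demand), and both paths end in write_stlbe(). A standalone sketch of that shape, reconstructed from the removed switch, with stub functions standing in for the real kernel helpers (names and signatures here are illustrative, not the kernel's):

#include <stdio.h>

/* Stubs standing in for the real shadow-TLB mappers. */
static void tlb0_map(int esel)            { printf("map TLB0 esel=%d\n", esel); }
static int  tlb1_map(unsigned long eaddr) { printf("map TLB1 ea=%#lx\n", eaddr); return 0; }
static void write_stlbe(int stlbsel, int sesel) { printf("write stlbe %d/%d\n", stlbsel, sesel); }

/* Shape of the dispatch kvmppc_mmu_map() performs, reconstructed
 * from the switch this patch removes from tlbwe emulation. */
static void mmu_map(int tlbsel, int esel, unsigned long eaddr)
{
        int stlbsel, sesel;

        switch (tlbsel) {
        case 0:
                stlbsel = 0;
                sesel = 0;            /* unused for TLB0 */
                tlb0_map(esel);
                break;
        case 1:
                /* Eagerly map only the first 4KB; a large guest page
                 * is filled in on the fly by later TLB misses. */
                stlbsel = 1;
                sesel = tlb1_map(eaddr);
                break;
        default:
                return;               /* the kernel code would BUG() here */
        }

        write_stlbe(stlbsel, sesel);
}

int main(void)
{
        mmu_map(1, 3, 0x10000000UL);
        return 0;
}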