author      Bharat Bhushan <r65777@freescale.com>    2012-03-22 14:39:11 -0400
committer   Alexander Graf <agraf@suse.de>           2012-05-06 10:19:07 -0400
commit      cc902ad4f2b7cd3dd2cc268c63f6fb99fb1abf0f (patch)
tree        52eab903f0cf03337664ac1702e8dc04763fb7ec /arch/powerpc/kvm
parent      e726b1bd64b0b8945c171d2d4bf749fba9fc0800 (diff)
KVM: Use minimum and maximum address mapped by TLB1
Keep track of the minimum and maximum addresses mapped by TLB1.
This helps TLB-miss handling in KVM quickly check whether the address lies in the mapped range.
If the address does not lie in this range, there is no need to look at each entry of the TLB1 array.
Signed-off-by: Bharat Bhushan <bharat.bhushan@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
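In isolation, the idea of the patch is a cheap pre-filter: before walking every TLB1 entry on a miss, first reject any effective address that falls outside the [min, max] window covered by the currently valid TLB1 entries. Below is a minimal sketch of that fast path; the names (tlb1_window, tlb1_lookup, entry_start, entry_end) are illustrative only and do not appear in the kernel sources.

/* Sketch only: reject addresses outside the tracked window before scanning. */
struct tlb1_window {
        unsigned long min_eaddr;  /* lowest effective address mapped by a valid entry */
        unsigned long max_eaddr;  /* highest effective address mapped by a valid entry */
};

static int tlb1_lookup(const struct tlb1_window *w,
                       const unsigned long *entry_start,
                       const unsigned long *entry_end,
                       int nentries, unsigned long eaddr)
{
        int i;

        /* Quick check: a miss is guaranteed if the address is outside the window. */
        if (eaddr < w->min_eaddr || eaddr > w->max_eaddr)
                return -1;

        /* Otherwise fall back to scanning each entry. */
        for (i = 0; i < nentries; i++)
                if (eaddr >= entry_start[i] && eaddr <= entry_end[i])
                        return i;

        return -1;
}

The window must stay conservative: it may be wider than the union of the valid entries, but never narrower, or the quick check would wrongly report a miss.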
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--   arch/powerpc/kvm/e500.h     |  4
-rw-r--r--   arch/powerpc/kvm/e500_tlb.c | 88
2 files changed, 90 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index 7967f3f10a16..aa8b81428bf4 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -89,6 +89,10 @@ struct kvmppc_vcpu_e500 {
         u64 *g2h_tlb1_map;
         unsigned int *h2g_tlb1_rmap;
 
+        /* Minimum and maximum address mapped by TLB1 */
+        unsigned long tlb1_min_eaddr;
+        unsigned long tlb1_max_eaddr;
+
 #ifdef CONFIG_KVM_E500V2
         u32 pid[E500_PID_NUM];
 
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index e05232b746ff..c510fc961302 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -261,6 +261,9 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
                 set_base = gtlb0_set_base(vcpu_e500, eaddr);
                 size = vcpu_e500->gtlb_params[0].ways;
         } else {
+                if (eaddr < vcpu_e500->tlb1_min_eaddr ||
+                                eaddr > vcpu_e500->tlb1_max_eaddr)
+                        return -1;
                 set_base = 0;
         }
 
@@ -583,6 +586,65 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
         return victim;
 }
 
+static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+        int size = vcpu_e500->gtlb_params[1].entries;
+        unsigned int offset;
+        gva_t eaddr;
+        int i;
+
+        vcpu_e500->tlb1_min_eaddr = ~0UL;
+        vcpu_e500->tlb1_max_eaddr = 0;
+        offset = vcpu_e500->gtlb_offset[1];
+
+        for (i = 0; i < size; i++) {
+                struct kvm_book3e_206_tlb_entry *tlbe =
+                        &vcpu_e500->gtlb_arch[offset + i];
+
+                if (!get_tlb_v(tlbe))
+                        continue;
+
+                eaddr = get_tlb_eaddr(tlbe);
+                vcpu_e500->tlb1_min_eaddr =
+                        min(vcpu_e500->tlb1_min_eaddr, eaddr);
+
+                eaddr = get_tlb_end(tlbe);
+                vcpu_e500->tlb1_max_eaddr =
+                        max(vcpu_e500->tlb1_max_eaddr, eaddr);
+        }
+}
+
+static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500,
+                                struct kvm_book3e_206_tlb_entry *gtlbe)
+{
+        unsigned long start, end, size;
+
+        size = get_tlb_bytes(gtlbe);
+        start = get_tlb_eaddr(gtlbe) & ~(size - 1);
+        end = start + size - 1;
+
+        return vcpu_e500->tlb1_min_eaddr == start ||
+               vcpu_e500->tlb1_max_eaddr == end;
+}
+
+/* This function is supposed to be called for adding a new valid tlb entry */
+static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu,
+                                struct kvm_book3e_206_tlb_entry *gtlbe)
+{
+        unsigned long start, end, size;
+        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
+        if (!get_tlb_v(gtlbe))
+                return;
+
+        size = get_tlb_bytes(gtlbe);
+        start = get_tlb_eaddr(gtlbe) & ~(size - 1);
+        end = start + size - 1;
+
+        vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start);
+        vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end);
+}
+
 static inline int kvmppc_e500_gtlbe_invalidate(
                                 struct kvmppc_vcpu_e500 *vcpu_e500,
                                 int tlbsel, int esel)
@@ -593,6 +655,9 @@ static inline int kvmppc_e500_gtlbe_invalidate(
         if (unlikely(get_tlb_iprot(gtlbe)))
                 return -1;
 
+        if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
+                kvmppc_recalc_tlb1map_range(vcpu_e500);
+
         gtlbe->mas1 = 0;
 
         return 0;
@@ -792,14 +857,19 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
         struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
         struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
         int tlbsel, esel, stlbsel, sesel;
+        int recal = 0;
 
         tlbsel = get_tlb_tlbsel(vcpu);
         esel = get_tlb_esel(vcpu, tlbsel);
 
         gtlbe = get_entry(vcpu_e500, tlbsel, esel);
 
-        if (get_tlb_v(gtlbe))
+        if (get_tlb_v(gtlbe)) {
                 inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
+                if ((tlbsel == 1) &&
+                        kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
+                        recal = 1;
+        }
 
         gtlbe->mas1 = vcpu->arch.shared->mas1;
         gtlbe->mas2 = vcpu->arch.shared->mas2;
@@ -808,6 +878,18 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
         trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
                                       gtlbe->mas2, gtlbe->mas7_3);
 
+        if (tlbsel == 1) {
+                /*
+                 * If a valid tlb1 entry is overwritten then recalculate the
+                 * min/max TLB1 map address range otherwise no need to look
+                 * in tlb1 array.
+                 */
+                if (recal)
+                        kvmppc_recalc_tlb1map_range(vcpu_e500);
+                else
+                        kvmppc_set_tlb1map_range(vcpu, gtlbe);
+        }
+
         /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
         if (tlbe_is_host_safe(vcpu, gtlbe)) {
                 u64 eaddr;
@@ -1145,6 +1227,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
         vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
         vcpu_e500->gtlb_params[1].sets = 1;
 
+        kvmppc_recalc_tlb1map_range(vcpu_e500);
         return 0;
 
 err_put_page:
@@ -1163,7 +1246,7 @@ int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
                              struct kvm_dirty_tlb *dirty)
 {
         struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-
+        kvmppc_recalc_tlb1map_range(vcpu_e500);
         clear_tlb_refs(vcpu_e500);
         return 0;
 }
@@ -1272,6 +1355,7 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
         vcpu->arch.tlbcfg[1] |=
                 vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT;
 
+        kvmppc_recalc_tlb1map_range(vcpu_e500);
         return 0;
 
 err:
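To summarize the maintenance rule the diff implements: writing a new valid TLB1 entry can only widen the tracked range, so kvmppc_set_tlb1map_range() simply applies min()/max(); a full rescan via kvmppc_recalc_tlb1map_range() is required only when the entry being invalidated or overwritten sits on the current boundary, which is what kvmppc_need_recalc_tlb1map_range() tests. A minimal sketch of that invariant, using invented helper names (tlb1_window_extend, tlb1_window_needs_recalc) that are not part of the patch:

/* Sketch only: adding a valid entry widens the window in place. */
static void tlb1_window_extend(unsigned long *min_eaddr, unsigned long *max_eaddr,
                               unsigned long start, unsigned long end)
{
        if (start < *min_eaddr)
                *min_eaddr = start;
        if (end > *max_eaddr)
                *max_eaddr = end;
}

/* Sketch only: a rescan is needed only when a boundary entry disappears. */
static int tlb1_window_needs_recalc(unsigned long min_eaddr, unsigned long max_eaddr,
                                    unsigned long start, unsigned long end)
{
        return min_eaddr == start || max_eaddr == end;
}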