author     Paul Mackerras <paulus@samba.org>   2011-12-14 21:02:47 -0500
committer  Avi Kivity <avi@redhat.com>         2012-03-05 07:52:39 -0500
commit     55514893739d28f095f19b012133eea4cb4a9390
tree       53133098473395a8a4ff1f00414137c2481a5fd0 /arch
parent     bad3b5075eeb18cb1641b4171618add638bc0fa7
KVM: PPC: Book3S HV: Use the hardware referenced bit for kvm_age_hva
This uses the host view of the hardware R (referenced) bit to speed up
kvm_age_hva() and kvm_test_age_hva().  Instead of removing all the relevant
HPTEs in kvm_age_hva(), we now just reset their R bits if set.  Also,
kvm_test_age_hva() now scans the relevant HPTEs to see if any of them have
R set.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
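As a rough, standalone sketch of the idea described above (not the kernel code: the
struct hpte layout, the REF_BIT value and the helper names are illustrative stand-ins),
"aging" a page now means clearing a hardware-maintained referenced bit and reporting
whether it was set, while "testing age" merely reads it:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the real HPTE layout and its R bit. */
#define REF_BIT 0x0000000000000100ULL

struct hpte {
	uint64_t v;	/* first doubleword: valid bit, AVPN, ... */
	uint64_t r;	/* second doubleword: RPN, protection, R and C bits */
};

/*
 * "Age" one translation: if the hardware marked it referenced since the
 * last scan, clear the bit (a later guest access will set it again) and
 * report that the page is young.  This replaces tearing the mapping down
 * and taking a fault to rebuild it.
 */
static bool age_hpte(struct hpte *h)
{
	if (!(h->r & REF_BIT))
		return false;
	h->r &= ~REF_BIT;	/* the kernel also invalidates the TLB entry here */
	return true;
}

/* kvm_test_age_hva() analogue: look at R without clearing it. */
static bool test_age_hpte(const struct hpte *h)
{
	return (h->r & REF_BIT) != 0;
}

int main(void)
{
	struct hpte h = { .v = 1, .r = REF_BIT | 0x3 };

	printf("young: %d\n", age_hpte(&h));		/* 1: R was set, now cleared */
	printf("young: %d\n", test_age_hpte(&h));	/* 0: not referenced since */
	return 0;
}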
Diffstat (limited to 'arch')

-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h |  2
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c   | 81
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c   | 19
3 files changed, 91 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 9240cebf8bad..33fdc09508a1 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -147,6 +147,8 @@ extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
 			unsigned long *rmap, long pte_index, int realmode);
 extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
 			unsigned long pte_index);
+void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
+			unsigned long pte_index);
 extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
 			unsigned long *nb_ret);
 extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index aa51ddef468e..926e2b92bdab 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -772,16 +772,50 @@ int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 			 unsigned long gfn)
 {
-	if (!kvm->arch.using_mmu_notifiers)
-		return 0;
-	if (!(*rmapp & KVMPPC_RMAP_REFERENCED))
-		return 0;
-	kvm_unmap_rmapp(kvm, rmapp, gfn);
-	while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmapp))
-		cpu_relax();
-	*rmapp &= ~KVMPPC_RMAP_REFERENCED;
-	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmapp);
-	return 1;
+	struct revmap_entry *rev = kvm->arch.revmap;
+	unsigned long head, i, j;
+	unsigned long *hptep;
+	int ret = 0;
+
+ retry:
+	lock_rmap(rmapp);
+	if (*rmapp & KVMPPC_RMAP_REFERENCED) {
+		*rmapp &= ~KVMPPC_RMAP_REFERENCED;
+		ret = 1;
+	}
+	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
+		unlock_rmap(rmapp);
+		return ret;
+	}
+
+	i = head = *rmapp & KVMPPC_RMAP_INDEX;
+	do {
+		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
+		j = rev[i].forw;
+
+		/* If this HPTE isn't referenced, ignore it */
+		if (!(hptep[1] & HPTE_R_R))
+			continue;
+
+		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
+			/* unlock rmap before spinning on the HPTE lock */
+			unlock_rmap(rmapp);
+			while (hptep[0] & HPTE_V_HVLOCK)
+				cpu_relax();
+			goto retry;
+		}
+
+		/* Now check and modify the HPTE */
+		if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) {
+			kvmppc_clear_ref_hpte(kvm, hptep, i);
+			rev[i].guest_rpte |= HPTE_R_R;
+			ret = 1;
+		}
+		hptep[0] &= ~HPTE_V_HVLOCK;
+	} while ((i = j) != head);
+
+	unlock_rmap(rmapp);
+	return ret;
 }
 
 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
@@ -794,7 +828,32 @@ int kvm_age_hva(struct kvm *kvm, unsigned long hva)
 static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 			      unsigned long gfn)
 {
-	return !!(*rmapp & KVMPPC_RMAP_REFERENCED);
+	struct revmap_entry *rev = kvm->arch.revmap;
+	unsigned long head, i, j;
+	unsigned long *hp;
+	int ret = 1;
+
+	if (*rmapp & KVMPPC_RMAP_REFERENCED)
+		return 1;
+
+	lock_rmap(rmapp);
+	if (*rmapp & KVMPPC_RMAP_REFERENCED)
+		goto out;
+
+	if (*rmapp & KVMPPC_RMAP_PRESENT) {
+		i = head = *rmapp & KVMPPC_RMAP_INDEX;
+		do {
+			hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
+			j = rev[i].forw;
+			if (hp[1] & HPTE_R_R)
+				goto out;
+		} while ((i = j) != head);
+	}
+	ret = 0;
+
+ out:
+	unlock_rmap(rmapp);
+	return ret;
 }
 
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
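An aside on the data structure both new loops walk: each guest page's rmap word holds
the index of the head HPTE, and the entries form a circular chain threaded through
revmap_entry::forw.  A minimal standalone sketch of that traversal pattern follows;
the simplified revmap_entry layout and helper names are hypothetical, and locking is
omitted:

#include <stdio.h>

/* Hypothetical, simplified reverse-map entry: forward link only. */
struct revmap_entry {
	unsigned long forw;	/* index of the next HPTE in the circular chain */
};

/*
 * Walk a circular chain of HPTE indexes starting at 'head', calling
 * visit() on each index, mirroring the do/while pattern in the patch.
 */
static void walk_rmap_chain(const struct revmap_entry *rev, unsigned long head,
			    void (*visit)(unsigned long idx))
{
	unsigned long i = head, j;

	do {
		j = rev[i].forw;	/* read the link before acting on i */
		visit(i);
		i = j;
	} while (i != head);
}

static void print_idx(unsigned long idx)
{
	printf("HPTE index %lu\n", idx);
}

int main(void)
{
	/* Three entries chained 0 -> 2 -> 1 -> back to 0. */
	struct revmap_entry rev[3] = { { .forw = 2 }, { .forw = 0 }, { .forw = 1 } };

	walk_rmap_chain(rev, 0, print_idx);
	return 0;
}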
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 91b45a03f438..5f3c60b89faf 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -641,6 +641,25 @@ void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
 }
 EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);
 
+void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
+			unsigned long pte_index)
+{
+	unsigned long rb;
+	unsigned char rbyte;
+
+	rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
+	rbyte = (hptep[1] & ~HPTE_R_R) >> 8;
+	/* modify only the second-last byte, which contains the ref bit */
+	*((char *)hptep + 14) = rbyte;
+	while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
+		cpu_relax();
+	asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
+		     : : "r" (rb), "r" (kvm->arch.lpid));
+	asm volatile("ptesync" : : : "memory");
+	kvm->arch.tlbie_lock = 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);
+
 static int slb_base_page_shift[4] = {
 	24,	/* 16M */
 	16,	/* 64k */
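A note on the "*((char *)hptep + 14) = rbyte" store in kvmppc_clear_ref_hpte(): the HPTE
is two big-endian 64-bit doublewords, so bits 8..15 of the second doubleword (which
contain HPTE_R_R = 0x100 but not the C bit in the lowest byte) land in byte 14 of the
16-byte entry; writing only that byte avoids clobbering a concurrent hardware update of
the C bit in byte 15.  The snippet below only illustrates that arithmetic and is not
kernel code:

#include <stdint.h>
#include <stdio.h>

/* Same value as the kernel's HPTE_R_R: the hardware referenced bit. */
#define HPTE_R_R 0x0000000000000100ULL

int main(void)
{
	/* Example second HPTE doubleword with R (0x100), C (0x80) and pp bits set. */
	uint64_t hpte_r = 0x0000000000000183ULL;

	/*
	 * The patch writes bits 8..15 of this doubleword, with R cleared,
	 * into byte 14 of the big-endian 16-byte HPTE; byte 15 holds the
	 * low byte, including the C bit, and is left untouched.
	 */
	uint8_t rbyte = (hpte_r & ~HPTE_R_R) >> 8;

	printf("rbyte = 0x%02x (R cleared, C stays in byte 15)\n", (unsigned)rbyte);
	return 0;
}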