author    Christoffer Dall <c.dall@virtualopensystems.com>    2012-10-14 23:10:18 -0400
committer Avi Kivity <avi@redhat.com>                         2012-10-23 07:35:43 -0400
commit    8ca40a70a70988c0bdea106c894843f763ca2989 (patch)
tree      32879a731d622440440c501bc221da948e1e3e1c
parent    1f5b77f51a221f5eb10af08da9067fba360dc52f (diff)
KVM: Take kvm instead of vcpu to mmu_notifier_retry

The mmu_notifier_retry() check is not specific to any vcpu (and never will
be), so take only struct kvm as a parameter. The motivation is the ARM mmu
code, which needs to call this from a point where the vcpu pointer has long
since been let go.

Signed-off-by: Christoffer Dall <c.dall@virtualopensystems.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c  2 +-
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c  2 +-
-rw-r--r--  arch/x86/kvm/mmu.c                   4 ++--
-rw-r--r--  arch/x86/kvm/paging_tmpl.h           2 +-
-rw-r--r--  include/linux/kvm_host.h             6 +++---
5 files changed, 8 insertions(+), 8 deletions(-)
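The hunks below are mechanical: every existing caller switches from passing
vcpu to passing vcpu->kvm. The payoff is for callers that never had a vcpu in
scope, which can now use the helper directly. A minimal sketch of that
pattern under the new signature (example_map_page and its -EAGAIN convention
are hypothetical, not part of this patch):

/* Illustrative only; assumes linux/kvm_host.h is included. */
static int example_map_page(struct kvm *kvm, gfn_t gfn, unsigned long mmu_seq)
{
	int ret = 0;

	spin_lock(&kvm->mmu_lock);
	/* After this patch, the retry check needs no vcpu pointer. */
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		ret = -EAGAIN;	/* an invalidation raced with us; caller retries */
		goto out_unlock;
	}
	/* ... install the mapping for gfn while holding mmu_lock ... */
out_unlock:
	spin_unlock(&kvm->mmu_lock);
	return ret;
}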
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 7a4aae99ac5..2a89a36e726 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -710,7 +710,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	/* Check if we might have been invalidated; let the guest retry if so */
 	ret = RESUME_GUEST;
-	if (mmu_notifier_retry(vcpu, mmu_seq)) {
+	if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
 		unlock_rmap(rmap);
 		goto out_unlock;
 	}
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 9955216477a..5e06e315388 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -297,7 +297,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 	lock_rmap(rmap);
 	/* Check for pending invalidations under the rmap chain lock */
 	if (kvm->arch.using_mmu_notifiers &&
-	    mmu_notifier_retry(vcpu, mmu_seq)) {
+	    mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
 		/* inval in progress, write a non-present HPTE */
 		pteh |= HPTE_V_ABSENT;
 		pteh &= ~HPTE_V_VALID;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3d5ca793938..6f78fa3a470 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2886,7 +2886,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 		return r;
 
 	spin_lock(&vcpu->kvm->mmu_lock);
-	if (mmu_notifier_retry(vcpu, mmu_seq))
+	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
 		goto out_unlock;
 	kvm_mmu_free_some_pages(vcpu);
 	if (likely(!force_pt_level))
@@ -3355,7 +3355,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 		return r;
 
 	spin_lock(&vcpu->kvm->mmu_lock);
-	if (mmu_notifier_retry(vcpu, mmu_seq))
+	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
 		goto out_unlock;
 	kvm_mmu_free_some_pages(vcpu);
 	if (likely(!force_pt_level))
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index f887e4cfc1f..d17decaf1db 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -565,7 +565,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 		return r;
 
 	spin_lock(&vcpu->kvm->mmu_lock);
-	if (mmu_notifier_retry(vcpu, mmu_seq))
+	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
 		goto out_unlock;
 
 	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6afc5be2615..82e2c783a21 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -841,9 +841,9 @@ extern struct kvm_stats_debugfs_item debugfs_entries[];
 extern struct dentry *kvm_debugfs_dir;
 
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
-static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
+static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
 {
-	if (unlikely(vcpu->kvm->mmu_notifier_count))
+	if (unlikely(kvm->mmu_notifier_count))
 		return 1;
 	/*
 	 * Ensure the read of mmu_notifier_count happens before the read
@@ -856,7 +856,7 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se
 	 * can't rely on kvm->mmu_lock to keep things ordered.
 	 */
 	smp_rmb();
-	if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
+	if (kvm->mmu_notifier_seq != mmu_seq)
 		return 1;
 	return 0;
 }
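As the helper shows, the caller retries when either an invalidation is in
flight (mmu_notifier_count nonzero) or the sequence number has moved since it
was sampled. The sampling side of the protocol, visible around the x86 call
sites above, looks roughly like this (a sketch; error handling is elided and
gfn is assumed to be in scope):

	unsigned long mmu_seq;
	pfn_t pfn;

	mmu_seq = kvm->mmu_notifier_seq;	/* snapshot before the pfn lookup */
	smp_rmb();				/* order the snapshot against the lookup */

	pfn = gfn_to_pfn(kvm, gfn);		/* may sleep; may race with an invalidate */

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq))	/* the race happened: drop pfn, retry */
		goto out_unlock;
	/* ... safe to install pfn while holding mmu_lock ... */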