about summary refs log tree commit diff stats
path: root/include/linux/kvm_host.h
diff options
context:
space:
mode:
authorChristoffer Dall <c.dall@virtualopensystems.com>2012-10-14 23:10:18 -0400
committerAvi Kivity <avi@redhat.com>2012-10-23 07:35:43 -0400
commit8ca40a70a70988c0bdea106c894843f763ca2989 (patch)
tree32879a731d622440440c501bc221da948e1e3e1c /include/linux/kvm_host.h
parent1f5b77f51a221f5eb10af08da9067fba360dc52f (diff)
KVM: Take kvm instead of vcpu to mmu_notifier_retry
The mmu_notifier_retry is not specific to any vcpu (and never will be) so only take struct kvm as a parameter. The motivation is the ARM mmu code that needs to call this from somewhere where we have long since let go of the vcpu pointer.

Signed-off-by: Christoffer Dall <c.dall@virtualopensystems.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'include/linux/kvm_host.h')
-rw-r--r-- include/linux/kvm_host.h | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6afc5be2615e..82e2c783a21e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -841,9 +841,9 @@ extern struct kvm_stats_debugfs_item debugfs_entries[];
 extern struct dentry *kvm_debugfs_dir;
 
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
-static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
+static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
 {
-	if (unlikely(vcpu->kvm->mmu_notifier_count))
+	if (unlikely(kvm->mmu_notifier_count))
 		return 1;
 	/*
 	 * Ensure the read of mmu_notifier_count happens before the read
@@ -856,7 +856,7 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se
 	 * can't rely on kvm->mmu_lock to keep things ordered.
 	 */
 	smp_rmb();
-	if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
+	if (kvm->mmu_notifier_seq != mmu_seq)
 		return 1;
 	return 0;
 }