diff options
author:    Christoffer Dall <c.dall@virtualopensystems.com>  2012-10-14 23:10:18 -0400
committer: Avi Kivity <avi@redhat.com>                       2012-10-23 07:35:43 -0400
commit:    8ca40a70a70988c0bdea106c894843f763ca2989 (patch)
tree:      32879a731d622440440c501bc221da948e1e3e1c /arch
parent:    1f5b77f51a221f5eb10af08da9067fba360dc52f (diff)
KVM: Take kvm instead of vcpu to mmu_notifier_retry
The mmu_notifier_retry is not specific to any vcpu (and never will be)
so only take struct kvm as a parameter.
The motivation is the ARM mmu code that needs to call this from
somewhere where we have long since let go of the vcpu pointer.
Signed-off-by: Christoffer Dall <c.dall@virtualopensystems.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
 arch/powerpc/kvm/book3s_64_mmu_hv.c | 2
 arch/powerpc/kvm/book3s_hv_rm_mmu.c | 2
 arch/x86/kvm/mmu.c                  | 4
 arch/x86/kvm/paging_tmpl.h          | 2
 4 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 7a4aae99ac5b..2a89a36e7263 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c | |||
@@ -710,7 +710,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
710 | 710 | ||
711 | /* Check if we might have been invalidated; let the guest retry if so */ | 711 | /* Check if we might have been invalidated; let the guest retry if so */ |
712 | ret = RESUME_GUEST; | 712 | ret = RESUME_GUEST; |
713 | if (mmu_notifier_retry(vcpu, mmu_seq)) { | 713 | if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) { |
714 | unlock_rmap(rmap); | 714 | unlock_rmap(rmap); |
715 | goto out_unlock; | 715 | goto out_unlock; |
716 | } | 716 | } |
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 9955216477a4..5e06e3153888 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c | |||
@@ -297,7 +297,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, | |||
297 | lock_rmap(rmap); | 297 | lock_rmap(rmap); |
298 | /* Check for pending invalidations under the rmap chain lock */ | 298 | /* Check for pending invalidations under the rmap chain lock */ |
299 | if (kvm->arch.using_mmu_notifiers && | 299 | if (kvm->arch.using_mmu_notifiers && |
300 | mmu_notifier_retry(vcpu, mmu_seq)) { | 300 | mmu_notifier_retry(vcpu->kvm, mmu_seq)) { |
301 | /* inval in progress, write a non-present HPTE */ | 301 | /* inval in progress, write a non-present HPTE */ |
302 | pteh |= HPTE_V_ABSENT; | 302 | pteh |= HPTE_V_ABSENT; |
303 | pteh &= ~HPTE_V_VALID; | 303 | pteh &= ~HPTE_V_VALID; |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 3d5ca7939380..6f78fa3a4706 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -2886,7 +2886,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code, | |||
2886 | return r; | 2886 | return r; |
2887 | 2887 | ||
2888 | spin_lock(&vcpu->kvm->mmu_lock); | 2888 | spin_lock(&vcpu->kvm->mmu_lock); |
2889 | if (mmu_notifier_retry(vcpu, mmu_seq)) | 2889 | if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) |
2890 | goto out_unlock; | 2890 | goto out_unlock; |
2891 | kvm_mmu_free_some_pages(vcpu); | 2891 | kvm_mmu_free_some_pages(vcpu); |
2892 | if (likely(!force_pt_level)) | 2892 | if (likely(!force_pt_level)) |
@@ -3355,7 +3355,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, | |||
3355 | return r; | 3355 | return r; |
3356 | 3356 | ||
3357 | spin_lock(&vcpu->kvm->mmu_lock); | 3357 | spin_lock(&vcpu->kvm->mmu_lock); |
3358 | if (mmu_notifier_retry(vcpu, mmu_seq)) | 3358 | if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) |
3359 | goto out_unlock; | 3359 | goto out_unlock; |
3360 | kvm_mmu_free_some_pages(vcpu); | 3360 | kvm_mmu_free_some_pages(vcpu); |
3361 | if (likely(!force_pt_level)) | 3361 | if (likely(!force_pt_level)) |
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index f887e4cfc1fe..d17decaf1db9 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h | |||
@@ -565,7 +565,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, | |||
565 | return r; | 565 | return r; |
566 | 566 | ||
567 | spin_lock(&vcpu->kvm->mmu_lock); | 567 | spin_lock(&vcpu->kvm->mmu_lock); |
568 | if (mmu_notifier_retry(vcpu, mmu_seq)) | 568 | if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) |
569 | goto out_unlock; | 569 | goto out_unlock; |
570 | 570 | ||
571 | kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT); | 571 | kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT); |