author    Alexander Graf <agraf@suse.de>  2013-03-27 08:41:35 -0400
committer Alexander Graf <agraf@suse.de>  2013-03-27 08:41:35 -0400
commit    fbfba342a719b49d9cd0837202cf5365ba46ca9b (patch)
tree      fea96faeb70c519c9995b3c9c8b49fb14476ade9 /arch
parent    4fe27d2addda8af7714546a69369fb92dddcf9a3 (diff)
parent    81f4f76bbc712a2dff8bb020057c554e285370e1 (diff)
Merge commit 'origin/next' into kvm-ppc-next
Diffstat (limited to 'arch')
 arch/x86/kvm/mmu.c         | 16 +++++++++-------
 arch/x86/kvm/mmu.h         |  6 ------
 arch/x86/kvm/paging_tmpl.h |  1 -
 3 files changed, 9 insertions(+), 14 deletions(-)
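In short, this merge pulls in the x86 MMU cleanup that folds the free-page check into the shadow-page allocator: a new static helper make_mmu_pages_available() is called from kvm_mmu_alloc_page(), so the call sites that previously had to invoke kvm_mmu_free_some_pages() before allocating can drop it, and the inline wrapper in mmu.h goes away. A condensed before/after sketch of the calling convention (illustrative, not literal kernel code):

    /* Before: every caller paired the reclaim check with the allocation. */
    spin_lock(&vcpu->kvm->mmu_lock);
    kvm_mmu_free_some_pages(vcpu);
    sp = kvm_mmu_get_page(vcpu, ...);

    /* After: kvm_mmu_alloc_page() calls make_mmu_pages_available() itself. */
    spin_lock(&vcpu->kvm->mmu_lock);
    sp = kvm_mmu_get_page(vcpu, ...);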
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c1a9b7b08ab7..633e30cfbd63 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1501,10 +1501,15 @@ static void drop_parent_pte(struct kvm_mmu_page *sp,
 	mmu_spte_clear_no_track(parent_pte);
 }
 
+static void make_mmu_pages_available(struct kvm_vcpu *vcpu);
+
 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 					       u64 *parent_pte, int direct)
 {
 	struct kvm_mmu_page *sp;
+
+	make_mmu_pages_available(vcpu);
+
 	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
 	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
 	if (!direct)
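The hunk above is the heart of the change: every shadow-page allocation now funnels through a single availability check inside the allocator, so callers cannot forget it. As a self-contained illustration of the pattern (hypothetical names and constants, not the kernel implementation):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdlib.h>

    #define MIN_FREE_PAGES 5    /* watermark, analogous to KVM_MIN_FREE_MMU_PAGES */
    #define REFILL_PAGES   10   /* reclaim target, analogous to KVM_REFILL_PAGES */

    static size_t available;    /* toy counter for the page pool */

    /* Stand-in for prepare_zap_oldest_mmu_page(): reclaim one page,
     * returning false once there is nothing left to evict. */
    static bool zap_oldest(void)
    {
        available++;
        return true;
    }

    static void make_pages_available(void)
    {
        if (available >= MIN_FREE_PAGES)   /* common case: bail out early */
            return;
        while (available < REFILL_PAGES && zap_oldest())
            ;
    }

    /* The allocator performs the check itself, on every allocation. */
    static void *alloc_page_checked(void)
    {
        make_pages_available();
        available--;
        return calloc(1, 4096);
    }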
@@ -2842,7 +2847,6 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
 		goto out_unlock;
-	kvm_mmu_free_some_pages(vcpu);
 	if (likely(!force_pt_level))
 		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
 	r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn,
@@ -2920,7 +2924,6 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 
 	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
 		spin_lock(&vcpu->kvm->mmu_lock);
-		kvm_mmu_free_some_pages(vcpu);
 		sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL,
 				      1, ACC_ALL, NULL);
 		++sp->root_count;
@@ -2932,7 +2935,6 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 
 		ASSERT(!VALID_PAGE(root));
 		spin_lock(&vcpu->kvm->mmu_lock);
-		kvm_mmu_free_some_pages(vcpu);
 		sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
 				      i << 30,
 				      PT32_ROOT_LEVEL, 1, ACC_ALL,
@@ -2971,7 +2973,6 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 		ASSERT(!VALID_PAGE(root));
 
 		spin_lock(&vcpu->kvm->mmu_lock);
-		kvm_mmu_free_some_pages(vcpu);
 		sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL,
 				      0, ACC_ALL, NULL);
 		root = __pa(sp->spt);
@@ -3005,7 +3006,6 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 			return 1;
 		}
 		spin_lock(&vcpu->kvm->mmu_lock);
-		kvm_mmu_free_some_pages(vcpu);
 		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
 				      PT32_ROOT_LEVEL, 0,
 				      ACC_ALL, NULL);
@@ -3311,7 +3311,6 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
 		goto out_unlock;
-	kvm_mmu_free_some_pages(vcpu);
 	if (likely(!force_pt_level))
 		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
 	r = __direct_map(vcpu, gpa, write, map_writable,
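Every call site deleted in the hunks above shares the same shape: the check sat under mmu_lock, immediately before a shadow-page allocation that itself runs under that lock, which is why moving the check into kvm_mmu_alloc_page() preserves the locking discipline. A condensed, hypothetical caller mirroring the root-allocation sites (map_root_sketch is not a real kernel function):

    static hpa_t map_root_sketch(struct kvm_vcpu *vcpu, gfn_t root_gfn)
    {
        struct kvm_mmu_page *sp;
        hpa_t root;

        spin_lock(&vcpu->kvm->mmu_lock);
        /* kvm_mmu_free_some_pages(vcpu);  -- no longer needed here */
        sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL,
                              0, ACC_ALL, NULL);
        ++sp->root_count;
        root = __pa(sp->spt);
        spin_unlock(&vcpu->kvm->mmu_lock);

        return root;
    }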
@@ -4013,10 +4012,13 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
 
-void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
+static void make_mmu_pages_available(struct kvm_vcpu *vcpu)
 {
 	LIST_HEAD(invalid_list);
 
+	if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES))
+		return;
+
 	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
 		if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
 			break;
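One subtlety in this hunk: the availability test that used to live in the mmu.h wrapper (deleted below) reappears here with its sense inverted, so the branch hint now annotates the early return rather than the reclaim call. The two fast paths are equivalent; a sketch of the equivalence (hypothetical helpers, with the usual __builtin_expect macros):

    #define likely(x)   __builtin_expect(!!(x), 1)
    #define unlikely(x) __builtin_expect(!!(x), 0)
    #define MIN_FREE 5          /* placeholder for KVM_MIN_FREE_MMU_PAGES */

    static void reclaim(void);  /* placeholder for the zap loop */

    /* Old inline wrapper style: predict that reclaim is rarely needed. */
    static inline void free_some_pages_old(unsigned int avail)
    {
        if (unlikely(avail < MIN_FREE))
            reclaim();
    }

    /* New open-coded form: predict the early return; same fast path. */
    static void make_pages_available_new(unsigned int avail)
    {
        if (likely(avail >= MIN_FREE))
            return;
        reclaim();
    }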
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 3b1ad0049ea4..2adcbc2cac6d 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -64,12 +64,6 @@ static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
 	return 0;
 }
 
-static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
-{
-	if (unlikely(kvm_mmu_available_pages(vcpu->kvm) < KVM_MIN_FREE_MMU_PAGES))
-		__kvm_mmu_free_some_pages(vcpu);
-}
-
 static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
 {
 	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 105dd5bd550e..af143f065532 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -627,7 +627,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 		goto out_unlock;
 
 	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
-	kvm_mmu_free_some_pages(vcpu);
 	if (!force_pt_level)
 		transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
 	r = FNAME(fetch)(vcpu, addr, &walker, write_fault,