-rw-r--r--   arch/x86/kvm/mmu.c          | 192
-rw-r--r--   arch/x86/kvm/mmu.h          |   2
-rw-r--r--   arch/x86/kvm/paging_tmpl.h  |  21
-rw-r--r--   arch/x86/kvm/vmx.c          |  22
-rw-r--r--   arch/x86/kvm/x86.c          |  25
-rw-r--r--   virt/kvm/kvm_main.c         |   7
6 files changed, 255 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4b1aa677214..4e22df6f93e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -197,6 +197,47 @@ static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
 static u64 __read_mostly shadow_user_mask;
 static u64 __read_mostly shadow_accessed_mask;
 static u64 __read_mostly shadow_dirty_mask;
+static u64 __read_mostly shadow_mmio_mask;
+
+static void mmu_spte_set(u64 *sptep, u64 spte);
+
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
+{
+        shadow_mmio_mask = mmio_mask;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
+
+static void mark_mmio_spte(u64 *sptep, u64 gfn, unsigned access)
+{
+        access &= ACC_WRITE_MASK | ACC_USER_MASK;
+
+        mmu_spte_set(sptep, shadow_mmio_mask | access | gfn << PAGE_SHIFT);
+}
+
+static bool is_mmio_spte(u64 spte)
+{
+        return (spte & shadow_mmio_mask) == shadow_mmio_mask;
+}
+
+static gfn_t get_mmio_spte_gfn(u64 spte)
+{
+        return (spte & ~shadow_mmio_mask) >> PAGE_SHIFT;
+}
+
+static unsigned get_mmio_spte_access(u64 spte)
+{
+        return (spte & ~shadow_mmio_mask) & ~PAGE_MASK;
+}
+
+static bool set_mmio_spte(u64 *sptep, gfn_t gfn, pfn_t pfn, unsigned access)
+{
+        if (unlikely(is_noslot_pfn(pfn))) {
+                mark_mmio_spte(sptep, gfn, access);
+                return true;
+        }
+
+        return false;
+}
 
 static inline u64 rsvd_bits(int s, int e)
 {
@@ -226,7 +267,7 @@ static int is_nx(struct kvm_vcpu *vcpu)
 
 static int is_shadow_present_pte(u64 pte)
 {
-        return pte & PT_PRESENT_MASK;
+        return pte & PT_PRESENT_MASK && !is_mmio_spte(pte);
 }
 
 static int is_large_pte(u64 pte)
@@ -285,6 +326,12 @@ static u64 __get_spte_lockless(u64 *sptep)
 {
         return ACCESS_ONCE(*sptep);
 }
+
+static bool __check_direct_spte_mmio_pf(u64 spte)
+{
+        /* It is valid if the spte is zapped. */
+        return spte == 0ull;
+}
 #else
 union split_spte {
         struct {
@@ -388,6 +435,23 @@ retry:
 
         return spte.spte;
 }
+
+static bool __check_direct_spte_mmio_pf(u64 spte)
+{
+        union split_spte sspte = (union split_spte)spte;
+        u32 high_mmio_mask = shadow_mmio_mask >> 32;
+
+        /* It is valid if the spte is zapped. */
+        if (spte == 0ull)
+                return true;
+
+        /* It is valid if the spte is being zapped. */
+        if (sspte.spte_low == 0ull &&
+            (sspte.spte_high & high_mmio_mask) == high_mmio_mask)
+                return true;
+
+        return false;
+}
 #endif
 
 static bool spte_has_volatile_bits(u64 spte)
@@ -1745,7 +1809,8 @@ static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
                         child = page_header(pte & PT64_BASE_ADDR_MASK);
                         drop_parent_pte(child, spte);
                 }
-        }
+        } else if (is_mmio_spte(pte))
+                mmu_spte_clear_no_track(spte);
 
         if (is_large_pte(pte))
                 --kvm->stat.lpages;
@@ -2120,6 +2185,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
         u64 spte, entry = *sptep;
         int ret = 0;
 
+        if (set_mmio_spte(sptep, gfn, pfn, pte_access))
+                return 0;
+
         /*
          * We don't set the accessed bit, since we sometimes want to see
          * whether the guest actually used the pte (in order to detect
@@ -2255,6 +2323,9 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                 kvm_mmu_flush_tlb(vcpu);
         }
 
+        if (unlikely(is_mmio_spte(*sptep) && emulate))
+                *emulate = 1;
+
         pgprintk("%s: setting spte %llx\n", __func__, *sptep);
         pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
                  is_large_pte(*sptep)? "2MB" : "4kB",
@@ -2481,7 +2552,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
 
 static bool mmu_invalid_pfn(pfn_t pfn)
 {
-        return unlikely(is_invalid_pfn(pfn) || is_noslot_pfn(pfn));
+        return unlikely(is_invalid_pfn(pfn));
 }
 
 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
@@ -2495,11 +2566,8 @@ static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
                 goto exit;
         }
 
-        if (unlikely(is_noslot_pfn(pfn))) {
+        if (unlikely(is_noslot_pfn(pfn)))
                 vcpu_cache_mmio_info(vcpu, gva, gfn, access);
-                *ret_val = 1;
-                goto exit;
-        }
 
         ret = false;
 exit:
@@ -2813,6 +2881,92 @@ static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
         return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access);
 }
 
+static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct)
+{
+        if (direct)
+                return vcpu_match_mmio_gpa(vcpu, addr);
+
+        return vcpu_match_mmio_gva(vcpu, addr);
+}
+
+
+/*
+ * On direct hosts, the last spte only allows two states
+ * for an mmio page fault:
+ * - It is the mmio spte
+ * - It is zapped or it is being zapped.
+ *
+ * This function completely checks the spte when the last spte
+ * is not the mmio spte.
+ */
+static bool check_direct_spte_mmio_pf(u64 spte)
+{
+        return __check_direct_spte_mmio_pf(spte);
+}
+
+static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
+{
+        struct kvm_shadow_walk_iterator iterator;
+        u64 spte = 0ull;
+
+        walk_shadow_page_lockless_begin(vcpu);
+        for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
+                if (!is_shadow_present_pte(spte))
+                        break;
+        walk_shadow_page_lockless_end(vcpu);
+
+        return spte;
+}
+
+/*
+ * If it is a real mmio page fault, return 1 and emulate the instruction
+ * directly, return 0 to let the CPU fault again on the address, -1 is
+ * returned if a bug is detected.
+ */
+int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
+{
+        u64 spte;
+
+        if (quickly_check_mmio_pf(vcpu, addr, direct))
+                return 1;
+
+        spte = walk_shadow_page_get_mmio_spte(vcpu, addr);
+
+        if (is_mmio_spte(spte)) {
+                gfn_t gfn = get_mmio_spte_gfn(spte);
+                unsigned access = get_mmio_spte_access(spte);
+
+                if (direct)
+                        addr = 0;
+                vcpu_cache_mmio_info(vcpu, addr, gfn, access);
+                return 1;
+        }
+
+        /*
+         * It's ok if the gva is remapped by other cpus on a shadow guest,
+         * it's a BUG if the gfn is not an mmio page.
+         */
+        if (direct && !check_direct_spte_mmio_pf(spte))
+                return -1;
+
+        /*
+         * If the page table is zapped by other cpus, let the CPU fault again
+         * on the address.
+         */
+        return 0;
+}
+EXPORT_SYMBOL_GPL(handle_mmio_page_fault_common);
+
+static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr,
+                                  u32 error_code, bool direct)
+{
+        int ret;
+
+        ret = handle_mmio_page_fault_common(vcpu, addr, direct);
+        WARN_ON(ret < 0);
+        return ret;
+}
+
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
                                 u32 error_code, bool prefault)
 {
@@ -2820,6 +2974,10 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
         int r;
 
         pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
+
+        if (unlikely(error_code & PFERR_RSVD_MASK))
+                return handle_mmio_page_fault(vcpu, gva, error_code, true);
+
         r = mmu_topup_memory_caches(vcpu);
         if (r)
                 return r;
@@ -2896,6 +3054,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
         ASSERT(vcpu);
         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
+        if (unlikely(error_code & PFERR_RSVD_MASK))
+                return handle_mmio_page_fault(vcpu, gpa, error_code, true);
+
         r = mmu_topup_memory_caches(vcpu);
         if (r)
                 return r;
@@ -2993,6 +3154,23 @@ static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
         return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
 }
 
+static bool sync_mmio_spte(u64 *sptep, gfn_t gfn, unsigned access,
+                           int *nr_present)
+{
+        if (unlikely(is_mmio_spte(*sptep))) {
+                if (gfn != get_mmio_spte_gfn(*sptep)) {
+                        mmu_spte_clear_no_track(sptep);
+                        return true;
+                }
+
+                (*nr_present)++;
+                mark_mmio_spte(sptep, gfn, access);
+                return true;
+        }
+
+        return false;
+}
+
 #define PTTYPE 64
 #include "paging_tmpl.h"
 #undef PTTYPE
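
As an aside on the encoding these mmu.c helpers implement: an MMIO spte is simply shadow_mmio_mask OR'ed with the reduced access bits and the gfn shifted into the page-frame field, so both pieces can be recovered by masking. Below is an illustrative, standalone user-space sketch of that encode/decode round trip; it assumes a 40-bit physical address width and rebuilds the mask the same way kvm_set_mmio_spte_mask() does further down, and its helper names are local stand-ins, not the kernel functions.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT     12
#define PAGE_MASK      (~((1ULL << PAGE_SHIFT) - 1))
#define ACC_WRITE_MASK (1u << 1)        /* mirrors PT_WRITABLE_MASK */
#define ACC_USER_MASK  (1u << 2)        /* mirrors PT_USER_MASK */

/* Assumed maxphyaddr of 40: reserved bits 40..62 plus the present bit. */
static const uint64_t shadow_mmio_mask =
        ((((1ULL << (62 - 40 + 1)) - 1) << 40) | 1ULL);

static uint64_t encode_mmio_spte(uint64_t gfn, unsigned access)
{
        /* Only the write/user access bits are kept, as in mark_mmio_spte(). */
        access &= ACC_WRITE_MASK | ACC_USER_MASK;
        return shadow_mmio_mask | access | (gfn << PAGE_SHIFT);
}

static int spte_is_mmio(uint64_t spte)
{
        return (spte & shadow_mmio_mask) == shadow_mmio_mask;
}

int main(void)
{
        uint64_t spte = encode_mmio_spte(0x1234, ACC_WRITE_MASK);

        /* Decode: strip the mask, then split frame number and access bits. */
        uint64_t gfn    = (spte & ~shadow_mmio_mask) >> PAGE_SHIFT;
        unsigned access = (spte & ~shadow_mmio_mask) & ~PAGE_MASK;

        printf("mmio=%d gfn=%#llx access=%#x\n", spte_is_mmio(spte),
               (unsigned long long)gfn, access);
        return 0;
}
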
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 05310b105da..e374db9af02 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -49,6 +49,8 @@
 #define PFERR_FETCH_MASK (1U << 4)
 
 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
+int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
 
 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 67998d3be08..507e2b844cf 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -577,6 +577,10 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 
         pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
 
+        if (unlikely(error_code & PFERR_RSVD_MASK))
+                return handle_mmio_page_fault(vcpu, addr, error_code,
+                                              mmu_is_nested(vcpu));
+
         r = mmu_topup_memory_caches(vcpu);
         if (r)
                 return r;
@@ -684,7 +688,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
                                 --vcpu->kvm->stat.lpages;
                         drop_spte(vcpu->kvm, sptep);
                         need_flush = 1;
-                }
+                } else if (is_mmio_spte(*sptep))
+                        mmu_spte_clear_no_track(sptep);
 
                 break;
         }
@@ -780,7 +785,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
                 gpa_t pte_gpa;
                 gfn_t gfn;
 
-                if (!is_shadow_present_pte(sp->spt[i]))
+                if (!sp->spt[i])
                         continue;
 
                 pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
@@ -789,13 +794,18 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
                                           sizeof(pt_element_t)))
                         return -EINVAL;
 
-                gfn = gpte_to_gfn(gpte);
-
                 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
                         vcpu->kvm->tlbs_dirty++;
                         continue;
                 }
 
+                gfn = gpte_to_gfn(gpte);
+                pte_access = sp->role.access;
+                pte_access &= FNAME(gpte_access)(vcpu, gpte, true);
+
+                if (sync_mmio_spte(&sp->spt[i], gfn, pte_access, &nr_present))
+                        continue;
+
                 if (gfn != sp->gfns[i]) {
                         drop_spte(vcpu->kvm, &sp->spt[i]);
                         vcpu->kvm->tlbs_dirty++;
@@ -803,8 +813,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
                 }
 
                 nr_present++;
-                pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte,
-                                                                  true);
+
                 host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;
 
                 set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a644acb6ed8..e65a158dee6 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3594,6 +3594,17 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
         return exec_control;
 }
 
+static void ept_set_mmio_spte_mask(void)
+{
+        /*
+         * EPT Misconfigurations can be generated if the value of bits 2:0
+         * of an EPT paging-structure entry is 110b (write/execute).
+         * Also, magic bits (0xffull << 49) are set to quickly identify mmio
+         * sptes.
+         */
+        kvm_mmu_set_mmio_spte_mask(0xffull << 49 | 0x6ull);
+}
+
 /*
  * Sets up the vmcs for emulated real mode.
  */
@@ -4671,11 +4682,19 @@ static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte,
 static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 {
         u64 sptes[4];
-        int nr_sptes, i;
+        int nr_sptes, i, ret;
         gpa_t gpa;
 
         gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
 
+        ret = handle_mmio_page_fault_common(vcpu, gpa, true);
+        if (likely(ret == 1))
+                return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
+                                               EMULATE_DONE;
+        if (unlikely(!ret))
+                return 1;
+
+        /* It is the real ept misconfig */
         printk(KERN_ERR "EPT: Misconfiguration.\n");
         printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa);
 
@@ -7102,6 +7121,7 @@ static int __init vmx_init(void)
         if (enable_ept) {
                 kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
                                 VMX_EPT_EXECUTABLE_MASK);
+                ept_set_mmio_spte_mask();
                 kvm_enable_tdp();
         } else
                 kvm_disable_tdp();
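
The EPT mask above leans on two properties: an EPT entry whose bits 2:0 are 110b (writable and executable but not readable) always raises an EPT misconfiguration exit, and the tag byte at bits 49..56 sits above the guest-physical address widths this patch expects, so a genuine translation is very unlikely to match the full mask. A tiny standalone sanity check of those two properties follows; it is only a sketch reusing the same constant as ept_set_mmio_spte_mask(), not kernel code.

#include <assert.h>
#include <stdint.h>

int main(void)
{
        /* Same value handed to kvm_mmu_set_mmio_spte_mask() for EPT above. */
        const uint64_t ept_mmio_mask = (0xffULL << 49) | 0x6ULL;

        /* Bits 2:0 == 110b: write + execute without read, which is an
         * EPT misconfiguration and therefore always causes a VM exit. */
        assert((ept_mmio_mask & 0x7) == 0x6);

        /* The tag byte occupies bits 49..56, above the physical-address
         * bits expected in a real EPT translation. */
        assert(((ept_mmio_mask >> 49) & 0xff) == 0xff);

        return 0;
}
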
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 64c42d90112..2c9661f230a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5062,6 +5062,30 @@ void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
 
+static void kvm_set_mmio_spte_mask(void)
+{
+        u64 mask;
+        int maxphyaddr = boot_cpu_data.x86_phys_bits;
+
+        /*
+         * Set the reserved bits and the present bit of a paging-structure
+         * entry to generate a page fault with PFERR.RSVD = 1.
+         */
+        mask = ((1ull << (62 - maxphyaddr + 1)) - 1) << maxphyaddr;
+        mask |= 1ull;
+
+#ifdef CONFIG_X86_64
+        /*
+         * If the reserved bit is not supported, clear the present bit to
+         * disable mmio page faults.
+         */
+        if (maxphyaddr == 52)
+                mask &= ~1ull;
+#endif
+
+        kvm_mmu_set_mmio_spte_mask(mask);
+}
+
 int kvm_arch_init(void *opaque)
 {
         int r;
@@ -5088,6 +5112,7 @@ int kvm_arch_init(void *opaque)
         if (r)
                 goto out;
 
+        kvm_set_mmio_spte_mask();
         kvm_init_msr_list();
 
         kvm_x86_ops = ops;
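
To make the shadow-paging mask choice concrete: kvm_set_mmio_spte_mask() sets every reserved physical-address bit from maxphyaddr up to bit 62 plus the present bit, so any hardware walk over an MMIO spte faults with the RSVD bit set in the error code; when maxphyaddr is 52 those bits are no longer reserved, so the present bit is dropped and the fast MMIO path is effectively disabled. The following standalone re-derivation of that computation is an illustrative sketch, not the kernel function.

#include <stdint.h>
#include <stdio.h>

/* Rebuilds the mask kvm_set_mmio_spte_mask() computes, parameterised on the
 * CPU's physical address width. */
static uint64_t mmio_spte_mask(int maxphyaddr)
{
        /* Reserved bits maxphyaddr..62 plus the present bit, so touching
         * such an spte faults with PFERR_RSVD_MASK set. */
        uint64_t mask = ((1ULL << (62 - maxphyaddr + 1)) - 1) << maxphyaddr;

        mask |= 1ULL;

        /* With 52 physical bits, bits 52..62 are not reserved any more, so
         * the present bit is cleared and MMIO spte caching is disabled. */
        if (maxphyaddr == 52)
                mask &= ~1ULL;

        return mask;
}

int main(void)
{
        const int widths[] = { 36, 40, 46, 52 };

        for (unsigned i = 0; i < sizeof(widths) / sizeof(widths[0]); i++)
                printf("maxphyaddr=%2d -> mask=%#018llx\n", widths[i],
                       (unsigned long long)mmio_spte_mask(widths[i]));
        return 0;
}
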
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 56f3c704fd7..aefdda390f5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -831,6 +831,13 @@ skip_lpage:
 
         kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
 
+        /*
+         * If a new memory slot is created, we need to clear all
+         * mmio sptes.
+         */
+        if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT)
+                kvm_arch_flush_shadow(kvm);
+
         kvm_free_physmem_slot(&old, &new);
         kfree(old_memslots);
 