Diffstat (limited to 'arch/x86/kvm')

 arch/x86/kvm/mmu.c         | 9 +++------
 arch/x86/kvm/paging_tmpl.h | 1 +
 2 files changed, 4 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 834698d24595..c478ee25de66 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -291,7 +291,6 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 {
 	int r;
 
-	kvm_mmu_free_some_pages(vcpu);
 	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
 				   pte_chain_cache, 4);
 	if (r)
@@ -569,9 +568,6 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 {
 	struct kvm_mmu_page *sp;
 
-	if (!vcpu->kvm->arch.n_free_mmu_pages)
-		return NULL;
-
 	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
 	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
 	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
@@ -1024,6 +1020,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 	page = gfn_to_page(vcpu->kvm, gfn);
 
 	spin_lock(&vcpu->kvm->mmu_lock);
+	kvm_mmu_free_some_pages(vcpu);
 	r = __nonpaging_map(vcpu, v, write, gfn, page);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
@@ -1275,6 +1272,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 	if (r)
 		goto out;
 	spin_lock(&vcpu->kvm->mmu_lock);
+	kvm_mmu_free_some_pages(vcpu);
 	mmu_alloc_roots(vcpu);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
@@ -1413,6 +1411,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
 	mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
 	spin_lock(&vcpu->kvm->mmu_lock);
+	kvm_mmu_free_some_pages(vcpu);
 	++vcpu->kvm->stat.mmu_pte_write;
 	kvm_mmu_audit(vcpu, "pre pte write");
 	if (gfn == vcpu->arch.last_pt_write_gfn
@@ -1505,7 +1504,6 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
-	spin_lock(&vcpu->kvm->mmu_lock);
 	while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
 		struct kvm_mmu_page *sp;
 
@@ -1514,7 +1512,6 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 		kvm_mmu_zap_page(vcpu->kvm, sp);
 		++vcpu->kvm->stat.mmu_recycled;
 	}
-	spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a35b83a4fef2..349920556be3 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -402,6 +402,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	page = gfn_to_page(vcpu->kvm, walker.gfn);
 
 	spin_lock(&vcpu->kvm->mmu_lock);
+	kvm_mmu_free_some_pages(vcpu);
 	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
 				  &write_pt, page);
 	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
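In short, this patch moves the kvm_mmu_free_some_pages() call out of mmu_topup_memory_caches(), which runs without mmu_lock, and into each caller's mmu_lock critical section. __kvm_mmu_free_some_pages() consequently drops its own lock/unlock, and kvm_mmu_alloc_page() can drop its NULL return since callers now guarantee free pages before allocating. A minimal sketch of the resulting convention at a fault path, using only names from the hunks above (the body of the critical section varies per caller):

	/* After this patch: recycle shadow pages under mmu_lock, before any
	 * path that may allocate shadow pages. */
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);	/* zaps pages until n_free_mmu_pages >= KVM_REFILL_PAGES */
	r = __nonpaging_map(vcpu, v, write, gfn, page);	/* may call kvm_mmu_alloc_page() */
	spin_unlock(&vcpu->kvm->mmu_lock);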