diff options
| author | Wei Yang <richard.weiyang@gmail.com> | 2018-09-26 20:31:26 -0400 |
|---|---|---|
| committer | Paolo Bonzini <pbonzini@redhat.com> | 2019-03-28 12:27:19 -0400 |
| commit | 4d66623cfba0949b2f0d669bd2ae732124c99ded (patch) | |
| tree | c031dcad0bdd4e7e995696f18f09c9c88219f16b | |
| parent | 711eff3a8fa1d6193139a895524240912011b4dc (diff) | |
KVM: x86: remove check on nr_mmu_pages in kvm_arch_commit_memory_region()
* nr_mmu_pages would be non-zero only if kvm->arch.n_requested_mmu_pages is
zero (the calculation is only performed when no explicit request was made).
* nr_mmu_pages is always non-zero, since kvm_mmu_calculate_mmu_pages()
never returns zero.
Based on these two reasons, we can merge the two *if* clauses and use the
return value from kvm_mmu_calculate_mmu_pages() directly. This simplifies
the code and also eliminates the possibility for a reader to believe
nr_mmu_pages could be zero.
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| -rw-r--r-- | arch/x86/include/asm/kvm_host.h | 2 | ||||
| -rw-r--r-- | arch/x86/kvm/mmu.c | 2 | ||||
| -rw-r--r-- | arch/x86/kvm/x86.c | 8 |
3 files changed, 4 insertions, 8 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 5b03006c00be..679168931c40 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
| @@ -1254,7 +1254,7 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, | |||
| 1254 | gfn_t gfn_offset, unsigned long mask); | 1254 | gfn_t gfn_offset, unsigned long mask); |
| 1255 | void kvm_mmu_zap_all(struct kvm *kvm); | 1255 | void kvm_mmu_zap_all(struct kvm *kvm); |
| 1256 | void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen); | 1256 | void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen); |
| 1257 | unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm); | 1257 | unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm); |
| 1258 | void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages); | 1258 | void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages); |
| 1259 | 1259 | ||
| 1260 | int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3); | 1260 | int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3); |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index f6d760dcdb75..5a9981465fbb 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
| @@ -6028,7 +6028,7 @@ out: | |||
| 6028 | /* | 6028 | /* |
| 6029 | * Calculate mmu pages needed for kvm. | 6029 | * Calculate mmu pages needed for kvm. |
| 6030 | */ | 6030 | */ |
| 6031 | unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm) | 6031 | unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm) |
| 6032 | { | 6032 | { |
| 6033 | unsigned int nr_mmu_pages; | 6033 | unsigned int nr_mmu_pages; |
| 6034 | unsigned int nr_pages = 0; | 6034 | unsigned int nr_pages = 0; |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 65e4559eef2f..491e92383da8 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
| @@ -9429,13 +9429,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, | |||
| 9429 | const struct kvm_memory_slot *new, | 9429 | const struct kvm_memory_slot *new, |
| 9430 | enum kvm_mr_change change) | 9430 | enum kvm_mr_change change) |
| 9431 | { | 9431 | { |
| 9432 | int nr_mmu_pages = 0; | ||
| 9433 | |||
| 9434 | if (!kvm->arch.n_requested_mmu_pages) | 9432 | if (!kvm->arch.n_requested_mmu_pages) |
| 9435 | nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); | 9433 | kvm_mmu_change_mmu_pages(kvm, |
| 9436 | 9434 | kvm_mmu_calculate_default_mmu_pages(kvm)); | |
| 9437 | if (nr_mmu_pages) | ||
| 9438 | kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); | ||
| 9439 | 9435 | ||
| 9440 | /* | 9436 | /* |
| 9441 | * Dirty logging tracks sptes in 4k granularity, meaning that large | 9437 | * Dirty logging tracks sptes in 4k granularity, meaning that large |
