about summary refs log tree commit diff stats
path: root/drivers/kvm/x86.h
diff options
context:
space:
mode:
authorZhang Xiantao <xiantao.zhang@intel.com>2007-11-20 00:11:38 -0500
committerAvi Kivity <avi@qumranet.com>2008-01-30 10:53:09 -0500
commit3ad82a7e874c5d6c4045090cc01d7794dd9eb21c (patch)
tree94aabdff667daa7de80b61dd7d0233af21a54fa4 /drivers/kvm/x86.h
parent6226686954c4cce3d63ffe1777e60360fcbf0b83 (diff)
KVM: Recalculate mmu pages needed for every memory region change
Instead of incrementally changing the mmu cache size for every memory slot
operation, recalculate it from scratch.  This is simpler and safer.

Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/x86.h')
-rw-r--r--  drivers/kvm/x86.h | 1 +
1 file changed, 1 insertion(+), 0 deletions(-)
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
index 90b791bf6287..71f2477d03fd 100644
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -236,6 +236,7 @@ void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
 void kvm_mmu_zap_all(struct kvm *kvm);
+unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
 
 enum emulation_result {