aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorMarcelo Tosatti <marcelo@kvack.org>2008-02-23 09:44:30 -0500
committerAvi Kivity <avi@qumranet.com>2008-04-27 04:53:25 -0400
commit05da45583de9b383dc81dd695fe248431d6c9f2b (patch)
treea76d699e60aca4f775d5f67254214654235e2e17 /include
parent2e53d63acba75795aa226febd140f67c58c6a353 (diff)
KVM: MMU: large page support
Create large pages mappings if the guest PTE's are marked as such and
the underlying memory is hugetlbfs backed. If the largepage contains
write-protected pages, a large pte is not used.

Gives a consistent 2% improvement for data copies on ram mounted
filesystem, without NPT/EPT.

Anthony measures a 4% improvement on 4-way kernbench, with NPT.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'include')
-rw-r--r--include/asm-x86/kvm_host.h9
-rw-r--r--include/linux/kvm_host.h5
2 files changed, 14 insertions, 0 deletions
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 8c3f74b73524..95473ef5a906 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -39,6 +39,13 @@
 #define INVALID_PAGE	(~(hpa_t)0)
 #define UNMAPPED_GVA (~(gpa_t)0)
 
+/* shadow tables are PAE even on non-PAE hosts */
+#define KVM_HPAGE_SHIFT 21
+#define KVM_HPAGE_SIZE (1UL << KVM_HPAGE_SHIFT)
+#define KVM_HPAGE_MASK (~(KVM_HPAGE_SIZE - 1))
+
+#define KVM_PAGES_PER_HPAGE (KVM_HPAGE_SIZE / PAGE_SIZE)
+
 #define DE_VECTOR 0
 #define UD_VECTOR 6
 #define NM_VECTOR 7
@@ -230,6 +237,7 @@ struct kvm_vcpu_arch {
 	struct {
 		gfn_t gfn;	/* presumed gfn during guest pte update */
 		struct page *page;	/* page corresponding to that gfn */
+		int largepage;
 	} update_pte;
 
 	struct i387_fxsave_struct host_fx_image;
@@ -307,6 +315,7 @@ struct kvm_vm_stat {
 	u32 mmu_recycled;
 	u32 mmu_cache_miss;
 	u32 remote_tlb_flush;
+	u32 lpages;
 };
 
 struct kvm_vcpu_stat {
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 994278fb5883..9750bb3c5a75 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -103,6 +103,10 @@ struct kvm_memory_slot {
 	unsigned long flags;
 	unsigned long *rmap;
 	unsigned long *dirty_bitmap;
+	struct {
+		unsigned long rmap_pde;
+		int write_count;
+	} *lpage_info;
 	unsigned long userspace_addr;
 	int user_alloc;
 };
@@ -169,6 +173,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 				int user_alloc);
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
+unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
 void kvm_release_page_clean(struct page *page);
 void kvm_release_page_dirty(struct page *page);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,