author    Izik Eidus <izike@qumranet.com>    2007-09-27 08:11:22 -0400
committer Avi Kivity <avi@qumranet.com>      2008-01-30 10:52:50 -0500
commit    290fc38da8187b53b78dd4d5ab27a20b88ef8b61 (patch)
tree      983b2b4cecbe489f7b84391c5eed34aa9f073da0 /drivers/kvm/kvm.h
parent    f566e09fc2c9f4164e1f0017c8c1c7a18bad7d72 (diff)
KVM: Remove the usage of page->private field by rmap
When kvm uses user-allocated pages in the future for the guest, we won't
be able to use page->private for rmap, since page->rmap is reserved for
the filesystem.  So we move the rmap base pointers to the memory slot.

A side effect of this is that we need to store the gfn of each gpte in
the shadow pages, since the memory slot is addressed by gfn, instead of
hfn like struct page.

Signed-off-by: Izik Eidus <izik@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
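To make the new layout concrete, here is a minimal sketch (illustration only, not code from this patch) of how a reverse-mapping entry would be looked up once the rmap base pointers live in the memory slot: the slot is located by gfn via gfn_to_memslot(), and the entry is the slot's rmap word at offset gfn - base_gfn. The helper name gfn_to_rmap() is hypothetical; gfn_to_memslot(), base_gfn and rmap are the declarations visible in this header.

/*
 * Illustrative sketch only: find the per-page rmap head for a guest
 * frame number once the rmap array hangs off struct kvm_memory_slot.
 * The helper name gfn_to_rmap() is hypothetical here.
 */
static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	/* one rmap word per guest page, indexed by offset within the slot */
	return &slot->rmap[gfn - slot->base_gfn];
}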
Diffstat (limited to 'drivers/kvm/kvm.h')
-rw-r--r--  drivers/kvm/kvm.h  6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 08ffc829f07f..80cfb99fffe0 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -126,6 +126,8 @@ struct kvm_mmu_page {
 	union kvm_mmu_page_role role;
 
 	u64 *spt;
+	/* hold the gfn of each spte inside spt */
+	gfn_t *gfns;
 	unsigned long slot_bitmap; /* One bit set per slot which has memory
 				    * in this shadow page.
 				    */
@@ -159,7 +161,7 @@ struct kvm_mmu {
 	u64 *pae_root;
 };
 
-#define KVM_NR_MEM_OBJS 20
+#define KVM_NR_MEM_OBJS 40
 
 struct kvm_mmu_memory_cache {
 	int nobjs;
@@ -402,6 +404,7 @@ struct kvm_memory_slot {
 	unsigned long npages;
 	unsigned long flags;
 	struct page **phys_mem;
+	unsigned long *rmap;
 	unsigned long *dirty_bitmap;
 };
 
@@ -554,6 +557,7 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);
 
 extern hpa_t bad_page_address;
 
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);