Diffstat (limited to 'arch/x86')

-rw-r--r--  arch/x86/include/asm/kvm_host.h |  9
-rw-r--r--  arch/x86/kvm/mmu.c              |  2
-rw-r--r--  arch/x86/kvm/x86.c              | 59

3 files changed, 69 insertions(+), 1 deletion(-)
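Taken together, the patch gives each memslot its own arch-specific data: kvm_host.h adds struct kvm_arch_memory_slot holding the per-level lpage_info arrays, mmu.c reroutes its lookup through the new memslot->arch field, and x86.c implements kvm_arch_create_memslot()/kvm_arch_free_memslot() to allocate and release that data. The generic-side callers live outside this arch/x86 view; the sketch below only illustrates the calling convention the new hooks appear to assume (the demo_* helpers and the error-handling shape are illustrative, not part of the patch).

/*
 * Illustrative sketch only -- the real callers are in generic KVM code,
 * which is not shown in this arch/x86 diff.
 */
static int demo_setup_slot(struct kvm_memory_slot *new, unsigned long npages)
{
	/* Size and allocate the x86 lpage_info arrays for a new slot. */
	if (npages && kvm_arch_create_memslot(new, npages))
		return -ENOMEM;
	return 0;
}

static void demo_teardown_slot(struct kvm_memory_slot *free,
			       struct kvm_memory_slot *dont)
{
	/* Release arch data, skipping anything still shared with 'dont'. */
	kvm_arch_free_memslot(free, dont);
}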
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c24125cd0c63..74c9edf2bb18 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -483,6 +483,15 @@ struct kvm_vcpu_arch {
 	} osvw;
 };
 
+struct kvm_lpage_info {
+	unsigned long rmap_pde;
+	int write_count;
+};
+
+struct kvm_arch_memory_slot {
+	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
+};
+
 struct kvm_arch {
 	unsigned int n_used_mmu_pages;
 	unsigned int n_requested_mmu_pages;
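The new kvm_arch_memory_slot keeps one kvm_lpage_info array per large-page level (index 0 for 2 MB pages, index 1 for 1 GB pages on x86, i.e. lpage_info[level - 2]). A minimal sketch of how many entries each level needs for a slot of npages pages, mirroring the gfn_to_index() arithmetic that kvm_arch_create_memslot() uses further down (demo_lpage_entries is a made-up name; KVM_HPAGE_GFN_SHIFT() is the existing per-level shift macro):

/* Illustrative only: entry count for one large-page level. */
static unsigned long demo_lpage_entries(gfn_t base_gfn, unsigned long npages,
					int level)
{
	gfn_t last_gfn = base_gfn + npages - 1;

	/* One kvm_lpage_info per huge page the slot overlaps at 'level'. */
	return (last_gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
	       (base_gfn >> KVM_HPAGE_GFN_SHIFT(level)) + 1;
}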
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 37e7f100a0e0..ff053ca32303 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -689,7 +689,7 @@ static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
 	unsigned long idx;
 
 	idx = gfn_to_index(gfn, slot->base_gfn, level);
-	return &slot->lpage_info[level - 2][idx];
+	return &slot->arch.lpage_info[level - 2][idx];
 }
 
 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
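The one-line mmu.c change only reroutes the lookup through the new memslot->arch field; the indexing and the meaning of write_count are unchanged. Modelled on how mmu.c consumes this data, a non-zero write_count at a level means the gfn must not be mapped with a huge page of that size (demo_gfn_disallows_lpage is a hypothetical name, not a function in the patch):

/* Hypothetical helper, patterned after lpage_info_slot() above. */
static bool demo_gfn_disallows_lpage(struct kvm_memory_slot *slot,
				     gfn_t gfn, int level)
{
	unsigned long idx = gfn_to_index(gfn, slot->base_gfn, level);

	/* write_count != 0 => this gfn range must stay 4 KB mapped. */
	return slot->arch.lpage_info[level - 2][idx].write_count != 0;
}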
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3df0b7a140b0..ca74c1dadf3a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6239,6 +6239,65 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 		put_page(kvm->arch.ept_identity_pagetable);
 }
 
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+			   struct kvm_memory_slot *dont)
+{
+	int i;
+
+	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+		if (!dont || free->arch.lpage_info[i] != dont->arch.lpage_info[i]) {
+			vfree(free->arch.lpage_info[i]);
+			free->arch.lpage_info[i] = NULL;
+		}
+	}
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+	int i;
+
+	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+		unsigned long ugfn;
+		int lpages;
+		int level = i + 2;
+
+		lpages = gfn_to_index(slot->base_gfn + npages - 1,
+				      slot->base_gfn, level) + 1;
+
+		slot->arch.lpage_info[i] =
+			vzalloc(lpages * sizeof(*slot->arch.lpage_info[i]));
+		if (!slot->arch.lpage_info[i])
+			goto out_free;
+
+		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
+			slot->arch.lpage_info[i][0].write_count = 1;
+		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
+			slot->arch.lpage_info[i][lpages - 1].write_count = 1;
+		ugfn = slot->userspace_addr >> PAGE_SHIFT;
+		/*
+		 * If the gfn and userspace address are not aligned wrt each
+		 * other, or if explicitly asked to, disable large page
+		 * support for this slot
+		 */
+		if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
+		    !kvm_largepages_enabled()) {
+			unsigned long j;
+
+			for (j = 0; j < lpages; ++j)
+				slot->arch.lpage_info[i][j].write_count = 1;
+		}
+	}
+
+	return 0;
+
+out_free:
+	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+		vfree(slot->arch.lpage_info[i]);
+		slot->arch.lpage_info[i] = NULL;
+	}
+	return -ENOMEM;
+}
+
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
 				   struct kvm_memory_slot old,