author     Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>    2012-02-07 23:02:18 -0500
committer  Avi Kivity <avi@redhat.com>                          2012-03-08 07:10:22 -0500
commit     db3fe4eb45f3555d91a7124e18cf3a2f2a30eb90 (patch)
tree       5d294feef8f6281d4cd6c67180e0514c74e87079
parent     189a2f7b24677deced3d2a9803969ba69f4b75f6 (diff)
KVM: Introduce kvm_memory_slot::arch and move lpage_info into it
Some members of kvm_memory_slot are not used by every architecture.

This patch is the first step toward making that difference explicit: it
introduces kvm_memory_slot::arch and moves lpage_info into it.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
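As a rough sketch (not part of this patch), the new per-arch contract amounts to the following for an architecture that keeps no per-slot state; it mirrors the empty struct and stub hooks the diff below adds for ia64, powerpc and s390:

/* In the arch's asm/kvm_host.h (assumption: no arch-specific slot data). */
struct kvm_arch_memory_slot {
};

/* In the arch's KVM code: nothing to allocate or free per slot. */
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}

An architecture with real per-slot metadata (x86 in this patch) instead fills struct kvm_arch_memory_slot and allocates it in kvm_arch_create_memslot(), freeing it again in kvm_arch_free_memslot().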
-rw-r--r--  arch/ia64/include/asm/kvm_host.h     |  3
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c             | 10
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h  |  3
-rw-r--r--  arch/powerpc/kvm/powerpc.c           | 10
-rw-r--r--  arch/s390/include/asm/kvm_host.h     |  3
-rw-r--r--  arch/s390/kvm/kvm-s390.c             | 10
-rw-r--r--  arch/x86/include/asm/kvm_host.h      |  9
-rw-r--r--  arch/x86/kvm/mmu.c                   |  2
-rw-r--r--  arch/x86/kvm/x86.c                   | 59
-rw-r--r--  include/linux/kvm_host.h             | 11
-rw-r--r--  virt/kvm/kvm_main.c                  | 70
11 files changed, 122 insertions(+), 68 deletions(-)
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 2689ee54a1c9..e35b3a84a40b 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -459,6 +459,9 @@ struct kvm_sal_data {
 	unsigned long boot_gp;
 };
 
+struct kvm_arch_memory_slot {
+};
+
 struct kvm_arch {
 	spinlock_t dirty_log_lock;
 
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 8ca7261e7b3d..d8ddbba6fe7d 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1571,6 +1571,16 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 	return VM_FAULT_SIGBUS;
 }
 
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+			   struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+	return 0;
+}
+
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 		struct kvm_memory_slot *memslot,
 		struct kvm_memory_slot old,
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 1843d5d2a3be..52eb9c1f4fe0 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -213,6 +213,9 @@ struct revmap_entry {
 #define KVMPPC_PAGE_WRITETHRU	HPTE_R_W	/* 0x40 */
 #define KVMPPC_GOT_PAGE		0x80
 
+struct kvm_arch_memory_slot {
+};
+
 struct kvm_arch {
 #ifdef CONFIG_KVM_BOOK3S_64_HV
 	unsigned long hpt_virt;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 0e21d155eea7..00d7e345b3fe 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -281,6 +281,16 @@ long kvm_arch_dev_ioctl(struct file *filp,
 	return -EINVAL;
 }
 
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+			   struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+	return 0;
+}
+
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
 				   struct kvm_memory_slot old,
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index e6304268ea28..7343872890a2 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -245,6 +245,9 @@ struct kvm_vm_stat {
 	u32 remote_tlb_flush;
 };
 
+struct kvm_arch_memory_slot {
+};
+
 struct kvm_arch{
 	struct sca_block *sca;
 	debug_info_t *dbf;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index cf3c0a91d046..17ad69d596fd 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -814,6 +814,16 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 	return VM_FAULT_SIGBUS;
 }
 
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+			   struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+	return 0;
+}
+
 /* Section: memory related */
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c24125cd0c63..74c9edf2bb18 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -483,6 +483,15 @@ struct kvm_vcpu_arch {
 	} osvw;
 };
 
+struct kvm_lpage_info {
+	unsigned long rmap_pde;
+	int write_count;
+};
+
+struct kvm_arch_memory_slot {
+	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
+};
+
 struct kvm_arch {
 	unsigned int n_used_mmu_pages;
 	unsigned int n_requested_mmu_pages;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 37e7f100a0e0..ff053ca32303 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -689,7 +689,7 @@ static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
 	unsigned long idx;
 
 	idx = gfn_to_index(gfn, slot->base_gfn, level);
-	return &slot->lpage_info[level - 2][idx];
+	return &slot->arch.lpage_info[level - 2][idx];
 }
 
 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3df0b7a140b0..ca74c1dadf3a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6239,6 +6239,65 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 		put_page(kvm->arch.ept_identity_pagetable);
 }
 
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+			   struct kvm_memory_slot *dont)
+{
+	int i;
+
+	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+		if (!dont || free->arch.lpage_info[i] != dont->arch.lpage_info[i]) {
+			vfree(free->arch.lpage_info[i]);
+			free->arch.lpage_info[i] = NULL;
+		}
+	}
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+	int i;
+
+	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+		unsigned long ugfn;
+		int lpages;
+		int level = i + 2;
+
+		lpages = gfn_to_index(slot->base_gfn + npages - 1,
+				      slot->base_gfn, level) + 1;
+
+		slot->arch.lpage_info[i] =
+			vzalloc(lpages * sizeof(*slot->arch.lpage_info[i]));
+		if (!slot->arch.lpage_info[i])
+			goto out_free;
+
+		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
+			slot->arch.lpage_info[i][0].write_count = 1;
+		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
+			slot->arch.lpage_info[i][lpages - 1].write_count = 1;
+		ugfn = slot->userspace_addr >> PAGE_SHIFT;
+		/*
+		 * If the gfn and userspace address are not aligned wrt each
+		 * other, or if explicitly asked to, disable large page
+		 * support for this slot
+		 */
+		if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
+		    !kvm_largepages_enabled()) {
+			unsigned long j;
+
+			for (j = 0; j < lpages; ++j)
+				slot->arch.lpage_info[i][j].write_count = 1;
+		}
+	}
+
+	return 0;
+
+out_free:
+	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+		vfree(slot->arch.lpage_info[i]);
+		slot->arch.lpage_info[i] = NULL;
+	}
+	return -ENOMEM;
+}
+
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot *memslot,
 				struct kvm_memory_slot old,
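For intuition only (not part of the patch), the sizing done by kvm_arch_create_memslot() above can be worked through with made-up numbers. The standalone sketch below assumes x86's 4 KiB base pages and 2 MiB large pages (512 base pages per large page) and a hypothetical slot of 2048 pages starting at gfn 0x100:

/*
 * Illustration of how kvm_arch_create_memslot() sizes the 2 MiB
 * lpage_info array; PAGES_PER_2M and the slot values are assumed.
 */
#include <stdio.h>

#define PAGES_PER_2M (1UL << 9)	/* 512 4 KiB pages per 2 MiB page */

int main(void)
{
	unsigned long base_gfn = 0x100;	/* hypothetical slot start (gfn 256) */
	unsigned long npages   = 2048;	/* hypothetical slot size in 4 KiB pages */

	/* gfn_to_index() for level 2: index of the last gfn's 2 MiB region,
	 * relative to the slot's first region, plus one entry. */
	unsigned long last = base_gfn + npages - 1;
	unsigned long lpages = (last / PAGES_PER_2M) - (base_gfn / PAGES_PER_2M) + 1;

	printf("2 MiB lpage_info entries needed: %lu\n", lpages);	/* prints 5 */

	/* Regions that straddle the slot boundary cannot be mapped with a
	 * large page, so their write_count is forced to 1 in the patch. */
	if (base_gfn % PAGES_PER_2M)
		printf("entry 0 gets write_count = 1 (unaligned start)\n");
	if ((base_gfn + npages) % PAGES_PER_2M)
		printf("entry %lu gets write_count = 1 (unaligned end)\n", lpages - 1);
	return 0;
}

In this example the slot neither starts nor ends on a 2 MiB boundary, so the first and last of the five entries are marked and only the middle three regions remain candidates for large-page mappings.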
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 7a08496b974a..355e44555c39 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -171,11 +171,6 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
  */
 #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
 
-struct kvm_lpage_info {
-	unsigned long rmap_pde;
-	int write_count;
-};
-
 struct kvm_memory_slot {
 	gfn_t base_gfn;
 	unsigned long npages;
@@ -184,7 +179,7 @@ struct kvm_memory_slot {
 	unsigned long *dirty_bitmap;
 	unsigned long *dirty_bitmap_head;
 	unsigned long nr_dirty_pages;
-	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
+	struct kvm_arch_memory_slot arch;
 	unsigned long userspace_addr;
 	int user_alloc;
 	int id;
@@ -376,6 +371,9 @@ int kvm_set_memory_region(struct kvm *kvm,
 int __kvm_set_memory_region(struct kvm *kvm,
 			    struct kvm_userspace_memory_region *mem,
 			    int user_alloc);
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+			   struct kvm_memory_slot *dont);
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot *memslot,
 				struct kvm_memory_slot old,
@@ -385,6 +383,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem,
 				struct kvm_memory_slot old,
 				int user_alloc);
+bool kvm_largepages_enabled(void);
 void kvm_disable_largepages(void);
 void kvm_arch_flush_shadow(struct kvm *kvm);
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a30447c5eb4a..8340e0e62034 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -535,21 +535,13 @@ static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
 static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
 				  struct kvm_memory_slot *dont)
 {
-	int i;
-
 	if (!dont || free->rmap != dont->rmap)
 		vfree(free->rmap);
 
 	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
 		kvm_destroy_dirty_bitmap(free);
 
-
-	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
-		if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
-			vfree(free->lpage_info[i]);
-			free->lpage_info[i] = NULL;
-		}
-	}
+	kvm_arch_free_memslot(free, dont);
 
 	free->npages = 0;
 	free->rmap = NULL;
@@ -685,53 +677,6 @@ void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new)
 	slots->generation++;
 }
 
-#ifndef CONFIG_S390
-static int create_lpage_info(struct kvm_memory_slot *slot, unsigned long npages)
-{
-	int i;
-
-	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
-		unsigned long ugfn;
-		int lpages;
-		int level = i + 2;
-
-		lpages = gfn_to_index(slot->base_gfn + npages - 1,
-				      slot->base_gfn, level) + 1;
-
-		slot->lpage_info[i] = vzalloc(lpages * sizeof(*slot->lpage_info[i]));
-		if (!slot->lpage_info[i])
-			goto out_free;
-
-		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
-			slot->lpage_info[i][0].write_count = 1;
-		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
-			slot->lpage_info[i][lpages - 1].write_count = 1;
-		ugfn = slot->userspace_addr >> PAGE_SHIFT;
-		/*
-		 * If the gfn and userspace address are not aligned wrt each
-		 * other, or if explicitly asked to, disable large page
-		 * support for this slot
-		 */
-		if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
-		    !largepages_enabled) {
-			unsigned long j;
-
-			for (j = 0; j < lpages; ++j)
-				slot->lpage_info[i][j].write_count = 1;
-		}
-	}
-
-	return 0;
-
-out_free:
-	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
-		vfree(slot->lpage_info[i]);
-		slot->lpage_info[i] = NULL;
-	}
-	return -ENOMEM;
-}
-#endif /* not defined CONFIG_S390 */
-
 /*
  * Allocate some memory and give it an address in the guest physical address
  * space.
@@ -819,10 +764,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		new.rmap = vzalloc(npages * sizeof(*new.rmap));
 		if (!new.rmap)
 			goto out_free;
-
-		if (create_lpage_info(&new, npages))
-			goto out_free;
 #endif /* not defined CONFIG_S390 */
+		if (kvm_arch_create_memslot(&new, npages))
+			goto out_free;
 	}
 
 	/* Allocate page dirty bitmap if needed */
@@ -880,8 +824,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if (!npages) {
 		new.rmap = NULL;
 		new.dirty_bitmap = NULL;
-		for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
-			new.lpage_info[i] = NULL;
+		memset(&new.arch, 0, sizeof(new.arch));
 	}
 
 	update_memslots(slots, &new);
@@ -968,6 +911,11 @@ out:
 	return r;
 }
 
+bool kvm_largepages_enabled(void)
+{
+	return largepages_enabled;
+}
+
 void kvm_disable_largepages(void)
 {
 	largepages_enabled = false;