path: root/virt
author	Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>	2012-02-07 23:02:18 -0500
committer	Avi Kivity <avi@redhat.com>	2012-03-08 07:10:22 -0500
commit	db3fe4eb45f3555d91a7124e18cf3a2f2a30eb90 (patch)
tree	5d294feef8f6281d4cd6c67180e0514c74e87079 /virt
parent	189a2f7b24677deced3d2a9803969ba69f4b75f6 (diff)
KVM: Introduce kvm_memory_slot::arch and move lpage_info into it
Some members of kvm_memory_slot are not used by every architecture.

This patch is the first step to make this difference clear by
introducing kvm_memory_slot::arch; lpage_info is moved into it.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/kvm_main.c	70
1 file changed, 9 insertions, 61 deletions
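
The diff below drops the generic lpage_info handling from kvm_main.c and routes it through two new per-architecture hooks. For orientation, here is a minimal sketch of the resulting kvm_memory_slot shape; it is inferred from the call sites in the hunks below, and the struct kvm_arch_memory_slot contents are an x86-style assumption that is not part of this diff:

/*
 * Sketch only: inferred from the kvm_main.c call sites in this patch.
 * The exact contents of kvm_arch_memory_slot live in each architecture's
 * asm/kvm_host.h (an x86-style lpage_info array is assumed here).
 */
struct kvm_arch_memory_slot {
	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
};

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	unsigned long *rmap;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;	/* new: arch-private per-slot data */
	unsigned long userspace_addr;
	int user_alloc;
	int id;
};

/* Hooks the generic code calls instead of touching lpage_info directly. */
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont);
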
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a30447c5eb4a..8340e0e62034 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -535,21 +535,13 @@ static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
 static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
 				  struct kvm_memory_slot *dont)
 {
-	int i;
-
 	if (!dont || free->rmap != dont->rmap)
 		vfree(free->rmap);
 
 	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
 		kvm_destroy_dirty_bitmap(free);
 
-
-	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
-		if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
-			vfree(free->lpage_info[i]);
-			free->lpage_info[i] = NULL;
-		}
-	}
+	kvm_arch_free_memslot(free, dont);
 
 	free->npages = 0;
 	free->rmap = NULL;
@@ -685,53 +677,6 @@ void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new)
 	slots->generation++;
 }
 
-#ifndef CONFIG_S390
-static int create_lpage_info(struct kvm_memory_slot *slot, unsigned long npages)
-{
-	int i;
-
-	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
-		unsigned long ugfn;
-		int lpages;
-		int level = i + 2;
-
-		lpages = gfn_to_index(slot->base_gfn + npages - 1,
-				      slot->base_gfn, level) + 1;
-
-		slot->lpage_info[i] = vzalloc(lpages * sizeof(*slot->lpage_info[i]));
-		if (!slot->lpage_info[i])
-			goto out_free;
-
-		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
-			slot->lpage_info[i][0].write_count = 1;
-		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
-			slot->lpage_info[i][lpages - 1].write_count = 1;
-		ugfn = slot->userspace_addr >> PAGE_SHIFT;
-		/*
-		 * If the gfn and userspace address are not aligned wrt each
-		 * other, or if explicitly asked to, disable large page
-		 * support for this slot
-		 */
-		if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
-		    !largepages_enabled) {
-			unsigned long j;
-
-			for (j = 0; j < lpages; ++j)
-				slot->lpage_info[i][j].write_count = 1;
-		}
-	}
-
-	return 0;
-
-out_free:
-	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
-		vfree(slot->lpage_info[i]);
-		slot->lpage_info[i] = NULL;
-	}
-	return -ENOMEM;
-}
-#endif /* not defined CONFIG_S390 */
-
 /*
  * Allocate some memory and give it an address in the guest physical address
  * space.
@@ -819,10 +764,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		new.rmap = vzalloc(npages * sizeof(*new.rmap));
 		if (!new.rmap)
 			goto out_free;
-
-		if (create_lpage_info(&new, npages))
-			goto out_free;
 #endif /* not defined CONFIG_S390 */
+		if (kvm_arch_create_memslot(&new, npages))
+			goto out_free;
 	}
 
 	/* Allocate page dirty bitmap if needed */
@@ -880,8 +824,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if (!npages) {
 		new.rmap = NULL;
 		new.dirty_bitmap = NULL;
-		for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
-			new.lpage_info[i] = NULL;
+		memset(&new.arch, 0, sizeof(new.arch));
 	}
 
 	update_memslots(slots, &new);
@@ -968,6 +911,11 @@ out:
 	return r;
 }
 
+bool kvm_largepages_enabled(void)
+{
+	return largepages_enabled;
+}
+
 void kvm_disable_largepages(void)
 {
 	largepages_enabled = false;
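
The removed create_lpage_info() logic does not disappear; it is expected to move behind kvm_arch_create_memslot() on the arch side, with the new kvm_largepages_enabled() accessor exported so that code can still honour kvm_disable_largepages(). Below is a sketch of how an architecture might implement the two hooks by reusing the logic deleted above; the arch-side placement and the arch.lpage_info field name are assumptions, only the logic is taken from the removed code:

#include <linux/kvm_host.h>
#include <linux/vmalloc.h>

/* Mirror of the loop removed from kvm_free_physmem_slot(). */
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	int i;

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		if (!dont || free->arch.lpage_info[i] != dont->arch.lpage_info[i]) {
			vfree(free->arch.lpage_info[i]);
			free->arch.lpage_info[i] = NULL;
		}
	}
}

/* Mirror of the removed create_lpage_info(), now operating on slot->arch. */
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	int i;

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		unsigned long ugfn;
		int lpages;
		int level = i + 2;

		/* Number of large pages of this level spanned by the slot. */
		lpages = gfn_to_index(slot->base_gfn + npages - 1,
				      slot->base_gfn, level) + 1;

		slot->arch.lpage_info[i] =
			vzalloc(lpages * sizeof(*slot->arch.lpage_info[i]));
		if (!slot->arch.lpage_info[i])
			goto out_free;

		/* Unaligned head/tail pages can never be mapped large. */
		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
			slot->arch.lpage_info[i][0].write_count = 1;
		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
			slot->arch.lpage_info[i][lpages - 1].write_count = 1;
		ugfn = slot->userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, or if explicitly asked to, disable large page
		 * support for this slot.
		 */
		if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
		    !kvm_largepages_enabled()) {
			unsigned long j;

			for (j = 0; j < lpages; ++j)
				slot->arch.lpage_info[i][j].write_count = 1;
		}
	}

	return 0;

out_free:
	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		vfree(slot->arch.lpage_info[i]);
		slot->arch.lpage_info[i] = NULL;
	}
	return -ENOMEM;
}

With this split, generic code only calls the hooks and memset()s new.arch when a slot is deleted, so kvm_main.c no longer needs to know the layout of the arch-private per-slot data.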