path: root/virt/kvm/kvm_main.c
author	Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>	2012-02-07 23:00:13 -0500
committer	Avi Kivity <avi@redhat.com>	2012-03-08 07:10:20 -0500
commit	a64f273a08d16bc66ccc5546bd28b1bba554ec81 (patch)
tree	af305dee1d5c6c2fed1e6c913a18c4f43e16940a /virt/kvm/kvm_main.c
parent	fb03cb6f44236f4bef62a0dda8e025ff5ca51417 (diff)
KVM: Split lpage_info creation out from __kvm_set_memory_region()
This makes it easy to make lpage_info architecture specific.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
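For context, the per-level bookkeeping that the new create_lpage_info() helper owns boils down to simple index arithmetic: one lpage_info entry per large page overlapping the slot, with the first and last entries write-protected when the slot boundaries are not large-page aligned. The standalone sketch below (not part of the patch) illustrates that arithmetic; the constant 512 stands in for x86's KVM_PAGES_PER_HPAGE(2) (2MB pages made of 4KB guest frames), and the base_gfn/npages values are made up for illustration.

/*
 * Standalone sketch (not from the patch) of the per-level arithmetic that
 * create_lpage_info() performs.  512 stands in for KVM_PAGES_PER_HPAGE(2);
 * the gfn and size values are invented for illustration only.
 */
#include <stdio.h>

#define PAGES_PER_HPAGE 512UL

/* index of the large page covering gfn, relative to the slot's first large page */
static unsigned long gfn_to_idx(unsigned long gfn, unsigned long base_gfn)
{
	return (gfn / PAGES_PER_HPAGE) - (base_gfn / PAGES_PER_HPAGE);
}

int main(void)
{
	unsigned long base_gfn = 0x100;		/* slot start, not 2MB aligned */
	unsigned long npages   = 0x1000;	/* slot length in 4KB pages */

	/* number of lpage_info entries the helper vzalloc()s for this level */
	unsigned long lpages = gfn_to_idx(base_gfn + npages - 1, base_gfn) + 1;

	printf("lpages = %lu\n", lpages);	/* prints 9 */

	/* unaligned head: first large page cannot be mapped huge (write_count = 1) */
	if (base_gfn & (PAGES_PER_HPAGE - 1))
		printf("entry 0 write-protected\n");

	/* unaligned tail: last large page cannot be mapped huge either */
	if ((base_gfn + npages) & (PAGES_PER_HPAGE - 1))
		printf("entry %lu write-protected\n", lpages - 1);

	return 0;
}

With these example values the slot spans large pages 0 through 8, and only entries 1 through 7 remain eligible for huge mappings; the real helper additionally write-protects every entry when the gfn and userspace address are misaligned with respect to each other, or when large pages are disabled.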
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--	virt/kvm/kvm_main.c	83
1 file changed, 52 insertions(+), 31 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 415fe816fc15..7adaa2063415 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -685,6 +685,56 @@ void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new)
 	slots->generation++;
 }
 
+#ifndef CONFIG_S390
+static int create_lpage_info(struct kvm_memory_slot *slot, unsigned long npages)
+{
+	int i;
+
+	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+		unsigned long ugfn;
+		int lpages;
+		int level = i + 2;
+
+		if (slot->lpage_info[i])
+			continue;
+
+		lpages = gfn_to_index(slot->base_gfn + npages - 1,
+				      slot->base_gfn, level) + 1;
+
+		slot->lpage_info[i] = vzalloc(lpages * sizeof(*slot->lpage_info[i]));
+		if (!slot->lpage_info[i])
+			goto out_free;
+
+		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
+			slot->lpage_info[i][0].write_count = 1;
+		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
+			slot->lpage_info[i][lpages - 1].write_count = 1;
+		ugfn = slot->userspace_addr >> PAGE_SHIFT;
+		/*
+		 * If the gfn and userspace address are not aligned wrt each
+		 * other, or if explicitly asked to, disable large page
+		 * support for this slot
+		 */
+		if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
+		    !largepages_enabled) {
+			unsigned long j;
+
+			for (j = 0; j < lpages; ++j)
+				slot->lpage_info[i][j].write_count = 1;
+		}
+	}
+
+	return 0;
+
+out_free:
+	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+		vfree(slot->lpage_info[i]);
+		slot->lpage_info[i] = NULL;
+	}
+	return -ENOMEM;
+}
+#endif /* not defined CONFIG_S390 */
+
 /*
  * Allocate some memory and give it an address in the guest physical address
  * space.
@@ -778,37 +828,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if (!npages)
 		goto skip_lpage;
 
-	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
-		unsigned long ugfn;
-		unsigned long j;
-		int lpages;
-		int level = i + 2;
-
-		if (new.lpage_info[i])
-			continue;
-
-		lpages = gfn_to_index(base_gfn + npages - 1, base_gfn, level) + 1;
-
-		new.lpage_info[i] = vzalloc(lpages * sizeof(*new.lpage_info[i]));
-
-		if (!new.lpage_info[i])
-			goto out_free;
-
-		if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
-			new.lpage_info[i][0].write_count = 1;
-		if ((base_gfn+npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
-			new.lpage_info[i][lpages - 1].write_count = 1;
-		ugfn = new.userspace_addr >> PAGE_SHIFT;
-		/*
-		 * If the gfn and userspace address are not aligned wrt each
-		 * other, or if explicitly asked to, disable large page
-		 * support for this slot
-		 */
-		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
-		    !largepages_enabled)
-			for (j = 0; j < lpages; ++j)
-				new.lpage_info[i][j].write_count = 1;
-	}
+	if (create_lpage_info(&new, npages))
+		goto out_free;
 
 skip_lpage:
 