path: root/arch/ia64/kvm/kvm-ia64.c
author     Xiantao Zhang <xiantao.zhang@intel.com>  2008-10-23 02:56:44 -0400
committer  Avi Kivity <avi@redhat.com>              2008-12-31 09:51:49 -0500
commit     a917f7af3905953329361d29b6db78eb17b4d44c (patch)
tree       a873216c93f09af69f9a68fa831df822a3810fd8 /arch/ia64/kvm/kvm-ia64.c
parent     1d5a4d9b92028d9fe77da34037bd5a1ebfecc733 (diff)
KVM: ia64: Re-organize data structure of guests' data area
1. Increase the size of the data area to 64M.
2. Support more vcpus and memory: 128 vcpus and 256G of memory are
   supported for guests.
3. Add boundary checks for memory and vcpu allocation.

With this patch, a kvm guest's data area looks as follows:

 *
 *            +----------------------+  ------- KVM_VM_DATA_SIZE
 *            |     vcpu[n]'s data   |   |     ___________________KVM_STK_OFFSET
 *            |                      |   |    /                   |
 *            |        ..........    |   |   /vcpu's struct&stack |
 *            |        ..........    |   |  /---------------------|---- 0
 *            |     vcpu[5]'s data   |   | /        vpd           |
 *            |     vcpu[4]'s data   |   |/------------------------|
 *            |     vcpu[3]'s data   |   /        vtlb            |
 *            |     vcpu[2]'s data   |  /|------------------------|
 *            |     vcpu[1]'s data   |/  |        vhpt            |
 *            |     vcpu[0]'s data   |____________________________|
 *            +----------------------+   |
 *            |   memory dirty log   |   |
 *            +----------------------+   |
 *            |   vm's data struct   |   |
 *            +----------------------+   |
 *            |                      |   |
 *            |                      |   |
 *            |                      |   |
 *            |                      |   |
 *            |                      |   |
 *            |                      |   |
 *            |                      |   |
 *            |   vm's p2m table     |   |
 *            |                      |   |
 *            |                      |   |
 *            |                      |   |
 * vm's data->|                      |   |
 *            +----------------------+  ------- 0
 * To support large memory, needs to increase the size of p2m.
 * To support more vcpus, needs to ensure it has enough space to
 * hold vcpus' data.
 */

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
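For readers following the offsetof() conversions in the diff below, here is a
minimal sketch of what the reorganized container types plausibly look like.
The field names kvm_vm_struct, kvm_mem_dirty_log and vcpu_data[].vcpu_struct
are taken directly from the diff, and the ordering follows the diagram above;
the individual size macros are assumptions for illustration.

    /* Per-vcpu region: vhpt, vtlb and vpd first, then the kvm_vcpu
     * struct plus its VMM stack (KVM_STK_OFFSET bytes in total). */
    struct kvm_vcpu_data {
            char vcpu_vhpt[VHPT_SIZE];          /* vhpt at offset 0 (diagram) */
            char vcpu_vtlb[VTLB_SIZE];          /* per-vcpu vtlb */
            char vcpu_vpd[VPD_SIZE];            /* per-vcpu vpd */
            char vcpu_struct[VCPU_STRUCT_SIZE]; /* struct kvm_vcpu + stack */
    };

    /* The whole per-VM data area; vm_base points at offset 0. */
    struct kvm_vm_data {
            char kvm_p2m[KVM_P2M_SIZE];                     /* vm's p2m table */
            char kvm_vm_struct[KVM_VM_STRUCT_SIZE];         /* holds struct kvm */
            char kvm_mem_dirty_log[KVM_MEM_DIRTY_LOG_SIZE]; /* memory dirty log */
            struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS];  /* vcpu[0..n]'s data */
    };

With a layout like this, vm_base + offsetof(struct kvm_vm_data,
vcpu_data[id].vcpu_struct) in kvm_arch_vcpu_create() lands on vcpu id's
struct, and the p2m table can grow simply by enlarging KVM_P2M_SIZE, which is
the point made in the last lines of the diagram.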
Diffstat (limited to 'arch/ia64/kvm/kvm-ia64.c')
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c  60
1 file changed, 31 insertions(+), 29 deletions(-)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index af1464f7a6ad..43e45f6afcda 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -698,27 +698,24 @@ out:
 	return r;
 }
 
-/*
- * Allocate 16M memory for every vm to hold its specific data.
- * Its memory map is defined in kvm_host.h.
- */
 static struct kvm *kvm_alloc_kvm(void)
 {
 
 	struct kvm *kvm;
 	uint64_t vm_base;
 
+	BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE);
+
 	vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
 
 	if (!vm_base)
 		return ERR_PTR(-ENOMEM);
-	printk(KERN_DEBUG"kvm: VM data's base Address:0x%lx\n", vm_base);
 
-	/* Zero all pages before use! */
 	memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
-
-	kvm = (struct kvm *)(vm_base + KVM_VM_OFS);
+	kvm = (struct kvm *)(vm_base +
+			offsetof(struct kvm_vm_data, kvm_vm_struct));
 	kvm->arch.vm_base = vm_base;
+	printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base);
 
 	return kvm;
 }
@@ -760,21 +757,12 @@ static void kvm_build_io_pmt(struct kvm *kvm)
 
 static void kvm_init_vm(struct kvm *kvm)
 {
-	long vm_base;
-
 	BUG_ON(!kvm);
 
 	kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
 	kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
 	kvm->arch.vmm_init_rr = VMM_INIT_RR;
 
-	vm_base = kvm->arch.vm_base;
-	if (vm_base) {
-		kvm->arch.vhpt_base = vm_base + KVM_VHPT_OFS;
-		kvm->arch.vtlb_base = vm_base + KVM_VTLB_OFS;
-		kvm->arch.vpd_base = vm_base + KVM_VPD_OFS;
-	}
-
 	/*
 	 *Fill P2M entries for MMIO/IO ranges
 	 */
@@ -864,7 +852,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 		goto out;
 	r = copy_from_user(vcpu + 1, regs->saved_stack +
 			sizeof(struct kvm_vcpu),
-			IA64_STK_OFFSET - sizeof(struct kvm_vcpu));
+			KVM_STK_OFFSET - sizeof(struct kvm_vcpu));
 	if (r)
 		goto out;
 	vcpu->arch.exit_data =
@@ -1166,10 +1154,11 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	/*Set entry address for first run.*/
 	regs->cr_iip = PALE_RESET_ENTRY;
 
-	/*Initilize itc offset for vcpus*/
+	/*Initialize itc offset for vcpus*/
 	itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
-	for (i = 0; i < MAX_VCPU_NUM; i++) {
-		v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i);
+	for (i = 0; i < KVM_MAX_VCPUS; i++) {
+		v = (struct kvm_vcpu *)((char *)vcpu +
+				sizeof(struct kvm_vcpu_data) * i);
 		v->arch.itc_offset = itc_offset;
 		v->arch.last_itc = 0;
 	}
@@ -1183,7 +1172,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	vcpu->arch.apic->vcpu = vcpu;
 
 	p_ctx->gr[1] = 0;
-	p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + IA64_STK_OFFSET);
+	p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET);
 	p_ctx->gr[13] = (unsigned long)vmm_vcpu;
 	p_ctx->psr = 0x1008522000UL;
 	p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/
@@ -1218,12 +1207,12 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	vcpu->arch.hlt_timer.function = hlt_timer_fn;
 
 	vcpu->arch.last_run_cpu = -1;
-	vcpu->arch.vpd = (struct vpd *)VPD_ADDR(vcpu->vcpu_id);
+	vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id);
 	vcpu->arch.vsa_base = kvm_vsa_base;
 	vcpu->arch.__gp = kvm_vmm_gp;
 	vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
-	vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_ADDR(vcpu->vcpu_id);
-	vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_ADDR(vcpu->vcpu_id);
+	vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id);
+	vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id);
 	init_ptce_info(vcpu);
 
 	r = 0;
@@ -1273,12 +1262,22 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	int r;
 	int cpu;
 
+	BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2);
+
+	r = -EINVAL;
+	if (id >= KVM_MAX_VCPUS) {
+		printk(KERN_ERR"kvm: Can't configure vcpus > %ld",
+				KVM_MAX_VCPUS);
+		goto fail;
+	}
+
 	r = -ENOMEM;
 	if (!vm_base) {
 		printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
 		goto fail;
 	}
-	vcpu = (struct kvm_vcpu *)(vm_base + KVM_VCPU_OFS + VCPU_SIZE * id);
+	vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data,
+					vcpu_data[id].vcpu_struct));
 	vcpu->kvm = kvm;
 
 	cpu = get_cpu();
@@ -1396,7 +1395,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 			sizeof(union context));
 	if (r)
 		goto out;
-	r = copy_to_user(regs->saved_stack, (void *)vcpu, IA64_STK_OFFSET);
+	r = copy_to_user(regs->saved_stack, (void *)vcpu, KVM_STK_OFFSET);
 	if (r)
 		goto out;
 	SAVE_REGS(mp_state);
@@ -1457,6 +1456,9 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
 	unsigned long base_gfn = memslot->base_gfn;
 
+	if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
+		return -ENOMEM;
+
 	for (i = 0; i < npages; i++) {
 		pfn = gfn_to_pfn(kvm, base_gfn + i);
 		if (!kvm_is_mmio_pfn(pfn)) {
@@ -1631,8 +1633,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
 	struct kvm_memory_slot *memslot;
 	int r, i;
 	long n, base;
-	unsigned long *dirty_bitmap = (unsigned long *)((void *)kvm - KVM_VM_OFS
-			+ KVM_MEM_DIRTY_LOG_OFS);
+	unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
+			offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
 
 	r = -EINVAL;
 	if (log->slot >= KVM_MEMORY_SLOTS)