Diffstat (limited to 'arch/ia64/kvm/kvm-ia64.c')

-rw-r--r--   arch/ia64/kvm/kvm-ia64.c   110

1 file changed, 49 insertions(+), 61 deletions(-)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index af1464f7a6ad..4e586f6110aa 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -31,6 +31,7 @@
 #include <linux/bitops.h>
 #include <linux/hrtimer.h>
 #include <linux/uaccess.h>
+#include <linux/iommu.h>
 #include <linux/intel-iommu.h>
 
 #include <asm/pgtable.h>
@@ -180,7 +181,6 @@ int kvm_dev_ioctl_check_extension(long ext)
 
        switch (ext) {
        case KVM_CAP_IRQCHIP:
-       case KVM_CAP_USER_MEMORY:
        case KVM_CAP_MP_STATE:
 
                r = 1;
@@ -189,7 +189,7 @@ int kvm_dev_ioctl_check_extension(long ext)
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
        case KVM_CAP_IOMMU:
-               r = intel_iommu_found();
+               r = iommu_found();
                break;
        default:
                r = 0;
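The capability check now reports through the generic IOMMU layer (iommu_found(), matching the new <linux/iommu.h> include above) instead of the Intel-specific intel_iommu_found(). For orientation, a minimal userspace probe of this capability — hypothetical code, not part of the patch — would look like:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm_fd = open("/dev/kvm", O_RDWR);

        if (kvm_fd < 0)
                return 1;
        /* Lands in kvm_dev_ioctl_check_extension(); returns iommu_found(). */
        printf("KVM_CAP_IOMMU: %d\n",
               ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_IOMMU));
        return 0;
}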
@@ -439,7 +439,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
        expires = div64_u64(itc_diff, cyc_per_usec);
        kt = ktime_set(0, 1000 * expires);
 
-       down_read(&vcpu->kvm->slots_lock);
        vcpu->arch.ht_active = 1;
        hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
 
@@ -452,7 +451,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
        if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
                vcpu->arch.mp_state =
                        KVM_MP_STATE_RUNNABLE;
-       up_read(&vcpu->kvm->slots_lock);
 
        if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
                return -EINTR;
@@ -476,6 +474,13 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu,
        return 1;
 }
 
+static int handle_vcpu_debug(struct kvm_vcpu *vcpu,
+                               struct kvm_run *kvm_run)
+{
+       printk("VMM: %s", vcpu->arch.log_buf);
+       return 1;
+}
+
 static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
                struct kvm_run *kvm_run) = {
        [EXIT_REASON_VM_PANIC]              = handle_vm_error,
@@ -487,6 +492,7 @@ static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
        [EXIT_REASON_EXTERNAL_INTERRUPT]    = handle_external_interrupt,
        [EXIT_REASON_IPI]                   = handle_ipi,
        [EXIT_REASON_PTC_G]                 = handle_global_purge,
+       [EXIT_REASON_DEBUG]                 = handle_vcpu_debug,
 
 };
 
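For context, this table is indexed by the hardware exit reason, which is how the new EXIT_REASON_DEBUG entry routes VMM log-flush exits to handle_vcpu_debug(). A sketch of the dispatcher, paraphrased from the pattern this file already uses — treat details as illustrative rather than verbatim:

/* Illustrative dispatch; bounds and NULL checks as in the real file. */
static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
        u32 exit_reason = kvm_get_exit_reason(vcpu);

        vcpu->arch.last_exit = exit_reason;
        if (exit_reason < kvm_vti_max_exit_handlers
                        && kvm_vti_exit_handlers[exit_reason])
                return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run);

        kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
        kvm_run->hw.hardware_exit_reason = exit_reason;
        return 0;
}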
@@ -698,27 +704,24 @@ out:
        return r;
 }
 
-/*
- * Allocate 16M memory for every vm to hold its specific data.
- * Its memory map is defined in kvm_host.h.
- */
 static struct kvm *kvm_alloc_kvm(void)
 {
 
        struct kvm *kvm;
        uint64_t vm_base;
 
+       BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE);
+
        vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
 
        if (!vm_base)
                return ERR_PTR(-ENOMEM);
-       printk(KERN_DEBUG"kvm: VM data's base Address:0x%lx\n", vm_base);
 
-       /* Zero all pages before use! */
        memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
-
-       kvm = (struct kvm *)(vm_base + KVM_VM_OFS);
+       kvm = (struct kvm *)(vm_base +
+                       offsetof(struct kvm_vm_data, kvm_vm_struct));
        kvm->arch.vm_base = vm_base;
+       printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base);
 
        return kvm;
 }
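The hand-maintained KVM_VM_OFS-style constants give way to offsetof() on a struct kvm_vm_data describing the whole per-VM blob, with the new BUG_ON catching any case where struct kvm outgrows its slot. A sketch of that layout, inferred from the members this patch references (kvm_vm_struct, kvm_mem_dirty_log, vcpu_data[id].vcpu_struct) — the authoritative definition, including exact field order and sizes, lives in asm/kvm_host.h:

/* Sketch only; the real definition is in asm/kvm_host.h. */
struct kvm_vcpu_data {
        char vcpu_vhpt[VHPT_SIZE];
        char vcpu_vtlb[VTLB_SIZE];
        char vcpu_vpd[VPD_SIZE];
        char vcpu_struct[VCPU_STRUCT_SIZE];
};

struct kvm_vm_data {
        char kvm_p2m[KVM_P2M_SIZE];
        char kvm_vm_struct[KVM_VM_STRUCT_SIZE];
        char kvm_mem_dirty_log[KVM_MEM_DIRTY_LOG_SIZE];
        struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS];
};

Deriving every offset from the struct keeps the layout self-consistent instead of depending on constants that must be updated by hand whenever a field moves.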
@@ -760,21 +763,12 @@ static void kvm_build_io_pmt(struct kvm *kvm)
 
 static void kvm_init_vm(struct kvm *kvm)
 {
-       long vm_base;
-
        BUG_ON(!kvm);
 
        kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
        kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
        kvm->arch.vmm_init_rr = VMM_INIT_RR;
 
-       vm_base = kvm->arch.vm_base;
-       if (vm_base) {
-               kvm->arch.vhpt_base = vm_base + KVM_VHPT_OFS;
-               kvm->arch.vtlb_base = vm_base + KVM_VTLB_OFS;
-               kvm->arch.vpd_base = vm_base + KVM_VPD_OFS;
-       }
-
        /*
         *Fill P2M entries for MMIO/IO ranges
         */
@@ -838,9 +832,8 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
 
 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-       int i;
        struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
-       int r;
+       int i;
 
        vcpu_load(vcpu);
 
@@ -857,18 +850,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 
        vpd->vpr = regs->vpd.vpr;
 
-       r = -EFAULT;
-       r = copy_from_user(&vcpu->arch.guest, regs->saved_guest,
-                                               sizeof(union context));
-       if (r)
-               goto out;
-       r = copy_from_user(vcpu + 1, regs->saved_stack +
-                       sizeof(struct kvm_vcpu),
-                       IA64_STK_OFFSET - sizeof(struct kvm_vcpu));
-       if (r)
-               goto out;
-       vcpu->arch.exit_data =
-               ((struct kvm_vcpu *)(regs->saved_stack))->arch.exit_data;
+       memcpy(&vcpu->arch.guest, &regs->saved_guest, sizeof(union context));
 
        RESTORE_REGS(mp_state);
        RESTORE_REGS(vmm_rr);
@@ -902,9 +884,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        set_bit(KVM_REQ_RESUME, &vcpu->requests);
 
        vcpu_put(vcpu);
-       r = 0;
-out:
-       return r;
+
+       return 0;
 }
 
 long kvm_arch_vm_ioctl(struct file *filp,
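The copy_from_user() calls can go because saved_guest is now an embedded member of struct kvm_regs rather than a user-space pointer (and the separate saved_stack copy is dropped entirely); the generic vcpu ioctl path already moves the whole struct across the user/kernel boundary before the arch hook runs. A simplified sketch of that caller — not verbatim kvm_main.c of this era, shown only to make the reasoning concrete:

/* KVM_SET_REGS path in virt/kvm/kvm_main.c, heavily simplified. */
struct kvm_regs *kvm_regs = kzalloc(sizeof(*kvm_regs), GFP_KERNEL);

if (!kvm_regs)
        return -ENOMEM;
if (copy_from_user(kvm_regs, argp, sizeof(*kvm_regs)))
        r = -EFAULT;
else
        /* regs->saved_guest is already kernel memory: memcpy suffices. */
        r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
kfree(kvm_regs);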
@@ -1166,10 +1147,11 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        /*Set entry address for first run.*/
        regs->cr_iip = PALE_RESET_ENTRY;
 
-       /*Initilize itc offset for vcpus*/
+       /*Initialize itc offset for vcpus*/
        itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
-       for (i = 0; i < MAX_VCPU_NUM; i++) {
-               v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i);
+       for (i = 0; i < KVM_MAX_VCPUS; i++) {
+               v = (struct kvm_vcpu *)((char *)vcpu +
+                               sizeof(struct kvm_vcpu_data) * i);
                v->arch.itc_offset = itc_offset;
                v->arch.last_itc = 0;
        }
@@ -1183,7 +1165,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        vcpu->arch.apic->vcpu = vcpu;
 
        p_ctx->gr[1] = 0;
-       p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + IA64_STK_OFFSET);
+       p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET);
        p_ctx->gr[13] = (unsigned long)vmm_vcpu;
        p_ctx->psr = 0x1008522000UL;
        p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/
@@ -1218,12 +1200,12 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        vcpu->arch.hlt_timer.function = hlt_timer_fn;
 
        vcpu->arch.last_run_cpu = -1;
-       vcpu->arch.vpd = (struct vpd *)VPD_ADDR(vcpu->vcpu_id);
+       vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id);
        vcpu->arch.vsa_base = kvm_vsa_base;
        vcpu->arch.__gp = kvm_vmm_gp;
        vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
-       vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_ADDR(vcpu->vcpu_id);
-       vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_ADDR(vcpu->vcpu_id);
+       vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id);
+       vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id);
        init_ptce_info(vcpu);
 
        r = 0;
@@ -1273,12 +1255,22 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
        int r;
        int cpu;
 
+       BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2);
+
+       r = -EINVAL;
+       if (id >= KVM_MAX_VCPUS) {
+               printk(KERN_ERR"kvm: Can't configure vcpus > %ld",
+                               KVM_MAX_VCPUS);
+               goto fail;
+       }
+
        r = -ENOMEM;
        if (!vm_base) {
                printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
                goto fail;
        }
-       vcpu = (struct kvm_vcpu *)(vm_base + KVM_VCPU_OFS + VCPU_SIZE * id);
+       vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data,
+                                       vcpu_data[id].vcpu_struct));
        vcpu->kvm = kvm;
 
        cpu = get_cpu();
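The same arithmetic as the offsetof() expression above, spelled out for clarity (illustrative only, assuming the kvm_vm_data sketch shown earlier):

/* vcpu 'id' occupies a fixed slot inside the per-VM data blob. */
struct kvm_vm_data *vm_data = (struct kvm_vm_data *)vm_base;
struct kvm_vcpu *v =
        (struct kvm_vcpu *)vm_data->vcpu_data[id].vcpu_struct;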
@@ -1374,9 +1366,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-       int i;
-       int r;
        struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+       int i;
+
        vcpu_load(vcpu);
 
        for (i = 0; i < 16; i++) {
@@ -1391,14 +1383,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        regs->vpd.vpsr = vpd->vpsr;
        regs->vpd.vpr = vpd->vpr;
 
-       r = -EFAULT;
-       r = copy_to_user(regs->saved_guest, &vcpu->arch.guest,
-                       sizeof(union context));
-       if (r)
-               goto out;
-       r = copy_to_user(regs->saved_stack, (void *)vcpu, IA64_STK_OFFSET);
-       if (r)
-               goto out;
+       memcpy(&regs->saved_guest, &vcpu->arch.guest, sizeof(union context));
+
        SAVE_REGS(mp_state);
        SAVE_REGS(vmm_rr);
        memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
@@ -1426,10 +1412,9 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        SAVE_REGS(metaphysical_saved_rr4);
        SAVE_REGS(fp_psr);
        SAVE_REGS(saved_gp);
+
        vcpu_put(vcpu);
-       r = 0;
-out:
-       return r;
+       return 0;
 }
 
 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
@@ -1457,6 +1442,9 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
        struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
        unsigned long base_gfn = memslot->base_gfn;
 
+       if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
+               return -ENOMEM;
+
        for (i = 0; i < npages; i++) {
                pfn = gfn_to_pfn(kvm, base_gfn + i);
                if (!kvm_is_mmio_pfn(pfn)) {
@@ -1631,8 +1619,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
        struct kvm_memory_slot *memslot;
        int r, i;
        long n, base;
-       unsigned long *dirty_bitmap = (unsigned long *)((void *)kvm - KVM_VM_OFS
-                       + KVM_MEM_DIRTY_LOG_OFS);
+       unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
+                       offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
 
        r = -EINVAL;
        if (log->slot >= KVM_MEMORY_SLOTS)
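The bitmap this function syncs is ultimately consumed through the standard dirty-log ioctl. A hypothetical userspace call, for orientation (vm_fd and bitmap_buf are assumed to exist; the buffer must cover the slot's pages, one bit per page):

struct kvm_dirty_log log = {
        .slot = 0,
        .dirty_bitmap = bitmap_buf,
};

if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0)
        perror("KVM_GET_DIRTY_LOG");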