Diffstat (limited to 'arch/ia64/kvm/kvm-ia64.c')
-rw-r--r--	arch/ia64/kvm/kvm-ia64.c	92
1 file changed, 47 insertions, 45 deletions
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index a312c9e9b9ef..af1464f7a6ad 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -385,6 +385,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	struct kvm *kvm = vcpu->kvm;
 	struct call_data call_data;
 	int i;
+
 	call_data.ptc_g_data = p->u.ptc_g_data;
 
 	for (i = 0; i < KVM_MAX_VCPUS; i++) {
@@ -418,33 +419,41 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 	ktime_t kt;
 	long itc_diff;
 	unsigned long vcpu_now_itc;
-
 	unsigned long expires;
 	struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
 	unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
 	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
 
-	vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
+	if (irqchip_in_kernel(vcpu->kvm)) {
 
-	if (time_after(vcpu_now_itc, vpd->itm)) {
-		vcpu->arch.timer_check = 1;
-		return 1;
-	}
-	itc_diff = vpd->itm - vcpu_now_itc;
-	if (itc_diff < 0)
-		itc_diff = -itc_diff;
+		vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
 
-	expires = div64_u64(itc_diff, cyc_per_usec);
-	kt = ktime_set(0, 1000 * expires);
-	vcpu->arch.ht_active = 1;
-	hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
+		if (time_after(vcpu_now_itc, vpd->itm)) {
+			vcpu->arch.timer_check = 1;
+			return 1;
+		}
+		itc_diff = vpd->itm - vcpu_now_itc;
+		if (itc_diff < 0)
+			itc_diff = -itc_diff;
+
+		expires = div64_u64(itc_diff, cyc_per_usec);
+		kt = ktime_set(0, 1000 * expires);
+
+		down_read(&vcpu->kvm->slots_lock);
+		vcpu->arch.ht_active = 1;
+		hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
 
-	if (irqchip_in_kernel(vcpu->kvm)) {
 		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
 		kvm_vcpu_block(vcpu);
 		hrtimer_cancel(p_ht);
 		vcpu->arch.ht_active = 0;
 
+		if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
+			if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
+				vcpu->arch.mp_state =
+					KVM_MP_STATE_RUNNABLE;
+		up_read(&vcpu->kvm->slots_lock);
+
 		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
 			return -EINTR;
 		return 1;
@@ -484,10 +493,6 @@ static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
 static const int kvm_vti_max_exit_handlers =
 		sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
 
-static void kvm_prepare_guest_switch(struct kvm_vcpu *vcpu)
-{
-}
-
 static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
 {
 	struct exit_ctl_data *p_exit_data;
@@ -600,8 +605,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 again:
 	preempt_disable();
-
-	kvm_prepare_guest_switch(vcpu);
 	local_irq_disable();
 
 	if (signal_pending(current)) {
@@ -614,7 +617,7 @@ again:
 
 	vcpu->guest_mode = 1;
 	kvm_guest_enter();
-
+	down_read(&vcpu->kvm->slots_lock);
 	r = vti_vcpu_run(vcpu, kvm_run);
 	if (r < 0) {
 		local_irq_enable();
@@ -634,9 +637,8 @@ again:
 	 * But we need to prevent reordering, hence this barrier():
 	 */
 	barrier();
-
 	kvm_guest_exit();
-
+	up_read(&vcpu->kvm->slots_lock);
 	preempt_enable();
 
 	r = kvm_handle_exit(kvm_run, vcpu);
@@ -671,15 +673,16 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	vcpu_load(vcpu);
 
+	if (vcpu->sigset_active)
+		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
 		kvm_vcpu_block(vcpu);
-		vcpu_put(vcpu);
-		return -EAGAIN;
+		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+		r = -EAGAIN;
+		goto out;
 	}
 
-	if (vcpu->sigset_active)
-		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
-
 	if (vcpu->mmio_needed) {
 		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
 		kvm_set_mmio_data(vcpu);
@@ -687,7 +690,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		vcpu->mmio_needed = 0;
 	}
 	r = __vcpu_run(vcpu, kvm_run);
-
+out:
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
 
@@ -778,6 +781,9 @@ static void kvm_init_vm(struct kvm *kvm)
 	kvm_build_io_pmt(kvm);
 
 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
+
+	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
+	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
 }
 
 struct kvm *kvm_arch_create_vm(void)
@@ -941,9 +947,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
 			goto out;
 		if (irqchip_in_kernel(kvm)) {
 			mutex_lock(&kvm->lock);
-			kvm_ioapic_set_irq(kvm->arch.vioapic,
-					irq_event.irq,
-					irq_event.level);
+			kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
+				    irq_event.irq, irq_event.level);
 			mutex_unlock(&kvm->lock);
 			r = 0;
 		}
@@ -1123,15 +1128,16 @@ static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
 	wait_queue_head_t *q;
 
 	vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
+	q = &vcpu->wq;
+
 	if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
 		goto out;
 
-	q = &vcpu->wq;
-	if (waitqueue_active(q)) {
-		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+	if (waitqueue_active(q))
 		wake_up_interruptible(q);
-	}
+
 out:
+	vcpu->arch.timer_fired = 1;
 	vcpu->arch.timer_check = 1;
 	return HRTIMER_NORESTART;
 }
@@ -1700,12 +1706,14 @@ static void vcpu_kick_intr(void *info)
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 {
 	int ipi_pcpu = vcpu->cpu;
+	int cpu = get_cpu();
 
 	if (waitqueue_active(&vcpu->wq))
 		wake_up_interruptible(&vcpu->wq);
 
-	if (vcpu->guest_mode)
+	if (vcpu->guest_mode && cpu != ipi_pcpu)
 		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
+	put_cpu();
 }
 
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
@@ -1715,13 +1723,7 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
 
 	if (!test_and_set_bit(vec, &vpd->irr[0])) {
 		vcpu->arch.irq_new_pending = 1;
-		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
-			kvm_vcpu_kick(vcpu);
-		else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
-			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-			if (waitqueue_active(&vcpu->wq))
-				wake_up_interruptible(&vcpu->wq);
-		}
+		kvm_vcpu_kick(vcpu);
 		return 1;
 	}
 	return 0;
@@ -1791,7 +1793,7 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
-	return 0;
+	return vcpu->arch.timer_fired;
 }
 
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)