author     Linus Torvalds <torvalds@linux-foundation.org>   2008-10-28 12:50:11 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-10-28 12:50:11 -0400
commit     a1865769254dd4eefbc1e857d17bc2a77d5f8580 (patch)
tree       0e323458d99e2e1222da0157e80fc44e4313f5e2
parent     0d8762c9ee40cf83d5dbf3a22843bc566912b592 (diff)
parent     e45948b071d8be59044ac232d99a2ca83fd93266 (diff)
Merge branch 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm
* 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm:
KVM: ia64: Makefile fix for forcing to re-generate asm-offsets.h
KVM: Future-proof device assignment ABI
KVM: ia64: Fix halt emulation logic
KVM: Fix guest shared interrupt with in-kernel irqchip
KVM: MMU: sync root on paravirt TLB flush
-rw-r--r--  arch/ia64/include/asm/kvm_host.h |  6
-rw-r--r--  arch/ia64/kvm/Makefile           |  8
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c         | 80
-rw-r--r--  arch/ia64/kvm/kvm_fw.c           |  9
-rw-r--r--  arch/ia64/kvm/process.c          |  2
-rw-r--r--  arch/x86/include/asm/kvm_host.h  |  3
-rw-r--r--  arch/x86/kvm/i8254.c             | 11
-rw-r--r--  arch/x86/kvm/i8254.h             |  1
-rw-r--r--  arch/x86/kvm/mmu.c               |  1
-rw-r--r--  arch/x86/kvm/x86.c               |  6
-rw-r--r--  include/linux/kvm.h              |  6
-rw-r--r--  include/linux/kvm_host.h         |  7
-rw-r--r--  virt/kvm/irq_comm.c              | 42
-rw-r--r--  virt/kvm/kvm_main.c              | 12
14 files changed, 138 insertions, 56 deletions
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 85db124d37f6..c60d324da540 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -365,7 +365,8 @@ struct kvm_vcpu_arch {
         long itc_offset;
         unsigned long itc_check;
         unsigned long timer_check;
-        unsigned long timer_pending;
+        unsigned int timer_pending;
+        unsigned int timer_fired;
 
         unsigned long vrr[8];
         unsigned long ibr[8];
@@ -417,6 +418,9 @@ struct kvm_arch {
         struct list_head assigned_dev_head;
         struct dmar_domain *intel_iommu_domain;
         struct hlist_head irq_ack_notifier_list;
+
+        unsigned long irq_sources_bitmap;
+        unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
 };
 
 union cpuid3_t {
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
index cf37f8f490c0..3ab4d6d50704 100644
--- a/arch/ia64/kvm/Makefile
+++ b/arch/ia64/kvm/Makefile
@@ -29,13 +29,18 @@ define cmd_offsets
          echo ""; \
          echo "#endif" ) > $@
 endef
+
 # We use internal rules to avoid the "is up to date" message from make
-arch/ia64/kvm/asm-offsets.s: arch/ia64/kvm/asm-offsets.c
+arch/ia64/kvm/asm-offsets.s: arch/ia64/kvm/asm-offsets.c \
+                $(wildcard $(srctree)/arch/ia64/include/asm/*.h)\
+                $(wildcard $(srctree)/include/linux/*.h)
         $(call if_changed_dep,cc_s_c)
 
 $(obj)/$(offsets-file): arch/ia64/kvm/asm-offsets.s
         $(call cmd,offsets)
 
+FORCE : $(obj)/$(offsets-file)
+
 #
 # Makefile for Kernel-based Virtual Machine module
 #
@@ -53,7 +58,6 @@ endif
 kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
 obj-$(CONFIG_KVM) += kvm.o
 
-FORCE : $(obj)/$(offsets-file)
 EXTRA_CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127
 kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \
         vtlb.o process.o
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index a312c9e9b9ef..3caac477de9e 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -385,6 +385,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         struct kvm *kvm = vcpu->kvm;
         struct call_data call_data;
         int i;
+
         call_data.ptc_g_data = p->u.ptc_g_data;
 
         for (i = 0; i < KVM_MAX_VCPUS; i++) {
@@ -418,33 +419,41 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
         ktime_t kt;
         long itc_diff;
         unsigned long vcpu_now_itc;
-
         unsigned long expires;
         struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
         unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
         struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
 
-        vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
+        if (irqchip_in_kernel(vcpu->kvm)) {
 
-        if (time_after(vcpu_now_itc, vpd->itm)) {
-                vcpu->arch.timer_check = 1;
-                return 1;
-        }
-        itc_diff = vpd->itm - vcpu_now_itc;
-        if (itc_diff < 0)
-                itc_diff = -itc_diff;
+                vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
 
-        expires = div64_u64(itc_diff, cyc_per_usec);
-        kt = ktime_set(0, 1000 * expires);
-        vcpu->arch.ht_active = 1;
-        hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
+                if (time_after(vcpu_now_itc, vpd->itm)) {
+                        vcpu->arch.timer_check = 1;
+                        return 1;
+                }
+                itc_diff = vpd->itm - vcpu_now_itc;
+                if (itc_diff < 0)
+                        itc_diff = -itc_diff;
+
+                expires = div64_u64(itc_diff, cyc_per_usec);
+                kt = ktime_set(0, 1000 * expires);
+
+                down_read(&vcpu->kvm->slots_lock);
+                vcpu->arch.ht_active = 1;
+                hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
 
-        if (irqchip_in_kernel(vcpu->kvm)) {
                 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
                 kvm_vcpu_block(vcpu);
                 hrtimer_cancel(p_ht);
                 vcpu->arch.ht_active = 0;
 
+                if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
+                        if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
+                                vcpu->arch.mp_state =
+                                        KVM_MP_STATE_RUNNABLE;
+                up_read(&vcpu->kvm->slots_lock);
+
                 if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
                         return -EINTR;
                 return 1;
@@ -484,10 +493,6 @@ static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
 static const int kvm_vti_max_exit_handlers =
         sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
 
-static void kvm_prepare_guest_switch(struct kvm_vcpu *vcpu)
-{
-}
-
 static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
 {
         struct exit_ctl_data *p_exit_data;
@@ -600,8 +605,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 again:
         preempt_disable();
-
-        kvm_prepare_guest_switch(vcpu);
         local_irq_disable();
 
         if (signal_pending(current)) {
@@ -614,7 +617,7 @@ again:
 
         vcpu->guest_mode = 1;
         kvm_guest_enter();
-
+        down_read(&vcpu->kvm->slots_lock);
         r = vti_vcpu_run(vcpu, kvm_run);
         if (r < 0) {
                 local_irq_enable();
@@ -634,9 +637,8 @@ again:
          * But we need to prevent reordering, hence this barrier():
          */
         barrier();
-
         kvm_guest_exit();
-
+        up_read(&vcpu->kvm->slots_lock);
         preempt_enable();
 
         r = kvm_handle_exit(kvm_run, vcpu);
@@ -673,6 +675,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
                 kvm_vcpu_block(vcpu);
+                clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                 vcpu_put(vcpu);
                 return -EAGAIN;
         }
@@ -778,6 +781,9 @@ static void kvm_init_vm(struct kvm *kvm)
         kvm_build_io_pmt(kvm);
 
         INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
+
+        /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
+        set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
 }
 
 struct kvm *kvm_arch_create_vm(void)
@@ -941,9 +947,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
                         goto out;
                 if (irqchip_in_kernel(kvm)) {
                         mutex_lock(&kvm->lock);
-                        kvm_ioapic_set_irq(kvm->arch.vioapic,
-                                        irq_event.irq,
-                                        irq_event.level);
+                        kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
+                                        irq_event.irq, irq_event.level);
                         mutex_unlock(&kvm->lock);
                         r = 0;
                 }
@@ -1123,15 +1128,16 @@ static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
         wait_queue_head_t *q;
 
         vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
+        q = &vcpu->wq;
+
         if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
                 goto out;
 
-        q = &vcpu->wq;
-        if (waitqueue_active(q)) {
-                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+        if (waitqueue_active(q))
                 wake_up_interruptible(q);
-        }
+
 out:
+        vcpu->arch.timer_fired = 1;
         vcpu->arch.timer_check = 1;
         return HRTIMER_NORESTART;
 }
@@ -1700,12 +1706,14 @@ static void vcpu_kick_intr(void *info)
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 {
         int ipi_pcpu = vcpu->cpu;
+        int cpu = get_cpu();
 
         if (waitqueue_active(&vcpu->wq))
                 wake_up_interruptible(&vcpu->wq);
 
-        if (vcpu->guest_mode)
+        if (vcpu->guest_mode && cpu != ipi_pcpu)
                 smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
+        put_cpu();
 }
 
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
@@ -1715,13 +1723,7 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
 
         if (!test_and_set_bit(vec, &vpd->irr[0])) {
                 vcpu->arch.irq_new_pending = 1;
-                if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
-                        kvm_vcpu_kick(vcpu);
-                else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
-                        vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-                        if (waitqueue_active(&vcpu->wq))
-                                wake_up_interruptible(&vcpu->wq);
-                }
+                kvm_vcpu_kick(vcpu);
                 return 1;
         }
         return 0;
@@ -1791,7 +1793,7 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
-        return 0;
+        return vcpu->arch.timer_fired;
 }
 
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
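For reference, the wakeup-time computation that the halt-emulation fix keeps inside the new irqchip_in_kernel() branch is a plain cycles-to-nanoseconds conversion: the remaining ITC cycles are divided by cycles-per-microsecond (div64_u64), then scaled by 1000 for ktime_set(). The standalone sketch below repeats that arithmetic with made-up numbers; it is illustrative only and not part of the patch.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Illustrative values: 4,000,000 cycles left on a 1.6 GHz ITC. */
        uint64_t itc_diff = 4000000;      /* cycles until the guest's next timer tick */
        uint64_t cyc_per_usec = 1600;     /* ITC cycles per microsecond */

        uint64_t expires_us = itc_diff / cyc_per_usec;   /* div64_u64(itc_diff, cyc_per_usec) */
        uint64_t expires_ns = 1000 * expires_us;         /* ktime_set(0, 1000 * expires) */

        /* Prints: sleep for 2500 us (2500000 ns) */
        printf("sleep for %llu us (%llu ns)\n",
               (unsigned long long)expires_us,
               (unsigned long long)expires_ns);
        return 0;
}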
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
index 0c69d9ec92d4..cb7600bdff9d 100644
--- a/arch/ia64/kvm/kvm_fw.c
+++ b/arch/ia64/kvm/kvm_fw.c
@@ -286,6 +286,12 @@ static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu)
         return index;
 }
 
+static void prepare_for_halt(struct kvm_vcpu *vcpu)
+{
+        vcpu->arch.timer_pending = 1;
+        vcpu->arch.timer_fired = 0;
+}
+
 int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 
@@ -304,11 +310,10 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
                 break;
         case PAL_HALT_LIGHT:
         {
-                vcpu->arch.timer_pending = 1;
                 INIT_PAL_STATUS_SUCCESS(result);
+                prepare_for_halt(vcpu);
                 if (kvm_highest_pending_irq(vcpu) == -1)
                         ret = kvm_emulate_halt(vcpu);
-
         }
                 break;
 
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index 3417783ae164..800817307b7b 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -713,7 +713,7 @@ void leave_hypervisor_tail(void)
                 if (!(VCPU(v, itv) & (1 << 16))) {
                         vcpu_pend_interrupt(v, VCPU(v, itv)
                                 & 0xff);
-                VMX(v, itc_check) = 0;
+                        VMX(v, itc_check) = 0;
                 } else {
                         v->arch.timer_pending = 1;
                 }
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 65679d006337..8346be87cfa1 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -364,6 +364,9 @@ struct kvm_arch{
 
         struct page *ept_identity_pagetable;
         bool ept_identity_pagetable_done;
+
+        unsigned long irq_sources_bitmap;
+        unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
 };
 
 struct kvm_vm_stat {
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 11c6725fb798..8772dc946823 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -545,6 +545,12 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm)
         if (!pit)
                 return NULL;
 
+        mutex_lock(&kvm->lock);
+        pit->irq_source_id = kvm_request_irq_source_id(kvm);
+        mutex_unlock(&kvm->lock);
+        if (pit->irq_source_id < 0)
+                return NULL;
+
         mutex_init(&pit->pit_state.lock);
         mutex_lock(&pit->pit_state.lock);
         spin_lock_init(&pit->pit_state.inject_lock);
@@ -587,6 +593,7 @@ void kvm_free_pit(struct kvm *kvm)
                 mutex_lock(&kvm->arch.vpit->pit_state.lock);
                 timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
                 hrtimer_cancel(timer);
+                kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id);
                 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
                 kfree(kvm->arch.vpit);
         }
@@ -595,8 +602,8 @@ void kvm_free_pit(struct kvm *kvm)
 static void __inject_pit_timer_intr(struct kvm *kvm)
 {
         mutex_lock(&kvm->lock);
-        kvm_set_irq(kvm, 0, 1);
-        kvm_set_irq(kvm, 0, 0);
+        kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1);
+        kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0);
         mutex_unlock(&kvm->lock);
 }
 
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
index e436d4983aa1..4178022b97aa 100644
--- a/arch/x86/kvm/i8254.h
+++ b/arch/x86/kvm/i8254.h
@@ -44,6 +44,7 @@ struct kvm_pit {
         struct kvm_io_device speaker_dev;
         struct kvm *kvm;
         struct kvm_kpit_state pit_state;
+        int irq_source_id;
 };
 
 #define KVM_PIT_BASE_ADDRESS 0x40
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 99c239c5c0ac..2a5e64881d9b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2634,6 +2634,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
         kvm_x86_ops->tlb_flush(vcpu);
+        set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
         return 1;
 }
 
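The one-line mmu.c change above relies on KVM's deferred-request mechanism: rather than syncing shadow roots inline, the paravirt TLB-flush path sets a request bit in vcpu->requests that the vcpu loop services before the guest runs again. Below is a small user-space sketch of that set / test-and-clear pattern; the bit values and helper names are illustrative, not the kernel's API.

#include <stdio.h>

#define REQ_TLB_FLUSH 0        /* illustrative request numbers */
#define REQ_MMU_SYNC  7

static unsigned long requests;

static void make_request(int req)
{
        requests |= 1UL << req;             /* set_bit(req, &vcpu->requests) */
}

static int test_and_clear_request(int req)
{
        int was_set = (requests >> req) & 1;
        requests &= ~(1UL << req);          /* test_and_clear_bit(...) */
        return was_set;
}

int main(void)
{
        make_request(REQ_MMU_SYNC);         /* what the hypercall handler now does */

        /* what the vcpu entry path does before re-entering the guest */
        if (test_and_clear_request(REQ_MMU_SYNC))
                printf("sync shadow page table roots\n");
        if (test_and_clear_request(REQ_TLB_FLUSH))
                printf("flush the TLB\n");
        return 0;
}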
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4f0677d1eae8..f1f8ff2f1fa2 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1742,7 +1742,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
                         goto out;
                 if (irqchip_in_kernel(kvm)) {
                         mutex_lock(&kvm->lock);
-                        kvm_set_irq(kvm, irq_event.irq, irq_event.level);
+                        kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
+                                    irq_event.irq, irq_event.level);
                         mutex_unlock(&kvm->lock);
                         r = 0;
                 }
@@ -4013,6 +4014,9 @@ struct kvm *kvm_arch_create_vm(void)
         INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
         INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
 
+        /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
+        set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
+
         return kvm;
 }
 
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 797fcd781242..f18b86fa8655 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -489,6 +489,9 @@ struct kvm_assigned_pci_dev {
         __u32 busnr;
         __u32 devfn;
         __u32 flags;
+        union {
+                __u32 reserved[12];
+        };
 };
 
 struct kvm_assigned_irq {
@@ -496,6 +499,9 @@ struct kvm_assigned_irq {
         __u32 host_irq;
         __u32 guest_irq;
         __u32 flags;
+        union {
+                __u32 reserved[12];
+        };
 };
 
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
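The "future-proof device assignment ABI" change above reserves 48 bytes in each ioctl struct through an anonymous union. Because the ioctl command number encodes the struct size, keeping sizeof() stable lets later kernels add fields inside the union without breaking existing userspace. The sketch below demonstrates the pattern with purely hypothetical struct and field names (it is not the real KVM ABI); it needs C11 for the anonymous union/struct members.

#include <assert.h>
#include <stdint.h>

struct demo_assigned_irq_v1 {          /* shape of the padded struct as merged */
        uint32_t assigned_dev_id;
        uint32_t host_irq;
        uint32_t guest_irq;
        uint32_t flags;
        union {
                uint32_t reserved[12];
        };
};

struct demo_assigned_irq_v2 {          /* a hypothetical future revision */
        uint32_t assigned_dev_id;
        uint32_t host_irq;
        uint32_t guest_irq;
        uint32_t flags;
        union {
                struct {
                        uint32_t msi_addr_lo;   /* new fields live inside the union */
                        uint32_t msi_addr_hi;
                };
                uint32_t reserved[12];
        };
};

int main(void)
{
        /* Same size either way, so old and new userspace stay compatible. */
        assert(sizeof(struct demo_assigned_irq_v1) ==
               sizeof(struct demo_assigned_irq_v2));
        return 0;
}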
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 3833c48fae3a..bb92be2153bc 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -37,6 +37,8 @@
 #define KVM_REQ_UNHALT 6
 #define KVM_REQ_MMU_SYNC 7
 
+#define KVM_USERSPACE_IRQ_SOURCE_ID 0
+
 struct kvm_vcpu;
 extern struct kmem_cache *kvm_vcpu_cache;
 
@@ -306,15 +308,18 @@ struct kvm_assigned_dev_kernel {
         int host_irq;
         int guest_irq;
         int irq_requested;
+        int irq_source_id;
         struct pci_dev *dev;
         struct kvm *kvm;
 };
-void kvm_set_irq(struct kvm *kvm, int irq, int level);
+void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                    struct kvm_irq_ack_notifier *kian);
 void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                      struct kvm_irq_ack_notifier *kian);
+int kvm_request_irq_source_id(struct kvm *kvm);
+void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
 
 #ifdef CONFIG_DMAR
 int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index d0169f5e6047..55ad76ee2d09 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -25,15 +25,23 @@
 #include "ioapic.h"
 
 /* This should be called with the kvm->lock mutex held */
-void kvm_set_irq(struct kvm *kvm, int irq, int level)
+void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
 {
+        unsigned long *irq_state = (unsigned long *)&kvm->arch.irq_states[irq];
+
+        /* Logical OR for level trig interrupt */
+        if (level)
+                set_bit(irq_source_id, irq_state);
+        else
+                clear_bit(irq_source_id, irq_state);
+
         /* Not possible to detect if the guest uses the PIC or the
          * IOAPIC. So set the bit in both. The guest will ignore
          * writes to the unused one.
          */
-        kvm_ioapic_set_irq(kvm->arch.vioapic, irq, level);
+        kvm_ioapic_set_irq(kvm->arch.vioapic, irq, !!(*irq_state));
 #ifdef CONFIG_X86
-        kvm_pic_set_irq(pic_irqchip(kvm), irq, level);
+        kvm_pic_set_irq(pic_irqchip(kvm), irq, !!(*irq_state));
 #endif
 }
 
@@ -58,3 +66,31 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
 {
         hlist_del(&kian->link);
 }
+
+/* The caller must hold kvm->lock mutex */
+int kvm_request_irq_source_id(struct kvm *kvm)
+{
+        unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
+        int irq_source_id = find_first_zero_bit(bitmap,
+                sizeof(kvm->arch.irq_sources_bitmap));
+        if (irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
+                printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n");
+                irq_source_id = -EFAULT;
+        } else
+                set_bit(irq_source_id, bitmap);
+        return irq_source_id;
+}
+
+void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
+{
+        int i;
+
+        if (irq_source_id <= 0 ||
+            irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
+                printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
+                return;
+        }
+        for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
+                clear_bit(irq_source_id, &kvm->arch.irq_states[i]);
+        clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
+}
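The irq_comm.c change above is the core of the shared-interrupt fix: previously kvm_set_irq() took a single level and the last caller simply overwrote the line, so an assigned device deasserting its interrupt could mask a level still held by userspace or the PIT. Now every source owns one bit in a per-pin state word, and the PIC/IOAPIC see the pin high as long as any bit is set. The following user-space simulation sketches that OR semantics together with the source-id allocation; names, the pin count, and the main() scenario are illustrative, not the kernel API.

#include <stdio.h>

#define NR_PINS      24
#define USERSPACE_ID 0          /* bit 0 is reserved for userspace, as in the patch */

static unsigned long irq_sources_bitmap = 1UL << USERSPACE_ID;
static unsigned long irq_states[NR_PINS];

static int request_irq_source_id(void)
{
        int id;

        /* find-first-zero-bit over the source bitmap, then claim it */
        for (id = 0; id < (int)(8 * sizeof(irq_sources_bitmap)); id++)
                if (!(irq_sources_bitmap & (1UL << id))) {
                        irq_sources_bitmap |= 1UL << id;
                        return id;
                }
        return -1;              /* all source ids are in use */
}

static void set_irq(int source_id, int pin, int level)
{
        if (level)
                irq_states[pin] |= 1UL << source_id;
        else
                irq_states[pin] &= ~(1UL << source_id);

        /* the level the virtual PIC/IOAPIC would now be told about */
        printf("source %d -> pin %d: line is %d\n",
               source_id, pin, irq_states[pin] != 0);
}

int main(void)
{
        int dev = request_irq_source_id();      /* e.g. an assigned device or the PIT */

        set_irq(USERSPACE_ID, 10, 1);   /* userspace raises GSI 10          -> 1 */
        set_irq(dev, 10, 1);            /* the device raises the same line  -> 1 */
        set_irq(USERSPACE_ID, 10, 0);   /* userspace drops its line         -> still 1 */
        set_irq(dev, 10, 0);            /* last source drops it             -> 0 */
        return 0;
}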
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index cf0ab8ed3845..a87f45edfae8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -105,14 +105,12 @@ static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
          */
         mutex_lock(&assigned_dev->kvm->lock);
         kvm_set_irq(assigned_dev->kvm,
+                    assigned_dev->irq_source_id,
                     assigned_dev->guest_irq, 1);
         mutex_unlock(&assigned_dev->kvm->lock);
         kvm_put_kvm(assigned_dev->kvm);
 }
 
-/* FIXME: Implement the OR logic needed to make shared interrupts on
- * this line behave properly
- */
 static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
 {
         struct kvm_assigned_dev_kernel *assigned_dev =
@@ -134,7 +132,7 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
 
         dev = container_of(kian, struct kvm_assigned_dev_kernel,
                            ack_notifier);
-        kvm_set_irq(dev->kvm, dev->guest_irq, 0);
+        kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
         enable_irq(dev->host_irq);
 }
 
@@ -146,6 +144,7 @@ static void kvm_free_assigned_device(struct kvm *kvm,
         free_irq(assigned_dev->host_irq, (void *)assigned_dev);
 
         kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
+        kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
 
         if (cancel_work_sync(&assigned_dev->interrupt_work))
                 /* We had pending work. That means we will have to take
@@ -215,6 +214,11 @@ static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
         match->ack_notifier.gsi = assigned_irq->guest_irq;
         match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
         kvm_register_irq_ack_notifier(kvm, &match->ack_notifier);
+        r = kvm_request_irq_source_id(kvm);
+        if (r < 0)
+                goto out_release;
+        else
+                match->irq_source_id = r;
 
         /* Even though this is PCI, we don't want to use shared
          * interrupts. Sharing host devices with guest-assigned devices