about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorAvi Kivity <avi@redhat.com>2009-01-04 10:10:50 -0500
committerAvi Kivity <avi@redhat.com>2009-03-24 05:03:03 -0400
commit75858a84a6207f5e60196f6bbd18fde4250e5759 (patch)
treead8ab5d60a616270c61d88a9af08713cefbc9d35
parent5d9b8e30f543a9f21a968a4cda71e8f6d1c66a61 (diff)
KVM: Interrupt mask notifiers for ioapic
Allow clients to request notifications when the guest masks or unmasks a particular irq line. This complements irq ack notifications, as the guest will not ack an irq line that is masked.

Currently implemented for the ioapic only.

Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--include/linux/kvm_host.h17
-rw-r--r--virt/kvm/ioapic.c6
-rw-r--r--virt/kvm/irq_comm.c24
-rw-r--r--virt/kvm/kvm_main.c3
4 files changed, 50 insertions, 0 deletions
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 3cf0ede3fd73..99963f36a6db 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -127,6 +127,10 @@ struct kvm {
127 struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; 127 struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
128#endif 128#endif
129 129
130#ifdef CONFIG_HAVE_KVM_IRQCHIP
131 struct hlist_head mask_notifier_list;
132#endif
133
130#ifdef KVM_ARCH_WANT_MMU_NOTIFIER 134#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
131 struct mmu_notifier mmu_notifier; 135 struct mmu_notifier mmu_notifier;
132 unsigned long mmu_notifier_seq; 136 unsigned long mmu_notifier_seq;
@@ -320,6 +324,19 @@ struct kvm_assigned_dev_kernel {
320 struct pci_dev *dev; 324 struct pci_dev *dev;
321 struct kvm *kvm; 325 struct kvm *kvm;
322}; 326};
327
328struct kvm_irq_mask_notifier {
329 void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
330 int irq;
331 struct hlist_node link;
332};
333
334void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
335 struct kvm_irq_mask_notifier *kimn);
336void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
337 struct kvm_irq_mask_notifier *kimn);
338void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);
339
323void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level); 340void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
324void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi); 341void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi);
325void kvm_register_irq_ack_notifier(struct kvm *kvm, 342void kvm_register_irq_ack_notifier(struct kvm *kvm,
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 23b81cf242af..e85a2bcd2db1 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -101,6 +101,7 @@ static void ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
101static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) 101static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
102{ 102{
103 unsigned index; 103 unsigned index;
104 bool mask_before, mask_after;
104 105
105 switch (ioapic->ioregsel) { 106 switch (ioapic->ioregsel) {
106 case IOAPIC_REG_VERSION: 107 case IOAPIC_REG_VERSION:
@@ -120,6 +121,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
120 ioapic_debug("change redir index %x val %x\n", index, val); 121 ioapic_debug("change redir index %x val %x\n", index, val);
121 if (index >= IOAPIC_NUM_PINS) 122 if (index >= IOAPIC_NUM_PINS)
122 return; 123 return;
124 mask_before = ioapic->redirtbl[index].fields.mask;
123 if (ioapic->ioregsel & 1) { 125 if (ioapic->ioregsel & 1) {
124 ioapic->redirtbl[index].bits &= 0xffffffff; 126 ioapic->redirtbl[index].bits &= 0xffffffff;
125 ioapic->redirtbl[index].bits |= (u64) val << 32; 127 ioapic->redirtbl[index].bits |= (u64) val << 32;
@@ -128,6 +130,9 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
128 ioapic->redirtbl[index].bits |= (u32) val; 130 ioapic->redirtbl[index].bits |= (u32) val;
129 ioapic->redirtbl[index].fields.remote_irr = 0; 131 ioapic->redirtbl[index].fields.remote_irr = 0;
130 } 132 }
133 mask_after = ioapic->redirtbl[index].fields.mask;
134 if (mask_before != mask_after)
135 kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after);
131 if (ioapic->irr & (1 << index)) 136 if (ioapic->irr & (1 << index))
132 ioapic_service(ioapic, index); 137 ioapic_service(ioapic, index);
133 break; 138 break;
@@ -426,3 +431,4 @@ int kvm_ioapic_init(struct kvm *kvm)
426 kvm_io_bus_register_dev(&kvm->mmio_bus, &ioapic->dev); 431 kvm_io_bus_register_dev(&kvm->mmio_bus, &ioapic->dev);
427 return 0; 432 return 0;
428} 433}
434
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index aa5d1e5c497e..5162a411e4d2 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -99,3 +99,27 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
99 clear_bit(irq_source_id, &kvm->arch.irq_states[i]); 99 clear_bit(irq_source_id, &kvm->arch.irq_states[i]);
100 clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap); 100 clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
101} 101}
102
103void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
104 struct kvm_irq_mask_notifier *kimn)
105{
106 kimn->irq = irq;
107 hlist_add_head(&kimn->link, &kvm->mask_notifier_list);
108}
109
110void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
111 struct kvm_irq_mask_notifier *kimn)
112{
113 hlist_del(&kimn->link);
114}
115
116void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
117{
118 struct kvm_irq_mask_notifier *kimn;
119 struct hlist_node *n;
120
121 hlist_for_each_entry(kimn, n, &kvm->mask_notifier_list, link)
122 if (kimn->irq == irq)
123 kimn->func(kimn, mask);
124}
125
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 04401e17c758..786a3ae373b0 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -842,6 +842,9 @@ static struct kvm *kvm_create_vm(void)
842 842
843 if (IS_ERR(kvm)) 843 if (IS_ERR(kvm))
844 goto out; 844 goto out;
845#ifdef CONFIG_HAVE_KVM_IRQCHIP
846 INIT_HLIST_HEAD(&kvm->mask_notifier_list);
847#endif
845 848
846#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 849#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
847 page = alloc_page(GFP_KERNEL | __GFP_ZERO); 850 page = alloc_page(GFP_KERNEL | __GFP_ZERO);