Diffstat (limited to 'virt/kvm')
-rw-r--r--   virt/kvm/ioapic.c   | 57
-rw-r--r--   virt/kvm/ioapic.h   |  6
-rw-r--r--   virt/kvm/irq_comm.c | 71
3 files changed, 60 insertions(+), 74 deletions(-)
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 43969bbf127f..1eddae94bab3 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -142,58 +142,33 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 	}
 }
 
-int ioapic_deliver_entry(struct kvm *kvm, union kvm_ioapic_redirect_entry *e)
-{
-	DECLARE_BITMAP(deliver_bitmask, KVM_MAX_VCPUS);
-	int i, r = -1;
-
-	kvm_get_intr_delivery_bitmask(kvm, NULL, e->fields.dest_id,
-			e->fields.dest_mode,
-			e->fields.delivery_mode == IOAPIC_LOWEST_PRIORITY,
-			0, deliver_bitmask);
-
-	if (find_first_bit(deliver_bitmask, KVM_MAX_VCPUS) >= KVM_MAX_VCPUS) {
-		ioapic_debug("no target on destination\n");
-		return r;
-	}
-
-	while ((i = find_first_bit(deliver_bitmask, KVM_MAX_VCPUS))
-			< KVM_MAX_VCPUS) {
-		struct kvm_vcpu *vcpu = kvm->vcpus[i];
-		__clear_bit(i, deliver_bitmask);
-		if (vcpu) {
-			if (r < 0)
-				r = 0;
-			r += kvm_apic_set_irq(vcpu, e->fields.vector,
-					e->fields.delivery_mode,
-					e->fields.trig_mode);
-		} else
-			ioapic_debug("null destination vcpu: "
-					"mask=%x vector=%x delivery_mode=%x\n",
-					e->fields.deliver_bitmask,
-					e->fields.vector, e->fields.delivery_mode);
-	}
-	return r;
-}
-
 static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 {
-	union kvm_ioapic_redirect_entry entry = ioapic->redirtbl[irq];
+	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
+	struct kvm_lapic_irq irqe;
 
 	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
 		     "vector=%x trig_mode=%x\n",
-		     entry.fields.dest, entry.fields.dest_mode,
-		     entry.fields.delivery_mode, entry.fields.vector,
-		     entry.fields.trig_mode);
+		     entry->fields.dest, entry->fields.dest_mode,
+		     entry->fields.delivery_mode, entry->fields.vector,
+		     entry->fields.trig_mode);
+
+	irqe.dest_id = entry->fields.dest_id;
+	irqe.vector = entry->fields.vector;
+	irqe.dest_mode = entry->fields.dest_mode;
+	irqe.trig_mode = entry->fields.trig_mode;
+	irqe.delivery_mode = entry->fields.delivery_mode << 8;
+	irqe.level = 1;
+	irqe.shorthand = 0;
 
 #ifdef CONFIG_X86
 	/* Always delivery PIT interrupt to vcpu 0 */
 	if (irq == 0) {
-		entry.fields.dest_mode = 0;	/* Physical mode. */
-		entry.fields.dest_id = ioapic->kvm->vcpus[0]->vcpu_id;
+		irqe.dest_mode = 0; /* Physical mode. */
+		irqe.dest_id = ioapic->kvm->vcpus[0]->vcpu_id;
 	}
 #endif
-	return ioapic_deliver_entry(ioapic->kvm, &entry);
+	return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
 }
 
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
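The only non-mechanical line in the new ioapic_deliver() is `irqe.delivery_mode = entry->fields.delivery_mode << 8`: the IOAPIC redirection entry stores delivery mode as a raw 3-bit field, while the LAPIC side compares against the APIC_DM_* constants, which live in bits 8-10 of the ICR encoding. A minimal standalone sketch of that mapping follows; the constant values (IOAPIC_LOWEST_PRIORITY = 0x1, APIC_DM_LOWEST = 0x100) are recalled from the kvm/apic headers and should be treated as assumptions, not quotations from this tree.

	/* Sketch: why shifting the IOAPIC's 3-bit delivery-mode field left by 8
	 * yields the LAPIC's APIC_DM_* encoding. Constant values are assumptions
	 * recalled from the kernel headers, not quoted from this patch. */
	#include <assert.h>
	#include <stdio.h>

	#define IOAPIC_LOWEST_PRIORITY	0x1	/* 3-bit field in the redirect entry */
	#define APIC_DM_LOWEST		0x100	/* bits 8-10 of the LAPIC ICR */

	int main(void)
	{
		unsigned int redir_delivery_mode = IOAPIC_LOWEST_PRIORITY;
		unsigned int lapic_delivery_mode = redir_delivery_mode << 8;

		/* The shifted value is exactly what kvm_is_dm_lowest_prio() tests
		 * against (APIC_DM_LOWEST) in the irq_comm.c hunk further down. */
		assert(lapic_delivery_mode == APIC_DM_LOWEST);
		printf("redirtbl delivery_mode 0x%x -> lapic delivery_mode 0x%x\n",
		       redir_delivery_mode, lapic_delivery_mode);
		return 0;
	}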
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index e7bc92d895ff..7080b713c160 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -71,8 +71,6 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
 int kvm_ioapic_init(struct kvm *kvm);
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
 void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
-void kvm_get_intr_delivery_bitmask(struct kvm *kvm, struct kvm_lapic *src,
-		int dest_id, int dest_mode, bool low_prio, int short_hand,
-		unsigned long *deliver_bitmask);
-int ioapic_deliver_entry(struct kvm *kvm, union kvm_ioapic_redirect_entry *e);
+int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
+		struct kvm_lapic_irq *irq);
 #endif
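With the bitmask helpers gone, kvm_irq_delivery_to_apic() is the one delivery entry point the IOAPIC and MSI paths share. As orientation only, here is a sketch of the struct kvm_lapic_irq descriptor a caller fills in, with the field list inferred from the assignments in this diff; the real definition lives in the x86 KVM headers and may use different types, ordering, or extra fields.

	/* Sketch of the per-interrupt descriptor introduced by this patch.
	 * Field list inferred from the diff; not a copy of the real header. */
	#include <stdio.h>

	struct kvm_lapic_irq {
		unsigned int vector;		/* interrupt vector */
		unsigned int dest_id;		/* APIC destination id */
		unsigned int dest_mode;		/* 0 = physical, non-zero = logical */
		unsigned int delivery_mode;	/* APIC_DM_* style encoding (bits 8-10) */
		unsigned int trig_mode;		/* 0 = edge, non-zero = level */
		unsigned int level;		/* asserted level; 1 for ioapic/MSI */
		unsigned int shorthand;		/* ICR shorthand; 0 = none */
	};

	int main(void)
	{
		/* A caller in the style of the new ioapic_deliver(): fill the
		 * descriptor, then hand it to kvm_irq_delivery_to_apic(kvm, NULL, &irqe).
		 * Per the irq_comm.c hunk, the return value is -1 if no LAPIC matched,
		 * otherwise the accumulated kvm_apic_set_irq() result(s). */
		struct kvm_lapic_irq irqe = {
			.vector = 0x30,		/* hypothetical vector */
			.dest_id = 0,		/* vcpu 0, physical mode */
			.dest_mode = 0,
			.delivery_mode = 0x000,	/* fixed delivery */
			.trig_mode = 0,
			.level = 1,
			.shorthand = 0,
		};
		printf("deliver vector 0x%x to apic id %u\n", irqe.vector, irqe.dest_id);
		return 0;
	}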
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index f5e059b67cd4..4fa1f604b425 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -22,6 +22,9 @@
 #include <linux/kvm_host.h>
 
 #include <asm/msidef.h>
+#ifdef CONFIG_IA64
+#include <asm/iosapic.h>
+#endif
 
 #include "irq.h"
 
@@ -43,61 +46,71 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
 	return kvm_ioapic_set_irq(kvm->arch.vioapic, e->irqchip.pin, level);
 }
 
-void kvm_get_intr_delivery_bitmask(struct kvm *kvm, struct kvm_lapic *src,
-		int dest_id, int dest_mode, bool low_prio, int short_hand,
-		unsigned long *deliver_bitmask)
+inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
 {
-	int i, lowest = -1;
-	struct kvm_vcpu *vcpu;
+#ifdef CONFIG_IA64
+	return irq->delivery_mode ==
+			(IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
+#else
+	return irq->delivery_mode == APIC_DM_LOWEST;
+#endif
+}
 
-	if (dest_mode == 0 && dest_id == 0xff && low_prio)
+int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
+		struct kvm_lapic_irq *irq)
+{
+	int i, r = -1;
+	struct kvm_vcpu *vcpu, *lowest = NULL;
+
+	if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
+			kvm_is_dm_lowest_prio(irq))
 		printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");
 
-	bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
 	for (i = 0; i < KVM_MAX_VCPUS; i++) {
 		vcpu = kvm->vcpus[i];
 
 		if (!vcpu || !kvm_apic_present(vcpu))
 			continue;
 
-		if (!kvm_apic_match_dest(vcpu, src, short_hand, dest_id,
-					dest_mode))
+		if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
+					irq->dest_id, irq->dest_mode))
 			continue;
 
-		if (!low_prio) {
-			__set_bit(i, deliver_bitmask);
+		if (!kvm_is_dm_lowest_prio(irq)) {
+			if (r < 0)
+				r = 0;
+			r += kvm_apic_set_irq(vcpu, irq);
 		} else {
-			if (lowest < 0)
-				lowest = i;
-			if (kvm_apic_compare_prio(vcpu, kvm->vcpus[lowest]) < 0)
-				lowest = i;
+			if (!lowest)
+				lowest = vcpu;
+			else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
+				lowest = vcpu;
 		}
 	}
 
-	if (lowest != -1)
-		__set_bit(lowest, deliver_bitmask);
+	if (lowest)
+		r = kvm_apic_set_irq(lowest, irq);
+
+	return r;
 }
 
 static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 		       struct kvm *kvm, int level)
 {
-	union kvm_ioapic_redirect_entry entry;
+	struct kvm_lapic_irq irq;
 
-	entry.bits = 0;
-	entry.fields.dest_id = (e->msi.address_lo &
+	irq.dest_id = (e->msi.address_lo &
 			MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
-	entry.fields.vector = (e->msi.data &
+	irq.vector = (e->msi.data &
 			MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
-	entry.fields.dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
-			(unsigned long *)&e->msi.address_lo);
-	entry.fields.trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
-			(unsigned long *)&e->msi.data);
-	entry.fields.delivery_mode = test_bit(
-			MSI_DATA_DELIVERY_MODE_SHIFT,
-			(unsigned long *)&e->msi.data);
+	irq.dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
+	irq.trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
+	irq.delivery_mode = e->msi.data & 0x700;
+	irq.level = 1;
+	irq.shorthand = 0;
 
 	/* TODO Deal with RH bit of MSI message address */
-	return ioapic_deliver_entry(kvm, &entry);
+	return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
 }
 
 /* This should be called with the kvm->lock mutex held
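For context on the reworked kvm_set_msi() above: instead of faking an IOAPIC redirect entry, it decodes the guest-programmed MSI address/data pair directly into a kvm_lapic_irq. The sketch below walks that decode on a concrete message; the bit positions (dest id in address bits 19:12, dest mode in address bit 2, vector in data bits 7:0, delivery mode in data bits 10:8, trigger in data bit 15) follow the x86 MSI format as I recall it, so treat them as assumptions rather than a quotation of asm/msidef.h. Note also that the patch keeps the raw masked bits for dest_mode and trig_mode instead of normalizing them to 0/1 with test_bit(), which is sufficient as long as consumers only distinguish zero from non-zero.

	/* Standalone sketch of the MSI decode done by the new kvm_set_msi().
	 * Bit positions are assumptions based on the x86 MSI message format,
	 * not the kernel's msidef.h macros. */
	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical message: logical destination 1, vector 0x41,
		 * lowest-priority delivery (001b in data bits 10:8), edge triggered. */
		unsigned int address_lo = 0xfee01000u | (1u << 2);	/* dest mode bit set */
		unsigned int data       = 0x41u | (1u << 8);

		unsigned int dest_id       = (address_lo >> 12) & 0xff;
		unsigned int vector        = data & 0xff;
		unsigned int dest_mode     = address_lo & (1u << 2);	/* raw masked bit */
		unsigned int trig_mode     = data & (1u << 15);		/* raw masked bit */
		unsigned int delivery_mode = data & 0x700;	/* already APIC_DM_* aligned */

		printf("dest_id=%u vector=0x%x dest_mode=%s trig=%s delivery_mode=0x%x\n",
		       dest_id, vector, dest_mode ? "logical" : "physical",
		       trig_mode ? "level" : "edge", delivery_mode);
		return 0;
	}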