author    Ingo Molnar <mingo@elte.hu>    2009-03-28 18:24:12 -0400
committer Ingo Molnar <mingo@elte.hu>    2009-03-28 18:24:12 -0400
commit    d00ab2fdd4dc4361c97777bc1fef7234329d4659 (patch)
tree      b8d8f98c1af633bbc1570b4270b39727737beebf /virt/kvm
parent    88f502fedba82eff252b6420e8b8328e4ae25c67 (diff)
parent    7c730ccdc1188b97f5c8cb690906242c7ed75c22 (diff)
Merge branch 'linus' into core/futexes
Diffstat (limited to 'virt/kvm')
 virt/kvm/ioapic.c   |  39
 virt/kvm/ioapic.h   |   2
 virt/kvm/irq_comm.c | 297
 virt/kvm/kvm_main.c | 141
 4 files changed, 380 insertions(+), 99 deletions(-)
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 23b81cf242af..c3b99def9cbc 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -83,24 +83,28 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
 	return result;
 }
 
-static void ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
+static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
 {
 	union ioapic_redir_entry *pent;
+	int injected = -1;
 
 	pent = &ioapic->redirtbl[idx];
 
 	if (!pent->fields.mask) {
-		int injected = ioapic_deliver(ioapic, idx);
+		injected = ioapic_deliver(ioapic, idx);
 		if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG)
 			pent->fields.remote_irr = 1;
 	}
 	if (!pent->fields.trig_mode)
 		ioapic->irr &= ~(1 << idx);
+
+	return injected;
 }
 
 static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 {
 	unsigned index;
+	bool mask_before, mask_after;
 
 	switch (ioapic->ioregsel) {
 	case IOAPIC_REG_VERSION:
@@ -120,6 +124,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 		ioapic_debug("change redir index %x val %x\n", index, val);
 		if (index >= IOAPIC_NUM_PINS)
 			return;
+		mask_before = ioapic->redirtbl[index].fields.mask;
 		if (ioapic->ioregsel & 1) {
 			ioapic->redirtbl[index].bits &= 0xffffffff;
 			ioapic->redirtbl[index].bits |= (u64) val << 32;
@@ -128,6 +133,9 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 			ioapic->redirtbl[index].bits |= (u32) val;
 			ioapic->redirtbl[index].fields.remote_irr = 0;
 		}
+		mask_after = ioapic->redirtbl[index].fields.mask;
+		if (mask_before != mask_after)
+			kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after);
 		if (ioapic->irr & (1 << index))
 			ioapic_service(ioapic, index);
 		break;
@@ -202,7 +210,7 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 	u8 trig_mode = ioapic->redirtbl[irq].fields.trig_mode;
 	u32 deliver_bitmask;
 	struct kvm_vcpu *vcpu;
-	int vcpu_id, r = 0;
+	int vcpu_id, r = -1;
 
 	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
 		     "vector=%x trig_mode=%x\n",
@@ -242,7 +250,9 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 			deliver_bitmask &= ~(1 << vcpu_id);
 			vcpu = ioapic->kvm->vcpus[vcpu_id];
 			if (vcpu) {
-				r = ioapic_inj_irq(ioapic, vcpu, vector,
+				if (r < 0)
+					r = 0;
+				r += ioapic_inj_irq(ioapic, vcpu, vector,
 						   trig_mode, delivery_mode);
 			}
 		}
@@ -253,8 +263,10 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 				continue;
 			deliver_bitmask &= ~(1 << vcpu_id);
 			vcpu = ioapic->kvm->vcpus[vcpu_id];
-			if (vcpu)
+			if (vcpu) {
 				ioapic_inj_nmi(vcpu);
+				r = 1;
+			}
 			else
 				ioapic_debug("NMI to vcpu %d failed\n",
 					     vcpu->vcpu_id);
@@ -268,11 +280,12 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 	return r;
 }
 
-void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
+int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
 {
 	u32 old_irr = ioapic->irr;
 	u32 mask = 1 << irq;
 	union ioapic_redir_entry entry;
+	int ret = 1;
 
 	if (irq >= 0 && irq < IOAPIC_NUM_PINS) {
 		entry = ioapic->redirtbl[irq];
@@ -283,25 +296,26 @@ void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
 			ioapic->irr |= mask;
 			if ((!entry.fields.trig_mode && old_irr != ioapic->irr)
 			    || !entry.fields.remote_irr)
-				ioapic_service(ioapic, irq);
+				ret = ioapic_service(ioapic, irq);
 		}
 	}
+	return ret;
 }
 
-static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int gsi,
+static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int pin,
 				    int trigger_mode)
 {
 	union ioapic_redir_entry *ent;
 
-	ent = &ioapic->redirtbl[gsi];
+	ent = &ioapic->redirtbl[pin];
 
-	kvm_notify_acked_irq(ioapic->kvm, gsi);
+	kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, pin);
 
 	if (trigger_mode == IOAPIC_LEVEL_TRIG) {
 		ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
 		ent->fields.remote_irr = 0;
-		if (!ent->fields.mask && (ioapic->irr & (1 << gsi)))
-			ioapic_service(ioapic, gsi);
+		if (!ent->fields.mask && (ioapic->irr & (1 << pin)))
+			ioapic_service(ioapic, pin);
 	}
 }
 
@@ -426,3 +440,4 @@ int kvm_ioapic_init(struct kvm *kvm)
 	kvm_io_bus_register_dev(&kvm->mmio_bus, &ioapic->dev);
 	return 0;
 }
+
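The net effect of the ioapic.c changes is that kvm_ioapic_set_irq() now reports delivery status instead of returning void: a negative value means the interrupt was ignored (for example, the pin was masked), zero means it was coalesced with an interrupt that was still pending, and a positive value is the number of CPUs it was delivered to. A minimal caller-side sketch of that convention; the accounting helper below is hypothetical and not part of this patch:

	/* Hypothetical caller: count coalesced vs. delivered injections
	 * using the return convention introduced above. */
	static void inject_and_account(struct kvm_ioapic *ioapic, int irq,
				       long *coalesced, long *delivered)
	{
		int r = kvm_ioapic_set_irq(ioapic, irq, 1); /* assert line */

		if (r < 0)
			return;			/* ignored: masked pin */
		else if (r == 0)
			(*coalesced)++;		/* previous irq still pending */
		else
			*delivered += r;	/* delivered to r CPUs */
	}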
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 49c9581d2586..a34bd5e6436b 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -83,7 +83,7 @@ struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
 				       unsigned long bitmap);
 void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
 int kvm_ioapic_init(struct kvm *kvm);
-void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
+int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
 void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
 u32 kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
 				u8 dest_mode);
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index aa5d1e5c497e..864ac5483baa 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -20,35 +20,132 @@
  */
 
 #include <linux/kvm_host.h>
+
+#include <asm/msidef.h>
+
 #include "irq.h"
 
 #include "ioapic.h"
 
-/* This should be called with the kvm->lock mutex held */
-void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
+static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
+			   struct kvm *kvm, int level)
+{
+#ifdef CONFIG_X86
+	return kvm_pic_set_irq(pic_irqchip(kvm), e->irqchip.pin, level);
+#else
+	return -1;
+#endif
+}
+
+static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
+			      struct kvm *kvm, int level)
+{
+	return kvm_ioapic_set_irq(kvm->arch.vioapic, e->irqchip.pin, level);
+}
+
+static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
+		       struct kvm *kvm, int level)
+{
+	int vcpu_id, r = -1;
+	struct kvm_vcpu *vcpu;
+	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
+	int dest_id = (e->msi.address_lo & MSI_ADDR_DEST_ID_MASK)
+			>> MSI_ADDR_DEST_ID_SHIFT;
+	int vector = (e->msi.data & MSI_DATA_VECTOR_MASK)
+			>> MSI_DATA_VECTOR_SHIFT;
+	int dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
+				(unsigned long *)&e->msi.address_lo);
+	int trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
+				(unsigned long *)&e->msi.data);
+	int delivery_mode = test_bit(MSI_DATA_DELIVERY_MODE_SHIFT,
+				(unsigned long *)&e->msi.data);
+	u32 deliver_bitmask;
+
+	BUG_ON(!ioapic);
+
+	deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
+				dest_id, dest_mode);
+	/* IOAPIC delivery mode value is the same as MSI here */
+	switch (delivery_mode) {
+	case IOAPIC_LOWEST_PRIORITY:
+		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
+				deliver_bitmask);
+		if (vcpu != NULL)
+			r = kvm_apic_set_irq(vcpu, vector, trig_mode);
+		else
+			printk(KERN_INFO "kvm: null lowest priority vcpu!\n");
+		break;
+	case IOAPIC_FIXED:
+		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
+			if (!(deliver_bitmask & (1 << vcpu_id)))
+				continue;
+			deliver_bitmask &= ~(1 << vcpu_id);
+			vcpu = ioapic->kvm->vcpus[vcpu_id];
+			if (vcpu) {
+				if (r < 0)
+					r = 0;
+				r += kvm_apic_set_irq(vcpu, vector, trig_mode);
+			}
+		}
+		break;
+	default:
+		break;
+	}
+	return r;
+}
+
+/* This should be called with the kvm->lock mutex held
+ * Return value:
+ *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
+ *  = 0   Interrupt was coalesced (previous irq is still pending)
+ *  > 0   Number of CPUs interrupt was delivered to
+ */
+int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
 {
-	unsigned long *irq_state = (unsigned long *)&kvm->arch.irq_states[irq];
+	struct kvm_kernel_irq_routing_entry *e;
+	unsigned long *irq_state, sig_level;
+	int ret = -1;
+
+	if (irq < KVM_IOAPIC_NUM_PINS) {
+		irq_state = (unsigned long *)&kvm->arch.irq_states[irq];
 
-	/* Logical OR for level trig interrupt */
-	if (level)
-		set_bit(irq_source_id, irq_state);
-	else
-		clear_bit(irq_source_id, irq_state);
+		/* Logical OR for level trig interrupt */
+		if (level)
+			set_bit(irq_source_id, irq_state);
+		else
+			clear_bit(irq_source_id, irq_state);
+		sig_level = !!(*irq_state);
+	} else /* Deal with MSI/MSI-X */
+		sig_level = 1;
 
 	/* Not possible to detect if the guest uses the PIC or the
 	 * IOAPIC. So set the bit in both. The guest will ignore
 	 * writes to the unused one.
 	 */
-	kvm_ioapic_set_irq(kvm->arch.vioapic, irq, !!(*irq_state));
-#ifdef CONFIG_X86
-	kvm_pic_set_irq(pic_irqchip(kvm), irq, !!(*irq_state));
-#endif
+	list_for_each_entry(e, &kvm->irq_routing, link)
+		if (e->gsi == irq) {
+			int r = e->set(e, kvm, sig_level);
+			if (r < 0)
+				continue;
+
+			ret = r + ((ret < 0) ? 0 : ret);
+		}
+	return ret;
 }
 
-void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi)
+void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
 {
+	struct kvm_kernel_irq_routing_entry *e;
 	struct kvm_irq_ack_notifier *kian;
 	struct hlist_node *n;
+	unsigned gsi = pin;
+
+	list_for_each_entry(e, &kvm->irq_routing, link)
+		if (e->irqchip.irqchip == irqchip &&
+		    e->irqchip.pin == pin) {
+			gsi = e->gsi;
+			break;
+		}
 
 	hlist_for_each_entry(kian, n, &kvm->arch.irq_ack_notifier_list, link)
 		if (kian->gsi == gsi)
@@ -99,3 +196,177 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
 		clear_bit(irq_source_id, &kvm->arch.irq_states[i]);
 	clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
 }
+
+void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
+				    struct kvm_irq_mask_notifier *kimn)
+{
+	kimn->irq = irq;
+	hlist_add_head(&kimn->link, &kvm->mask_notifier_list);
+}
+
+void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
+				      struct kvm_irq_mask_notifier *kimn)
+{
+	hlist_del(&kimn->link);
+}
+
+void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
+{
+	struct kvm_irq_mask_notifier *kimn;
+	struct hlist_node *n;
+
+	hlist_for_each_entry(kimn, n, &kvm->mask_notifier_list, link)
+		if (kimn->irq == irq)
+			kimn->func(kimn, mask);
+}
+
+static void __kvm_free_irq_routing(struct list_head *irq_routing)
+{
+	struct kvm_kernel_irq_routing_entry *e, *n;
+
+	list_for_each_entry_safe(e, n, irq_routing, link)
+		kfree(e);
+}
+
+void kvm_free_irq_routing(struct kvm *kvm)
+{
+	__kvm_free_irq_routing(&kvm->irq_routing);
+}
+
+static int setup_routing_entry(struct kvm_kernel_irq_routing_entry *e,
+			       const struct kvm_irq_routing_entry *ue)
+{
+	int r = -EINVAL;
+	int delta;
+
+	e->gsi = ue->gsi;
+	switch (ue->type) {
+	case KVM_IRQ_ROUTING_IRQCHIP:
+		delta = 0;
+		switch (ue->u.irqchip.irqchip) {
+		case KVM_IRQCHIP_PIC_MASTER:
+			e->set = kvm_set_pic_irq;
+			break;
+		case KVM_IRQCHIP_PIC_SLAVE:
+			e->set = kvm_set_pic_irq;
+			delta = 8;
+			break;
+		case KVM_IRQCHIP_IOAPIC:
+			e->set = kvm_set_ioapic_irq;
+			break;
+		default:
+			goto out;
+		}
+		e->irqchip.irqchip = ue->u.irqchip.irqchip;
+		e->irqchip.pin = ue->u.irqchip.pin + delta;
+		break;
+	case KVM_IRQ_ROUTING_MSI:
+		e->set = kvm_set_msi;
+		e->msi.address_lo = ue->u.msi.address_lo;
+		e->msi.address_hi = ue->u.msi.address_hi;
+		e->msi.data = ue->u.msi.data;
+		break;
+	default:
+		goto out;
+	}
+	r = 0;
+out:
+	return r;
+}
+
+
+int kvm_set_irq_routing(struct kvm *kvm,
+			const struct kvm_irq_routing_entry *ue,
+			unsigned nr,
+			unsigned flags)
+{
+	struct list_head irq_list = LIST_HEAD_INIT(irq_list);
+	struct list_head tmp = LIST_HEAD_INIT(tmp);
+	struct kvm_kernel_irq_routing_entry *e = NULL;
+	unsigned i;
+	int r;
+
+	for (i = 0; i < nr; ++i) {
+		r = -EINVAL;
+		if (ue->gsi >= KVM_MAX_IRQ_ROUTES)
+			goto out;
+		if (ue->flags)
+			goto out;
+		r = -ENOMEM;
+		e = kzalloc(sizeof(*e), GFP_KERNEL);
+		if (!e)
+			goto out;
+		r = setup_routing_entry(e, ue);
+		if (r)
+			goto out;
+		++ue;
+		list_add(&e->link, &irq_list);
+		e = NULL;
+	}
+
+	mutex_lock(&kvm->lock);
+	list_splice(&kvm->irq_routing, &tmp);
+	INIT_LIST_HEAD(&kvm->irq_routing);
+	list_splice(&irq_list, &kvm->irq_routing);
+	INIT_LIST_HEAD(&irq_list);
+	list_splice(&tmp, &irq_list);
+	mutex_unlock(&kvm->lock);
+
+	r = 0;
+
+out:
+	kfree(e);
+	__kvm_free_irq_routing(&irq_list);
+	return r;
+}
+
+#define IOAPIC_ROUTING_ENTRY(irq) \
+	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
+	  .u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC, .u.irqchip.pin = (irq) }
+#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)
+
+#ifdef CONFIG_X86
+# define PIC_ROUTING_ENTRY(irq) \
+	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
+	  .u.irqchip.irqchip = SELECT_PIC(irq), .u.irqchip.pin = (irq) % 8 }
+# define ROUTING_ENTRY2(irq) \
+	IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
+#else
+# define ROUTING_ENTRY2(irq) \
+	IOAPIC_ROUTING_ENTRY(irq)
+#endif
+
+static const struct kvm_irq_routing_entry default_routing[] = {
+	ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
+	ROUTING_ENTRY2(2), ROUTING_ENTRY2(3),
+	ROUTING_ENTRY2(4), ROUTING_ENTRY2(5),
+	ROUTING_ENTRY2(6), ROUTING_ENTRY2(7),
+	ROUTING_ENTRY2(8), ROUTING_ENTRY2(9),
+	ROUTING_ENTRY2(10), ROUTING_ENTRY2(11),
+	ROUTING_ENTRY2(12), ROUTING_ENTRY2(13),
+	ROUTING_ENTRY2(14), ROUTING_ENTRY2(15),
+	ROUTING_ENTRY1(16), ROUTING_ENTRY1(17),
+	ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
+	ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
+	ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
+#ifdef CONFIG_IA64
+	ROUTING_ENTRY1(24), ROUTING_ENTRY1(25),
+	ROUTING_ENTRY1(26), ROUTING_ENTRY1(27),
+	ROUTING_ENTRY1(28), ROUTING_ENTRY1(29),
+	ROUTING_ENTRY1(30), ROUTING_ENTRY1(31),
+	ROUTING_ENTRY1(32), ROUTING_ENTRY1(33),
+	ROUTING_ENTRY1(34), ROUTING_ENTRY1(35),
+	ROUTING_ENTRY1(36), ROUTING_ENTRY1(37),
+	ROUTING_ENTRY1(38), ROUTING_ENTRY1(39),
+	ROUTING_ENTRY1(40), ROUTING_ENTRY1(41),
+	ROUTING_ENTRY1(42), ROUTING_ENTRY1(43),
+	ROUTING_ENTRY1(44), ROUTING_ENTRY1(45),
+	ROUTING_ENTRY1(46), ROUTING_ENTRY1(47),
+#endif
+};
+
+int kvm_setup_default_irq_routing(struct kvm *kvm)
+{
+	return kvm_set_irq_routing(kvm, default_routing,
+				   ARRAY_SIZE(default_routing), 0);
+}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 29a667ce35b0..605697e9c4dd 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -47,10 +47,6 @@
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 
-#ifdef CONFIG_X86
-#include <asm/msidef.h>
-#endif
-
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
 #include "coalesced_mmio.h"
 #endif
@@ -85,57 +81,6 @@ static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
 static bool kvm_rebooting;
 
 #ifdef KVM_CAP_DEVICE_ASSIGNMENT
-
-#ifdef CONFIG_X86
-static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev)
-{
-	int vcpu_id;
-	struct kvm_vcpu *vcpu;
-	struct kvm_ioapic *ioapic = ioapic_irqchip(dev->kvm);
-	int dest_id = (dev->guest_msi.address_lo & MSI_ADDR_DEST_ID_MASK)
-			>> MSI_ADDR_DEST_ID_SHIFT;
-	int vector = (dev->guest_msi.data & MSI_DATA_VECTOR_MASK)
-			>> MSI_DATA_VECTOR_SHIFT;
-	int dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
-				(unsigned long *)&dev->guest_msi.address_lo);
-	int trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
-				(unsigned long *)&dev->guest_msi.data);
-	int delivery_mode = test_bit(MSI_DATA_DELIVERY_MODE_SHIFT,
-				(unsigned long *)&dev->guest_msi.data);
-	u32 deliver_bitmask;
-
-	BUG_ON(!ioapic);
-
-	deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
-				dest_id, dest_mode);
-	/* IOAPIC delivery mode value is the same as MSI here */
-	switch (delivery_mode) {
-	case IOAPIC_LOWEST_PRIORITY:
-		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
-				deliver_bitmask);
-		if (vcpu != NULL)
-			kvm_apic_set_irq(vcpu, vector, trig_mode);
-		else
-			printk(KERN_INFO "kvm: null lowest priority vcpu!\n");
-		break;
-	case IOAPIC_FIXED:
-		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
-			if (!(deliver_bitmask & (1 << vcpu_id)))
-				continue;
-			deliver_bitmask &= ~(1 << vcpu_id);
-			vcpu = ioapic->kvm->vcpus[vcpu_id];
-			if (vcpu)
-				kvm_apic_set_irq(vcpu, vector, trig_mode);
-		}
-		break;
-	default:
-		printk(KERN_INFO "kvm: unsupported MSI delivery mode\n");
-	}
-}
-#else
-static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev) {}
-#endif
-
 static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
 						      int assigned_dev_id)
 {
@@ -162,13 +107,10 @@ static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
 	 * finer-grained lock, update this
 	 */
 	mutex_lock(&assigned_dev->kvm->lock);
-	if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_GUEST_INTX)
-		kvm_set_irq(assigned_dev->kvm,
-			    assigned_dev->irq_source_id,
-			    assigned_dev->guest_irq, 1);
-	else if (assigned_dev->irq_requested_type &
-				KVM_ASSIGNED_DEV_GUEST_MSI) {
-		assigned_device_msi_dispatch(assigned_dev);
+	kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
+		    assigned_dev->guest_irq, 1);
+
+	if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_GUEST_MSI) {
 		enable_irq(assigned_dev->host_irq);
 		assigned_dev->host_irq_disabled = false;
 	}
@@ -331,18 +273,24 @@ static int assigned_device_update_msi(struct kvm *kvm,
 {
 	int r;
 
+	adev->guest_irq = airq->guest_irq;
 	if (airq->flags & KVM_DEV_IRQ_ASSIGN_ENABLE_MSI) {
 		/* x86 don't care upper address of guest msi message addr */
 		adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_MSI;
 		adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_INTX;
-		adev->guest_msi.address_lo = airq->guest_msi.addr_lo;
-		adev->guest_msi.data = airq->guest_msi.data;
 		adev->ack_notifier.gsi = -1;
 	} else if (msi2intx) {
 		adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_INTX;
 		adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_MSI;
-		adev->guest_irq = airq->guest_irq;
 		adev->ack_notifier.gsi = airq->guest_irq;
+	} else {
+		/*
+		 * Guest require to disable device MSI, we disable MSI and
+		 * re-enable INTx by default again. Notice it's only for
+		 * non-msi2intx.
+		 */
+		assigned_device_update_intx(kvm, adev, airq);
+		return 0;
 	}
 
 	if (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)
@@ -379,6 +327,7 @@ static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
 {
 	int r = 0;
 	struct kvm_assigned_dev_kernel *match;
+	u32 current_flags = 0, changed_flags;
 
 	mutex_lock(&kvm->lock);
 
@@ -416,8 +365,13 @@ static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
 		}
 	}
 
-	if ((!msi2intx &&
-	     (assigned_irq->flags & KVM_DEV_IRQ_ASSIGN_ENABLE_MSI)) ||
+	if ((match->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI) &&
+	    (match->irq_requested_type & KVM_ASSIGNED_DEV_GUEST_MSI))
+		current_flags |= KVM_DEV_IRQ_ASSIGN_ENABLE_MSI;
+
+	changed_flags = assigned_irq->flags ^ current_flags;
+
+	if ((changed_flags & KVM_DEV_IRQ_ASSIGN_MSI_ACTION) ||
 	    (msi2intx && match->dev->msi_enabled)) {
 #ifdef CONFIG_X86
 		r = assigned_device_update_msi(kvm, match, assigned_irq);
@@ -563,7 +517,7 @@ static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
 		goto out;
 	}
 
-	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
+	if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
 		kvm_deassign_device(kvm, match);
 
 	kvm_free_assigned_device(kvm, match);
@@ -581,8 +535,10 @@ static inline int valid_vcpu(int n)
 
 inline int kvm_is_mmio_pfn(pfn_t pfn)
 {
-	if (pfn_valid(pfn))
-		return PageReserved(pfn_to_page(pfn));
+	if (pfn_valid(pfn)) {
+		struct page *page = compound_head(pfn_to_page(pfn));
+		return PageReserved(page);
+	}
 
 	return true;
 }
@@ -828,6 +784,10 @@ static struct kvm *kvm_create_vm(void)
 
 	if (IS_ERR(kvm))
 		goto out;
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
+	INIT_LIST_HEAD(&kvm->irq_routing);
+	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
+#endif
 
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
@@ -909,6 +869,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	spin_lock(&kvm_lock);
 	list_del(&kvm->vm_list);
 	spin_unlock(&kvm_lock);
+	kvm_free_irq_routing(kvm);
 	kvm_io_bus_destroy(&kvm->pio_bus);
 	kvm_io_bus_destroy(&kvm->mmio_bus);
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
@@ -1755,13 +1716,13 @@ out_free2:
 		r = 0;
 		break;
 	}
-	case KVM_DEBUG_GUEST: {
-		struct kvm_debug_guest dbg;
+	case KVM_SET_GUEST_DEBUG: {
+		struct kvm_guest_debug dbg;
 
 		r = -EFAULT;
 		if (copy_from_user(&dbg, argp, sizeof dbg))
 			goto out;
-		r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
+		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
 		if (r)
 			goto out;
 		r = 0;
@@ -1929,6 +1890,36 @@ static long kvm_vm_ioctl(struct file *filp,
 		break;
 	}
 #endif
+#ifdef KVM_CAP_IRQ_ROUTING
+	case KVM_SET_GSI_ROUTING: {
+		struct kvm_irq_routing routing;
+		struct kvm_irq_routing __user *urouting;
+		struct kvm_irq_routing_entry *entries;
+
+		r = -EFAULT;
+		if (copy_from_user(&routing, argp, sizeof(routing)))
+			goto out;
+		r = -EINVAL;
+		if (routing.nr >= KVM_MAX_IRQ_ROUTES)
+			goto out;
+		if (routing.flags)
+			goto out;
+		r = -ENOMEM;
+		entries = vmalloc(routing.nr * sizeof(*entries));
+		if (!entries)
+			goto out;
+		r = -EFAULT;
+		urouting = argp;
+		if (copy_from_user(entries, urouting->entries,
+				   routing.nr * sizeof(*entries)))
+			goto out_free_irq_routing;
+		r = kvm_set_irq_routing(kvm, entries, routing.nr,
+					routing.flags);
+	out_free_irq_routing:
+		vfree(entries);
+		break;
+	}
+#endif
 	default:
 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
 	}
@@ -1995,6 +1986,10 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
 	case KVM_CAP_USER_MEMORY:
 	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
 		return 1;
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
+	case KVM_CAP_IRQ_ROUTING:
+		return KVM_MAX_IRQ_ROUTES;
+#endif
 	default:
 		break;
 	}
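Taken together, the kvm_main.c plumbing makes the routing table controllable from userspace: KVM_CHECK_EXTENSION(KVM_CAP_IRQ_ROUTING) now advertises the route limit, and KVM_SET_GSI_ROUTING replaces the entire table in one call. A userspace sketch under those assumptions (error handling trimmed, vm_fd obtained from KVM_CREATE_VM; demo_set_routing is a hypothetical helper):

	#include <linux/kvm.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>

	/* Sketch: route GSI 5 to IOAPIC pin 10. KVM_SET_GSI_ROUTING
	 * replaces the whole table, so a real VMM would rebuild every
	 * entry it still needs, not just the one being changed. */
	static int demo_set_routing(int vm_fd)
	{
		struct kvm_irq_routing *table;
		int r;

		if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_IRQ_ROUTING) <= 0)
			return -1;	/* kernel lacks userspace routing */

		table = calloc(1, sizeof(*table) + sizeof(table->entries[0]));
		if (!table)
			return -1;
		table->nr = 1;
		table->entries[0].gsi = 5;
		table->entries[0].type = KVM_IRQ_ROUTING_IRQCHIP;
		table->entries[0].u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC;
		table->entries[0].u.irqchip.pin = 10;

		r = ioctl(vm_fd, KVM_SET_GSI_ROUTING, table);
		free(table);
		return r;
	}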