about summary refs log tree commit diff stats
path: root/virt/kvm/ioapic.c
diff options
context:
space:
mode:
Diffstat (limited to 'virt/kvm/ioapic.c')
-rw-r--r--  virt/kvm/ioapic.c  119
1 files changed, 98 insertions, 21 deletions
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 9fe140bb38ec..7c79c1d76d0c 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -33,6 +33,7 @@
33#include <linux/smp.h> 33#include <linux/smp.h>
34#include <linux/hrtimer.h> 34#include <linux/hrtimer.h>
35#include <linux/io.h> 35#include <linux/io.h>
36#include <linux/slab.h>
36#include <asm/processor.h> 37#include <asm/processor.h>
37#include <asm/page.h> 38#include <asm/page.h>
38#include <asm/current.h> 39#include <asm/current.h>
@@ -100,6 +101,19 @@ static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
100 return injected; 101 return injected;
101} 102}
102 103
104static void update_handled_vectors(struct kvm_ioapic *ioapic)
105{
106 DECLARE_BITMAP(handled_vectors, 256);
107 int i;
108
109 memset(handled_vectors, 0, sizeof(handled_vectors));
110 for (i = 0; i < IOAPIC_NUM_PINS; ++i)
111 __set_bit(ioapic->redirtbl[i].fields.vector, handled_vectors);
112 memcpy(ioapic->handled_vectors, handled_vectors,
113 sizeof(handled_vectors));
114 smp_wmb();
115}
116
103static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) 117static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
104{ 118{
105 unsigned index; 119 unsigned index;
@@ -134,6 +148,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
134 e->bits |= (u32) val; 148 e->bits |= (u32) val;
135 e->fields.remote_irr = 0; 149 e->fields.remote_irr = 0;
136 } 150 }
151 update_handled_vectors(ioapic);
137 mask_after = e->fields.mask; 152 mask_after = e->fields.mask;
138 if (mask_before != mask_after) 153 if (mask_before != mask_after)
139 kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after); 154 kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after);
@@ -182,6 +197,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
182 union kvm_ioapic_redirect_entry entry; 197 union kvm_ioapic_redirect_entry entry;
183 int ret = 1; 198 int ret = 1;
184 199
200 spin_lock(&ioapic->lock);
185 if (irq >= 0 && irq < IOAPIC_NUM_PINS) { 201 if (irq >= 0 && irq < IOAPIC_NUM_PINS) {
186 entry = ioapic->redirtbl[irq]; 202 entry = ioapic->redirtbl[irq];
187 level ^= entry.fields.polarity; 203 level ^= entry.fields.polarity;
@@ -198,34 +214,54 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
198 } 214 }
199 trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0); 215 trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
200 } 216 }
217 spin_unlock(&ioapic->lock);
218
201 return ret; 219 return ret;
202} 220}
203 221
204static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int pin, 222static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector,
205 int trigger_mode) 223 int trigger_mode)
206{ 224{
207 union kvm_ioapic_redirect_entry *ent; 225 int i;
226
227 for (i = 0; i < IOAPIC_NUM_PINS; i++) {
228 union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
208 229
209 ent = &ioapic->redirtbl[pin]; 230 if (ent->fields.vector != vector)
231 continue;
210 232
211 kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, pin); 233 /*
234 * We are dropping lock while calling ack notifiers because ack
235 * notifier callbacks for assigned devices call into IOAPIC
236 * recursively. Since remote_irr is cleared only after call
237 * to notifiers if the same vector will be delivered while lock
238 * is dropped it will be put into irr and will be delivered
239 * after ack notifier returns.
240 */
241 spin_unlock(&ioapic->lock);
242 kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
243 spin_lock(&ioapic->lock);
244
245 if (trigger_mode != IOAPIC_LEVEL_TRIG)
246 continue;
212 247
213 if (trigger_mode == IOAPIC_LEVEL_TRIG) {
214 ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG); 248 ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
215 ent->fields.remote_irr = 0; 249 ent->fields.remote_irr = 0;
216 if (!ent->fields.mask && (ioapic->irr & (1 << pin))) 250 if (!ent->fields.mask && (ioapic->irr & (1 << i)))
217 ioapic_service(ioapic, pin); 251 ioapic_service(ioapic, i);
218 } 252 }
219} 253}
220 254
221void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode) 255void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode)
222{ 256{
223 struct kvm_ioapic *ioapic = kvm->arch.vioapic; 257 struct kvm_ioapic *ioapic = kvm->arch.vioapic;
224 int i;
225 258
226 for (i = 0; i < IOAPIC_NUM_PINS; i++) 259 smp_rmb();
227 if (ioapic->redirtbl[i].fields.vector == vector) 260 if (!test_bit(vector, ioapic->handled_vectors))
228 __kvm_ioapic_update_eoi(ioapic, i, trigger_mode); 261 return;
262 spin_lock(&ioapic->lock);
263 __kvm_ioapic_update_eoi(ioapic, vector, trigger_mode);
264 spin_unlock(&ioapic->lock);
229} 265}
230 266
231static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev) 267static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
@@ -250,8 +286,8 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
250 ioapic_debug("addr %lx\n", (unsigned long)addr); 286 ioapic_debug("addr %lx\n", (unsigned long)addr);
251 ASSERT(!(addr & 0xf)); /* check alignment */ 287 ASSERT(!(addr & 0xf)); /* check alignment */
252 288
253 mutex_lock(&ioapic->kvm->irq_lock);
254 addr &= 0xff; 289 addr &= 0xff;
290 spin_lock(&ioapic->lock);
255 switch (addr) { 291 switch (addr) {
256 case IOAPIC_REG_SELECT: 292 case IOAPIC_REG_SELECT:
257 result = ioapic->ioregsel; 293 result = ioapic->ioregsel;
@@ -265,6 +301,8 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
265 result = 0; 301 result = 0;
266 break; 302 break;
267 } 303 }
304 spin_unlock(&ioapic->lock);
305
268 switch (len) { 306 switch (len) {
269 case 8: 307 case 8:
270 *(u64 *) val = result; 308 *(u64 *) val = result;
@@ -277,7 +315,6 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
277 default: 315 default:
278 printk(KERN_WARNING "ioapic: wrong length %d\n", len); 316 printk(KERN_WARNING "ioapic: wrong length %d\n", len);
279 } 317 }
280 mutex_unlock(&ioapic->kvm->irq_lock);
281 return 0; 318 return 0;
282} 319}
283 320
@@ -293,15 +330,15 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
293 (void*)addr, len, val); 330 (void*)addr, len, val);
294 ASSERT(!(addr & 0xf)); /* check alignment */ 331 ASSERT(!(addr & 0xf)); /* check alignment */
295 332
296 mutex_lock(&ioapic->kvm->irq_lock);
297 if (len == 4 || len == 8) 333 if (len == 4 || len == 8)
298 data = *(u32 *) val; 334 data = *(u32 *) val;
299 else { 335 else {
300 printk(KERN_WARNING "ioapic: Unsupported size %d\n", len); 336 printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
301 goto unlock; 337 return 0;
302 } 338 }
303 339
304 addr &= 0xff; 340 addr &= 0xff;
341 spin_lock(&ioapic->lock);
305 switch (addr) { 342 switch (addr) {
306 case IOAPIC_REG_SELECT: 343 case IOAPIC_REG_SELECT:
307 ioapic->ioregsel = data; 344 ioapic->ioregsel = data;
@@ -312,15 +349,14 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
312 break; 349 break;
313#ifdef CONFIG_IA64 350#ifdef CONFIG_IA64
314 case IOAPIC_REG_EOI: 351 case IOAPIC_REG_EOI:
315 kvm_ioapic_update_eoi(ioapic->kvm, data, IOAPIC_LEVEL_TRIG); 352 __kvm_ioapic_update_eoi(ioapic, data, IOAPIC_LEVEL_TRIG);
316 break; 353 break;
317#endif 354#endif
318 355
319 default: 356 default:
320 break; 357 break;
321 } 358 }
322unlock: 359 spin_unlock(&ioapic->lock);
323 mutex_unlock(&ioapic->kvm->irq_lock);
324 return 0; 360 return 0;
325} 361}
326 362
@@ -334,6 +370,7 @@ void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
334 ioapic->ioregsel = 0; 370 ioapic->ioregsel = 0;
335 ioapic->irr = 0; 371 ioapic->irr = 0;
336 ioapic->id = 0; 372 ioapic->id = 0;
373 update_handled_vectors(ioapic);
337} 374}
338 375
339static const struct kvm_io_device_ops ioapic_mmio_ops = { 376static const struct kvm_io_device_ops ioapic_mmio_ops = {
@@ -349,14 +386,54 @@ int kvm_ioapic_init(struct kvm *kvm)
349 ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL); 386 ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
350 if (!ioapic) 387 if (!ioapic)
351 return -ENOMEM; 388 return -ENOMEM;
389 spin_lock_init(&ioapic->lock);
352 kvm->arch.vioapic = ioapic; 390 kvm->arch.vioapic = ioapic;
353 kvm_ioapic_reset(ioapic); 391 kvm_ioapic_reset(ioapic);
354 kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops); 392 kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
355 ioapic->kvm = kvm; 393 ioapic->kvm = kvm;
356 ret = kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &ioapic->dev); 394 mutex_lock(&kvm->slots_lock);
357 if (ret < 0) 395 ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
396 mutex_unlock(&kvm->slots_lock);
397 if (ret < 0) {
398 kvm->arch.vioapic = NULL;
358 kfree(ioapic); 399 kfree(ioapic);
400 }
359 401
360 return ret; 402 return ret;
361} 403}
362 404
405void kvm_ioapic_destroy(struct kvm *kvm)
406{
407 struct kvm_ioapic *ioapic = kvm->arch.vioapic;
408
409 if (ioapic) {
410 kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
411 kvm->arch.vioapic = NULL;
412 kfree(ioapic);
413 }
414}
415
416int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
417{
418 struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
419 if (!ioapic)
420 return -EINVAL;
421
422 spin_lock(&ioapic->lock);
423 memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
424 spin_unlock(&ioapic->lock);
425 return 0;
426}
427
428int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
429{
430 struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
431 if (!ioapic)
432 return -EINVAL;
433
434 spin_lock(&ioapic->lock);
435 memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
436 update_handled_vectors(ioapic);
437 spin_unlock(&ioapic->lock);
438 return 0;
439}