path: root/arch/x86/kvm/i8254.c
Diffstat (limited to 'arch/x86/kvm/i8254.c')
-rw-r--r--	arch/x86/kvm/i8254.c	160
1 file changed, 104 insertions(+), 56 deletions(-)
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 21f68e00524f..82ad523b4901 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -231,7 +231,7 @@ int pit_has_pending_timer(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
 
-	if (pit && vcpu->vcpu_id == 0 && pit->pit_state.irq_ack)
+	if (pit && kvm_vcpu_is_bsp(vcpu) && pit->pit_state.irq_ack)
 		return atomic_read(&pit->pit_state.pit_timer.pending);
 	return 0;
 }
@@ -252,7 +252,7 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
 	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
 	struct hrtimer *timer;
 
-	if (vcpu->vcpu_id != 0 || !pit)
+	if (!kvm_vcpu_is_bsp(vcpu) || !pit)
 		return;
 
 	timer = &pit->pit_state.pit_timer.timer;
@@ -294,7 +294,7 @@ static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
 	pt->timer.function = kvm_timer_fn;
 	pt->t_ops = &kpit_ops;
 	pt->kvm = ps->pit->kvm;
-	pt->vcpu_id = 0;
+	pt->vcpu = pt->kvm->bsp_vcpu;
 
 	atomic_set(&pt->pending, 0);
 	ps->irq_ack = 1;
@@ -332,33 +332,62 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
 	case 1:
 	/* FIXME: enhance mode 4 precision */
 	case 4:
-		create_pit_timer(ps, val, 0);
+		if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) {
+			create_pit_timer(ps, val, 0);
+		}
 		break;
 	case 2:
 	case 3:
-		create_pit_timer(ps, val, 1);
+		if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)){
+			create_pit_timer(ps, val, 1);
+		}
 		break;
 	default:
 		destroy_pit_timer(&ps->pit_timer);
 	}
 }
 
-void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val)
+void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val, int hpet_legacy_start)
+{
+	u8 saved_mode;
+	if (hpet_legacy_start) {
+		/* save existing mode for later reenablement */
+		saved_mode = kvm->arch.vpit->pit_state.channels[0].mode;
+		kvm->arch.vpit->pit_state.channels[0].mode = 0xff; /* disable timer */
+		pit_load_count(kvm, channel, val);
+		kvm->arch.vpit->pit_state.channels[0].mode = saved_mode;
+	} else {
+		pit_load_count(kvm, channel, val);
+	}
+}
+
+static inline struct kvm_pit *dev_to_pit(struct kvm_io_device *dev)
+{
+	return container_of(dev, struct kvm_pit, dev);
+}
+
+static inline struct kvm_pit *speaker_to_pit(struct kvm_io_device *dev)
 {
-	mutex_lock(&kvm->arch.vpit->pit_state.lock);
-	pit_load_count(kvm, channel, val);
-	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+	return container_of(dev, struct kvm_pit, speaker_dev);
 }
 
-static void pit_ioport_write(struct kvm_io_device *this,
-			     gpa_t addr, int len, const void *data)
+static inline int pit_in_range(gpa_t addr)
 {
-	struct kvm_pit *pit = (struct kvm_pit *)this->private;
+	return ((addr >= KVM_PIT_BASE_ADDRESS) &&
+		(addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
+}
+
+static int pit_ioport_write(struct kvm_io_device *this,
+			    gpa_t addr, int len, const void *data)
+{
+	struct kvm_pit *pit = dev_to_pit(this);
 	struct kvm_kpit_state *pit_state = &pit->pit_state;
 	struct kvm *kvm = pit->kvm;
 	int channel, access;
 	struct kvm_kpit_channel_state *s;
 	u32 val = *(u32 *) data;
+	if (!pit_in_range(addr))
+		return -EOPNOTSUPP;
 
 	val &= 0xff;
 	addr &= KVM_PIT_CHANNEL_MASK;
@@ -421,16 +450,19 @@ static void pit_ioport_write(struct kvm_io_device *this,
 	}
 
 	mutex_unlock(&pit_state->lock);
+	return 0;
 }
 
-static void pit_ioport_read(struct kvm_io_device *this,
-			    gpa_t addr, int len, void *data)
+static int pit_ioport_read(struct kvm_io_device *this,
+			   gpa_t addr, int len, void *data)
 {
-	struct kvm_pit *pit = (struct kvm_pit *)this->private;
+	struct kvm_pit *pit = dev_to_pit(this);
 	struct kvm_kpit_state *pit_state = &pit->pit_state;
 	struct kvm *kvm = pit->kvm;
 	int ret, count;
 	struct kvm_kpit_channel_state *s;
+	if (!pit_in_range(addr))
+		return -EOPNOTSUPP;
 
 	addr &= KVM_PIT_CHANNEL_MASK;
 	s = &pit_state->channels[addr];
@@ -485,37 +517,36 @@ static void pit_ioport_read(struct kvm_io_device *this,
 	memcpy(data, (char *)&ret, len);
 
 	mutex_unlock(&pit_state->lock);
+	return 0;
 }
 
-static int pit_in_range(struct kvm_io_device *this, gpa_t addr,
-			int len, int is_write)
-{
-	return ((addr >= KVM_PIT_BASE_ADDRESS) &&
-		(addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
-}
-
-static void speaker_ioport_write(struct kvm_io_device *this,
-				 gpa_t addr, int len, const void *data)
+static int speaker_ioport_write(struct kvm_io_device *this,
+				gpa_t addr, int len, const void *data)
 {
-	struct kvm_pit *pit = (struct kvm_pit *)this->private;
+	struct kvm_pit *pit = speaker_to_pit(this);
 	struct kvm_kpit_state *pit_state = &pit->pit_state;
 	struct kvm *kvm = pit->kvm;
 	u32 val = *(u32 *) data;
+	if (addr != KVM_SPEAKER_BASE_ADDRESS)
+		return -EOPNOTSUPP;
 
 	mutex_lock(&pit_state->lock);
 	pit_state->speaker_data_on = (val >> 1) & 1;
 	pit_set_gate(kvm, 2, val & 1);
 	mutex_unlock(&pit_state->lock);
+	return 0;
 }
 
-static void speaker_ioport_read(struct kvm_io_device *this,
-				gpa_t addr, int len, void *data)
+static int speaker_ioport_read(struct kvm_io_device *this,
+			       gpa_t addr, int len, void *data)
 {
-	struct kvm_pit *pit = (struct kvm_pit *)this->private;
+	struct kvm_pit *pit = speaker_to_pit(this);
 	struct kvm_kpit_state *pit_state = &pit->pit_state;
 	struct kvm *kvm = pit->kvm;
 	unsigned int refresh_clock;
 	int ret;
+	if (addr != KVM_SPEAKER_BASE_ADDRESS)
+		return -EOPNOTSUPP;
 
 	/* Refresh clock toggles at about 15us. We approximate as 2^14ns. */
 	refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;
@@ -527,12 +558,7 @@ static void speaker_ioport_read(struct kvm_io_device *this,
 	len = sizeof(ret);
 	memcpy(data, (char *)&ret, len);
 	mutex_unlock(&pit_state->lock);
-}
-
-static int speaker_in_range(struct kvm_io_device *this, gpa_t addr,
-			    int len, int is_write)
-{
-	return (addr == KVM_SPEAKER_BASE_ADDRESS);
+	return 0;
 }
 
 void kvm_pit_reset(struct kvm_pit *pit)
@@ -541,6 +567,7 @@ void kvm_pit_reset(struct kvm_pit *pit)
 	struct kvm_kpit_channel_state *c;
 
 	mutex_lock(&pit->pit_state.lock);
+	pit->pit_state.flags = 0;
 	for (i = 0; i < 3; i++) {
 		c = &pit->pit_state.channels[i];
 		c->mode = 0xff;
@@ -563,10 +590,22 @@ static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask)
 	}
 }
 
-struct kvm_pit *kvm_create_pit(struct kvm *kvm)
+static const struct kvm_io_device_ops pit_dev_ops = {
+	.read = pit_ioport_read,
+	.write = pit_ioport_write,
+};
+
+static const struct kvm_io_device_ops speaker_dev_ops = {
+	.read = speaker_ioport_read,
+	.write = speaker_ioport_write,
+};
+
+/* Caller must have writers lock on slots_lock */
+struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
 {
 	struct kvm_pit *pit;
 	struct kvm_kpit_state *pit_state;
+	int ret;
 
 	pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL);
 	if (!pit)
@@ -582,19 +621,6 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm)
 	mutex_lock(&pit->pit_state.lock);
 	spin_lock_init(&pit->pit_state.inject_lock);
 
-	/* Initialize PIO device */
-	pit->dev.read = pit_ioport_read;
-	pit->dev.write = pit_ioport_write;
-	pit->dev.in_range = pit_in_range;
-	pit->dev.private = pit;
-	kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev);
-
-	pit->speaker_dev.read = speaker_ioport_read;
-	pit->speaker_dev.write = speaker_ioport_write;
-	pit->speaker_dev.in_range = speaker_in_range;
-	pit->speaker_dev.private = pit;
-	kvm_io_bus_register_dev(&kvm->pio_bus, &pit->speaker_dev);
-
 	kvm->arch.vpit = pit;
 	pit->kvm = kvm;
 
@@ -613,7 +639,30 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm)
 	pit->mask_notifier.func = pit_mask_notifer;
 	kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
 
+	kvm_iodevice_init(&pit->dev, &pit_dev_ops);
+	ret = __kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev);
+	if (ret < 0)
+		goto fail;
+
+	if (flags & KVM_PIT_SPEAKER_DUMMY) {
+		kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops);
+		ret = __kvm_io_bus_register_dev(&kvm->pio_bus,
+						&pit->speaker_dev);
+		if (ret < 0)
+			goto fail_unregister;
+	}
+
 	return pit;
+
+fail_unregister:
+	__kvm_io_bus_unregister_dev(&kvm->pio_bus, &pit->dev);
+
+fail:
+	if (pit->irq_source_id >= 0)
+		kvm_free_irq_source_id(kvm, pit->irq_source_id);
+
+	kfree(pit);
+	return NULL;
 }
 
 void kvm_free_pit(struct kvm *kvm)
@@ -623,6 +672,8 @@ void kvm_free_pit(struct kvm *kvm)
 	if (kvm->arch.vpit) {
 		kvm_unregister_irq_mask_notifier(kvm, 0,
 				&kvm->arch.vpit->mask_notifier);
+		kvm_unregister_irq_ack_notifier(kvm,
+				&kvm->arch.vpit->pit_state.irq_ack_notifier);
 		mutex_lock(&kvm->arch.vpit->pit_state.lock);
 		timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
 		hrtimer_cancel(timer);
@@ -637,10 +688,10 @@ static void __inject_pit_timer_intr(struct kvm *kvm)
 	struct kvm_vcpu *vcpu;
 	int i;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->irq_lock);
 	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1);
 	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0);
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->irq_lock);
 
 	/*
 	 * Provides NMI watchdog support via Virtual Wire mode.
@@ -652,11 +703,8 @@ static void __inject_pit_timer_intr(struct kvm *kvm)
 	 * VCPU0, and only if its LVT0 is in EXTINT mode.
 	 */
 	if (kvm->arch.vapics_in_nmi_mode > 0)
-		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-			vcpu = kvm->vcpus[i];
-			if (vcpu)
-				kvm_apic_nmi_wd_deliver(vcpu);
-		}
+		kvm_for_each_vcpu(i, vcpu, kvm)
+			kvm_apic_nmi_wd_deliver(vcpu);
 }
 
 void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
@@ -665,7 +713,7 @@ void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_kpit_state *ps;
 
-	if (vcpu && pit) {
+	if (pit) {
 		int inject = 0;
 		ps = &pit->pit_state;
 
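
For readers outside the kernel tree, the core idea of the conversion in this diff (dropping the dev->private back-pointer and the per-device in_range() hook in favour of a const ops table plus container_of(), with handlers returning -EOPNOTSUPP for addresses they do not claim) can be exercised in plain userspace C. The sketch below is illustrative only, assuming nothing beyond standard C: every name in it (demo_io_device, demo_pit, dev_to_demo_pit, the 0x40 port check, the -1 "not handled" return standing in for -EOPNOTSUPP) is invented for the example and is not part of the KVM API.

/*
 * Standalone userspace sketch of the ops-table + container_of() pattern.
 * All demo_* names are hypothetical; they only mirror the structure of the
 * kernel change above, not its actual types or behaviour.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_io_device;

struct demo_io_device_ops {
	int (*read)(struct demo_io_device *dev, unsigned long addr,
		    int len, void *data);
	int (*write)(struct demo_io_device *dev, unsigned long addr,
		     int len, const void *data);
};

struct demo_io_device {
	const struct demo_io_device_ops *ops;
};

/* The owner embeds the generic device, like kvm_pit embeds dev/speaker_dev. */
struct demo_pit {
	int counter;
	struct demo_io_device dev;
};

static struct demo_pit *dev_to_demo_pit(struct demo_io_device *dev)
{
	return container_of(dev, struct demo_pit, dev);
}

static int demo_pit_read(struct demo_io_device *dev, unsigned long addr,
			 int len, void *data)
{
	struct demo_pit *pit = dev_to_demo_pit(dev);

	if (addr != 0x40)	/* not ours: the bus would try other devices */
		return -1;	/* stands in for -EOPNOTSUPP */
	*(int *)data = pit->counter;
	return 0;
}

static int demo_pit_write(struct demo_io_device *dev, unsigned long addr,
			  int len, const void *data)
{
	struct demo_pit *pit = dev_to_demo_pit(dev);

	if (addr != 0x40)
		return -1;
	pit->counter = *(const int *)data;
	return 0;
}

static const struct demo_io_device_ops demo_pit_ops = {
	.read = demo_pit_read,
	.write = demo_pit_write,
};

int main(void)
{
	struct demo_pit pit = { .counter = 0, .dev = { .ops = &demo_pit_ops } };
	int val = 1234, out = 0;

	/* A "bus" only ever sees &pit.dev; the handler recovers the owner. */
	pit.dev.ops->write(&pit.dev, 0x40, sizeof(val), &val);
	pit.dev.ops->read(&pit.dev, 0x40, sizeof(out), &out);
	printf("counter = %d\n", out);
	return 0;
}

The range check moving into the handler is what lets the in_range() callback and the private pointer disappear: a handler that is not responsible for an address simply reports "not handled" and the bus moves on.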