Diffstat (limited to 'arch/x86/kvm/i8254.c')
 arch/x86/kvm/i8254.c | 41 +++++++++++++++++++++++------------------
 1 file changed, 23 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 144e7f60b5e2..0150affad25d 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -29,7 +29,10 @@
  * Based on QEMU and Xen.
  */
 
+#define pr_fmt(fmt) "pit: " fmt
+
 #include <linux/kvm_host.h>
+#include <linux/slab.h>
 
 #include "irq.h"
 #include "i8254.h"
@@ -240,11 +243,11 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
 {
 	struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
 						 irq_ack_notifier);
-	spin_lock(&ps->inject_lock);
+	raw_spin_lock(&ps->inject_lock);
 	if (atomic_dec_return(&ps->pit_timer.pending) < 0)
 		atomic_inc(&ps->pit_timer.pending);
 	ps->irq_ack = 1;
-	spin_unlock(&ps->inject_lock);
+	raw_spin_unlock(&ps->inject_lock);
 }
 
 void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
@@ -262,7 +265,7 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
 
 static void destroy_pit_timer(struct kvm_timer *pt)
 {
-	pr_debug("pit: execute del timer!\n");
+	pr_debug("execute del timer!\n");
 	hrtimer_cancel(&pt->timer);
 }
 
@@ -284,7 +287,7 @@ static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
 
 	interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
 
-	pr_debug("pit: create pit timer, interval is %llu nsec\n", interval);
+	pr_debug("create pit timer, interval is %llu nsec\n", interval);
 
 	/* TODO The new value only affected after the retriggered */
 	hrtimer_cancel(&pt->timer);
@@ -309,7 +312,7 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
 
 	WARN_ON(!mutex_is_locked(&ps->lock));
 
-	pr_debug("pit: load_count val is %d, channel is %d\n", val, channel);
+	pr_debug("load_count val is %d, channel is %d\n", val, channel);
 
 	/*
 	 * The largest possible initial count is 0; this is equivalent
@@ -395,8 +398,8 @@ static int pit_ioport_write(struct kvm_io_device *this,
 	mutex_lock(&pit_state->lock);
 
 	if (val != 0)
-		pr_debug("pit: write addr is 0x%x, len is %d, val is 0x%x\n",
+		pr_debug("write addr is 0x%x, len is %d, val is 0x%x\n",
 			 (unsigned int)addr, len, val);
 
 	if (addr == 3) {
 		channel = val >> 6;
@@ -465,6 +468,9 @@ static int pit_ioport_read(struct kvm_io_device *this,
 		return -EOPNOTSUPP;
 
 	addr &= KVM_PIT_CHANNEL_MASK;
+	if (addr == 3)
+		return 0;
+
 	s = &pit_state->channels[addr];
 
 	mutex_lock(&pit_state->lock);
@@ -600,7 +606,7 @@ static const struct kvm_io_device_ops speaker_dev_ops = {
 	.write    = speaker_ioport_write,
 };
 
-/* Caller must have writers lock on slots_lock */
+/* Caller must hold slots_lock */
 struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
 {
 	struct kvm_pit *pit;
@@ -619,7 +625,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
 
 	mutex_init(&pit->pit_state.lock);
 	mutex_lock(&pit->pit_state.lock);
-	spin_lock_init(&pit->pit_state.inject_lock);
+	raw_spin_lock_init(&pit->pit_state.inject_lock);
 
 	kvm->arch.vpit = pit;
 	pit->kvm = kvm;
@@ -640,13 +646,13 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
 	kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
 
 	kvm_iodevice_init(&pit->dev, &pit_dev_ops);
-	ret = __kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev);
+	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, &pit->dev);
 	if (ret < 0)
 		goto fail;
 
 	if (flags & KVM_PIT_SPEAKER_DUMMY) {
 		kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops);
-		ret = __kvm_io_bus_register_dev(&kvm->pio_bus,
-						&pit->speaker_dev);
+		ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS,
+					      &pit->speaker_dev);
 		if (ret < 0)
 			goto fail_unregister;
@@ -655,11 +661,12 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
 	return pit;
 
 fail_unregister:
-	__kvm_io_bus_unregister_dev(&kvm->pio_bus, &pit->dev);
+	kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);
 
 fail:
-	if (pit->irq_source_id >= 0)
-		kvm_free_irq_source_id(kvm, pit->irq_source_id);
+	kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
+	kvm_unregister_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
+	kvm_free_irq_source_id(kvm, pit->irq_source_id);
 
 	kfree(pit);
 	return NULL;
@@ -688,10 +695,8 @@ static void __inject_pit_timer_intr(struct kvm *kvm)
 	struct kvm_vcpu *vcpu;
 	int i;
 
-	mutex_lock(&kvm->irq_lock);
 	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1);
 	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0);
-	mutex_unlock(&kvm->irq_lock);
 
 	/*
 	 * Provides NMI watchdog support via Virtual Wire mode.
@@ -720,12 +725,12 @@ void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
 		/* Try to inject pending interrupts when
 		 * last one has been acked.
 		 */
-		spin_lock(&ps->inject_lock);
+		raw_spin_lock(&ps->inject_lock);
 		if (atomic_read(&ps->pit_timer.pending) && ps->irq_ack) {
 			ps->irq_ack = 0;
 			inject = 1;
 		}
-		spin_unlock(&ps->inject_lock);
+		raw_spin_unlock(&ps->inject_lock);
 		if (inject)
 			__inject_pit_timer_intr(kvm);
 	}