author     Arjan van de Ven <arjan@linux.intel.com>   2008-10-17 12:20:26 -0400
committer  Arjan van de Ven <arjan@linux.intel.com>   2008-10-17 12:20:26 -0400
commit     651dab4264e4ba0e563f5ff56f748127246e9065
tree       016630974bdcb00fe529b673f96d389e0fd6dc94 /arch/x86/kvm/i8254.c
parent     40b8606253552109815786e5d4b0de98782d31f5
parent     2e532d68a2b3e2aa6b19731501222069735c741c
Merge commit 'linus/master' into merge-linus
Conflicts:
arch/x86/kvm/i8254.c
Diffstat (limited to 'arch/x86/kvm/i8254.c')
 -rw-r--r--  arch/x86/kvm/i8254.c | 83
 1 file changed, 40 insertions, 43 deletions
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 1bf8f57a3041..11c6725fb798 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -200,13 +200,14 @@ static int __pit_timer_fn(struct kvm_kpit_state *ps)
 
         if (!atomic_inc_and_test(&pt->pending))
                 set_bit(KVM_REQ_PENDING_TIMER, &vcpu0->requests);
-        if (vcpu0 && waitqueue_active(&vcpu0->wq)) {
-                vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+
+        if (vcpu0 && waitqueue_active(&vcpu0->wq))
                 wake_up_interruptible(&vcpu0->wq);
-        }
 
         hrtimer_add_expires_ns(&pt->timer, pt->period);
-        pt->scheduled = ktime_to_ns(hrtimer_get_expires(&pt->timer));
+        pt->scheduled = hrtimer_get_expires_ns(&pt->timer);
+        if (pt->period)
+                ps->channels[0].count_load_time = hrtimer_get_expires(&pt->timer);
 
         return (pt->period == 0 ? 0 : 1);
 }
@@ -215,12 +216,22 @@ int pit_has_pending_timer(struct kvm_vcpu *vcpu)
 {
         struct kvm_pit *pit = vcpu->kvm->arch.vpit;
 
-        if (pit && vcpu->vcpu_id == 0 && pit->pit_state.inject_pending)
+        if (pit && vcpu->vcpu_id == 0 && pit->pit_state.irq_ack)
                 return atomic_read(&pit->pit_state.pit_timer.pending);
-
         return 0;
 }
 
+static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
+{
+        struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
+                                                 irq_ack_notifier);
+        spin_lock(&ps->inject_lock);
+        if (atomic_dec_return(&ps->pit_timer.pending) < 0)
+                atomic_inc(&ps->pit_timer.pending);
+        ps->irq_ack = 1;
+        spin_unlock(&ps->inject_lock);
+}
+
 static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
 {
         struct kvm_kpit_state *ps;
@@ -255,8 +266,9 @@ static void destroy_pit_timer(struct kvm_kpit_timer *pt)
         hrtimer_cancel(&pt->timer);
 }
 
-static void create_pit_timer(struct kvm_kpit_timer *pt, u32 val, int is_period)
+static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
 {
+        struct kvm_kpit_timer *pt = &ps->pit_timer;
         s64 interval;
 
         interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
@@ -268,6 +280,7 @@ static void create_pit_timer(struct kvm_kpit_timer *pt, u32 val, int is_period)
         pt->period = (is_period == 0) ? 0 : interval;
         pt->timer.function = pit_timer_fn;
         atomic_set(&pt->pending, 0);
+        ps->irq_ack = 1;
 
         hrtimer_start(&pt->timer, ktime_add_ns(ktime_get(), interval),
                       HRTIMER_MODE_ABS);
@@ -302,11 +315,11 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
         case 1:
         /* FIXME: enhance mode 4 precision */
         case 4:
-                create_pit_timer(&ps->pit_timer, val, 0);
+                create_pit_timer(ps, val, 0);
                 break;
         case 2:
         case 3:
-                create_pit_timer(&ps->pit_timer, val, 1);
+                create_pit_timer(ps, val, 1);
                 break;
         default:
                 destroy_pit_timer(&ps->pit_timer);
@@ -520,7 +533,7 @@ void kvm_pit_reset(struct kvm_pit *pit)
         mutex_unlock(&pit->pit_state.lock);
 
         atomic_set(&pit->pit_state.pit_timer.pending, 0);
-        pit->pit_state.inject_pending = 1;
+        pit->pit_state.irq_ack = 1;
 }
 
 struct kvm_pit *kvm_create_pit(struct kvm *kvm)
@@ -534,6 +547,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm)
 
         mutex_init(&pit->pit_state.lock);
         mutex_lock(&pit->pit_state.lock);
+        spin_lock_init(&pit->pit_state.inject_lock);
 
         /* Initialize PIO device */
         pit->dev.read = pit_ioport_read;
@@ -555,6 +569,9 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm)
         pit_state->pit = pit;
         hrtimer_init(&pit_state->pit_timer.timer,
                      CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+        pit_state->irq_ack_notifier.gsi = 0;
+        pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq;
+        kvm_register_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
         mutex_unlock(&pit->pit_state.lock);
 
         kvm_pit_reset(pit);
@@ -578,10 +595,8 @@ void kvm_free_pit(struct kvm *kvm)
 static void __inject_pit_timer_intr(struct kvm *kvm)
 {
         mutex_lock(&kvm->lock);
-        kvm_ioapic_set_irq(kvm->arch.vioapic, 0, 1);
-        kvm_ioapic_set_irq(kvm->arch.vioapic, 0, 0);
-        kvm_pic_set_irq(pic_irqchip(kvm), 0, 1);
-        kvm_pic_set_irq(pic_irqchip(kvm), 0, 0);
+        kvm_set_irq(kvm, 0, 1);
+        kvm_set_irq(kvm, 0, 0);
         mutex_unlock(&kvm->lock);
 }
 
@@ -592,37 +607,19 @@ void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
         struct kvm_kpit_state *ps;
 
         if (vcpu && pit) {
+                int inject = 0;
                 ps = &pit->pit_state;
 
-                /* Try to inject pending interrupts when:
-                 * 1. Pending exists
-                 * 2. Last interrupt was accepted or waited for too long time*/
-                if (atomic_read(&ps->pit_timer.pending) &&
-                    (ps->inject_pending ||
-                     (jiffies - ps->last_injected_time
-                                >= KVM_MAX_PIT_INTR_INTERVAL))) {
-                        ps->inject_pending = 0;
-                        __inject_pit_timer_intr(kvm);
-                        ps->last_injected_time = jiffies;
-                }
-        }
-}
-
-void kvm_pit_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
-{
-        struct kvm_arch *arch = &vcpu->kvm->arch;
-        struct kvm_kpit_state *ps;
-
-        if (vcpu && arch->vpit) {
-                ps = &arch->vpit->pit_state;
-                if (atomic_read(&ps->pit_timer.pending) &&
-                    (((arch->vpic->pics[0].imr & 1) == 0 &&
-                       arch->vpic->pics[0].irq_base == vec) ||
-                      (arch->vioapic->redirtbl[0].fields.vector == vec &&
-                       arch->vioapic->redirtbl[0].fields.mask != 1))) {
-                        ps->inject_pending = 1;
-                        atomic_dec(&ps->pit_timer.pending);
-                        ps->channels[0].count_load_time = ktime_get();
+                /* Try to inject pending interrupts when
+                 * last one has been acked.
+                 */
+                spin_lock(&ps->inject_lock);
+                if (atomic_read(&ps->pit_timer.pending) && ps->irq_ack) {
+                        ps->irq_ack = 0;
+                        inject = 1;
                 }
+                spin_unlock(&ps->inject_lock);
+                if (inject)
+                        __inject_pit_timer_intr(kvm);
         }
 }