Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/include/asm/kvm_host.h |  3
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c         | 72
-rw-r--r--  arch/ia64/kvm/kvm_fw.c           |  9
-rw-r--r--  arch/ia64/kvm/process.c          |  2
4 files changed, 46 insertions(+), 40 deletions(-)
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 04c0b88f7b3a..c60d324da540 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -365,7 +365,8 @@ struct kvm_vcpu_arch {
 	long itc_offset;
 	unsigned long itc_check;
 	unsigned long timer_check;
-	unsigned long timer_pending;
+	unsigned int timer_pending;
+	unsigned int timer_fired;
 
 	unsigned long vrr[8];
 	unsigned long ibr[8];
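Note: timer_pending keeps its old meaning (a guest timer event awaits injection) but shrinks to a plain flag, and the new timer_fired records that the halt timer actually expired. The point of the split shows up further down: kvm_cpu_has_pending_timer() starts returning timer_fired, so generic KVM code can tell a timer wakeup apart from other wakeups. A condensed lifecycle of the two flags, stitched together from the hunks below (illustrative, not literal kernel code):

	prepare_for_halt(vcpu);		/* timer_pending = 1; timer_fired = 0   */
	ret = kvm_emulate_halt(vcpu);	/* vcpu sleeps on its waitqueue         */
	/* on expiry, hlt_timer_fn() sets timer_fired = 1 and wakes the vcpu;  */
	/* kvm_cpu_has_pending_timer() then reports timer_fired to generic code */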
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 8a2b13ff0aff..3caac477de9e 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -385,6 +385,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	struct kvm *kvm = vcpu->kvm;
 	struct call_data call_data;
 	int i;
+
 	call_data.ptc_g_data = p->u.ptc_g_data;
 
 	for (i = 0; i < KVM_MAX_VCPUS; i++) {
@@ -418,33 +419,41 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 	ktime_t kt;
 	long itc_diff;
 	unsigned long vcpu_now_itc;
-
 	unsigned long expires;
 	struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
 	unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
 	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
 
-	vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
+	if (irqchip_in_kernel(vcpu->kvm)) {
 
-	if (time_after(vcpu_now_itc, vpd->itm)) {
-		vcpu->arch.timer_check = 1;
-		return 1;
-	}
-	itc_diff = vpd->itm - vcpu_now_itc;
-	if (itc_diff < 0)
-		itc_diff = -itc_diff;
+		vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
 
-	expires = div64_u64(itc_diff, cyc_per_usec);
-	kt = ktime_set(0, 1000 * expires);
-	vcpu->arch.ht_active = 1;
-	hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
+		if (time_after(vcpu_now_itc, vpd->itm)) {
+			vcpu->arch.timer_check = 1;
+			return 1;
+		}
+		itc_diff = vpd->itm - vcpu_now_itc;
+		if (itc_diff < 0)
+			itc_diff = -itc_diff;
+
+		expires = div64_u64(itc_diff, cyc_per_usec);
+		kt = ktime_set(0, 1000 * expires);
+
+		down_read(&vcpu->kvm->slots_lock);
+		vcpu->arch.ht_active = 1;
+		hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
 
-	if (irqchip_in_kernel(vcpu->kvm)) {
 		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
 		kvm_vcpu_block(vcpu);
 		hrtimer_cancel(p_ht);
 		vcpu->arch.ht_active = 0;
 
+		if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
+			if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
+				vcpu->arch.mp_state =
+					KVM_MP_STATE_RUNNABLE;
+		up_read(&vcpu->kvm->slots_lock);
+
 		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
 			return -EINTR;
 		return 1;
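Note: the sleep path is now guarded by irqchip_in_kernel(), since without an in-kernel irqchip there is no in-kernel timer or interrupt source to wake the halted vcpu. After kvm_vcpu_block() returns, KVM_REQ_UNHALT (set by the generic blocking path when it finds the vcpu runnable) is what promotes the vcpu back to KVM_MP_STATE_RUNNABLE; both the hrtimer arming and that transition happen with kvm->slots_lock held for read. The timeout arithmetic, pulled into a hypothetical helper purely for illustration (halt_timeout is not a name in the patch):

	/* Guest ITC cycles -> microseconds via div64_u64(), then
	 * microseconds -> nanoseconds for the hrtimer. Condensed
	 * from the hunk above.
	 */
	static ktime_t halt_timeout(unsigned long itc_diff,
				    unsigned long cyc_per_usec)
	{
		unsigned long expires = div64_u64(itc_diff, cyc_per_usec);

		return ktime_set(0, 1000 * expires);	/* usecs to nsecs */
	}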
@@ -484,10 +493,6 @@ static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
 static const int kvm_vti_max_exit_handlers =
 	sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
 
-static void kvm_prepare_guest_switch(struct kvm_vcpu *vcpu)
-{
-}
-
 static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
 {
 	struct exit_ctl_data *p_exit_data;
@@ -600,8 +605,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 again:
 	preempt_disable();
-
-	kvm_prepare_guest_switch(vcpu);
 	local_irq_disable();
 
 	if (signal_pending(current)) {
@@ -614,7 +617,7 @@ again:
 
 	vcpu->guest_mode = 1;
 	kvm_guest_enter();
-
+	down_read(&vcpu->kvm->slots_lock);
 	r = vti_vcpu_run(vcpu, kvm_run);
 	if (r < 0) {
 		local_irq_enable();
@@ -634,9 +637,8 @@ again:
 	 * But we need to prevent reordering, hence this barrier():
 	 */
 	barrier();
-
 	kvm_guest_exit();
-
+	up_read(&vcpu->kvm->slots_lock);
 	preempt_enable();
 
 	r = kvm_handle_exit(kvm_run, vcpu);
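Note: __vcpu_run() now enters the guest with kvm->slots_lock held for read and releases it only after kvm_guest_exit(), and kvm_emulate_halt() above takes the same read lock around its sleep; presumably this keeps memslot updates, which take the lock for write, from racing with a running or halted vcpu. The bracket, condensed (the real code keeps these calls inline, separated by the exit handling shown above):

	down_read(&vcpu->kvm->slots_lock);	/* memslots stable from here...   */
	r = vti_vcpu_run(vcpu, kvm_run);	/* ...across guest execution...   */
	/* ... exit bookkeeping, barrier(), kvm_guest_exit() ... */
	up_read(&vcpu->kvm->slots_lock);	/* ...until after the guest exits */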
@@ -673,6 +675,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
 		kvm_vcpu_block(vcpu);
+		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
 		vcpu_put(vcpu);
 		return -EAGAIN;
 	}
@@ -1125,15 +1128,16 @@ static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
 	wait_queue_head_t *q;
 
 	vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
+	q = &vcpu->wq;
+
 	if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
 		goto out;
 
-	q = &vcpu->wq;
-	if (waitqueue_active(q)) {
-		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+	if (waitqueue_active(q))
 		wake_up_interruptible(q);
-	}
+
 out:
+	vcpu->arch.timer_fired = 1;
 	vcpu->arch.timer_check = 1;
 	return HRTIMER_NORESTART;
 }
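Note: the timer handler no longer makes the vcpu runnable itself; it only wakes any sleeper and records the expiry in timer_fired (which is set even when the vcpu was not halted, since the assignment sits after the out: label). The HALTED-to-RUNNABLE transition is now made in exactly one place, kvm_emulate_halt(), keyed off KVM_REQ_UNHALT. The two sides of the handshake, abridged from this patch:

	/* timer side, hlt_timer_fn(): */
	vcpu->arch.timer_fired = 1;		/* seen by kvm_cpu_has_pending_timer() */
	wake_up_interruptible(&vcpu->wq);	/* lets kvm_vcpu_block() re-check      */

	/* halt side, kvm_emulate_halt(), after kvm_vcpu_block() returns: */
	if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
		if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;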
@@ -1702,12 +1706,14 @@ static void vcpu_kick_intr(void *info)
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 {
 	int ipi_pcpu = vcpu->cpu;
+	int cpu = get_cpu();
 
 	if (waitqueue_active(&vcpu->wq))
 		wake_up_interruptible(&vcpu->wq);
 
-	if (vcpu->guest_mode)
+	if (vcpu->guest_mode && cpu != ipi_pcpu)
 		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
+	put_cpu();
 }
 
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
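Note: get_cpu() disables preemption, so the task cannot migrate between reading vcpu->cpu and issuing the IPI, and the added cpu != ipi_pcpu test stops a vcpu from sending an IPI to the CPU it is currently running on. The guard in isolation, with the reasoning as comments:

	int cpu = get_cpu();		/* pins us to this CPU until put_cpu() */

	if (vcpu->guest_mode && cpu != ipi_pcpu)	/* never self-IPI      */
		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
	put_cpu();			/* re-enables preemption               */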
@@ -1717,13 +1723,7 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
 
 	if (!test_and_set_bit(vec, &vpd->irr[0])) {
 		vcpu->arch.irq_new_pending = 1;
-		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
-			kvm_vcpu_kick(vcpu);
-		else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
-			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-			if (waitqueue_active(&vcpu->wq))
-				wake_up_interruptible(&vcpu->wq);
-		}
+		kvm_vcpu_kick(vcpu);
 		return 1;
 	}
 	return 0;
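Note: the open-coded wakeup disappears because kvm_vcpu_kick() just above already does both halves of the job: it wakes any waiter on vcpu->wq and IPIs a vcpu in guest mode. The mp_state change that used to live here is now owned by the halt path. The resulting body, annotated (abridged to the changed region):

	if (!test_and_set_bit(vec, &vpd->irr[0])) {	/* vector newly raised   */
		vcpu->arch.irq_new_pending = 1;
		kvm_vcpu_kick(vcpu);	/* wakes a sleeper and/or IPIs the guest */
		return 1;
	}
	return 0;					/* was already pending   */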
@@ -1793,7 +1793,7 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
-	return 0;
+	return vcpu->arch.timer_fired;
 }
 
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
index 0c69d9ec92d4..cb7600bdff9d 100644
--- a/arch/ia64/kvm/kvm_fw.c
+++ b/arch/ia64/kvm/kvm_fw.c
@@ -286,6 +286,12 @@ static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu)
 	return index;
 }
 
+static void prepare_for_halt(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.timer_pending = 1;
+	vcpu->arch.timer_fired = 0;
+}
+
 int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 
@@ -304,11 +310,10 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		break;
 	case PAL_HALT_LIGHT:
 	{
-		vcpu->arch.timer_pending = 1;
 		INIT_PAL_STATUS_SUCCESS(result);
+		prepare_for_halt(vcpu);
 		if (kvm_highest_pending_irq(vcpu) == -1)
 			ret = kvm_emulate_halt(vcpu);
-
 	}
 	break;
 
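Note: prepare_for_halt() clears timer_fired before the pending-interrupt check and the halt, so a flag left over from an earlier expiry cannot make the new halt return immediately as a spurious timer wakeup; clearing it any later would race with hlt_timer_fn(). The ordering, abridged:

	prepare_for_halt(vcpu);			/* timer_fired = 0 first...        */
	if (kvm_highest_pending_irq(vcpu) == -1)
		ret = kvm_emulate_halt(vcpu);	/* ...so only a fresh expiry of
						 * the hlt timer can set it again */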
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index 3417783ae164..800817307b7b 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -713,7 +713,7 @@ void leave_hypervisor_tail(void)
 			if (!(VCPU(v, itv) & (1 << 16))) {
 				vcpu_pend_interrupt(v, VCPU(v, itv)
 						& 0xff);
-			VMX(v, itc_check) = 0;
+				VMX(v, itc_check) = 0;
 			} else {
 				v->arch.timer_pending = 1;
 			}