aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm/lapic.c
diff options
context:
space:
mode:
authorIngo Molnar <mingo@kernel.org>2015-03-05 14:52:18 -0500
committerIngo Molnar <mingo@kernel.org>2015-03-05 14:52:18 -0500
commit33ca8a53f262b4af40611bea331b8c87d133af72 (patch)
treed6468c820a556c4915bcb5b761204a0fb19e8225 /arch/x86/kvm/lapic.c
parentdb2dcb4f91d5fec5c346a82c309187ee821e2495 (diff)
parent13a7a6ac0a11197edcd0f756a035f472b42cdf8b (diff)
Merge tag 'v4.0-rc2' into irq/core, to refresh the tree before applying new changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kvm/lapic.c')
-rw-r--r--arch/x86/kvm/lapic.c150
1 file changed, 99 insertions, 51 deletions
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 4f0c0b954686..e55b5fc344eb 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -33,6 +33,7 @@
33#include <asm/page.h> 33#include <asm/page.h>
34#include <asm/current.h> 34#include <asm/current.h>
35#include <asm/apicdef.h> 35#include <asm/apicdef.h>
36#include <asm/delay.h>
36#include <linux/atomic.h> 37#include <linux/atomic.h>
37#include <linux/jump_label.h> 38#include <linux/jump_label.h>
38#include "kvm_cache_regs.h" 39#include "kvm_cache_regs.h"
@@ -192,6 +193,9 @@ static void recalculate_apic_map(struct kvm *kvm)
192 u16 cid, lid; 193 u16 cid, lid;
193 u32 ldr, aid; 194 u32 ldr, aid;
194 195
196 if (!kvm_apic_present(vcpu))
197 continue;
198
195 aid = kvm_apic_id(apic); 199 aid = kvm_apic_id(apic);
196 ldr = kvm_apic_get_reg(apic, APIC_LDR); 200 ldr = kvm_apic_get_reg(apic, APIC_LDR);
197 cid = apic_cluster_id(new, ldr); 201 cid = apic_cluster_id(new, ldr);
@@ -324,17 +328,24 @@ static u8 count_vectors(void *bitmap)
324 return count; 328 return count;
325} 329}
326 330
327void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir) 331void __kvm_apic_update_irr(u32 *pir, void *regs)
328{ 332{
329 u32 i, pir_val; 333 u32 i, pir_val;
330 struct kvm_lapic *apic = vcpu->arch.apic;
331 334
332 for (i = 0; i <= 7; i++) { 335 for (i = 0; i <= 7; i++) {
333 pir_val = xchg(&pir[i], 0); 336 pir_val = xchg(&pir[i], 0);
334 if (pir_val) 337 if (pir_val)
335 *((u32 *)(apic->regs + APIC_IRR + i * 0x10)) |= pir_val; 338 *((u32 *)(regs + APIC_IRR + i * 0x10)) |= pir_val;
336 } 339 }
337} 340}
341EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
342
343void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
344{
345 struct kvm_lapic *apic = vcpu->arch.apic;
346
347 __kvm_apic_update_irr(pir, apic->regs);
348}
338EXPORT_SYMBOL_GPL(kvm_apic_update_irr); 349EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
339 350
340static inline void apic_set_irr(int vec, struct kvm_lapic *apic) 351static inline void apic_set_irr(int vec, struct kvm_lapic *apic)
@@ -402,7 +413,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
402 * because the processor can modify ISR under the hood. Instead 413 * because the processor can modify ISR under the hood. Instead
403 * just set SVI. 414 * just set SVI.
404 */ 415 */
405 if (unlikely(kvm_apic_vid_enabled(vcpu->kvm))) 416 if (unlikely(kvm_x86_ops->hwapic_isr_update))
406 kvm_x86_ops->hwapic_isr_update(vcpu->kvm, vec); 417 kvm_x86_ops->hwapic_isr_update(vcpu->kvm, vec);
407 else { 418 else {
408 ++apic->isr_count; 419 ++apic->isr_count;
@@ -450,7 +461,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
450 * on the other hand isr_count and highest_isr_cache are unused 461 * on the other hand isr_count and highest_isr_cache are unused
451 * and must be left alone. 462 * and must be left alone.
452 */ 463 */
453 if (unlikely(kvm_apic_vid_enabled(vcpu->kvm))) 464 if (unlikely(kvm_x86_ops->hwapic_isr_update))
454 kvm_x86_ops->hwapic_isr_update(vcpu->kvm, 465 kvm_x86_ops->hwapic_isr_update(vcpu->kvm,
455 apic_find_highest_isr(apic)); 466 apic_find_highest_isr(apic));
456 else { 467 else {
@@ -577,55 +588,48 @@ static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
577 apic_update_ppr(apic); 588 apic_update_ppr(apic);
578} 589}
579 590
580static int kvm_apic_broadcast(struct kvm_lapic *apic, u32 dest) 591static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 dest)
581{ 592{
582 return dest == (apic_x2apic_mode(apic) ? 593 return dest == (apic_x2apic_mode(apic) ?
583 X2APIC_BROADCAST : APIC_BROADCAST); 594 X2APIC_BROADCAST : APIC_BROADCAST);
584} 595}
585 596
586int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 dest) 597static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 dest)
587{ 598{
588 return kvm_apic_id(apic) == dest || kvm_apic_broadcast(apic, dest); 599 return kvm_apic_id(apic) == dest || kvm_apic_broadcast(apic, dest);
589} 600}
590 601
591int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda) 602static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
592{ 603{
593 int result = 0;
594 u32 logical_id; 604 u32 logical_id;
595 605
596 if (kvm_apic_broadcast(apic, mda)) 606 if (kvm_apic_broadcast(apic, mda))
597 return 1; 607 return true;
598 608
599 if (apic_x2apic_mode(apic)) { 609 logical_id = kvm_apic_get_reg(apic, APIC_LDR);
600 logical_id = kvm_apic_get_reg(apic, APIC_LDR); 610
601 return logical_id & mda; 611 if (apic_x2apic_mode(apic))
602 } 612 return ((logical_id >> 16) == (mda >> 16))
613 && (logical_id & mda & 0xffff) != 0;
603 614
604 logical_id = GET_APIC_LOGICAL_ID(kvm_apic_get_reg(apic, APIC_LDR)); 615 logical_id = GET_APIC_LOGICAL_ID(logical_id);
605 616
606 switch (kvm_apic_get_reg(apic, APIC_DFR)) { 617 switch (kvm_apic_get_reg(apic, APIC_DFR)) {
607 case APIC_DFR_FLAT: 618 case APIC_DFR_FLAT:
608 if (logical_id & mda) 619 return (logical_id & mda) != 0;
609 result = 1;
610 break;
611 case APIC_DFR_CLUSTER: 620 case APIC_DFR_CLUSTER:
612 if (((logical_id >> 4) == (mda >> 0x4)) 621 return ((logical_id >> 4) == (mda >> 4))
613 && (logical_id & mda & 0xf)) 622 && (logical_id & mda & 0xf) != 0;
614 result = 1;
615 break;
616 default: 623 default:
617 apic_debug("Bad DFR vcpu %d: %08x\n", 624 apic_debug("Bad DFR vcpu %d: %08x\n",
618 apic->vcpu->vcpu_id, kvm_apic_get_reg(apic, APIC_DFR)); 625 apic->vcpu->vcpu_id, kvm_apic_get_reg(apic, APIC_DFR));
619 break; 626 return false;
620 } 627 }
621
622 return result;
623} 628}
624 629
625int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, 630bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
626 int short_hand, unsigned int dest, int dest_mode) 631 int short_hand, unsigned int dest, int dest_mode)
627{ 632{
628 int result = 0;
629 struct kvm_lapic *target = vcpu->arch.apic; 633 struct kvm_lapic *target = vcpu->arch.apic;
630 634
631 apic_debug("target %p, source %p, dest 0x%x, " 635 apic_debug("target %p, source %p, dest 0x%x, "
@@ -635,29 +639,21 @@ int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
635 ASSERT(target); 639 ASSERT(target);
636 switch (short_hand) { 640 switch (short_hand) {
637 case APIC_DEST_NOSHORT: 641 case APIC_DEST_NOSHORT:
638 if (dest_mode == 0) 642 if (dest_mode == APIC_DEST_PHYSICAL)
639 /* Physical mode. */ 643 return kvm_apic_match_physical_addr(target, dest);
640 result = kvm_apic_match_physical_addr(target, dest);
641 else 644 else
642 /* Logical mode. */ 645 return kvm_apic_match_logical_addr(target, dest);
643 result = kvm_apic_match_logical_addr(target, dest);
644 break;
645 case APIC_DEST_SELF: 646 case APIC_DEST_SELF:
646 result = (target == source); 647 return target == source;
647 break;
648 case APIC_DEST_ALLINC: 648 case APIC_DEST_ALLINC:
649 result = 1; 649 return true;
650 break;
651 case APIC_DEST_ALLBUT: 650 case APIC_DEST_ALLBUT:
652 result = (target != source); 651 return target != source;
653 break;
654 default: 652 default:
655 apic_debug("kvm: apic: Bad dest shorthand value %x\n", 653 apic_debug("kvm: apic: Bad dest shorthand value %x\n",
656 short_hand); 654 short_hand);
657 break; 655 return false;
658 } 656 }
659
660 return result;
661} 657}
662 658
663bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src, 659bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
@@ -690,7 +686,7 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
690 686
691 ret = true; 687 ret = true;
692 688
693 if (irq->dest_mode == 0) { /* physical mode */ 689 if (irq->dest_mode == APIC_DEST_PHYSICAL) {
694 if (irq->dest_id >= ARRAY_SIZE(map->phys_map)) 690 if (irq->dest_id >= ARRAY_SIZE(map->phys_map))
695 goto out; 691 goto out;
696 692
@@ -1073,25 +1069,72 @@ static void apic_timer_expired(struct kvm_lapic *apic)
1073{ 1069{
1074 struct kvm_vcpu *vcpu = apic->vcpu; 1070 struct kvm_vcpu *vcpu = apic->vcpu;
1075 wait_queue_head_t *q = &vcpu->wq; 1071 wait_queue_head_t *q = &vcpu->wq;
1072 struct kvm_timer *ktimer = &apic->lapic_timer;
1076 1073
1077 /*
1078 * Note: KVM_REQ_PENDING_TIMER is implicitly checked in
1079 * vcpu_enter_guest.
1080 */
1081 if (atomic_read(&apic->lapic_timer.pending)) 1074 if (atomic_read(&apic->lapic_timer.pending))
1082 return; 1075 return;
1083 1076
1084 atomic_inc(&apic->lapic_timer.pending); 1077 atomic_inc(&apic->lapic_timer.pending);
1085 /* FIXME: this code should not know anything about vcpus */ 1078 kvm_set_pending_timer(vcpu);
1086 kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
1087 1079
1088 if (waitqueue_active(q)) 1080 if (waitqueue_active(q))
1089 wake_up_interruptible(q); 1081 wake_up_interruptible(q);
1082
1083 if (apic_lvtt_tscdeadline(apic))
1084 ktimer->expired_tscdeadline = ktimer->tscdeadline;
1085}
1086
1087/*
1088 * On APICv, this test will cause a busy wait
1089 * during a higher-priority task.
1090 */
1091
1092static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
1093{
1094 struct kvm_lapic *apic = vcpu->arch.apic;
1095 u32 reg = kvm_apic_get_reg(apic, APIC_LVTT);
1096
1097 if (kvm_apic_hw_enabled(apic)) {
1098 int vec = reg & APIC_VECTOR_MASK;
1099 void *bitmap = apic->regs + APIC_ISR;
1100
1101 if (kvm_x86_ops->deliver_posted_interrupt)
1102 bitmap = apic->regs + APIC_IRR;
1103
1104 if (apic_test_vector(vec, bitmap))
1105 return true;
1106 }
1107 return false;
1108}
1109
1110void wait_lapic_expire(struct kvm_vcpu *vcpu)
1111{
1112 struct kvm_lapic *apic = vcpu->arch.apic;
1113 u64 guest_tsc, tsc_deadline;
1114
1115 if (!kvm_vcpu_has_lapic(vcpu))
1116 return;
1117
1118 if (apic->lapic_timer.expired_tscdeadline == 0)
1119 return;
1120
1121 if (!lapic_timer_int_injected(vcpu))
1122 return;
1123
1124 tsc_deadline = apic->lapic_timer.expired_tscdeadline;
1125 apic->lapic_timer.expired_tscdeadline = 0;
1126 guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
1127 trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
1128
1129 /* __delay is delay_tsc whenever the hardware has TSC, thus always. */
1130 if (guest_tsc < tsc_deadline)
1131 __delay(tsc_deadline - guest_tsc);
1090} 1132}
1091 1133
1092static void start_apic_timer(struct kvm_lapic *apic) 1134static void start_apic_timer(struct kvm_lapic *apic)
1093{ 1135{
1094 ktime_t now; 1136 ktime_t now;
1137
1095 atomic_set(&apic->lapic_timer.pending, 0); 1138 atomic_set(&apic->lapic_timer.pending, 0);
1096 1139
1097 if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) { 1140 if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
@@ -1137,6 +1180,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
1137 /* lapic timer in tsc deadline mode */ 1180 /* lapic timer in tsc deadline mode */
1138 u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline; 1181 u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
1139 u64 ns = 0; 1182 u64 ns = 0;
1183 ktime_t expire;
1140 struct kvm_vcpu *vcpu = apic->vcpu; 1184 struct kvm_vcpu *vcpu = apic->vcpu;
1141 unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz; 1185 unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
1142 unsigned long flags; 1186 unsigned long flags;
@@ -1151,8 +1195,10 @@ static void start_apic_timer(struct kvm_lapic *apic)
1151 if (likely(tscdeadline > guest_tsc)) { 1195 if (likely(tscdeadline > guest_tsc)) {
1152 ns = (tscdeadline - guest_tsc) * 1000000ULL; 1196 ns = (tscdeadline - guest_tsc) * 1000000ULL;
1153 do_div(ns, this_tsc_khz); 1197 do_div(ns, this_tsc_khz);
1198 expire = ktime_add_ns(now, ns);
1199 expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
1154 hrtimer_start(&apic->lapic_timer.timer, 1200 hrtimer_start(&apic->lapic_timer.timer,
1155 ktime_add_ns(now, ns), HRTIMER_MODE_ABS); 1201 expire, HRTIMER_MODE_ABS);
1156 } else 1202 } else
1157 apic_timer_expired(apic); 1203 apic_timer_expired(apic);
1158 1204
@@ -1742,7 +1788,9 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
1742 if (kvm_x86_ops->hwapic_irr_update) 1788 if (kvm_x86_ops->hwapic_irr_update)
1743 kvm_x86_ops->hwapic_irr_update(vcpu, 1789 kvm_x86_ops->hwapic_irr_update(vcpu,
1744 apic_find_highest_irr(apic)); 1790 apic_find_highest_irr(apic));
1745 kvm_x86_ops->hwapic_isr_update(vcpu->kvm, apic_find_highest_isr(apic)); 1791 if (unlikely(kvm_x86_ops->hwapic_isr_update))
1792 kvm_x86_ops->hwapic_isr_update(vcpu->kvm,
1793 apic_find_highest_isr(apic));
1746 kvm_make_request(KVM_REQ_EVENT, vcpu); 1794 kvm_make_request(KVM_REQ_EVENT, vcpu);
1747 kvm_rtc_eoi_tracking_restore_one(vcpu); 1795 kvm_rtc_eoi_tracking_restore_one(vcpu);
1748} 1796}