Diffstat (limited to 'arch/x86/kvm/lapic.c')
 arch/x86/kvm/lapic.c | 210 ++++++++++++++++++++------------
 1 file changed, 134 insertions(+), 76 deletions(-)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index b8345dd41b25..4f0c0b954686 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -68,6 +68,9 @@
 #define MAX_APIC_VECTOR			256
 #define APIC_VECTORS_PER_REG		32
 
+#define APIC_BROADCAST			0xFF
+#define X2APIC_BROADCAST		0xFFFFFFFFul
+
 #define VEC_POS(v) ((v) & (32 - 1))
 #define REG_POS(v) (((v) >> 5) << 4)
 
@@ -129,8 +132,6 @@ static inline int kvm_apic_id(struct kvm_lapic *apic)
 	return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
 }
 
-#define KVM_X2APIC_CID_BITS 0
-
 static void recalculate_apic_map(struct kvm *kvm)
 {
 	struct kvm_apic_map *new, *old = NULL;
@@ -149,42 +150,56 @@ static void recalculate_apic_map(struct kvm *kvm)
 	new->cid_shift = 8;
 	new->cid_mask = 0;
 	new->lid_mask = 0xff;
+	new->broadcast = APIC_BROADCAST;
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		struct kvm_lapic *apic = vcpu->arch.apic;
-		u16 cid, lid;
-		u32 ldr;
 
 		if (!kvm_apic_present(vcpu))
 			continue;
 
+		if (apic_x2apic_mode(apic)) {
+			new->ldr_bits = 32;
+			new->cid_shift = 16;
+			new->cid_mask = new->lid_mask = 0xffff;
+			new->broadcast = X2APIC_BROADCAST;
+		} else if (kvm_apic_get_reg(apic, APIC_LDR)) {
+			if (kvm_apic_get_reg(apic, APIC_DFR) ==
+					APIC_DFR_CLUSTER) {
+				new->cid_shift = 4;
+				new->cid_mask = 0xf;
+				new->lid_mask = 0xf;
+			} else {
+				new->cid_shift = 8;
+				new->cid_mask = 0;
+				new->lid_mask = 0xff;
+			}
+		}
+
 		/*
 		 * All APICs have to be configured in the same mode by an OS.
 		 * We take advantage of this while building logical id lookup
-		 * table. After reset APICs are in xapic/flat mode, so if we
-		 * find apic with different setting we assume this is the mode
+		 * table. After reset APICs are in software disabled mode, so if
+		 * we find apic with different setting we assume this is the mode
 		 * OS wants all apics to be in; build lookup table accordingly.
 		 */
-		if (apic_x2apic_mode(apic)) {
-			new->ldr_bits = 32;
-			new->cid_shift = 16;
-			new->cid_mask = (1 << KVM_X2APIC_CID_BITS) - 1;
-			new->lid_mask = 0xffff;
-		} else if (kvm_apic_sw_enabled(apic) &&
-				!new->cid_mask /* flat mode */ &&
-				kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) {
-			new->cid_shift = 4;
-			new->cid_mask = 0xf;
-			new->lid_mask = 0xf;
-		}
+		if (kvm_apic_sw_enabled(apic))
+			break;
+	}
 
-		new->phys_map[kvm_apic_id(apic)] = apic;
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		struct kvm_lapic *apic = vcpu->arch.apic;
+		u16 cid, lid;
+		u32 ldr, aid;
 
+		aid = kvm_apic_id(apic);
 		ldr = kvm_apic_get_reg(apic, APIC_LDR);
 		cid = apic_cluster_id(new, ldr);
 		lid = apic_logical_id(new, ldr);
 
-		if (lid)
+		if (aid < ARRAY_SIZE(new->phys_map))
+			new->phys_map[aid] = apic;
+		if (lid && cid < ARRAY_SIZE(new->logical_map))
 			new->logical_map[cid][ffs(lid) - 1] = apic;
 	}
 out:
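
The rebuilt map is keyed by apic_cluster_id() and apic_logical_id() from lapic.h, and the new ARRAY_SIZE() checks matter because an x2APIC ID or cluster ID can exceed array bounds that were always safe with 8-bit xAPIC IDs. A minimal standalone sketch of how an LDR decomposes under the xAPIC cluster parameters set above (cid_shift = 4, cid_mask = lid_mask = 0xf); the helper bodies here are illustrative assumptions modeled on the map fields, not copies of the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the addressing fields of struct kvm_apic_map. */
struct map_params {
        uint8_t  ldr_bits;      /* 8 for xAPIC, 32 for x2APIC */
        uint8_t  cid_shift;     /* 4 for xAPIC cluster, 16 for x2APIC */
        uint32_t cid_mask;
        uint32_t lid_mask;
};

/* Assumed shape of apic_cluster_id()/apic_logical_id(): the value is
 * left-aligned into 32 bits (mda = ldr << (32 - ldr_bits)), and the
 * helpers shift it back and split off the cluster and logical parts. */
static uint16_t cluster_id(const struct map_params *m, uint32_t mda)
{
        uint32_t ldr = mda >> (32 - m->ldr_bits);
        return (ldr >> m->cid_shift) & m->cid_mask;
}

static uint16_t logical_id(const struct map_params *m, uint32_t mda)
{
        uint32_t ldr = mda >> (32 - m->ldr_bits);
        return ldr & m->lid_mask;
}

int main(void)
{
        struct map_params xapic_cluster = { 8, 4, 0xf, 0xf };
        uint32_t mda = 0x53u << 24;     /* LDR 0x53, left-aligned */

        /* Prints "cid=5 lid=0x3": cluster 5, CPUs 0 and 1 of that cluster. */
        printf("cid=%u lid=0x%x\n", cluster_id(&xapic_cluster, mda),
               logical_id(&xapic_cluster, mda));
        return 0;
}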
@@ -201,11 +216,13 @@ out:
 
 static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
 {
-	u32 prev = kvm_apic_get_reg(apic, APIC_SPIV);
+	bool enabled = val & APIC_SPIV_APIC_ENABLED;
 
 	apic_set_reg(apic, APIC_SPIV, val);
-	if ((prev ^ val) & APIC_SPIV_APIC_ENABLED) {
-		if (val & APIC_SPIV_APIC_ENABLED) {
+
+	if (enabled != apic->sw_enabled) {
+		apic->sw_enabled = enabled;
+		if (enabled) {
 			static_key_slow_dec_deferred(&apic_sw_disabled);
 			recalculate_apic_map(apic->vcpu->kvm);
 		} else
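
Caching the enable bit in apic->sw_enabled lets other paths (see the kvm_free_lapic() hunk below) test a bool instead of decoding APIC_SPIV. The companion lapic.h hunk is not part of this file's diff; a sketch of the helper it presumably becomes, keeping the apic_sw_disabled static key as the patched-out fast path:

/* Sketch only -- the real definition lives in arch/x86/kvm/lapic.h. */
static inline bool kvm_apic_sw_enabled(struct kvm_lapic *apic)
{
        if (static_key_false(&apic_sw_disabled.key))
                return apic->sw_enabled;        /* slow path: some APIC is sw-disabled */
        return true;                            /* fast path: all APICs enabled */
}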
@@ -237,21 +254,17 @@ static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
 
 static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
 {
-	return ((kvm_apic_get_reg(apic, APIC_LVTT) &
-		apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_ONESHOT);
+	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
 }
 
 static inline int apic_lvtt_period(struct kvm_lapic *apic)
 {
-	return ((kvm_apic_get_reg(apic, APIC_LVTT) &
-		apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_PERIODIC);
+	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
 }
 
 static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
 {
-	return ((kvm_apic_get_reg(apic, APIC_LVTT) &
-		apic->lapic_timer.timer_mode_mask) ==
-		APIC_LVT_TIMER_TSCDEADLINE);
+	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
 }
 
 static inline int apic_lvt_nmi_mode(u32 lvt_val)
@@ -326,8 +339,12 @@ EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
 
 static inline void apic_set_irr(int vec, struct kvm_lapic *apic)
 {
-	apic->irr_pending = true;
 	apic_set_vector(vec, apic->regs + APIC_IRR);
+	/*
+	 * irr_pending must be true if any interrupt is pending; set it after
+	 * APIC_IRR to avoid race with apic_clear_irr
+	 */
+	apic->irr_pending = true;
 }
 
 static inline int apic_search_irr(struct kvm_lapic *apic)
@@ -359,13 +376,15 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
 
 	vcpu = apic->vcpu;
 
-	apic_clear_vector(vec, apic->regs + APIC_IRR);
-	if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
+	if (unlikely(kvm_apic_vid_enabled(vcpu->kvm))) {
 		/* try to update RVI */
+		apic_clear_vector(vec, apic->regs + APIC_IRR);
 		kvm_make_request(KVM_REQ_EVENT, vcpu);
-	else {
-		vec = apic_search_irr(apic);
-		apic->irr_pending = (vec != -1);
+	} else {
+		apic->irr_pending = false;
+		apic_clear_vector(vec, apic->regs + APIC_IRR);
+		if (apic_search_irr(apic) != -1)
+			apic->irr_pending = true;
 	}
 }
 
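Together, the two IRR hunks enforce one invariant: irr_pending may be spuriously true, but must never be false while an APIC_IRR bit is set. The setter therefore publishes the bit before raising the flag, and the clearer lowers the flag before touching the bit, then re-raises it if anything is still (or newly) pending. A condensed userspace sketch of the same discipline, with C11 atomics standing in for the APIC register page and the flag:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint irr_bits;        /* stands in for the APIC_IRR page */
static atomic_bool irr_pending;     /* stands in for apic->irr_pending */

/* Setter: publish the bit first, then raise the flag, so a concurrent
 * clearer that observes the flag also observes the bit. */
static void set_irr(unsigned int vec)
{
        atomic_fetch_or(&irr_bits, 1u << vec);
        atomic_store(&irr_pending, true);
}

/* Clearer: lower the flag pessimistically, clear the bit, then re-raise
 * the flag if any bit (possibly a brand new one) is still set. */
static void clear_irr(unsigned int vec)
{
        atomic_store(&irr_pending, false);
        atomic_fetch_and(&irr_bits, ~(1u << vec));
        if (atomic_load(&irr_bits))
                atomic_store(&irr_pending, true);
}
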
@@ -558,16 +577,25 @@ static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
 	apic_update_ppr(apic);
 }
 
-int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
+static int kvm_apic_broadcast(struct kvm_lapic *apic, u32 dest)
+{
+	return dest == (apic_x2apic_mode(apic) ?
+			X2APIC_BROADCAST : APIC_BROADCAST);
+}
+
+int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 dest)
 {
-	return dest == 0xff || kvm_apic_id(apic) == dest;
+	return kvm_apic_id(apic) == dest || kvm_apic_broadcast(apic, dest);
 }
 
-int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
+int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
 {
 	int result = 0;
 	u32 logical_id;
 
+	if (kvm_apic_broadcast(apic, mda))
+		return 1;
+
 	if (apic_x2apic_mode(apic)) {
 		logical_id = kvm_apic_get_reg(apic, APIC_LDR);
 		return logical_id & mda;
@@ -595,7 +623,7 @@ int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
 }
 
 int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
-		int short_hand, int dest, int dest_mode)
+		int short_hand, unsigned int dest, int dest_mode)
 {
 	int result = 0;
 	struct kvm_lapic *target = vcpu->arch.apic;
@@ -657,15 +685,24 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
 	if (!map)
 		goto out;
 
+	if (irq->dest_id == map->broadcast)
+		goto out;
+
+	ret = true;
+
 	if (irq->dest_mode == 0) { /* physical mode */
-		if (irq->delivery_mode == APIC_DM_LOWEST ||
-				irq->dest_id == 0xff)
+		if (irq->dest_id >= ARRAY_SIZE(map->phys_map))
 			goto out;
-		dst = &map->phys_map[irq->dest_id & 0xff];
+
+		dst = &map->phys_map[irq->dest_id];
 	} else {
 		u32 mda = irq->dest_id << (32 - map->ldr_bits);
+		u16 cid = apic_cluster_id(map, mda);
+
+		if (cid >= ARRAY_SIZE(map->logical_map))
+			goto out;
 
-		dst = map->logical_map[apic_cluster_id(map, mda)];
+		dst = map->logical_map[cid];
 
 		bitmap = apic_logical_id(map, mda);
 
@@ -691,8 +728,6 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
 			*r = 0;
 		*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
 	}
-
-	ret = true;
 out:
 	rcu_read_unlock();
 	return ret;
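
Hoisting ret = true above the lookup changes what "goto out" means: a broadcast bails out with ret still false, punting to the slow path that scans every vCPU, while an out-of-range unicast ID now bails out with ret == true and *r untouched, i.e. "handled: dropped". A sketch of the assumed caller contract (the real caller is kvm_irq_delivery_to_apic() in virt/kvm/irq_comm.c; kvm_irq_delivery_to_apic_slow is a hypothetical stand-in for its vcpu-by-vcpu fallback loop):

/* Assumed shape of the caller, sketched from the fast-path semantics. */
int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
                             struct kvm_lapic_irq *irq, unsigned long *dest_map)
{
        int r = -1;     /* -1 = nobody matched */

        /* A "true" return means the map answered the question, even when
         * the answer was "drop it" (r stays -1). Broadcasts return false
         * and fall through to the slow scan of every vCPU. */
        if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
                return r;

        return kvm_irq_delivery_to_apic_slow(kvm, src, irq, dest_map);
}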
@@ -1034,6 +1069,26 @@ static void update_divide_count(struct kvm_lapic *apic)
 				   apic->divide_count);
 }
 
+static void apic_timer_expired(struct kvm_lapic *apic)
+{
+	struct kvm_vcpu *vcpu = apic->vcpu;
+	wait_queue_head_t *q = &vcpu->wq;
+
+	/*
+	 * Note: KVM_REQ_PENDING_TIMER is implicitly checked in
+	 * vcpu_enter_guest.
+	 */
+	if (atomic_read(&apic->lapic_timer.pending))
+		return;
+
+	atomic_inc(&apic->lapic_timer.pending);
+	/* FIXME: this code should not know anything about vcpus */
+	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
+
+	if (waitqueue_active(q))
+		wake_up_interruptible(q);
+}
+
 static void start_apic_timer(struct kvm_lapic *apic)
 {
 	ktime_t now;
@@ -1096,9 +1151,10 @@ static void start_apic_timer(struct kvm_lapic *apic)
 		if (likely(tscdeadline > guest_tsc)) {
 			ns = (tscdeadline - guest_tsc) * 1000000ULL;
 			do_div(ns, this_tsc_khz);
-		}
-		hrtimer_start(&apic->lapic_timer.timer,
-			ktime_add_ns(now, ns), HRTIMER_MODE_ABS);
+			hrtimer_start(&apic->lapic_timer.timer,
+				ktime_add_ns(now, ns), HRTIMER_MODE_ABS);
+		} else
+			apic_timer_expired(apic);
 
 		local_irq_restore(flags);
 	}
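
A tscdeadline value that has already passed used to arm a zero-length hrtimer that fired immediately; now the expiry is signalled synchronously through apic_timer_expired(). The cycles-to-nanoseconds step itself is plain scaling, worth seeing with numbers; a standalone sketch of the arithmetic the do_div() performs (the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Same scaling start_apic_timer() does:
 * ns = tsc_delta * 1,000,000 / tsc_khz. */
static uint64_t tsc_delta_to_ns(uint64_t tsc_delta, uint32_t tsc_khz)
{
        return tsc_delta * 1000000ULL / tsc_khz;
}

int main(void)
{
        /* A guest TSC at 2 GHz (2,000,000 kHz): a deadline 2,000,000
         * cycles in the future is 1,000,000 ns = 1 ms away. */
        printf("%llu ns\n",
               (unsigned long long)tsc_delta_to_ns(2000000, 2000000));
        return 0;
}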
@@ -1203,17 +1259,20 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 
 		break;
 
-	case APIC_LVTT:
-		if ((kvm_apic_get_reg(apic, APIC_LVTT) &
-		    apic->lapic_timer.timer_mode_mask) !=
-		    (val & apic->lapic_timer.timer_mode_mask))
+	case APIC_LVTT: {
+		u32 timer_mode = val & apic->lapic_timer.timer_mode_mask;
+
+		if (apic->lapic_timer.timer_mode != timer_mode) {
+			apic->lapic_timer.timer_mode = timer_mode;
 			hrtimer_cancel(&apic->lapic_timer.timer);
+		}
 
 		if (!kvm_apic_sw_enabled(apic))
 			val |= APIC_LVT_MASKED;
 		val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
 		apic_set_reg(apic, APIC_LVTT, val);
 		break;
+	}
 
 	case APIC_TMICT:
 		if (apic_lvtt_tscdeadline(apic))
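
The cached timer_mode is now the single source of truth that the apic_lvtt_*() helpers above compare against. The values come from the APIC_LVT_TIMER_* encodings, which occupy the LVTT mode field (bits 18:17); reproduced here for reference, as I recall them from arch/x86/include/asm/apicdef.h:

/* LVT timer mode field in the LVTT register. timer_mode_mask covers one
 * or both bits depending on TSC-deadline support. */
#define APIC_LVT_TIMER_ONESHOT          (0 << 17)
#define APIC_LVT_TIMER_PERIODIC         (1 << 17)
#define APIC_LVT_TIMER_TSCDEADLINE      (2 << 17)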
@@ -1320,7 +1379,7 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu)
 	if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
 		static_key_slow_dec_deferred(&apic_hw_disabled);
 
-	if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_APIC_ENABLED))
+	if (!apic->sw_enabled)
 		static_key_slow_dec_deferred(&apic_sw_disabled);
 
 	if (apic->regs)
@@ -1355,9 +1414,6 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
 		return;
 
 	hrtimer_cancel(&apic->lapic_timer.timer);
-	/* Inject here so clearing tscdeadline won't override new value */
-	if (apic_has_pending_timer(vcpu))
-		kvm_inject_apic_timer_irqs(vcpu);
 	apic->lapic_timer.tscdeadline = data;
 	start_apic_timer(apic);
 }
@@ -1422,6 +1478,10 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
 	apic->base_address = apic->vcpu->arch.apic_base &
 			     MSR_IA32_APICBASE_BASE;
 
+	if ((value & MSR_IA32_APICBASE_ENABLE) &&
+	     apic->base_address != APIC_DEFAULT_PHYS_BASE)
+		pr_warn_once("APIC base relocation is unsupported by KVM");
+
 	/* with FSB delivery interrupt, we can restart APIC functionality */
 	apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
 		   "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);
@@ -1447,6 +1507,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
 
 	for (i = 0; i < APIC_LVT_NUM; i++)
 		apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
+	apic->lapic_timer.timer_mode = 0;
 	apic_set_reg(apic, APIC_LVT0,
 		     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
@@ -1538,23 +1599,8 @@ static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
 {
 	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
 	struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
-	struct kvm_vcpu *vcpu = apic->vcpu;
-	wait_queue_head_t *q = &vcpu->wq;
-
-	/*
-	 * There is a race window between reading and incrementing, but we do
-	 * not care about potentially losing timer events in the !reinject
-	 * case anyway. Note: KVM_REQ_PENDING_TIMER is implicitly checked
-	 * in vcpu_enter_guest.
-	 */
-	if (!atomic_read(&ktimer->pending)) {
-		atomic_inc(&ktimer->pending);
-		/* FIXME: this code should not know anything about vcpus */
-		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
-	}
-
-	if (waitqueue_active(q))
-		wake_up_interruptible(q);
+	apic_timer_expired(apic);
 
 	if (lapic_is_periodic(apic)) {
 		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
@@ -1693,6 +1739,9 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
 	apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ?
 				1 : count_vectors(apic->regs + APIC_ISR);
 	apic->highest_isr_cache = -1;
+	if (kvm_x86_ops->hwapic_irr_update)
+		kvm_x86_ops->hwapic_irr_update(vcpu,
+				apic_find_highest_irr(apic));
 	kvm_x86_ops->hwapic_isr_update(vcpu->kvm, apic_find_highest_isr(apic));
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	kvm_rtc_eoi_tracking_restore_one(vcpu);
@@ -1837,8 +1886,11 @@ int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 	if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
 		return 1;
 
+	if (reg == APIC_ICR2)
+		return 1;
+
 	/* if this is ICR write vector before command */
-	if (msr == 0x830)
+	if (reg == APIC_ICR)
 		apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
 	return apic_reg_write(apic, reg, (u32)data);
 }
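
The rewrite from the magic "msr == 0x830" to "reg == APIC_ICR" works because reg is derived earlier in these functions as (msr - APIC_BASE_MSR) << 4, mapping each x2APIC MSR onto its xAPIC MMIO register offset. A quick standalone check of that mapping (constants as defined in lapic.c and apicdef.h; x2apic_msr_to_reg is an illustrative name):

#include <stdio.h>

#define APIC_BASE_MSR   0x800
#define APIC_ICR        0x300
#define APIC_ICR2       0x310

static unsigned int x2apic_msr_to_reg(unsigned int msr)
{
        return (msr - APIC_BASE_MSR) << 4;
}

int main(void)
{
        /* 0x830 -> 0x300 (APIC_ICR); 0x831 -> 0x310 (APIC_ICR2), which is
         * reserved in x2APIC mode since ICR is a single 64-bit MSR there. */
        printf("0x830 -> 0x%x\n", x2apic_msr_to_reg(0x830));
        printf("0x831 -> 0x%x\n", x2apic_msr_to_reg(0x831));
        return 0;
}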
@@ -1851,9 +1903,15 @@ int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
 	if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
 		return 1;
 
+	if (reg == APIC_DFR || reg == APIC_ICR2) {
+		apic_debug("KVM_APIC_READ: read x2apic reserved register %x\n",
+			   reg);
+		return 1;
+	}
+
 	if (apic_reg_read(apic, reg, 4, &low))
 		return 1;
-	if (msr == 0x830)
+	if (reg == APIC_ICR)
 		apic_reg_read(apic, APIC_ICR2, 4, &high);
 
 	*data = (((u64)high) << 32) | low;
@@ -1908,7 +1966,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
 void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
-	unsigned int sipi_vector;
+	u8 sipi_vector;
 	unsigned long pe;
 
 	if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events)