author	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
commit	8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree	a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /arch/x86/kvm/lapic.c
parent	406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'arch/x86/kvm/lapic.c')
-rw-r--r--	arch/x86/kvm/lapic.c	841
1 file changed, 166 insertions(+), 675 deletions(-)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 9392f527f10..57dcbd4308f 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -34,12 +34,10 @@
 #include <asm/current.h>
 #include <asm/apicdef.h>
 #include <linux/atomic.h>
-#include <linux/jump_label.h>
 #include "kvm_cache_regs.h"
 #include "irq.h"
 #include "trace.h"
 #include "x86.h"
-#include "cpuid.h"
 
 #ifndef CONFIG_X86_64
 #define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
@@ -66,13 +64,14 @@
 #define APIC_DEST_NOSHORT		0x0
 #define APIC_DEST_MASK			0x800
 #define MAX_APIC_VECTOR			256
-#define APIC_VECTORS_PER_REG		32
 
 #define VEC_POS(v) ((v) & (32 - 1))
 #define REG_POS(v) (((v) >> 5) << 4)
 
-static unsigned int min_timer_period_us = 500;
-module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
+static inline u32 apic_get_reg(struct kvm_lapic *apic, int reg_off)
+{
+	return *((u32 *) (apic->regs + reg_off));
+}
 
 static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
 {
@@ -89,11 +88,6 @@ static inline int apic_test_and_clear_vector(int vec, void *bitmap)
 	return test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
 }
 
-static inline int apic_test_vector(int vec, void *bitmap)
-{
-	return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
-}
-
 static inline void apic_set_vector(int vec, void *bitmap)
 {
 	set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
@@ -104,33 +98,19 @@ static inline void apic_clear_vector(int vec, void *bitmap)
 	clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
 }
 
-static inline int __apic_test_and_set_vector(int vec, void *bitmap)
-{
-	return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
-}
-
-static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
-{
-	return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
-}
-
-struct static_key_deferred apic_hw_disabled __read_mostly;
-struct static_key_deferred apic_sw_disabled __read_mostly;
-
-static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
-{
-	if ((kvm_apic_get_reg(apic, APIC_SPIV) ^ val) & APIC_SPIV_APIC_ENABLED) {
-		if (val & APIC_SPIV_APIC_ENABLED)
-			static_key_slow_dec_deferred(&apic_sw_disabled);
-		else
-			static_key_slow_inc(&apic_sw_disabled.key);
-	}
-	apic_set_reg(apic, APIC_SPIV, val);
+static inline int apic_hw_enabled(struct kvm_lapic *apic)
+{
+	return (apic)->vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE;
+}
+
+static inline int apic_sw_enabled(struct kvm_lapic *apic)
+{
+	return apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_APIC_ENABLED;
 }
 
 static inline int apic_enabled(struct kvm_lapic *apic)
 {
-	return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
+	return apic_sw_enabled(apic) && apic_hw_enabled(apic);
 }
 
 #define LVT_MASK \
@@ -140,137 +120,24 @@ static inline int apic_enabled(struct kvm_lapic *apic)
 	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
 	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
 
-static inline int apic_x2apic_mode(struct kvm_lapic *apic)
-{
-	return apic->vcpu->arch.apic_base & X2APIC_ENABLE;
-}
-
 static inline int kvm_apic_id(struct kvm_lapic *apic)
 {
-	return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
-}
-
-static inline u16 apic_cluster_id(struct kvm_apic_map *map, u32 ldr)
-{
-	u16 cid;
-	ldr >>= 32 - map->ldr_bits;
-	cid = (ldr >> map->cid_shift) & map->cid_mask;
-
-	BUG_ON(cid >= ARRAY_SIZE(map->logical_map));
-
-	return cid;
-}
-
-static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr)
-{
-	ldr >>= (32 - map->ldr_bits);
-	return ldr & map->lid_mask;
-}
-
-static void recalculate_apic_map(struct kvm *kvm)
-{
-	struct kvm_apic_map *new, *old = NULL;
-	struct kvm_vcpu *vcpu;
-	int i;
-
-	new = kzalloc(sizeof(struct kvm_apic_map), GFP_KERNEL);
-
-	mutex_lock(&kvm->arch.apic_map_lock);
-
-	if (!new)
-		goto out;
-
-	new->ldr_bits = 8;
-	/* flat mode is default */
-	new->cid_shift = 8;
-	new->cid_mask = 0;
-	new->lid_mask = 0xff;
-
-	kvm_for_each_vcpu(i, vcpu, kvm) {
-		struct kvm_lapic *apic = vcpu->arch.apic;
-		u16 cid, lid;
-		u32 ldr;
-
-		if (!kvm_apic_present(vcpu))
-			continue;
-
-		/*
-		 * All APICs have to be configured in the same mode by an OS.
-		 * We take advatage of this while building logical id loockup
-		 * table. After reset APICs are in xapic/flat mode, so if we
-		 * find apic with different setting we assume this is the mode
-		 * OS wants all apics to be in; build lookup table accordingly.
-		 */
-		if (apic_x2apic_mode(apic)) {
-			new->ldr_bits = 32;
-			new->cid_shift = 16;
-			new->cid_mask = new->lid_mask = 0xffff;
-		} else if (kvm_apic_sw_enabled(apic) &&
-				!new->cid_mask /* flat mode */ &&
-				kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) {
-			new->cid_shift = 4;
-			new->cid_mask = 0xf;
-			new->lid_mask = 0xf;
-		}
-
-		new->phys_map[kvm_apic_id(apic)] = apic;
-
-		ldr = kvm_apic_get_reg(apic, APIC_LDR);
-		cid = apic_cluster_id(new, ldr);
-		lid = apic_logical_id(new, ldr);
-
-		if (lid)
-			new->logical_map[cid][ffs(lid) - 1] = apic;
-	}
-out:
-	old = rcu_dereference_protected(kvm->arch.apic_map,
-			lockdep_is_held(&kvm->arch.apic_map_lock));
-	rcu_assign_pointer(kvm->arch.apic_map, new);
-	mutex_unlock(&kvm->arch.apic_map_lock);
-
-	if (old)
-		kfree_rcu(old, rcu);
-}
-
-static inline void kvm_apic_set_id(struct kvm_lapic *apic, u8 id)
-{
-	apic_set_reg(apic, APIC_ID, id << 24);
-	recalculate_apic_map(apic->vcpu->kvm);
-}
-
-static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
-{
-	apic_set_reg(apic, APIC_LDR, id);
-	recalculate_apic_map(apic->vcpu->kvm);
+	return (apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
 }
 
 static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
 {
-	return !(kvm_apic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
+	return !(apic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
 }
 
 static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
 {
-	return kvm_apic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
-}
-
-static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
-{
-	return ((kvm_apic_get_reg(apic, APIC_LVTT) &
-		apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_ONESHOT);
+	return apic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
 }
 
 static inline int apic_lvtt_period(struct kvm_lapic *apic)
 {
-	return ((kvm_apic_get_reg(apic, APIC_LVTT) &
-		apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_PERIODIC);
-}
-
-static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
-{
-	return ((kvm_apic_get_reg(apic, APIC_LVTT) &
-		apic->lapic_timer.timer_mode_mask) ==
-		APIC_LVT_TIMER_TSCDEADLINE);
+	return apic_get_reg(apic, APIC_LVTT) & APIC_LVT_TIMER_PERIODIC;
 }
 
 static inline int apic_lvt_nmi_mode(u32 lvt_val)
@@ -284,7 +151,7 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu)
 	struct kvm_cpuid_entry2 *feat;
 	u32 v = APIC_VERSION;
 
-	if (!kvm_vcpu_has_lapic(vcpu))
+	if (!irqchip_in_kernel(vcpu->kvm))
 		return;
 
 	feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
@@ -293,8 +160,13 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu)
 	apic_set_reg(apic, APIC_LVR, v);
 }
 
-static const unsigned int apic_lvt_mask[APIC_LVT_NUM] = {
-	LVT_MASK ,      /* part LVTT mask, timer mode mask added at runtime */
+static inline int apic_x2apic_mode(struct kvm_lapic *apic)
+{
+	return apic->vcpu->arch.apic_base & X2APIC_ENABLE;
+}
+
+static unsigned int apic_lvt_mask[APIC_LVT_NUM] = {
+	LVT_MASK | APIC_LVT_TIMER_PERIODIC,	/* LVTT */
 	LVT_MASK | APIC_MODE_MASK,	/* LVTTHMR */
 	LVT_MASK | APIC_MODE_MASK,	/* LVTPC */
 	LINT_MASK, LINT_MASK,	/* LVT0-1 */
@@ -303,31 +175,16 @@ static const unsigned int apic_lvt_mask[APIC_LVT_NUM] = {
 
 static int find_highest_vector(void *bitmap)
 {
-	int vec;
-	u32 *reg;
-
-	for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
-	     vec >= 0; vec -= APIC_VECTORS_PER_REG) {
-		reg = bitmap + REG_POS(vec);
-		if (*reg)
-			return fls(*reg) - 1 + vec;
-	}
-
-	return -1;
-}
+	u32 *word = bitmap;
+	int word_offset = MAX_APIC_VECTOR >> 5;
 
-static u8 count_vectors(void *bitmap)
-{
-	int vec;
-	u32 *reg;
-	u8 count = 0;
+	while ((word_offset != 0) && (word[(--word_offset) << 2] == 0))
+		continue;
 
-	for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
-		reg = bitmap + REG_POS(vec);
-		count += hweight32(*reg);
-	}
-
-	return count;
+	if (likely(!word_offset && !word[0]))
+		return -1;
+	else
+		return fls(word[word_offset << 2]) - 1 + (word_offset << 5);
 }
 
 static inline int apic_test_and_set_irr(int vec, struct kvm_lapic *apic)
@@ -362,29 +219,9 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
 		apic->irr_pending = true;
 }
 
-static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
-{
-	if (!__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
-		++apic->isr_count;
-	BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
-	/*
-	 * ISR (in service register) bit is set when injecting an interrupt.
-	 * The highest vector is injected. Thus the latest bit set matches
-	 * the highest bit in ISR.
-	 */
-	apic->highest_isr_cache = vec;
-}
-
-static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
-{
-	if (__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
-		--apic->isr_count;
-	BUG_ON(apic->isr_count < 0);
-	apic->highest_isr_cache = -1;
-}
-
 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
 {
+	struct kvm_lapic *apic = vcpu->arch.apic;
 	int highest_irr;
 
 	/* This may race with setting of irr in __apic_accept_irq() and
@@ -392,9 +229,9 @@ int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
 	 * will cause vmexit immediately and the value will be recalculated
 	 * on the next vmentry.
 	 */
-	if (!kvm_vcpu_has_lapic(vcpu))
+	if (!apic)
 		return 0;
-	highest_irr = apic_find_highest_irr(vcpu->arch.apic);
+	highest_irr = apic_find_highest_irr(apic);
 
 	return highest_irr;
 }
@@ -410,61 +247,9 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
 			irq->level, irq->trig_mode);
 }
 
-static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
-{
-
-	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
-				      sizeof(val));
-}
-
-static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
-{
-
-	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
-				     sizeof(*val));
-}
-
-static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
-}
-
-static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
-{
-	u8 val;
-	if (pv_eoi_get_user(vcpu, &val) < 0)
-		apic_debug("Can't read EOI MSR value: 0x%llx\n",
-			   (unsigned long long)vcpi->arch.pv_eoi.msr_val);
-	return val & 0x1;
-}
-
-static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
-{
-	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
-		apic_debug("Can't set EOI MSR value: 0x%llx\n",
-			   (unsigned long long)vcpi->arch.pv_eoi.msr_val);
-		return;
-	}
-	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
-}
-
-static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
-{
-	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
-		apic_debug("Can't clear EOI MSR value: 0x%llx\n",
-			   (unsigned long long)vcpi->arch.pv_eoi.msr_val);
-		return;
-	}
-	__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
-}
-
 static inline int apic_find_highest_isr(struct kvm_lapic *apic)
 {
 	int result;
-	if (!apic->isr_count)
-		return -1;
-	if (likely(apic->highest_isr_cache != -1))
-		return apic->highest_isr_cache;
 
 	result = find_highest_vector(apic->regs + APIC_ISR);
 	ASSERT(result == -1 || result >= 16);
@@ -477,8 +262,8 @@ static void apic_update_ppr(struct kvm_lapic *apic)
 	u32 tpr, isrv, ppr, old_ppr;
 	int isr;
 
-	old_ppr = kvm_apic_get_reg(apic, APIC_PROCPRI);
-	tpr = kvm_apic_get_reg(apic, APIC_TASKPRI);
+	old_ppr = apic_get_reg(apic, APIC_PROCPRI);
+	tpr = apic_get_reg(apic, APIC_TASKPRI);
 	isr = apic_find_highest_isr(apic);
 	isrv = (isr != -1) ? isr : 0;
 
@@ -514,13 +299,13 @@ int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
 	u32 logical_id;
 
 	if (apic_x2apic_mode(apic)) {
-		logical_id = kvm_apic_get_reg(apic, APIC_LDR);
+		logical_id = apic_get_reg(apic, APIC_LDR);
 		return logical_id & mda;
 	}
 
-	logical_id = GET_APIC_LOGICAL_ID(kvm_apic_get_reg(apic, APIC_LDR));
+	logical_id = GET_APIC_LOGICAL_ID(apic_get_reg(apic, APIC_LDR));
 
-	switch (kvm_apic_get_reg(apic, APIC_DFR)) {
+	switch (apic_get_reg(apic, APIC_DFR)) {
 	case APIC_DFR_FLAT:
 		if (logical_id & mda)
 			result = 1;
@@ -531,8 +316,8 @@ int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
 			result = 1;
 		break;
 	default:
-		apic_debug("Bad DFR vcpu %d: %08x\n",
-			   apic->vcpu->vcpu_id, kvm_apic_get_reg(apic, APIC_DFR));
+		printk(KERN_WARNING "Bad DFR vcpu %d: %08x\n",
+		       apic->vcpu->vcpu_id, apic_get_reg(apic, APIC_DFR));
 		break;
 	}
 
@@ -569,80 +354,14 @@ int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
 		result = (target != source);
 		break;
 	default:
-		apic_debug("kvm: apic: Bad dest shorthand value %x\n",
-			   short_hand);
+		printk(KERN_WARNING "Bad dest shorthand value %x\n",
+		       short_hand);
 		break;
 	}
 
 	return result;
 }
 
-bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
-		struct kvm_lapic_irq *irq, int *r)
-{
-	struct kvm_apic_map *map;
-	unsigned long bitmap = 1;
-	struct kvm_lapic **dst;
-	int i;
-	bool ret = false;
-
-	*r = -1;
-
-	if (irq->shorthand == APIC_DEST_SELF) {
-		*r = kvm_apic_set_irq(src->vcpu, irq);
-		return true;
-	}
-
-	if (irq->shorthand)
-		return false;
-
-	rcu_read_lock();
-	map = rcu_dereference(kvm->arch.apic_map);
-
-	if (!map)
-		goto out;
-
-	if (irq->dest_mode == 0) { /* physical mode */
-		if (irq->delivery_mode == APIC_DM_LOWEST ||
-				irq->dest_id == 0xff)
-			goto out;
-		dst = &map->phys_map[irq->dest_id & 0xff];
-	} else {
-		u32 mda = irq->dest_id << (32 - map->ldr_bits);
-
-		dst = map->logical_map[apic_cluster_id(map, mda)];
-
-		bitmap = apic_logical_id(map, mda);
-
-		if (irq->delivery_mode == APIC_DM_LOWEST) {
-			int l = -1;
-			for_each_set_bit(i, &bitmap, 16) {
-				if (!dst[i])
-					continue;
-				if (l < 0)
-					l = i;
-				else if (kvm_apic_compare_prio(dst[i]->vcpu, dst[l]->vcpu) < 0)
-					l = i;
-			}
-
-			bitmap = (l >= 0) ? 1 << l : 0;
-		}
-	}
-
-	for_each_set_bit(i, &bitmap, 16) {
-		if (!dst[i])
-			continue;
-		if (*r < 0)
-			*r = 0;
-		*r += kvm_apic_set_irq(dst[i]->vcpu, irq);
-	}
-
-	ret = true;
-out:
-	rcu_read_unlock();
-	return ret;
-}
-
 /*
  * Add a pending IRQ into lapic.
  * Return 1 if successfully added and 0 if discarded.
@@ -682,11 +401,11 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 		break;
 
 	case APIC_DM_REMRD:
-		apic_debug("Ignoring delivery mode 3\n");
+		printk(KERN_DEBUG "Ignoring delivery mode 3\n");
 		break;
 
 	case APIC_DM_SMI:
-		apic_debug("Ignoring guest SMI\n");
+		printk(KERN_DEBUG "Ignoring guest SMI\n");
 		break;
 
 	case APIC_DM_NMI:
@@ -696,7 +415,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 		break;
 
 	case APIC_DM_INIT:
-		if (!trig_mode || level) {
+		if (level) {
 			result = 1;
 			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
 			kvm_make_request(KVM_REQ_EVENT, vcpu);
@@ -740,39 +459,33 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
 	return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
 }
 
-static int apic_set_eoi(struct kvm_lapic *apic)
+static void apic_set_eoi(struct kvm_lapic *apic)
 {
 	int vector = apic_find_highest_isr(apic);
-
-	trace_kvm_eoi(apic, vector);
-
+	int trigger_mode;
 	/*
 	 * Not every write EOI will has corresponding ISR,
 	 * one example is when Kernel check timer on setup_IO_APIC
 	 */
 	if (vector == -1)
-		return vector;
+		return;
 
-	apic_clear_isr(vector, apic);
+	apic_clear_vector(vector, apic->regs + APIC_ISR);
 	apic_update_ppr(apic);
 
-	if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
-	    kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
-		int trigger_mode;
-		if (apic_test_vector(vector, apic->regs + APIC_TMR))
-			trigger_mode = IOAPIC_LEVEL_TRIG;
-		else
-			trigger_mode = IOAPIC_EDGE_TRIG;
+	if (apic_test_and_clear_vector(vector, apic->regs + APIC_TMR))
+		trigger_mode = IOAPIC_LEVEL_TRIG;
+	else
+		trigger_mode = IOAPIC_EDGE_TRIG;
+	if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI))
 		kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
-	}
 	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
-	return vector;
 }
 
 static void apic_send_ipi(struct kvm_lapic *apic)
 {
-	u32 icr_low = kvm_apic_get_reg(apic, APIC_ICR);
-	u32 icr_high = kvm_apic_get_reg(apic, APIC_ICR2);
+	u32 icr_low = apic_get_reg(apic, APIC_ICR);
+	u32 icr_high = apic_get_reg(apic, APIC_ICR2);
 	struct kvm_lapic_irq irq;
 
 	irq.vector = icr_low & APIC_VECTOR_MASK;
@@ -807,7 +520,7 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
 	ASSERT(apic != NULL);
 
 	/* if initial count is 0, current count should also be 0 */
-	if (kvm_apic_get_reg(apic, APIC_TMICT) == 0)
+	if (apic_get_reg(apic, APIC_TMICT) == 0)
 		return 0;
 
 	remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
@@ -852,24 +565,20 @@ static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
 		val = kvm_apic_id(apic) << 24;
 		break;
 	case APIC_ARBPRI:
-		apic_debug("Access APIC ARBPRI register which is for P6\n");
+		printk(KERN_WARNING "Access APIC ARBPRI register "
+		       "which is for P6\n");
 		break;
 
 	case APIC_TMCCT:	/* Timer CCR */
-		if (apic_lvtt_tscdeadline(apic))
-			return 0;
-
 		val = apic_get_tmcct(apic);
 		break;
-	case APIC_PROCPRI:
-		apic_update_ppr(apic);
-		val = kvm_apic_get_reg(apic, offset);
-		break;
+
 	case APIC_TASKPRI:
 		report_tpr_access(apic, false);
 		/* fall thru */
 	default:
-		val = kvm_apic_get_reg(apic, offset);
+		apic_update_ppr(apic);
+		val = apic_get_reg(apic, offset);
 		break;
 	}
 
@@ -886,7 +595,7 @@ static int apic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
 {
 	unsigned char alignment = offset & 0xf;
 	u32 result;
-	/* this bitmask has a bit cleared for each reserved register */
+	/* this bitmask has a bit cleared for each reserver register */
 	static const u64 rmask = 0x43ff01ffffffe70cULL;
 
 	if ((alignment + len) > 4) {
@@ -921,7 +630,7 @@ static int apic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
 
 static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
 {
-	return kvm_apic_hw_enabled(apic) &&
+	return apic_hw_enabled(apic) &&
 	    addr >= apic->base_address &&
 	    addr < apic->base_address + LAPIC_MMIO_LENGTH;
 }
@@ -944,7 +653,7 @@ static void update_divide_count(struct kvm_lapic *apic)
 {
 	u32 tmp1, tmp2, tdcr;
 
-	tdcr = kvm_apic_get_reg(apic, APIC_TDCR);
+	tdcr = apic_get_reg(apic, APIC_TDCR);
 	tmp1 = tdcr & 0xf;
 	tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
 	apic->divide_count = 0x1 << (tmp2 & 0x7);
@@ -955,77 +664,42 @@ static void update_divide_count(struct kvm_lapic *apic)
 
 static void start_apic_timer(struct kvm_lapic *apic)
 {
-	ktime_t now;
-	atomic_set(&apic->lapic_timer.pending, 0);
+	ktime_t now = apic->lapic_timer.timer.base->get_time();
 
-	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
-		/* lapic timer in oneshot or periodic mode */
-		now = apic->lapic_timer.timer.base->get_time();
-		apic->lapic_timer.period = (u64)kvm_apic_get_reg(apic, APIC_TMICT)
-			    * APIC_BUS_CYCLE_NS * apic->divide_count;
+	apic->lapic_timer.period = (u64)apic_get_reg(apic, APIC_TMICT) *
+		    APIC_BUS_CYCLE_NS * apic->divide_count;
+	atomic_set(&apic->lapic_timer.pending, 0);
 
-		if (!apic->lapic_timer.period)
-			return;
-		/*
-		 * Do not allow the guest to program periodic timers with small
-		 * interval, since the hrtimers are not throttled by the host
-		 * scheduler.
-		 */
-		if (apic_lvtt_period(apic)) {
-			s64 min_period = min_timer_period_us * 1000LL;
-
-			if (apic->lapic_timer.period < min_period) {
-				pr_info_ratelimited(
-				    "kvm: vcpu %i: requested %lld ns "
-				    "lapic timer period limited to %lld ns\n",
-				    apic->vcpu->vcpu_id,
-				    apic->lapic_timer.period, min_period);
-				apic->lapic_timer.period = min_period;
-			}
-		}
+	if (!apic->lapic_timer.period)
+		return;
+	/*
+	 * Do not allow the guest to program periodic timers with small
+	 * interval, since the hrtimers are not throttled by the host
+	 * scheduler.
+	 */
+	if (apic_lvtt_period(apic)) {
+		if (apic->lapic_timer.period < NSEC_PER_MSEC/2)
+			apic->lapic_timer.period = NSEC_PER_MSEC/2;
+	}
 
-		hrtimer_start(&apic->lapic_timer.timer,
-			      ktime_add_ns(now, apic->lapic_timer.period),
-			      HRTIMER_MODE_ABS);
+	hrtimer_start(&apic->lapic_timer.timer,
+		      ktime_add_ns(now, apic->lapic_timer.period),
+		      HRTIMER_MODE_ABS);
 
-		apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
-			   PRIx64 ", "
-			   "timer initial count 0x%x, period %lldns, "
-			   "expire @ 0x%016" PRIx64 ".\n", __func__,
-			   APIC_BUS_CYCLE_NS, ktime_to_ns(now),
-			   kvm_apic_get_reg(apic, APIC_TMICT),
-			   apic->lapic_timer.period,
-			   ktime_to_ns(ktime_add_ns(now,
-					apic->lapic_timer.period)));
-	} else if (apic_lvtt_tscdeadline(apic)) {
-		/* lapic timer in tsc deadline mode */
-		u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
-		u64 ns = 0;
-		struct kvm_vcpu *vcpu = apic->vcpu;
-		unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
-		unsigned long flags;
-
-		if (unlikely(!tscdeadline || !this_tsc_khz))
-			return;
-
-		local_irq_save(flags);
-
-		now = apic->lapic_timer.timer.base->get_time();
-		guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
-		if (likely(tscdeadline > guest_tsc)) {
-			ns = (tscdeadline - guest_tsc) * 1000000ULL;
-			do_div(ns, this_tsc_khz);
-		}
-		hrtimer_start(&apic->lapic_timer.timer,
-			ktime_add_ns(now, ns), HRTIMER_MODE_ABS);
-
-		local_irq_restore(flags);
-	}
+	apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
+		   PRIx64 ", "
+		   "timer initial count 0x%x, period %lldns, "
+		   "expire @ 0x%016" PRIx64 ".\n", __func__,
+		   APIC_BUS_CYCLE_NS, ktime_to_ns(now),
+		   apic_get_reg(apic, APIC_TMICT),
+		   apic->lapic_timer.period,
+		   ktime_to_ns(ktime_add_ns(now,
+				apic->lapic_timer.period)));
 }
 
 static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
 {
-	int nmi_wd_enabled = apic_lvt_nmi_mode(kvm_apic_get_reg(apic, APIC_LVT0));
+	int nmi_wd_enabled = apic_lvt_nmi_mode(apic_get_reg(apic, APIC_LVT0));
 
 	if (apic_lvt_nmi_mode(lvt0_val)) {
 		if (!nmi_wd_enabled) {
@@ -1046,7 +720,7 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 	switch (reg) {
 	case APIC_ID:		/* Local APIC ID */
 		if (!apic_x2apic_mode(apic))
-			kvm_apic_set_id(apic, val >> 24);
+			apic_set_reg(apic, APIC_ID, val);
 		else
 			ret = 1;
 		break;
@@ -1062,30 +736,29 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 
 	case APIC_LDR:
 		if (!apic_x2apic_mode(apic))
-			kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
+			apic_set_reg(apic, APIC_LDR, val & APIC_LDR_MASK);
 		else
 			ret = 1;
 		break;
 
 	case APIC_DFR:
-		if (!apic_x2apic_mode(apic)) {
+		if (!apic_x2apic_mode(apic))
 			apic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
-			recalculate_apic_map(apic->vcpu->kvm);
-		} else
+		else
 			ret = 1;
 		break;
 
 	case APIC_SPIV: {
 		u32 mask = 0x3ff;
-		if (kvm_apic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
+		if (apic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
 			mask |= APIC_SPIV_DIRECTED_EOI;
-		apic_set_spiv(apic, val & mask);
+		apic_set_reg(apic, APIC_SPIV, val & mask);
 		if (!(val & APIC_SPIV_APIC_ENABLED)) {
 			int i;
 			u32 lvt_val;
 
 			for (i = 0; i < APIC_LVT_NUM; i++) {
-				lvt_val = kvm_apic_get_reg(apic,
+				lvt_val = apic_get_reg(apic,
 						       APIC_LVTT + 0x10 * i);
 				apic_set_reg(apic, APIC_LVTT + 0x10 * i,
 					     lvt_val | APIC_LVT_MASKED);
@@ -1109,12 +782,13 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 
 	case APIC_LVT0:
 		apic_manage_nmi_watchdog(apic, val);
+	case APIC_LVTT:
 	case APIC_LVTTHMR:
 	case APIC_LVTPC:
 	case APIC_LVT1:
 	case APIC_LVTERR:
 		/* TODO: Check vector */
-		if (!kvm_apic_sw_enabled(apic))
+		if (!apic_sw_enabled(apic))
 			val |= APIC_LVT_MASKED;
 
 		val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
@@ -1122,22 +796,7 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 
 		break;
 
-	case APIC_LVTT:
-		if ((kvm_apic_get_reg(apic, APIC_LVTT) &
-		    apic->lapic_timer.timer_mode_mask) !=
-		   (val & apic->lapic_timer.timer_mode_mask))
-			hrtimer_cancel(&apic->lapic_timer.timer);
-
-		if (!kvm_apic_sw_enabled(apic))
-			val |= APIC_LVT_MASKED;
-		val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
-		apic_set_reg(apic, APIC_LVTT, val);
-		break;
-
 	case APIC_TMICT:
-		if (apic_lvtt_tscdeadline(apic))
-			break;
-
 		hrtimer_cancel(&apic->lapic_timer.timer);
 		apic_set_reg(apic, APIC_TMICT, val);
 		start_apic_timer(apic);
@@ -1145,14 +804,14 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 
 	case APIC_TDCR:
 		if (val & 4)
-			apic_debug("KVM_WRITE:TDCR %x\n", val);
+			printk(KERN_ERR "KVM_WRITE:TDCR %x\n", val);
 		apic_set_reg(apic, APIC_TDCR, val);
 		update_divide_count(apic);
 		break;
 
 	case APIC_ESR:
 		if (apic_x2apic_mode(apic) && val != 0) {
-			apic_debug("KVM_WRITE:ESR not zero %x\n", val);
+			printk(KERN_ERR "KVM_WRITE:ESR not zero %x\n", val);
 			ret = 1;
 		}
 		break;
@@ -1205,32 +864,17 @@ static int apic_mmio_write(struct kvm_io_device *this,
 	return 0;
 }
 
-void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
-{
-	if (kvm_vcpu_has_lapic(vcpu))
-		apic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
-}
-EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
-
 void kvm_free_lapic(struct kvm_vcpu *vcpu)
 {
-	struct kvm_lapic *apic = vcpu->arch.apic;
-
 	if (!vcpu->arch.apic)
 		return;
 
-	hrtimer_cancel(&apic->lapic_timer.timer);
-
-	if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
-		static_key_slow_dec_deferred(&apic_hw_disabled);
-
-	if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_APIC_ENABLED))
-		static_key_slow_dec_deferred(&apic_sw_disabled);
+	hrtimer_cancel(&vcpu->arch.apic->lapic_timer.timer);
 
-	if (apic->regs)
-		free_page((unsigned long)apic->regs);
+	if (vcpu->arch.apic->regs)
+		free_page((unsigned long)vcpu->arch.apic->regs);
 
-	kfree(apic);
+	kfree(vcpu->arch.apic);
 }
 
 /*
@@ -1239,49 +883,24 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu)
  *----------------------------------------------------------------------
  */
 
-u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
-{
-	struct kvm_lapic *apic = vcpu->arch.apic;
-
-	if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) ||
-			apic_lvtt_period(apic))
-		return 0;
-
-	return apic->lapic_timer.tscdeadline;
-}
-
-void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
-{
-	struct kvm_lapic *apic = vcpu->arch.apic;
-
-	if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) ||
-			apic_lvtt_period(apic))
-		return;
-
-	hrtimer_cancel(&apic->lapic_timer.timer);
-	apic->lapic_timer.tscdeadline = data;
-	start_apic_timer(apic);
-}
-
 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
 
-	if (!kvm_vcpu_has_lapic(vcpu))
+	if (!apic)
 		return;
-
 	apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
-		     | (kvm_apic_get_reg(apic, APIC_TASKPRI) & 4));
+		     | (apic_get_reg(apic, APIC_TASKPRI) & 4));
 }
 
 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
 {
+	struct kvm_lapic *apic = vcpu->arch.apic;
 	u64 tpr;
 
-	if (!kvm_vcpu_has_lapic(vcpu))
+	if (!apic)
 		return 0;
-
-	tpr = (u64) kvm_apic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
+	tpr = (u64) apic_get_reg(apic, APIC_TASKPRI);
 
 	return (tpr & 0xf0) >> 4;
 }
@@ -1296,23 +915,14 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
 		return;
 	}
 
-	/* update jump label if enable bit changes */
-	if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) {
-		if (value & MSR_IA32_APICBASE_ENABLE)
-			static_key_slow_dec_deferred(&apic_hw_disabled);
-		else
-			static_key_slow_inc(&apic_hw_disabled.key);
-		recalculate_apic_map(vcpu->kvm);
-	}
-
 	if (!kvm_vcpu_is_bsp(apic->vcpu))
 		value &= ~MSR_IA32_APICBASE_BSP;
 
 	vcpu->arch.apic_base = value;
 	if (apic_x2apic_mode(apic)) {
 		u32 id = kvm_apic_id(apic);
-		u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
-		kvm_apic_set_ldr(apic, ldr);
+		u32 ldr = ((id & ~0xf) << 16) | (1 << (id & 0xf));
+		apic_set_reg(apic, APIC_LDR, ldr);
 	}
 	apic->base_address = apic->vcpu->arch.apic_base &
 			     MSR_IA32_APICBASE_BASE;
@@ -1337,7 +947,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
 	/* Stop the timer in case it's a reset to an active apic */
 	hrtimer_cancel(&apic->lapic_timer.timer);
 
-	kvm_apic_set_id(apic, vcpu->vcpu_id);
+	apic_set_reg(apic, APIC_ID, vcpu->vcpu_id << 24);
 	kvm_apic_set_version(apic->vcpu);
 
 	for (i = 0; i < APIC_LVT_NUM; i++)
@@ -1346,9 +956,9 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
 		     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
 
 	apic_set_reg(apic, APIC_DFR, 0xffffffffU);
-	apic_set_spiv(apic, 0xff);
+	apic_set_reg(apic, APIC_SPIV, 0xff);
 	apic_set_reg(apic, APIC_TASKPRI, 0);
-	kvm_apic_set_ldr(apic, 0);
+	apic_set_reg(apic, APIC_LDR, 0);
 	apic_set_reg(apic, APIC_ESR, 0);
 	apic_set_reg(apic, APIC_ICR, 0);
 	apic_set_reg(apic, APIC_ICR2, 0);
@@ -1360,18 +970,13 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
 		apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
 	}
 	apic->irr_pending = false;
-	apic->isr_count = 0;
-	apic->highest_isr_cache = -1;
 	update_divide_count(apic);
 	atomic_set(&apic->lapic_timer.pending, 0);
 	if (kvm_vcpu_is_bsp(vcpu))
-		kvm_lapic_set_base(vcpu,
-				vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
-	vcpu->arch.pv_eoi.msr_val = 0;
+		vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP;
 	apic_update_ppr(apic);
 
 	vcpu->arch.apic_arb_prio = 0;
-	vcpu->arch.apic_attention = 0;
 
 	apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
 		   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
@@ -1379,34 +984,45 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
 		   vcpu->arch.apic_base, apic->base_address);
 }
 
+bool kvm_apic_present(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.apic && apic_hw_enabled(vcpu->arch.apic);
+}
+
+int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
+{
+	return kvm_apic_present(vcpu) && apic_sw_enabled(vcpu->arch.apic);
+}
+
 /*
  *----------------------------------------------------------------------
  * timer interface
  *----------------------------------------------------------------------
  */
 
-static bool lapic_is_periodic(struct kvm_lapic *apic)
+static bool lapic_is_periodic(struct kvm_timer *ktimer)
 {
+	struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic,
+					      lapic_timer);
 	return apic_lvtt_period(apic);
 }
 
 int apic_has_pending_timer(struct kvm_vcpu *vcpu)
 {
-	struct kvm_lapic *apic = vcpu->arch.apic;
+	struct kvm_lapic *lapic = vcpu->arch.apic;
 
-	if (kvm_vcpu_has_lapic(vcpu) && apic_enabled(apic) &&
-			apic_lvt_enabled(apic, APIC_LVTT))
-		return atomic_read(&apic->lapic_timer.pending);
+	if (lapic && apic_enabled(lapic) && apic_lvt_enabled(lapic, APIC_LVTT))
+		return atomic_read(&lapic->lapic_timer.pending);
 
 	return 0;
 }
 
-int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
+static int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
 {
-	u32 reg = kvm_apic_get_reg(apic, lvt_type);
+	u32 reg = apic_get_reg(apic, lvt_type);
 	int vector, mode, trig_mode;
 
-	if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
+	if (apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
 		vector = reg & APIC_VECTOR_MASK;
 		mode = reg & APIC_MODE_MASK;
 		trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
@@ -1423,40 +1039,15 @@ void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
 		kvm_apic_local_deliver(apic, APIC_LVT0);
 }
 
+static struct kvm_timer_ops lapic_timer_ops = {
+	.is_periodic = lapic_is_periodic,
+};
+
 static const struct kvm_io_device_ops apic_mmio_ops = {
 	.read = apic_mmio_read,
 	.write = apic_mmio_write,
 };
 
-static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
-{
-	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
-	struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
-	struct kvm_vcpu *vcpu = apic->vcpu;
-	wait_queue_head_t *q = &vcpu->wq;
-
-	/*
-	 * There is a race window between reading and incrementing, but we do
-	 * not care about potentially losing timer events in the !reinject
-	 * case anyway. Note: KVM_REQ_PENDING_TIMER is implicitly checked
-	 * in vcpu_enter_guest.
-	 */
-	if (!atomic_read(&ktimer->pending)) {
-		atomic_inc(&ktimer->pending);
-		/* FIXME: this code should not know anything about vcpus */
-		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
-	}
-
-	if (waitqueue_active(q))
-		wake_up_interruptible(q);
-
-	if (lapic_is_periodic(apic)) {
-		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
-		return HRTIMER_RESTART;
-	} else
-		return HRTIMER_NORESTART;
-}
-
 int kvm_create_lapic(struct kvm_vcpu *vcpu)
 {
 	struct kvm_lapic *apic;
@@ -1480,17 +1071,14 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
 
 	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
 		     HRTIMER_MODE_ABS);
-	apic->lapic_timer.timer.function = apic_timer_fn;
+	apic->lapic_timer.timer.function = kvm_timer_fn;
+	apic->lapic_timer.t_ops = &lapic_timer_ops;
+	apic->lapic_timer.kvm = vcpu->kvm;
+	apic->lapic_timer.vcpu = vcpu;
 
-	/*
-	 * APIC is created enabled. This will prevent kvm_lapic_set_base from
-	 * thinking that APIC satet has changed.
-	 */
-	vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
-	kvm_lapic_set_base(vcpu,
-			APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE);
+	apic->base_address = APIC_DEFAULT_PHYS_BASE;
+	vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE;
 
-	static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
 	kvm_lapic_reset(vcpu);
 	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
 
@@ -1506,23 +1094,23 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
 	struct kvm_lapic *apic = vcpu->arch.apic;
 	int highest_irr;
 
-	if (!kvm_vcpu_has_lapic(vcpu) || !apic_enabled(apic))
+	if (!apic || !apic_enabled(apic))
 		return -1;
 
 	apic_update_ppr(apic);
 	highest_irr = apic_find_highest_irr(apic);
 	if ((highest_irr == -1) ||
-	    ((highest_irr & 0xF0) <= kvm_apic_get_reg(apic, APIC_PROCPRI)))
+	    ((highest_irr & 0xF0) <= apic_get_reg(apic, APIC_PROCPRI)))
 		return -1;
 	return highest_irr;
 }
 
 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
 {
-	u32 lvt0 = kvm_apic_get_reg(vcpu->arch.apic, APIC_LVT0);
+	u32 lvt0 = apic_get_reg(vcpu->arch.apic, APIC_LVT0);
 	int r = 0;
 
-	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
+	if (!apic_hw_enabled(vcpu->arch.apic))
 		r = 1;
 	if ((lvt0 & APIC_LVT_MASKED) == 0 &&
 	    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
@@ -1534,10 +1122,7 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
 
-	if (!kvm_vcpu_has_lapic(vcpu))
-		return;
-
-	if (atomic_read(&apic->lapic_timer.pending) > 0) {
+	if (apic && atomic_read(&apic->lapic_timer.pending) > 0) {
 		if (kvm_apic_local_deliver(apic, APIC_LVTT))
 			atomic_dec(&apic->lapic_timer.pending);
 	}
@@ -1551,23 +1136,18 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
 	if (vector == -1)
 		return -1;
 
-	apic_set_isr(vector, apic);
+	apic_set_vector(vector, apic->regs + APIC_ISR);
 	apic_update_ppr(apic);
 	apic_clear_irr(vector, apic);
 	return vector;
 }
 
-void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
-		struct kvm_lapic_state *s)
+void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
 
-	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
-	/* set SPIV separately to get count of SW disabled APICs right */
-	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
-	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
-	/* call kvm_apic_set_id() to put apic into apic_map */
-	kvm_apic_set_id(apic, kvm_apic_id(apic));
+	apic->base_address = vcpu->arch.apic_base &
+			     MSR_IA32_APICBASE_BASE;
 	kvm_apic_set_version(vcpu);
 
 	apic_update_ppr(apic);
@@ -1575,117 +1155,49 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
 	update_divide_count(apic);
 	start_apic_timer(apic);
 	apic->irr_pending = true;
-	apic->isr_count = count_vectors(apic->regs + APIC_ISR);
-	apic->highest_isr_cache = -1;
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 }
 
 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
 {
+	struct kvm_lapic *apic = vcpu->arch.apic;
 	struct hrtimer *timer;
 
-	if (!kvm_vcpu_has_lapic(vcpu))
+	if (!apic)
 		return;
 
-	timer = &vcpu->arch.apic->lapic_timer.timer;
+	timer = &apic->lapic_timer.timer;
 	if (hrtimer_cancel(timer))
 		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
 }
 
-/*
- * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
- *
- * Detect whether guest triggered PV EOI since the
- * last entry. If yes, set EOI on guests's behalf.
- * Clear PV EOI in guest memory in any case.
- */
-static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
-					struct kvm_lapic *apic)
-{
-	bool pending;
-	int vector;
-	/*
-	 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
-	 * and KVM_PV_EOI_ENABLED in guest memory as follows:
-	 *
-	 * KVM_APIC_PV_EOI_PENDING is unset:
-	 *	-> host disabled PV EOI.
-	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
-	 *	-> host enabled PV EOI, guest did not execute EOI yet.
-	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
-	 *	-> host enabled PV EOI, guest executed EOI.
-	 */
-	BUG_ON(!pv_eoi_enabled(vcpu));
-	pending = pv_eoi_get_pending(vcpu);
-	/*
-	 * Clear pending bit in any case: it will be set again on vmentry.
-	 * While this might not be ideal from performance point of view,
-	 * this makes sure pv eoi is only enabled when we know it's safe.
-	 */
-	pv_eoi_clr_pending(vcpu);
-	if (pending)
-		return;
-	vector = apic_set_eoi(apic);
-	trace_kvm_pv_eoi(apic, vector);
-}
-
 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
 {
 	u32 data;
 	void *vapic;
 
-	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
-		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
-
-	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
+	if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
 		return;
 
-	vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
+	vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
 	data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
-	kunmap_atomic(vapic);
+	kunmap_atomic(vapic, KM_USER0);
 
 	apic_set_tpr(vcpu->arch.apic, data & 0xff);
 }
 
-/*
- * apic_sync_pv_eoi_to_guest - called before vmentry
- *
- * Detect whether it's safe to enable PV EOI and
- * if yes do so.
- */
-static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
-					struct kvm_lapic *apic)
-{
-	if (!pv_eoi_enabled(vcpu) ||
-	    /* IRR set or many bits in ISR: could be nested. */
-	    apic->irr_pending ||
-	    /* Cache not set: could be safe but we don't bother. */
-	    apic->highest_isr_cache == -1 ||
-	    /* Need EOI to update ioapic. */
-	    kvm_ioapic_handles_vector(vcpu->kvm, apic->highest_isr_cache)) {
-		/*
-		 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
-		 * so we need not do anything here.
-		 */
-		return;
-	}
-
-	pv_eoi_set_pending(apic->vcpu);
-}
-
 void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
 {
 	u32 data, tpr;
 	int max_irr, max_isr;
-	struct kvm_lapic *apic = vcpu->arch.apic;
+	struct kvm_lapic *apic;
 	void *vapic;
 
-	apic_sync_pv_eoi_to_guest(vcpu, apic);
-
-	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
+	if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
 		return;
 
-	tpr = kvm_apic_get_reg(apic, APIC_TASKPRI) & 0xff;
+	apic = vcpu->arch.apic;
+	tpr = apic_get_reg(apic, APIC_TASKPRI) & 0xff;
 	max_irr = apic_find_highest_irr(apic);
 	if (max_irr < 0)
 		max_irr = 0;
@@ -1694,18 +1206,17 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
 		max_isr = 0;
 	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
 
-	vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
+	vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
 	*(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
-	kunmap_atomic(vapic);
+	kunmap_atomic(vapic, KM_USER0);
 }
 
 void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
 {
+	if (!irqchip_in_kernel(vcpu->kvm))
+		return;
+
 	vcpu->arch.apic->vapic_addr = vapic_addr;
-	if (vapic_addr)
-		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
-	else
-		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
 }
 
 int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
@@ -1744,7 +1255,7 @@ int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
 
-	if (!kvm_vcpu_has_lapic(vcpu))
+	if (!irqchip_in_kernel(vcpu->kvm))
 		return 1;
 
 	/* if this is ICR write vector before command */
@@ -1758,7 +1269,7 @@ int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
 	struct kvm_lapic *apic = vcpu->arch.apic;
 	u32 low, high = 0;
 
-	if (!kvm_vcpu_has_lapic(vcpu))
+	if (!irqchip_in_kernel(vcpu->kvm))
 		return 1;
 
 	if (apic_reg_read(apic, reg, 4, &low))
@@ -1770,23 +1281,3 @@ int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
 
 	return 0;
 }
-
-int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
-{
-	u64 addr = data & ~KVM_MSR_ENABLED;
-	if (!IS_ALIGNED(addr, 4))
-		return 1;
-
-	vcpu->arch.pv_eoi.msr_val = data;
-	if (!pv_eoi_enabled(vcpu))
-		return 0;
-	return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
-					 addr);
-}
-
-void kvm_lapic_init(void)
-{
-	/* do not patch jump label more than once per second */
-	jump_label_rate_limit(&apic_hw_disabled, HZ);
-	jump_label_rate_limit(&apic_sw_disabled, HZ);
-}
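
Both the removed find_highest_vector()/count_vectors() pair and the restored open-coded scan above walk the same register layout: 256 vectors spread over eight 32-bit registers, each register sitting on a 16-byte boundary inside the APIC register page. That is why REG_POS() multiplies the register index by 16, and why the restored loop indexes a u32 pointer as word[word_offset << 2] (a stride of four u32s, i.e. 16 bytes). A minimal userspace sketch of that arithmetic follows; the two macros are copied from the file above, while everything else (the page[] array, the example vector, and the byte-wise bit poke standing in for set_bit() on a little-endian machine) is illustrative only, not part of this commit:

#include <stdio.h>

/* Copied from lapic.c: bit index of a vector within its 32-bit
 * register, and the byte offset of that register (each register
 * occupies a 16-byte slot in the APIC register page). */
#define VEC_POS(v) ((v) & (32 - 1))
#define REG_POS(v) (((v) >> 5) << 4)

int main(void)
{
	unsigned char page[256] = { 0 };	/* hypothetical stand-in for apic->regs + APIC_IRR */
	int vec = 0x99;				/* example vector */

	/* set the bit the way apic_set_vector() would on little-endian */
	page[REG_POS(vec) + VEC_POS(vec) / 8] |= 1u << (VEC_POS(vec) % 8);

	printf("vector 0x%02x -> register at byte offset 0x%02x, bit %d\n",
	       vec, REG_POS(vec), VEC_POS(vec));
	printf("byte 0x%02x now holds 0x%02x\n",
	       REG_POS(vec) + VEC_POS(vec) / 8,
	       page[REG_POS(vec) + VEC_POS(vec) / 8]);
	return 0;
}

For vector 0x99 this prints byte offset 0x40 (the fifth register) and bit 25, and the restored find_highest_vector() would recover it as fls(word) - 1 + (4 << 5) = 25 + 128 = 0x99.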