Diffstat (limited to 'arch/x86/kvm/lapic.c')
-rw-r--r--  arch/x86/kvm/lapic.c | 1154
1 file changed, 1154 insertions, 0 deletions
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
new file mode 100644
index 000000000000..2cbee9479ce4
--- /dev/null
+++ b/arch/x86/kvm/lapic.c
@@ -0,0 +1,1154 @@

/*
 * Local APIC virtualization
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2007 Novell
 * Copyright (C) 2007 Intel
 *
 * Authors:
 *   Dor Laor <dor.laor@qumranet.com>
 *   Gregory Haskins <ghaskins@novell.com>
 *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *
 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/atomic.h>
#include <asm/div64.h>
#include "irq.h"

#define PRId64 "d"
#define PRIx64 "llx"
#define PRIu64 "u"
#define PRIo64 "o"

#define APIC_BUS_CYCLE_NS 1

/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
#define apic_debug(fmt, arg...)

#define APIC_LVT_NUM 6
/* 0x14 is the APIC version for Xeon and Pentium (SDM 8.4.8) */
#define APIC_VERSION (0x14UL | ((APIC_LVT_NUM - 1) << 16))
#define LAPIC_MMIO_LENGTH (1 << 12)
/* the following defines are not in apicdef.h */
#define APIC_SHORT_MASK 0xc0000
#define APIC_DEST_NOSHORT 0x0
#define APIC_DEST_MASK 0x800
#define MAX_APIC_VECTOR 256

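/*
 * The IRR, ISR and TMR each hold 256 vector bits as eight 32-bit words
 * spaced 16 bytes apart.  VEC_POS() yields the bit index within a word
 * and REG_POS() the byte offset of the word that holds that vector.
 */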
#define VEC_POS(v) ((v) & (32 - 1))
#define REG_POS(v) (((v) >> 5) << 4)

static inline u32 apic_get_reg(struct kvm_lapic *apic, int reg_off)
{
        return *((u32 *) (apic->regs + reg_off));
}

static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
{
        *((u32 *) (apic->regs + reg_off)) = val;
}

static inline int apic_test_and_set_vector(int vec, void *bitmap)
{
        return test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int apic_test_and_clear_vector(int vec, void *bitmap)
{
        return test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline void apic_set_vector(int vec, void *bitmap)
{
        set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline void apic_clear_vector(int vec, void *bitmap)
{
        clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int apic_hw_enabled(struct kvm_lapic *apic)
{
        return (apic)->vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE;
}

static inline int apic_sw_enabled(struct kvm_lapic *apic)
{
        return apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_APIC_ENABLED;
}

static inline int apic_enabled(struct kvm_lapic *apic)
{
        return apic_sw_enabled(apic) && apic_hw_enabled(apic);
}

#define LVT_MASK \
        (APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK \
        (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
         APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)

static inline int kvm_apic_id(struct kvm_lapic *apic)
{
        return (apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
}

static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
        return !(apic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}

static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
{
        return apic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
}

static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
        return apic_get_reg(apic, APIC_LVTT) & APIC_LVT_TIMER_PERIODIC;
}

static unsigned int apic_lvt_mask[APIC_LVT_NUM] = {
        LVT_MASK | APIC_LVT_TIMER_PERIODIC,     /* LVTT */
        LVT_MASK | APIC_MODE_MASK,              /* LVTTHMR */
        LVT_MASK | APIC_MODE_MASK,              /* LVTPC */
        LINT_MASK, LINT_MASK,                   /* LVT0-1 */
        LVT_MASK                                /* LVTERR */
};

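/*
 * Scan a 256-bit vector bitmap from the top word down and return the
 * highest vector that is set, or -1 if none is.  The "<< 2" turns a
 * word index into a u32 index, since consecutive words sit 16 bytes
 * (four u32s) apart.
 */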
static int find_highest_vector(void *bitmap)
{
        u32 *word = bitmap;
        int word_offset = MAX_APIC_VECTOR >> 5;

        while ((word_offset != 0) && (word[(--word_offset) << 2] == 0))
                continue;

        if (likely(!word_offset && !word[0]))
                return -1;
        else
                return fls(word[word_offset << 2]) - 1 + (word_offset << 5);
}

static inline int apic_test_and_set_irr(int vec, struct kvm_lapic *apic)
{
        return apic_test_and_set_vector(vec, apic->regs + APIC_IRR);
}

static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
        apic_clear_vector(vec, apic->regs + APIC_IRR);
}

static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
        int result;

        result = find_highest_vector(apic->regs + APIC_IRR);
        ASSERT(result == -1 || result >= 16);

        return result;
}

int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        int highest_irr;

        if (!apic)
                return 0;
        highest_irr = apic_find_highest_irr(apic);

        return highest_irr;
}
EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (!apic_test_and_set_irr(vec, apic)) {
                /* a new pending irq is set in IRR */
                if (trig)
                        apic_set_vector(vec, apic->regs + APIC_TMR);
                else
                        apic_clear_vector(vec, apic->regs + APIC_TMR);
                kvm_vcpu_kick(apic->vcpu);
                return 1;
        }
        return 0;
}

static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
        int result;

        result = find_highest_vector(apic->regs + APIC_ISR);
        ASSERT(result == -1 || result >= 16);

        return result;
}

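/*
 * The processor priority (PPR) is the higher of the task priority and
 * the priority class of the highest in-service vector; only priority
 * classes (the upper nibble) are compared.
 */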
static void apic_update_ppr(struct kvm_lapic *apic)
{
        u32 tpr, isrv, ppr;
        int isr;

        tpr = apic_get_reg(apic, APIC_TASKPRI);
        isr = apic_find_highest_isr(apic);
        isrv = (isr != -1) ? isr : 0;

        if ((tpr & 0xf0) >= (isrv & 0xf0))
                ppr = tpr & 0xff;
        else
                ppr = isrv & 0xf0;

        apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
                   apic, ppr, isr, isrv);

        apic_set_reg(apic, APIC_PROCPRI, ppr);
}

static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
        apic_set_reg(apic, APIC_TASKPRI, tpr);
        apic_update_ppr(apic);
}

int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
{
        return kvm_apic_id(apic) == dest;
}

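/*
 * Logical destination match: in flat mode any overlapping LDR bit is a
 * hit; in cluster mode the upper nibbles (cluster IDs) must be equal
 * and the lower nibbles must overlap.
 */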
int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
{
        int result = 0;
        u8 logical_id;

        logical_id = GET_APIC_LOGICAL_ID(apic_get_reg(apic, APIC_LDR));

        switch (apic_get_reg(apic, APIC_DFR)) {
        case APIC_DFR_FLAT:
                if (logical_id & mda)
                        result = 1;
                break;
        case APIC_DFR_CLUSTER:
                if (((logical_id >> 4) == (mda >> 0x4))
                    && (logical_id & mda & 0xf))
                        result = 1;
                break;
        default:
                printk(KERN_WARNING "Bad DFR vcpu %d: %08x\n",
                       apic->vcpu->vcpu_id, apic_get_reg(apic, APIC_DFR));
                break;
        }

        return result;
}

static int apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
                           int short_hand, int dest, int dest_mode)
{
        int result = 0;
        struct kvm_lapic *target = vcpu->arch.apic;

        apic_debug("target %p, source %p, dest 0x%x, "
                   "dest_mode 0x%x, short_hand 0x%x",
                   target, source, dest, dest_mode, short_hand);

        ASSERT(!target);
        switch (short_hand) {
        case APIC_DEST_NOSHORT:
                if (dest_mode == 0) {
                        /* Physical mode. */
                        if ((dest == 0xFF) || (dest == kvm_apic_id(target)))
                                result = 1;
                } else
                        /* Logical mode. */
                        result = kvm_apic_match_logical_addr(target, dest);
                break;
        case APIC_DEST_SELF:
                if (target == source)
                        result = 1;
                break;
        case APIC_DEST_ALLINC:
                result = 1;
                break;
        case APIC_DEST_ALLBUT:
                if (target != source)
                        result = 1;
                break;
        default:
                printk(KERN_WARNING "Bad dest shorthand value %x\n",
                       short_hand);
                break;
        }

        return result;
}

/*
 * Add a pending IRQ into lapic.
 * Return 1 if successfully added and 0 if discarded.
 */
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                             int vector, int level, int trig_mode)
{
        int orig_irr, result = 0;
        struct kvm_vcpu *vcpu = apic->vcpu;

        switch (delivery_mode) {
        case APIC_DM_FIXED:
        case APIC_DM_LOWEST:
                /* FIXME add logic for vcpu on reset */
                if (unlikely(!apic_enabled(apic)))
                        break;

                orig_irr = apic_test_and_set_irr(vector, apic);
                if (orig_irr && trig_mode) {
                        apic_debug("level trig mode repeatedly for vector %d",
                                   vector);
                        break;
                }

                if (trig_mode) {
                        apic_debug("level trig mode for vector %d", vector);
                        apic_set_vector(vector, apic->regs + APIC_TMR);
                } else
                        apic_clear_vector(vector, apic->regs + APIC_TMR);

                if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE)
                        kvm_vcpu_kick(vcpu);
                else if (vcpu->arch.mp_state == VCPU_MP_STATE_HALTED) {
                        vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
                        if (waitqueue_active(&vcpu->wq))
                                wake_up_interruptible(&vcpu->wq);
                }

                result = (orig_irr == 0);
                break;

        case APIC_DM_REMRD:
                printk(KERN_DEBUG "Ignoring delivery mode 3\n");
                break;

        case APIC_DM_SMI:
                printk(KERN_DEBUG "Ignoring guest SMI\n");
                break;
        case APIC_DM_NMI:
                printk(KERN_DEBUG "Ignoring guest NMI\n");
                break;

        case APIC_DM_INIT:
                if (level) {
                        if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE)
                                printk(KERN_DEBUG
                                       "INIT on a runnable vcpu %d\n",
                                       vcpu->vcpu_id);
                        vcpu->arch.mp_state = VCPU_MP_STATE_INIT_RECEIVED;
                        kvm_vcpu_kick(vcpu);
                } else {
                        printk(KERN_DEBUG
                               "Ignoring de-assert INIT to vcpu %d\n",
                               vcpu->vcpu_id);
                }

                break;

        case APIC_DM_STARTUP:
                printk(KERN_DEBUG "SIPI to vcpu %d vector 0x%02x\n",
                       vcpu->vcpu_id, vector);
                if (vcpu->arch.mp_state == VCPU_MP_STATE_INIT_RECEIVED) {
                        vcpu->arch.sipi_vector = vector;
                        vcpu->arch.mp_state = VCPU_MP_STATE_SIPI_RECEIVED;
                        if (waitqueue_active(&vcpu->wq))
                                wake_up_interruptible(&vcpu->wq);
                }
                break;

        default:
                printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
                       delivery_mode);
                break;
        }
        return result;
}

static struct kvm_lapic *kvm_apic_round_robin(struct kvm *kvm, u8 vector,
                                              unsigned long bitmap)
{
        int last;
        int next;
        struct kvm_lapic *apic = NULL;

        last = kvm->arch.round_robin_prev_vcpu;
        next = last;

        do {
                if (++next == KVM_MAX_VCPUS)
                        next = 0;
                if (kvm->vcpus[next] == NULL || !test_bit(next, &bitmap))
                        continue;
                apic = kvm->vcpus[next]->arch.apic;
                if (apic && apic_enabled(apic))
                        break;
                apic = NULL;
        } while (next != last);
        kvm->arch.round_robin_prev_vcpu = next;

        if (!apic)
                printk(KERN_DEBUG "vcpu not ready for apic_round_robin\n");

        return apic;
}

struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
                                          unsigned long bitmap)
{
        struct kvm_lapic *apic;

        apic = kvm_apic_round_robin(kvm, vector, bitmap);
        if (apic)
                return apic->vcpu;
        return NULL;
}

static void apic_set_eoi(struct kvm_lapic *apic)
{
        int vector = apic_find_highest_isr(apic);

        /*
         * Not every EOI write has a corresponding bit set in the ISR;
         * one example is when the kernel checks the timer in setup_IO_APIC.
         */
        if (vector == -1)
                return;

        apic_clear_vector(vector, apic->regs + APIC_ISR);
        apic_update_ppr(apic);

        if (apic_test_and_clear_vector(vector, apic->regs + APIC_TMR))
                kvm_ioapic_update_eoi(apic->vcpu->kvm, vector);
}

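/*
 * Decode the ICR into shorthand, trigger mode, level, destination mode,
 * delivery mode and vector, then offer the interrupt to every matching
 * vCPU; lowest-priority delivery is resolved afterwards from the bitmap
 * of candidates collected in the loop.
 */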
static void apic_send_ipi(struct kvm_lapic *apic)
{
        u32 icr_low = apic_get_reg(apic, APIC_ICR);
        u32 icr_high = apic_get_reg(apic, APIC_ICR2);

        unsigned int dest = GET_APIC_DEST_FIELD(icr_high);
        unsigned int short_hand = icr_low & APIC_SHORT_MASK;
        unsigned int trig_mode = icr_low & APIC_INT_LEVELTRIG;
        unsigned int level = icr_low & APIC_INT_ASSERT;
        unsigned int dest_mode = icr_low & APIC_DEST_MASK;
        unsigned int delivery_mode = icr_low & APIC_MODE_MASK;
        unsigned int vector = icr_low & APIC_VECTOR_MASK;

        struct kvm_vcpu *target;
        struct kvm_vcpu *vcpu;
        unsigned long lpr_map = 0;
        int i;

        apic_debug("icr_high 0x%x, icr_low 0x%x, "
                   "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
                   "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x\n",
                   icr_high, icr_low, short_hand, dest,
                   trig_mode, level, dest_mode, delivery_mode, vector);

        for (i = 0; i < KVM_MAX_VCPUS; i++) {
                vcpu = apic->vcpu->kvm->vcpus[i];
                if (!vcpu)
                        continue;

                if (vcpu->arch.apic &&
                    apic_match_dest(vcpu, apic, short_hand, dest, dest_mode)) {
                        if (delivery_mode == APIC_DM_LOWEST)
                                set_bit(vcpu->vcpu_id, &lpr_map);
                        else
                                __apic_accept_irq(vcpu->arch.apic, delivery_mode,
                                                  vector, level, trig_mode);
                }
        }

        if (delivery_mode == APIC_DM_LOWEST) {
                target = kvm_get_lowest_prio_vcpu(vcpu->kvm, vector, lpr_map);
                if (target != NULL)
                        __apic_accept_irq(target->arch.apic, delivery_mode,
                                          vector, level, trig_mode);
        }
}

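/*
 * The current-count register is computed on demand: time elapsed since
 * the last timer update is converted into bus cycles (scaled by the
 * divider) and subtracted from the initial count, with periodic timers
 * wrapped back into range.
 */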
static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
        u64 counter_passed;
        ktime_t passed, now;
        u32 tmcct;

        ASSERT(apic != NULL);

        now = apic->timer.dev.base->get_time();
        tmcct = apic_get_reg(apic, APIC_TMICT);

        /* if initial count is 0, current count should also be 0 */
        if (tmcct == 0)
                return 0;

        if (unlikely(ktime_to_ns(now) <=
                     ktime_to_ns(apic->timer.last_update))) {
                /* Wrap around */
                passed = ktime_add(( {
                        (ktime_t) {
                                .tv64 = KTIME_MAX -
                                (apic->timer.last_update).tv64}; }
                        ), now);
                apic_debug("time elapsed\n");
        } else
                passed = ktime_sub(now, apic->timer.last_update);

        counter_passed = div64_64(ktime_to_ns(passed),
                                  (APIC_BUS_CYCLE_NS * apic->timer.divide_count));

        if (counter_passed > tmcct) {
                if (unlikely(!apic_lvtt_period(apic))) {
                        /* one-shot timers stick at 0 until reset */
                        tmcct = 0;
                } else {
                        /*
                         * periodic timers reset to APIC_TMICT when they
                         * hit 0. The while loop simulates this happening N
                         * times. (counter_passed %= tmcct) would also work,
                         * but might be slower or not work on 32-bit??
                         */
                        while (counter_passed > tmcct)
                                counter_passed -= tmcct;
                        tmcct -= counter_passed;
                }
        } else {
                tmcct -= counter_passed;
        }

        return tmcct;
}

static void __report_tpr_access(struct kvm_lapic *apic, bool write)
{
        struct kvm_vcpu *vcpu = apic->vcpu;
        struct kvm_run *run = vcpu->run;

        set_bit(KVM_REQ_REPORT_TPR_ACCESS, &vcpu->requests);
        kvm_x86_ops->cache_regs(vcpu);
        run->tpr_access.rip = vcpu->arch.rip;
        run->tpr_access.is_write = write;
}

static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
{
        if (apic->vcpu->arch.tpr_access_reporting)
                __report_tpr_access(apic, write);
}

static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
        u32 val = 0;

        if (offset >= LAPIC_MMIO_LENGTH)
                return 0;

        switch (offset) {
        case APIC_ARBPRI:
                printk(KERN_WARNING "Access APIC ARBPRI register "
                       "which is for P6\n");
                break;

        case APIC_TMCCT:        /* Timer CCR */
                val = apic_get_tmcct(apic);
                break;

        case APIC_TASKPRI:
                report_tpr_access(apic, false);
                /* fall thru */
        default:
                apic_update_ppr(apic);
                val = apic_get_reg(apic, offset);
                break;
        }

        return val;
}

static void apic_mmio_read(struct kvm_io_device *this,
                           gpa_t address, int len, void *data)
{
        struct kvm_lapic *apic = (struct kvm_lapic *)this->private;
        unsigned int offset = address - apic->base_address;
        unsigned char alignment = offset & 0xf;
        u32 result;

        if ((alignment + len) > 4) {
                printk(KERN_ERR "KVM_APIC_READ: alignment error %lx %d",
                       (unsigned long)address, len);
                return;
        }
        result = __apic_read(apic, offset & ~0xf);

        switch (len) {
        case 1:
        case 2:
        case 4:
                memcpy(data, (char *)&result + alignment, len);
                break;
        default:
                printk(KERN_ERR "Local APIC read with len = %x, "
                       "should be 1,2, or 4 instead\n", len);
                break;
        }
}

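/*
 * TDCR bits 0, 1 and 3 form a 3-bit divide value v; the divider is
 * 2^(v+1), where v == 7 wraps around (via the "& 0x7") to divide-by-1.
 */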
static void update_divide_count(struct kvm_lapic *apic)
{
        u32 tmp1, tmp2, tdcr;

        tdcr = apic_get_reg(apic, APIC_TDCR);
        tmp1 = tdcr & 0xf;
        tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
        apic->timer.divide_count = 0x1 << (tmp2 & 0x7);

        apic_debug("timer divide count is 0x%x\n",
                   apic->timer.divide_count);
}

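/* Arm the hrtimer for one period: TMICT * bus cycle length * divider. */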
static void start_apic_timer(struct kvm_lapic *apic)
{
        ktime_t now = apic->timer.dev.base->get_time();

        apic->timer.last_update = now;

        apic->timer.period = apic_get_reg(apic, APIC_TMICT) *
                    APIC_BUS_CYCLE_NS * apic->timer.divide_count;
        atomic_set(&apic->timer.pending, 0);
        hrtimer_start(&apic->timer.dev,
                      ktime_add_ns(now, apic->timer.period),
                      HRTIMER_MODE_ABS);

        apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
                   PRIx64 ", "
                   "timer initial count 0x%x, period %lldns, "
                   "expire @ 0x%016" PRIx64 ".\n", __FUNCTION__,
                   APIC_BUS_CYCLE_NS, ktime_to_ns(now),
                   apic_get_reg(apic, APIC_TMICT),
                   apic->timer.period,
                   ktime_to_ns(ktime_add_ns(now,
                                apic->timer.period)));
}

static void apic_mmio_write(struct kvm_io_device *this,
                            gpa_t address, int len, const void *data)
{
        struct kvm_lapic *apic = (struct kvm_lapic *)this->private;
        unsigned int offset = address - apic->base_address;
        unsigned char alignment = offset & 0xf;
        u32 val;

        /*
         * APIC registers must be aligned on a 128-bit boundary, and
         * 32/64/128-bit registers must be accessed as 32-bit values.
         * Refer to SDM 8.4.1.
         */
        if (len != 4 || alignment) {
                if (printk_ratelimit())
                        printk(KERN_ERR "apic write: bad size=%d %lx\n",
                               len, (long)address);
                return;
        }

        val = *(u32 *) data;

        /* too common printing */
        if (offset != APIC_EOI)
                apic_debug("%s: offset 0x%x with length 0x%x, and value is "
                           "0x%x\n", __FUNCTION__, offset, len, val);

        offset &= 0xff0;

        switch (offset) {
        case APIC_ID:           /* Local APIC ID */
                apic_set_reg(apic, APIC_ID, val);
                break;

        case APIC_TASKPRI:
                report_tpr_access(apic, true);
                apic_set_tpr(apic, val & 0xff);
                break;

        case APIC_EOI:
                apic_set_eoi(apic);
                break;

        case APIC_LDR:
                apic_set_reg(apic, APIC_LDR, val & APIC_LDR_MASK);
                break;

        case APIC_DFR:
                apic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
                break;

        case APIC_SPIV:
                apic_set_reg(apic, APIC_SPIV, val & 0x3ff);
                if (!(val & APIC_SPIV_APIC_ENABLED)) {
                        int i;
                        u32 lvt_val;

                        for (i = 0; i < APIC_LVT_NUM; i++) {
                                lvt_val = apic_get_reg(apic,
                                                       APIC_LVTT + 0x10 * i);
                                apic_set_reg(apic, APIC_LVTT + 0x10 * i,
                                             lvt_val | APIC_LVT_MASKED);
                        }
                        atomic_set(&apic->timer.pending, 0);

                }
                break;

        case APIC_ICR:
                /* No delay here, so we always clear the pending bit */
                apic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
                apic_send_ipi(apic);
                break;

        case APIC_ICR2:
                apic_set_reg(apic, APIC_ICR2, val & 0xff000000);
                break;

        case APIC_LVTT:
        case APIC_LVTTHMR:
        case APIC_LVTPC:
        case APIC_LVT0:
        case APIC_LVT1:
        case APIC_LVTERR:
                /* TODO: Check vector */
                if (!apic_sw_enabled(apic))
                        val |= APIC_LVT_MASKED;

                val &= apic_lvt_mask[(offset - APIC_LVTT) >> 4];
                apic_set_reg(apic, offset, val);

                break;

        case APIC_TMICT:
                hrtimer_cancel(&apic->timer.dev);
                apic_set_reg(apic, APIC_TMICT, val);
                start_apic_timer(apic);
                return;

        case APIC_TDCR:
                if (val & 4)
                        printk(KERN_ERR "KVM_WRITE:TDCR %x\n", val);
                apic_set_reg(apic, APIC_TDCR, val);
                update_divide_count(apic);
                break;

        default:
                apic_debug("Local APIC Write to read-only register %x\n",
                           offset);
                break;
        }

}

static int apic_mmio_range(struct kvm_io_device *this, gpa_t addr)
{
        struct kvm_lapic *apic = (struct kvm_lapic *)this->private;
        int ret = 0;


        if (apic_hw_enabled(apic) &&
            (addr >= apic->base_address) &&
            (addr < (apic->base_address + LAPIC_MMIO_LENGTH)))
                ret = 1;

        return ret;
}

void kvm_free_lapic(struct kvm_vcpu *vcpu)
{
        if (!vcpu->arch.apic)
                return;

        hrtimer_cancel(&vcpu->arch.apic->timer.dev);

        if (vcpu->arch.apic->regs_page)
                __free_page(vcpu->arch.apic->regs_page);

        kfree(vcpu->arch.apic);
}

/*
 *----------------------------------------------------------------------
 * LAPIC interface
 *----------------------------------------------------------------------
 */

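/*
 * CR8 mirrors TPR bits 7:4: kvm_lapic_set_tpr() shifts CR8 into the
 * task-priority class and kvm_lapic_get_cr8() extracts it again.
 */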
void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (!apic)
                return;
        apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
                     | (apic_get_reg(apic, APIC_TASKPRI) & 4));
}

u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        u64 tpr;

        if (!apic)
                return 0;
        tpr = (u64) apic_get_reg(apic, APIC_TASKPRI);

        return (tpr & 0xf0) >> 4;
}
EXPORT_SYMBOL_GPL(kvm_lapic_get_cr8);

void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (!apic) {
                value |= MSR_IA32_APICBASE_BSP;
                vcpu->arch.apic_base = value;
                return;
        }
        if (apic->vcpu->vcpu_id)
                value &= ~MSR_IA32_APICBASE_BSP;

        vcpu->arch.apic_base = value;
        apic->base_address = apic->vcpu->arch.apic_base &
                             MSR_IA32_APICBASE_BASE;

        /* with FSB delivery interrupt, we can restart APIC functionality */
        apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
                   "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);

}

u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_lapic_get_base);

void kvm_lapic_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic;
        int i;

        apic_debug("%s\n", __FUNCTION__);

        ASSERT(vcpu);
        apic = vcpu->arch.apic;
        ASSERT(apic != NULL);

        /* Stop the timer in case it's a reset to an active apic */
        hrtimer_cancel(&apic->timer.dev);

        apic_set_reg(apic, APIC_ID, vcpu->vcpu_id << 24);
        apic_set_reg(apic, APIC_LVR, APIC_VERSION);

        for (i = 0; i < APIC_LVT_NUM; i++)
                apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
        apic_set_reg(apic, APIC_LVT0,
                     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));

        apic_set_reg(apic, APIC_DFR, 0xffffffffU);
        apic_set_reg(apic, APIC_SPIV, 0xff);
        apic_set_reg(apic, APIC_TASKPRI, 0);
        apic_set_reg(apic, APIC_LDR, 0);
        apic_set_reg(apic, APIC_ESR, 0);
        apic_set_reg(apic, APIC_ICR, 0);
        apic_set_reg(apic, APIC_ICR2, 0);
        apic_set_reg(apic, APIC_TDCR, 0);
        apic_set_reg(apic, APIC_TMICT, 0);
        for (i = 0; i < 8; i++) {
                apic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
                apic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
                apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
        }
        update_divide_count(apic);
        atomic_set(&apic->timer.pending, 0);
        if (vcpu->vcpu_id == 0)
                vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP;
        apic_update_ppr(apic);

        apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
                   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __FUNCTION__,
                   vcpu, kvm_apic_id(apic),
                   vcpu->arch.apic_base, apic->base_address);
}
EXPORT_SYMBOL_GPL(kvm_lapic_reset);

int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        int ret = 0;

        if (!apic)
                return 0;
        ret = apic_enabled(apic);

        return ret;
}
EXPORT_SYMBOL_GPL(kvm_lapic_enabled);

/*
 *----------------------------------------------------------------------
 * timer interface
 *----------------------------------------------------------------------
 */

/* TODO: make sure __apic_timer_fn runs in current pCPU */
static int __apic_timer_fn(struct kvm_lapic *apic)
{
        int result = 0;
        wait_queue_head_t *q = &apic->vcpu->wq;

        atomic_inc(&apic->timer.pending);
        if (waitqueue_active(q)) {
                apic->vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
                wake_up_interruptible(q);
        }
        if (apic_lvtt_period(apic)) {
                result = 1;
                apic->timer.dev.expires = ktime_add_ns(
                                        apic->timer.dev.expires,
                                        apic->timer.period);
        }
        return result;
}

static int __inject_apic_timer_irq(struct kvm_lapic *apic)
{
        int vector;

        vector = apic_lvt_vector(apic, APIC_LVTT);
        return __apic_accept_irq(apic, APIC_DM_FIXED, vector, 1, 0);
}

static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{
        struct kvm_lapic *apic;
        int restart_timer = 0;

        apic = container_of(data, struct kvm_lapic, timer.dev);

        restart_timer = __apic_timer_fn(apic);

        if (restart_timer)
                return HRTIMER_RESTART;
        else
                return HRTIMER_NORESTART;
}

int kvm_create_lapic(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic;

        ASSERT(vcpu != NULL);
        apic_debug("apic_init %d\n", vcpu->vcpu_id);

        apic = kzalloc(sizeof(*apic), GFP_KERNEL);
        if (!apic)
                goto nomem;

        vcpu->arch.apic = apic;

        apic->regs_page = alloc_page(GFP_KERNEL);
        if (apic->regs_page == NULL) {
                printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
                       vcpu->vcpu_id);
                goto nomem_free_apic;
        }
        apic->regs = page_address(apic->regs_page);
        memset(apic->regs, 0, PAGE_SIZE);
        apic->vcpu = vcpu;

        hrtimer_init(&apic->timer.dev, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        apic->timer.dev.function = apic_timer_fn;
        apic->base_address = APIC_DEFAULT_PHYS_BASE;
        vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE;

        kvm_lapic_reset(vcpu);
        apic->dev.read = apic_mmio_read;
        apic->dev.write = apic_mmio_write;
        apic->dev.in_range = apic_mmio_range;
        apic->dev.private = apic;

        return 0;
nomem_free_apic:
        kfree(apic);
nomem:
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(kvm_create_lapic);

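/*
 * A pending vector is only reported as deliverable when its priority
 * class is above the current processor priority (PPR); otherwise it
 * stays latched in the IRR.
 */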
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        int highest_irr;

        if (!apic || !apic_enabled(apic))
                return -1;

        apic_update_ppr(apic);
        highest_irr = apic_find_highest_irr(apic);
        if ((highest_irr == -1) ||
            ((highest_irr & 0xF0) <= apic_get_reg(apic, APIC_PROCPRI)))
                return -1;
        return highest_irr;
}

int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
{
        u32 lvt0 = apic_get_reg(vcpu->arch.apic, APIC_LVT0);
        int r = 0;

        if (vcpu->vcpu_id == 0) {
                if (!apic_hw_enabled(vcpu->arch.apic))
                        r = 1;
                if ((lvt0 & APIC_LVT_MASKED) == 0 &&
                    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
                        r = 1;
        }
        return r;
}

void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (apic && apic_lvt_enabled(apic, APIC_LVTT) &&
            atomic_read(&apic->timer.pending) > 0) {
                if (__inject_apic_timer_irq(apic))
                        atomic_dec(&apic->timer.pending);
        }
}

void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (apic && apic_lvt_vector(apic, APIC_LVTT) == vec)
                apic->timer.last_update = ktime_add_ns(
                                apic->timer.last_update,
                                apic->timer.period);
}

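/*
 * Accepting the interrupt mirrors a hardware INTA cycle: the vector
 * moves from the IRR to the ISR and the PPR is recomputed before the
 * IRR bit is cleared.
 */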
int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
{
        int vector = kvm_apic_has_interrupt(vcpu);
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (vector == -1)
                return -1;

        apic_set_vector(vector, apic->regs + APIC_ISR);
        apic_update_ppr(apic);
        apic_clear_irr(vector, apic);
        return vector;
}

void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        apic->base_address = vcpu->arch.apic_base &
                             MSR_IA32_APICBASE_BASE;
        apic_set_reg(apic, APIC_LVR, APIC_VERSION);
        apic_update_ppr(apic);
        hrtimer_cancel(&apic->timer.dev);
        update_divide_count(apic);
        start_apic_timer(apic);
}

void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        struct hrtimer *timer;

        if (!apic)
                return;

        timer = &apic->timer.dev;
        if (hrtimer_cancel(timer))
                hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
}

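/*
 * The "vapic" page is a guest-visible mirror of the TPR (plus the
 * highest IRR/ISR priority classes): sync_from_vapic pulls in a TPR the
 * guest wrote there, sync_to_vapic publishes the current state back.
 */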
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
        u32 data;
        void *vapic;

        if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
                return;

        vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
        data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
        kunmap_atomic(vapic, KM_USER0);

        apic_set_tpr(vcpu->arch.apic, data & 0xff);
}

void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{
        u32 data, tpr;
        int max_irr, max_isr;
        struct kvm_lapic *apic;
        void *vapic;

        if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
                return;

        apic = vcpu->arch.apic;
        tpr = apic_get_reg(apic, APIC_TASKPRI) & 0xff;
        max_irr = apic_find_highest_irr(apic);
        if (max_irr < 0)
                max_irr = 0;
        max_isr = apic_find_highest_isr(apic);
        if (max_isr < 0)
                max_isr = 0;
        data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);

        vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
        *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
        kunmap_atomic(vapic, KM_USER0);
}

void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
        if (!irqchip_in_kernel(vcpu->kvm))
                return;

        vcpu->arch.apic->vapic_addr = vapic_addr;
}