Diffstat (limited to 'drivers/kvm/vmx.c')
-rw-r--r-- | drivers/kvm/vmx.c | 2673
1 files changed, 0 insertions, 2673 deletions
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
deleted file mode 100644
index 11ca2340d38f..000000000000
--- a/drivers/kvm/vmx.c
+++ /dev/null
@@ -1,2673 +0,0 @@
1 | /* | ||
2 | * Kernel-based Virtual Machine driver for Linux | ||
3 | * | ||
4 | * This module enables machines with Intel VT-x extensions to run virtual | ||
5 | * machines without emulation or binary translation. | ||
6 | * | ||
7 | * Copyright (C) 2006 Qumranet, Inc. | ||
8 | * | ||
9 | * Authors: | ||
10 | * Avi Kivity <avi@qumranet.com> | ||
11 | * Yaniv Kamay <yaniv@qumranet.com> | ||
12 | * | ||
13 | * This work is licensed under the terms of the GNU GPL, version 2. See | ||
14 | * the COPYING file in the top-level directory. | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #include "kvm.h" | ||
19 | #include "x86.h" | ||
20 | #include "x86_emulate.h" | ||
21 | #include "irq.h" | ||
22 | #include "vmx.h" | ||
23 | #include "segment_descriptor.h" | ||
24 | #include "mmu.h" | ||
25 | |||
26 | #include <linux/module.h> | ||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/mm.h> | ||
29 | #include <linux/highmem.h> | ||
30 | #include <linux/sched.h> | ||
31 | #include <linux/moduleparam.h> | ||
32 | |||
33 | #include <asm/io.h> | ||
34 | #include <asm/desc.h> | ||
35 | |||
36 | MODULE_AUTHOR("Qumranet"); | ||
37 | MODULE_LICENSE("GPL"); | ||
38 | |||
39 | static int bypass_guest_pf = 1; | ||
40 | module_param(bypass_guest_pf, bool, 0); | ||
41 | |||
42 | struct vmcs { | ||
43 | u32 revision_id; | ||
44 | u32 abort; | ||
45 | char data[0]; | ||
46 | }; | ||
47 | |||
48 | struct vcpu_vmx { | ||
49 | struct kvm_vcpu vcpu; | ||
50 | int launched; | ||
51 | u8 fail; | ||
52 | u32 idt_vectoring_info; | ||
53 | struct kvm_msr_entry *guest_msrs; | ||
54 | struct kvm_msr_entry *host_msrs; | ||
55 | int nmsrs; | ||
56 | int save_nmsrs; | ||
57 | int msr_offset_efer; | ||
58 | #ifdef CONFIG_X86_64 | ||
59 | int msr_offset_kernel_gs_base; | ||
60 | #endif | ||
61 | struct vmcs *vmcs; | ||
62 | struct { | ||
63 | int loaded; | ||
64 | u16 fs_sel, gs_sel, ldt_sel; | ||
65 | int gs_ldt_reload_needed; | ||
66 | int fs_reload_needed; | ||
67 | int guest_efer_loaded; | ||
68 | } host_state; | ||
69 | struct { | ||
70 | struct { | ||
71 | bool pending; | ||
72 | u8 vector; | ||
73 | unsigned rip; | ||
74 | } irq; | ||
75 | } rmode; | ||
76 | }; | ||
77 | |||
78 | static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) | ||
79 | { | ||
80 | return container_of(vcpu, struct vcpu_vmx, vcpu); | ||
81 | } | ||
82 | |||
83 | static int init_rmode_tss(struct kvm *kvm); | ||
84 | |||
85 | static DEFINE_PER_CPU(struct vmcs *, vmxarea); | ||
86 | static DEFINE_PER_CPU(struct vmcs *, current_vmcs); | ||
87 | |||
88 | static struct page *vmx_io_bitmap_a; | ||
89 | static struct page *vmx_io_bitmap_b; | ||
90 | |||
91 | static struct vmcs_config { | ||
92 | int size; | ||
93 | int order; | ||
94 | u32 revision_id; | ||
95 | u32 pin_based_exec_ctrl; | ||
96 | u32 cpu_based_exec_ctrl; | ||
97 | u32 cpu_based_2nd_exec_ctrl; | ||
98 | u32 vmexit_ctrl; | ||
99 | u32 vmentry_ctrl; | ||
100 | } vmcs_config; | ||
101 | |||
102 | #define VMX_SEGMENT_FIELD(seg) \ | ||
103 | [VCPU_SREG_##seg] = { \ | ||
104 | .selector = GUEST_##seg##_SELECTOR, \ | ||
105 | .base = GUEST_##seg##_BASE, \ | ||
106 | .limit = GUEST_##seg##_LIMIT, \ | ||
107 | .ar_bytes = GUEST_##seg##_AR_BYTES, \ | ||
108 | } | ||
109 | |||
110 | static struct kvm_vmx_segment_field { | ||
111 | unsigned selector; | ||
112 | unsigned base; | ||
113 | unsigned limit; | ||
114 | unsigned ar_bytes; | ||
115 | } kvm_vmx_segment_fields[] = { | ||
116 | VMX_SEGMENT_FIELD(CS), | ||
117 | VMX_SEGMENT_FIELD(DS), | ||
118 | VMX_SEGMENT_FIELD(ES), | ||
119 | VMX_SEGMENT_FIELD(FS), | ||
120 | VMX_SEGMENT_FIELD(GS), | ||
121 | VMX_SEGMENT_FIELD(SS), | ||
122 | VMX_SEGMENT_FIELD(TR), | ||
123 | VMX_SEGMENT_FIELD(LDTR), | ||
124 | }; | ||
125 | |||
126 | /* | ||
127 | * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it | ||
128 | * away by decrementing the array size. | ||
129 | */ | ||
130 | static const u32 vmx_msr_index[] = { | ||
131 | #ifdef CONFIG_X86_64 | ||
132 | MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE, | ||
133 | #endif | ||
134 | MSR_EFER, MSR_K6_STAR, | ||
135 | }; | ||
136 | #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index) | ||
137 | |||
138 | static void load_msrs(struct kvm_msr_entry *e, int n) | ||
139 | { | ||
140 | int i; | ||
141 | |||
142 | for (i = 0; i < n; ++i) | ||
143 | wrmsrl(e[i].index, e[i].data); | ||
144 | } | ||
145 | |||
146 | static void save_msrs(struct kvm_msr_entry *e, int n) | ||
147 | { | ||
148 | int i; | ||
149 | |||
150 | for (i = 0; i < n; ++i) | ||
151 | rdmsrl(e[i].index, e[i].data); | ||
152 | } | ||
153 | |||
154 | static inline int is_page_fault(u32 intr_info) | ||
155 | { | ||
156 | return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | | ||
157 | INTR_INFO_VALID_MASK)) == | ||
158 | (INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK); | ||
159 | } | ||
160 | |||
161 | static inline int is_no_device(u32 intr_info) | ||
162 | { | ||
163 | return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | | ||
164 | INTR_INFO_VALID_MASK)) == | ||
165 | (INTR_TYPE_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK); | ||
166 | } | ||
167 | |||
168 | static inline int is_invalid_opcode(u32 intr_info) | ||
169 | { | ||
170 | return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | | ||
171 | INTR_INFO_VALID_MASK)) == | ||
172 | (INTR_TYPE_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK); | ||
173 | } | ||
174 | |||
175 | static inline int is_external_interrupt(u32 intr_info) | ||
176 | { | ||
177 | return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) | ||
178 | == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK); | ||
179 | } | ||
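/*
 * Note on the helpers above: the exit interruption-information field packs
 * the vector in bits 7:0, the event type in bits 10:8 and a "valid" flag in
 * bit 31.  Each helper masks the bits it cares about (type, vector, valid)
 * and compares against a single constant, so each one matches exactly one
 * kind of event, e.g. "a valid hardware exception whose vector is PF_VECTOR".
 */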
180 | |||
181 | static inline int cpu_has_vmx_tpr_shadow(void) | ||
182 | { | ||
183 | return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW); | ||
184 | } | ||
185 | |||
186 | static inline int vm_need_tpr_shadow(struct kvm *kvm) | ||
187 | { | ||
188 | return ((cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm))); | ||
189 | } | ||
190 | |||
191 | static inline int cpu_has_secondary_exec_ctrls(void) | ||
192 | { | ||
193 | return (vmcs_config.cpu_based_exec_ctrl & | ||
194 | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS); | ||
195 | } | ||
196 | |||
197 | static inline int cpu_has_vmx_virtualize_apic_accesses(void) | ||
198 | { | ||
199 | return (vmcs_config.cpu_based_2nd_exec_ctrl & | ||
200 | SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES); | ||
201 | } | ||
202 | |||
203 | static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm) | ||
204 | { | ||
205 | return ((cpu_has_vmx_virtualize_apic_accesses()) && | ||
206 | (irqchip_in_kernel(kvm))); | ||
207 | } | ||
208 | |||
209 | static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) | ||
210 | { | ||
211 | int i; | ||
212 | |||
213 | for (i = 0; i < vmx->nmsrs; ++i) | ||
214 | if (vmx->guest_msrs[i].index == msr) | ||
215 | return i; | ||
216 | return -1; | ||
217 | } | ||
218 | |||
219 | static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr) | ||
220 | { | ||
221 | int i; | ||
222 | |||
223 | i = __find_msr_index(vmx, msr); | ||
224 | if (i >= 0) | ||
225 | return &vmx->guest_msrs[i]; | ||
226 | return NULL; | ||
227 | } | ||
228 | |||
229 | static void vmcs_clear(struct vmcs *vmcs) | ||
230 | { | ||
231 | u64 phys_addr = __pa(vmcs); | ||
232 | u8 error; | ||
233 | |||
234 | asm volatile (ASM_VMX_VMCLEAR_RAX "; setna %0" | ||
235 | : "=g"(error) : "a"(&phys_addr), "m"(phys_addr) | ||
236 | : "cc", "memory"); | ||
237 | if (error) | ||
238 | printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n", | ||
239 | vmcs, phys_addr); | ||
240 | } | ||
241 | |||
242 | static void __vcpu_clear(void *arg) | ||
243 | { | ||
244 | struct vcpu_vmx *vmx = arg; | ||
245 | int cpu = raw_smp_processor_id(); | ||
246 | |||
247 | if (vmx->vcpu.cpu == cpu) | ||
248 | vmcs_clear(vmx->vmcs); | ||
249 | if (per_cpu(current_vmcs, cpu) == vmx->vmcs) | ||
250 | per_cpu(current_vmcs, cpu) = NULL; | ||
251 | rdtscll(vmx->vcpu.arch.host_tsc); | ||
252 | } | ||
253 | |||
254 | static void vcpu_clear(struct vcpu_vmx *vmx) | ||
255 | { | ||
256 | if (vmx->vcpu.cpu == -1) | ||
257 | return; | ||
258 | smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 0, 1); | ||
259 | vmx->launched = 0; | ||
260 | } | ||
261 | |||
262 | static unsigned long vmcs_readl(unsigned long field) | ||
263 | { | ||
264 | unsigned long value; | ||
265 | |||
266 | asm volatile (ASM_VMX_VMREAD_RDX_RAX | ||
267 | : "=a"(value) : "d"(field) : "cc"); | ||
268 | return value; | ||
269 | } | ||
270 | |||
271 | static u16 vmcs_read16(unsigned long field) | ||
272 | { | ||
273 | return vmcs_readl(field); | ||
274 | } | ||
275 | |||
276 | static u32 vmcs_read32(unsigned long field) | ||
277 | { | ||
278 | return vmcs_readl(field); | ||
279 | } | ||
280 | |||
281 | static u64 vmcs_read64(unsigned long field) | ||
282 | { | ||
283 | #ifdef CONFIG_X86_64 | ||
284 | return vmcs_readl(field); | ||
285 | #else | ||
286 | return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32); | ||
287 | #endif | ||
288 | } | ||
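/*
 * On 32-bit hosts VMREAD only returns the natural word size, and 64-bit
 * VMCS fields are architected as a pair of 32-bit encodings: "field" is the
 * low half and "field+1" the high half.  That is why the !CONFIG_X86_64 path
 * above issues two reads and stitches the halves together.
 */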
289 | |||
290 | static noinline void vmwrite_error(unsigned long field, unsigned long value) | ||
291 | { | ||
292 | printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n", | ||
293 | field, value, vmcs_read32(VM_INSTRUCTION_ERROR)); | ||
294 | dump_stack(); | ||
295 | } | ||
296 | |||
297 | static void vmcs_writel(unsigned long field, unsigned long value) | ||
298 | { | ||
299 | u8 error; | ||
300 | |||
301 | asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0" | ||
302 | : "=q"(error) : "a"(value), "d"(field) : "cc"); | ||
303 | if (unlikely(error)) | ||
304 | vmwrite_error(field, value); | ||
305 | } | ||
306 | |||
307 | static void vmcs_write16(unsigned long field, u16 value) | ||
308 | { | ||
309 | vmcs_writel(field, value); | ||
310 | } | ||
311 | |||
312 | static void vmcs_write32(unsigned long field, u32 value) | ||
313 | { | ||
314 | vmcs_writel(field, value); | ||
315 | } | ||
316 | |||
317 | static void vmcs_write64(unsigned long field, u64 value) | ||
318 | { | ||
319 | #ifdef CONFIG_X86_64 | ||
320 | vmcs_writel(field, value); | ||
321 | #else | ||
322 | vmcs_writel(field, value); | ||
323 | asm volatile (""); | ||
324 | vmcs_writel(field+1, value >> 32); | ||
325 | #endif | ||
326 | } | ||
327 | |||
328 | static void vmcs_clear_bits(unsigned long field, u32 mask) | ||
329 | { | ||
330 | vmcs_writel(field, vmcs_readl(field) & ~mask); | ||
331 | } | ||
332 | |||
333 | static void vmcs_set_bits(unsigned long field, u32 mask) | ||
334 | { | ||
335 | vmcs_writel(field, vmcs_readl(field) | mask); | ||
336 | } | ||
337 | |||
338 | static void update_exception_bitmap(struct kvm_vcpu *vcpu) | ||
339 | { | ||
340 | u32 eb; | ||
341 | |||
342 | eb = (1u << PF_VECTOR) | (1u << UD_VECTOR); | ||
343 | if (!vcpu->fpu_active) | ||
344 | eb |= 1u << NM_VECTOR; | ||
345 | if (vcpu->guest_debug.enabled) | ||
346 | eb |= 1u << 1; | ||
347 | if (vcpu->arch.rmode.active) | ||
348 | eb = ~0; | ||
349 | vmcs_write32(EXCEPTION_BITMAP, eb); | ||
350 | } | ||
351 | |||
352 | static void reload_tss(void) | ||
353 | { | ||
354 | #ifndef CONFIG_X86_64 | ||
355 | |||
356 | /* | ||
357 | * VT restores TR but not its size. Useless. | ||
358 | */ | ||
359 | struct descriptor_table gdt; | ||
360 | struct segment_descriptor *descs; | ||
361 | |||
362 | get_gdt(&gdt); | ||
363 | descs = (void *)gdt.base; | ||
364 | descs[GDT_ENTRY_TSS].type = 9; /* available TSS */ | ||
365 | load_TR_desc(); | ||
366 | #endif | ||
367 | } | ||
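/*
 * Background for reload_tss(): loading TR marks the referenced GDT
 * descriptor busy, and a later ltr with a busy descriptor faults.  Since a
 * VM exit restores the TR selector but not the full limit the host expects,
 * the descriptor is flipped back to "available TSS" (type 9) before
 * load_TR_desc() reloads it.
 */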
368 | |||
369 | static void load_transition_efer(struct vcpu_vmx *vmx) | ||
370 | { | ||
371 | int efer_offset = vmx->msr_offset_efer; | ||
372 | u64 host_efer, guest_efer, ignore_bits; | ||
373 | | ||
374 | if (efer_offset < 0) | ||
375 | return; | ||
376 | host_efer = vmx->host_msrs[efer_offset].data; | ||
377 | guest_efer = vmx->guest_msrs[efer_offset].data; | ||
378 | /* | ||
379 | * NX is emulated; LMA and LME handled by hardware; SCE meaningless | ||
380 | * outside long mode | ||
381 | */ | ||
382 | ignore_bits = EFER_NX | EFER_SCE; | ||
383 | #ifdef CONFIG_X86_64 | ||
384 | ignore_bits |= EFER_LMA | EFER_LME; | ||
385 | /* SCE is meaningful only in long mode on Intel */ | ||
386 | if (guest_efer & EFER_LMA) | ||
387 | ignore_bits &= ~(u64)EFER_SCE; | ||
388 | #endif | ||
389 | if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits)) | ||
390 | return; | ||
391 | |||
392 | vmx->host_state.guest_efer_loaded = 1; | ||
393 | guest_efer &= ~ignore_bits; | ||
394 | guest_efer |= host_efer & ignore_bits; | ||
395 | wrmsrl(MSR_EFER, guest_efer); | ||
396 | vmx->vcpu.stat.efer_reload++; | ||
397 | } | ||
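/*
 * Example of the fast path above (assuming a 64-bit host running a 32-bit
 * guest): host EFER typically has SCE/LME/LMA/NX set while the guest EFER
 * has at most NX.  All of those bits are in ignore_bits, so the masked
 * values compare equal and the costly EFER wrmsr on every guest entry is
 * skipped.
 */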
398 | |||
399 | static void reload_host_efer(struct vcpu_vmx *vmx) | ||
400 | { | ||
401 | if (vmx->host_state.guest_efer_loaded) { | ||
402 | vmx->host_state.guest_efer_loaded = 0; | ||
403 | load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1); | ||
404 | } | ||
405 | } | ||
406 | |||
407 | static void vmx_save_host_state(struct kvm_vcpu *vcpu) | ||
408 | { | ||
409 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
410 | |||
411 | if (vmx->host_state.loaded) | ||
412 | return; | ||
413 | |||
414 | vmx->host_state.loaded = 1; | ||
415 | /* | ||
416 | * Set host fs and gs selectors. Unfortunately, 22.2.3 does not | ||
417 | * allow segment selectors with cpl > 0 or ti == 1. | ||
418 | */ | ||
419 | vmx->host_state.ldt_sel = read_ldt(); | ||
420 | vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel; | ||
421 | vmx->host_state.fs_sel = read_fs(); | ||
422 | if (!(vmx->host_state.fs_sel & 7)) { | ||
423 | vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); | ||
424 | vmx->host_state.fs_reload_needed = 0; | ||
425 | } else { | ||
426 | vmcs_write16(HOST_FS_SELECTOR, 0); | ||
427 | vmx->host_state.fs_reload_needed = 1; | ||
428 | } | ||
429 | vmx->host_state.gs_sel = read_gs(); | ||
430 | if (!(vmx->host_state.gs_sel & 7)) | ||
431 | vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel); | ||
432 | else { | ||
433 | vmcs_write16(HOST_GS_SELECTOR, 0); | ||
434 | vmx->host_state.gs_ldt_reload_needed = 1; | ||
435 | } | ||
436 | |||
437 | #ifdef CONFIG_X86_64 | ||
438 | vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE)); | ||
439 | vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE)); | ||
440 | #else | ||
441 | vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel)); | ||
442 | vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel)); | ||
443 | #endif | ||
444 | |||
445 | #ifdef CONFIG_X86_64 | ||
446 | if (is_long_mode(&vmx->vcpu)) | ||
447 | save_msrs(vmx->host_msrs + | ||
448 | vmx->msr_offset_kernel_gs_base, 1); | ||
449 | |||
450 | #endif | ||
451 | load_msrs(vmx->guest_msrs, vmx->save_nmsrs); | ||
452 | load_transition_efer(vmx); | ||
453 | } | ||
454 | |||
455 | static void vmx_load_host_state(struct vcpu_vmx *vmx) | ||
456 | { | ||
457 | unsigned long flags; | ||
458 | |||
459 | if (!vmx->host_state.loaded) | ||
460 | return; | ||
461 | |||
462 | ++vmx->vcpu.stat.host_state_reload; | ||
463 | vmx->host_state.loaded = 0; | ||
464 | if (vmx->host_state.fs_reload_needed) | ||
465 | load_fs(vmx->host_state.fs_sel); | ||
466 | if (vmx->host_state.gs_ldt_reload_needed) { | ||
467 | load_ldt(vmx->host_state.ldt_sel); | ||
468 | /* | ||
469 | * If we have to reload gs, we must take care to | ||
470 | * preserve our gs base. | ||
471 | */ | ||
472 | local_irq_save(flags); | ||
473 | load_gs(vmx->host_state.gs_sel); | ||
474 | #ifdef CONFIG_X86_64 | ||
475 | wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE)); | ||
476 | #endif | ||
477 | local_irq_restore(flags); | ||
478 | } | ||
479 | reload_tss(); | ||
480 | save_msrs(vmx->guest_msrs, vmx->save_nmsrs); | ||
481 | load_msrs(vmx->host_msrs, vmx->save_nmsrs); | ||
482 | reload_host_efer(vmx); | ||
483 | } | ||
484 | |||
485 | /* | ||
486 | * Switches to specified vcpu, until a matching vcpu_put(), but assumes | ||
487 | * vcpu mutex is already taken. | ||
488 | */ | ||
489 | static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | ||
490 | { | ||
491 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
492 | u64 phys_addr = __pa(vmx->vmcs); | ||
493 | u64 tsc_this, delta; | ||
494 | |||
495 | if (vcpu->cpu != cpu) { | ||
496 | vcpu_clear(vmx); | ||
497 | kvm_migrate_apic_timer(vcpu); | ||
498 | } | ||
499 | |||
500 | if (per_cpu(current_vmcs, cpu) != vmx->vmcs) { | ||
501 | u8 error; | ||
502 | |||
503 | per_cpu(current_vmcs, cpu) = vmx->vmcs; | ||
504 | asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0" | ||
505 | : "=g"(error) : "a"(&phys_addr), "m"(phys_addr) | ||
506 | : "cc"); | ||
507 | if (error) | ||
508 | printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n", | ||
509 | vmx->vmcs, phys_addr); | ||
510 | } | ||
511 | |||
512 | if (vcpu->cpu != cpu) { | ||
513 | struct descriptor_table dt; | ||
514 | unsigned long sysenter_esp; | ||
515 | |||
516 | vcpu->cpu = cpu; | ||
517 | /* | ||
518 | * Linux uses per-cpu TSS and GDT, so set these when switching | ||
519 | * processors. | ||
520 | */ | ||
521 | vmcs_writel(HOST_TR_BASE, read_tr_base()); /* 22.2.4 */ | ||
522 | get_gdt(&dt); | ||
523 | vmcs_writel(HOST_GDTR_BASE, dt.base); /* 22.2.4 */ | ||
524 | |||
525 | rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); | ||
526 | vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ | ||
527 | |||
528 | /* | ||
529 | * Make sure the time stamp counter stays monotonic. | ||
530 | */ | ||
531 | rdtscll(tsc_this); | ||
532 | delta = vcpu->arch.host_tsc - tsc_this; | ||
533 | vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta); | ||
534 | } | ||
535 | } | ||
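/*
 * The TSC fixup above keeps the guest's view continuous across a CPU
 * migration: host_tsc was sampled on the old CPU in __vcpu_clear(), so
 * delta = old_tsc - new_tsc, and folding it into TSC_OFFSET means
 * guest_tsc = new_host_tsc + offset + delta lands (roughly) where the guest
 * last saw it instead of jumping with the unsynchronized TSCs.
 */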
536 | |||
537 | static void vmx_vcpu_put(struct kvm_vcpu *vcpu) | ||
538 | { | ||
539 | vmx_load_host_state(to_vmx(vcpu)); | ||
540 | } | ||
541 | |||
542 | static void vmx_fpu_activate(struct kvm_vcpu *vcpu) | ||
543 | { | ||
544 | if (vcpu->fpu_active) | ||
545 | return; | ||
546 | vcpu->fpu_active = 1; | ||
547 | vmcs_clear_bits(GUEST_CR0, X86_CR0_TS); | ||
548 | if (vcpu->arch.cr0 & X86_CR0_TS) | ||
549 | vmcs_set_bits(GUEST_CR0, X86_CR0_TS); | ||
550 | update_exception_bitmap(vcpu); | ||
551 | } | ||
552 | |||
553 | static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu) | ||
554 | { | ||
555 | if (!vcpu->fpu_active) | ||
556 | return; | ||
557 | vcpu->fpu_active = 0; | ||
558 | vmcs_set_bits(GUEST_CR0, X86_CR0_TS); | ||
559 | update_exception_bitmap(vcpu); | ||
560 | } | ||
561 | |||
562 | static void vmx_vcpu_decache(struct kvm_vcpu *vcpu) | ||
563 | { | ||
564 | vcpu_clear(to_vmx(vcpu)); | ||
565 | } | ||
566 | |||
567 | static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) | ||
568 | { | ||
569 | return vmcs_readl(GUEST_RFLAGS); | ||
570 | } | ||
571 | |||
572 | static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) | ||
573 | { | ||
574 | if (vcpu->arch.rmode.active) | ||
575 | rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; | ||
576 | vmcs_writel(GUEST_RFLAGS, rflags); | ||
577 | } | ||
578 | |||
579 | static void skip_emulated_instruction(struct kvm_vcpu *vcpu) | ||
580 | { | ||
581 | unsigned long rip; | ||
582 | u32 interruptibility; | ||
583 | |||
584 | rip = vmcs_readl(GUEST_RIP); | ||
585 | rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN); | ||
586 | vmcs_writel(GUEST_RIP, rip); | ||
587 | |||
588 | /* | ||
589 | * We emulated an instruction, so temporary interrupt blocking | ||
590 | * should be removed, if set. | ||
591 | */ | ||
592 | interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); | ||
593 | if (interruptibility & 3) | ||
594 | vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, | ||
595 | interruptibility & ~3); | ||
596 | vcpu->arch.interrupt_window_open = 1; | ||
597 | } | ||
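/*
 * Bits 0 and 1 of GUEST_INTERRUPTIBILITY_INFO are "blocking by STI" and
 * "blocking by MOV SS".  Had the guest executed the instruction itself the
 * blocking would have expired, so after emulating it the same two bits are
 * cleared here.
 */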
598 | |||
599 | static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, | ||
600 | bool has_error_code, u32 error_code) | ||
601 | { | ||
602 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, | ||
603 | nr | INTR_TYPE_EXCEPTION | ||
604 | | (has_error_code ? INTR_INFO_DELIEVER_CODE_MASK : 0) | ||
605 | | INTR_INFO_VALID_MASK); | ||
606 | if (has_error_code) | ||
607 | vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); | ||
608 | } | ||
609 | |||
610 | static bool vmx_exception_injected(struct kvm_vcpu *vcpu) | ||
611 | { | ||
612 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
613 | |||
614 | return !(vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); | ||
615 | } | ||
616 | |||
617 | /* | ||
618 | * Swap MSR entry in host/guest MSR entry array. | ||
619 | */ | ||
620 | #ifdef CONFIG_X86_64 | ||
621 | static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) | ||
622 | { | ||
623 | struct kvm_msr_entry tmp; | ||
624 | |||
625 | tmp = vmx->guest_msrs[to]; | ||
626 | vmx->guest_msrs[to] = vmx->guest_msrs[from]; | ||
627 | vmx->guest_msrs[from] = tmp; | ||
628 | tmp = vmx->host_msrs[to]; | ||
629 | vmx->host_msrs[to] = vmx->host_msrs[from]; | ||
630 | vmx->host_msrs[from] = tmp; | ||
631 | } | ||
632 | #endif | ||
633 | |||
634 | /* | ||
635 | * Set up the vmcs to automatically save and restore system | ||
636 | * msrs. Don't touch the 64-bit msrs if the guest is in legacy | ||
637 | * mode, as fiddling with msrs is very expensive. | ||
638 | */ | ||
639 | static void setup_msrs(struct vcpu_vmx *vmx) | ||
640 | { | ||
641 | int save_nmsrs; | ||
642 | |||
643 | save_nmsrs = 0; | ||
644 | #ifdef CONFIG_X86_64 | ||
645 | if (is_long_mode(&vmx->vcpu)) { | ||
646 | int index; | ||
647 | |||
648 | index = __find_msr_index(vmx, MSR_SYSCALL_MASK); | ||
649 | if (index >= 0) | ||
650 | move_msr_up(vmx, index, save_nmsrs++); | ||
651 | index = __find_msr_index(vmx, MSR_LSTAR); | ||
652 | if (index >= 0) | ||
653 | move_msr_up(vmx, index, save_nmsrs++); | ||
654 | index = __find_msr_index(vmx, MSR_CSTAR); | ||
655 | if (index >= 0) | ||
656 | move_msr_up(vmx, index, save_nmsrs++); | ||
657 | index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE); | ||
658 | if (index >= 0) | ||
659 | move_msr_up(vmx, index, save_nmsrs++); | ||
660 | /* | ||
661 | * MSR_K6_STAR is only needed on long mode guests, and only | ||
662 | * if efer.sce is enabled. | ||
663 | */ | ||
664 | index = __find_msr_index(vmx, MSR_K6_STAR); | ||
665 | if ((index >= 0) && (vmx->vcpu.arch.shadow_efer & EFER_SCE)) | ||
666 | move_msr_up(vmx, index, save_nmsrs++); | ||
667 | } | ||
668 | #endif | ||
669 | vmx->save_nmsrs = save_nmsrs; | ||
670 | |||
671 | #ifdef CONFIG_X86_64 | ||
672 | vmx->msr_offset_kernel_gs_base = | ||
673 | __find_msr_index(vmx, MSR_KERNEL_GS_BASE); | ||
674 | #endif | ||
675 | vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER); | ||
676 | } | ||
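/*
 * After setup_msrs() the first save_nmsrs entries of guest_msrs[] (and the
 * matching host_msrs[] entries) are the MSRs that save_msrs()/load_msrs()
 * actually switch on every host/guest transition; EFER is tracked
 * separately via msr_offset_efer and only written when
 * load_transition_efer() decides the values really differ.
 */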
677 | |||
678 | /* | ||
679 | * reads and returns guest's timestamp counter "register" | ||
680 | * guest_tsc = host_tsc + tsc_offset -- 21.3 | ||
681 | */ | ||
682 | static u64 guest_read_tsc(void) | ||
683 | { | ||
684 | u64 host_tsc, tsc_offset; | ||
685 | |||
686 | rdtscll(host_tsc); | ||
687 | tsc_offset = vmcs_read64(TSC_OFFSET); | ||
688 | return host_tsc + tsc_offset; | ||
689 | } | ||
690 | |||
691 | /* | ||
692 | * writes 'guest_tsc' into guest's timestamp counter "register" | ||
693 | * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc | ||
694 | */ | ||
695 | static void guest_write_tsc(u64 guest_tsc) | ||
696 | { | ||
697 | u64 host_tsc; | ||
698 | |||
699 | rdtscll(host_tsc); | ||
700 | vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc); | ||
701 | } | ||
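/*
 * Worked example of the two helpers above: if the host TSC currently reads
 * 1,000,000 and the guest writes 0 to its TSC, TSC_OFFSET becomes
 * -1,000,000; a guest rdtsc 500 host cycles later then observes
 * 1,000,500 + (-1,000,000) = 500, i.e. the guest clock advances at the host
 * rate from the value the guest programmed.
 */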
702 | |||
703 | /* | ||
704 | * Reads an msr value (of 'msr_index') into 'pdata'. | ||
705 | * Returns 0 on success, non-0 otherwise. | ||
706 | * Assumes vcpu_load() was already called. | ||
707 | */ | ||
708 | static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) | ||
709 | { | ||
710 | u64 data; | ||
711 | struct kvm_msr_entry *msr; | ||
712 | |||
713 | if (!pdata) { | ||
714 | printk(KERN_ERR "BUG: get_msr called with NULL pdata\n"); | ||
715 | return -EINVAL; | ||
716 | } | ||
717 | |||
718 | switch (msr_index) { | ||
719 | #ifdef CONFIG_X86_64 | ||
720 | case MSR_FS_BASE: | ||
721 | data = vmcs_readl(GUEST_FS_BASE); | ||
722 | break; | ||
723 | case MSR_GS_BASE: | ||
724 | data = vmcs_readl(GUEST_GS_BASE); | ||
725 | break; | ||
726 | case MSR_EFER: | ||
727 | return kvm_get_msr_common(vcpu, msr_index, pdata); | ||
728 | #endif | ||
729 | case MSR_IA32_TIME_STAMP_COUNTER: | ||
730 | data = guest_read_tsc(); | ||
731 | break; | ||
732 | case MSR_IA32_SYSENTER_CS: | ||
733 | data = vmcs_read32(GUEST_SYSENTER_CS); | ||
734 | break; | ||
735 | case MSR_IA32_SYSENTER_EIP: | ||
736 | data = vmcs_readl(GUEST_SYSENTER_EIP); | ||
737 | break; | ||
738 | case MSR_IA32_SYSENTER_ESP: | ||
739 | data = vmcs_readl(GUEST_SYSENTER_ESP); | ||
740 | break; | ||
741 | default: | ||
742 | msr = find_msr_entry(to_vmx(vcpu), msr_index); | ||
743 | if (msr) { | ||
744 | data = msr->data; | ||
745 | break; | ||
746 | } | ||
747 | return kvm_get_msr_common(vcpu, msr_index, pdata); | ||
748 | } | ||
749 | |||
750 | *pdata = data; | ||
751 | return 0; | ||
752 | } | ||
753 | |||
754 | /* | ||
755 | * Writes msr value into the appropriate "register". | ||
756 | * Returns 0 on success, non-0 otherwise. | ||
757 | * Assumes vcpu_load() was already called. | ||
758 | */ | ||
759 | static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) | ||
760 | { | ||
761 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
762 | struct kvm_msr_entry *msr; | ||
763 | int ret = 0; | ||
764 | |||
765 | switch (msr_index) { | ||
766 | #ifdef CONFIG_X86_64 | ||
767 | case MSR_EFER: | ||
768 | ret = kvm_set_msr_common(vcpu, msr_index, data); | ||
769 | if (vmx->host_state.loaded) { | ||
770 | reload_host_efer(vmx); | ||
771 | load_transition_efer(vmx); | ||
772 | } | ||
773 | break; | ||
774 | case MSR_FS_BASE: | ||
775 | vmcs_writel(GUEST_FS_BASE, data); | ||
776 | break; | ||
777 | case MSR_GS_BASE: | ||
778 | vmcs_writel(GUEST_GS_BASE, data); | ||
779 | break; | ||
780 | #endif | ||
781 | case MSR_IA32_SYSENTER_CS: | ||
782 | vmcs_write32(GUEST_SYSENTER_CS, data); | ||
783 | break; | ||
784 | case MSR_IA32_SYSENTER_EIP: | ||
785 | vmcs_writel(GUEST_SYSENTER_EIP, data); | ||
786 | break; | ||
787 | case MSR_IA32_SYSENTER_ESP: | ||
788 | vmcs_writel(GUEST_SYSENTER_ESP, data); | ||
789 | break; | ||
790 | case MSR_IA32_TIME_STAMP_COUNTER: | ||
791 | guest_write_tsc(data); | ||
792 | break; | ||
793 | default: | ||
794 | msr = find_msr_entry(vmx, msr_index); | ||
795 | if (msr) { | ||
796 | msr->data = data; | ||
797 | if (vmx->host_state.loaded) | ||
798 | load_msrs(vmx->guest_msrs, vmx->save_nmsrs); | ||
799 | break; | ||
800 | } | ||
801 | ret = kvm_set_msr_common(vcpu, msr_index, data); | ||
802 | } | ||
803 | |||
804 | return ret; | ||
805 | } | ||
806 | |||
807 | /* | ||
808 | * Sync the rsp and rip registers into the vcpu structure. This allows | ||
809 | * registers to be accessed by indexing vcpu->arch.regs. | ||
810 | */ | ||
811 | static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu) | ||
812 | { | ||
813 | vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); | ||
814 | vcpu->arch.rip = vmcs_readl(GUEST_RIP); | ||
815 | } | ||
816 | |||
817 | /* | ||
818 | * Syncs rsp and rip back into the vmcs. Should be called after possible | ||
819 | * modification. | ||
820 | */ | ||
821 | static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu) | ||
822 | { | ||
823 | vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); | ||
824 | vmcs_writel(GUEST_RIP, vcpu->arch.rip); | ||
825 | } | ||
826 | |||
827 | static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg) | ||
828 | { | ||
829 | unsigned long dr7 = 0x400; | ||
830 | int old_singlestep; | ||
831 | |||
832 | old_singlestep = vcpu->guest_debug.singlestep; | ||
833 | |||
834 | vcpu->guest_debug.enabled = dbg->enabled; | ||
835 | if (vcpu->guest_debug.enabled) { | ||
836 | int i; | ||
837 | |||
838 | dr7 |= 0x200; /* exact */ | ||
839 | for (i = 0; i < 4; ++i) { | ||
840 | if (!dbg->breakpoints[i].enabled) | ||
841 | continue; | ||
842 | vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address; | ||
843 | dr7 |= 2 << (i*2); /* global enable */ | ||
844 | dr7 |= 0 << (i*4+16); /* execution breakpoint */ | ||
845 | } | ||
846 | |||
847 | vcpu->guest_debug.singlestep = dbg->singlestep; | ||
848 | } else | ||
849 | vcpu->guest_debug.singlestep = 0; | ||
850 | |||
851 | if (old_singlestep && !vcpu->guest_debug.singlestep) { | ||
852 | unsigned long flags; | ||
853 | |||
854 | flags = vmcs_readl(GUEST_RFLAGS); | ||
855 | flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF); | ||
856 | vmcs_writel(GUEST_RFLAGS, flags); | ||
857 | } | ||
858 | |||
859 | update_exception_bitmap(vcpu); | ||
860 | vmcs_writel(GUEST_DR7, dr7); | ||
861 | |||
862 | return 0; | ||
863 | } | ||
864 | |||
865 | static int vmx_get_irq(struct kvm_vcpu *vcpu) | ||
866 | { | ||
867 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
868 | u32 idtv_info_field; | ||
869 | |||
870 | idtv_info_field = vmx->idt_vectoring_info; | ||
871 | if (idtv_info_field & INTR_INFO_VALID_MASK) { | ||
872 | if (is_external_interrupt(idtv_info_field)) | ||
873 | return idtv_info_field & VECTORING_INFO_VECTOR_MASK; | ||
874 | else | ||
875 | printk(KERN_DEBUG "pending exception: not handled yet\n"); | ||
876 | } | ||
877 | return -1; | ||
878 | } | ||
879 | |||
880 | static __init int cpu_has_kvm_support(void) | ||
881 | { | ||
882 | unsigned long ecx = cpuid_ecx(1); | ||
883 | return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */ | ||
884 | } | ||
885 | |||
886 | static __init int vmx_disabled_by_bios(void) | ||
887 | { | ||
888 | u64 msr; | ||
889 | |||
890 | rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); | ||
891 | return (msr & (MSR_IA32_FEATURE_CONTROL_LOCKED | | ||
892 | MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED)) | ||
893 | == MSR_IA32_FEATURE_CONTROL_LOCKED; | ||
894 | /* locked but not enabled */ | ||
895 | } | ||
896 | |||
897 | static void hardware_enable(void *garbage) | ||
898 | { | ||
899 | int cpu = raw_smp_processor_id(); | ||
900 | u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); | ||
901 | u64 old; | ||
902 | |||
903 | rdmsrl(MSR_IA32_FEATURE_CONTROL, old); | ||
904 | if ((old & (MSR_IA32_FEATURE_CONTROL_LOCKED | | ||
905 | MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED)) | ||
906 | != (MSR_IA32_FEATURE_CONTROL_LOCKED | | ||
907 | MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED)) | ||
908 | /* enable and lock */ | ||
909 | wrmsrl(MSR_IA32_FEATURE_CONTROL, old | | ||
910 | MSR_IA32_FEATURE_CONTROL_LOCKED | | ||
911 | MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED); | ||
912 | write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */ | ||
913 | asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr) | ||
914 | : "memory", "cc"); | ||
915 | } | ||
916 | |||
917 | static void hardware_disable(void *garbage) | ||
918 | { | ||
919 | asm volatile (ASM_VMX_VMXOFF : : : "cc"); | ||
920 | } | ||
921 | |||
922 | static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, | ||
923 | u32 msr, u32 *result) | ||
924 | { | ||
925 | u32 vmx_msr_low, vmx_msr_high; | ||
926 | u32 ctl = ctl_min | ctl_opt; | ||
927 | |||
928 | rdmsr(msr, vmx_msr_low, vmx_msr_high); | ||
929 | |||
930 | ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */ | ||
931 | ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */ | ||
932 | |||
933 | /* Ensure minimum (required) set of control bits are supported. */ | ||
934 | if (ctl_min & ~ctl) | ||
935 | return -EIO; | ||
936 | |||
937 | *result = ctl; | ||
938 | return 0; | ||
939 | } | ||
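/*
 * adjust_vmx_controls() relies on the encoding of the VMX capability MSRs:
 * the low 32 bits have a 1 for every control bit that must be 1, the high
 * 32 bits have a 1 for every bit that may be 1.  An optional (ctl_opt) bit
 * the CPU lacks is silently dropped by the "& high word" step, while a
 * required (ctl_min) bit the CPU cannot provide survives the
 * "ctl_min & ~ctl" test and makes setup fail with -EIO.
 */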
940 | |||
941 | static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) | ||
942 | { | ||
943 | u32 vmx_msr_low, vmx_msr_high; | ||
944 | u32 min, opt; | ||
945 | u32 _pin_based_exec_control = 0; | ||
946 | u32 _cpu_based_exec_control = 0; | ||
947 | u32 _cpu_based_2nd_exec_control = 0; | ||
948 | u32 _vmexit_control = 0; | ||
949 | u32 _vmentry_control = 0; | ||
950 | |||
951 | min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING; | ||
952 | opt = 0; | ||
953 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS, | ||
954 | &_pin_based_exec_control) < 0) | ||
955 | return -EIO; | ||
956 | |||
957 | min = CPU_BASED_HLT_EXITING | | ||
958 | #ifdef CONFIG_X86_64 | ||
959 | CPU_BASED_CR8_LOAD_EXITING | | ||
960 | CPU_BASED_CR8_STORE_EXITING | | ||
961 | #endif | ||
962 | CPU_BASED_USE_IO_BITMAPS | | ||
963 | CPU_BASED_MOV_DR_EXITING | | ||
964 | CPU_BASED_USE_TSC_OFFSETING; | ||
965 | opt = CPU_BASED_TPR_SHADOW | | ||
966 | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; | ||
967 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS, | ||
968 | &_cpu_based_exec_control) < 0) | ||
969 | return -EIO; | ||
970 | #ifdef CONFIG_X86_64 | ||
971 | if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) | ||
972 | _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING & | ||
973 | ~CPU_BASED_CR8_STORE_EXITING; | ||
974 | #endif | ||
975 | if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) { | ||
976 | min = 0; | ||
977 | opt = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | | ||
978 | SECONDARY_EXEC_WBINVD_EXITING; | ||
979 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS2, | ||
980 | &_cpu_based_2nd_exec_control) < 0) | ||
981 | return -EIO; | ||
982 | } | ||
983 | #ifndef CONFIG_X86_64 | ||
984 | if (!(_cpu_based_2nd_exec_control & | ||
985 | SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) | ||
986 | _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW; | ||
987 | #endif | ||
988 | |||
989 | min = 0; | ||
990 | #ifdef CONFIG_X86_64 | ||
991 | min |= VM_EXIT_HOST_ADDR_SPACE_SIZE; | ||
992 | #endif | ||
993 | opt = 0; | ||
994 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS, | ||
995 | &_vmexit_control) < 0) | ||
996 | return -EIO; | ||
997 | |||
998 | min = opt = 0; | ||
999 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS, | ||
1000 | &_vmentry_control) < 0) | ||
1001 | return -EIO; | ||
1002 | |||
1003 | rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high); | ||
1004 | |||
1005 | /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */ | ||
1006 | if ((vmx_msr_high & 0x1fff) > PAGE_SIZE) | ||
1007 | return -EIO; | ||
1008 | |||
1009 | #ifdef CONFIG_X86_64 | ||
1010 | /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */ | ||
1011 | if (vmx_msr_high & (1u<<16)) | ||
1012 | return -EIO; | ||
1013 | #endif | ||
1014 | |||
1015 | /* Require Write-Back (WB) memory type for VMCS accesses. */ | ||
1016 | if (((vmx_msr_high >> 18) & 15) != 6) | ||
1017 | return -EIO; | ||
1018 | |||
1019 | vmcs_conf->size = vmx_msr_high & 0x1fff; | ||
1020 | vmcs_conf->order = get_order(vmcs_conf->size); | ||
1021 | vmcs_conf->revision_id = vmx_msr_low; | ||
1022 | |||
1023 | vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control; | ||
1024 | vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control; | ||
1025 | vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control; | ||
1026 | vmcs_conf->vmexit_ctrl = _vmexit_control; | ||
1027 | vmcs_conf->vmentry_ctrl = _vmentry_control; | ||
1028 | |||
1029 | return 0; | ||
1030 | } | ||
1031 | |||
1032 | static struct vmcs *alloc_vmcs_cpu(int cpu) | ||
1033 | { | ||
1034 | int node = cpu_to_node(cpu); | ||
1035 | struct page *pages; | ||
1036 | struct vmcs *vmcs; | ||
1037 | |||
1038 | pages = alloc_pages_node(node, GFP_KERNEL, vmcs_config.order); | ||
1039 | if (!pages) | ||
1040 | return NULL; | ||
1041 | vmcs = page_address(pages); | ||
1042 | memset(vmcs, 0, vmcs_config.size); | ||
1043 | vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */ | ||
1044 | return vmcs; | ||
1045 | } | ||
1046 | |||
1047 | static struct vmcs *alloc_vmcs(void) | ||
1048 | { | ||
1049 | return alloc_vmcs_cpu(raw_smp_processor_id()); | ||
1050 | } | ||
1051 | |||
1052 | static void free_vmcs(struct vmcs *vmcs) | ||
1053 | { | ||
1054 | free_pages((unsigned long)vmcs, vmcs_config.order); | ||
1055 | } | ||
1056 | |||
1057 | static void free_kvm_area(void) | ||
1058 | { | ||
1059 | int cpu; | ||
1060 | |||
1061 | for_each_online_cpu(cpu) | ||
1062 | free_vmcs(per_cpu(vmxarea, cpu)); | ||
1063 | } | ||
1064 | |||
1065 | static __init int alloc_kvm_area(void) | ||
1066 | { | ||
1067 | int cpu; | ||
1068 | |||
1069 | for_each_online_cpu(cpu) { | ||
1070 | struct vmcs *vmcs; | ||
1071 | |||
1072 | vmcs = alloc_vmcs_cpu(cpu); | ||
1073 | if (!vmcs) { | ||
1074 | free_kvm_area(); | ||
1075 | return -ENOMEM; | ||
1076 | } | ||
1077 | |||
1078 | per_cpu(vmxarea, cpu) = vmcs; | ||
1079 | } | ||
1080 | return 0; | ||
1081 | } | ||
1082 | |||
1083 | static __init int hardware_setup(void) | ||
1084 | { | ||
1085 | if (setup_vmcs_config(&vmcs_config) < 0) | ||
1086 | return -EIO; | ||
1087 | return alloc_kvm_area(); | ||
1088 | } | ||
1089 | |||
1090 | static __exit void hardware_unsetup(void) | ||
1091 | { | ||
1092 | free_kvm_area(); | ||
1093 | } | ||
1094 | |||
1095 | static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save) | ||
1096 | { | ||
1097 | struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; | ||
1098 | |||
1099 | if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) { | ||
1100 | vmcs_write16(sf->selector, save->selector); | ||
1101 | vmcs_writel(sf->base, save->base); | ||
1102 | vmcs_write32(sf->limit, save->limit); | ||
1103 | vmcs_write32(sf->ar_bytes, save->ar); | ||
1104 | } else { | ||
1105 | u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK) | ||
1106 | << AR_DPL_SHIFT; | ||
1107 | vmcs_write32(sf->ar_bytes, 0x93 | dpl); | ||
1108 | } | ||
1109 | } | ||
1110 | |||
1111 | static void enter_pmode(struct kvm_vcpu *vcpu) | ||
1112 | { | ||
1113 | unsigned long flags; | ||
1114 | |||
1115 | vcpu->arch.rmode.active = 0; | ||
1116 | |||
1117 | vmcs_writel(GUEST_TR_BASE, vcpu->arch.rmode.tr.base); | ||
1118 | vmcs_write32(GUEST_TR_LIMIT, vcpu->arch.rmode.tr.limit); | ||
1119 | vmcs_write32(GUEST_TR_AR_BYTES, vcpu->arch.rmode.tr.ar); | ||
1120 | |||
1121 | flags = vmcs_readl(GUEST_RFLAGS); | ||
1122 | flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM); | ||
1123 | flags |= (vcpu->arch.rmode.save_iopl << IOPL_SHIFT); | ||
1124 | vmcs_writel(GUEST_RFLAGS, flags); | ||
1125 | |||
1126 | vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | | ||
1127 | (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME)); | ||
1128 | |||
1129 | update_exception_bitmap(vcpu); | ||
1130 | |||
1131 | fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->arch.rmode.es); | ||
1132 | fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->arch.rmode.ds); | ||
1133 | fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->arch.rmode.gs); | ||
1134 | fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->arch.rmode.fs); | ||
1135 | |||
1136 | vmcs_write16(GUEST_SS_SELECTOR, 0); | ||
1137 | vmcs_write32(GUEST_SS_AR_BYTES, 0x93); | ||
1138 | |||
1139 | vmcs_write16(GUEST_CS_SELECTOR, | ||
1140 | vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK); | ||
1141 | vmcs_write32(GUEST_CS_AR_BYTES, 0x9b); | ||
1142 | } | ||
1143 | |||
1144 | static gva_t rmode_tss_base(struct kvm *kvm) | ||
1145 | { | ||
1146 | if (!kvm->arch.tss_addr) { | ||
1147 | gfn_t base_gfn = kvm->memslots[0].base_gfn + | ||
1148 | kvm->memslots[0].npages - 3; | ||
1149 | return base_gfn << PAGE_SHIFT; | ||
1150 | } | ||
1151 | return kvm->arch.tss_addr; | ||
1152 | } | ||
1153 | |||
1154 | static void fix_rmode_seg(int seg, struct kvm_save_segment *save) | ||
1155 | { | ||
1156 | struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; | ||
1157 | |||
1158 | save->selector = vmcs_read16(sf->selector); | ||
1159 | save->base = vmcs_readl(sf->base); | ||
1160 | save->limit = vmcs_read32(sf->limit); | ||
1161 | save->ar = vmcs_read32(sf->ar_bytes); | ||
1162 | vmcs_write16(sf->selector, save->base >> 4); | ||
1163 | vmcs_write32(sf->base, save->base & 0xfffff); | ||
1164 | vmcs_write32(sf->limit, 0xffff); | ||
1165 | vmcs_write32(sf->ar_bytes, 0xf3); | ||
1166 | } | ||
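/*
 * fix_rmode_seg() rewrites a protected-mode segment so vm86 execution
 * reproduces real-mode semantics: selector = base >> 4 with the base kept
 * to 20 bits matches the real-mode rule "base = selector * 16", the limit
 * is forced to 64K, and 0xf3 (present, DPL 3, writable data) is the
 * access-rights value VT requires for virtual-8086 segments.
 */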
1167 | |||
1168 | static void enter_rmode(struct kvm_vcpu *vcpu) | ||
1169 | { | ||
1170 | unsigned long flags; | ||
1171 | |||
1172 | vcpu->arch.rmode.active = 1; | ||
1173 | |||
1174 | vcpu->arch.rmode.tr.base = vmcs_readl(GUEST_TR_BASE); | ||
1175 | vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm)); | ||
1176 | |||
1177 | vcpu->arch.rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT); | ||
1178 | vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1); | ||
1179 | |||
1180 | vcpu->arch.rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES); | ||
1181 | vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); | ||
1182 | |||
1183 | flags = vmcs_readl(GUEST_RFLAGS); | ||
1184 | vcpu->arch.rmode.save_iopl | ||
1185 | = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; | ||
1186 | |||
1187 | flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; | ||
1188 | |||
1189 | vmcs_writel(GUEST_RFLAGS, flags); | ||
1190 | vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); | ||
1191 | update_exception_bitmap(vcpu); | ||
1192 | |||
1193 | vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4); | ||
1194 | vmcs_write32(GUEST_SS_LIMIT, 0xffff); | ||
1195 | vmcs_write32(GUEST_SS_AR_BYTES, 0xf3); | ||
1196 | |||
1197 | vmcs_write32(GUEST_CS_AR_BYTES, 0xf3); | ||
1198 | vmcs_write32(GUEST_CS_LIMIT, 0xffff); | ||
1199 | if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000) | ||
1200 | vmcs_writel(GUEST_CS_BASE, 0xf0000); | ||
1201 | vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4); | ||
1202 | |||
1203 | fix_rmode_seg(VCPU_SREG_ES, &vcpu->arch.rmode.es); | ||
1204 | fix_rmode_seg(VCPU_SREG_DS, &vcpu->arch.rmode.ds); | ||
1205 | fix_rmode_seg(VCPU_SREG_GS, &vcpu->arch.rmode.gs); | ||
1206 | fix_rmode_seg(VCPU_SREG_FS, &vcpu->arch.rmode.fs); | ||
1207 | |||
1208 | kvm_mmu_reset_context(vcpu); | ||
1209 | init_rmode_tss(vcpu->kvm); | ||
1210 | } | ||
1211 | |||
1212 | #ifdef CONFIG_X86_64 | ||
1213 | |||
1214 | static void enter_lmode(struct kvm_vcpu *vcpu) | ||
1215 | { | ||
1216 | u32 guest_tr_ar; | ||
1217 | |||
1218 | guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES); | ||
1219 | if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) { | ||
1220 | printk(KERN_DEBUG "%s: tss fixup for long mode. \n", | ||
1221 | __FUNCTION__); | ||
1222 | vmcs_write32(GUEST_TR_AR_BYTES, | ||
1223 | (guest_tr_ar & ~AR_TYPE_MASK) | ||
1224 | | AR_TYPE_BUSY_64_TSS); | ||
1225 | } | ||
1226 | |||
1227 | vcpu->arch.shadow_efer |= EFER_LMA; | ||
1228 | |||
1229 | find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME; | ||
1230 | vmcs_write32(VM_ENTRY_CONTROLS, | ||
1231 | vmcs_read32(VM_ENTRY_CONTROLS) | ||
1232 | | VM_ENTRY_IA32E_MODE); | ||
1233 | } | ||
1234 | |||
1235 | static void exit_lmode(struct kvm_vcpu *vcpu) | ||
1236 | { | ||
1237 | vcpu->arch.shadow_efer &= ~EFER_LMA; | ||
1238 | |||
1239 | vmcs_write32(VM_ENTRY_CONTROLS, | ||
1240 | vmcs_read32(VM_ENTRY_CONTROLS) | ||
1241 | & ~VM_ENTRY_IA32E_MODE); | ||
1242 | } | ||
1243 | |||
1244 | #endif | ||
1245 | |||
1246 | static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) | ||
1247 | { | ||
1248 | vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK; | ||
1249 | vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK; | ||
1250 | } | ||
1251 | |||
1252 | static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | ||
1253 | { | ||
1254 | vmx_fpu_deactivate(vcpu); | ||
1255 | |||
1256 | if (vcpu->arch.rmode.active && (cr0 & X86_CR0_PE)) | ||
1257 | enter_pmode(vcpu); | ||
1258 | |||
1259 | if (!vcpu->arch.rmode.active && !(cr0 & X86_CR0_PE)) | ||
1260 | enter_rmode(vcpu); | ||
1261 | |||
1262 | #ifdef CONFIG_X86_64 | ||
1263 | if (vcpu->arch.shadow_efer & EFER_LME) { | ||
1264 | if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) | ||
1265 | enter_lmode(vcpu); | ||
1266 | if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) | ||
1267 | exit_lmode(vcpu); | ||
1268 | } | ||
1269 | #endif | ||
1270 | |||
1271 | vmcs_writel(CR0_READ_SHADOW, cr0); | ||
1272 | vmcs_writel(GUEST_CR0, | ||
1273 | (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON); | ||
1274 | vcpu->arch.cr0 = cr0; | ||
1275 | |||
1276 | if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE)) | ||
1277 | vmx_fpu_activate(vcpu); | ||
1278 | } | ||
1279 | |||
1280 | static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) | ||
1281 | { | ||
1282 | vmcs_writel(GUEST_CR3, cr3); | ||
1283 | if (vcpu->arch.cr0 & X86_CR0_PE) | ||
1284 | vmx_fpu_deactivate(vcpu); | ||
1285 | } | ||
1286 | |||
1287 | static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) | ||
1288 | { | ||
1289 | vmcs_writel(CR4_READ_SHADOW, cr4); | ||
1290 | vmcs_writel(GUEST_CR4, cr4 | (vcpu->arch.rmode.active ? | ||
1291 | KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON)); | ||
1292 | vcpu->arch.cr4 = cr4; | ||
1293 | } | ||
1294 | |||
1295 | #ifdef CONFIG_X86_64 | ||
1296 | |||
1297 | static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) | ||
1298 | { | ||
1299 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
1300 | struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); | ||
1301 | |||
1302 | vcpu->arch.shadow_efer = efer; | ||
1303 | if (efer & EFER_LMA) { | ||
1304 | vmcs_write32(VM_ENTRY_CONTROLS, | ||
1305 | vmcs_read32(VM_ENTRY_CONTROLS) | | ||
1306 | VM_ENTRY_IA32E_MODE); | ||
1307 | msr->data = efer; | ||
1308 | |||
1309 | } else { | ||
1310 | vmcs_write32(VM_ENTRY_CONTROLS, | ||
1311 | vmcs_read32(VM_ENTRY_CONTROLS) & | ||
1312 | ~VM_ENTRY_IA32E_MODE); | ||
1313 | |||
1314 | msr->data = efer & ~EFER_LME; | ||
1315 | } | ||
1316 | setup_msrs(vmx); | ||
1317 | } | ||
1318 | |||
1319 | #endif | ||
1320 | |||
1321 | static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) | ||
1322 | { | ||
1323 | struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; | ||
1324 | |||
1325 | return vmcs_readl(sf->base); | ||
1326 | } | ||
1327 | |||
1328 | static void vmx_get_segment(struct kvm_vcpu *vcpu, | ||
1329 | struct kvm_segment *var, int seg) | ||
1330 | { | ||
1331 | struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; | ||
1332 | u32 ar; | ||
1333 | |||
1334 | var->base = vmcs_readl(sf->base); | ||
1335 | var->limit = vmcs_read32(sf->limit); | ||
1336 | var->selector = vmcs_read16(sf->selector); | ||
1337 | ar = vmcs_read32(sf->ar_bytes); | ||
1338 | if (ar & AR_UNUSABLE_MASK) | ||
1339 | ar = 0; | ||
1340 | var->type = ar & 15; | ||
1341 | var->s = (ar >> 4) & 1; | ||
1342 | var->dpl = (ar >> 5) & 3; | ||
1343 | var->present = (ar >> 7) & 1; | ||
1344 | var->avl = (ar >> 12) & 1; | ||
1345 | var->l = (ar >> 13) & 1; | ||
1346 | var->db = (ar >> 14) & 1; | ||
1347 | var->g = (ar >> 15) & 1; | ||
1348 | var->unusable = (ar >> 16) & 1; | ||
1349 | } | ||
1350 | |||
1351 | static u32 vmx_segment_access_rights(struct kvm_segment *var) | ||
1352 | { | ||
1353 | u32 ar; | ||
1354 | |||
1355 | if (var->unusable) | ||
1356 | ar = 1 << 16; | ||
1357 | else { | ||
1358 | ar = var->type & 15; | ||
1359 | ar |= (var->s & 1) << 4; | ||
1360 | ar |= (var->dpl & 3) << 5; | ||
1361 | ar |= (var->present & 1) << 7; | ||
1362 | ar |= (var->avl & 1) << 12; | ||
1363 | ar |= (var->l & 1) << 13; | ||
1364 | ar |= (var->db & 1) << 14; | ||
1365 | ar |= (var->g & 1) << 15; | ||
1366 | } | ||
1367 | if (ar == 0) /* a 0 value means unusable */ | ||
1368 | ar = AR_UNUSABLE_MASK; | ||
1369 | |||
1370 | return ar; | ||
1371 | } | ||
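/*
 * The layout produced above mirrors the VMCS access-rights format: type in
 * bits 3:0, S in bit 4, DPL in bits 6:5, present in bit 7, AVL/L/D-B/G in
 * bits 12-15 and the "unusable" flag in bit 16, so vmx_get_segment() and
 * vmx_set_segment() can round-trip a segment through struct kvm_segment
 * without losing information.
 */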
1372 | |||
1373 | static void vmx_set_segment(struct kvm_vcpu *vcpu, | ||
1374 | struct kvm_segment *var, int seg) | ||
1375 | { | ||
1376 | struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; | ||
1377 | u32 ar; | ||
1378 | |||
1379 | if (vcpu->arch.rmode.active && seg == VCPU_SREG_TR) { | ||
1380 | vcpu->arch.rmode.tr.selector = var->selector; | ||
1381 | vcpu->arch.rmode.tr.base = var->base; | ||
1382 | vcpu->arch.rmode.tr.limit = var->limit; | ||
1383 | vcpu->arch.rmode.tr.ar = vmx_segment_access_rights(var); | ||
1384 | return; | ||
1385 | } | ||
1386 | vmcs_writel(sf->base, var->base); | ||
1387 | vmcs_write32(sf->limit, var->limit); | ||
1388 | vmcs_write16(sf->selector, var->selector); | ||
1389 | if (vcpu->arch.rmode.active && var->s) { | ||
1390 | /* | ||
1391 | * Hack real-mode segments into vm86 compatibility. | ||
1392 | */ | ||
1393 | if (var->base == 0xffff0000 && var->selector == 0xf000) | ||
1394 | vmcs_writel(sf->base, 0xf0000); | ||
1395 | ar = 0xf3; | ||
1396 | } else | ||
1397 | ar = vmx_segment_access_rights(var); | ||
1398 | vmcs_write32(sf->ar_bytes, ar); | ||
1399 | } | ||
1400 | |||
1401 | static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) | ||
1402 | { | ||
1403 | u32 ar = vmcs_read32(GUEST_CS_AR_BYTES); | ||
1404 | |||
1405 | *db = (ar >> 14) & 1; | ||
1406 | *l = (ar >> 13) & 1; | ||
1407 | } | ||
1408 | |||
1409 | static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) | ||
1410 | { | ||
1411 | dt->limit = vmcs_read32(GUEST_IDTR_LIMIT); | ||
1412 | dt->base = vmcs_readl(GUEST_IDTR_BASE); | ||
1413 | } | ||
1414 | |||
1415 | static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) | ||
1416 | { | ||
1417 | vmcs_write32(GUEST_IDTR_LIMIT, dt->limit); | ||
1418 | vmcs_writel(GUEST_IDTR_BASE, dt->base); | ||
1419 | } | ||
1420 | |||
1421 | static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) | ||
1422 | { | ||
1423 | dt->limit = vmcs_read32(GUEST_GDTR_LIMIT); | ||
1424 | dt->base = vmcs_readl(GUEST_GDTR_BASE); | ||
1425 | } | ||
1426 | |||
1427 | static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) | ||
1428 | { | ||
1429 | vmcs_write32(GUEST_GDTR_LIMIT, dt->limit); | ||
1430 | vmcs_writel(GUEST_GDTR_BASE, dt->base); | ||
1431 | } | ||
1432 | |||
1433 | static int init_rmode_tss(struct kvm *kvm) | ||
1434 | { | ||
1435 | gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT; | ||
1436 | u16 data = 0; | ||
1437 | int r; | ||
1438 | |||
1439 | r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); | ||
1440 | if (r < 0) | ||
1441 | return 0; | ||
1442 | data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE; | ||
1443 | r = kvm_write_guest_page(kvm, fn++, &data, 0x66, sizeof(u16)); | ||
1444 | if (r < 0) | ||
1445 | return 0; | ||
1446 | r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE); | ||
1447 | if (r < 0) | ||
1448 | return 0; | ||
1449 | r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); | ||
1450 | if (r < 0) | ||
1451 | return 0; | ||
1452 | data = ~0; | ||
1453 | r = kvm_write_guest_page(kvm, fn, &data, RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1, | ||
1454 | sizeof(u8)); | ||
1455 | if (r < 0) | ||
1456 | return 0; | ||
1457 | return 1; | ||
1458 | } | ||
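/*
 * The guest TSS set up above spans three zeroed pages at rmode_tss_base().
 * Offset 0x66 is the 32-bit TSS's I/O map base field, pointed just past the
 * TSS proper plus the interrupt redirection bitmap, and the final 0xff byte
 * terminates the I/O permission bitmap as the architecture requires.
 */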
1459 | |||
1460 | static void seg_setup(int seg) | ||
1461 | { | ||
1462 | struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; | ||
1463 | |||
1464 | vmcs_write16(sf->selector, 0); | ||
1465 | vmcs_writel(sf->base, 0); | ||
1466 | vmcs_write32(sf->limit, 0xffff); | ||
1467 | vmcs_write32(sf->ar_bytes, 0x93); | ||
1468 | } | ||
1469 | |||
1470 | static int alloc_apic_access_page(struct kvm *kvm) | ||
1471 | { | ||
1472 | struct kvm_userspace_memory_region kvm_userspace_mem; | ||
1473 | int r = 0; | ||
1474 | |||
1475 | mutex_lock(&kvm->lock); | ||
1476 | if (kvm->arch.apic_access_page) | ||
1477 | goto out; | ||
1478 | kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT; | ||
1479 | kvm_userspace_mem.flags = 0; | ||
1480 | kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL; | ||
1481 | kvm_userspace_mem.memory_size = PAGE_SIZE; | ||
1482 | r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0); | ||
1483 | if (r) | ||
1484 | goto out; | ||
1485 | kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00); | ||
1486 | out: | ||
1487 | mutex_unlock(&kvm->lock); | ||
1488 | return r; | ||
1489 | } | ||
1490 | |||
1491 | /* | ||
1492 | * Sets up the vmcs for emulated real mode. | ||
1493 | */ | ||
1494 | static int vmx_vcpu_setup(struct vcpu_vmx *vmx) | ||
1495 | { | ||
1496 | u32 host_sysenter_cs; | ||
1497 | u32 junk; | ||
1498 | unsigned long a; | ||
1499 | struct descriptor_table dt; | ||
1500 | int i; | ||
1501 | unsigned long kvm_vmx_return; | ||
1502 | u32 exec_control; | ||
1503 | |||
1504 | /* I/O */ | ||
1505 | vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a)); | ||
1506 | vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b)); | ||
1507 | |||
1508 | vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */ | ||
1509 | |||
1510 | /* Control */ | ||
1511 | vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, | ||
1512 | vmcs_config.pin_based_exec_ctrl); | ||
1513 | |||
1514 | exec_control = vmcs_config.cpu_based_exec_ctrl; | ||
1515 | if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) { | ||
1516 | exec_control &= ~CPU_BASED_TPR_SHADOW; | ||
1517 | #ifdef CONFIG_X86_64 | ||
1518 | exec_control |= CPU_BASED_CR8_STORE_EXITING | | ||
1519 | CPU_BASED_CR8_LOAD_EXITING; | ||
1520 | #endif | ||
1521 | } | ||
1522 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); | ||
1523 | |||
1524 | if (cpu_has_secondary_exec_ctrls()) { | ||
1525 | exec_control = vmcs_config.cpu_based_2nd_exec_ctrl; | ||
1526 | if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) | ||
1527 | exec_control &= | ||
1528 | ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; | ||
1529 | vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); | ||
1530 | } | ||
1531 | |||
1532 | vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf); | ||
1533 | vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf); | ||
1534 | vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */ | ||
1535 | |||
1536 | vmcs_writel(HOST_CR0, read_cr0()); /* 22.2.3 */ | ||
1537 | vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */ | ||
1538 | vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */ | ||
1539 | |||
1540 | vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ | ||
1541 | vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ | ||
1542 | vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ | ||
1543 | vmcs_write16(HOST_FS_SELECTOR, read_fs()); /* 22.2.4 */ | ||
1544 | vmcs_write16(HOST_GS_SELECTOR, read_gs()); /* 22.2.4 */ | ||
1545 | vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ | ||
1546 | #ifdef CONFIG_X86_64 | ||
1547 | rdmsrl(MSR_FS_BASE, a); | ||
1548 | vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */ | ||
1549 | rdmsrl(MSR_GS_BASE, a); | ||
1550 | vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */ | ||
1551 | #else | ||
1552 | vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */ | ||
1553 | vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */ | ||
1554 | #endif | ||
1555 | |||
1556 | vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ | ||
1557 | |||
1558 | get_idt(&dt); | ||
1559 | vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */ | ||
1560 | |||
1561 | asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return)); | ||
1562 | vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */ | ||
1563 | vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); | ||
1564 | vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); | ||
1565 | vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); | ||
1566 | |||
1567 | rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk); | ||
1568 | vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs); | ||
1569 | rdmsrl(MSR_IA32_SYSENTER_ESP, a); | ||
1570 | vmcs_writel(HOST_IA32_SYSENTER_ESP, a); /* 22.2.3 */ | ||
1571 | rdmsrl(MSR_IA32_SYSENTER_EIP, a); | ||
1572 | vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */ | ||
1573 | |||
1574 | for (i = 0; i < NR_VMX_MSR; ++i) { | ||
1575 | u32 index = vmx_msr_index[i]; | ||
1576 | u32 data_low, data_high; | ||
1577 | u64 data; | ||
1578 | int j = vmx->nmsrs; | ||
1579 | |||
1580 | if (rdmsr_safe(index, &data_low, &data_high) < 0) | ||
1581 | continue; | ||
1582 | if (wrmsr_safe(index, data_low, data_high) < 0) | ||
1583 | continue; | ||
1584 | data = data_low | ((u64)data_high << 32); | ||
1585 | vmx->host_msrs[j].index = index; | ||
1586 | vmx->host_msrs[j].reserved = 0; | ||
1587 | vmx->host_msrs[j].data = data; | ||
1588 | vmx->guest_msrs[j] = vmx->host_msrs[j]; | ||
1589 | ++vmx->nmsrs; | ||
1590 | } | ||
1591 | |||
1592 | vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl); | ||
1593 | |||
1594 | /* 22.2.1, 20.8.1 */ | ||
1595 | vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl); | ||
1596 | |||
1597 | vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL); | ||
1598 | vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK); | ||
1599 | |||
1600 | if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) | ||
1601 | if (alloc_apic_access_page(vmx->vcpu.kvm) != 0) | ||
1602 | return -ENOMEM; | ||
1603 | |||
1604 | return 0; | ||
1605 | } | ||
1606 | |||
1607 | static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) | ||
1608 | { | ||
1609 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
1610 | u64 msr; | ||
1611 | int ret; | ||
1612 | |||
1613 | if (!init_rmode_tss(vmx->vcpu.kvm)) { | ||
1614 | ret = -ENOMEM; | ||
1615 | goto out; | ||
1616 | } | ||
1617 | |||
1618 | vmx->vcpu.arch.rmode.active = 0; | ||
1619 | |||
1620 | vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); | ||
1621 | set_cr8(&vmx->vcpu, 0); | ||
1622 | msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE; | ||
1623 | if (vmx->vcpu.vcpu_id == 0) | ||
1624 | msr |= MSR_IA32_APICBASE_BSP; | ||
1625 | kvm_set_apic_base(&vmx->vcpu, msr); | ||
1626 | |||
1627 | fx_init(&vmx->vcpu); | ||
1628 | |||
1629 | /* | ||
1630 | * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode | ||
1631 | * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh. | ||
1632 | */ | ||
1633 | if (vmx->vcpu.vcpu_id == 0) { | ||
1634 | vmcs_write16(GUEST_CS_SELECTOR, 0xf000); | ||
1635 | vmcs_writel(GUEST_CS_BASE, 0x000f0000); | ||
1636 | } else { | ||
1637 | vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8); | ||
1638 | vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12); | ||
1639 | } | ||
1640 | vmcs_write32(GUEST_CS_LIMIT, 0xffff); | ||
1641 | vmcs_write32(GUEST_CS_AR_BYTES, 0x9b); | ||
1642 | |||
1643 | seg_setup(VCPU_SREG_DS); | ||
1644 | seg_setup(VCPU_SREG_ES); | ||
1645 | seg_setup(VCPU_SREG_FS); | ||
1646 | seg_setup(VCPU_SREG_GS); | ||
1647 | seg_setup(VCPU_SREG_SS); | ||
1648 | |||
1649 | vmcs_write16(GUEST_TR_SELECTOR, 0); | ||
1650 | vmcs_writel(GUEST_TR_BASE, 0); | ||
1651 | vmcs_write32(GUEST_TR_LIMIT, 0xffff); | ||
1652 | vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); | ||
1653 | |||
1654 | vmcs_write16(GUEST_LDTR_SELECTOR, 0); | ||
1655 | vmcs_writel(GUEST_LDTR_BASE, 0); | ||
1656 | vmcs_write32(GUEST_LDTR_LIMIT, 0xffff); | ||
1657 | vmcs_write32(GUEST_LDTR_AR_BYTES, 0x0082); | ||
1658 | |||
1659 | vmcs_write32(GUEST_SYSENTER_CS, 0); | ||
1660 | vmcs_writel(GUEST_SYSENTER_ESP, 0); | ||
1661 | vmcs_writel(GUEST_SYSENTER_EIP, 0); | ||
1662 | |||
1663 | vmcs_writel(GUEST_RFLAGS, 0x02); | ||
1664 | if (vmx->vcpu.vcpu_id == 0) | ||
1665 | vmcs_writel(GUEST_RIP, 0xfff0); | ||
1666 | else | ||
1667 | vmcs_writel(GUEST_RIP, 0); | ||
1668 | vmcs_writel(GUEST_RSP, 0); | ||
1669 | |||
1670 | /* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */ | ||
1671 | vmcs_writel(GUEST_DR7, 0x400); | ||
1672 | |||
1673 | vmcs_writel(GUEST_GDTR_BASE, 0); | ||
1674 | vmcs_write32(GUEST_GDTR_LIMIT, 0xffff); | ||
1675 | |||
1676 | vmcs_writel(GUEST_IDTR_BASE, 0); | ||
1677 | vmcs_write32(GUEST_IDTR_LIMIT, 0xffff); | ||
1678 | |||
1679 | vmcs_write32(GUEST_ACTIVITY_STATE, 0); | ||
1680 | vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0); | ||
1681 | vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0); | ||
1682 | |||
1683 | guest_write_tsc(0); | ||
1684 | |||
1685 | /* Special registers */ | ||
1686 | vmcs_write64(GUEST_IA32_DEBUGCTL, 0); | ||
1687 | |||
1688 | setup_msrs(vmx); | ||
1689 | |||
1690 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */ | ||
1691 | |||
1692 | if (cpu_has_vmx_tpr_shadow()) { | ||
1693 | vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0); | ||
1694 | if (vm_need_tpr_shadow(vmx->vcpu.kvm)) | ||
1695 | vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, | ||
1696 | page_to_phys(vmx->vcpu.arch.apic->regs_page)); | ||
1697 | vmcs_write32(TPR_THRESHOLD, 0); | ||
1698 | } | ||
1699 | |||
1700 | if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) | ||
1701 | vmcs_write64(APIC_ACCESS_ADDR, | ||
1702 | page_to_phys(vmx->vcpu.kvm->arch.apic_access_page)); | ||
1703 | |||
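	/* 0x60000010 is the architectural CR0 reset value: CD | NW | ET. */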
1704 | vmx->vcpu.arch.cr0 = 0x60000010; | ||
1705 | vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */ | ||
1706 | vmx_set_cr4(&vmx->vcpu, 0); | ||
1707 | #ifdef CONFIG_X86_64 | ||
1708 | vmx_set_efer(&vmx->vcpu, 0); | ||
1709 | #endif | ||
1710 | vmx_fpu_activate(&vmx->vcpu); | ||
1711 | update_exception_bitmap(&vmx->vcpu); | ||
1712 | |||
1713 | return 0; | ||
1714 | |||
1715 | out: | ||
1716 | return ret; | ||
1717 | } | ||
1718 | |||
1719 | static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq) | ||
1720 | { | ||
1721 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
1722 | |||
1723 | if (vcpu->arch.rmode.active) { | ||
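		/*
		 * Injecting a normal external interrupt while the guest runs in
		 * (VT-emulated) real mode is unreliable, so deliver it as a
		 * software interrupt instead: report an instruction length of 1
		 * and rewind RIP by one byte, so that event delivery pushes the
		 * original RIP as the return address, as if the guest had just
		 * executed a one-byte INT.  fixup_rmode_irq() undoes this if the
		 * entry fails before the interrupt is delivered.
		 */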
1724 | vmx->rmode.irq.pending = true; | ||
1725 | vmx->rmode.irq.vector = irq; | ||
1726 | vmx->rmode.irq.rip = vmcs_readl(GUEST_RIP); | ||
1727 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, | ||
1728 | irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK); | ||
1729 | vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1); | ||
1730 | vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip - 1); | ||
1731 | return; | ||
1732 | } | ||
1733 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, | ||
1734 | irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK); | ||
1735 | } | ||
1736 | |||
1737 | static void kvm_do_inject_irq(struct kvm_vcpu *vcpu) | ||
1738 | { | ||
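	/*
	 * irq_summary has one bit set per non-empty word of irq_pending: pick
	 * the lowest pending vector, clear it, and drop the summary bit once
	 * its word becomes empty.
	 */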
1739 | int word_index = __ffs(vcpu->arch.irq_summary); | ||
1740 | int bit_index = __ffs(vcpu->arch.irq_pending[word_index]); | ||
1741 | int irq = word_index * BITS_PER_LONG + bit_index; | ||
1742 | |||
1743 | clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]); | ||
1744 | if (!vcpu->arch.irq_pending[word_index]) | ||
1745 | clear_bit(word_index, &vcpu->arch.irq_summary); | ||
1746 | vmx_inject_irq(vcpu, irq); | ||
1747 | } | ||
1748 | |||
1749 | |||
1750 | static void do_interrupt_requests(struct kvm_vcpu *vcpu, | ||
1751 | struct kvm_run *kvm_run) | ||
1752 | { | ||
1753 | u32 cpu_based_vm_exec_control; | ||
1754 | |||
1755 | vcpu->arch.interrupt_window_open = | ||
1756 | ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && | ||
1757 | (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0); | ||
1758 | |||
1759 | if (vcpu->arch.interrupt_window_open && | ||
1760 | vcpu->arch.irq_summary && | ||
1761 | !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK)) | ||
1762 | /* | ||
1763 | * Interrupts are enabled and not blocked by sti or mov ss, so inject now. | ||
1764 | */ | ||
1765 | kvm_do_inject_irq(vcpu); | ||
1766 | |||
1767 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); | ||
1768 | if (!vcpu->arch.interrupt_window_open && | ||
1769 | (vcpu->arch.irq_summary || kvm_run->request_interrupt_window)) | ||
1770 | /* | ||
1771 | * Interrupts blocked. Wait for unblock. | ||
1772 | */ | ||
1773 | cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING; | ||
1774 | else | ||
1775 | cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; | ||
1776 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); | ||
1777 | } | ||
1778 | |||
1779 | static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) | ||
1780 | { | ||
1781 | int ret; | ||
1782 | struct kvm_userspace_memory_region tss_mem = { | ||
1783 | .slot = 8, | ||
1784 | .guest_phys_addr = addr, | ||
1785 | .memory_size = PAGE_SIZE * 3, | ||
1786 | .flags = 0, | ||
1787 | }; | ||
1788 | |||
1789 | ret = kvm_set_memory_region(kvm, &tss_mem, 0); | ||
1790 | if (ret) | ||
1791 | return ret; | ||
1792 | kvm->arch.tss_addr = addr; | ||
1793 | return 0; | ||
1794 | } | ||
1795 | |||
1796 | static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu) | ||
1797 | { | ||
1798 | struct kvm_guest_debug *dbg = &vcpu->guest_debug; | ||
1799 | |||
1800 | set_debugreg(dbg->bp[0], 0); | ||
1801 | set_debugreg(dbg->bp[1], 1); | ||
1802 | set_debugreg(dbg->bp[2], 2); | ||
1803 | set_debugreg(dbg->bp[3], 3); | ||
1804 | |||
1805 | if (dbg->singlestep) { | ||
1806 | unsigned long flags; | ||
1807 | |||
1808 | flags = vmcs_readl(GUEST_RFLAGS); | ||
1809 | flags |= X86_EFLAGS_TF | X86_EFLAGS_RF; | ||
1810 | vmcs_writel(GUEST_RFLAGS, flags); | ||
1811 | } | ||
1812 | } | ||
1813 | |||
1814 | static int handle_rmode_exception(struct kvm_vcpu *vcpu, | ||
1815 | int vec, u32 err_code) | ||
1816 | { | ||
1817 | if (!vcpu->arch.rmode.active) | ||
1818 | return 0; | ||
1819 | |||
1820 | /* | ||
1821 | * An instruction with the address-size override prefix (opcode 0x67) | ||
1822 | * causes a #GP or #SS fault with error code 0 in VM86 mode; emulate it. | ||
1823 | */ | ||
1824 | if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) | ||
1825 | if (emulate_instruction(vcpu, NULL, 0, 0, 0) == EMULATE_DONE) | ||
1826 | return 1; | ||
1827 | return 0; | ||
1828 | } | ||
1829 | |||
1830 | static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
1831 | { | ||
1832 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
1833 | u32 intr_info, error_code; | ||
1834 | unsigned long cr2, rip; | ||
1835 | u32 vect_info; | ||
1836 | enum emulation_result er; | ||
1837 | |||
1838 | vect_info = vmx->idt_vectoring_info; | ||
1839 | intr_info = vmcs_read32(VM_EXIT_INTR_INFO); | ||
1840 | |||
1841 | if ((vect_info & VECTORING_INFO_VALID_MASK) && | ||
1842 | !is_page_fault(intr_info)) | ||
1843 | printk(KERN_ERR "%s: unexpected, vectoring info 0x%x " | ||
1844 | "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info); | ||
1845 | |||
1846 | if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) { | ||
1847 | int irq = vect_info & VECTORING_INFO_VECTOR_MASK; | ||
1848 | set_bit(irq, vcpu->arch.irq_pending); | ||
1849 | set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary); | ||
1850 | } | ||
1851 | |||
1852 | if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */ | ||
1853 | return 1; /* already handled by vmx_vcpu_run() */ | ||
1854 | |||
1855 | if (is_no_device(intr_info)) { | ||
1856 | vmx_fpu_activate(vcpu); | ||
1857 | return 1; | ||
1858 | } | ||
1859 | |||
1860 | if (is_invalid_opcode(intr_info)) { | ||
1861 | er = emulate_instruction(vcpu, kvm_run, 0, 0, 0); | ||
1862 | if (er != EMULATE_DONE) | ||
1863 | kvm_queue_exception(vcpu, UD_VECTOR); | ||
1864 | return 1; | ||
1865 | } | ||
1866 | |||
1867 | error_code = 0; | ||
1868 | rip = vmcs_readl(GUEST_RIP); | ||
1869 | if (intr_info & INTR_INFO_DELIEVER_CODE_MASK) | ||
1870 | error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); | ||
1871 | if (is_page_fault(intr_info)) { | ||
1872 | cr2 = vmcs_readl(EXIT_QUALIFICATION); | ||
1873 | return kvm_mmu_page_fault(vcpu, cr2, error_code); | ||
1874 | } | ||
1875 | |||
1876 | if (vcpu->arch.rmode.active && | ||
1877 | handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK, | ||
1878 | error_code)) { | ||
1879 | if (vcpu->arch.halt_request) { | ||
1880 | vcpu->arch.halt_request = 0; | ||
1881 | return kvm_emulate_halt(vcpu); | ||
1882 | } | ||
1883 | return 1; | ||
1884 | } | ||
1885 | |||
1886 | if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == | ||
1887 | (INTR_TYPE_EXCEPTION | 1)) { | ||
1888 | kvm_run->exit_reason = KVM_EXIT_DEBUG; | ||
1889 | return 0; | ||
1890 | } | ||
1891 | kvm_run->exit_reason = KVM_EXIT_EXCEPTION; | ||
1892 | kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK; | ||
1893 | kvm_run->ex.error_code = error_code; | ||
1894 | return 0; | ||
1895 | } | ||
1896 | |||
1897 | static int handle_external_interrupt(struct kvm_vcpu *vcpu, | ||
1898 | struct kvm_run *kvm_run) | ||
1899 | { | ||
1900 | ++vcpu->stat.irq_exits; | ||
1901 | return 1; | ||
1902 | } | ||
1903 | |||
1904 | static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
1905 | { | ||
1906 | kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; | ||
1907 | return 0; | ||
1908 | } | ||
1909 | |||
1910 | static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
1911 | { | ||
1912 | unsigned long exit_qualification; | ||
1913 | int size, down, in, string, rep; | ||
1914 | unsigned port; | ||
1915 | |||
1916 | ++vcpu->stat.io_exits; | ||
1917 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | ||
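	/*
	 * Exit qualification layout for I/O instructions (Intel SDM): bits 2:0
	 * hold the access size minus one, bit 3 is the direction (1 = IN),
	 * bit 4 flags a string instruction, bit 5 a REP prefix, and bits 31:16
	 * the port number.
	 */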
1918 | string = (exit_qualification & 16) != 0; | ||
1919 | |||
1920 | if (string) { | ||
1921 | if (emulate_instruction(vcpu, | ||
1922 | kvm_run, 0, 0, 0) == EMULATE_DO_MMIO) | ||
1923 | return 0; | ||
1924 | return 1; | ||
1925 | } | ||
1926 | |||
1927 | size = (exit_qualification & 7) + 1; | ||
1928 | in = (exit_qualification & 8) != 0; | ||
1929 | down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0; | ||
1930 | rep = (exit_qualification & 32) != 0; | ||
1931 | port = exit_qualification >> 16; | ||
1932 | |||
1933 | return kvm_emulate_pio(vcpu, kvm_run, in, size, port); | ||
1934 | } | ||
1935 | |||
1936 | static void | ||
1937 | vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) | ||
1938 | { | ||
1939 | /* | ||
1940 | * Patch in the VMCALL instruction: | ||
1941 | */ | ||
1942 | hypercall[0] = 0x0f; | ||
1943 | hypercall[1] = 0x01; | ||
1944 | hypercall[2] = 0xc1; | ||
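	/* The byte sequence 0f 01 c1 is the VMCALL opcode. */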
1945 | } | ||
1946 | |||
1947 | static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
1948 | { | ||
1949 | unsigned long exit_qualification; | ||
1950 | int cr; | ||
1951 | int reg; | ||
1952 | |||
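	/*
	 * Exit qualification layout for CR accesses: bits 3:0 give the control
	 * register number, bits 5:4 the access type (0 = mov to cr, 1 = mov
	 * from cr, 2 = clts, 3 = lmsw) and bits 11:8 the general register.
	 */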
1953 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | ||
1954 | cr = exit_qualification & 15; | ||
1955 | reg = (exit_qualification >> 8) & 15; | ||
1956 | switch ((exit_qualification >> 4) & 3) { | ||
1957 | case 0: /* mov to cr */ | ||
1958 | switch (cr) { | ||
1959 | case 0: | ||
1960 | vcpu_load_rsp_rip(vcpu); | ||
1961 | set_cr0(vcpu, vcpu->arch.regs[reg]); | ||
1962 | skip_emulated_instruction(vcpu); | ||
1963 | return 1; | ||
1964 | case 3: | ||
1965 | vcpu_load_rsp_rip(vcpu); | ||
1966 | set_cr3(vcpu, vcpu->arch.regs[reg]); | ||
1967 | skip_emulated_instruction(vcpu); | ||
1968 | return 1; | ||
1969 | case 4: | ||
1970 | vcpu_load_rsp_rip(vcpu); | ||
1971 | set_cr4(vcpu, vcpu->arch.regs[reg]); | ||
1972 | skip_emulated_instruction(vcpu); | ||
1973 | return 1; | ||
1974 | case 8: | ||
1975 | vcpu_load_rsp_rip(vcpu); | ||
1976 | set_cr8(vcpu, vcpu->arch.regs[reg]); | ||
1977 | skip_emulated_instruction(vcpu); | ||
1978 | if (irqchip_in_kernel(vcpu->kvm)) | ||
1979 | return 1; | ||
1980 | kvm_run->exit_reason = KVM_EXIT_SET_TPR; | ||
1981 | return 0; | ||
1982 | } | ||
1983 | break; | ||
1984 | case 2: /* clts */ | ||
1985 | vcpu_load_rsp_rip(vcpu); | ||
1986 | vmx_fpu_deactivate(vcpu); | ||
1987 | vcpu->arch.cr0 &= ~X86_CR0_TS; | ||
1988 | vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0); | ||
1989 | vmx_fpu_activate(vcpu); | ||
1990 | skip_emulated_instruction(vcpu); | ||
1991 | return 1; | ||
1992 | case 1: /*mov from cr*/ | ||
1993 | switch (cr) { | ||
1994 | case 3: | ||
1995 | vcpu_load_rsp_rip(vcpu); | ||
1996 | vcpu->arch.regs[reg] = vcpu->arch.cr3; | ||
1997 | vcpu_put_rsp_rip(vcpu); | ||
1998 | skip_emulated_instruction(vcpu); | ||
1999 | return 1; | ||
2000 | case 8: | ||
2001 | vcpu_load_rsp_rip(vcpu); | ||
2002 | vcpu->arch.regs[reg] = get_cr8(vcpu); | ||
2003 | vcpu_put_rsp_rip(vcpu); | ||
2004 | skip_emulated_instruction(vcpu); | ||
2005 | return 1; | ||
2006 | } | ||
2007 | break; | ||
2008 | case 3: /* lmsw */ | ||
2009 | lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f); | ||
2010 | |||
2011 | skip_emulated_instruction(vcpu); | ||
2012 | return 1; | ||
2013 | default: | ||
2014 | break; | ||
2015 | } | ||
2016 | kvm_run->exit_reason = 0; | ||
2017 | pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n", | ||
2018 | (int)(exit_qualification >> 4) & 3, cr); | ||
2019 | return 0; | ||
2020 | } | ||
2021 | |||
2022 | static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
2023 | { | ||
2024 | unsigned long exit_qualification; | ||
2025 | unsigned long val; | ||
2026 | int dr, reg; | ||
2027 | |||
2028 | /* | ||
2029 | * FIXME: this code assumes the host is debugging the guest. | ||
2030 | * need to deal with guest debugging itself too. | ||
2031 | */ | ||
2032 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | ||
2033 | dr = exit_qualification & 7; | ||
2034 | reg = (exit_qualification >> 8) & 15; | ||
2035 | vcpu_load_rsp_rip(vcpu); | ||
2036 | if (exit_qualification & 16) { | ||
2037 | /* mov from dr */ | ||
2038 | switch (dr) { | ||
2039 | case 6: | ||
2040 | val = 0xffff0ff0; | ||
2041 | break; | ||
2042 | case 7: | ||
2043 | val = 0x400; | ||
2044 | break; | ||
2045 | default: | ||
2046 | val = 0; | ||
2047 | } | ||
2048 | vcpu->arch.regs[reg] = val; | ||
2049 | } else { | ||
2050 | /* mov to dr */ | ||
2051 | } | ||
2052 | vcpu_put_rsp_rip(vcpu); | ||
2053 | skip_emulated_instruction(vcpu); | ||
2054 | return 1; | ||
2055 | } | ||
2056 | |||
2057 | static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
2058 | { | ||
2059 | kvm_emulate_cpuid(vcpu); | ||
2060 | return 1; | ||
2061 | } | ||
2062 | |||
2063 | static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
2064 | { | ||
2065 | u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; | ||
2066 | u64 data; | ||
2067 | |||
2068 | if (vmx_get_msr(vcpu, ecx, &data)) { | ||
2069 | kvm_inject_gp(vcpu, 0); | ||
2070 | return 1; | ||
2071 | } | ||
2072 | |||
2073 | /* FIXME: handling of bits 32:63 of rax, rdx */ | ||
2074 | vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u; | ||
2075 | vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u; | ||
2076 | skip_emulated_instruction(vcpu); | ||
2077 | return 1; | ||
2078 | } | ||
2079 | |||
2080 | static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
2081 | { | ||
2082 | u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; | ||
2083 | u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) | ||
2084 | | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32); | ||
2085 | |||
2086 | if (vmx_set_msr(vcpu, ecx, data) != 0) { | ||
2087 | kvm_inject_gp(vcpu, 0); | ||
2088 | return 1; | ||
2089 | } | ||
2090 | |||
2091 | skip_emulated_instruction(vcpu); | ||
2092 | return 1; | ||
2093 | } | ||
2094 | |||
2095 | static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu, | ||
2096 | struct kvm_run *kvm_run) | ||
2097 | { | ||
2098 | return 1; | ||
2099 | } | ||
2100 | |||
2101 | static int handle_interrupt_window(struct kvm_vcpu *vcpu, | ||
2102 | struct kvm_run *kvm_run) | ||
2103 | { | ||
2104 | u32 cpu_based_vm_exec_control; | ||
2105 | |||
2106 | /* clear pending irq */ | ||
2107 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); | ||
2108 | cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; | ||
2109 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); | ||
2110 | /* | ||
2111 | * If userspace is waiting to inject interrupts, exit as soon as | ||
2112 | * possible. | ||
2113 | */ | ||
2114 | if (kvm_run->request_interrupt_window && | ||
2115 | !vcpu->arch.irq_summary) { | ||
2116 | kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; | ||
2117 | ++vcpu->stat.irq_window_exits; | ||
2118 | return 0; | ||
2119 | } | ||
2120 | return 1; | ||
2121 | } | ||
2122 | |||
2123 | static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
2124 | { | ||
2125 | skip_emulated_instruction(vcpu); | ||
2126 | return kvm_emulate_halt(vcpu); | ||
2127 | } | ||
2128 | |||
2129 | static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
2130 | { | ||
2131 | skip_emulated_instruction(vcpu); | ||
2132 | kvm_emulate_hypercall(vcpu); | ||
2133 | return 1; | ||
2134 | } | ||
2135 | |||
2136 | static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
2137 | { | ||
2138 | skip_emulated_instruction(vcpu); | ||
2139 | /* TODO: Add support for VT-d/pass-through device */ | ||
2140 | return 1; | ||
2141 | } | ||
2142 | |||
2143 | static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
2144 | { | ||
2145 | u64 exit_qualification; | ||
2146 | enum emulation_result er; | ||
2147 | unsigned long offset; | ||
2148 | |||
2149 | exit_qualification = vmcs_read64(EXIT_QUALIFICATION); | ||
2150 | offset = exit_qualification & 0xffful; | ||
2151 | |||
2152 | er = emulate_instruction(vcpu, kvm_run, 0, 0, 0); | ||
2153 | |||
2154 | if (er != EMULATE_DONE) { | ||
2155 | printk(KERN_ERR | ||
2156 | "Fail to handle apic access vmexit! Offset is 0x%lx\n", | ||
2157 | offset); | ||
2158 | return -ENOTSUPP; | ||
2159 | } | ||
2160 | return 1; | ||
2161 | } | ||
2162 | |||
2163 | /* | ||
2164 | * The exit handlers return 1 if the exit was handled fully and guest execution | ||
2165 | * may resume. Otherwise they set the kvm_run parameter to indicate what needs | ||
2166 | * to be done to userspace and return 0. | ||
2167 | */ | ||
2168 | static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu, | ||
2169 | struct kvm_run *kvm_run) = { | ||
2170 | [EXIT_REASON_EXCEPTION_NMI] = handle_exception, | ||
2171 | [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, | ||
2172 | [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault, | ||
2173 | [EXIT_REASON_IO_INSTRUCTION] = handle_io, | ||
2174 | [EXIT_REASON_CR_ACCESS] = handle_cr, | ||
2175 | [EXIT_REASON_DR_ACCESS] = handle_dr, | ||
2176 | [EXIT_REASON_CPUID] = handle_cpuid, | ||
2177 | [EXIT_REASON_MSR_READ] = handle_rdmsr, | ||
2178 | [EXIT_REASON_MSR_WRITE] = handle_wrmsr, | ||
2179 | [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window, | ||
2180 | [EXIT_REASON_HLT] = handle_halt, | ||
2181 | [EXIT_REASON_VMCALL] = handle_vmcall, | ||
2182 | [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, | ||
2183 | [EXIT_REASON_APIC_ACCESS] = handle_apic_access, | ||
2184 | [EXIT_REASON_WBINVD] = handle_wbinvd, | ||
2185 | }; | ||
2186 | |||
2187 | static const int kvm_vmx_max_exit_handlers = | ||
2188 | ARRAY_SIZE(kvm_vmx_exit_handlers); | ||
2189 | |||
2190 | /* | ||
2191 | * The guest has exited. See if we can fix it or if we need userspace | ||
2192 | * assistance. | ||
2193 | */ | ||
2194 | static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | ||
2195 | { | ||
2196 | u32 exit_reason = vmcs_read32(VM_EXIT_REASON); | ||
2197 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
2198 | u32 vectoring_info = vmx->idt_vectoring_info; | ||
2199 | |||
2200 | if (unlikely(vmx->fail)) { | ||
2201 | kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; | ||
2202 | kvm_run->fail_entry.hardware_entry_failure_reason | ||
2203 | = vmcs_read32(VM_INSTRUCTION_ERROR); | ||
2204 | return 0; | ||
2205 | } | ||
2206 | |||
2207 | if ((vectoring_info & VECTORING_INFO_VALID_MASK) && | ||
2208 | exit_reason != EXIT_REASON_EXCEPTION_NMI) | ||
2209 | printk(KERN_WARNING "%s: unexpected, valid vectoring info and " | ||
2210 | "exit reason is 0x%x\n", __FUNCTION__, exit_reason); | ||
2211 | if (exit_reason < kvm_vmx_max_exit_handlers | ||
2212 | && kvm_vmx_exit_handlers[exit_reason]) | ||
2213 | return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run); | ||
2214 | else { | ||
2215 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | ||
2216 | kvm_run->hw.hardware_exit_reason = exit_reason; | ||
2217 | } | ||
2218 | return 0; | ||
2219 | } | ||
2220 | |||
2221 | static void vmx_flush_tlb(struct kvm_vcpu *vcpu) | ||
2222 | { | ||
2223 | } | ||
2224 | |||
2225 | static void update_tpr_threshold(struct kvm_vcpu *vcpu) | ||
2226 | { | ||
2227 | int max_irr, tpr; | ||
2228 | |||
2229 | if (!vm_need_tpr_shadow(vcpu->kvm)) | ||
2230 | return; | ||
2231 | |||
2232 | if (!kvm_lapic_enabled(vcpu) || | ||
2233 | ((max_irr = kvm_lapic_find_highest_irr(vcpu)) == -1)) { | ||
2234 | vmcs_write32(TPR_THRESHOLD, 0); | ||
2235 | return; | ||
2236 | } | ||
2237 | |||
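	/*
	 * TPR_THRESHOLD is compared against bits 7:4 of the virtual TPR.
	 * Writing the lower of the guest's TPR class and the class of the
	 * highest pending interrupt arranges a TPR_BELOW_THRESHOLD exit as
	 * soon as the guest lowers its priority enough for that interrupt
	 * to be injected.
	 */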
2238 | tpr = (kvm_lapic_get_cr8(vcpu) & 0x0f) << 4; | ||
2239 | vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? tpr >> 4 : max_irr >> 4); | ||
2240 | } | ||
2241 | |||
2242 | static void enable_irq_window(struct kvm_vcpu *vcpu) | ||
2243 | { | ||
2244 | u32 cpu_based_vm_exec_control; | ||
2245 | |||
2246 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); | ||
2247 | cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING; | ||
2248 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); | ||
2249 | } | ||
2250 | |||
2251 | static void vmx_intr_assist(struct kvm_vcpu *vcpu) | ||
2252 | { | ||
2253 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
2254 | u32 idtv_info_field, intr_info_field; | ||
2255 | int has_ext_irq, interrupt_window_open; | ||
2256 | int vector; | ||
2257 | |||
2258 | update_tpr_threshold(vcpu); | ||
2259 | |||
2260 | has_ext_irq = kvm_cpu_has_interrupt(vcpu); | ||
2261 | intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD); | ||
2262 | idtv_info_field = vmx->idt_vectoring_info; | ||
2263 | if (intr_info_field & INTR_INFO_VALID_MASK) { | ||
2264 | if (idtv_info_field & INTR_INFO_VALID_MASK) { | ||
2265 | /* TODO: fault when IDT_Vectoring */ | ||
2266 | if (printk_ratelimit()) | ||
2267 | printk(KERN_ERR "Fault when IDT_Vectoring\n"); | ||
2268 | } | ||
2269 | if (has_ext_irq) | ||
2270 | enable_irq_window(vcpu); | ||
2271 | return; | ||
2272 | } | ||
2273 | if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) { | ||
2274 | if ((idtv_info_field & VECTORING_INFO_TYPE_MASK) | ||
2275 | == INTR_TYPE_EXT_INTR | ||
2276 | && vcpu->arch.rmode.active) { | ||
2277 | u8 vect = idtv_info_field & VECTORING_INFO_VECTOR_MASK; | ||
2278 | |||
2279 | vmx_inject_irq(vcpu, vect); | ||
2280 | if (unlikely(has_ext_irq)) | ||
2281 | enable_irq_window(vcpu); | ||
2282 | return; | ||
2283 | } | ||
2284 | |||
2285 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field); | ||
2286 | vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, | ||
2287 | vmcs_read32(VM_EXIT_INSTRUCTION_LEN)); | ||
2288 | |||
2289 | if (unlikely(idtv_info_field & INTR_INFO_DELIEVER_CODE_MASK)) | ||
2290 | vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, | ||
2291 | vmcs_read32(IDT_VECTORING_ERROR_CODE)); | ||
2292 | if (unlikely(has_ext_irq)) | ||
2293 | enable_irq_window(vcpu); | ||
2294 | return; | ||
2295 | } | ||
2296 | if (!has_ext_irq) | ||
2297 | return; | ||
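	/* Bits 0 and 1 of the interruptibility state are blocking by STI and by mov ss. */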
2298 | interrupt_window_open = | ||
2299 | ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && | ||
2300 | (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0); | ||
2301 | if (interrupt_window_open) { | ||
2302 | vector = kvm_cpu_get_interrupt(vcpu); | ||
2303 | vmx_inject_irq(vcpu, vector); | ||
2304 | kvm_timer_intr_post(vcpu, vector); | ||
2305 | } else | ||
2306 | enable_irq_window(vcpu); | ||
2307 | } | ||
2308 | |||
2309 | /* | ||
2310 | * Failure to inject an interrupt should give us the information | ||
2311 | * in IDT_VECTORING_INFO_FIELD. However, if the failure occurs | ||
2312 | * when fetching the interrupt redirection bitmap in the real-mode | ||
2313 | * tss, this doesn't happen. So we do it ourselves. | ||
2314 | */ | ||
2315 | static void fixup_rmode_irq(struct vcpu_vmx *vmx) | ||
2316 | { | ||
2317 | vmx->rmode.irq.pending = 0; | ||
2318 | if (vmcs_readl(GUEST_RIP) + 1 != vmx->rmode.irq.rip) | ||
2319 | return; | ||
2320 | vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip); | ||
2321 | if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) { | ||
2322 | vmx->idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK; | ||
2323 | vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR; | ||
2324 | return; | ||
2325 | } | ||
2326 | vmx->idt_vectoring_info = | ||
2327 | VECTORING_INFO_VALID_MASK | ||
2328 | | INTR_TYPE_EXT_INTR | ||
2329 | | vmx->rmode.irq.vector; | ||
2330 | } | ||
2331 | |||
2332 | static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
2333 | { | ||
2334 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
2335 | u32 intr_info; | ||
2336 | |||
2337 | /* | ||
2338 | * Loading guest fpu may have cleared host cr0.ts | ||
2339 | */ | ||
2340 | vmcs_writel(HOST_CR0, read_cr0()); | ||
2341 | |||
2342 | asm( | ||
2343 | /* Store host registers */ | ||
2344 | #ifdef CONFIG_X86_64 | ||
2345 | "push %%rdx; push %%rbp;" | ||
2346 | "push %%rcx \n\t" | ||
2347 | #else | ||
2348 | "push %%edx; push %%ebp;" | ||
2349 | "push %%ecx \n\t" | ||
2350 | #endif | ||
2351 | ASM_VMX_VMWRITE_RSP_RDX "\n\t" | ||
2352 | /* Check if vmlaunch or vmresume is needed */ | ||
2353 | "cmpl $0, %c[launched](%0) \n\t" | ||
2354 | /* Load guest registers. Don't clobber flags. */ | ||
2355 | #ifdef CONFIG_X86_64 | ||
2356 | "mov %c[cr2](%0), %%rax \n\t" | ||
2357 | "mov %%rax, %%cr2 \n\t" | ||
2358 | "mov %c[rax](%0), %%rax \n\t" | ||
2359 | "mov %c[rbx](%0), %%rbx \n\t" | ||
2360 | "mov %c[rdx](%0), %%rdx \n\t" | ||
2361 | "mov %c[rsi](%0), %%rsi \n\t" | ||
2362 | "mov %c[rdi](%0), %%rdi \n\t" | ||
2363 | "mov %c[rbp](%0), %%rbp \n\t" | ||
2364 | "mov %c[r8](%0), %%r8 \n\t" | ||
2365 | "mov %c[r9](%0), %%r9 \n\t" | ||
2366 | "mov %c[r10](%0), %%r10 \n\t" | ||
2367 | "mov %c[r11](%0), %%r11 \n\t" | ||
2368 | "mov %c[r12](%0), %%r12 \n\t" | ||
2369 | "mov %c[r13](%0), %%r13 \n\t" | ||
2370 | "mov %c[r14](%0), %%r14 \n\t" | ||
2371 | "mov %c[r15](%0), %%r15 \n\t" | ||
2372 | "mov %c[rcx](%0), %%rcx \n\t" /* kills %0 (rcx) */ | ||
2373 | #else | ||
2374 | "mov %c[cr2](%0), %%eax \n\t" | ||
2375 | "mov %%eax, %%cr2 \n\t" | ||
2376 | "mov %c[rax](%0), %%eax \n\t" | ||
2377 | "mov %c[rbx](%0), %%ebx \n\t" | ||
2378 | "mov %c[rdx](%0), %%edx \n\t" | ||
2379 | "mov %c[rsi](%0), %%esi \n\t" | ||
2380 | "mov %c[rdi](%0), %%edi \n\t" | ||
2381 | "mov %c[rbp](%0), %%ebp \n\t" | ||
2382 | "mov %c[rcx](%0), %%ecx \n\t" /* kills %0 (ecx) */ | ||
2383 | #endif | ||
2384 | /* Enter guest mode */ | ||
2385 | "jne .Llaunched \n\t" | ||
2386 | ASM_VMX_VMLAUNCH "\n\t" | ||
2387 | "jmp .Lkvm_vmx_return \n\t" | ||
2388 | ".Llaunched: " ASM_VMX_VMRESUME "\n\t" | ||
2389 | ".Lkvm_vmx_return: " | ||
2390 | /* Save guest registers, load host registers, keep flags */ | ||
2391 | #ifdef CONFIG_X86_64 | ||
2392 | "xchg %0, (%%rsp) \n\t" | ||
2393 | "mov %%rax, %c[rax](%0) \n\t" | ||
2394 | "mov %%rbx, %c[rbx](%0) \n\t" | ||
2395 | "pushq (%%rsp); popq %c[rcx](%0) \n\t" | ||
2396 | "mov %%rdx, %c[rdx](%0) \n\t" | ||
2397 | "mov %%rsi, %c[rsi](%0) \n\t" | ||
2398 | "mov %%rdi, %c[rdi](%0) \n\t" | ||
2399 | "mov %%rbp, %c[rbp](%0) \n\t" | ||
2400 | "mov %%r8, %c[r8](%0) \n\t" | ||
2401 | "mov %%r9, %c[r9](%0) \n\t" | ||
2402 | "mov %%r10, %c[r10](%0) \n\t" | ||
2403 | "mov %%r11, %c[r11](%0) \n\t" | ||
2404 | "mov %%r12, %c[r12](%0) \n\t" | ||
2405 | "mov %%r13, %c[r13](%0) \n\t" | ||
2406 | "mov %%r14, %c[r14](%0) \n\t" | ||
2407 | "mov %%r15, %c[r15](%0) \n\t" | ||
2408 | "mov %%cr2, %%rax \n\t" | ||
2409 | "mov %%rax, %c[cr2](%0) \n\t" | ||
2410 | |||
2411 | "pop %%rbp; pop %%rbp; pop %%rdx \n\t" | ||
2412 | #else | ||
2413 | "xchg %0, (%%esp) \n\t" | ||
2414 | "mov %%eax, %c[rax](%0) \n\t" | ||
2415 | "mov %%ebx, %c[rbx](%0) \n\t" | ||
2416 | "pushl (%%esp); popl %c[rcx](%0) \n\t" | ||
2417 | "mov %%edx, %c[rdx](%0) \n\t" | ||
2418 | "mov %%esi, %c[rsi](%0) \n\t" | ||
2419 | "mov %%edi, %c[rdi](%0) \n\t" | ||
2420 | "mov %%ebp, %c[rbp](%0) \n\t" | ||
2421 | "mov %%cr2, %%eax \n\t" | ||
2422 | "mov %%eax, %c[cr2](%0) \n\t" | ||
2423 | |||
2424 | "pop %%ebp; pop %%ebp; pop %%edx \n\t" | ||
2425 | #endif | ||
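		/*
		 * A failed VMLAUNCH/VMRESUME reports VMfailInvalid via CF or
		 * VMfailValid via ZF; setbe latches either case into vmx->fail.
		 */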
2426 | "setbe %c[fail](%0) \n\t" | ||
2427 | : : "c"(vmx), "d"((unsigned long)HOST_RSP), | ||
2428 | [launched]"i"(offsetof(struct vcpu_vmx, launched)), | ||
2429 | [fail]"i"(offsetof(struct vcpu_vmx, fail)), | ||
2430 | [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])), | ||
2431 | [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])), | ||
2432 | [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])), | ||
2433 | [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])), | ||
2434 | [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])), | ||
2435 | [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])), | ||
2436 | [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])), | ||
2437 | #ifdef CONFIG_X86_64 | ||
2438 | [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])), | ||
2439 | [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])), | ||
2440 | [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])), | ||
2441 | [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])), | ||
2442 | [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])), | ||
2443 | [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])), | ||
2444 | [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])), | ||
2445 | [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])), | ||
2446 | #endif | ||
2447 | [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)) | ||
2448 | : "cc", "memory" | ||
2449 | #ifdef CONFIG_X86_64 | ||
2450 | , "rbx", "rdi", "rsi" | ||
2451 | , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" | ||
2452 | #else | ||
2453 | , "ebx", "edi", "rsi" | ||
2454 | #endif | ||
2455 | ); | ||
2456 | |||
2457 | vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); | ||
2458 | if (vmx->rmode.irq.pending) | ||
2459 | fixup_rmode_irq(vmx); | ||
2460 | |||
2461 | vcpu->arch.interrupt_window_open = | ||
2462 | (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0; | ||
2463 | |||
2464 | asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); | ||
2465 | vmx->launched = 1; | ||
2466 | |||
2467 | intr_info = vmcs_read32(VM_EXIT_INTR_INFO); | ||
2468 | |||
2469 | /* We need to handle NMIs before interrupts are enabled */ | ||
2470 | if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */ | ||
2471 | asm("int $2"); | ||
2472 | } | ||
2473 | |||
2474 | static void vmx_free_vmcs(struct kvm_vcpu *vcpu) | ||
2475 | { | ||
2476 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
2477 | |||
2478 | if (vmx->vmcs) { | ||
2479 | on_each_cpu(__vcpu_clear, vmx, 0, 1); | ||
2480 | free_vmcs(vmx->vmcs); | ||
2481 | vmx->vmcs = NULL; | ||
2482 | } | ||
2483 | } | ||
2484 | |||
2485 | static void vmx_free_vcpu(struct kvm_vcpu *vcpu) | ||
2486 | { | ||
2487 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
2488 | |||
2489 | vmx_free_vmcs(vcpu); | ||
2490 | kfree(vmx->host_msrs); | ||
2491 | kfree(vmx->guest_msrs); | ||
2492 | kvm_vcpu_uninit(vcpu); | ||
2493 | kmem_cache_free(kvm_vcpu_cache, vmx); | ||
2494 | } | ||
2495 | |||
2496 | static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) | ||
2497 | { | ||
2498 | int err; | ||
2499 | struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); | ||
2500 | int cpu; | ||
2501 | |||
2502 | if (!vmx) | ||
2503 | return ERR_PTR(-ENOMEM); | ||
2504 | |||
2505 | err = kvm_vcpu_init(&vmx->vcpu, kvm, id); | ||
2506 | if (err) | ||
2507 | goto free_vcpu; | ||
2508 | |||
2509 | vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
2510 | if (!vmx->guest_msrs) { | ||
2511 | err = -ENOMEM; | ||
2512 | goto uninit_vcpu; | ||
2513 | } | ||
2514 | |||
2515 | vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
2516 | if (!vmx->host_msrs) | ||
2517 | goto free_guest_msrs; | ||
2518 | |||
2519 | vmx->vmcs = alloc_vmcs(); | ||
2520 | if (!vmx->vmcs) | ||
2521 | goto free_msrs; | ||
2522 | |||
2523 | vmcs_clear(vmx->vmcs); | ||
2524 | |||
2525 | cpu = get_cpu(); | ||
2526 | vmx_vcpu_load(&vmx->vcpu, cpu); | ||
2527 | err = vmx_vcpu_setup(vmx); | ||
2528 | vmx_vcpu_put(&vmx->vcpu); | ||
2529 | put_cpu(); | ||
2530 | if (err) | ||
2531 | goto free_vmcs; | ||
2532 | |||
2533 | return &vmx->vcpu; | ||
2534 | |||
2535 | free_vmcs: | ||
2536 | free_vmcs(vmx->vmcs); | ||
2537 | free_msrs: | ||
2538 | kfree(vmx->host_msrs); | ||
2539 | free_guest_msrs: | ||
2540 | kfree(vmx->guest_msrs); | ||
2541 | uninit_vcpu: | ||
2542 | kvm_vcpu_uninit(&vmx->vcpu); | ||
2543 | free_vcpu: | ||
2544 | kmem_cache_free(kvm_vcpu_cache, vmx); | ||
2545 | return ERR_PTR(err); | ||
2546 | } | ||
2547 | |||
2548 | static void __init vmx_check_processor_compat(void *rtn) | ||
2549 | { | ||
2550 | struct vmcs_config vmcs_conf; | ||
2551 | |||
2552 | *(int *)rtn = 0; | ||
2553 | if (setup_vmcs_config(&vmcs_conf) < 0) | ||
2554 | *(int *)rtn = -EIO; | ||
2555 | if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) { | ||
2556 | printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n", | ||
2557 | smp_processor_id()); | ||
2558 | *(int *)rtn = -EIO; | ||
2559 | } | ||
2560 | } | ||
2561 | |||
2562 | static struct kvm_x86_ops vmx_x86_ops = { | ||
2563 | .cpu_has_kvm_support = cpu_has_kvm_support, | ||
2564 | .disabled_by_bios = vmx_disabled_by_bios, | ||
2565 | .hardware_setup = hardware_setup, | ||
2566 | .hardware_unsetup = hardware_unsetup, | ||
2567 | .check_processor_compatibility = vmx_check_processor_compat, | ||
2568 | .hardware_enable = hardware_enable, | ||
2569 | .hardware_disable = hardware_disable, | ||
2570 | |||
2571 | .vcpu_create = vmx_create_vcpu, | ||
2572 | .vcpu_free = vmx_free_vcpu, | ||
2573 | .vcpu_reset = vmx_vcpu_reset, | ||
2574 | |||
2575 | .prepare_guest_switch = vmx_save_host_state, | ||
2576 | .vcpu_load = vmx_vcpu_load, | ||
2577 | .vcpu_put = vmx_vcpu_put, | ||
2578 | .vcpu_decache = vmx_vcpu_decache, | ||
2579 | |||
2580 | .set_guest_debug = set_guest_debug, | ||
2581 | .guest_debug_pre = kvm_guest_debug_pre, | ||
2582 | .get_msr = vmx_get_msr, | ||
2583 | .set_msr = vmx_set_msr, | ||
2584 | .get_segment_base = vmx_get_segment_base, | ||
2585 | .get_segment = vmx_get_segment, | ||
2586 | .set_segment = vmx_set_segment, | ||
2587 | .get_cs_db_l_bits = vmx_get_cs_db_l_bits, | ||
2588 | .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits, | ||
2589 | .set_cr0 = vmx_set_cr0, | ||
2590 | .set_cr3 = vmx_set_cr3, | ||
2591 | .set_cr4 = vmx_set_cr4, | ||
2592 | #ifdef CONFIG_X86_64 | ||
2593 | .set_efer = vmx_set_efer, | ||
2594 | #endif | ||
2595 | .get_idt = vmx_get_idt, | ||
2596 | .set_idt = vmx_set_idt, | ||
2597 | .get_gdt = vmx_get_gdt, | ||
2598 | .set_gdt = vmx_set_gdt, | ||
2599 | .cache_regs = vcpu_load_rsp_rip, | ||
2600 | .decache_regs = vcpu_put_rsp_rip, | ||
2601 | .get_rflags = vmx_get_rflags, | ||
2602 | .set_rflags = vmx_set_rflags, | ||
2603 | |||
2604 | .tlb_flush = vmx_flush_tlb, | ||
2605 | |||
2606 | .run = vmx_vcpu_run, | ||
2607 | .handle_exit = kvm_handle_exit, | ||
2608 | .skip_emulated_instruction = skip_emulated_instruction, | ||
2609 | .patch_hypercall = vmx_patch_hypercall, | ||
2610 | .get_irq = vmx_get_irq, | ||
2611 | .set_irq = vmx_inject_irq, | ||
2612 | .queue_exception = vmx_queue_exception, | ||
2613 | .exception_injected = vmx_exception_injected, | ||
2614 | .inject_pending_irq = vmx_intr_assist, | ||
2615 | .inject_pending_vectors = do_interrupt_requests, | ||
2616 | |||
2617 | .set_tss_addr = vmx_set_tss_addr, | ||
2618 | }; | ||
2619 | |||
2620 | static int __init vmx_init(void) | ||
2621 | { | ||
2622 | void *iova; | ||
2623 | int r; | ||
2624 | |||
2625 | vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); | ||
2626 | if (!vmx_io_bitmap_a) | ||
2627 | return -ENOMEM; | ||
2628 | |||
2629 | vmx_io_bitmap_b = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); | ||
2630 | if (!vmx_io_bitmap_b) { | ||
2631 | r = -ENOMEM; | ||
2632 | goto out; | ||
2633 | } | ||
2634 | |||
2635 | /* | ||
2636 | * Allow direct access to the PC debug port (it is often used for I/O | ||
2637 | * delays, but the vmexits simply slow things down). | ||
2638 | */ | ||
2639 | iova = kmap(vmx_io_bitmap_a); | ||
2640 | memset(iova, 0xff, PAGE_SIZE); | ||
2641 | clear_bit(0x80, iova); | ||
2642 | kunmap(vmx_io_bitmap_a); | ||
2643 | |||
2644 | iova = kmap(vmx_io_bitmap_b); | ||
2645 | memset(iova, 0xff, PAGE_SIZE); | ||
2646 | kunmap(vmx_io_bitmap_b); | ||
2647 | |||
2648 | r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE); | ||
2649 | if (r) | ||
2650 | goto out1; | ||
2651 | |||
2652 | if (bypass_guest_pf) | ||
2653 | kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull); | ||
2654 | |||
2655 | return 0; | ||
2656 | |||
2657 | out1: | ||
2658 | __free_page(vmx_io_bitmap_b); | ||
2659 | out: | ||
2660 | __free_page(vmx_io_bitmap_a); | ||
2661 | return r; | ||
2662 | } | ||
2663 | |||
2664 | static void __exit vmx_exit(void) | ||
2665 | { | ||
2666 | __free_page(vmx_io_bitmap_b); | ||
2667 | __free_page(vmx_io_bitmap_a); | ||
2668 | |||
2669 | kvm_exit(); | ||
2670 | } | ||
2671 | |||
2672 | module_init(vmx_init) | ||
2673 | module_exit(vmx_exit) | ||