Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r-- | arch/x86/kvm/vmx.c | 2671
1 file changed, 2671 insertions, 0 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
new file mode 100644
index 000000000000..fc494aff5d8b
--- /dev/null
+++ b/arch/x86/kvm/vmx.c
@@ -0,0 +1,2671 @@
1 | /* | ||
2 | * Kernel-based Virtual Machine driver for Linux | ||
3 | * | ||
4 | * This module enables machines with Intel VT-x extensions to run virtual | ||
5 | * machines without emulation or binary translation. | ||
6 | * | ||
7 | * Copyright (C) 2006 Qumranet, Inc. | ||
8 | * | ||
9 | * Authors: | ||
10 | * Avi Kivity <avi@qumranet.com> | ||
11 | * Yaniv Kamay <yaniv@qumranet.com> | ||
12 | * | ||
13 | * This work is licensed under the terms of the GNU GPL, version 2. See | ||
14 | * the COPYING file in the top-level directory. | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #include "irq.h" | ||
19 | #include "vmx.h" | ||
20 | #include "segment_descriptor.h" | ||
21 | #include "mmu.h" | ||
22 | |||
23 | #include <linux/kvm_host.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/mm.h> | ||
27 | #include <linux/highmem.h> | ||
28 | #include <linux/sched.h> | ||
29 | #include <linux/moduleparam.h> | ||
30 | |||
31 | #include <asm/io.h> | ||
32 | #include <asm/desc.h> | ||
33 | |||
34 | MODULE_AUTHOR("Qumranet"); | ||
35 | MODULE_LICENSE("GPL"); | ||
36 | |||
37 | static int bypass_guest_pf = 1; | ||
38 | module_param(bypass_guest_pf, bool, 0); | ||
39 | |||
40 | struct vmcs { | ||
41 | u32 revision_id; | ||
42 | u32 abort; | ||
43 | char data[0]; | ||
44 | }; | ||
45 | |||
46 | struct vcpu_vmx { | ||
47 | struct kvm_vcpu vcpu; | ||
48 | int launched; | ||
49 | u8 fail; | ||
50 | u32 idt_vectoring_info; | ||
51 | struct kvm_msr_entry *guest_msrs; | ||
52 | struct kvm_msr_entry *host_msrs; | ||
53 | int nmsrs; | ||
54 | int save_nmsrs; | ||
55 | int msr_offset_efer; | ||
56 | #ifdef CONFIG_X86_64 | ||
57 | int msr_offset_kernel_gs_base; | ||
58 | #endif | ||
59 | struct vmcs *vmcs; | ||
60 | struct { | ||
61 | int loaded; | ||
62 | u16 fs_sel, gs_sel, ldt_sel; | ||
63 | int gs_ldt_reload_needed; | ||
64 | int fs_reload_needed; | ||
65 | int guest_efer_loaded; | ||
66 | } host_state; | ||
67 | struct { | ||
68 | struct { | ||
69 | bool pending; | ||
70 | u8 vector; | ||
71 | unsigned rip; | ||
72 | } irq; | ||
73 | } rmode; | ||
74 | }; | ||
75 | |||
76 | static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) | ||
77 | { | ||
78 | return container_of(vcpu, struct vcpu_vmx, vcpu); | ||
79 | } | ||
80 | |||
81 | static int init_rmode_tss(struct kvm *kvm); | ||
82 | |||
83 | static DEFINE_PER_CPU(struct vmcs *, vmxarea); | ||
84 | static DEFINE_PER_CPU(struct vmcs *, current_vmcs); | ||
85 | |||
86 | static struct page *vmx_io_bitmap_a; | ||
87 | static struct page *vmx_io_bitmap_b; | ||
88 | |||
89 | static struct vmcs_config { | ||
90 | int size; | ||
91 | int order; | ||
92 | u32 revision_id; | ||
93 | u32 pin_based_exec_ctrl; | ||
94 | u32 cpu_based_exec_ctrl; | ||
95 | u32 cpu_based_2nd_exec_ctrl; | ||
96 | u32 vmexit_ctrl; | ||
97 | u32 vmentry_ctrl; | ||
98 | } vmcs_config; | ||
99 | |||
100 | #define VMX_SEGMENT_FIELD(seg) \ | ||
101 | [VCPU_SREG_##seg] = { \ | ||
102 | .selector = GUEST_##seg##_SELECTOR, \ | ||
103 | .base = GUEST_##seg##_BASE, \ | ||
104 | .limit = GUEST_##seg##_LIMIT, \ | ||
105 | .ar_bytes = GUEST_##seg##_AR_BYTES, \ | ||
106 | } | ||
107 | |||
108 | static struct kvm_vmx_segment_field { | ||
109 | unsigned selector; | ||
110 | unsigned base; | ||
111 | unsigned limit; | ||
112 | unsigned ar_bytes; | ||
113 | } kvm_vmx_segment_fields[] = { | ||
114 | VMX_SEGMENT_FIELD(CS), | ||
115 | VMX_SEGMENT_FIELD(DS), | ||
116 | VMX_SEGMENT_FIELD(ES), | ||
117 | VMX_SEGMENT_FIELD(FS), | ||
118 | VMX_SEGMENT_FIELD(GS), | ||
119 | VMX_SEGMENT_FIELD(SS), | ||
120 | VMX_SEGMENT_FIELD(TR), | ||
121 | VMX_SEGMENT_FIELD(LDTR), | ||
122 | }; | ||
123 | |||
124 | /* | ||
125 | * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it | ||
126 | * away by decrementing the array size. | ||
127 | */ | ||
128 | static const u32 vmx_msr_index[] = { | ||
129 | #ifdef CONFIG_X86_64 | ||
130 | MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE, | ||
131 | #endif | ||
132 | MSR_EFER, MSR_K6_STAR, | ||
133 | }; | ||
134 | #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index) | ||
135 | |||
136 | static void load_msrs(struct kvm_msr_entry *e, int n) | ||
137 | { | ||
138 | int i; | ||
139 | |||
140 | for (i = 0; i < n; ++i) | ||
141 | wrmsrl(e[i].index, e[i].data); | ||
142 | } | ||
143 | |||
144 | static void save_msrs(struct kvm_msr_entry *e, int n) | ||
145 | { | ||
146 | int i; | ||
147 | |||
148 | for (i = 0; i < n; ++i) | ||
149 | rdmsrl(e[i].index, e[i].data); | ||
150 | } | ||
151 | |||
152 | static inline int is_page_fault(u32 intr_info) | ||
153 | { | ||
154 | return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | | ||
155 | INTR_INFO_VALID_MASK)) == | ||
156 | (INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK); | ||
157 | } | ||
158 | |||
159 | static inline int is_no_device(u32 intr_info) | ||
160 | { | ||
161 | return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | | ||
162 | INTR_INFO_VALID_MASK)) == | ||
163 | (INTR_TYPE_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK); | ||
164 | } | ||
165 | |||
166 | static inline int is_invalid_opcode(u32 intr_info) | ||
167 | { | ||
168 | return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | | ||
169 | INTR_INFO_VALID_MASK)) == | ||
170 | (INTR_TYPE_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK); | ||
171 | } | ||
172 | |||
173 | static inline int is_external_interrupt(u32 intr_info) | ||
174 | { | ||
175 | return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) | ||
176 | == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK); | ||
177 | } | ||
178 | |||
179 | static inline int cpu_has_vmx_tpr_shadow(void) | ||
180 | { | ||
181 | return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW); | ||
182 | } | ||
183 | |||
184 | static inline int vm_need_tpr_shadow(struct kvm *kvm) | ||
185 | { | ||
186 | return ((cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm))); | ||
187 | } | ||
188 | |||
189 | static inline int cpu_has_secondary_exec_ctrls(void) | ||
190 | { | ||
191 | return (vmcs_config.cpu_based_exec_ctrl & | ||
192 | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS); | ||
193 | } | ||
194 | |||
195 | static inline int cpu_has_vmx_virtualize_apic_accesses(void) | ||
196 | { | ||
197 | return (vmcs_config.cpu_based_2nd_exec_ctrl & | ||
198 | SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES); | ||
199 | } | ||
200 | |||
201 | static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm) | ||
202 | { | ||
203 | return ((cpu_has_vmx_virtualize_apic_accesses()) && | ||
204 | (irqchip_in_kernel(kvm))); | ||
205 | } | ||
206 | |||
207 | static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) | ||
208 | { | ||
209 | int i; | ||
210 | |||
211 | for (i = 0; i < vmx->nmsrs; ++i) | ||
212 | if (vmx->guest_msrs[i].index == msr) | ||
213 | return i; | ||
214 | return -1; | ||
215 | } | ||
216 | |||
217 | static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr) | ||
218 | { | ||
219 | int i; | ||
220 | |||
221 | i = __find_msr_index(vmx, msr); | ||
222 | if (i >= 0) | ||
223 | return &vmx->guest_msrs[i]; | ||
224 | return NULL; | ||
225 | } | ||
226 | |||
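/*
 * Flush this VMCS from the processor and mark it inactive, so that it can
 * later be loaded again with VMPTRLD, possibly on a different cpu.
 */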
227 | static void vmcs_clear(struct vmcs *vmcs) | ||
228 | { | ||
229 | u64 phys_addr = __pa(vmcs); | ||
230 | u8 error; | ||
231 | |||
232 | asm volatile (ASM_VMX_VMCLEAR_RAX "; setna %0" | ||
233 | : "=g"(error) : "a"(&phys_addr), "m"(phys_addr) | ||
234 | : "cc", "memory"); | ||
235 | if (error) | ||
236 | printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n", | ||
237 | vmcs, phys_addr); | ||
238 | } | ||
239 | |||
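/*
 * Runs via IPI on the cpu that last used this vcpu's vmcs: clear the vmcs
 * and drop the per-cpu current_vmcs pointer so the vcpu can migrate to
 * another cpu.
 */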
240 | static void __vcpu_clear(void *arg) | ||
241 | { | ||
242 | struct vcpu_vmx *vmx = arg; | ||
243 | int cpu = raw_smp_processor_id(); | ||
244 | |||
245 | if (vmx->vcpu.cpu == cpu) | ||
246 | vmcs_clear(vmx->vmcs); | ||
247 | if (per_cpu(current_vmcs, cpu) == vmx->vmcs) | ||
248 | per_cpu(current_vmcs, cpu) = NULL; | ||
249 | rdtscll(vmx->vcpu.arch.host_tsc); | ||
250 | } | ||
251 | |||
252 | static void vcpu_clear(struct vcpu_vmx *vmx) | ||
253 | { | ||
254 | if (vmx->vcpu.cpu == -1) | ||
255 | return; | ||
256 | smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 0, 1); | ||
257 | vmx->launched = 0; | ||
258 | } | ||
259 | |||
260 | static unsigned long vmcs_readl(unsigned long field) | ||
261 | { | ||
262 | unsigned long value; | ||
263 | |||
264 | asm volatile (ASM_VMX_VMREAD_RDX_RAX | ||
265 | : "=a"(value) : "d"(field) : "cc"); | ||
266 | return value; | ||
267 | } | ||
268 | |||
269 | static u16 vmcs_read16(unsigned long field) | ||
270 | { | ||
271 | return vmcs_readl(field); | ||
272 | } | ||
273 | |||
274 | static u32 vmcs_read32(unsigned long field) | ||
275 | { | ||
276 | return vmcs_readl(field); | ||
277 | } | ||
278 | |||
279 | static u64 vmcs_read64(unsigned long field) | ||
280 | { | ||
281 | #ifdef CONFIG_X86_64 | ||
282 | return vmcs_readl(field); | ||
283 | #else | ||
284 | return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32); | ||
285 | #endif | ||
286 | } | ||
287 | |||
288 | static noinline void vmwrite_error(unsigned long field, unsigned long value) | ||
289 | { | ||
290 | printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n", | ||
291 | field, value, vmcs_read32(VM_INSTRUCTION_ERROR)); | ||
292 | dump_stack(); | ||
293 | } | ||
294 | |||
295 | static void vmcs_writel(unsigned long field, unsigned long value) | ||
296 | { | ||
297 | u8 error; | ||
298 | |||
299 | asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0" | ||
300 | : "=q"(error) : "a"(value), "d"(field) : "cc"); | ||
301 | if (unlikely(error)) | ||
302 | vmwrite_error(field, value); | ||
303 | } | ||
304 | |||
305 | static void vmcs_write16(unsigned long field, u16 value) | ||
306 | { | ||
307 | vmcs_writel(field, value); | ||
308 | } | ||
309 | |||
310 | static void vmcs_write32(unsigned long field, u32 value) | ||
311 | { | ||
312 | vmcs_writel(field, value); | ||
313 | } | ||
314 | |||
315 | static void vmcs_write64(unsigned long field, u64 value) | ||
316 | { | ||
317 | #ifdef CONFIG_X86_64 | ||
318 | vmcs_writel(field, value); | ||
319 | #else | ||
320 | vmcs_writel(field, value); | ||
321 | asm volatile (""); | ||
322 | vmcs_writel(field+1, value >> 32); | ||
323 | #endif | ||
324 | } | ||
325 | |||
326 | static void vmcs_clear_bits(unsigned long field, u32 mask) | ||
327 | { | ||
328 | vmcs_writel(field, vmcs_readl(field) & ~mask); | ||
329 | } | ||
330 | |||
331 | static void vmcs_set_bits(unsigned long field, u32 mask) | ||
332 | { | ||
333 | vmcs_writel(field, vmcs_readl(field) | mask); | ||
334 | } | ||
335 | |||
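/*
 * Decide which exceptions cause a vmexit: #PF and #UD are always
 * intercepted, #NM only while the guest fpu is not loaded, #DB when guest
 * debugging is enabled, and every vector while emulating real mode.
 */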
336 | static void update_exception_bitmap(struct kvm_vcpu *vcpu) | ||
337 | { | ||
338 | u32 eb; | ||
339 | |||
340 | eb = (1u << PF_VECTOR) | (1u << UD_VECTOR); | ||
341 | if (!vcpu->fpu_active) | ||
342 | eb |= 1u << NM_VECTOR; | ||
343 | if (vcpu->guest_debug.enabled) | ||
344 | eb |= 1u << 1; | ||
345 | if (vcpu->arch.rmode.active) | ||
346 | eb = ~0; | ||
347 | vmcs_write32(EXCEPTION_BITMAP, eb); | ||
348 | } | ||
349 | |||
350 | static void reload_tss(void) | ||
351 | { | ||
352 | #ifndef CONFIG_X86_64 | ||
353 | |||
354 | /* | ||
355 | * VT restores TR but not its size. Useless. | ||
356 | */ | ||
357 | struct descriptor_table gdt; | ||
358 | struct segment_descriptor *descs; | ||
359 | |||
360 | get_gdt(&gdt); | ||
361 | descs = (void *)gdt.base; | ||
362 | descs[GDT_ENTRY_TSS].type = 9; /* available TSS */ | ||
363 | load_TR_desc(); | ||
364 | #endif | ||
365 | } | ||
366 | |||
367 | static void load_transition_efer(struct vcpu_vmx *vmx) | ||
368 | { | ||
369 | int efer_offset = vmx->msr_offset_efer; | ||
370 | u64 host_efer, guest_efer, ignore_bits; | ||
371 | |||
372 | if (efer_offset < 0) | ||
373 | return; | ||
374 | host_efer = vmx->host_msrs[efer_offset].data; | ||
375 | guest_efer = vmx->guest_msrs[efer_offset].data; | ||
376 | /* | ||
377 | * NX is emulated; LMA and LME handled by hardware; SCE meaningless | ||
378 | * outside long mode | ||
379 | */ | ||
380 | ignore_bits = EFER_NX | EFER_SCE; | ||
381 | #ifdef CONFIG_X86_64 | ||
382 | ignore_bits |= EFER_LMA | EFER_LME; | ||
383 | /* SCE is meaningful only in long mode on Intel */ | ||
384 | if (guest_efer & EFER_LMA) | ||
385 | ignore_bits &= ~(u64)EFER_SCE; | ||
386 | #endif | ||
387 | if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits)) | ||
388 | return; | ||
389 | |||
390 | vmx->host_state.guest_efer_loaded = 1; | ||
391 | guest_efer &= ~ignore_bits; | ||
392 | guest_efer |= host_efer & ignore_bits; | ||
393 | wrmsrl(MSR_EFER, guest_efer); | ||
394 | vmx->vcpu.stat.efer_reload++; | ||
395 | } | ||
396 | |||
397 | static void reload_host_efer(struct vcpu_vmx *vmx) | ||
398 | { | ||
399 | if (vmx->host_state.guest_efer_loaded) { | ||
400 | vmx->host_state.guest_efer_loaded = 0; | ||
401 | load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1); | ||
402 | } | ||
403 | } | ||
404 | |||
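/*
 * Called on the way into the guest: remember the host selectors that
 * hardware does not restore on vmexit and load the guest's msr values.
 */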
405 | static void vmx_save_host_state(struct kvm_vcpu *vcpu) | ||
406 | { | ||
407 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
408 | |||
409 | if (vmx->host_state.loaded) | ||
410 | return; | ||
411 | |||
412 | vmx->host_state.loaded = 1; | ||
413 | /* | ||
414 | * Set host fs and gs selectors. Unfortunately, 22.2.3 does not | ||
415 | * allow segment selectors with cpl > 0 or ti == 1. | ||
416 | */ | ||
417 | vmx->host_state.ldt_sel = read_ldt(); | ||
418 | vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel; | ||
419 | vmx->host_state.fs_sel = read_fs(); | ||
420 | if (!(vmx->host_state.fs_sel & 7)) { | ||
421 | vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); | ||
422 | vmx->host_state.fs_reload_needed = 0; | ||
423 | } else { | ||
424 | vmcs_write16(HOST_FS_SELECTOR, 0); | ||
425 | vmx->host_state.fs_reload_needed = 1; | ||
426 | } | ||
427 | vmx->host_state.gs_sel = read_gs(); | ||
428 | if (!(vmx->host_state.gs_sel & 7)) | ||
429 | vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel); | ||
430 | else { | ||
431 | vmcs_write16(HOST_GS_SELECTOR, 0); | ||
432 | vmx->host_state.gs_ldt_reload_needed = 1; | ||
433 | } | ||
434 | |||
435 | #ifdef CONFIG_X86_64 | ||
436 | vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE)); | ||
437 | vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE)); | ||
438 | #else | ||
439 | vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel)); | ||
440 | vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel)); | ||
441 | #endif | ||
442 | |||
443 | #ifdef CONFIG_X86_64 | ||
444 | if (is_long_mode(&vmx->vcpu)) | ||
445 | save_msrs(vmx->host_msrs + | ||
446 | vmx->msr_offset_kernel_gs_base, 1); | ||
447 | |||
448 | #endif | ||
449 | load_msrs(vmx->guest_msrs, vmx->save_nmsrs); | ||
450 | load_transition_efer(vmx); | ||
451 | } | ||
452 | |||
453 | static void vmx_load_host_state(struct vcpu_vmx *vmx) | ||
454 | { | ||
455 | unsigned long flags; | ||
456 | |||
457 | if (!vmx->host_state.loaded) | ||
458 | return; | ||
459 | |||
460 | ++vmx->vcpu.stat.host_state_reload; | ||
461 | vmx->host_state.loaded = 0; | ||
462 | if (vmx->host_state.fs_reload_needed) | ||
463 | load_fs(vmx->host_state.fs_sel); | ||
464 | if (vmx->host_state.gs_ldt_reload_needed) { | ||
465 | load_ldt(vmx->host_state.ldt_sel); | ||
466 | /* | ||
467 | * If we have to reload gs, we must take care to | ||
468 | * preserve our gs base. | ||
469 | */ | ||
470 | local_irq_save(flags); | ||
471 | load_gs(vmx->host_state.gs_sel); | ||
472 | #ifdef CONFIG_X86_64 | ||
473 | wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE)); | ||
474 | #endif | ||
475 | local_irq_restore(flags); | ||
476 | } | ||
477 | reload_tss(); | ||
478 | save_msrs(vmx->guest_msrs, vmx->save_nmsrs); | ||
479 | load_msrs(vmx->host_msrs, vmx->save_nmsrs); | ||
480 | reload_host_efer(vmx); | ||
481 | } | ||
482 | |||
483 | /* | ||
484 | * Switches to specified vcpu, until a matching vcpu_put(), but assumes | ||
485 | * vcpu mutex is already taken. | ||
486 | */ | ||
487 | static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | ||
488 | { | ||
489 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
490 | u64 phys_addr = __pa(vmx->vmcs); | ||
491 | u64 tsc_this, delta; | ||
492 | |||
493 | if (vcpu->cpu != cpu) { | ||
494 | vcpu_clear(vmx); | ||
495 | kvm_migrate_apic_timer(vcpu); | ||
496 | } | ||
497 | |||
498 | if (per_cpu(current_vmcs, cpu) != vmx->vmcs) { | ||
499 | u8 error; | ||
500 | |||
501 | per_cpu(current_vmcs, cpu) = vmx->vmcs; | ||
502 | asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0" | ||
503 | : "=g"(error) : "a"(&phys_addr), "m"(phys_addr) | ||
504 | : "cc"); | ||
505 | if (error) | ||
506 | printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n", | ||
507 | vmx->vmcs, phys_addr); | ||
508 | } | ||
509 | |||
510 | if (vcpu->cpu != cpu) { | ||
511 | struct descriptor_table dt; | ||
512 | unsigned long sysenter_esp; | ||
513 | |||
514 | vcpu->cpu = cpu; | ||
515 | /* | ||
516 | * Linux uses per-cpu TSS and GDT, so set these when switching | ||
517 | * processors. | ||
518 | */ | ||
519 | vmcs_writel(HOST_TR_BASE, read_tr_base()); /* 22.2.4 */ | ||
520 | get_gdt(&dt); | ||
521 | vmcs_writel(HOST_GDTR_BASE, dt.base); /* 22.2.4 */ | ||
522 | |||
523 | rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); | ||
524 | vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ | ||
525 | |||
526 | /* | ||
527 | * Make sure the time stamp counter is monotonic. | ||
528 | */ | ||
529 | rdtscll(tsc_this); | ||
530 | delta = vcpu->arch.host_tsc - tsc_this; | ||
531 | vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta); | ||
532 | } | ||
533 | } | ||
534 | |||
535 | static void vmx_vcpu_put(struct kvm_vcpu *vcpu) | ||
536 | { | ||
537 | vmx_load_host_state(to_vmx(vcpu)); | ||
538 | } | ||
539 | |||
540 | static void vmx_fpu_activate(struct kvm_vcpu *vcpu) | ||
541 | { | ||
542 | if (vcpu->fpu_active) | ||
543 | return; | ||
544 | vcpu->fpu_active = 1; | ||
545 | vmcs_clear_bits(GUEST_CR0, X86_CR0_TS); | ||
546 | if (vcpu->arch.cr0 & X86_CR0_TS) | ||
547 | vmcs_set_bits(GUEST_CR0, X86_CR0_TS); | ||
548 | update_exception_bitmap(vcpu); | ||
549 | } | ||
550 | |||
551 | static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu) | ||
552 | { | ||
553 | if (!vcpu->fpu_active) | ||
554 | return; | ||
555 | vcpu->fpu_active = 0; | ||
556 | vmcs_set_bits(GUEST_CR0, X86_CR0_TS); | ||
557 | update_exception_bitmap(vcpu); | ||
558 | } | ||
559 | |||
560 | static void vmx_vcpu_decache(struct kvm_vcpu *vcpu) | ||
561 | { | ||
562 | vcpu_clear(to_vmx(vcpu)); | ||
563 | } | ||
564 | |||
565 | static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) | ||
566 | { | ||
567 | return vmcs_readl(GUEST_RFLAGS); | ||
568 | } | ||
569 | |||
570 | static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) | ||
571 | { | ||
572 | if (vcpu->arch.rmode.active) | ||
573 | rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; | ||
574 | vmcs_writel(GUEST_RFLAGS, rflags); | ||
575 | } | ||
576 | |||
577 | static void skip_emulated_instruction(struct kvm_vcpu *vcpu) | ||
578 | { | ||
579 | unsigned long rip; | ||
580 | u32 interruptibility; | ||
581 | |||
582 | rip = vmcs_readl(GUEST_RIP); | ||
583 | rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN); | ||
584 | vmcs_writel(GUEST_RIP, rip); | ||
585 | |||
586 | /* | ||
587 | * We emulated an instruction, so temporary interrupt blocking | ||
588 | * should be removed, if set. | ||
589 | */ | ||
590 | interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); | ||
591 | if (interruptibility & 3) | ||
592 | vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, | ||
593 | interruptibility & ~3); | ||
594 | vcpu->arch.interrupt_window_open = 1; | ||
595 | } | ||
596 | |||
597 | static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, | ||
598 | bool has_error_code, u32 error_code) | ||
599 | { | ||
600 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, | ||
601 | nr | INTR_TYPE_EXCEPTION | ||
602 | | (has_error_code ? INTR_INFO_DELIEVER_CODE_MASK : 0) | ||
603 | | INTR_INFO_VALID_MASK); | ||
604 | if (has_error_code) | ||
605 | vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); | ||
606 | } | ||
607 | |||
608 | static bool vmx_exception_injected(struct kvm_vcpu *vcpu) | ||
609 | { | ||
610 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
611 | |||
612 | return !(vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); | ||
613 | } | ||
614 | |||
615 | /* | ||
616 | * Swap MSR entry in host/guest MSR entry array. | ||
617 | */ | ||
618 | #ifdef CONFIG_X86_64 | ||
619 | static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) | ||
620 | { | ||
621 | struct kvm_msr_entry tmp; | ||
622 | |||
623 | tmp = vmx->guest_msrs[to]; | ||
624 | vmx->guest_msrs[to] = vmx->guest_msrs[from]; | ||
625 | vmx->guest_msrs[from] = tmp; | ||
626 | tmp = vmx->host_msrs[to]; | ||
627 | vmx->host_msrs[to] = vmx->host_msrs[from]; | ||
628 | vmx->host_msrs[from] = tmp; | ||
629 | } | ||
630 | #endif | ||
631 | |||
632 | /* | ||
633 | * Set up the vmcs to automatically save and restore system | ||
634 | * msrs. Don't touch the 64-bit msrs if the guest is in legacy | ||
635 | * mode, as fiddling with msrs is very expensive. | ||
636 | */ | ||
637 | static void setup_msrs(struct vcpu_vmx *vmx) | ||
638 | { | ||
639 | int save_nmsrs; | ||
640 | |||
641 | save_nmsrs = 0; | ||
642 | #ifdef CONFIG_X86_64 | ||
643 | if (is_long_mode(&vmx->vcpu)) { | ||
644 | int index; | ||
645 | |||
646 | index = __find_msr_index(vmx, MSR_SYSCALL_MASK); | ||
647 | if (index >= 0) | ||
648 | move_msr_up(vmx, index, save_nmsrs++); | ||
649 | index = __find_msr_index(vmx, MSR_LSTAR); | ||
650 | if (index >= 0) | ||
651 | move_msr_up(vmx, index, save_nmsrs++); | ||
652 | index = __find_msr_index(vmx, MSR_CSTAR); | ||
653 | if (index >= 0) | ||
654 | move_msr_up(vmx, index, save_nmsrs++); | ||
655 | index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE); | ||
656 | if (index >= 0) | ||
657 | move_msr_up(vmx, index, save_nmsrs++); | ||
658 | /* | ||
659 | * MSR_K6_STAR is only needed on long mode guests, and only | ||
660 | * if efer.sce is enabled. | ||
661 | */ | ||
662 | index = __find_msr_index(vmx, MSR_K6_STAR); | ||
663 | if ((index >= 0) && (vmx->vcpu.arch.shadow_efer & EFER_SCE)) | ||
664 | move_msr_up(vmx, index, save_nmsrs++); | ||
665 | } | ||
666 | #endif | ||
667 | vmx->save_nmsrs = save_nmsrs; | ||
668 | |||
669 | #ifdef CONFIG_X86_64 | ||
670 | vmx->msr_offset_kernel_gs_base = | ||
671 | __find_msr_index(vmx, MSR_KERNEL_GS_BASE); | ||
672 | #endif | ||
673 | vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER); | ||
674 | } | ||
675 | |||
676 | /* | ||
677 | * reads and returns guest's timestamp counter "register" | ||
678 | * guest_tsc = host_tsc + tsc_offset -- 21.3 | ||
679 | */ | ||
680 | static u64 guest_read_tsc(void) | ||
681 | { | ||
682 | u64 host_tsc, tsc_offset; | ||
683 | |||
684 | rdtscll(host_tsc); | ||
685 | tsc_offset = vmcs_read64(TSC_OFFSET); | ||
686 | return host_tsc + tsc_offset; | ||
687 | } | ||
688 | |||
689 | /* | ||
690 | * writes 'guest_tsc' into guest's timestamp counter "register" | ||
691 | * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc | ||
692 | */ | ||
693 | static void guest_write_tsc(u64 guest_tsc) | ||
694 | { | ||
695 | u64 host_tsc; | ||
696 | |||
697 | rdtscll(host_tsc); | ||
698 | vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc); | ||
699 | } | ||
700 | |||
701 | /* | ||
702 | * Reads an msr value (of 'msr_index') into 'pdata'. | ||
703 | * Returns 0 on success, non-0 otherwise. | ||
704 | * Assumes vcpu_load() was already called. | ||
705 | */ | ||
706 | static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) | ||
707 | { | ||
708 | u64 data; | ||
709 | struct kvm_msr_entry *msr; | ||
710 | |||
711 | if (!pdata) { | ||
712 | printk(KERN_ERR "BUG: get_msr called with NULL pdata\n"); | ||
713 | return -EINVAL; | ||
714 | } | ||
715 | |||
716 | switch (msr_index) { | ||
717 | #ifdef CONFIG_X86_64 | ||
718 | case MSR_FS_BASE: | ||
719 | data = vmcs_readl(GUEST_FS_BASE); | ||
720 | break; | ||
721 | case MSR_GS_BASE: | ||
722 | data = vmcs_readl(GUEST_GS_BASE); | ||
723 | break; | ||
724 | case MSR_EFER: | ||
725 | return kvm_get_msr_common(vcpu, msr_index, pdata); | ||
726 | #endif | ||
727 | case MSR_IA32_TIME_STAMP_COUNTER: | ||
728 | data = guest_read_tsc(); | ||
729 | break; | ||
730 | case MSR_IA32_SYSENTER_CS: | ||
731 | data = vmcs_read32(GUEST_SYSENTER_CS); | ||
732 | break; | ||
733 | case MSR_IA32_SYSENTER_EIP: | ||
734 | data = vmcs_readl(GUEST_SYSENTER_EIP); | ||
735 | break; | ||
736 | case MSR_IA32_SYSENTER_ESP: | ||
737 | data = vmcs_readl(GUEST_SYSENTER_ESP); | ||
738 | break; | ||
739 | default: | ||
740 | msr = find_msr_entry(to_vmx(vcpu), msr_index); | ||
741 | if (msr) { | ||
742 | data = msr->data; | ||
743 | break; | ||
744 | } | ||
745 | return kvm_get_msr_common(vcpu, msr_index, pdata); | ||
746 | } | ||
747 | |||
748 | *pdata = data; | ||
749 | return 0; | ||
750 | } | ||
751 | |||
752 | /* | ||
753 | * Writes the msr value into the appropriate "register". | ||
754 | * Returns 0 on success, non-0 otherwise. | ||
755 | * Assumes vcpu_load() was already called. | ||
756 | */ | ||
757 | static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) | ||
758 | { | ||
759 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
760 | struct kvm_msr_entry *msr; | ||
761 | int ret = 0; | ||
762 | |||
763 | switch (msr_index) { | ||
764 | #ifdef CONFIG_X86_64 | ||
765 | case MSR_EFER: | ||
766 | ret = kvm_set_msr_common(vcpu, msr_index, data); | ||
767 | if (vmx->host_state.loaded) { | ||
768 | reload_host_efer(vmx); | ||
769 | load_transition_efer(vmx); | ||
770 | } | ||
771 | break; | ||
772 | case MSR_FS_BASE: | ||
773 | vmcs_writel(GUEST_FS_BASE, data); | ||
774 | break; | ||
775 | case MSR_GS_BASE: | ||
776 | vmcs_writel(GUEST_GS_BASE, data); | ||
777 | break; | ||
778 | #endif | ||
779 | case MSR_IA32_SYSENTER_CS: | ||
780 | vmcs_write32(GUEST_SYSENTER_CS, data); | ||
781 | break; | ||
782 | case MSR_IA32_SYSENTER_EIP: | ||
783 | vmcs_writel(GUEST_SYSENTER_EIP, data); | ||
784 | break; | ||
785 | case MSR_IA32_SYSENTER_ESP: | ||
786 | vmcs_writel(GUEST_SYSENTER_ESP, data); | ||
787 | break; | ||
788 | case MSR_IA32_TIME_STAMP_COUNTER: | ||
789 | guest_write_tsc(data); | ||
790 | break; | ||
791 | default: | ||
792 | msr = find_msr_entry(vmx, msr_index); | ||
793 | if (msr) { | ||
794 | msr->data = data; | ||
795 | if (vmx->host_state.loaded) | ||
796 | load_msrs(vmx->guest_msrs, vmx->save_nmsrs); | ||
797 | break; | ||
798 | } | ||
799 | ret = kvm_set_msr_common(vcpu, msr_index, data); | ||
800 | } | ||
801 | |||
802 | return ret; | ||
803 | } | ||
804 | |||
805 | /* | ||
806 | * Sync the rsp and rip registers into the vcpu structure. This allows | ||
807 | * registers to be accessed by indexing vcpu->arch.regs. | ||
808 | */ | ||
809 | static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu) | ||
810 | { | ||
811 | vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); | ||
812 | vcpu->arch.rip = vmcs_readl(GUEST_RIP); | ||
813 | } | ||
814 | |||
815 | /* | ||
816 | * Syncs rsp and rip back into the vmcs. Should be called after possible | ||
817 | * modification. | ||
818 | */ | ||
819 | static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu) | ||
820 | { | ||
821 | vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); | ||
822 | vmcs_writel(GUEST_RIP, vcpu->arch.rip); | ||
823 | } | ||
824 | |||
825 | static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg) | ||
826 | { | ||
827 | unsigned long dr7 = 0x400; | ||
828 | int old_singlestep; | ||
829 | |||
830 | old_singlestep = vcpu->guest_debug.singlestep; | ||
831 | |||
832 | vcpu->guest_debug.enabled = dbg->enabled; | ||
833 | if (vcpu->guest_debug.enabled) { | ||
834 | int i; | ||
835 | |||
836 | dr7 |= 0x200; /* exact */ | ||
837 | for (i = 0; i < 4; ++i) { | ||
838 | if (!dbg->breakpoints[i].enabled) | ||
839 | continue; | ||
840 | vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address; | ||
841 | dr7 |= 2 << (i*2); /* global enable */ | ||
842 | dr7 |= 0 << (i*4+16); /* execution breakpoint */ | ||
843 | } | ||
844 | |||
845 | vcpu->guest_debug.singlestep = dbg->singlestep; | ||
846 | } else | ||
847 | vcpu->guest_debug.singlestep = 0; | ||
848 | |||
849 | if (old_singlestep && !vcpu->guest_debug.singlestep) { | ||
850 | unsigned long flags; | ||
851 | |||
852 | flags = vmcs_readl(GUEST_RFLAGS); | ||
853 | flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF); | ||
854 | vmcs_writel(GUEST_RFLAGS, flags); | ||
855 | } | ||
856 | |||
857 | update_exception_bitmap(vcpu); | ||
858 | vmcs_writel(GUEST_DR7, dr7); | ||
859 | |||
860 | return 0; | ||
861 | } | ||
862 | |||
863 | static int vmx_get_irq(struct kvm_vcpu *vcpu) | ||
864 | { | ||
865 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
866 | u32 idtv_info_field; | ||
867 | |||
868 | idtv_info_field = vmx->idt_vectoring_info; | ||
869 | if (idtv_info_field & INTR_INFO_VALID_MASK) { | ||
870 | if (is_external_interrupt(idtv_info_field)) | ||
871 | return idtv_info_field & VECTORING_INFO_VECTOR_MASK; | ||
872 | else | ||
873 | printk(KERN_DEBUG "pending exception: not handled yet\n"); | ||
874 | } | ||
875 | return -1; | ||
876 | } | ||
877 | |||
878 | static __init int cpu_has_kvm_support(void) | ||
879 | { | ||
880 | unsigned long ecx = cpuid_ecx(1); | ||
881 | return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */ | ||
882 | } | ||
883 | |||
884 | static __init int vmx_disabled_by_bios(void) | ||
885 | { | ||
886 | u64 msr; | ||
887 | |||
888 | rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); | ||
889 | return (msr & (MSR_IA32_FEATURE_CONTROL_LOCKED | | ||
890 | MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED)) | ||
891 | == MSR_IA32_FEATURE_CONTROL_LOCKED; | ||
892 | /* locked but not enabled */ | ||
893 | } | ||
894 | |||
895 | static void hardware_enable(void *garbage) | ||
896 | { | ||
897 | int cpu = raw_smp_processor_id(); | ||
898 | u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); | ||
899 | u64 old; | ||
900 | |||
901 | rdmsrl(MSR_IA32_FEATURE_CONTROL, old); | ||
902 | if ((old & (MSR_IA32_FEATURE_CONTROL_LOCKED | | ||
903 | MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED)) | ||
904 | != (MSR_IA32_FEATURE_CONTROL_LOCKED | | ||
905 | MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED)) | ||
906 | /* enable and lock */ | ||
907 | wrmsrl(MSR_IA32_FEATURE_CONTROL, old | | ||
908 | MSR_IA32_FEATURE_CONTROL_LOCKED | | ||
909 | MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED); | ||
910 | write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */ | ||
911 | asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr) | ||
912 | : "memory", "cc"); | ||
913 | } | ||
914 | |||
915 | static void hardware_disable(void *garbage) | ||
916 | { | ||
917 | asm volatile (ASM_VMX_VMXOFF : : : "cc"); | ||
918 | } | ||
919 | |||
920 | static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, | ||
921 | u32 msr, u32 *result) | ||
922 | { | ||
923 | u32 vmx_msr_low, vmx_msr_high; | ||
924 | u32 ctl = ctl_min | ctl_opt; | ||
925 | |||
926 | rdmsr(msr, vmx_msr_low, vmx_msr_high); | ||
927 | |||
928 | ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */ | ||
929 | ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */ | ||
930 | |||
931 | /* Ensure minimum (required) set of control bits are supported. */ | ||
932 | if (ctl_min & ~ctl) | ||
933 | return -EIO; | ||
934 | |||
935 | *result = ctl; | ||
936 | return 0; | ||
937 | } | ||
938 | |||
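/*
 * Probe the VMX capability msrs and compute the pin-based, cpu-based,
 * vm-exit and vm-entry control values this host will run with; fail if a
 * required control is not supported.
 */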
939 | static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) | ||
940 | { | ||
941 | u32 vmx_msr_low, vmx_msr_high; | ||
942 | u32 min, opt; | ||
943 | u32 _pin_based_exec_control = 0; | ||
944 | u32 _cpu_based_exec_control = 0; | ||
945 | u32 _cpu_based_2nd_exec_control = 0; | ||
946 | u32 _vmexit_control = 0; | ||
947 | u32 _vmentry_control = 0; | ||
948 | |||
949 | min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING; | ||
950 | opt = 0; | ||
951 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS, | ||
952 | &_pin_based_exec_control) < 0) | ||
953 | return -EIO; | ||
954 | |||
955 | min = CPU_BASED_HLT_EXITING | | ||
956 | #ifdef CONFIG_X86_64 | ||
957 | CPU_BASED_CR8_LOAD_EXITING | | ||
958 | CPU_BASED_CR8_STORE_EXITING | | ||
959 | #endif | ||
960 | CPU_BASED_USE_IO_BITMAPS | | ||
961 | CPU_BASED_MOV_DR_EXITING | | ||
962 | CPU_BASED_USE_TSC_OFFSETING; | ||
963 | opt = CPU_BASED_TPR_SHADOW | | ||
964 | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; | ||
965 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS, | ||
966 | &_cpu_based_exec_control) < 0) | ||
967 | return -EIO; | ||
968 | #ifdef CONFIG_X86_64 | ||
969 | if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) | ||
970 | _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING & | ||
971 | ~CPU_BASED_CR8_STORE_EXITING; | ||
972 | #endif | ||
973 | if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) { | ||
974 | min = 0; | ||
975 | opt = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | | ||
976 | SECONDARY_EXEC_WBINVD_EXITING; | ||
977 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS2, | ||
978 | &_cpu_based_2nd_exec_control) < 0) | ||
979 | return -EIO; | ||
980 | } | ||
981 | #ifndef CONFIG_X86_64 | ||
982 | if (!(_cpu_based_2nd_exec_control & | ||
983 | SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) | ||
984 | _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW; | ||
985 | #endif | ||
986 | |||
987 | min = 0; | ||
988 | #ifdef CONFIG_X86_64 | ||
989 | min |= VM_EXIT_HOST_ADDR_SPACE_SIZE; | ||
990 | #endif | ||
991 | opt = 0; | ||
992 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS, | ||
993 | &_vmexit_control) < 0) | ||
994 | return -EIO; | ||
995 | |||
996 | min = opt = 0; | ||
997 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS, | ||
998 | &_vmentry_control) < 0) | ||
999 | return -EIO; | ||
1000 | |||
1001 | rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high); | ||
1002 | |||
1003 | /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */ | ||
1004 | if ((vmx_msr_high & 0x1fff) > PAGE_SIZE) | ||
1005 | return -EIO; | ||
1006 | |||
1007 | #ifdef CONFIG_X86_64 | ||
1008 | /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */ | ||
1009 | if (vmx_msr_high & (1u<<16)) | ||
1010 | return -EIO; | ||
1011 | #endif | ||
1012 | |||
1013 | /* Require Write-Back (WB) memory type for VMCS accesses. */ | ||
1014 | if (((vmx_msr_high >> 18) & 15) != 6) | ||
1015 | return -EIO; | ||
1016 | |||
1017 | vmcs_conf->size = vmx_msr_high & 0x1fff; | ||
1018 | vmcs_conf->order = get_order(vmcs_config.size); | ||
1019 | vmcs_conf->revision_id = vmx_msr_low; | ||
1020 | |||
1021 | vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control; | ||
1022 | vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control; | ||
1023 | vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control; | ||
1024 | vmcs_conf->vmexit_ctrl = _vmexit_control; | ||
1025 | vmcs_conf->vmentry_ctrl = _vmentry_control; | ||
1026 | |||
1027 | return 0; | ||
1028 | } | ||
1029 | |||
1030 | static struct vmcs *alloc_vmcs_cpu(int cpu) | ||
1031 | { | ||
1032 | int node = cpu_to_node(cpu); | ||
1033 | struct page *pages; | ||
1034 | struct vmcs *vmcs; | ||
1035 | |||
1036 | pages = alloc_pages_node(node, GFP_KERNEL, vmcs_config.order); | ||
1037 | if (!pages) | ||
1038 | return NULL; | ||
1039 | vmcs = page_address(pages); | ||
1040 | memset(vmcs, 0, vmcs_config.size); | ||
1041 | vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */ | ||
1042 | return vmcs; | ||
1043 | } | ||
1044 | |||
1045 | static struct vmcs *alloc_vmcs(void) | ||
1046 | { | ||
1047 | return alloc_vmcs_cpu(raw_smp_processor_id()); | ||
1048 | } | ||
1049 | |||
1050 | static void free_vmcs(struct vmcs *vmcs) | ||
1051 | { | ||
1052 | free_pages((unsigned long)vmcs, vmcs_config.order); | ||
1053 | } | ||
1054 | |||
1055 | static void free_kvm_area(void) | ||
1056 | { | ||
1057 | int cpu; | ||
1058 | |||
1059 | for_each_online_cpu(cpu) | ||
1060 | free_vmcs(per_cpu(vmxarea, cpu)); | ||
1061 | } | ||
1062 | |||
1063 | static __init int alloc_kvm_area(void) | ||
1064 | { | ||
1065 | int cpu; | ||
1066 | |||
1067 | for_each_online_cpu(cpu) { | ||
1068 | struct vmcs *vmcs; | ||
1069 | |||
1070 | vmcs = alloc_vmcs_cpu(cpu); | ||
1071 | if (!vmcs) { | ||
1072 | free_kvm_area(); | ||
1073 | return -ENOMEM; | ||
1074 | } | ||
1075 | |||
1076 | per_cpu(vmxarea, cpu) = vmcs; | ||
1077 | } | ||
1078 | return 0; | ||
1079 | } | ||
1080 | |||
1081 | static __init int hardware_setup(void) | ||
1082 | { | ||
1083 | if (setup_vmcs_config(&vmcs_config) < 0) | ||
1084 | return -EIO; | ||
1085 | return alloc_kvm_area(); | ||
1086 | } | ||
1087 | |||
1088 | static __exit void hardware_unsetup(void) | ||
1089 | { | ||
1090 | free_kvm_area(); | ||
1091 | } | ||
1092 | |||
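/*
 * When leaving emulated real mode, put a data segment register back to the
 * value saved at real-mode entry when it still looks usable; otherwise give
 * it a flat writable-data descriptor whose DPL comes from the selector's RPL.
 */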
1093 | static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save) | ||
1094 | { | ||
1095 | struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; | ||
1096 | |||
1097 | if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) { | ||
1098 | vmcs_write16(sf->selector, save->selector); | ||
1099 | vmcs_writel(sf->base, save->base); | ||
1100 | vmcs_write32(sf->limit, save->limit); | ||
1101 | vmcs_write32(sf->ar_bytes, save->ar); | ||
1102 | } else { | ||
1103 | u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK) | ||
1104 | << AR_DPL_SHIFT; | ||
1105 | vmcs_write32(sf->ar_bytes, 0x93 | dpl); | ||
1106 | } | ||
1107 | } | ||
1108 | |||
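/*
 * Leave emulated real mode: restore TR, IOPL and the data segments that
 * were saved when vm86 emulation was entered, and clear the VM flag.
 */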
1109 | static void enter_pmode(struct kvm_vcpu *vcpu) | ||
1110 | { | ||
1111 | unsigned long flags; | ||
1112 | |||
1113 | vcpu->arch.rmode.active = 0; | ||
1114 | |||
1115 | vmcs_writel(GUEST_TR_BASE, vcpu->arch.rmode.tr.base); | ||
1116 | vmcs_write32(GUEST_TR_LIMIT, vcpu->arch.rmode.tr.limit); | ||
1117 | vmcs_write32(GUEST_TR_AR_BYTES, vcpu->arch.rmode.tr.ar); | ||
1118 | |||
1119 | flags = vmcs_readl(GUEST_RFLAGS); | ||
1120 | flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM); | ||
1121 | flags |= (vcpu->arch.rmode.save_iopl << IOPL_SHIFT); | ||
1122 | vmcs_writel(GUEST_RFLAGS, flags); | ||
1123 | |||
1124 | vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | | ||
1125 | (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME)); | ||
1126 | |||
1127 | update_exception_bitmap(vcpu); | ||
1128 | |||
1129 | fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->arch.rmode.es); | ||
1130 | fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->arch.rmode.ds); | ||
1131 | fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->arch.rmode.gs); | ||
1132 | fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->arch.rmode.fs); | ||
1133 | |||
1134 | vmcs_write16(GUEST_SS_SELECTOR, 0); | ||
1135 | vmcs_write32(GUEST_SS_AR_BYTES, 0x93); | ||
1136 | |||
1137 | vmcs_write16(GUEST_CS_SELECTOR, | ||
1138 | vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK); | ||
1139 | vmcs_write32(GUEST_CS_AR_BYTES, 0x9b); | ||
1140 | } | ||
1141 | |||
1142 | static gva_t rmode_tss_base(struct kvm *kvm) | ||
1143 | { | ||
1144 | if (!kvm->arch.tss_addr) { | ||
1145 | gfn_t base_gfn = kvm->memslots[0].base_gfn + | ||
1146 | kvm->memslots[0].npages - 3; | ||
1147 | return base_gfn << PAGE_SHIFT; | ||
1148 | } | ||
1149 | return kvm->arch.tss_addr; | ||
1150 | } | ||
1151 | |||
1152 | static void fix_rmode_seg(int seg, struct kvm_save_segment *save) | ||
1153 | { | ||
1154 | struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; | ||
1155 | |||
1156 | save->selector = vmcs_read16(sf->selector); | ||
1157 | save->base = vmcs_readl(sf->base); | ||
1158 | save->limit = vmcs_read32(sf->limit); | ||
1159 | save->ar = vmcs_read32(sf->ar_bytes); | ||
1160 | vmcs_write16(sf->selector, save->base >> 4); | ||
1161 | vmcs_write32(sf->base, save->base & 0xfffff); | ||
1162 | vmcs_write32(sf->limit, 0xffff); | ||
1163 | vmcs_write32(sf->ar_bytes, 0xf3); | ||
1164 | } | ||
1165 | |||
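/*
 * Enter emulated real mode: save the protected-mode TR and IOPL, point TR
 * at the dummy real-mode TSS, and run the guest as a vm86 task with
 * selector<<4 segment bases and 64k limits.
 */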
1166 | static void enter_rmode(struct kvm_vcpu *vcpu) | ||
1167 | { | ||
1168 | unsigned long flags; | ||
1169 | |||
1170 | vcpu->arch.rmode.active = 1; | ||
1171 | |||
1172 | vcpu->arch.rmode.tr.base = vmcs_readl(GUEST_TR_BASE); | ||
1173 | vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm)); | ||
1174 | |||
1175 | vcpu->arch.rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT); | ||
1176 | vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1); | ||
1177 | |||
1178 | vcpu->arch.rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES); | ||
1179 | vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); | ||
1180 | |||
1181 | flags = vmcs_readl(GUEST_RFLAGS); | ||
1182 | vcpu->arch.rmode.save_iopl | ||
1183 | = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; | ||
1184 | |||
1185 | flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; | ||
1186 | |||
1187 | vmcs_writel(GUEST_RFLAGS, flags); | ||
1188 | vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); | ||
1189 | update_exception_bitmap(vcpu); | ||
1190 | |||
1191 | vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4); | ||
1192 | vmcs_write32(GUEST_SS_LIMIT, 0xffff); | ||
1193 | vmcs_write32(GUEST_SS_AR_BYTES, 0xf3); | ||
1194 | |||
1195 | vmcs_write32(GUEST_CS_AR_BYTES, 0xf3); | ||
1196 | vmcs_write32(GUEST_CS_LIMIT, 0xffff); | ||
1197 | if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000) | ||
1198 | vmcs_writel(GUEST_CS_BASE, 0xf0000); | ||
1199 | vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4); | ||
1200 | |||
1201 | fix_rmode_seg(VCPU_SREG_ES, &vcpu->arch.rmode.es); | ||
1202 | fix_rmode_seg(VCPU_SREG_DS, &vcpu->arch.rmode.ds); | ||
1203 | fix_rmode_seg(VCPU_SREG_GS, &vcpu->arch.rmode.gs); | ||
1204 | fix_rmode_seg(VCPU_SREG_FS, &vcpu->arch.rmode.fs); | ||
1205 | |||
1206 | kvm_mmu_reset_context(vcpu); | ||
1207 | init_rmode_tss(vcpu->kvm); | ||
1208 | } | ||
1209 | |||
1210 | #ifdef CONFIG_X86_64 | ||
1211 | |||
1212 | static void enter_lmode(struct kvm_vcpu *vcpu) | ||
1213 | { | ||
1214 | u32 guest_tr_ar; | ||
1215 | |||
1216 | guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES); | ||
1217 | if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) { | ||
1218 | printk(KERN_DEBUG "%s: tss fixup for long mode. \n", | ||
1219 | __FUNCTION__); | ||
1220 | vmcs_write32(GUEST_TR_AR_BYTES, | ||
1221 | (guest_tr_ar & ~AR_TYPE_MASK) | ||
1222 | | AR_TYPE_BUSY_64_TSS); | ||
1223 | } | ||
1224 | |||
1225 | vcpu->arch.shadow_efer |= EFER_LMA; | ||
1226 | |||
1227 | find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME; | ||
1228 | vmcs_write32(VM_ENTRY_CONTROLS, | ||
1229 | vmcs_read32(VM_ENTRY_CONTROLS) | ||
1230 | | VM_ENTRY_IA32E_MODE); | ||
1231 | } | ||
1232 | |||
1233 | static void exit_lmode(struct kvm_vcpu *vcpu) | ||
1234 | { | ||
1235 | vcpu->arch.shadow_efer &= ~EFER_LMA; | ||
1236 | |||
1237 | vmcs_write32(VM_ENTRY_CONTROLS, | ||
1238 | vmcs_read32(VM_ENTRY_CONTROLS) | ||
1239 | & ~VM_ENTRY_IA32E_MODE); | ||
1240 | } | ||
1241 | |||
1242 | #endif | ||
1243 | |||
1244 | static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) | ||
1245 | { | ||
1246 | vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK; | ||
1247 | vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK; | ||
1248 | } | ||
1249 | |||
1250 | static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | ||
1251 | { | ||
1252 | vmx_fpu_deactivate(vcpu); | ||
1253 | |||
1254 | if (vcpu->arch.rmode.active && (cr0 & X86_CR0_PE)) | ||
1255 | enter_pmode(vcpu); | ||
1256 | |||
1257 | if (!vcpu->arch.rmode.active && !(cr0 & X86_CR0_PE)) | ||
1258 | enter_rmode(vcpu); | ||
1259 | |||
1260 | #ifdef CONFIG_X86_64 | ||
1261 | if (vcpu->arch.shadow_efer & EFER_LME) { | ||
1262 | if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) | ||
1263 | enter_lmode(vcpu); | ||
1264 | if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) | ||
1265 | exit_lmode(vcpu); | ||
1266 | } | ||
1267 | #endif | ||
1268 | |||
1269 | vmcs_writel(CR0_READ_SHADOW, cr0); | ||
1270 | vmcs_writel(GUEST_CR0, | ||
1271 | (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON); | ||
1272 | vcpu->arch.cr0 = cr0; | ||
1273 | |||
1274 | if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE)) | ||
1275 | vmx_fpu_activate(vcpu); | ||
1276 | } | ||
1277 | |||
1278 | static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) | ||
1279 | { | ||
1280 | vmcs_writel(GUEST_CR3, cr3); | ||
1281 | if (vcpu->arch.cr0 & X86_CR0_PE) | ||
1282 | vmx_fpu_deactivate(vcpu); | ||
1283 | } | ||
1284 | |||
1285 | static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) | ||
1286 | { | ||
1287 | vmcs_writel(CR4_READ_SHADOW, cr4); | ||
1288 | vmcs_writel(GUEST_CR4, cr4 | (vcpu->arch.rmode.active ? | ||
1289 | KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON)); | ||
1290 | vcpu->arch.cr4 = cr4; | ||
1291 | } | ||
1292 | |||
1293 | #ifdef CONFIG_X86_64 | ||
1294 | |||
1295 | static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) | ||
1296 | { | ||
1297 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
1298 | struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); | ||
1299 | |||
1300 | vcpu->arch.shadow_efer = efer; | ||
1301 | if (efer & EFER_LMA) { | ||
1302 | vmcs_write32(VM_ENTRY_CONTROLS, | ||
1303 | vmcs_read32(VM_ENTRY_CONTROLS) | | ||
1304 | VM_ENTRY_IA32E_MODE); | ||
1305 | msr->data = efer; | ||
1306 | |||
1307 | } else { | ||
1308 | vmcs_write32(VM_ENTRY_CONTROLS, | ||
1309 | vmcs_read32(VM_ENTRY_CONTROLS) & | ||
1310 | ~VM_ENTRY_IA32E_MODE); | ||
1311 | |||
1312 | msr->data = efer & ~EFER_LME; | ||
1313 | } | ||
1314 | setup_msrs(vmx); | ||
1315 | } | ||
1316 | |||
1317 | #endif | ||
1318 | |||
1319 | static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) | ||
1320 | { | ||
1321 | struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; | ||
1322 | |||
1323 | return vmcs_readl(sf->base); | ||
1324 | } | ||
1325 | |||
1326 | static void vmx_get_segment(struct kvm_vcpu *vcpu, | ||
1327 | struct kvm_segment *var, int seg) | ||
1328 | { | ||
1329 | struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; | ||
1330 | u32 ar; | ||
1331 | |||
1332 | var->base = vmcs_readl(sf->base); | ||
1333 | var->limit = vmcs_read32(sf->limit); | ||
1334 | var->selector = vmcs_read16(sf->selector); | ||
1335 | ar = vmcs_read32(sf->ar_bytes); | ||
1336 | if (ar & AR_UNUSABLE_MASK) | ||
1337 | ar = 0; | ||
1338 | var->type = ar & 15; | ||
1339 | var->s = (ar >> 4) & 1; | ||
1340 | var->dpl = (ar >> 5) & 3; | ||
1341 | var->present = (ar >> 7) & 1; | ||
1342 | var->avl = (ar >> 12) & 1; | ||
1343 | var->l = (ar >> 13) & 1; | ||
1344 | var->db = (ar >> 14) & 1; | ||
1345 | var->g = (ar >> 15) & 1; | ||
1346 | var->unusable = (ar >> 16) & 1; | ||
1347 | } | ||
1348 | |||
1349 | static u32 vmx_segment_access_rights(struct kvm_segment *var) | ||
1350 | { | ||
1351 | u32 ar; | ||
1352 | |||
1353 | if (var->unusable) | ||
1354 | ar = 1 << 16; | ||
1355 | else { | ||
1356 | ar = var->type & 15; | ||
1357 | ar |= (var->s & 1) << 4; | ||
1358 | ar |= (var->dpl & 3) << 5; | ||
1359 | ar |= (var->present & 1) << 7; | ||
1360 | ar |= (var->avl & 1) << 12; | ||
1361 | ar |= (var->l & 1) << 13; | ||
1362 | ar |= (var->db & 1) << 14; | ||
1363 | ar |= (var->g & 1) << 15; | ||
1364 | } | ||
1365 | if (ar == 0) /* a 0 value means unusable */ | ||
1366 | ar = AR_UNUSABLE_MASK; | ||
1367 | |||
1368 | return ar; | ||
1369 | } | ||
1370 | |||
1371 | static void vmx_set_segment(struct kvm_vcpu *vcpu, | ||
1372 | struct kvm_segment *var, int seg) | ||
1373 | { | ||
1374 | struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; | ||
1375 | u32 ar; | ||
1376 | |||
1377 | if (vcpu->arch.rmode.active && seg == VCPU_SREG_TR) { | ||
1378 | vcpu->arch.rmode.tr.selector = var->selector; | ||
1379 | vcpu->arch.rmode.tr.base = var->base; | ||
1380 | vcpu->arch.rmode.tr.limit = var->limit; | ||
1381 | vcpu->arch.rmode.tr.ar = vmx_segment_access_rights(var); | ||
1382 | return; | ||
1383 | } | ||
1384 | vmcs_writel(sf->base, var->base); | ||
1385 | vmcs_write32(sf->limit, var->limit); | ||
1386 | vmcs_write16(sf->selector, var->selector); | ||
1387 | if (vcpu->arch.rmode.active && var->s) { | ||
1388 | /* | ||
1389 | * Hack real-mode segments into vm86 compatibility. | ||
1390 | */ | ||
1391 | if (var->base == 0xffff0000 && var->selector == 0xf000) | ||
1392 | vmcs_writel(sf->base, 0xf0000); | ||
1393 | ar = 0xf3; | ||
1394 | } else | ||
1395 | ar = vmx_segment_access_rights(var); | ||
1396 | vmcs_write32(sf->ar_bytes, ar); | ||
1397 | } | ||
1398 | |||
1399 | static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) | ||
1400 | { | ||
1401 | u32 ar = vmcs_read32(GUEST_CS_AR_BYTES); | ||
1402 | |||
1403 | *db = (ar >> 14) & 1; | ||
1404 | *l = (ar >> 13) & 1; | ||
1405 | } | ||
1406 | |||
1407 | static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) | ||
1408 | { | ||
1409 | dt->limit = vmcs_read32(GUEST_IDTR_LIMIT); | ||
1410 | dt->base = vmcs_readl(GUEST_IDTR_BASE); | ||
1411 | } | ||
1412 | |||
1413 | static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) | ||
1414 | { | ||
1415 | vmcs_write32(GUEST_IDTR_LIMIT, dt->limit); | ||
1416 | vmcs_writel(GUEST_IDTR_BASE, dt->base); | ||
1417 | } | ||
1418 | |||
1419 | static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) | ||
1420 | { | ||
1421 | dt->limit = vmcs_read32(GUEST_GDTR_LIMIT); | ||
1422 | dt->base = vmcs_readl(GUEST_GDTR_BASE); | ||
1423 | } | ||
1424 | |||
1425 | static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) | ||
1426 | { | ||
1427 | vmcs_write32(GUEST_GDTR_LIMIT, dt->limit); | ||
1428 | vmcs_writel(GUEST_GDTR_BASE, dt->base); | ||
1429 | } | ||
1430 | |||
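/*
 * Set up the dummy TSS used while the guest runs in vm86 real-mode
 * emulation: clear its pages, point the I/O bitmap just past the TSS and
 * terminate the bitmap with 0xff.  Returns 1 on success, 0 on failure.
 */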
1431 | static int init_rmode_tss(struct kvm *kvm) | ||
1432 | { | ||
1433 | gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT; | ||
1434 | u16 data = 0; | ||
1435 | int r; | ||
1436 | |||
1437 | r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); | ||
1438 | if (r < 0) | ||
1439 | return 0; | ||
1440 | data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE; | ||
1441 | r = kvm_write_guest_page(kvm, fn++, &data, 0x66, sizeof(u16)); | ||
1442 | if (r < 0) | ||
1443 | return 0; | ||
1444 | r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE); | ||
1445 | if (r < 0) | ||
1446 | return 0; | ||
1447 | r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); | ||
1448 | if (r < 0) | ||
1449 | return 0; | ||
1450 | data = ~0; | ||
1451 | r = kvm_write_guest_page(kvm, fn, &data, RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1, | ||
1452 | sizeof(u8)); | ||
1453 | if (r < 0) | ||
1454 | return 0; | ||
1455 | return 1; | ||
1456 | } | ||
1457 | |||
1458 | static void seg_setup(int seg) | ||
1459 | { | ||
1460 | struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; | ||
1461 | |||
1462 | vmcs_write16(sf->selector, 0); | ||
1463 | vmcs_writel(sf->base, 0); | ||
1464 | vmcs_write32(sf->limit, 0xffff); | ||
1465 | vmcs_write32(sf->ar_bytes, 0x93); | ||
1466 | } | ||
1467 | |||
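/*
 * Back the APIC mmio address (0xfee00000) with a private memslot so that
 * guest accesses to the local APIC page can be trapped and virtualized.
 */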
1468 | static int alloc_apic_access_page(struct kvm *kvm) | ||
1469 | { | ||
1470 | struct kvm_userspace_memory_region kvm_userspace_mem; | ||
1471 | int r = 0; | ||
1472 | |||
1473 | mutex_lock(&kvm->lock); | ||
1474 | if (kvm->arch.apic_access_page) | ||
1475 | goto out; | ||
1476 | kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT; | ||
1477 | kvm_userspace_mem.flags = 0; | ||
1478 | kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL; | ||
1479 | kvm_userspace_mem.memory_size = PAGE_SIZE; | ||
1480 | r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0); | ||
1481 | if (r) | ||
1482 | goto out; | ||
1483 | kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00); | ||
1484 | out: | ||
1485 | mutex_unlock(&kvm->lock); | ||
1486 | return r; | ||
1487 | } | ||
1488 | |||
1489 | /* | ||
1490 | * Sets up the vmcs for emulated real mode. | ||
1491 | */ | ||
1492 | static int vmx_vcpu_setup(struct vcpu_vmx *vmx) | ||
1493 | { | ||
1494 | u32 host_sysenter_cs; | ||
1495 | u32 junk; | ||
1496 | unsigned long a; | ||
1497 | struct descriptor_table dt; | ||
1498 | int i; | ||
1499 | unsigned long kvm_vmx_return; | ||
1500 | u32 exec_control; | ||
1501 | |||
1502 | /* I/O */ | ||
1503 | vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a)); | ||
1504 | vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b)); | ||
1505 | |||
1506 | vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */ | ||
1507 | |||
1508 | /* Control */ | ||
1509 | vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, | ||
1510 | vmcs_config.pin_based_exec_ctrl); | ||
1511 | |||
1512 | exec_control = vmcs_config.cpu_based_exec_ctrl; | ||
1513 | if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) { | ||
1514 | exec_control &= ~CPU_BASED_TPR_SHADOW; | ||
1515 | #ifdef CONFIG_X86_64 | ||
1516 | exec_control |= CPU_BASED_CR8_STORE_EXITING | | ||
1517 | CPU_BASED_CR8_LOAD_EXITING; | ||
1518 | #endif | ||
1519 | } | ||
1520 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); | ||
1521 | |||
1522 | if (cpu_has_secondary_exec_ctrls()) { | ||
1523 | exec_control = vmcs_config.cpu_based_2nd_exec_ctrl; | ||
1524 | if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) | ||
1525 | exec_control &= | ||
1526 | ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; | ||
1527 | vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); | ||
1528 | } | ||
1529 | |||
1530 | vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf); | ||
1531 | vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf); | ||
1532 | vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */ | ||
1533 | |||
1534 | vmcs_writel(HOST_CR0, read_cr0()); /* 22.2.3 */ | ||
1535 | vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */ | ||
1536 | vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */ | ||
1537 | |||
1538 | vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ | ||
1539 | vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ | ||
1540 | vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ | ||
1541 | vmcs_write16(HOST_FS_SELECTOR, read_fs()); /* 22.2.4 */ | ||
1542 | vmcs_write16(HOST_GS_SELECTOR, read_gs()); /* 22.2.4 */ | ||
1543 | vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ | ||
1544 | #ifdef CONFIG_X86_64 | ||
1545 | rdmsrl(MSR_FS_BASE, a); | ||
1546 | vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */ | ||
1547 | rdmsrl(MSR_GS_BASE, a); | ||
1548 | vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */ | ||
1549 | #else | ||
1550 | vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */ | ||
1551 | vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */ | ||
1552 | #endif | ||
1553 | |||
1554 | vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ | ||
1555 | |||
1556 | get_idt(&dt); | ||
1557 | vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */ | ||
1558 | |||
1559 | asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return)); | ||
1560 | vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */ | ||
1561 | vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); | ||
1562 | vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); | ||
1563 | vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); | ||
1564 | |||
1565 | rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk); | ||
1566 | vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs); | ||
1567 | rdmsrl(MSR_IA32_SYSENTER_ESP, a); | ||
1568 | vmcs_writel(HOST_IA32_SYSENTER_ESP, a); /* 22.2.3 */ | ||
1569 | rdmsrl(MSR_IA32_SYSENTER_EIP, a); | ||
1570 | vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */ | ||
1571 | |||
1572 | for (i = 0; i < NR_VMX_MSR; ++i) { | ||
1573 | u32 index = vmx_msr_index[i]; | ||
1574 | u32 data_low, data_high; | ||
1575 | u64 data; | ||
1576 | int j = vmx->nmsrs; | ||
1577 | |||
1578 | if (rdmsr_safe(index, &data_low, &data_high) < 0) | ||
1579 | continue; | ||
1580 | if (wrmsr_safe(index, data_low, data_high) < 0) | ||
1581 | continue; | ||
1582 | data = data_low | ((u64)data_high << 32); | ||
1583 | vmx->host_msrs[j].index = index; | ||
1584 | vmx->host_msrs[j].reserved = 0; | ||
1585 | vmx->host_msrs[j].data = data; | ||
1586 | vmx->guest_msrs[j] = vmx->host_msrs[j]; | ||
1587 | ++vmx->nmsrs; | ||
1588 | } | ||
1589 | |||
1590 | vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl); | ||
1591 | |||
1592 | /* 22.2.1, 20.8.1 */ | ||
1593 | vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl); | ||
1594 | |||
1595 | vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL); | ||
1596 | vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK); | ||
1597 | |||
1598 | if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) | ||
1599 | if (alloc_apic_access_page(vmx->vcpu.kvm) != 0) | ||
1600 | return -ENOMEM; | ||
1601 | |||
1602 | return 0; | ||
1603 | } | ||
1604 | |||
1605 | static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) | ||
1606 | { | ||
1607 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
1608 | u64 msr; | ||
1609 | int ret; | ||
1610 | |||
1611 | if (!init_rmode_tss(vmx->vcpu.kvm)) { | ||
1612 | ret = -ENOMEM; | ||
1613 | goto out; | ||
1614 | } | ||
1615 | |||
1616 | vmx->vcpu.arch.rmode.active = 0; | ||
1617 | |||
1618 | vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); | ||
1619 | set_cr8(&vmx->vcpu, 0); | ||
1620 | msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE; | ||
1621 | if (vmx->vcpu.vcpu_id == 0) | ||
1622 | msr |= MSR_IA32_APICBASE_BSP; | ||
1623 | kvm_set_apic_base(&vmx->vcpu, msr); | ||
1624 | |||
1625 | fx_init(&vmx->vcpu); | ||
1626 | |||
1627 | /* | ||
1628 | * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode | ||
1629 | * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh. | ||
1630 | */ | ||
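/*
 * Note: with CS.base forced to 0x000f0000 and RIP set to 0xfff0 below,
 * the BSP starts fetching at physical 0x000ffff0 (top of the first
 * megabyte) rather than the architectural reset vector 0xfffffff0, so
 * guest firmware is expected to be reachable there as well.
 */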
1631 | if (vmx->vcpu.vcpu_id == 0) { | ||
1632 | vmcs_write16(GUEST_CS_SELECTOR, 0xf000); | ||
1633 | vmcs_writel(GUEST_CS_BASE, 0x000f0000); | ||
1634 | } else { | ||
1635 | vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8); | ||
1636 | vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12); | ||
1637 | } | ||
1638 | vmcs_write32(GUEST_CS_LIMIT, 0xffff); | ||
1639 | vmcs_write32(GUEST_CS_AR_BYTES, 0x9b); | ||
1640 | |||
1641 | seg_setup(VCPU_SREG_DS); | ||
1642 | seg_setup(VCPU_SREG_ES); | ||
1643 | seg_setup(VCPU_SREG_FS); | ||
1644 | seg_setup(VCPU_SREG_GS); | ||
1645 | seg_setup(VCPU_SREG_SS); | ||
1646 | |||
1647 | vmcs_write16(GUEST_TR_SELECTOR, 0); | ||
1648 | vmcs_writel(GUEST_TR_BASE, 0); | ||
1649 | vmcs_write32(GUEST_TR_LIMIT, 0xffff); | ||
1650 | vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); | ||
1651 | |||
1652 | vmcs_write16(GUEST_LDTR_SELECTOR, 0); | ||
1653 | vmcs_writel(GUEST_LDTR_BASE, 0); | ||
1654 | vmcs_write32(GUEST_LDTR_LIMIT, 0xffff); | ||
1655 | vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082); | ||
1656 | |||
1657 | vmcs_write32(GUEST_SYSENTER_CS, 0); | ||
1658 | vmcs_writel(GUEST_SYSENTER_ESP, 0); | ||
1659 | vmcs_writel(GUEST_SYSENTER_EIP, 0); | ||
1660 | |||
1661 | vmcs_writel(GUEST_RFLAGS, 0x02); | ||
1662 | if (vmx->vcpu.vcpu_id == 0) | ||
1663 | vmcs_writel(GUEST_RIP, 0xfff0); | ||
1664 | else | ||
1665 | vmcs_writel(GUEST_RIP, 0); | ||
1666 | vmcs_writel(GUEST_RSP, 0); | ||
1667 | |||
1668 | /* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */ | ||
1669 | vmcs_writel(GUEST_DR7, 0x400); | ||
1670 | |||
1671 | vmcs_writel(GUEST_GDTR_BASE, 0); | ||
1672 | vmcs_write32(GUEST_GDTR_LIMIT, 0xffff); | ||
1673 | |||
1674 | vmcs_writel(GUEST_IDTR_BASE, 0); | ||
1675 | vmcs_write32(GUEST_IDTR_LIMIT, 0xffff); | ||
1676 | |||
1677 | vmcs_write32(GUEST_ACTIVITY_STATE, 0); | ||
1678 | vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0); | ||
1679 | vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0); | ||
1680 | |||
1681 | guest_write_tsc(0); | ||
1682 | |||
1683 | /* Special registers */ | ||
1684 | vmcs_write64(GUEST_IA32_DEBUGCTL, 0); | ||
1685 | |||
1686 | setup_msrs(vmx); | ||
1687 | |||
1688 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */ | ||
1689 | |||
1690 | if (cpu_has_vmx_tpr_shadow()) { | ||
1691 | vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0); | ||
1692 | if (vm_need_tpr_shadow(vmx->vcpu.kvm)) | ||
1693 | vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, | ||
1694 | page_to_phys(vmx->vcpu.arch.apic->regs_page)); | ||
1695 | vmcs_write32(TPR_THRESHOLD, 0); | ||
1696 | } | ||
1697 | |||
1698 | if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) | ||
1699 | vmcs_write64(APIC_ACCESS_ADDR, | ||
1700 | page_to_phys(vmx->vcpu.kvm->arch.apic_access_page)); | ||
1701 | |||
1702 | vmx->vcpu.arch.cr0 = 0x60000010; | ||
1703 | vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */ | ||
1704 | vmx_set_cr4(&vmx->vcpu, 0); | ||
1705 | #ifdef CONFIG_X86_64 | ||
1706 | vmx_set_efer(&vmx->vcpu, 0); | ||
1707 | #endif | ||
1708 | vmx_fpu_activate(&vmx->vcpu); | ||
1709 | update_exception_bitmap(&vmx->vcpu); | ||
1710 | |||
1711 | return 0; | ||
1712 | |||
1713 | out: | ||
1714 | return ret; | ||
1715 | } | ||
1716 | |||
1717 | static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq) | ||
1718 | { | ||
1719 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
1720 | |||
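/*
 * In real (vm86) mode the interrupt is injected as a software interrupt
 * with a phantom one-byte instruction: RIP is rewound by one and
 * VM_ENTRY_INSTRUCTION_LEN is set to 1, so delivery looks like an 'int'
 * that has just executed.  If delivery fails, fixup_rmode_irq() below
 * restores RIP and rebuilds the vectoring info.
 */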
1721 | if (vcpu->arch.rmode.active) { | ||
1722 | vmx->rmode.irq.pending = true; | ||
1723 | vmx->rmode.irq.vector = irq; | ||
1724 | vmx->rmode.irq.rip = vmcs_readl(GUEST_RIP); | ||
1725 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, | ||
1726 | irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK); | ||
1727 | vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1); | ||
1728 | vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip - 1); | ||
1729 | return; | ||
1730 | } | ||
1731 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, | ||
1732 | irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK); | ||
1733 | } | ||
1734 | |||
1735 | static void kvm_do_inject_irq(struct kvm_vcpu *vcpu) | ||
1736 | { | ||
1737 | int word_index = __ffs(vcpu->arch.irq_summary); | ||
1738 | int bit_index = __ffs(vcpu->arch.irq_pending[word_index]); | ||
1739 | int irq = word_index * BITS_PER_LONG + bit_index; | ||
1740 | |||
1741 | clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]); | ||
1742 | if (!vcpu->arch.irq_pending[word_index]) | ||
1743 | clear_bit(word_index, &vcpu->arch.irq_summary); | ||
1744 | vmx_inject_irq(vcpu, irq); | ||
1745 | } | ||
1746 | |||
1747 | |||
1748 | static void do_interrupt_requests(struct kvm_vcpu *vcpu, | ||
1749 | struct kvm_run *kvm_run) | ||
1750 | { | ||
1751 | u32 cpu_based_vm_exec_control; | ||
1752 | |||
1753 | vcpu->arch.interrupt_window_open = | ||
1754 | ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && | ||
1755 | (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0); | ||
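/*
 * Bits 0 and 1 of GUEST_INTERRUPTIBILITY_INFO are blocking-by-STI and
 * blocking-by-MOV-SS; either one keeps the interrupt window closed.
 */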
1756 | |||
1757 | if (vcpu->arch.interrupt_window_open && | ||
1758 | vcpu->arch.irq_summary && | ||
1759 | !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK)) | ||
1760 | /* | ||
1761 | * Interrupts are enabled and not blocked by sti or mov ss; inject now. | ||
1762 | */ | ||
1763 | kvm_do_inject_irq(vcpu); | ||
1764 | |||
1765 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); | ||
1766 | if (!vcpu->arch.interrupt_window_open && | ||
1767 | (vcpu->arch.irq_summary || kvm_run->request_interrupt_window)) | ||
1768 | /* | ||
1769 | * Interrupts blocked. Wait for unblock. | ||
1770 | */ | ||
1771 | cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING; | ||
1772 | else | ||
1773 | cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; | ||
1774 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); | ||
1775 | } | ||
1776 | |||
1777 | static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) | ||
1778 | { | ||
1779 | int ret; | ||
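/*
 * The real-mode TSS plus its interrupt-redirection and I/O permission
 * bitmaps occupy three pages; slot 8 is presumably a private memory slot
 * above the userspace-visible slots in this kernel's slot layout.
 */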
1780 | struct kvm_userspace_memory_region tss_mem = { | ||
1781 | .slot = 8, | ||
1782 | .guest_phys_addr = addr, | ||
1783 | .memory_size = PAGE_SIZE * 3, | ||
1784 | .flags = 0, | ||
1785 | }; | ||
1786 | |||
1787 | ret = kvm_set_memory_region(kvm, &tss_mem, 0); | ||
1788 | if (ret) | ||
1789 | return ret; | ||
1790 | kvm->arch.tss_addr = addr; | ||
1791 | return 0; | ||
1792 | } | ||
1793 | |||
1794 | static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu) | ||
1795 | { | ||
1796 | struct kvm_guest_debug *dbg = &vcpu->guest_debug; | ||
1797 | |||
1798 | set_debugreg(dbg->bp[0], 0); | ||
1799 | set_debugreg(dbg->bp[1], 1); | ||
1800 | set_debugreg(dbg->bp[2], 2); | ||
1801 | set_debugreg(dbg->bp[3], 3); | ||
1802 | |||
1803 | if (dbg->singlestep) { | ||
1804 | unsigned long flags; | ||
1805 | |||
1806 | flags = vmcs_readl(GUEST_RFLAGS); | ||
1807 | flags |= X86_EFLAGS_TF | X86_EFLAGS_RF; | ||
1808 | vmcs_writel(GUEST_RFLAGS, flags); | ||
1809 | } | ||
1810 | } | ||
1811 | |||
1812 | static int handle_rmode_exception(struct kvm_vcpu *vcpu, | ||
1813 | int vec, u32 err_code) | ||
1814 | { | ||
1815 | if (!vcpu->arch.rmode.active) | ||
1816 | return 0; | ||
1817 | |||
1818 | /* | ||
1819 | * An instruction with the address-size override prefix (opcode 0x67) | ||
1820 | * causes a #SS fault with error code 0 in VM86 mode. | ||
1821 | */ | ||
1822 | if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) | ||
1823 | if (emulate_instruction(vcpu, NULL, 0, 0, 0) == EMULATE_DONE) | ||
1824 | return 1; | ||
1825 | return 0; | ||
1826 | } | ||
1827 | |||
1828 | static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
1829 | { | ||
1830 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
1831 | u32 intr_info, error_code; | ||
1832 | unsigned long cr2, rip; | ||
1833 | u32 vect_info; | ||
1834 | enum emulation_result er; | ||
1835 | |||
1836 | vect_info = vmx->idt_vectoring_info; | ||
1837 | intr_info = vmcs_read32(VM_EXIT_INTR_INFO); | ||
1838 | |||
1839 | if ((vect_info & VECTORING_INFO_VALID_MASK) && | ||
1840 | !is_page_fault(intr_info)) | ||
1841 | printk(KERN_ERR "%s: unexpected, vectoring info 0x%x " | ||
1842 | "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info); | ||
1843 | |||
1844 | if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) { | ||
1845 | int irq = vect_info & VECTORING_INFO_VECTOR_MASK; | ||
1846 | set_bit(irq, vcpu->arch.irq_pending); | ||
1847 | set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary); | ||
1848 | } | ||
1849 | |||
1850 | if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */ | ||
1851 | return 1; /* already handled by vmx_vcpu_run() */ | ||
1852 | |||
1853 | if (is_no_device(intr_info)) { | ||
1854 | vmx_fpu_activate(vcpu); | ||
1855 | return 1; | ||
1856 | } | ||
1857 | |||
1858 | if (is_invalid_opcode(intr_info)) { | ||
1859 | er = emulate_instruction(vcpu, kvm_run, 0, 0, 0); | ||
1860 | if (er != EMULATE_DONE) | ||
1861 | kvm_queue_exception(vcpu, UD_VECTOR); | ||
1862 | return 1; | ||
1863 | } | ||
1864 | |||
1865 | error_code = 0; | ||
1866 | rip = vmcs_readl(GUEST_RIP); | ||
1867 | if (intr_info & INTR_INFO_DELIEVER_CODE_MASK) | ||
1868 | error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); | ||
1869 | if (is_page_fault(intr_info)) { | ||
1870 | cr2 = vmcs_readl(EXIT_QUALIFICATION); | ||
1871 | return kvm_mmu_page_fault(vcpu, cr2, error_code); | ||
1872 | } | ||
1873 | |||
1874 | if (vcpu->arch.rmode.active && | ||
1875 | handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK, | ||
1876 | error_code)) { | ||
1877 | if (vcpu->arch.halt_request) { | ||
1878 | vcpu->arch.halt_request = 0; | ||
1879 | return kvm_emulate_halt(vcpu); | ||
1880 | } | ||
1881 | return 1; | ||
1882 | } | ||
1883 | |||
1884 | if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == | ||
1885 | (INTR_TYPE_EXCEPTION | 1)) { | ||
1886 | kvm_run->exit_reason = KVM_EXIT_DEBUG; | ||
1887 | return 0; | ||
1888 | } | ||
1889 | kvm_run->exit_reason = KVM_EXIT_EXCEPTION; | ||
1890 | kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK; | ||
1891 | kvm_run->ex.error_code = error_code; | ||
1892 | return 0; | ||
1893 | } | ||
1894 | |||
1895 | static int handle_external_interrupt(struct kvm_vcpu *vcpu, | ||
1896 | struct kvm_run *kvm_run) | ||
1897 | { | ||
1898 | ++vcpu->stat.irq_exits; | ||
1899 | return 1; | ||
1900 | } | ||
1901 | |||
1902 | static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
1903 | { | ||
1904 | kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; | ||
1905 | return 0; | ||
1906 | } | ||
1907 | |||
1908 | static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
1909 | { | ||
1910 | unsigned long exit_qualification; | ||
1911 | int size, down, in, string, rep; | ||
1912 | unsigned port; | ||
1913 | |||
1914 | ++vcpu->stat.io_exits; | ||
1915 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | ||
1916 | string = (exit_qualification & 16) != 0; | ||
1917 | |||
1918 | if (string) { | ||
1919 | if (emulate_instruction(vcpu, | ||
1920 | kvm_run, 0, 0, 0) == EMULATE_DO_MMIO) | ||
1921 | return 0; | ||
1922 | return 1; | ||
1923 | } | ||
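/*
 * Exit qualification for I/O instructions (Intel SDM): bits 2:0 are the
 * access size minus one, bit 3 the direction (1 = in), bit 5 a REP
 * prefix, and bits 31:16 the port number.
 */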
1924 | |||
1925 | size = (exit_qualification & 7) + 1; | ||
1926 | in = (exit_qualification & 8) != 0; | ||
1927 | down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0; | ||
1928 | rep = (exit_qualification & 32) != 0; | ||
1929 | port = exit_qualification >> 16; | ||
1930 | |||
1931 | return kvm_emulate_pio(vcpu, kvm_run, in, size, port); | ||
1932 | } | ||
1933 | |||
1934 | static void | ||
1935 | vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) | ||
1936 | { | ||
1937 | /* | ||
1938 | * Patch in the VMCALL instruction: | ||
1939 | */ | ||
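/* 0f 01 c1 is the encoding of the VMCALL instruction. */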
1940 | hypercall[0] = 0x0f; | ||
1941 | hypercall[1] = 0x01; | ||
1942 | hypercall[2] = 0xc1; | ||
1943 | } | ||
1944 | |||
1945 | static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
1946 | { | ||
1947 | unsigned long exit_qualification; | ||
1948 | int cr; | ||
1949 | int reg; | ||
1950 | |||
1951 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | ||
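/*
 * Exit qualification for CR accesses: bits 3:0 are the control register
 * number, bits 5:4 the access type (0 = mov to cr, 1 = mov from cr,
 * 2 = clts, 3 = lmsw) and bits 11:8 the gpr.
 */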
1952 | cr = exit_qualification & 15; | ||
1953 | reg = (exit_qualification >> 8) & 15; | ||
1954 | switch ((exit_qualification >> 4) & 3) { | ||
1955 | case 0: /* mov to cr */ | ||
1956 | switch (cr) { | ||
1957 | case 0: | ||
1958 | vcpu_load_rsp_rip(vcpu); | ||
1959 | set_cr0(vcpu, vcpu->arch.regs[reg]); | ||
1960 | skip_emulated_instruction(vcpu); | ||
1961 | return 1; | ||
1962 | case 3: | ||
1963 | vcpu_load_rsp_rip(vcpu); | ||
1964 | set_cr3(vcpu, vcpu->arch.regs[reg]); | ||
1965 | skip_emulated_instruction(vcpu); | ||
1966 | return 1; | ||
1967 | case 4: | ||
1968 | vcpu_load_rsp_rip(vcpu); | ||
1969 | set_cr4(vcpu, vcpu->arch.regs[reg]); | ||
1970 | skip_emulated_instruction(vcpu); | ||
1971 | return 1; | ||
1972 | case 8: | ||
1973 | vcpu_load_rsp_rip(vcpu); | ||
1974 | set_cr8(vcpu, vcpu->arch.regs[reg]); | ||
1975 | skip_emulated_instruction(vcpu); | ||
1976 | if (irqchip_in_kernel(vcpu->kvm)) | ||
1977 | return 1; | ||
1978 | kvm_run->exit_reason = KVM_EXIT_SET_TPR; | ||
1979 | return 0; | ||
1980 | } | ||
1981 | break; | ||
1982 | case 2: /* clts */ | ||
1983 | vcpu_load_rsp_rip(vcpu); | ||
1984 | vmx_fpu_deactivate(vcpu); | ||
1985 | vcpu->arch.cr0 &= ~X86_CR0_TS; | ||
1986 | vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0); | ||
1987 | vmx_fpu_activate(vcpu); | ||
1988 | skip_emulated_instruction(vcpu); | ||
1989 | return 1; | ||
1990 | case 1: /* mov from cr */ | ||
1991 | switch (cr) { | ||
1992 | case 3: | ||
1993 | vcpu_load_rsp_rip(vcpu); | ||
1994 | vcpu->arch.regs[reg] = vcpu->arch.cr3; | ||
1995 | vcpu_put_rsp_rip(vcpu); | ||
1996 | skip_emulated_instruction(vcpu); | ||
1997 | return 1; | ||
1998 | case 8: | ||
1999 | vcpu_load_rsp_rip(vcpu); | ||
2000 | vcpu->arch.regs[reg] = get_cr8(vcpu); | ||
2001 | vcpu_put_rsp_rip(vcpu); | ||
2002 | skip_emulated_instruction(vcpu); | ||
2003 | return 1; | ||
2004 | } | ||
2005 | break; | ||
2006 | case 3: /* lmsw */ | ||
2007 | lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f); | ||
2008 | |||
2009 | skip_emulated_instruction(vcpu); | ||
2010 | return 1; | ||
2011 | default: | ||
2012 | break; | ||
2013 | } | ||
2014 | kvm_run->exit_reason = 0; | ||
2015 | pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n", | ||
2016 | (int)(exit_qualification >> 4) & 3, cr); | ||
2017 | return 0; | ||
2018 | } | ||
2019 | |||
2020 | static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
2021 | { | ||
2022 | unsigned long exit_qualification; | ||
2023 | unsigned long val; | ||
2024 | int dr, reg; | ||
2025 | |||
2026 | /* | ||
2027 | * FIXME: this code assumes the host is debugging the guest. | ||
2028 | * Need to deal with the guest debugging itself too. | ||
2029 | */ | ||
2030 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | ||
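/*
 * Exit qualification for DR accesses: bits 2:0 are the debug register
 * number, bit 4 the direction (1 = mov from dr) and bits 11:8 the gpr.
 */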
2031 | dr = exit_qualification & 7; | ||
2032 | reg = (exit_qualification >> 8) & 15; | ||
2033 | vcpu_load_rsp_rip(vcpu); | ||
2034 | if (exit_qualification & 16) { | ||
2035 | /* mov from dr */ | ||
2036 | switch (dr) { | ||
2037 | case 6: | ||
2038 | val = 0xffff0ff0; | ||
2039 | break; | ||
2040 | case 7: | ||
2041 | val = 0x400; | ||
2042 | break; | ||
2043 | default: | ||
2044 | val = 0; | ||
2045 | } | ||
2046 | vcpu->arch.regs[reg] = val; | ||
2047 | } else { | ||
2048 | /* mov to dr */ | ||
2049 | } | ||
2050 | vcpu_put_rsp_rip(vcpu); | ||
2051 | skip_emulated_instruction(vcpu); | ||
2052 | return 1; | ||
2053 | } | ||
2054 | |||
2055 | static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
2056 | { | ||
2057 | kvm_emulate_cpuid(vcpu); | ||
2058 | return 1; | ||
2059 | } | ||
2060 | |||
2061 | static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
2062 | { | ||
2063 | u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; | ||
2064 | u64 data; | ||
2065 | |||
2066 | if (vmx_get_msr(vcpu, ecx, &data)) { | ||
2067 | kvm_inject_gp(vcpu, 0); | ||
2068 | return 1; | ||
2069 | } | ||
2070 | |||
2071 | /* FIXME: handling of bits 32:63 of rax, rdx */ | ||
2072 | vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u; | ||
2073 | vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u; | ||
2074 | skip_emulated_instruction(vcpu); | ||
2075 | return 1; | ||
2076 | } | ||
2077 | |||
2078 | static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
2079 | { | ||
2080 | u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; | ||
2081 | u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) | ||
2082 | | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32); | ||
2083 | |||
2084 | if (vmx_set_msr(vcpu, ecx, data) != 0) { | ||
2085 | kvm_inject_gp(vcpu, 0); | ||
2086 | return 1; | ||
2087 | } | ||
2088 | |||
2089 | skip_emulated_instruction(vcpu); | ||
2090 | return 1; | ||
2091 | } | ||
2092 | |||
2093 | static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu, | ||
2094 | struct kvm_run *kvm_run) | ||
2095 | { | ||
2096 | return 1; | ||
2097 | } | ||
2098 | |||
2099 | static int handle_interrupt_window(struct kvm_vcpu *vcpu, | ||
2100 | struct kvm_run *kvm_run) | ||
2101 | { | ||
2102 | u32 cpu_based_vm_exec_control; | ||
2103 | |||
2104 | /* clear pending irq */ | ||
2105 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); | ||
2106 | cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; | ||
2107 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); | ||
2108 | /* | ||
2109 | * If userspace is waiting to inject interrupts, exit as soon as | ||
2110 | * possible. | ||
2111 | */ | ||
2112 | if (kvm_run->request_interrupt_window && | ||
2113 | !vcpu->arch.irq_summary) { | ||
2114 | kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; | ||
2115 | ++vcpu->stat.irq_window_exits; | ||
2116 | return 0; | ||
2117 | } | ||
2118 | return 1; | ||
2119 | } | ||
2120 | |||
2121 | static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
2122 | { | ||
2123 | skip_emulated_instruction(vcpu); | ||
2124 | return kvm_emulate_halt(vcpu); | ||
2125 | } | ||
2126 | |||
2127 | static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
2128 | { | ||
2129 | skip_emulated_instruction(vcpu); | ||
2130 | kvm_emulate_hypercall(vcpu); | ||
2131 | return 1; | ||
2132 | } | ||
2133 | |||
2134 | static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
2135 | { | ||
2136 | skip_emulated_instruction(vcpu); | ||
2137 | /* TODO: Add support for VT-d/pass-through device */ | ||
2138 | return 1; | ||
2139 | } | ||
2140 | |||
2141 | static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
2142 | { | ||
2143 | u64 exit_qualification; | ||
2144 | enum emulation_result er; | ||
2145 | unsigned long offset; | ||
2146 | |||
2147 | exit_qualification = vmcs_read64(EXIT_QUALIFICATION); | ||
2148 | offset = exit_qualification & 0xffful; | ||
2149 | |||
2150 | er = emulate_instruction(vcpu, kvm_run, 0, 0, 0); | ||
2151 | |||
2152 | if (er != EMULATE_DONE) { | ||
2153 | printk(KERN_ERR | ||
2154 | "Fail to handle apic access vmexit! Offset is 0x%lx\n", | ||
2155 | offset); | ||
2156 | return -ENOTSUPP; | ||
2157 | } | ||
2158 | return 1; | ||
2159 | } | ||
2160 | |||
2161 | /* | ||
2162 | * The exit handlers return 1 if the exit was handled fully and guest execution | ||
2163 | * may resume. Otherwise they set the kvm_run parameter to indicate what needs | ||
2164 | * to be done to userspace and return 0. | ||
2165 | */ | ||
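/*
 * The table is indexed by the basic exit reason read from VM_EXIT_REASON;
 * kvm_handle_exit() reports KVM_EXIT_UNKNOWN for reasons without a handler.
 */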
2166 | static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu, | ||
2167 | struct kvm_run *kvm_run) = { | ||
2168 | [EXIT_REASON_EXCEPTION_NMI] = handle_exception, | ||
2169 | [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, | ||
2170 | [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault, | ||
2171 | [EXIT_REASON_IO_INSTRUCTION] = handle_io, | ||
2172 | [EXIT_REASON_CR_ACCESS] = handle_cr, | ||
2173 | [EXIT_REASON_DR_ACCESS] = handle_dr, | ||
2174 | [EXIT_REASON_CPUID] = handle_cpuid, | ||
2175 | [EXIT_REASON_MSR_READ] = handle_rdmsr, | ||
2176 | [EXIT_REASON_MSR_WRITE] = handle_wrmsr, | ||
2177 | [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window, | ||
2178 | [EXIT_REASON_HLT] = handle_halt, | ||
2179 | [EXIT_REASON_VMCALL] = handle_vmcall, | ||
2180 | [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, | ||
2181 | [EXIT_REASON_APIC_ACCESS] = handle_apic_access, | ||
2182 | [EXIT_REASON_WBINVD] = handle_wbinvd, | ||
2183 | }; | ||
2184 | |||
2185 | static const int kvm_vmx_max_exit_handlers = | ||
2186 | ARRAY_SIZE(kvm_vmx_exit_handlers); | ||
2187 | |||
2188 | /* | ||
2189 | * The guest has exited. See if we can fix it or if we need userspace | ||
2190 | * assistance. | ||
2191 | */ | ||
2192 | static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | ||
2193 | { | ||
2194 | u32 exit_reason = vmcs_read32(VM_EXIT_REASON); | ||
2195 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
2196 | u32 vectoring_info = vmx->idt_vectoring_info; | ||
2197 | |||
2198 | if (unlikely(vmx->fail)) { | ||
2199 | kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; | ||
2200 | kvm_run->fail_entry.hardware_entry_failure_reason | ||
2201 | = vmcs_read32(VM_INSTRUCTION_ERROR); | ||
2202 | return 0; | ||
2203 | } | ||
2204 | |||
2205 | if ((vectoring_info & VECTORING_INFO_VALID_MASK) && | ||
2206 | exit_reason != EXIT_REASON_EXCEPTION_NMI) | ||
2207 | printk(KERN_WARNING "%s: unexpected, valid vectoring info and " | ||
2208 | "exit reason is 0x%x\n", __FUNCTION__, exit_reason); | ||
2209 | if (exit_reason < kvm_vmx_max_exit_handlers | ||
2210 | && kvm_vmx_exit_handlers[exit_reason]) | ||
2211 | return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run); | ||
2212 | else { | ||
2213 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | ||
2214 | kvm_run->hw.hardware_exit_reason = exit_reason; | ||
2215 | } | ||
2216 | return 0; | ||
2217 | } | ||
2218 | |||
2219 | static void vmx_flush_tlb(struct kvm_vcpu *vcpu) | ||
2220 | { | ||
2221 | } | ||
2222 | |||
2223 | static void update_tpr_threshold(struct kvm_vcpu *vcpu) | ||
2224 | { | ||
2225 | int max_irr, tpr; | ||
2226 | |||
2227 | if (!vm_need_tpr_shadow(vcpu->kvm)) | ||
2228 | return; | ||
2229 | |||
2230 | if (!kvm_lapic_enabled(vcpu) || | ||
2231 | ((max_irr = kvm_lapic_find_highest_irr(vcpu)) == -1)) { | ||
2232 | vmcs_write32(TPR_THRESHOLD, 0); | ||
2233 | return; | ||
2234 | } | ||
2235 | |||
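/*
 * CR8 holds the task-priority class (TPR bits 7:4); TPR_THRESHOLD takes a
 * 4-bit class, so write the smaller of the TPR class and the class of the
 * highest pending interrupt.
 */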
2236 | tpr = (kvm_lapic_get_cr8(vcpu) & 0x0f) << 4; | ||
2237 | vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? tpr >> 4 : max_irr >> 4); | ||
2238 | } | ||
2239 | |||
2240 | static void enable_irq_window(struct kvm_vcpu *vcpu) | ||
2241 | { | ||
2242 | u32 cpu_based_vm_exec_control; | ||
2243 | |||
2244 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); | ||
2245 | cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING; | ||
2246 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); | ||
2247 | } | ||
2248 | |||
2249 | static void vmx_intr_assist(struct kvm_vcpu *vcpu) | ||
2250 | { | ||
2251 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
2252 | u32 idtv_info_field, intr_info_field; | ||
2253 | int has_ext_irq, interrupt_window_open; | ||
2254 | int vector; | ||
2255 | |||
2256 | update_tpr_threshold(vcpu); | ||
2257 | |||
2258 | has_ext_irq = kvm_cpu_has_interrupt(vcpu); | ||
2259 | intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD); | ||
2260 | idtv_info_field = vmx->idt_vectoring_info; | ||
2261 | if (intr_info_field & INTR_INFO_VALID_MASK) { | ||
2262 | if (idtv_info_field & INTR_INFO_VALID_MASK) { | ||
2263 | /* TODO: fault when IDT_Vectoring */ | ||
2264 | if (printk_ratelimit()) | ||
2265 | printk(KERN_ERR "Fault when IDT_Vectoring\n"); | ||
2266 | } | ||
2267 | if (has_ext_irq) | ||
2268 | enable_irq_window(vcpu); | ||
2269 | return; | ||
2270 | } | ||
2271 | if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) { | ||
2272 | if ((idtv_info_field & VECTORING_INFO_TYPE_MASK) | ||
2273 | == INTR_TYPE_EXT_INTR | ||
2274 | && vcpu->arch.rmode.active) { | ||
2275 | u8 vect = idtv_info_field & VECTORING_INFO_VECTOR_MASK; | ||
2276 | |||
2277 | vmx_inject_irq(vcpu, vect); | ||
2278 | if (unlikely(has_ext_irq)) | ||
2279 | enable_irq_window(vcpu); | ||
2280 | return; | ||
2281 | } | ||
2282 | |||
2283 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field); | ||
2284 | vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, | ||
2285 | vmcs_read32(VM_EXIT_INSTRUCTION_LEN)); | ||
2286 | |||
2287 | if (unlikely(idtv_info_field & INTR_INFO_DELIEVER_CODE_MASK)) | ||
2288 | vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, | ||
2289 | vmcs_read32(IDT_VECTORING_ERROR_CODE)); | ||
2290 | if (unlikely(has_ext_irq)) | ||
2291 | enable_irq_window(vcpu); | ||
2292 | return; | ||
2293 | } | ||
2294 | if (!has_ext_irq) | ||
2295 | return; | ||
2296 | interrupt_window_open = | ||
2297 | ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && | ||
2298 | (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0); | ||
2299 | if (interrupt_window_open) { | ||
2300 | vector = kvm_cpu_get_interrupt(vcpu); | ||
2301 | vmx_inject_irq(vcpu, vector); | ||
2302 | kvm_timer_intr_post(vcpu, vector); | ||
2303 | } else | ||
2304 | enable_irq_window(vcpu); | ||
2305 | } | ||
2306 | |||
2307 | /* | ||
2308 | * Failure to inject an interrupt should give us the information | ||
2309 | * in IDT_VECTORING_INFO_FIELD. However, if the failure occurs | ||
2310 | * when fetching the interrupt redirection bitmap in the real-mode | ||
2311 | * tss, this doesn't happen. So we do it ourselves. | ||
2312 | */ | ||
2313 | static void fixup_rmode_irq(struct vcpu_vmx *vmx) | ||
2314 | { | ||
2315 | vmx->rmode.irq.pending = 0; | ||
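/*
 * If GUEST_RIP still points at the phantom one-byte instruction (rip - 1),
 * delivery did not complete; put RIP back and build the vectoring info by
 * hand.
 */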
2316 | if (vmcs_readl(GUEST_RIP) + 1 != vmx->rmode.irq.rip) | ||
2317 | return; | ||
2318 | vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip); | ||
2319 | if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) { | ||
2320 | vmx->idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK; | ||
2321 | vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR; | ||
2322 | return; | ||
2323 | } | ||
2324 | vmx->idt_vectoring_info = | ||
2325 | VECTORING_INFO_VALID_MASK | ||
2326 | | INTR_TYPE_EXT_INTR | ||
2327 | | vmx->rmode.irq.vector; | ||
2328 | } | ||
2329 | |||
2330 | static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
2331 | { | ||
2332 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
2333 | u32 intr_info; | ||
2334 | |||
2335 | /* | ||
2336 | * Loading guest fpu may have cleared host cr0.ts | ||
2337 | */ | ||
2338 | vmcs_writel(HOST_CR0, read_cr0()); | ||
2339 | |||
2340 | asm( | ||
2341 | /* Store host registers */ | ||
2342 | #ifdef CONFIG_X86_64 | ||
2343 | "push %%rdx; push %%rbp;" | ||
2344 | "push %%rcx \n\t" | ||
2345 | #else | ||
2346 | "push %%edx; push %%ebp;" | ||
2347 | "push %%ecx \n\t" | ||
2348 | #endif | ||
2349 | ASM_VMX_VMWRITE_RSP_RDX "\n\t" | ||
2350 | /* Check if vmlaunch or vmresume is needed */ | ||
2351 | "cmpl $0, %c[launched](%0) \n\t" | ||
2352 | /* Load guest registers. Don't clobber flags. */ | ||
2353 | #ifdef CONFIG_X86_64 | ||
2354 | "mov %c[cr2](%0), %%rax \n\t" | ||
2355 | "mov %%rax, %%cr2 \n\t" | ||
2356 | "mov %c[rax](%0), %%rax \n\t" | ||
2357 | "mov %c[rbx](%0), %%rbx \n\t" | ||
2358 | "mov %c[rdx](%0), %%rdx \n\t" | ||
2359 | "mov %c[rsi](%0), %%rsi \n\t" | ||
2360 | "mov %c[rdi](%0), %%rdi \n\t" | ||
2361 | "mov %c[rbp](%0), %%rbp \n\t" | ||
2362 | "mov %c[r8](%0), %%r8 \n\t" | ||
2363 | "mov %c[r9](%0), %%r9 \n\t" | ||
2364 | "mov %c[r10](%0), %%r10 \n\t" | ||
2365 | "mov %c[r11](%0), %%r11 \n\t" | ||
2366 | "mov %c[r12](%0), %%r12 \n\t" | ||
2367 | "mov %c[r13](%0), %%r13 \n\t" | ||
2368 | "mov %c[r14](%0), %%r14 \n\t" | ||
2369 | "mov %c[r15](%0), %%r15 \n\t" | ||
2370 | "mov %c[rcx](%0), %%rcx \n\t" /* kills %0 (rcx) */ | ||
2371 | #else | ||
2372 | "mov %c[cr2](%0), %%eax \n\t" | ||
2373 | "mov %%eax, %%cr2 \n\t" | ||
2374 | "mov %c[rax](%0), %%eax \n\t" | ||
2375 | "mov %c[rbx](%0), %%ebx \n\t" | ||
2376 | "mov %c[rdx](%0), %%edx \n\t" | ||
2377 | "mov %c[rsi](%0), %%esi \n\t" | ||
2378 | "mov %c[rdi](%0), %%edi \n\t" | ||
2379 | "mov %c[rbp](%0), %%ebp \n\t" | ||
2380 | "mov %c[rcx](%0), %%ecx \n\t" /* kills %0 (ecx) */ | ||
2381 | #endif | ||
2382 | /* Enter guest mode */ | ||
2383 | "jne .Llaunched \n\t" | ||
2384 | ASM_VMX_VMLAUNCH "\n\t" | ||
2385 | "jmp .Lkvm_vmx_return \n\t" | ||
2386 | ".Llaunched: " ASM_VMX_VMRESUME "\n\t" | ||
2387 | ".Lkvm_vmx_return: " | ||
2388 | /* Save guest registers, load host registers, keep flags */ | ||
2389 | #ifdef CONFIG_X86_64 | ||
2390 | "xchg %0, (%%rsp) \n\t" | ||
2391 | "mov %%rax, %c[rax](%0) \n\t" | ||
2392 | "mov %%rbx, %c[rbx](%0) \n\t" | ||
2393 | "pushq (%%rsp); popq %c[rcx](%0) \n\t" | ||
2394 | "mov %%rdx, %c[rdx](%0) \n\t" | ||
2395 | "mov %%rsi, %c[rsi](%0) \n\t" | ||
2396 | "mov %%rdi, %c[rdi](%0) \n\t" | ||
2397 | "mov %%rbp, %c[rbp](%0) \n\t" | ||
2398 | "mov %%r8, %c[r8](%0) \n\t" | ||
2399 | "mov %%r9, %c[r9](%0) \n\t" | ||
2400 | "mov %%r10, %c[r10](%0) \n\t" | ||
2401 | "mov %%r11, %c[r11](%0) \n\t" | ||
2402 | "mov %%r12, %c[r12](%0) \n\t" | ||
2403 | "mov %%r13, %c[r13](%0) \n\t" | ||
2404 | "mov %%r14, %c[r14](%0) \n\t" | ||
2405 | "mov %%r15, %c[r15](%0) \n\t" | ||
2406 | "mov %%cr2, %%rax \n\t" | ||
2407 | "mov %%rax, %c[cr2](%0) \n\t" | ||
2408 | |||
2409 | "pop %%rbp; pop %%rbp; pop %%rdx \n\t" | ||
2410 | #else | ||
2411 | "xchg %0, (%%esp) \n\t" | ||
2412 | "mov %%eax, %c[rax](%0) \n\t" | ||
2413 | "mov %%ebx, %c[rbx](%0) \n\t" | ||
2414 | "pushl (%%esp); popl %c[rcx](%0) \n\t" | ||
2415 | "mov %%edx, %c[rdx](%0) \n\t" | ||
2416 | "mov %%esi, %c[rsi](%0) \n\t" | ||
2417 | "mov %%edi, %c[rdi](%0) \n\t" | ||
2418 | "mov %%ebp, %c[rbp](%0) \n\t" | ||
2419 | "mov %%cr2, %%eax \n\t" | ||
2420 | "mov %%eax, %c[cr2](%0) \n\t" | ||
2421 | |||
2422 | "pop %%ebp; pop %%ebp; pop %%edx \n\t" | ||
2423 | #endif | ||
2424 | "setbe %c[fail](%0) \n\t" | ||
2425 | : : "c"(vmx), "d"((unsigned long)HOST_RSP), | ||
2426 | [launched]"i"(offsetof(struct vcpu_vmx, launched)), | ||
2427 | [fail]"i"(offsetof(struct vcpu_vmx, fail)), | ||
2428 | [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])), | ||
2429 | [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])), | ||
2430 | [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])), | ||
2431 | [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])), | ||
2432 | [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])), | ||
2433 | [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])), | ||
2434 | [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])), | ||
2435 | #ifdef CONFIG_X86_64 | ||
2436 | [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])), | ||
2437 | [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])), | ||
2438 | [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])), | ||
2439 | [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])), | ||
2440 | [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])), | ||
2441 | [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])), | ||
2442 | [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])), | ||
2443 | [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])), | ||
2444 | #endif | ||
2445 | [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)) | ||
2446 | : "cc", "memory" | ||
2447 | #ifdef CONFIG_X86_64 | ||
2448 | , "rbx", "rdi", "rsi" | ||
2449 | , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" | ||
2450 | #else | ||
2451 | , "ebx", "edi", "rsi" | ||
2452 | #endif | ||
2453 | ); | ||
2454 | |||
2455 | vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); | ||
2456 | if (vmx->rmode.irq.pending) | ||
2457 | fixup_rmode_irq(vmx); | ||
2458 | |||
2459 | vcpu->arch.interrupt_window_open = | ||
2460 | (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0; | ||
2461 | |||
2462 | asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); | ||
2463 | vmx->launched = 1; | ||
2464 | |||
2465 | intr_info = vmcs_read32(VM_EXIT_INTR_INFO); | ||
2466 | |||
2467 | /* We need to handle NMIs before interrupts are enabled */ | ||
2468 | if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */ | ||
2469 | asm("int $2"); | ||
2470 | } | ||
2471 | |||
2472 | static void vmx_free_vmcs(struct kvm_vcpu *vcpu) | ||
2473 | { | ||
2474 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
2475 | |||
2476 | if (vmx->vmcs) { | ||
2477 | on_each_cpu(__vcpu_clear, vmx, 0, 1); | ||
2478 | free_vmcs(vmx->vmcs); | ||
2479 | vmx->vmcs = NULL; | ||
2480 | } | ||
2481 | } | ||
2482 | |||
2483 | static void vmx_free_vcpu(struct kvm_vcpu *vcpu) | ||
2484 | { | ||
2485 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
2486 | |||
2487 | vmx_free_vmcs(vcpu); | ||
2488 | kfree(vmx->host_msrs); | ||
2489 | kfree(vmx->guest_msrs); | ||
2490 | kvm_vcpu_uninit(vcpu); | ||
2491 | kmem_cache_free(kvm_vcpu_cache, vmx); | ||
2492 | } | ||
2493 | |||
2494 | static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) | ||
2495 | { | ||
2496 | int err; | ||
2497 | struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); | ||
2498 | int cpu; | ||
2499 | |||
2500 | if (!vmx) | ||
2501 | return ERR_PTR(-ENOMEM); | ||
2502 | |||
2503 | err = kvm_vcpu_init(&vmx->vcpu, kvm, id); | ||
2504 | if (err) | ||
2505 | goto free_vcpu; | ||
2506 | |||
2507 | vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
2508 | if (!vmx->guest_msrs) { | ||
2509 | err = -ENOMEM; | ||
2510 | goto uninit_vcpu; | ||
2511 | } | ||
2512 | |||
2513 | vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
2514 | if (!vmx->host_msrs) | ||
2515 | goto free_guest_msrs; | ||
2516 | |||
2517 | vmx->vmcs = alloc_vmcs(); | ||
2518 | if (!vmx->vmcs) | ||
2519 | goto free_msrs; | ||
2520 | |||
2521 | vmcs_clear(vmx->vmcs); | ||
2522 | |||
2523 | cpu = get_cpu(); | ||
2524 | vmx_vcpu_load(&vmx->vcpu, cpu); | ||
2525 | err = vmx_vcpu_setup(vmx); | ||
2526 | vmx_vcpu_put(&vmx->vcpu); | ||
2527 | put_cpu(); | ||
2528 | if (err) | ||
2529 | goto free_vmcs; | ||
2530 | |||
2531 | return &vmx->vcpu; | ||
2532 | |||
2533 | free_vmcs: | ||
2534 | free_vmcs(vmx->vmcs); | ||
2535 | free_msrs: | ||
2536 | kfree(vmx->host_msrs); | ||
2537 | free_guest_msrs: | ||
2538 | kfree(vmx->guest_msrs); | ||
2539 | uninit_vcpu: | ||
2540 | kvm_vcpu_uninit(&vmx->vcpu); | ||
2541 | free_vcpu: | ||
2542 | kmem_cache_free(kvm_vcpu_cache, vmx); | ||
2543 | return ERR_PTR(err); | ||
2544 | } | ||
2545 | |||
2546 | static void __init vmx_check_processor_compat(void *rtn) | ||
2547 | { | ||
2548 | struct vmcs_config vmcs_conf; | ||
2549 | |||
2550 | *(int *)rtn = 0; | ||
2551 | if (setup_vmcs_config(&vmcs_conf) < 0) | ||
2552 | *(int *)rtn = -EIO; | ||
2553 | if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) { | ||
2554 | printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n", | ||
2555 | smp_processor_id()); | ||
2556 | *(int *)rtn = -EIO; | ||
2557 | } | ||
2558 | } | ||
2559 | |||
2560 | static struct kvm_x86_ops vmx_x86_ops = { | ||
2561 | .cpu_has_kvm_support = cpu_has_kvm_support, | ||
2562 | .disabled_by_bios = vmx_disabled_by_bios, | ||
2563 | .hardware_setup = hardware_setup, | ||
2564 | .hardware_unsetup = hardware_unsetup, | ||
2565 | .check_processor_compatibility = vmx_check_processor_compat, | ||
2566 | .hardware_enable = hardware_enable, | ||
2567 | .hardware_disable = hardware_disable, | ||
2568 | |||
2569 | .vcpu_create = vmx_create_vcpu, | ||
2570 | .vcpu_free = vmx_free_vcpu, | ||
2571 | .vcpu_reset = vmx_vcpu_reset, | ||
2572 | |||
2573 | .prepare_guest_switch = vmx_save_host_state, | ||
2574 | .vcpu_load = vmx_vcpu_load, | ||
2575 | .vcpu_put = vmx_vcpu_put, | ||
2576 | .vcpu_decache = vmx_vcpu_decache, | ||
2577 | |||
2578 | .set_guest_debug = set_guest_debug, | ||
2579 | .guest_debug_pre = kvm_guest_debug_pre, | ||
2580 | .get_msr = vmx_get_msr, | ||
2581 | .set_msr = vmx_set_msr, | ||
2582 | .get_segment_base = vmx_get_segment_base, | ||
2583 | .get_segment = vmx_get_segment, | ||
2584 | .set_segment = vmx_set_segment, | ||
2585 | .get_cs_db_l_bits = vmx_get_cs_db_l_bits, | ||
2586 | .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits, | ||
2587 | .set_cr0 = vmx_set_cr0, | ||
2588 | .set_cr3 = vmx_set_cr3, | ||
2589 | .set_cr4 = vmx_set_cr4, | ||
2590 | #ifdef CONFIG_X86_64 | ||
2591 | .set_efer = vmx_set_efer, | ||
2592 | #endif | ||
2593 | .get_idt = vmx_get_idt, | ||
2594 | .set_idt = vmx_set_idt, | ||
2595 | .get_gdt = vmx_get_gdt, | ||
2596 | .set_gdt = vmx_set_gdt, | ||
2597 | .cache_regs = vcpu_load_rsp_rip, | ||
2598 | .decache_regs = vcpu_put_rsp_rip, | ||
2599 | .get_rflags = vmx_get_rflags, | ||
2600 | .set_rflags = vmx_set_rflags, | ||
2601 | |||
2602 | .tlb_flush = vmx_flush_tlb, | ||
2603 | |||
2604 | .run = vmx_vcpu_run, | ||
2605 | .handle_exit = kvm_handle_exit, | ||
2606 | .skip_emulated_instruction = skip_emulated_instruction, | ||
2607 | .patch_hypercall = vmx_patch_hypercall, | ||
2608 | .get_irq = vmx_get_irq, | ||
2609 | .set_irq = vmx_inject_irq, | ||
2610 | .queue_exception = vmx_queue_exception, | ||
2611 | .exception_injected = vmx_exception_injected, | ||
2612 | .inject_pending_irq = vmx_intr_assist, | ||
2613 | .inject_pending_vectors = do_interrupt_requests, | ||
2614 | |||
2615 | .set_tss_addr = vmx_set_tss_addr, | ||
2616 | }; | ||
2617 | |||
2618 | static int __init vmx_init(void) | ||
2619 | { | ||
2620 | void *iova; | ||
2621 | int r; | ||
2622 | |||
2623 | vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); | ||
2624 | if (!vmx_io_bitmap_a) | ||
2625 | return -ENOMEM; | ||
2626 | |||
2627 | vmx_io_bitmap_b = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); | ||
2628 | if (!vmx_io_bitmap_b) { | ||
2629 | r = -ENOMEM; | ||
2630 | goto out; | ||
2631 | } | ||
2632 | |||
2633 | /* | ||
2634 | * Allow direct access to the PC debug port (it is often used for I/O | ||
2635 | * delays, but the vmexits simply slow things down). | ||
2636 | */ | ||
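/*
 * A set bit in the I/O bitmap forces a vmexit for the corresponding port;
 * every bit is set and then bit 0x80 cleared, so only port 0x80 is passed
 * through.
 */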
2637 | iova = kmap(vmx_io_bitmap_a); | ||
2638 | memset(iova, 0xff, PAGE_SIZE); | ||
2639 | clear_bit(0x80, iova); | ||
2640 | kunmap(vmx_io_bitmap_a); | ||
2641 | |||
2642 | iova = kmap(vmx_io_bitmap_b); | ||
2643 | memset(iova, 0xff, PAGE_SIZE); | ||
2644 | kunmap(vmx_io_bitmap_b); | ||
2645 | |||
2646 | r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE); | ||
2647 | if (r) | ||
2648 | goto out1; | ||
2649 | |||
2650 | if (bypass_guest_pf) | ||
2651 | kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull); | ||
2652 | |||
2653 | return 0; | ||
2654 | |||
2655 | out1: | ||
2656 | __free_page(vmx_io_bitmap_b); | ||
2657 | out: | ||
2658 | __free_page(vmx_io_bitmap_a); | ||
2659 | return r; | ||
2660 | } | ||
2661 | |||
2662 | static void __exit vmx_exit(void) | ||
2663 | { | ||
2664 | __free_page(vmx_io_bitmap_b); | ||
2665 | __free_page(vmx_io_bitmap_a); | ||
2666 | |||
2667 | kvm_exit(); | ||
2668 | } | ||
2669 | |||
2670 | module_init(vmx_init) | ||
2671 | module_exit(vmx_exit) | ||