author | Avi Kivity <avi@qumranet.com> | 2007-12-16 04:02:48 -0500
---|---|---
committer | Avi Kivity <avi@qumranet.com> | 2008-01-30 11:01:18 -0500
commit | edf884172e9828c6234b254208af04655855038d | (patch)
tree | f5e5d1eecaed9737eced6ba60d09fe93149751c1 | /drivers/kvm/svm.c
parent | 9584bf2c93f56656dba0de8f6c75b54ca7995143 | (diff)
KVM: Move arch dependent files to new directory arch/x86/kvm/
This paves the way for multiple architecture support. Note that while
ioapic.c could potentially be shared with ia64, it is also moved.
Signed-off-by: Avi Kivity <avi@qumranet.com>
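
For orientation, the move recorded on this page maps paths as below. Only svm.c and ioapic.c are named in this commit text; sibling arch-dependent files are assumed to follow the same pattern:

old path | new path
---|---
drivers/kvm/svm.c | arch/x86/kvm/svm.c
drivers/kvm/ioapic.c | arch/x86/kvm/ioapic.c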
Diffstat (limited to 'drivers/kvm/svm.c')
-rw-r--r-- | drivers/kvm/svm.c | 1725
1 file changed, 0 insertions, 1725 deletions
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
deleted file mode 100644
index e606f6d18669..000000000000
--- a/drivers/kvm/svm.c
+++ /dev/null
@@ -1,1725 +0,0 @@
1 | /* | ||
2 | * Kernel-based Virtual Machine driver for Linux | ||
3 | * | ||
4 | * AMD SVM support | ||
5 | * | ||
6 | * Copyright (C) 2006 Qumranet, Inc. | ||
7 | * | ||
8 | * Authors: | ||
9 | * Yaniv Kamay <yaniv@qumranet.com> | ||
10 | * Avi Kivity <avi@qumranet.com> | ||
11 | * | ||
12 | * This work is licensed under the terms of the GNU GPL, version 2. See | ||
13 | * the COPYING file in the top-level directory. | ||
14 | * | ||
15 | */ | ||
16 | #include "x86.h" | ||
17 | #include "kvm_svm.h" | ||
18 | #include "x86_emulate.h" | ||
19 | #include "irq.h" | ||
20 | #include "mmu.h" | ||
21 | |||
22 | #include <linux/module.h> | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/vmalloc.h> | ||
25 | #include <linux/highmem.h> | ||
26 | #include <linux/sched.h> | ||
27 | |||
28 | #include <asm/desc.h> | ||
29 | |||
30 | MODULE_AUTHOR("Qumranet"); | ||
31 | MODULE_LICENSE("GPL"); | ||
32 | |||
33 | #define IOPM_ALLOC_ORDER 2 | ||
34 | #define MSRPM_ALLOC_ORDER 1 | ||
35 | |||
36 | #define DB_VECTOR 1 | ||
37 | #define UD_VECTOR 6 | ||
38 | #define GP_VECTOR 13 | ||
39 | |||
40 | #define DR7_GD_MASK (1 << 13) | ||
41 | #define DR6_BD_MASK (1 << 13) | ||
42 | |||
43 | #define SEG_TYPE_LDT 2 | ||
44 | #define SEG_TYPE_BUSY_TSS16 3 | ||
45 | |||
46 | #define SVM_FEATURE_NPT (1 << 0) | ||
47 | #define SVM_FEATURE_LBRV (1 << 1) | ||
48 | #define SVM_FEATURE_SVML (1 << 2) | ||
49 | |||
50 | static void kvm_reput_irq(struct vcpu_svm *svm); | ||
51 | |||
52 | static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) | ||
53 | { | ||
54 | return container_of(vcpu, struct vcpu_svm, vcpu); | ||
55 | } | ||
56 | |||
57 | unsigned long iopm_base; | ||
58 | unsigned long msrpm_base; | ||
59 | |||
60 | struct kvm_ldttss_desc { | ||
61 | u16 limit0; | ||
62 | u16 base0; | ||
63 | unsigned base1 : 8, type : 5, dpl : 2, p : 1; | ||
64 | unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8; | ||
65 | u32 base3; | ||
66 | u32 zero1; | ||
67 | } __attribute__((packed)); | ||
68 | |||
69 | struct svm_cpu_data { | ||
70 | int cpu; | ||
71 | |||
72 | u64 asid_generation; | ||
73 | u32 max_asid; | ||
74 | u32 next_asid; | ||
75 | struct kvm_ldttss_desc *tss_desc; | ||
76 | |||
77 | struct page *save_area; | ||
78 | }; | ||
79 | |||
80 | static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data); | ||
81 | static uint32_t svm_features; | ||
82 | |||
83 | struct svm_init_data { | ||
84 | int cpu; | ||
85 | int r; | ||
86 | }; | ||
87 | |||
88 | static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000}; | ||
89 | |||
90 | #define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges) | ||
91 | #define MSRS_RANGE_SIZE 2048 | ||
92 | #define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2) | ||
93 | |||
94 | #define MAX_INST_SIZE 15 | ||
95 | |||
96 | static inline u32 svm_has(u32 feat) | ||
97 | { | ||
98 | return svm_features & feat; | ||
99 | } | ||
100 | |||
101 | static inline u8 pop_irq(struct kvm_vcpu *vcpu) | ||
102 | { | ||
103 | int word_index = __ffs(vcpu->arch.irq_summary); | ||
104 | int bit_index = __ffs(vcpu->arch.irq_pending[word_index]); | ||
105 | int irq = word_index * BITS_PER_LONG + bit_index; | ||
106 | |||
107 | clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]); | ||
108 | if (!vcpu->arch.irq_pending[word_index]) | ||
109 | clear_bit(word_index, &vcpu->arch.irq_summary); | ||
110 | return irq; | ||
111 | } | ||
112 | |||
113 | static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq) | ||
114 | { | ||
115 | set_bit(irq, vcpu->arch.irq_pending); | ||
116 | set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary); | ||
117 | } | ||
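/*
 * Editor's note -- illustrative comment, not part of the original file.
 * irq_pending/irq_summary form a two-level bitmap: irq_pending holds one
 * bit per vector, irq_summary one bit per irq_pending word. Worked
 * example on a 64-bit host (BITS_PER_LONG == 64): push_irq(vcpu, 70)
 * sets bit 6 of irq_pending[1] and bit 1 of irq_summary; pop_irq() then
 * finds word_index = 1, bit_index = 6 and returns 1 * 64 + 6 = 70,
 * clearing the summary bit once that pending word empties.
 */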
118 | |||
119 | static inline void clgi(void) | ||
120 | { | ||
121 | asm volatile (SVM_CLGI); | ||
122 | } | ||
123 | |||
124 | static inline void stgi(void) | ||
125 | { | ||
126 | asm volatile (SVM_STGI); | ||
127 | } | ||
128 | |||
129 | static inline void invlpga(unsigned long addr, u32 asid) | ||
130 | { | ||
131 | asm volatile (SVM_INVLPGA :: "a"(addr), "c"(asid)); | ||
132 | } | ||
133 | |||
134 | static inline unsigned long kvm_read_cr2(void) | ||
135 | { | ||
136 | unsigned long cr2; | ||
137 | |||
138 | asm volatile ("mov %%cr2, %0" : "=r" (cr2)); | ||
139 | return cr2; | ||
140 | } | ||
141 | |||
142 | static inline void kvm_write_cr2(unsigned long val) | ||
143 | { | ||
144 | asm volatile ("mov %0, %%cr2" :: "r" (val)); | ||
145 | } | ||
146 | |||
147 | static inline unsigned long read_dr6(void) | ||
148 | { | ||
149 | unsigned long dr6; | ||
150 | |||
151 | asm volatile ("mov %%dr6, %0" : "=r" (dr6)); | ||
152 | return dr6; | ||
153 | } | ||
154 | |||
155 | static inline void write_dr6(unsigned long val) | ||
156 | { | ||
157 | asm volatile ("mov %0, %%dr6" :: "r" (val)); | ||
158 | } | ||
159 | |||
160 | static inline unsigned long read_dr7(void) | ||
161 | { | ||
162 | unsigned long dr7; | ||
163 | |||
164 | asm volatile ("mov %%dr7, %0" : "=r" (dr7)); | ||
165 | return dr7; | ||
166 | } | ||
167 | |||
168 | static inline void write_dr7(unsigned long val) | ||
169 | { | ||
170 | asm volatile ("mov %0, %%dr7" :: "r" (val)); | ||
171 | } | ||
172 | |||
173 | static inline void force_new_asid(struct kvm_vcpu *vcpu) | ||
174 | { | ||
175 | to_svm(vcpu)->asid_generation--; | ||
176 | } | ||
177 | |||
178 | static inline void flush_guest_tlb(struct kvm_vcpu *vcpu) | ||
179 | { | ||
180 | force_new_asid(vcpu); | ||
181 | } | ||
182 | |||
183 | static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) | ||
184 | { | ||
185 | if (!(efer & EFER_LMA)) | ||
186 | efer &= ~EFER_LME; | ||
187 | |||
188 | to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK; | ||
189 | vcpu->arch.shadow_efer = efer; | ||
190 | } | ||
191 | |||
192 | static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, | ||
193 | bool has_error_code, u32 error_code) | ||
194 | { | ||
195 | struct vcpu_svm *svm = to_svm(vcpu); | ||
196 | |||
197 | svm->vmcb->control.event_inj = nr | ||
198 | | SVM_EVTINJ_VALID | ||
199 | | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0) | ||
200 | | SVM_EVTINJ_TYPE_EXEPT; | ||
201 | svm->vmcb->control.event_inj_err = error_code; | ||
202 | } | ||
203 | |||
204 | static bool svm_exception_injected(struct kvm_vcpu *vcpu) | ||
205 | { | ||
206 | struct vcpu_svm *svm = to_svm(vcpu); | ||
207 | |||
208 | return !(svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID); | ||
209 | } | ||
210 | |||
211 | static int is_external_interrupt(u32 info) | ||
212 | { | ||
213 | info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID; | ||
214 | return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR); | ||
215 | } | ||
216 | |||
217 | static void skip_emulated_instruction(struct kvm_vcpu *vcpu) | ||
218 | { | ||
219 | struct vcpu_svm *svm = to_svm(vcpu); | ||
220 | |||
221 | if (!svm->next_rip) { | ||
222 | printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__); | ||
223 | return; | ||
224 | } | ||
225 | if (svm->next_rip - svm->vmcb->save.rip > MAX_INST_SIZE) | ||
226 | printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n", | ||
227 | __FUNCTION__, | ||
228 | svm->vmcb->save.rip, | ||
229 | svm->next_rip); | ||
230 | |||
231 | vcpu->arch.rip = svm->vmcb->save.rip = svm->next_rip; | ||
232 | svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; | ||
233 | |||
234 | vcpu->arch.interrupt_window_open = 1; | ||
235 | } | ||
236 | |||
237 | static int has_svm(void) | ||
238 | { | ||
239 | uint32_t eax, ebx, ecx, edx; | ||
240 | |||
241 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) { | ||
242 | printk(KERN_INFO "has_svm: not amd\n"); | ||
243 | return 0; | ||
244 | } | ||
245 | |||
246 | cpuid(0x80000000, &eax, &ebx, &ecx, &edx); | ||
247 | if (eax < SVM_CPUID_FUNC) { | ||
248 | printk(KERN_INFO "has_svm: can't execute cpuid_8000000a\n"); | ||
249 | return 0; | ||
250 | } | ||
251 | |||
252 | cpuid(0x80000001, &eax, &ebx, &ecx, &edx); | ||
253 | if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) { | ||
254 | printk(KERN_DEBUG "has_svm: svm not available\n"); | ||
255 | return 0; | ||
256 | } | ||
257 | return 1; | ||
258 | } | ||
259 | |||
260 | static void svm_hardware_disable(void *garbage) | ||
261 | { | ||
262 | struct svm_cpu_data *svm_data | ||
263 | = per_cpu(svm_data, raw_smp_processor_id()); | ||
264 | |||
265 | if (svm_data) { | ||
266 | uint64_t efer; | ||
267 | |||
268 | wrmsrl(MSR_VM_HSAVE_PA, 0); | ||
269 | rdmsrl(MSR_EFER, efer); | ||
270 | wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK); | ||
271 | per_cpu(svm_data, raw_smp_processor_id()) = NULL; | ||
272 | __free_page(svm_data->save_area); | ||
273 | kfree(svm_data); | ||
274 | } | ||
275 | } | ||
276 | |||
277 | static void svm_hardware_enable(void *garbage) | ||
278 | { | ||
279 | |||
280 | struct svm_cpu_data *svm_data; | ||
281 | uint64_t efer; | ||
282 | #ifdef CONFIG_X86_64 | ||
283 | struct desc_ptr gdt_descr; | ||
284 | #else | ||
285 | struct desc_ptr gdt_descr; /* identical branches; the ifdef is vestigial */ | ||
286 | #endif | ||
287 | struct desc_struct *gdt; | ||
288 | int me = raw_smp_processor_id(); | ||
289 | |||
290 | if (!has_svm()) { | ||
291 | printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me); | ||
292 | return; | ||
293 | } | ||
294 | svm_data = per_cpu(svm_data, me); | ||
295 | |||
296 | if (!svm_data) { | ||
297 | printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n", | ||
298 | me); | ||
299 | return; | ||
300 | } | ||
301 | |||
302 | svm_data->asid_generation = 1; | ||
303 | svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1; | ||
304 | svm_data->next_asid = svm_data->max_asid + 1; | ||
305 | svm_features = cpuid_edx(SVM_CPUID_FUNC); | ||
306 | |||
307 | asm volatile ("sgdt %0" : "=m"(gdt_descr)); | ||
308 | gdt = (struct desc_struct *)gdt_descr.address; | ||
309 | svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS); | ||
310 | |||
311 | rdmsrl(MSR_EFER, efer); | ||
312 | wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK); | ||
313 | |||
314 | wrmsrl(MSR_VM_HSAVE_PA, | ||
315 | page_to_pfn(svm_data->save_area) << PAGE_SHIFT); | ||
316 | } | ||
317 | |||
318 | static int svm_cpu_init(int cpu) | ||
319 | { | ||
320 | struct svm_cpu_data *svm_data; | ||
321 | int r; | ||
322 | |||
323 | svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL); | ||
324 | if (!svm_data) | ||
325 | return -ENOMEM; | ||
326 | svm_data->cpu = cpu; | ||
327 | svm_data->save_area = alloc_page(GFP_KERNEL); | ||
328 | r = -ENOMEM; | ||
329 | if (!svm_data->save_area) | ||
330 | goto err_1; | ||
331 | |||
332 | per_cpu(svm_data, cpu) = svm_data; | ||
333 | |||
334 | return 0; | ||
335 | |||
336 | err_1: | ||
337 | kfree(svm_data); | ||
338 | return r; | ||
339 | |||
340 | } | ||
341 | |||
342 | static void set_msr_interception(u32 *msrpm, unsigned msr, | ||
343 | int read, int write) | ||
344 | { | ||
345 | int i; | ||
346 | |||
347 | for (i = 0; i < NUM_MSR_MAPS; i++) { | ||
348 | if (msr >= msrpm_ranges[i] && | ||
349 | msr < msrpm_ranges[i] + MSRS_IN_RANGE) { | ||
350 | u32 msr_offset = (i * MSRS_IN_RANGE + msr - | ||
351 | msrpm_ranges[i]) * 2; | ||
352 | |||
353 | u32 *base = msrpm + (msr_offset / 32); | ||
354 | u32 msr_shift = msr_offset % 32; | ||
355 | u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1); | ||
356 | *base = (*base & ~(0x3 << msr_shift)) | | ||
357 | (mask << msr_shift); | ||
358 | return; | ||
359 | } | ||
360 | } | ||
361 | BUG(); | ||
362 | } | ||
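/*
 * Editor's note -- worked example, not part of the original file.
 * Each MSR gets two permission bits (read, write), so one 2048-byte
 * range covers 2048 * 8 / 2 = 8192 MSRs (MSRS_IN_RANGE). For
 * MSR_LSTAR (0xc0000082), range i = 1 (base 0xc0000000), so
 *   msr_offset = (1 * 8192 + 0x82) * 2 = 16644 bits,
 *   base = msrpm + 16644 / 32 = msrpm + 520, msr_shift = 4.
 * With read = write = 1 the mask is 0, clearing bits 4-5 of that word
 * and letting the guest access the MSR without an intercept.
 */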
363 | |||
364 | static __init int svm_hardware_setup(void) | ||
365 | { | ||
366 | int cpu; | ||
367 | struct page *iopm_pages; | ||
368 | struct page *msrpm_pages; | ||
369 | void *iopm_va, *msrpm_va; | ||
370 | int r; | ||
371 | |||
372 | iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER); | ||
373 | |||
374 | if (!iopm_pages) | ||
375 | return -ENOMEM; | ||
376 | |||
377 | iopm_va = page_address(iopm_pages); | ||
378 | memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER)); | ||
379 | clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */ | ||
380 | iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT; | ||
381 | |||
382 | |||
383 | msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); | ||
384 | |||
385 | r = -ENOMEM; | ||
386 | if (!msrpm_pages) | ||
387 | goto err_1; | ||
388 | |||
389 | msrpm_va = page_address(msrpm_pages); | ||
390 | memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER)); | ||
391 | msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT; | ||
392 | |||
393 | #ifdef CONFIG_X86_64 | ||
394 | set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1); | ||
395 | set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1); | ||
396 | set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1); | ||
397 | set_msr_interception(msrpm_va, MSR_LSTAR, 1, 1); | ||
398 | set_msr_interception(msrpm_va, MSR_CSTAR, 1, 1); | ||
399 | set_msr_interception(msrpm_va, MSR_SYSCALL_MASK, 1, 1); | ||
400 | #endif | ||
401 | set_msr_interception(msrpm_va, MSR_K6_STAR, 1, 1); | ||
402 | set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_CS, 1, 1); | ||
403 | set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_ESP, 1, 1); | ||
404 | set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_EIP, 1, 1); | ||
405 | |||
406 | for_each_online_cpu(cpu) { | ||
407 | r = svm_cpu_init(cpu); | ||
408 | if (r) | ||
409 | goto err_2; | ||
410 | } | ||
411 | return 0; | ||
412 | |||
413 | err_2: | ||
414 | __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER); | ||
415 | msrpm_base = 0; | ||
416 | err_1: | ||
417 | __free_pages(iopm_pages, IOPM_ALLOC_ORDER); | ||
418 | iopm_base = 0; | ||
419 | return r; | ||
420 | } | ||
421 | |||
422 | static __exit void svm_hardware_unsetup(void) | ||
423 | { | ||
424 | __free_pages(pfn_to_page(msrpm_base >> PAGE_SHIFT), MSRPM_ALLOC_ORDER); | ||
425 | __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER); | ||
426 | iopm_base = msrpm_base = 0; | ||
427 | } | ||
428 | |||
429 | static void init_seg(struct vmcb_seg *seg) | ||
430 | { | ||
431 | seg->selector = 0; | ||
432 | seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK | | ||
433 | SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */ | ||
434 | seg->limit = 0xffff; | ||
435 | seg->base = 0; | ||
436 | } | ||
437 | |||
438 | static void init_sys_seg(struct vmcb_seg *seg, uint32_t type) | ||
439 | { | ||
440 | seg->selector = 0; | ||
441 | seg->attrib = SVM_SELECTOR_P_MASK | type; | ||
442 | seg->limit = 0xffff; | ||
443 | seg->base = 0; | ||
444 | } | ||
445 | |||
446 | static void init_vmcb(struct vmcb *vmcb) | ||
447 | { | ||
448 | struct vmcb_control_area *control = &vmcb->control; | ||
449 | struct vmcb_save_area *save = &vmcb->save; | ||
450 | |||
451 | control->intercept_cr_read = INTERCEPT_CR0_MASK | | ||
452 | INTERCEPT_CR3_MASK | | ||
453 | INTERCEPT_CR4_MASK | | ||
454 | INTERCEPT_CR8_MASK; | ||
455 | |||
456 | control->intercept_cr_write = INTERCEPT_CR0_MASK | | ||
457 | INTERCEPT_CR3_MASK | | ||
458 | INTERCEPT_CR4_MASK | | ||
459 | INTERCEPT_CR8_MASK; | ||
460 | |||
461 | control->intercept_dr_read = INTERCEPT_DR0_MASK | | ||
462 | INTERCEPT_DR1_MASK | | ||
463 | INTERCEPT_DR2_MASK | | ||
464 | INTERCEPT_DR3_MASK; | ||
465 | |||
466 | control->intercept_dr_write = INTERCEPT_DR0_MASK | | ||
467 | INTERCEPT_DR1_MASK | | ||
468 | INTERCEPT_DR2_MASK | | ||
469 | INTERCEPT_DR3_MASK | | ||
470 | INTERCEPT_DR5_MASK | | ||
471 | INTERCEPT_DR7_MASK; | ||
472 | |||
473 | control->intercept_exceptions = (1 << PF_VECTOR) | | ||
474 | (1 << UD_VECTOR); | ||
475 | |||
476 | |||
477 | control->intercept = (1ULL << INTERCEPT_INTR) | | ||
478 | (1ULL << INTERCEPT_NMI) | | ||
479 | (1ULL << INTERCEPT_SMI) | | ||
480 | /* | ||
481 | * selective cr0 intercept bug? | ||
482 | * 0: 0f 22 d8 mov %eax,%cr3 | ||
483 | * 3: 0f 20 c0 mov %cr0,%eax | ||
484 | * 6: 0d 00 00 00 80 or $0x80000000,%eax | ||
485 | * b: 0f 22 c0 mov %eax,%cr0 | ||
486 | * set cr3 -> interception | ||
487 | * get cr0 -> interception | ||
488 | * set cr0 -> no interception | ||
489 | */ | ||
490 | /* (1ULL << INTERCEPT_SELECTIVE_CR0) | */ | ||
491 | (1ULL << INTERCEPT_CPUID) | | ||
492 | (1ULL << INTERCEPT_INVD) | | ||
493 | (1ULL << INTERCEPT_HLT) | | ||
494 | (1ULL << INTERCEPT_INVLPGA) | | ||
495 | (1ULL << INTERCEPT_IOIO_PROT) | | ||
496 | (1ULL << INTERCEPT_MSR_PROT) | | ||
497 | (1ULL << INTERCEPT_TASK_SWITCH) | | ||
498 | (1ULL << INTERCEPT_SHUTDOWN) | | ||
499 | (1ULL << INTERCEPT_VMRUN) | | ||
500 | (1ULL << INTERCEPT_VMMCALL) | | ||
501 | (1ULL << INTERCEPT_VMLOAD) | | ||
502 | (1ULL << INTERCEPT_VMSAVE) | | ||
503 | (1ULL << INTERCEPT_STGI) | | ||
504 | (1ULL << INTERCEPT_CLGI) | | ||
505 | (1ULL << INTERCEPT_SKINIT) | | ||
506 | (1ULL << INTERCEPT_WBINVD) | | ||
507 | (1ULL << INTERCEPT_MONITOR) | | ||
508 | (1ULL << INTERCEPT_MWAIT); | ||
509 | |||
510 | control->iopm_base_pa = iopm_base; | ||
511 | control->msrpm_base_pa = msrpm_base; | ||
512 | control->tsc_offset = 0; | ||
513 | control->int_ctl = V_INTR_MASKING_MASK; | ||
514 | |||
515 | init_seg(&save->es); | ||
516 | init_seg(&save->ss); | ||
517 | init_seg(&save->ds); | ||
518 | init_seg(&save->fs); | ||
519 | init_seg(&save->gs); | ||
520 | |||
521 | save->cs.selector = 0xf000; | ||
522 | /* Executable/Readable Code Segment */ | ||
523 | save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK | | ||
524 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK; | ||
525 | save->cs.limit = 0xffff; | ||
526 | /* | ||
527 | * cs.base should really be 0xffff0000, but vmx can't handle that, so | ||
528 | * be consistent with it. | ||
529 | * | ||
530 | * Replace when we have real mode working for vmx. | ||
531 | */ | ||
532 | save->cs.base = 0xf0000; | ||
533 | |||
534 | save->gdtr.limit = 0xffff; | ||
535 | save->idtr.limit = 0xffff; | ||
536 | |||
537 | init_sys_seg(&save->ldtr, SEG_TYPE_LDT); | ||
538 | init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16); | ||
539 | |||
540 | save->efer = MSR_EFER_SVME_MASK; | ||
541 | save->dr6 = 0xffff0ff0; | ||
542 | save->dr7 = 0x400; | ||
543 | save->rflags = 2; | ||
544 | save->rip = 0x0000fff0; | ||
545 | |||
546 | /* | ||
547 | * The cr0 value at CPU reset should be 0x60000010; we enable the | ||
548 | * CPU cache by default. The orderly way is to enable the cache in the BIOS. | ||
549 | */ | ||
550 | save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP; | ||
551 | save->cr4 = X86_CR4_PAE; | ||
552 | /* rdx = ?? */ | ||
553 | } | ||
554 | |||
555 | static int svm_vcpu_reset(struct kvm_vcpu *vcpu) | ||
556 | { | ||
557 | struct vcpu_svm *svm = to_svm(vcpu); | ||
558 | |||
559 | init_vmcb(svm->vmcb); | ||
560 | |||
561 | if (vcpu->vcpu_id != 0) { | ||
562 | svm->vmcb->save.rip = 0; | ||
563 | svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12; | ||
564 | svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8; | ||
565 | } | ||
566 | |||
567 | return 0; | ||
568 | } | ||
569 | |||
570 | static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) | ||
571 | { | ||
572 | struct vcpu_svm *svm; | ||
573 | struct page *page; | ||
574 | int err; | ||
575 | |||
576 | svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); | ||
577 | if (!svm) { | ||
578 | err = -ENOMEM; | ||
579 | goto out; | ||
580 | } | ||
581 | |||
582 | err = kvm_vcpu_init(&svm->vcpu, kvm, id); | ||
583 | if (err) | ||
584 | goto free_svm; | ||
585 | |||
586 | page = alloc_page(GFP_KERNEL); | ||
587 | if (!page) { | ||
588 | err = -ENOMEM; | ||
589 | goto uninit; | ||
590 | } | ||
591 | |||
592 | svm->vmcb = page_address(page); | ||
593 | clear_page(svm->vmcb); | ||
594 | svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; | ||
595 | svm->asid_generation = 0; | ||
596 | memset(svm->db_regs, 0, sizeof(svm->db_regs)); | ||
597 | init_vmcb(svm->vmcb); | ||
598 | |||
599 | fx_init(&svm->vcpu); | ||
600 | svm->vcpu.fpu_active = 1; | ||
601 | svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE; | ||
602 | if (svm->vcpu.vcpu_id == 0) | ||
603 | svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP; | ||
604 | |||
605 | return &svm->vcpu; | ||
606 | |||
607 | uninit: | ||
608 | kvm_vcpu_uninit(&svm->vcpu); | ||
609 | free_svm: | ||
610 | kmem_cache_free(kvm_vcpu_cache, svm); | ||
611 | out: | ||
612 | return ERR_PTR(err); | ||
613 | } | ||
614 | |||
615 | static void svm_free_vcpu(struct kvm_vcpu *vcpu) | ||
616 | { | ||
617 | struct vcpu_svm *svm = to_svm(vcpu); | ||
618 | |||
619 | __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT)); | ||
620 | kvm_vcpu_uninit(vcpu); | ||
621 | kmem_cache_free(kvm_vcpu_cache, svm); | ||
622 | } | ||
623 | |||
624 | static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | ||
625 | { | ||
626 | struct vcpu_svm *svm = to_svm(vcpu); | ||
627 | int i; | ||
628 | |||
629 | if (unlikely(cpu != vcpu->cpu)) { | ||
630 | u64 tsc_this, delta; | ||
631 | |||
632 | /* | ||
633 | * Make sure that the guest sees a monotonically | ||
634 | * increasing TSC. | ||
635 | */ | ||
636 | rdtscll(tsc_this); | ||
637 | delta = vcpu->arch.host_tsc - tsc_this; | ||
638 | svm->vmcb->control.tsc_offset += delta; | ||
639 | vcpu->cpu = cpu; | ||
640 | kvm_migrate_apic_timer(vcpu); | ||
641 | } | ||
642 | |||
643 | for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) | ||
644 | rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); | ||
645 | } | ||
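/*
 * Editor's note -- illustrative comment, not part of the original file.
 * The guest sees host_tsc + tsc_offset. Suppose vcpu_put() sampled
 * host_tsc = 1000 on the old CPU and the new CPU's counter reads
 * tsc_this = 400 here: delta = 1000 - 400 = 600 is folded into
 * tsc_offset, so the guest-visible TSC resumes at the value it last
 * saw instead of jumping backwards (it may still jump forwards).
 */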
646 | |||
647 | static void svm_vcpu_put(struct kvm_vcpu *vcpu) | ||
648 | { | ||
649 | struct vcpu_svm *svm = to_svm(vcpu); | ||
650 | int i; | ||
651 | |||
652 | ++vcpu->stat.host_state_reload; | ||
653 | for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) | ||
654 | wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); | ||
655 | |||
656 | rdtscll(vcpu->arch.host_tsc); | ||
657 | } | ||
658 | |||
659 | static void svm_vcpu_decache(struct kvm_vcpu *vcpu) | ||
660 | { | ||
661 | } | ||
662 | |||
663 | static void svm_cache_regs(struct kvm_vcpu *vcpu) | ||
664 | { | ||
665 | struct vcpu_svm *svm = to_svm(vcpu); | ||
666 | |||
667 | vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; | ||
668 | vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; | ||
669 | vcpu->arch.rip = svm->vmcb->save.rip; | ||
670 | } | ||
671 | |||
672 | static void svm_decache_regs(struct kvm_vcpu *vcpu) | ||
673 | { | ||
674 | struct vcpu_svm *svm = to_svm(vcpu); | ||
675 | svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; | ||
676 | svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; | ||
677 | svm->vmcb->save.rip = vcpu->arch.rip; | ||
678 | } | ||
679 | |||
680 | static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu) | ||
681 | { | ||
682 | return to_svm(vcpu)->vmcb->save.rflags; | ||
683 | } | ||
684 | |||
685 | static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) | ||
686 | { | ||
687 | to_svm(vcpu)->vmcb->save.rflags = rflags; | ||
688 | } | ||
689 | |||
690 | static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg) | ||
691 | { | ||
692 | struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; | ||
693 | |||
694 | switch (seg) { | ||
695 | case VCPU_SREG_CS: return &save->cs; | ||
696 | case VCPU_SREG_DS: return &save->ds; | ||
697 | case VCPU_SREG_ES: return &save->es; | ||
698 | case VCPU_SREG_FS: return &save->fs; | ||
699 | case VCPU_SREG_GS: return &save->gs; | ||
700 | case VCPU_SREG_SS: return &save->ss; | ||
701 | case VCPU_SREG_TR: return &save->tr; | ||
702 | case VCPU_SREG_LDTR: return &save->ldtr; | ||
703 | } | ||
704 | BUG(); | ||
705 | return NULL; | ||
706 | } | ||
707 | |||
708 | static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg) | ||
709 | { | ||
710 | struct vmcb_seg *s = svm_seg(vcpu, seg); | ||
711 | |||
712 | return s->base; | ||
713 | } | ||
714 | |||
715 | static void svm_get_segment(struct kvm_vcpu *vcpu, | ||
716 | struct kvm_segment *var, int seg) | ||
717 | { | ||
718 | struct vmcb_seg *s = svm_seg(vcpu, seg); | ||
719 | |||
720 | var->base = s->base; | ||
721 | var->limit = s->limit; | ||
722 | var->selector = s->selector; | ||
723 | var->type = s->attrib & SVM_SELECTOR_TYPE_MASK; | ||
724 | var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1; | ||
725 | var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3; | ||
726 | var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1; | ||
727 | var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1; | ||
728 | var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1; | ||
729 | var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; | ||
730 | var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1; | ||
731 | var->unusable = !var->present; | ||
732 | } | ||
733 | |||
734 | static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) | ||
735 | { | ||
736 | struct vcpu_svm *svm = to_svm(vcpu); | ||
737 | |||
738 | dt->limit = svm->vmcb->save.idtr.limit; | ||
739 | dt->base = svm->vmcb->save.idtr.base; | ||
740 | } | ||
741 | |||
742 | static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) | ||
743 | { | ||
744 | struct vcpu_svm *svm = to_svm(vcpu); | ||
745 | |||
746 | svm->vmcb->save.idtr.limit = dt->limit; | ||
747 | svm->vmcb->save.idtr.base = dt->base; | ||
748 | } | ||
749 | |||
750 | static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) | ||
751 | { | ||
752 | struct vcpu_svm *svm = to_svm(vcpu); | ||
753 | |||
754 | dt->limit = svm->vmcb->save.gdtr.limit; | ||
755 | dt->base = svm->vmcb->save.gdtr.base; | ||
756 | } | ||
757 | |||
758 | static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) | ||
759 | { | ||
760 | struct vcpu_svm *svm = to_svm(vcpu); | ||
761 | |||
762 | svm->vmcb->save.gdtr.limit = dt->limit; | ||
763 | svm->vmcb->save.gdtr.base = dt->base; | ||
764 | } | ||
765 | |||
766 | static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) | ||
767 | { | ||
768 | } | ||
769 | |||
770 | static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | ||
771 | { | ||
772 | struct vcpu_svm *svm = to_svm(vcpu); | ||
773 | |||
774 | #ifdef CONFIG_X86_64 | ||
775 | if (vcpu->arch.shadow_efer & EFER_LME) { | ||
776 | if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { | ||
777 | vcpu->arch.shadow_efer |= EFER_LMA; | ||
778 | svm->vmcb->save.efer |= EFER_LMA | EFER_LME; | ||
779 | } | ||
780 | |||
781 | if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) { | ||
782 | vcpu->arch.shadow_efer &= ~EFER_LMA; | ||
783 | svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); | ||
784 | } | ||
785 | } | ||
786 | #endif | ||
787 | if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) { | ||
788 | svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR); | ||
789 | vcpu->fpu_active = 1; | ||
790 | } | ||
791 | |||
792 | vcpu->arch.cr0 = cr0; | ||
793 | cr0 |= X86_CR0_PG | X86_CR0_WP; | ||
794 | cr0 &= ~(X86_CR0_CD | X86_CR0_NW); | ||
795 | svm->vmcb->save.cr0 = cr0; | ||
796 | } | ||
797 | |||
798 | static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) | ||
799 | { | ||
800 | vcpu->arch.cr4 = cr4; | ||
801 | to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE; | ||
802 | } | ||
803 | |||
804 | static void svm_set_segment(struct kvm_vcpu *vcpu, | ||
805 | struct kvm_segment *var, int seg) | ||
806 | { | ||
807 | struct vcpu_svm *svm = to_svm(vcpu); | ||
808 | struct vmcb_seg *s = svm_seg(vcpu, seg); | ||
809 | |||
810 | s->base = var->base; | ||
811 | s->limit = var->limit; | ||
812 | s->selector = var->selector; | ||
813 | if (var->unusable) | ||
814 | s->attrib = 0; | ||
815 | else { | ||
816 | s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK); | ||
817 | s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT; | ||
818 | s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT; | ||
819 | s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT; | ||
820 | s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT; | ||
821 | s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT; | ||
822 | s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT; | ||
823 | s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT; | ||
824 | } | ||
825 | if (seg == VCPU_SREG_CS) | ||
826 | svm->vmcb->save.cpl | ||
827 | = (svm->vmcb->save.cs.attrib | ||
828 | >> SVM_SELECTOR_DPL_SHIFT) & 3; | ||
829 | |||
830 | } | ||
831 | |||
832 | /* FIXME: | ||
833 | |||
834 | svm(vcpu)->vmcb->control.int_ctl &= ~V_TPR_MASK; | ||
835 | svm(vcpu)->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK); | ||
836 | |||
837 | */ | ||
838 | |||
839 | static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg) | ||
840 | { | ||
841 | return -EOPNOTSUPP; | ||
842 | } | ||
843 | |||
844 | static int svm_get_irq(struct kvm_vcpu *vcpu) | ||
845 | { | ||
846 | struct vcpu_svm *svm = to_svm(vcpu); | ||
847 | u32 exit_int_info = svm->vmcb->control.exit_int_info; | ||
848 | |||
849 | if (is_external_interrupt(exit_int_info)) | ||
850 | return exit_int_info & SVM_EVTINJ_VEC_MASK; | ||
851 | return -1; | ||
852 | } | ||
853 | |||
854 | static void load_host_msrs(struct kvm_vcpu *vcpu) | ||
855 | { | ||
856 | #ifdef CONFIG_X86_64 | ||
857 | wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base); | ||
858 | #endif | ||
859 | } | ||
860 | |||
861 | static void save_host_msrs(struct kvm_vcpu *vcpu) | ||
862 | { | ||
863 | #ifdef CONFIG_X86_64 | ||
864 | rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base); | ||
865 | #endif | ||
866 | } | ||
867 | |||
868 | static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data) | ||
869 | { | ||
870 | if (svm_data->next_asid > svm_data->max_asid) { | ||
871 | ++svm_data->asid_generation; | ||
872 | svm_data->next_asid = 1; | ||
873 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; | ||
874 | } | ||
875 | |||
876 | svm->vcpu.cpu = svm_data->cpu; | ||
877 | svm->asid_generation = svm_data->asid_generation; | ||
878 | svm->vmcb->control.asid = svm_data->next_asid++; | ||
879 | } | ||
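/*
 * Editor's note -- illustrative comment, not part of the original file.
 * ASIDs 1..max_asid are handed out sequentially. When next_asid
 * overflows max_asid, the per-CPU generation counter is bumped and a
 * full TLB flush (TLB_CONTROL_FLUSH_ALL_ASID) is requested, after
 * which every previously issued ASID may be reused. pre_svm_run()
 * below compares svm->asid_generation against the per-CPU generation
 * to detect a vCPU that migrated or survived such a rollover and
 * therefore needs a fresh ASID.
 */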
880 | |||
881 | static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr) | ||
882 | { | ||
883 | return to_svm(vcpu)->db_regs[dr]; | ||
884 | } | ||
885 | |||
886 | static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value, | ||
887 | int *exception) | ||
888 | { | ||
889 | struct vcpu_svm *svm = to_svm(vcpu); | ||
890 | |||
891 | *exception = 0; | ||
892 | |||
893 | if (svm->vmcb->save.dr7 & DR7_GD_MASK) { | ||
894 | svm->vmcb->save.dr7 &= ~DR7_GD_MASK; | ||
895 | svm->vmcb->save.dr6 |= DR6_BD_MASK; | ||
896 | *exception = DB_VECTOR; | ||
897 | return; | ||
898 | } | ||
899 | |||
900 | switch (dr) { | ||
901 | case 0 ... 3: | ||
902 | svm->db_regs[dr] = value; | ||
903 | return; | ||
904 | case 4 ... 5: | ||
905 | if (vcpu->arch.cr4 & X86_CR4_DE) { | ||
906 | *exception = UD_VECTOR; | ||
907 | return; | ||
908 | } /* fall through */ | ||
909 | case 7: { | ||
910 | if (value & ~((1ULL << 32) - 1)) { | ||
911 | *exception = GP_VECTOR; | ||
912 | return; | ||
913 | } | ||
914 | svm->vmcb->save.dr7 = value; | ||
915 | return; | ||
916 | } | ||
917 | default: | ||
918 | printk(KERN_DEBUG "%s: unexpected dr %u\n", | ||
919 | __FUNCTION__, dr); | ||
920 | *exception = UD_VECTOR; | ||
921 | return; | ||
922 | } | ||
923 | } | ||
924 | |||
925 | static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | ||
926 | { | ||
927 | u32 exit_int_info = svm->vmcb->control.exit_int_info; | ||
928 | struct kvm *kvm = svm->vcpu.kvm; | ||
929 | u64 fault_address; | ||
930 | u32 error_code; | ||
931 | |||
932 | if (!irqchip_in_kernel(kvm) && | ||
933 | is_external_interrupt(exit_int_info)) | ||
934 | push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK); | ||
935 | |||
936 | fault_address = svm->vmcb->control.exit_info_2; | ||
937 | error_code = svm->vmcb->control.exit_info_1; | ||
938 | return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code); | ||
939 | } | ||
940 | |||
941 | static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | ||
942 | { | ||
943 | int er; | ||
944 | |||
945 | er = emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0); | ||
946 | if (er != EMULATE_DONE) | ||
947 | kvm_queue_exception(&svm->vcpu, UD_VECTOR); | ||
948 | return 1; | ||
949 | } | ||
950 | |||
951 | static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | ||
952 | { | ||
953 | svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR); | ||
954 | if (!(svm->vcpu.arch.cr0 & X86_CR0_TS)) | ||
955 | svm->vmcb->save.cr0 &= ~X86_CR0_TS; | ||
956 | svm->vcpu.fpu_active = 1; | ||
957 | |||
958 | return 1; | ||
959 | } | ||
960 | |||
961 | static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | ||
962 | { | ||
963 | /* | ||
964 | * VMCB is undefined after a SHUTDOWN intercept | ||
965 | * so reinitialize it. | ||
966 | */ | ||
967 | clear_page(svm->vmcb); | ||
968 | init_vmcb(svm->vmcb); | ||
969 | |||
970 | kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; | ||
971 | return 0; | ||
972 | } | ||
973 | |||
974 | static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | ||
975 | { | ||
976 | u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */ | ||
977 | int size, down, in, string, rep; | ||
978 | unsigned port; | ||
979 | |||
980 | ++svm->vcpu.stat.io_exits; | ||
981 | |||
982 | svm->next_rip = svm->vmcb->control.exit_info_2; | ||
983 | |||
984 | string = (io_info & SVM_IOIO_STR_MASK) != 0; | ||
985 | |||
986 | if (string) { | ||
987 | if (emulate_instruction(&svm->vcpu, | ||
988 | kvm_run, 0, 0, 0) == EMULATE_DO_MMIO) | ||
989 | return 0; | ||
990 | return 1; | ||
991 | } | ||
992 | |||
993 | in = (io_info & SVM_IOIO_TYPE_MASK) != 0; | ||
994 | port = io_info >> 16; | ||
995 | size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; | ||
996 | rep = (io_info & SVM_IOIO_REP_MASK) != 0; | ||
997 | down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0; | ||
998 | |||
999 | return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port); | ||
1000 | } | ||
1001 | |||
1002 | static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | ||
1003 | { | ||
1004 | return 1; | ||
1005 | } | ||
1006 | |||
1007 | static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | ||
1008 | { | ||
1009 | svm->next_rip = svm->vmcb->save.rip + 1; | ||
1010 | skip_emulated_instruction(&svm->vcpu); | ||
1011 | return kvm_emulate_halt(&svm->vcpu); | ||
1012 | } | ||
1013 | |||
1014 | static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | ||
1015 | { | ||
1016 | svm->next_rip = svm->vmcb->save.rip + 3; | ||
1017 | skip_emulated_instruction(&svm->vcpu); | ||
1018 | kvm_emulate_hypercall(&svm->vcpu); | ||
1019 | return 1; | ||
1020 | } | ||
1021 | |||
1022 | static int invalid_op_interception(struct vcpu_svm *svm, | ||
1023 | struct kvm_run *kvm_run) | ||
1024 | { | ||
1025 | kvm_queue_exception(&svm->vcpu, UD_VECTOR); | ||
1026 | return 1; | ||
1027 | } | ||
1028 | |||
1029 | static int task_switch_interception(struct vcpu_svm *svm, | ||
1030 | struct kvm_run *kvm_run) | ||
1031 | { | ||
1032 | pr_unimpl(&svm->vcpu, "%s: task switch is unsupported\n", __FUNCTION__); | ||
1033 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | ||
1034 | return 0; | ||
1035 | } | ||
1036 | |||
1037 | static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | ||
1038 | { | ||
1039 | svm->next_rip = svm->vmcb->save.rip + 2; | ||
1040 | kvm_emulate_cpuid(&svm->vcpu); | ||
1041 | return 1; | ||
1042 | } | ||
1043 | |||
1044 | static int emulate_on_interception(struct vcpu_svm *svm, | ||
1045 | struct kvm_run *kvm_run) | ||
1046 | { | ||
1047 | if (emulate_instruction(&svm->vcpu, NULL, 0, 0, 0) != EMULATE_DONE) | ||
1048 | pr_unimpl(&svm->vcpu, "%s: failed\n", __FUNCTION__); | ||
1049 | return 1; | ||
1050 | } | ||
1051 | |||
1052 | static int cr8_write_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | ||
1053 | { | ||
1054 | emulate_instruction(&svm->vcpu, NULL, 0, 0, 0); | ||
1055 | if (irqchip_in_kernel(svm->vcpu.kvm)) | ||
1056 | return 1; | ||
1057 | kvm_run->exit_reason = KVM_EXIT_SET_TPR; | ||
1058 | return 0; | ||
1059 | } | ||
1060 | |||
1061 | static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data) | ||
1062 | { | ||
1063 | struct vcpu_svm *svm = to_svm(vcpu); | ||
1064 | |||
1065 | switch (ecx) { | ||
1066 | case MSR_IA32_TIME_STAMP_COUNTER: { | ||
1067 | u64 tsc; | ||
1068 | |||
1069 | rdtscll(tsc); | ||
1070 | *data = svm->vmcb->control.tsc_offset + tsc; | ||
1071 | break; | ||
1072 | } | ||
1073 | case MSR_K6_STAR: | ||
1074 | *data = svm->vmcb->save.star; | ||
1075 | break; | ||
1076 | #ifdef CONFIG_X86_64 | ||
1077 | case MSR_LSTAR: | ||
1078 | *data = svm->vmcb->save.lstar; | ||
1079 | break; | ||
1080 | case MSR_CSTAR: | ||
1081 | *data = svm->vmcb->save.cstar; | ||
1082 | break; | ||
1083 | case MSR_KERNEL_GS_BASE: | ||
1084 | *data = svm->vmcb->save.kernel_gs_base; | ||
1085 | break; | ||
1086 | case MSR_SYSCALL_MASK: | ||
1087 | *data = svm->vmcb->save.sfmask; | ||
1088 | break; | ||
1089 | #endif | ||
1090 | case MSR_IA32_SYSENTER_CS: | ||
1091 | *data = svm->vmcb->save.sysenter_cs; | ||
1092 | break; | ||
1093 | case MSR_IA32_SYSENTER_EIP: | ||
1094 | *data = svm->vmcb->save.sysenter_eip; | ||
1095 | break; | ||
1096 | case MSR_IA32_SYSENTER_ESP: | ||
1097 | *data = svm->vmcb->save.sysenter_esp; | ||
1098 | break; | ||
1099 | default: | ||
1100 | return kvm_get_msr_common(vcpu, ecx, data); | ||
1101 | } | ||
1102 | return 0; | ||
1103 | } | ||
1104 | |||
1105 | static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | ||
1106 | { | ||
1107 | u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX]; | ||
1108 | u64 data; | ||
1109 | |||
1110 | if (svm_get_msr(&svm->vcpu, ecx, &data)) | ||
1111 | kvm_inject_gp(&svm->vcpu, 0); | ||
1112 | else { | ||
1113 | svm->vmcb->save.rax = data & 0xffffffff; | ||
1114 | svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32; | ||
1115 | svm->next_rip = svm->vmcb->save.rip + 2; | ||
1116 | skip_emulated_instruction(&svm->vcpu); | ||
1117 | } | ||
1118 | return 1; | ||
1119 | } | ||
1120 | |||
1121 | static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) | ||
1122 | { | ||
1123 | struct vcpu_svm *svm = to_svm(vcpu); | ||
1124 | |||
1125 | switch (ecx) { | ||
1126 | case MSR_IA32_TIME_STAMP_COUNTER: { | ||
1127 | u64 tsc; | ||
1128 | |||
1129 | rdtscll(tsc); | ||
1130 | svm->vmcb->control.tsc_offset = data - tsc; | ||
1131 | break; | ||
1132 | } | ||
1133 | case MSR_K6_STAR: | ||
1134 | svm->vmcb->save.star = data; | ||
1135 | break; | ||
1136 | #ifdef CONFIG_X86_64 | ||
1137 | case MSR_LSTAR: | ||
1138 | svm->vmcb->save.lstar = data; | ||
1139 | break; | ||
1140 | case MSR_CSTAR: | ||
1141 | svm->vmcb->save.cstar = data; | ||
1142 | break; | ||
1143 | case MSR_KERNEL_GS_BASE: | ||
1144 | svm->vmcb->save.kernel_gs_base = data; | ||
1145 | break; | ||
1146 | case MSR_SYSCALL_MASK: | ||
1147 | svm->vmcb->save.sfmask = data; | ||
1148 | break; | ||
1149 | #endif | ||
1150 | case MSR_IA32_SYSENTER_CS: | ||
1151 | svm->vmcb->save.sysenter_cs = data; | ||
1152 | break; | ||
1153 | case MSR_IA32_SYSENTER_EIP: | ||
1154 | svm->vmcb->save.sysenter_eip = data; | ||
1155 | break; | ||
1156 | case MSR_IA32_SYSENTER_ESP: | ||
1157 | svm->vmcb->save.sysenter_esp = data; | ||
1158 | break; | ||
1159 | case MSR_K7_EVNTSEL0: | ||
1160 | case MSR_K7_EVNTSEL1: | ||
1161 | case MSR_K7_EVNTSEL2: | ||
1162 | case MSR_K7_EVNTSEL3: | ||
1163 | /* | ||
1164 | * Only writing 0 to the performance counters is supported for now, | ||
1165 | * to make Windows happy. This should be replaced by real | ||
1166 | * performance counter emulation later. | ||
1167 | */ | ||
1168 | if (data != 0) | ||
1169 | goto unhandled; | ||
1170 | break; | ||
1171 | default: | ||
1172 | unhandled: | ||
1173 | return kvm_set_msr_common(vcpu, ecx, data); | ||
1174 | } | ||
1175 | return 0; | ||
1176 | } | ||
1177 | |||
1178 | static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | ||
1179 | { | ||
1180 | u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX]; | ||
1181 | u64 data = (svm->vmcb->save.rax & -1u) | ||
1182 | | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32); | ||
1183 | svm->next_rip = svm->vmcb->save.rip + 2; | ||
1184 | if (svm_set_msr(&svm->vcpu, ecx, data)) | ||
1185 | kvm_inject_gp(&svm->vcpu, 0); | ||
1186 | else | ||
1187 | skip_emulated_instruction(&svm->vcpu); | ||
1188 | return 1; | ||
1189 | } | ||
1190 | |||
1191 | static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | ||
1192 | { | ||
1193 | if (svm->vmcb->control.exit_info_1) | ||
1194 | return wrmsr_interception(svm, kvm_run); | ||
1195 | else | ||
1196 | return rdmsr_interception(svm, kvm_run); | ||
1197 | } | ||
1198 | |||
1199 | static int interrupt_window_interception(struct vcpu_svm *svm, | ||
1200 | struct kvm_run *kvm_run) | ||
1201 | { | ||
1202 | svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR); | ||
1203 | svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; | ||
1204 | /* | ||
1205 | * If user space is waiting to inject interrupts, exit as soon as | ||
1206 | * possible. | ||
1207 | */ | ||
1208 | if (kvm_run->request_interrupt_window && | ||
1209 | !svm->vcpu.arch.irq_summary) { | ||
1210 | ++svm->vcpu.stat.irq_window_exits; | ||
1211 | kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; | ||
1212 | return 0; | ||
1213 | } | ||
1214 | |||
1215 | return 1; | ||
1216 | } | ||
1217 | |||
1218 | static int (*svm_exit_handlers[])(struct vcpu_svm *svm, | ||
1219 | struct kvm_run *kvm_run) = { | ||
1220 | [SVM_EXIT_READ_CR0] = emulate_on_interception, | ||
1221 | [SVM_EXIT_READ_CR3] = emulate_on_interception, | ||
1222 | [SVM_EXIT_READ_CR4] = emulate_on_interception, | ||
1223 | [SVM_EXIT_READ_CR8] = emulate_on_interception, | ||
1224 | /* for now: */ | ||
1225 | [SVM_EXIT_WRITE_CR0] = emulate_on_interception, | ||
1226 | [SVM_EXIT_WRITE_CR3] = emulate_on_interception, | ||
1227 | [SVM_EXIT_WRITE_CR4] = emulate_on_interception, | ||
1228 | [SVM_EXIT_WRITE_CR8] = cr8_write_interception, | ||
1229 | [SVM_EXIT_READ_DR0] = emulate_on_interception, | ||
1230 | [SVM_EXIT_READ_DR1] = emulate_on_interception, | ||
1231 | [SVM_EXIT_READ_DR2] = emulate_on_interception, | ||
1232 | [SVM_EXIT_READ_DR3] = emulate_on_interception, | ||
1233 | [SVM_EXIT_WRITE_DR0] = emulate_on_interception, | ||
1234 | [SVM_EXIT_WRITE_DR1] = emulate_on_interception, | ||
1235 | [SVM_EXIT_WRITE_DR2] = emulate_on_interception, | ||
1236 | [SVM_EXIT_WRITE_DR3] = emulate_on_interception, | ||
1237 | [SVM_EXIT_WRITE_DR5] = emulate_on_interception, | ||
1238 | [SVM_EXIT_WRITE_DR7] = emulate_on_interception, | ||
1239 | [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception, | ||
1240 | [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception, | ||
1241 | [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception, | ||
1242 | [SVM_EXIT_INTR] = nop_on_interception, | ||
1243 | [SVM_EXIT_NMI] = nop_on_interception, | ||
1244 | [SVM_EXIT_SMI] = nop_on_interception, | ||
1245 | [SVM_EXIT_INIT] = nop_on_interception, | ||
1246 | [SVM_EXIT_VINTR] = interrupt_window_interception, | ||
1247 | /* [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, */ | ||
1248 | [SVM_EXIT_CPUID] = cpuid_interception, | ||
1249 | [SVM_EXIT_INVD] = emulate_on_interception, | ||
1250 | [SVM_EXIT_HLT] = halt_interception, | ||
1251 | [SVM_EXIT_INVLPG] = emulate_on_interception, | ||
1252 | [SVM_EXIT_INVLPGA] = invalid_op_interception, | ||
1253 | [SVM_EXIT_IOIO] = io_interception, | ||
1254 | [SVM_EXIT_MSR] = msr_interception, | ||
1255 | [SVM_EXIT_TASK_SWITCH] = task_switch_interception, | ||
1256 | [SVM_EXIT_SHUTDOWN] = shutdown_interception, | ||
1257 | [SVM_EXIT_VMRUN] = invalid_op_interception, | ||
1258 | [SVM_EXIT_VMMCALL] = vmmcall_interception, | ||
1259 | [SVM_EXIT_VMLOAD] = invalid_op_interception, | ||
1260 | [SVM_EXIT_VMSAVE] = invalid_op_interception, | ||
1261 | [SVM_EXIT_STGI] = invalid_op_interception, | ||
1262 | [SVM_EXIT_CLGI] = invalid_op_interception, | ||
1263 | [SVM_EXIT_SKINIT] = invalid_op_interception, | ||
1264 | [SVM_EXIT_WBINVD] = emulate_on_interception, | ||
1265 | [SVM_EXIT_MONITOR] = invalid_op_interception, | ||
1266 | [SVM_EXIT_MWAIT] = invalid_op_interception, | ||
1267 | }; | ||
1268 | |||
1269 | |||
1270 | static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | ||
1271 | { | ||
1272 | struct vcpu_svm *svm = to_svm(vcpu); | ||
1273 | u32 exit_code = svm->vmcb->control.exit_code; | ||
1274 | |||
1275 | kvm_reput_irq(svm); | ||
1276 | |||
1277 | if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) { | ||
1278 | kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; | ||
1279 | kvm_run->fail_entry.hardware_entry_failure_reason | ||
1280 | = svm->vmcb->control.exit_code; | ||
1281 | return 0; | ||
1282 | } | ||
1283 | |||
1284 | if (is_external_interrupt(svm->vmcb->control.exit_int_info) && | ||
1285 | exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR) | ||
1286 | printk(KERN_ERR "%s: unexpected exit_int_info 0x%x " | ||
1287 | "exit_code 0x%x\n", | ||
1288 | __FUNCTION__, svm->vmcb->control.exit_int_info, | ||
1289 | exit_code); | ||
1290 | |||
1291 | if (exit_code >= ARRAY_SIZE(svm_exit_handlers) | ||
1292 | || !svm_exit_handlers[exit_code]) { | ||
1293 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | ||
1294 | kvm_run->hw.hardware_exit_reason = exit_code; | ||
1295 | return 0; | ||
1296 | } | ||
1297 | |||
1298 | return svm_exit_handlers[exit_code](svm, kvm_run); | ||
1299 | } | ||
1300 | |||
1301 | static void reload_tss(struct kvm_vcpu *vcpu) | ||
1302 | { | ||
1303 | int cpu = raw_smp_processor_id(); | ||
1304 | |||
1305 | struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu); | ||
1306 | svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */ | ||
1307 | load_TR_desc(); | ||
1308 | } | ||
1309 | |||
1310 | static void pre_svm_run(struct vcpu_svm *svm) | ||
1311 | { | ||
1312 | int cpu = raw_smp_processor_id(); | ||
1313 | |||
1314 | struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu); | ||
1315 | |||
1316 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; | ||
1317 | if (svm->vcpu.cpu != cpu || | ||
1318 | svm->asid_generation != svm_data->asid_generation) | ||
1319 | new_asid(svm, svm_data); | ||
1320 | } | ||
1321 | |||
1322 | |||
1323 | static inline void svm_inject_irq(struct vcpu_svm *svm, int irq) | ||
1324 | { | ||
1325 | struct vmcb_control_area *control; | ||
1326 | |||
1327 | control = &svm->vmcb->control; | ||
1328 | control->int_vector = irq; | ||
1329 | control->int_ctl &= ~V_INTR_PRIO_MASK; | ||
1330 | control->int_ctl |= V_IRQ_MASK | | ||
1331 | ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT); | ||
1332 | } | ||
1333 | |||
1334 | static void svm_set_irq(struct kvm_vcpu *vcpu, int irq) | ||
1335 | { | ||
1336 | struct vcpu_svm *svm = to_svm(vcpu); | ||
1337 | |||
1338 | svm_inject_irq(svm, irq); | ||
1339 | } | ||
1340 | |||
1341 | static void svm_intr_assist(struct kvm_vcpu *vcpu) | ||
1342 | { | ||
1343 | struct vcpu_svm *svm = to_svm(vcpu); | ||
1344 | struct vmcb *vmcb = svm->vmcb; | ||
1345 | int intr_vector = -1; | ||
1346 | |||
1347 | if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) && | ||
1348 | ((vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK) == 0)) { | ||
1349 | intr_vector = vmcb->control.exit_int_info & | ||
1350 | SVM_EVTINJ_VEC_MASK; | ||
1351 | vmcb->control.exit_int_info = 0; | ||
1352 | svm_inject_irq(svm, intr_vector); | ||
1353 | return; | ||
1354 | } | ||
1355 | |||
1356 | if (vmcb->control.int_ctl & V_IRQ_MASK) | ||
1357 | return; | ||
1358 | |||
1359 | if (!kvm_cpu_has_interrupt(vcpu)) | ||
1360 | return; | ||
1361 | |||
1362 | if (!(vmcb->save.rflags & X86_EFLAGS_IF) || | ||
1363 | (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) || | ||
1364 | (vmcb->control.event_inj & SVM_EVTINJ_VALID)) { | ||
1365 | /* unable to deliver irq, set pending irq */ | ||
1366 | vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR); | ||
1367 | svm_inject_irq(svm, 0x0); | ||
1368 | return; | ||
1369 | } | ||
1370 | /* Okay, we can deliver the interrupt: grab it and update PIC state. */ | ||
1371 | intr_vector = kvm_cpu_get_interrupt(vcpu); | ||
1372 | svm_inject_irq(svm, intr_vector); | ||
1373 | kvm_timer_intr_post(vcpu, intr_vector); | ||
1374 | } | ||
1375 | |||
1376 | static void kvm_reput_irq(struct vcpu_svm *svm) | ||
1377 | { | ||
1378 | struct vmcb_control_area *control = &svm->vmcb->control; | ||
1379 | |||
1380 | if ((control->int_ctl & V_IRQ_MASK) | ||
1381 | && !irqchip_in_kernel(svm->vcpu.kvm)) { | ||
1382 | control->int_ctl &= ~V_IRQ_MASK; | ||
1383 | push_irq(&svm->vcpu, control->int_vector); | ||
1384 | } | ||
1385 | |||
1386 | svm->vcpu.arch.interrupt_window_open = | ||
1387 | !(control->int_state & SVM_INTERRUPT_SHADOW_MASK); | ||
1388 | } | ||
1389 | |||
1390 | static void svm_do_inject_vector(struct vcpu_svm *svm) | ||
1391 | { | ||
1392 | struct kvm_vcpu *vcpu = &svm->vcpu; | ||
1393 | int word_index = __ffs(vcpu->arch.irq_summary); | ||
1394 | int bit_index = __ffs(vcpu->arch.irq_pending[word_index]); | ||
1395 | int irq = word_index * BITS_PER_LONG + bit_index; | ||
1396 | |||
1397 | clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]); | ||
1398 | if (!vcpu->arch.irq_pending[word_index]) | ||
1399 | clear_bit(word_index, &vcpu->arch.irq_summary); | ||
1400 | svm_inject_irq(svm, irq); | ||
1401 | } | ||
1402 | |||
1403 | static void do_interrupt_requests(struct kvm_vcpu *vcpu, | ||
1404 | struct kvm_run *kvm_run) | ||
1405 | { | ||
1406 | struct vcpu_svm *svm = to_svm(vcpu); | ||
1407 | struct vmcb_control_area *control = &svm->vmcb->control; | ||
1408 | |||
1409 | svm->vcpu.arch.interrupt_window_open = | ||
1410 | (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) && | ||
1411 | (svm->vmcb->save.rflags & X86_EFLAGS_IF)); | ||
1412 | |||
1413 | if (svm->vcpu.arch.interrupt_window_open && svm->vcpu.arch.irq_summary) | ||
1414 | /* | ||
1415 | * Interrupts are enabled and not blocked by sti or mov ss: inject. | ||
1416 | */ | ||
1417 | svm_do_inject_vector(svm); | ||
1418 | |||
1419 | /* | ||
1420 | * Interrupts blocked. Wait for unblock. | ||
1421 | */ | ||
1422 | if (!svm->vcpu.arch.interrupt_window_open && | ||
1423 | (svm->vcpu.arch.irq_summary || kvm_run->request_interrupt_window)) | ||
1424 | control->intercept |= 1ULL << INTERCEPT_VINTR; | ||
1425 | else | ||
1426 | control->intercept &= ~(1ULL << INTERCEPT_VINTR); | ||
1427 | } | ||
1428 | |||
1429 | static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr) | ||
1430 | { | ||
1431 | return 0; | ||
1432 | } | ||
1433 | |||
1434 | static void save_db_regs(unsigned long *db_regs) | ||
1435 | { | ||
1436 | asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0])); | ||
1437 | asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1])); | ||
1438 | asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2])); | ||
1439 | asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3])); | ||
1440 | } | ||
1441 | |||
1442 | static void load_db_regs(unsigned long *db_regs) | ||
1443 | { | ||
1444 | asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0])); | ||
1445 | asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1])); | ||
1446 | asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2])); | ||
1447 | asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3])); | ||
1448 | } | ||
1449 | |||
1450 | static void svm_flush_tlb(struct kvm_vcpu *vcpu) | ||
1451 | { | ||
1452 | force_new_asid(vcpu); | ||
1453 | } | ||
1454 | |||
1455 | static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) | ||
1456 | { | ||
1457 | } | ||
1458 | |||
1459 | static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | ||
1460 | { | ||
1461 | struct vcpu_svm *svm = to_svm(vcpu); | ||
1462 | u16 fs_selector; | ||
1463 | u16 gs_selector; | ||
1464 | u16 ldt_selector; | ||
1465 | |||
1466 | pre_svm_run(svm); | ||
1467 | |||
1468 | save_host_msrs(vcpu); | ||
1469 | fs_selector = read_fs(); | ||
1470 | gs_selector = read_gs(); | ||
1471 | ldt_selector = read_ldt(); | ||
1472 | svm->host_cr2 = kvm_read_cr2(); | ||
1473 | svm->host_dr6 = read_dr6(); | ||
1474 | svm->host_dr7 = read_dr7(); | ||
1475 | svm->vmcb->save.cr2 = vcpu->arch.cr2; | ||
1476 | |||
1477 | if (svm->vmcb->save.dr7 & 0xff) { | ||
1478 | write_dr7(0); | ||
1479 | save_db_regs(svm->host_db_regs); | ||
1480 | load_db_regs(svm->db_regs); | ||
1481 | } | ||
1482 | |||
1483 | clgi(); | ||
1484 | |||
1485 | local_irq_enable(); | ||
1486 | |||
1487 | asm volatile ( | ||
1488 | #ifdef CONFIG_X86_64 | ||
1489 | "push %%rbp; \n\t" | ||
1490 | #else | ||
1491 | "push %%ebp; \n\t" | ||
1492 | #endif | ||
1493 | |||
1494 | #ifdef CONFIG_X86_64 | ||
1495 | "mov %c[rbx](%[svm]), %%rbx \n\t" | ||
1496 | "mov %c[rcx](%[svm]), %%rcx \n\t" | ||
1497 | "mov %c[rdx](%[svm]), %%rdx \n\t" | ||
1498 | "mov %c[rsi](%[svm]), %%rsi \n\t" | ||
1499 | "mov %c[rdi](%[svm]), %%rdi \n\t" | ||
1500 | "mov %c[rbp](%[svm]), %%rbp \n\t" | ||
1501 | "mov %c[r8](%[svm]), %%r8 \n\t" | ||
1502 | "mov %c[r9](%[svm]), %%r9 \n\t" | ||
1503 | "mov %c[r10](%[svm]), %%r10 \n\t" | ||
1504 | "mov %c[r11](%[svm]), %%r11 \n\t" | ||
1505 | "mov %c[r12](%[svm]), %%r12 \n\t" | ||
1506 | "mov %c[r13](%[svm]), %%r13 \n\t" | ||
1507 | "mov %c[r14](%[svm]), %%r14 \n\t" | ||
1508 | "mov %c[r15](%[svm]), %%r15 \n\t" | ||
1509 | #else | ||
1510 | "mov %c[rbx](%[svm]), %%ebx \n\t" | ||
1511 | "mov %c[rcx](%[svm]), %%ecx \n\t" | ||
1512 | "mov %c[rdx](%[svm]), %%edx \n\t" | ||
1513 | "mov %c[rsi](%[svm]), %%esi \n\t" | ||
1514 | "mov %c[rdi](%[svm]), %%edi \n\t" | ||
1515 | "mov %c[rbp](%[svm]), %%ebp \n\t" | ||
1516 | #endif | ||
1517 | |||
1518 | #ifdef CONFIG_X86_64 | ||
1519 | /* Enter guest mode */ | ||
1520 | "push %%rax \n\t" | ||
1521 | "mov %c[vmcb](%[svm]), %%rax \n\t" | ||
1522 | SVM_VMLOAD "\n\t" | ||
1523 | SVM_VMRUN "\n\t" | ||
1524 | SVM_VMSAVE "\n\t" | ||
1525 | "pop %%rax \n\t" | ||
1526 | #else | ||
1527 | /* Enter guest mode */ | ||
1528 | "push %%eax \n\t" | ||
1529 | "mov %c[vmcb](%[svm]), %%eax \n\t" | ||
1530 | SVM_VMLOAD "\n\t" | ||
1531 | SVM_VMRUN "\n\t" | ||
1532 | SVM_VMSAVE "\n\t" | ||
1533 | "pop %%eax \n\t" | ||
1534 | #endif | ||
1535 | |||
1536 | /* Save guest registers, load host registers */ | ||
1537 | #ifdef CONFIG_X86_64 | ||
1538 | "mov %%rbx, %c[rbx](%[svm]) \n\t" | ||
1539 | "mov %%rcx, %c[rcx](%[svm]) \n\t" | ||
1540 | "mov %%rdx, %c[rdx](%[svm]) \n\t" | ||
1541 | "mov %%rsi, %c[rsi](%[svm]) \n\t" | ||
1542 | "mov %%rdi, %c[rdi](%[svm]) \n\t" | ||
1543 | "mov %%rbp, %c[rbp](%[svm]) \n\t" | ||
1544 | "mov %%r8, %c[r8](%[svm]) \n\t" | ||
1545 | "mov %%r9, %c[r9](%[svm]) \n\t" | ||
1546 | "mov %%r10, %c[r10](%[svm]) \n\t" | ||
1547 | "mov %%r11, %c[r11](%[svm]) \n\t" | ||
1548 | "mov %%r12, %c[r12](%[svm]) \n\t" | ||
1549 | "mov %%r13, %c[r13](%[svm]) \n\t" | ||
1550 | "mov %%r14, %c[r14](%[svm]) \n\t" | ||
1551 | "mov %%r15, %c[r15](%[svm]) \n\t" | ||
1552 | |||
1553 | "pop %%rbp; \n\t" | ||
1554 | #else | ||
1555 | "mov %%ebx, %c[rbx](%[svm]) \n\t" | ||
1556 | "mov %%ecx, %c[rcx](%[svm]) \n\t" | ||
1557 | "mov %%edx, %c[rdx](%[svm]) \n\t" | ||
1558 | "mov %%esi, %c[rsi](%[svm]) \n\t" | ||
1559 | "mov %%edi, %c[rdi](%[svm]) \n\t" | ||
1560 | "mov %%ebp, %c[rbp](%[svm]) \n\t" | ||
1561 | |||
1562 | "pop %%ebp; \n\t" | ||
1563 | #endif | ||
1564 | : | ||
1565 | : [svm]"a"(svm), | ||
1566 | [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)), | ||
1567 | [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])), | ||
1568 | [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])), | ||
1569 | [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])), | ||
1570 | [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])), | ||
1571 | [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])), | ||
1572 | [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP])) | ||
1573 | #ifdef CONFIG_X86_64 | ||
1574 | , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])), | ||
1575 | [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])), | ||
1576 | [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])), | ||
1577 | [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])), | ||
1578 | [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])), | ||
1579 | [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])), | ||
1580 | [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])), | ||
1581 | [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15])) | ||
1582 | #endif | ||
1583 | : "cc", "memory" | ||
1584 | #ifdef CONFIG_X86_64 | ||
1585 | , "rbx", "rcx", "rdx", "rsi", "rdi" | ||
1586 | , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15" | ||
1587 | #else | ||
1588 | , "ebx", "ecx", "edx" , "esi", "edi" | ||
1589 | #endif | ||
1590 | ); | ||
1591 | |||
1592 | if ((svm->vmcb->save.dr7 & 0xff)) | ||
1593 | load_db_regs(svm->host_db_regs); | ||
1594 | |||
1595 | vcpu->arch.cr2 = svm->vmcb->save.cr2; | ||
1596 | |||
1597 | write_dr6(svm->host_dr6); | ||
1598 | write_dr7(svm->host_dr7); | ||
1599 | kvm_write_cr2(svm->host_cr2); | ||
1600 | |||
1601 | load_fs(fs_selector); | ||
1602 | load_gs(gs_selector); | ||
1603 | load_ldt(ldt_selector); | ||
1604 | load_host_msrs(vcpu); | ||
1605 | |||
1606 | reload_tss(vcpu); | ||
1607 | |||
1608 | local_irq_disable(); | ||
1609 | |||
1610 | stgi(); | ||
1611 | |||
1612 | svm->next_rip = 0; | ||
1613 | } | ||
1614 | |||
1615 | static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root) | ||
1616 | { | ||
1617 | struct vcpu_svm *svm = to_svm(vcpu); | ||
1618 | |||
1619 | svm->vmcb->save.cr3 = root; | ||
1620 | force_new_asid(vcpu); | ||
1621 | |||
1622 | if (vcpu->fpu_active) { | ||
1623 | svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR); | ||
1624 | svm->vmcb->save.cr0 |= X86_CR0_TS; | ||
1625 | vcpu->fpu_active = 0; | ||
1626 | } | ||
1627 | } | ||
1628 | |||
1629 | static int is_disabled(void) | ||
1630 | { | ||
1631 | u64 vm_cr; | ||
1632 | |||
1633 | rdmsrl(MSR_VM_CR, vm_cr); | ||
1634 | if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE)) | ||
1635 | return 1; | ||
1636 | |||
1637 | return 0; | ||
1638 | } | ||
1639 | |||
1640 | static void | ||
1641 | svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) | ||
1642 | { | ||
1643 | /* | ||
1644 | * Patch in the VMMCALL instruction: | ||
1645 | */ | ||
1646 | hypercall[0] = 0x0f; | ||
1647 | hypercall[1] = 0x01; | ||
1648 | hypercall[2] = 0xd9; | ||
1649 | } | ||
1650 | |||
1651 | static void svm_check_processor_compat(void *rtn) | ||
1652 | { | ||
1653 | *(int *)rtn = 0; | ||
1654 | } | ||
1655 | |||
1656 | static struct kvm_x86_ops svm_x86_ops = { | ||
1657 | .cpu_has_kvm_support = has_svm, | ||
1658 | .disabled_by_bios = is_disabled, | ||
1659 | .hardware_setup = svm_hardware_setup, | ||
1660 | .hardware_unsetup = svm_hardware_unsetup, | ||
1661 | .check_processor_compatibility = svm_check_processor_compat, | ||
1662 | .hardware_enable = svm_hardware_enable, | ||
1663 | .hardware_disable = svm_hardware_disable, | ||
1664 | |||
1665 | .vcpu_create = svm_create_vcpu, | ||
1666 | .vcpu_free = svm_free_vcpu, | ||
1667 | .vcpu_reset = svm_vcpu_reset, | ||
1668 | |||
1669 | .prepare_guest_switch = svm_prepare_guest_switch, | ||
1670 | .vcpu_load = svm_vcpu_load, | ||
1671 | .vcpu_put = svm_vcpu_put, | ||
1672 | .vcpu_decache = svm_vcpu_decache, | ||
1673 | |||
1674 | .set_guest_debug = svm_guest_debug, | ||
1675 | .get_msr = svm_get_msr, | ||
1676 | .set_msr = svm_set_msr, | ||
1677 | .get_segment_base = svm_get_segment_base, | ||
1678 | .get_segment = svm_get_segment, | ||
1679 | .set_segment = svm_set_segment, | ||
1680 | .get_cs_db_l_bits = kvm_get_cs_db_l_bits, | ||
1681 | .decache_cr4_guest_bits = svm_decache_cr4_guest_bits, | ||
1682 | .set_cr0 = svm_set_cr0, | ||
1683 | .set_cr3 = svm_set_cr3, | ||
1684 | .set_cr4 = svm_set_cr4, | ||
1685 | .set_efer = svm_set_efer, | ||
1686 | .get_idt = svm_get_idt, | ||
1687 | .set_idt = svm_set_idt, | ||
1688 | .get_gdt = svm_get_gdt, | ||
1689 | .set_gdt = svm_set_gdt, | ||
1690 | .get_dr = svm_get_dr, | ||
1691 | .set_dr = svm_set_dr, | ||
1692 | .cache_regs = svm_cache_regs, | ||
1693 | .decache_regs = svm_decache_regs, | ||
1694 | .get_rflags = svm_get_rflags, | ||
1695 | .set_rflags = svm_set_rflags, | ||
1696 | |||
1697 | .tlb_flush = svm_flush_tlb, | ||
1698 | |||
1699 | .run = svm_vcpu_run, | ||
1700 | .handle_exit = handle_exit, | ||
1701 | .skip_emulated_instruction = skip_emulated_instruction, | ||
1702 | .patch_hypercall = svm_patch_hypercall, | ||
1703 | .get_irq = svm_get_irq, | ||
1704 | .set_irq = svm_set_irq, | ||
1705 | .queue_exception = svm_queue_exception, | ||
1706 | .exception_injected = svm_exception_injected, | ||
1707 | .inject_pending_irq = svm_intr_assist, | ||
1708 | .inject_pending_vectors = do_interrupt_requests, | ||
1709 | |||
1710 | .set_tss_addr = svm_set_tss_addr, | ||
1711 | }; | ||
1712 | |||
1713 | static int __init svm_init(void) | ||
1714 | { | ||
1715 | return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm), | ||
1716 | THIS_MODULE); | ||
1717 | } | ||
1718 | |||
1719 | static void __exit svm_exit(void) | ||
1720 | { | ||
1721 | kvm_exit(); | ||
1722 | } | ||
1723 | |||
1724 | module_init(svm_init) | ||
1725 | module_exit(svm_exit) | ||