author		Gregory Haskins <ghaskins@novell.com>	2007-07-27 08:13:10 -0400
committer	Avi Kivity <avi@qumranet.com>	2007-10-13 04:18:20 -0400
commit		a2fa3e9f52d875f7d4ca98434603b8756be71ba8
tree		915c13bfedc867d4d2e4b98c4d3b10b6ef25d451 /drivers
parent		c820c2aa27bb5b6069aa708b0a0b44b59a16bfa7
KVM: Remove arch specific components from the general code
struct kvm_vcpu has vmx-specific members; move them into a private structure.
Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Avi Kivity <avi@qumranet.com>
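
The heart of the change is small enough to show in isolation: the generic vcpu keeps only an opaque _priv handle plus a valid flag, each backend allocates its own container with a back-pointer, and a typed inline accessor (to_svm() in this patch) recovers the private view. The following is a minimal standalone sketch of that shape, with simplified fields and a made-up vmcb_pa value for illustration; it is not the kernel code itself:

    #include <stdio.h>
    #include <stdlib.h>

    /* Generic vcpu: no arch-specific members, just an opaque handle. */
    struct kvm_vcpu {
        int valid;    /* replaces the old "vcpu->vmcs != NULL" liveness test */
        void *_priv;  /* backend-private state (vcpu_svm or vcpu_vmx) */
    };

    /* Backend-private container, as kvm_svm.h gains in this patch. */
    struct vcpu_svm {
        struct kvm_vcpu *vcpu;  /* back-pointer to the generic part */
        unsigned long vmcb_pa;  /* stand-in for the real per-vcpu state */
    };

    /* Typed accessor, mirroring the to_svm() helper added to svm.c. */
    static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
    {
        return (struct vcpu_svm *)vcpu->_priv;
    }

    int main(void)
    {
        struct kvm_vcpu vcpu = { 0 };
        struct vcpu_svm *svm = calloc(1, sizeof(*svm));

        if (!svm)
            return 1;

        /* create: wire up both directions, then mark the vcpu usable */
        svm->vcpu = &vcpu;
        vcpu._priv = svm;
        vcpu.valid = 1;

        /* arch code reaches its state only through the accessor */
        to_svm(&vcpu)->vmcb_pa = 0x1000;
        printf("vmcb_pa = %#lx\n", to_svm(&vcpu)->vmcb_pa);

        /* destroy: free the private part and clear the handle */
        free(vcpu._priv);
        vcpu._priv = NULL;
        vcpu.valid = 0;
        return 0;
    }

The same create/destroy discipline appears in svm_create_vcpu()/svm_free_vcpu() below, and the new valid flag replaces the old "is vcpu->vmcs non-NULL?" test in kvm_main.c.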
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/kvm/kvm.h	31
-rw-r--r--	drivers/kvm/kvm_main.c	26
-rw-r--r--	drivers/kvm/kvm_svm.h	3
-rw-r--r--	drivers/kvm/svm.c	394
-rw-r--r--	drivers/kvm/vmx.c	249
5 files changed, 397 insertions, 306 deletions
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 57504ae93dbc..954a14089605 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -15,7 +15,6 @@
 #include <linux/mm.h>
 #include <asm/signal.h>
 
-#include "vmx.h"
 #include <linux/kvm.h>
 #include <linux/kvm_para.h>
 
@@ -140,14 +139,6 @@ struct kvm_mmu_page {
 	};
 };
 
-struct vmcs {
-	u32 revision_id;
-	u32 abort;
-	char data[0];
-};
-
-#define vmx_msr_entry kvm_msr_entry
-
 struct kvm_vcpu;
 
 /*
@@ -309,15 +300,12 @@ void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
 			     struct kvm_io_device *dev);
 
 struct kvm_vcpu {
+	int valid;
 	struct kvm *kvm;
 	int vcpu_id;
-	union {
-		struct vmcs *vmcs;
-		struct vcpu_svm *svm;
-	};
+	void *_priv;
 	struct mutex mutex;
 	int cpu;
-	int launched;
 	u64 host_tsc;
 	struct kvm_run *run;
 	int interrupt_window_open;
@@ -340,14 +328,6 @@ struct kvm_vcpu {
 	u64 shadow_efer;
 	u64 apic_base;
 	u64 ia32_misc_enable_msr;
-	int nmsrs;
-	int save_nmsrs;
-	int msr_offset_efer;
-#ifdef CONFIG_X86_64
-	int msr_offset_kernel_gs_base;
-#endif
-	struct vmx_msr_entry *guest_msrs;
-	struct vmx_msr_entry *host_msrs;
 
 	struct kvm_mmu mmu;
 
@@ -366,11 +346,6 @@ struct kvm_vcpu {
 	char *guest_fx_image;
 	int fpu_active;
 	int guest_fpu_loaded;
-	struct vmx_host_state {
-		int loaded;
-		u16 fs_sel, gs_sel, ldt_sel;
-		int fs_gs_ldt_reload_needed;
-	} vmx_host_state;
 
 	int mmio_needed;
 	int mmio_read_completed;
@@ -579,8 +554,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
 
 void fx_init(struct kvm_vcpu *vcpu);
 
-void load_msrs(struct vmx_msr_entry *e, int n);
-void save_msrs(struct vmx_msr_entry *e, int n);
 void kvm_resched(struct kvm_vcpu *vcpu);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 65c9a31f1d91..bf8b8f030192 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -367,7 +367,7 @@ static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
 
 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->vmcs)
+	if (!vcpu->valid)
 		return;
 
 	vcpu_load(vcpu);
@@ -377,7 +377,7 @@ static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
 
 static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->vmcs)
+	if (!vcpu->valid)
 		return;
 
 	vcpu_load(vcpu);
@@ -1645,24 +1645,6 @@ void kvm_resched(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_resched);
 
-void load_msrs(struct vmx_msr_entry *e, int n)
-{
-	int i;
-
-	for (i = 0; i < n; ++i)
-		wrmsrl(e[i].index, e[i].data);
-}
-EXPORT_SYMBOL_GPL(load_msrs);
-
-void save_msrs(struct vmx_msr_entry *e, int n)
-{
-	int i;
-
-	for (i = 0; i < n; ++i)
-		rdmsrl(e[i].index, e[i].data);
-}
-EXPORT_SYMBOL_GPL(save_msrs);
-
 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 {
 	int i;
@@ -2401,7 +2383,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 
 	mutex_lock(&vcpu->mutex);
 
-	if (vcpu->vmcs) {
+	if (vcpu->valid) {
 		mutex_unlock(&vcpu->mutex);
 		return -EEXIST;
 	}
@@ -2449,6 +2431,8 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 	kvm->nvcpus = n + 1;
 	spin_unlock(&kvm_lock);
 
+	vcpu->valid = 1;
+
 	return r;
 
 out_free_vcpus:
diff --git a/drivers/kvm/kvm_svm.h b/drivers/kvm/kvm_svm.h
index a869983d683d..82e5d77acbba 100644
--- a/drivers/kvm/kvm_svm.h
+++ b/drivers/kvm/kvm_svm.h
@@ -20,7 +20,10 @@ static const u32 host_save_user_msrs[] = {
 #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
 #define NUM_DB_REGS 4
 
+struct kvm_vcpu;
+
 struct vcpu_svm {
+	struct kvm_vcpu *vcpu;
 	struct vmcb *vmcb;
 	unsigned long vmcb_pa;
 	struct svm_cpu_data *svm_data;
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index 850a1b1d86c5..32481876d98b 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -49,6 +49,11 @@ MODULE_LICENSE("GPL");
 #define SVM_FEATURE_LBRV (1 << 1)
 #define SVM_DEATURE_SVML (1 << 2)
 
+static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
+{
+	return (struct vcpu_svm*)vcpu->_priv;
+}
+
 unsigned long iopm_base;
 unsigned long msrpm_base;
 
@@ -95,7 +100,7 @@ static inline u32 svm_has(u32 feat)
 
 static unsigned get_addr_size(struct kvm_vcpu *vcpu)
 {
-	struct vmcb_save_area *sa = &vcpu->svm->vmcb->save;
+	struct vmcb_save_area *sa = &to_svm(vcpu)->vmcb->save;
 	u16 cs_attrib;
 
 	if (!(sa->cr0 & X86_CR0_PE) || (sa->rflags & X86_EFLAGS_VM))
@@ -181,7 +186,7 @@ static inline void write_dr7(unsigned long val)
 
 static inline void force_new_asid(struct kvm_vcpu *vcpu)
 {
-	vcpu->svm->asid_generation--;
+	to_svm(vcpu)->asid_generation--;
 }
 
 static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
@@ -194,22 +199,24 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	if (!(efer & KVM_EFER_LMA))
 		efer &= ~KVM_EFER_LME;
 
-	vcpu->svm->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
+	to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
 	vcpu->shadow_efer = efer;
 }
 
 static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
 {
-	vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
 				SVM_EVTINJ_VALID_ERR |
 				SVM_EVTINJ_TYPE_EXEPT |
 				GP_VECTOR;
-	vcpu->svm->vmcb->control.event_inj_err = error_code;
+	svm->vmcb->control.event_inj_err = error_code;
 }
 
 static void inject_ud(struct kvm_vcpu *vcpu)
 {
-	vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
+	to_svm(vcpu)->vmcb->control.event_inj = SVM_EVTINJ_VALID |
 				SVM_EVTINJ_TYPE_EXEPT |
 				UD_VECTOR;
 }
@@ -228,19 +235,21 @@ static int is_external_interrupt(u32 info)
 
 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->svm->next_rip) {
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (!svm->next_rip) {
 		printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__);
 		return;
 	}
-	if (vcpu->svm->next_rip - vcpu->svm->vmcb->save.rip > 15) {
+	if (svm->next_rip - svm->vmcb->save.rip > 15) {
 		printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
 		       __FUNCTION__,
-		       vcpu->svm->vmcb->save.rip,
-		       vcpu->svm->next_rip);
+		       svm->vmcb->save.rip,
+		       svm->next_rip);
 	}
 
-	vcpu->rip = vcpu->svm->vmcb->save.rip = vcpu->svm->next_rip;
-	vcpu->svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
+	vcpu->rip = svm->vmcb->save.rip = svm->next_rip;
+	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
 
 	vcpu->interrupt_window_open = 1;
 }
@@ -569,23 +578,27 @@ static void init_vmcb(struct vmcb *vmcb)
 
 static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm;
 	struct page *page;
 	int r;
 
 	r = -ENOMEM;
-	vcpu->svm = kzalloc(sizeof *vcpu->svm, GFP_KERNEL);
-	if (!vcpu->svm)
+	svm = kzalloc(sizeof *svm, GFP_KERNEL);
+	if (!svm)
 		goto out1;
 	page = alloc_page(GFP_KERNEL);
 	if (!page)
 		goto out2;
 
-	vcpu->svm->vmcb = page_address(page);
-	clear_page(vcpu->svm->vmcb);
-	vcpu->svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
-	vcpu->svm->asid_generation = 0;
-	memset(vcpu->svm->db_regs, 0, sizeof(vcpu->svm->db_regs));
-	init_vmcb(vcpu->svm->vmcb);
+	svm->vmcb = page_address(page);
+	clear_page(svm->vmcb);
+	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
+	svm->asid_generation = 0;
+	memset(svm->db_regs, 0, sizeof(svm->db_regs));
+	init_vmcb(svm->vmcb);
+
+	svm->vcpu = vcpu;
+	vcpu->_priv = svm;
 
 	fx_init(vcpu);
 	vcpu->fpu_active = 1;
@@ -596,22 +609,26 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 	return 0;
 
 out2:
-	kfree(vcpu->svm);
+	kfree(svm);
out1:
 	return r;
 }
 
 static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->svm)
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (!svm)
 		return;
-	if (vcpu->svm->vmcb)
-		__free_page(pfn_to_page(vcpu->svm->vmcb_pa >> PAGE_SHIFT));
-	kfree(vcpu->svm);
+	if (svm->vmcb)
+		__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
+	kfree(svm);
+	vcpu->_priv = NULL;
 }
 
 static void svm_vcpu_load(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	int cpu, i;
 
 	cpu = get_cpu();
@@ -624,20 +641,21 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu)
 		 */
 		rdtscll(tsc_this);
 		delta = vcpu->host_tsc - tsc_this;
-		vcpu->svm->vmcb->control.tsc_offset += delta;
+		svm->vmcb->control.tsc_offset += delta;
 		vcpu->cpu = cpu;
 	}
 
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
-		rdmsrl(host_save_user_msrs[i], vcpu->svm->host_user_msrs[i]);
+		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 }
 
 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	int i;
 
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
-		wrmsrl(host_save_user_msrs[i], vcpu->svm->host_user_msrs[i]);
+		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 
 	rdtscll(vcpu->host_tsc);
 	put_cpu();
@@ -649,31 +667,34 @@ static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
 
 static void svm_cache_regs(struct kvm_vcpu *vcpu)
 {
-	vcpu->regs[VCPU_REGS_RAX] = vcpu->svm->vmcb->save.rax;
-	vcpu->regs[VCPU_REGS_RSP] = vcpu->svm->vmcb->save.rsp;
-	vcpu->rip = vcpu->svm->vmcb->save.rip;
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	vcpu->regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
+	vcpu->regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
+	vcpu->rip = svm->vmcb->save.rip;
 }
 
 static void svm_decache_regs(struct kvm_vcpu *vcpu)
 {
-	vcpu->svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
-	vcpu->svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
-	vcpu->svm->vmcb->save.rip = vcpu->rip;
+	struct vcpu_svm *svm = to_svm(vcpu);
+	svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
+	svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
+	svm->vmcb->save.rip = vcpu->rip;
 }
 
 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
 {
-	return vcpu->svm->vmcb->save.rflags;
+	return to_svm(vcpu)->vmcb->save.rflags;
 }
 
 static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
-	vcpu->svm->vmcb->save.rflags = rflags;
+	to_svm(vcpu)->vmcb->save.rflags = rflags;
 }
 
 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
 {
-	struct vmcb_save_area *save = &vcpu->svm->vmcb->save;
+	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
 
 	switch (seg) {
 	case VCPU_SREG_CS: return &save->cs;
@@ -725,26 +746,34 @@ static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
 
 static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
 {
-	dt->limit = vcpu->svm->vmcb->save.idtr.limit;
-	dt->base = vcpu->svm->vmcb->save.idtr.base;
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	dt->limit = svm->vmcb->save.idtr.limit;
+	dt->base = svm->vmcb->save.idtr.base;
 }
 
 static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
 {
-	vcpu->svm->vmcb->save.idtr.limit = dt->limit;
-	vcpu->svm->vmcb->save.idtr.base = dt->base ;
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	svm->vmcb->save.idtr.limit = dt->limit;
+	svm->vmcb->save.idtr.base = dt->base ;
 }
 
 static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
 {
-	dt->limit = vcpu->svm->vmcb->save.gdtr.limit;
-	dt->base = vcpu->svm->vmcb->save.gdtr.base;
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	dt->limit = svm->vmcb->save.gdtr.limit;
+	dt->base = svm->vmcb->save.gdtr.base;
 }
 
 static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
 {
-	vcpu->svm->vmcb->save.gdtr.limit = dt->limit;
-	vcpu->svm->vmcb->save.gdtr.base = dt->base ;
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	svm->vmcb->save.gdtr.limit = dt->limit;
+	svm->vmcb->save.gdtr.base = dt->base ;
 }
 
 static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
@@ -753,39 +782,42 @@ static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
+
 #ifdef CONFIG_X86_64
 	if (vcpu->shadow_efer & KVM_EFER_LME) {
 		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
 			vcpu->shadow_efer |= KVM_EFER_LMA;
-			vcpu->svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
+			svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
 		}
 
 		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG) ) {
 			vcpu->shadow_efer &= ~KVM_EFER_LMA;
-			vcpu->svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
+			svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
 		}
 	}
 #endif
 	if ((vcpu->cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
-		vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
+		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
 		vcpu->fpu_active = 1;
 	}
 
 	vcpu->cr0 = cr0;
 	cr0 |= X86_CR0_PG | X86_CR0_WP;
 	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
-	vcpu->svm->vmcb->save.cr0 = cr0;
+	svm->vmcb->save.cr0 = cr0;
 }
 
 static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	vcpu->cr4 = cr4;
-	vcpu->svm->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
+	to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
 }
 
 static void svm_set_segment(struct kvm_vcpu *vcpu,
 			    struct kvm_segment *var, int seg)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb_seg *s = svm_seg(vcpu, seg);
 
 	s->base = var->base;
@@ -804,16 +836,16 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
 		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
 	}
 	if (seg == VCPU_SREG_CS)
-		vcpu->svm->vmcb->save.cpl
-			= (vcpu->svm->vmcb->save.cs.attrib
+		svm->vmcb->save.cpl
+			= (svm->vmcb->save.cs.attrib
 			   >> SVM_SELECTOR_DPL_SHIFT) & 3;
 
 }
 
 /* FIXME:
 
-	vcpu->svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
-	vcpu->svm->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);
+	svm(vcpu)->vmcb->control.int_ctl &= ~V_TPR_MASK;
+	svm(vcpu)->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);
 
 */
 
@@ -825,55 +857,59 @@ static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
 static void load_host_msrs(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_X86_64
-	wrmsrl(MSR_GS_BASE, vcpu->svm->host_gs_base);
+	wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
 #endif
 }
 
 static void save_host_msrs(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_X86_64
-	rdmsrl(MSR_GS_BASE, vcpu->svm->host_gs_base);
+	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
 #endif
 }
 
 static void new_asid(struct kvm_vcpu *vcpu, struct svm_cpu_data *svm_data)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
+
 	if (svm_data->next_asid > svm_data->max_asid) {
 		++svm_data->asid_generation;
 		svm_data->next_asid = 1;
-		vcpu->svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
+		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
 	}
 
 	vcpu->cpu = svm_data->cpu;
-	vcpu->svm->asid_generation = svm_data->asid_generation;
-	vcpu->svm->vmcb->control.asid = svm_data->next_asid++;
+	svm->asid_generation = svm_data->asid_generation;
+	svm->vmcb->control.asid = svm_data->next_asid++;
 }
 
 static void svm_invlpg(struct kvm_vcpu *vcpu, gva_t address)
 {
-	invlpga(address, vcpu->svm->vmcb->control.asid); // is needed?
+	invlpga(address, to_svm(vcpu)->vmcb->control.asid); // is needed?
 }
 
 static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
 {
-	return vcpu->svm->db_regs[dr];
+	return to_svm(vcpu)->db_regs[dr];
 }
 
 static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
 		       int *exception)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
+
 	*exception = 0;
 
-	if (vcpu->svm->vmcb->save.dr7 & DR7_GD_MASK) {
-		vcpu->svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
-		vcpu->svm->vmcb->save.dr6 |= DR6_BD_MASK;
+	if (svm->vmcb->save.dr7 & DR7_GD_MASK) {
+		svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
+		svm->vmcb->save.dr6 |= DR6_BD_MASK;
 		*exception = DB_VECTOR;
 		return;
 	}
 
 	switch (dr) {
 	case 0 ... 3:
-		vcpu->svm->db_regs[dr] = value;
+		svm->db_regs[dr] = value;
 		return;
 	case 4 ... 5:
 		if (vcpu->cr4 & X86_CR4_DE) {
@@ -885,7 +921,7 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
 			*exception = GP_VECTOR;
 			return;
 		}
-		vcpu->svm->vmcb->save.dr7 = value;
+		svm->vmcb->save.dr7 = value;
 		return;
 	}
 	default:
@@ -898,7 +934,8 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
 
 static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	u32 exit_int_info = vcpu->svm->vmcb->control.exit_int_info;
+	struct vcpu_svm *svm = to_svm(vcpu);
+	u32 exit_int_info = svm->vmcb->control.exit_int_info;
 	u64 fault_address;
 	u32 error_code;
 	enum emulation_result er;
@@ -909,8 +946,8 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	spin_lock(&vcpu->kvm->lock);
 
-	fault_address = vcpu->svm->vmcb->control.exit_info_2;
-	error_code = vcpu->svm->vmcb->control.exit_info_1;
+	fault_address = svm->vmcb->control.exit_info_2;
+	error_code = svm->vmcb->control.exit_info_1;
 	r = kvm_mmu_page_fault(vcpu, fault_address, error_code);
 	if (r < 0) {
 		spin_unlock(&vcpu->kvm->lock);
@@ -942,22 +979,25 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 static int nm_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
-	if (!(vcpu->cr0 & X86_CR0_TS))
-		vcpu->svm->vmcb->save.cr0 &= ~X86_CR0_TS;
-	vcpu->fpu_active = 1;
+	struct vcpu_svm *svm = to_svm(vcpu);
 
-	return 1;
+	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
+	if (!(vcpu->cr0 & X86_CR0_TS))
+		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
+	vcpu->fpu_active = 1;
+
+	return 1;
 }
 
 static int shutdown_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	/*
 	 * VMCB is undefined after a SHUTDOWN intercept
 	 * so reinitialize it.
 	 */
-	clear_page(vcpu->svm->vmcb);
-	init_vmcb(vcpu->svm->vmcb);
+	clear_page(svm->vmcb);
+	init_vmcb(svm->vmcb);
 
 	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
 	return 0;
@@ -967,23 +1007,24 @@ static int io_get_override(struct kvm_vcpu *vcpu,
 			  struct vmcb_seg **seg,
 			  int *addr_override)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	u8 inst[MAX_INST_SIZE];
 	unsigned ins_length;
 	gva_t rip;
 	int i;
 
-	rip = vcpu->svm->vmcb->save.rip;
-	ins_length = vcpu->svm->next_rip - rip;
-	rip += vcpu->svm->vmcb->save.cs.base;
+	rip = svm->vmcb->save.rip;
+	ins_length = svm->next_rip - rip;
+	rip += svm->vmcb->save.cs.base;
 
 	if (ins_length > MAX_INST_SIZE)
 		printk(KERN_DEBUG
 		       "%s: inst length err, cs base 0x%llx rip 0x%llx "
 		       "next rip 0x%llx ins_length %u\n",
 		       __FUNCTION__,
-		       vcpu->svm->vmcb->save.cs.base,
-		       vcpu->svm->vmcb->save.rip,
-		       vcpu->svm->vmcb->control.exit_info_2,
+		       svm->vmcb->save.cs.base,
+		       svm->vmcb->save.rip,
+		       svm->vmcb->control.exit_info_2,
 		       ins_length);
 
 	if (kvm_read_guest(vcpu, rip, ins_length, inst) != ins_length)
@@ -1003,22 +1044,22 @@ static int io_get_override(struct kvm_vcpu *vcpu,
 			*addr_override = 1;
 			continue;
 		case 0x2e:
-			*seg = &vcpu->svm->vmcb->save.cs;
+			*seg = &svm->vmcb->save.cs;
 			continue;
 		case 0x36:
-			*seg = &vcpu->svm->vmcb->save.ss;
+			*seg = &svm->vmcb->save.ss;
 			continue;
 		case 0x3e:
-			*seg = &vcpu->svm->vmcb->save.ds;
+			*seg = &svm->vmcb->save.ds;
 			continue;
 		case 0x26:
-			*seg = &vcpu->svm->vmcb->save.es;
+			*seg = &svm->vmcb->save.es;
 			continue;
 		case 0x64:
-			*seg = &vcpu->svm->vmcb->save.fs;
+			*seg = &svm->vmcb->save.fs;
 			continue;
 		case 0x65:
-			*seg = &vcpu->svm->vmcb->save.gs;
+			*seg = &svm->vmcb->save.gs;
 			continue;
 		default:
 			return 1;
@@ -1033,7 +1074,8 @@ static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, gva_t *address)
 	unsigned long *reg;
 	struct vmcb_seg *seg;
 	int addr_override;
-	struct vmcb_save_area *save_area = &vcpu->svm->vmcb->save;
+	struct vcpu_svm *svm = to_svm(vcpu);
+	struct vmcb_save_area *save_area = &svm->vmcb->save;
 	u16 cs_attrib = save_area->cs.attrib;
 	unsigned addr_size = get_addr_size(vcpu);
 
@@ -1045,16 +1087,16 @@ static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, gva_t *address)
 
 	if (ins) {
 		reg = &vcpu->regs[VCPU_REGS_RDI];
-		seg = &vcpu->svm->vmcb->save.es;
+		seg = &svm->vmcb->save.es;
 	} else {
 		reg = &vcpu->regs[VCPU_REGS_RSI];
-		seg = (seg) ? seg : &vcpu->svm->vmcb->save.ds;
+		seg = (seg) ? seg : &svm->vmcb->save.ds;
 	}
 
 	addr_mask = ~0ULL >> (64 - (addr_size * 8));
 
 	if ((cs_attrib & SVM_SELECTOR_L_MASK) &&
-	    !(vcpu->svm->vmcb->save.rflags & X86_EFLAGS_VM)) {
+	    !(svm->vmcb->save.rflags & X86_EFLAGS_VM)) {
 		*address = (*reg & addr_mask);
 		return addr_mask;
 	}
@@ -1070,7 +1112,8 @@ static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, gva_t *address)
 
 static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	u32 io_info = vcpu->svm->vmcb->control.exit_info_1; //address size bug?
+	struct vcpu_svm *svm = to_svm(vcpu);
+	u32 io_info = svm->vmcb->control.exit_info_1; //address size bug?
 	int size, down, in, string, rep;
 	unsigned port;
 	unsigned long count;
@@ -1078,7 +1121,7 @@ static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	++vcpu->stat.io_exits;
 
-	vcpu->svm->next_rip = vcpu->svm->vmcb->control.exit_info_2;
+	svm->next_rip = svm->vmcb->control.exit_info_2;
 
 	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
 	port = io_info >> 16;
@@ -1086,7 +1129,7 @@ static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	string = (io_info & SVM_IOIO_STR_MASK) != 0;
 	rep = (io_info & SVM_IOIO_REP_MASK) != 0;
 	count = 1;
-	down = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;
+	down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;
 
 	if (string) {
 		unsigned addr_mask;
@@ -1112,14 +1155,18 @@ static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1;
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	svm->next_rip = svm->vmcb->save.rip + 1;
 	skip_emulated_instruction(vcpu);
 	return kvm_emulate_halt(vcpu);
 }
 
 static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 3;
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	svm->next_rip = svm->vmcb->save.rip + 3;
 	skip_emulated_instruction(vcpu);
 	return kvm_hypercall(vcpu, kvm_run);
 }
@@ -1139,7 +1186,9 @@ static int task_switch_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 static int cpuid_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	svm->next_rip = svm->vmcb->save.rip + 2;
 	kvm_emulate_cpuid(vcpu);
 	return 1;
 }
@@ -1153,39 +1202,41 @@ static int emulate_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
+
 	switch (ecx) {
 	case MSR_IA32_TIME_STAMP_COUNTER: {
 		u64 tsc;
 
 		rdtscll(tsc);
-		*data = vcpu->svm->vmcb->control.tsc_offset + tsc;
+		*data = svm->vmcb->control.tsc_offset + tsc;
 		break;
 	}
 	case MSR_K6_STAR:
-		*data = vcpu->svm->vmcb->save.star;
+		*data = svm->vmcb->save.star;
 		break;
 #ifdef CONFIG_X86_64
 	case MSR_LSTAR:
-		*data = vcpu->svm->vmcb->save.lstar;
+		*data = svm->vmcb->save.lstar;
 		break;
 	case MSR_CSTAR:
-		*data = vcpu->svm->vmcb->save.cstar;
+		*data = svm->vmcb->save.cstar;
 		break;
 	case MSR_KERNEL_GS_BASE:
-		*data = vcpu->svm->vmcb->save.kernel_gs_base;
+		*data = svm->vmcb->save.kernel_gs_base;
 		break;
 	case MSR_SYSCALL_MASK:
-		*data = vcpu->svm->vmcb->save.sfmask;
+		*data = svm->vmcb->save.sfmask;
 		break;
 #endif
 	case MSR_IA32_SYSENTER_CS:
-		*data = vcpu->svm->vmcb->save.sysenter_cs;
+		*data = svm->vmcb->save.sysenter_cs;
 		break;
 	case MSR_IA32_SYSENTER_EIP:
-		*data = vcpu->svm->vmcb->save.sysenter_eip;
+		*data = svm->vmcb->save.sysenter_eip;
 		break;
 	case MSR_IA32_SYSENTER_ESP:
-		*data = vcpu->svm->vmcb->save.sysenter_esp;
+		*data = svm->vmcb->save.sysenter_esp;
 		break;
 	default:
 		return kvm_get_msr_common(vcpu, ecx, data);
@@ -1195,15 +1246,16 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 
 static int rdmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
 	u64 data;
 
 	if (svm_get_msr(vcpu, ecx, &data))
 		svm_inject_gp(vcpu, 0);
 	else {
-		vcpu->svm->vmcb->save.rax = data & 0xffffffff;
+		svm->vmcb->save.rax = data & 0xffffffff;
 		vcpu->regs[VCPU_REGS_RDX] = data >> 32;
-		vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
+		svm->next_rip = svm->vmcb->save.rip + 2;
 		skip_emulated_instruction(vcpu);
 	}
 	return 1;
@@ -1211,39 +1263,41 @@ static int rdmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
+
 	switch (ecx) {
 	case MSR_IA32_TIME_STAMP_COUNTER: {
 		u64 tsc;
 
 		rdtscll(tsc);
-		vcpu->svm->vmcb->control.tsc_offset = data - tsc;
+		svm->vmcb->control.tsc_offset = data - tsc;
 		break;
 	}
 	case MSR_K6_STAR:
-		vcpu->svm->vmcb->save.star = data;
+		svm->vmcb->save.star = data;
 		break;
#ifdef CONFIG_X86_64
 	case MSR_LSTAR:
-		vcpu->svm->vmcb->save.lstar = data;
+		svm->vmcb->save.lstar = data;
 		break;
 	case MSR_CSTAR:
-		vcpu->svm->vmcb->save.cstar = data;
+		svm->vmcb->save.cstar = data;
 		break;
 	case MSR_KERNEL_GS_BASE:
-		vcpu->svm->vmcb->save.kernel_gs_base = data;
+		svm->vmcb->save.kernel_gs_base = data;
 		break;
 	case MSR_SYSCALL_MASK:
-		vcpu->svm->vmcb->save.sfmask = data;
+		svm->vmcb->save.sfmask = data;
 		break;
 #endif
 	case MSR_IA32_SYSENTER_CS:
-		vcpu->svm->vmcb->save.sysenter_cs = data;
+		svm->vmcb->save.sysenter_cs = data;
 		break;
 	case MSR_IA32_SYSENTER_EIP:
-		vcpu->svm->vmcb->save.sysenter_eip = data;
+		svm->vmcb->save.sysenter_eip = data;
 		break;
 	case MSR_IA32_SYSENTER_ESP:
-		vcpu->svm->vmcb->save.sysenter_esp = data;
+		svm->vmcb->save.sysenter_esp = data;
 		break;
 	default:
 		return kvm_set_msr_common(vcpu, ecx, data);
@@ -1253,10 +1307,11 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 
 static int wrmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
-	u64 data = (vcpu->svm->vmcb->save.rax & -1u)
+	u64 data = (svm->vmcb->save.rax & -1u)
 		| ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
-	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
+	svm->next_rip = svm->vmcb->save.rip + 2;
 	if (svm_set_msr(vcpu, ecx, data))
 		svm_inject_gp(vcpu, 0);
 	else
@@ -1266,7 +1321,7 @@ static int wrmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	if (vcpu->svm->vmcb->control.exit_info_1)
+	if (to_svm(vcpu)->vmcb->control.exit_info_1)
 		return wrmsr_interception(vcpu, kvm_run);
 	else
 		return rdmsr_interception(vcpu, kvm_run);
@@ -1338,13 +1393,14 @@ static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
 
 static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	u32 exit_code = vcpu->svm->vmcb->control.exit_code;
+	struct vcpu_svm *svm = to_svm(vcpu);
+	u32 exit_code = svm->vmcb->control.exit_code;
 
-	if (is_external_interrupt(vcpu->svm->vmcb->control.exit_int_info) &&
+	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
 	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
 		printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x "
 		       "exit_code 0x%x\n",
-		       __FUNCTION__, vcpu->svm->vmcb->control.exit_int_info,
+		       __FUNCTION__, svm->vmcb->control.exit_int_info,
 		       exit_code);
 
 	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
@@ -1368,13 +1424,14 @@ static void reload_tss(struct kvm_vcpu *vcpu)
 
 static void pre_svm_run(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	int cpu = raw_smp_processor_id();
 
 	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
 
-	vcpu->svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
+	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
 	if (vcpu->cpu != cpu ||
-	    vcpu->svm->asid_generation != svm_data->asid_generation)
+	    svm->asid_generation != svm_data->asid_generation)
 		new_asid(vcpu, svm_data);
 }
 
@@ -1383,7 +1440,7 @@ static inline void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
 {
 	struct vmcb_control_area *control;
 
-	control = &vcpu->svm->vmcb->control;
+	control = &to_svm(vcpu)->vmcb->control;
 	control->int_vector = pop_irq(vcpu);
 	control->int_ctl &= ~V_INTR_PRIO_MASK;
 	control->int_ctl |= V_IRQ_MASK |
@@ -1392,7 +1449,7 @@ static inline void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
 
 static void kvm_reput_irq(struct kvm_vcpu *vcpu)
 {
-	struct vmcb_control_area *control = &vcpu->svm->vmcb->control;
+	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
 
 	if (control->int_ctl & V_IRQ_MASK) {
 		control->int_ctl &= ~V_IRQ_MASK;
@@ -1406,11 +1463,12 @@ static void kvm_reput_irq(struct kvm_vcpu *vcpu)
 static void do_interrupt_requests(struct kvm_vcpu *vcpu,
 				       struct kvm_run *kvm_run)
 {
-	struct vmcb_control_area *control = &vcpu->svm->vmcb->control;
+	struct vcpu_svm *svm = to_svm(vcpu);
+	struct vmcb_control_area *control = &svm->vmcb->control;
 
 	vcpu->interrupt_window_open =
 		(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
-		 (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));
+		 (svm->vmcb->save.rflags & X86_EFLAGS_IF));
 
 	if (vcpu->interrupt_window_open && vcpu->irq_summary)
 		/*
@@ -1431,9 +1489,11 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
 static void post_kvm_run_save(struct kvm_vcpu *vcpu,
 			      struct kvm_run *kvm_run)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
+
 	kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
 						  vcpu->irq_summary == 0);
-	kvm_run->if_flag = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
+	kvm_run->if_flag = (svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
 	kvm_run->cr8 = vcpu->cr8;
 	kvm_run->apic_base = vcpu->apic_base;
 }
@@ -1450,7 +1510,7 @@ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
 	return (!vcpu->irq_summary &&
 		kvm_run->request_interrupt_window &&
 		vcpu->interrupt_window_open &&
-		(vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));
+		(to_svm(vcpu)->vmcb->save.rflags & X86_EFLAGS_IF));
 }
 
 static void save_db_regs(unsigned long *db_regs)
@@ -1476,6 +1536,7 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu) | |||
1476 | 1536 | ||
1477 | static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 1537 | static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
1478 | { | 1538 | { |
1539 | struct vcpu_svm *svm = to_svm(vcpu); | ||
1479 | u16 fs_selector; | 1540 | u16 fs_selector; |
1480 | u16 gs_selector; | 1541 | u16 gs_selector; |
1481 | u16 ldt_selector; | 1542 | u16 ldt_selector; |
@@ -1502,15 +1563,15 @@ again: | |||
1502 | fs_selector = read_fs(); | 1563 | fs_selector = read_fs(); |
1503 | gs_selector = read_gs(); | 1564 | gs_selector = read_gs(); |
1504 | ldt_selector = read_ldt(); | 1565 | ldt_selector = read_ldt(); |
1505 | vcpu->svm->host_cr2 = kvm_read_cr2(); | 1566 | svm->host_cr2 = kvm_read_cr2(); |
1506 | vcpu->svm->host_dr6 = read_dr6(); | 1567 | svm->host_dr6 = read_dr6(); |
1507 | vcpu->svm->host_dr7 = read_dr7(); | 1568 | svm->host_dr7 = read_dr7(); |
1508 | vcpu->svm->vmcb->save.cr2 = vcpu->cr2; | 1569 | svm->vmcb->save.cr2 = vcpu->cr2; |
1509 | 1570 | ||
1510 | if (vcpu->svm->vmcb->save.dr7 & 0xff) { | 1571 | if (svm->vmcb->save.dr7 & 0xff) { |
1511 | write_dr7(0); | 1572 | write_dr7(0); |
1512 | save_db_regs(vcpu->svm->host_db_regs); | 1573 | save_db_regs(svm->host_db_regs); |
1513 | load_db_regs(vcpu->svm->db_regs); | 1574 | load_db_regs(svm->db_regs); |
1514 | } | 1575 | } |
1515 | 1576 | ||
1516 | if (vcpu->fpu_active) { | 1577 | if (vcpu->fpu_active) { |
@@ -1607,7 +1668,7 @@ again: | |||
1607 | #endif | 1668 | #endif |
1608 | : | 1669 | : |
1609 | : [vcpu]"a"(vcpu), | 1670 | : [vcpu]"a"(vcpu), |
1610 | [svm]"i"(offsetof(struct kvm_vcpu, svm)), | 1671 | [svm]"i"(offsetof(struct kvm_vcpu, _priv)), |
1611 | [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)), | 1672 | [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)), |
1612 | [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])), | 1673 | [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])), |
1613 | [rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])), | 1674 | [rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])), |
@@ -1634,14 +1695,14 @@ again: | |||
1634 | fx_restore(vcpu->host_fx_image); | 1695 | fx_restore(vcpu->host_fx_image); |
1635 | } | 1696 | } |
1636 | 1697 | ||
1637 | if ((vcpu->svm->vmcb->save.dr7 & 0xff)) | 1698 | if ((svm->vmcb->save.dr7 & 0xff)) |
1638 | load_db_regs(vcpu->svm->host_db_regs); | 1699 | load_db_regs(svm->host_db_regs); |
1639 | 1700 | ||
1640 | vcpu->cr2 = vcpu->svm->vmcb->save.cr2; | 1701 | vcpu->cr2 = svm->vmcb->save.cr2; |
1641 | 1702 | ||
1642 | write_dr6(vcpu->svm->host_dr6); | 1703 | write_dr6(svm->host_dr6); |
1643 | write_dr7(vcpu->svm->host_dr7); | 1704 | write_dr7(svm->host_dr7); |
1644 | kvm_write_cr2(vcpu->svm->host_cr2); | 1705 | kvm_write_cr2(svm->host_cr2); |
1645 | 1706 | ||
1646 | load_fs(fs_selector); | 1707 | load_fs(fs_selector); |
1647 | load_gs(gs_selector); | 1708 | load_gs(gs_selector); |
@@ -1655,18 +1716,18 @@ again: | |||
1655 | */ | 1716 | */ |
1656 | if (unlikely(prof_on == KVM_PROFILING)) | 1717 | if (unlikely(prof_on == KVM_PROFILING)) |
1657 | profile_hit(KVM_PROFILING, | 1718 | profile_hit(KVM_PROFILING, |
1658 | (void *)(unsigned long)vcpu->svm->vmcb->save.rip); | 1719 | (void *)(unsigned long)svm->vmcb->save.rip); |
1659 | 1720 | ||
1660 | stgi(); | 1721 | stgi(); |
1661 | 1722 | ||
1662 | kvm_reput_irq(vcpu); | 1723 | kvm_reput_irq(vcpu); |
1663 | 1724 | ||
1664 | vcpu->svm->next_rip = 0; | 1725 | svm->next_rip = 0; |
1665 | 1726 | ||
1666 | if (vcpu->svm->vmcb->control.exit_code == SVM_EXIT_ERR) { | 1727 | if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) { |
1667 | kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; | 1728 | kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; |
1668 | kvm_run->fail_entry.hardware_entry_failure_reason | 1729 | kvm_run->fail_entry.hardware_entry_failure_reason |
1669 | = vcpu->svm->vmcb->control.exit_code; | 1730 | = svm->vmcb->control.exit_code; |
1670 | post_kvm_run_save(vcpu, kvm_run); | 1731 | post_kvm_run_save(vcpu, kvm_run); |
1671 | return 0; | 1732 | return 0; |
1672 | } | 1733 | } |
@@ -1695,12 +1756,14 @@ again: | |||
1695 | 1756 | ||
1696 | static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root) | 1757 | static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root) |
1697 | { | 1758 | { |
1698 | vcpu->svm->vmcb->save.cr3 = root; | 1759 | struct vcpu_svm *svm = to_svm(vcpu); |
1760 | |||
1761 | svm->vmcb->save.cr3 = root; | ||
1699 | force_new_asid(vcpu); | 1762 | force_new_asid(vcpu); |
1700 | 1763 | ||
1701 | if (vcpu->fpu_active) { | 1764 | if (vcpu->fpu_active) { |
1702 | vcpu->svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR); | 1765 | svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR); |
1703 | vcpu->svm->vmcb->save.cr0 |= X86_CR0_TS; | 1766 | svm->vmcb->save.cr0 |= X86_CR0_TS; |
1704 | vcpu->fpu_active = 0; | 1767 | vcpu->fpu_active = 0; |
1705 | } | 1768 | } |
1706 | } | 1769 | } |
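svm_set_cr3() also re-arms lazy FPU switching: with CR0.TS set and #NM intercepted, the guest's next floating-point instruction traps instead of running on stale FPU state. Roughly what the matching #NM intercept handler elsewhere in this file undoes (a sketch, not part of this hunk):

	/* on #NM: hand the FPU back and stop intercepting */
	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
	svm->vmcb->save.cr0 &= ~X86_CR0_TS;
	vcpu->fpu_active = 1;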
@@ -1709,26 +1772,27 @@ static void svm_inject_page_fault(struct kvm_vcpu *vcpu, | |||
1709 | unsigned long addr, | 1772 | unsigned long addr, |
1710 | uint32_t err_code) | 1773 | uint32_t err_code) |
1711 | { | 1774 | { |
1712 | uint32_t exit_int_info = vcpu->svm->vmcb->control.exit_int_info; | 1775 | struct vcpu_svm *svm = to_svm(vcpu); |
1776 | uint32_t exit_int_info = svm->vmcb->control.exit_int_info; | ||
1713 | 1777 | ||
1714 | ++vcpu->stat.pf_guest; | 1778 | ++vcpu->stat.pf_guest; |
1715 | 1779 | ||
1716 | if (is_page_fault(exit_int_info)) { | 1780 | if (is_page_fault(exit_int_info)) { |
1717 | 1781 | ||
1718 | vcpu->svm->vmcb->control.event_inj_err = 0; | 1782 | svm->vmcb->control.event_inj_err = 0; |
1719 | vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | | 1783 | svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | |
1720 | SVM_EVTINJ_VALID_ERR | | 1784 | SVM_EVTINJ_VALID_ERR | |
1721 | SVM_EVTINJ_TYPE_EXEPT | | 1785 | SVM_EVTINJ_TYPE_EXEPT | |
1722 | DF_VECTOR; | 1786 | DF_VECTOR; |
1723 | return; | 1787 | return; |
1724 | } | 1788 | } |
1725 | vcpu->cr2 = addr; | 1789 | vcpu->cr2 = addr; |
1726 | vcpu->svm->vmcb->save.cr2 = addr; | 1790 | svm->vmcb->save.cr2 = addr; |
1727 | vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | | 1791 | svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | |
1728 | SVM_EVTINJ_VALID_ERR | | 1792 | SVM_EVTINJ_VALID_ERR | |
1729 | SVM_EVTINJ_TYPE_EXEPT | | 1793 | SVM_EVTINJ_TYPE_EXEPT | |
1730 | PF_VECTOR; | 1794 | PF_VECTOR; |
1731 | vcpu->svm->vmcb->control.event_inj_err = err_code; | 1795 | svm->vmcb->control.event_inj_err = err_code; |
1732 | } | 1796 | } |
1733 | 1797 | ||
1734 | 1798 | ||
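svm_inject_page_fault() inspects exit_int_info to see whether this fault arrived while another page fault was already being delivered; by the x86 exception-classification rules that combination escalates to a double fault, so #DF is injected with a zero error code instead of a nested #PF. Both paths build the same injection word apart from the vector (sketch; `vec` stands in for DF_VECTOR or PF_VECTOR):

	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
				       SVM_EVTINJ_VALID_ERR |
				       SVM_EVTINJ_TYPE_EXEPT | vec;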
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c index dac2f93d1a07..96837d6ed50b 100644 --- a/drivers/kvm/vmx.c +++ b/drivers/kvm/vmx.c | |||
@@ -32,6 +32,37 @@ | |||
32 | MODULE_AUTHOR("Qumranet"); | 32 | MODULE_AUTHOR("Qumranet"); |
33 | MODULE_LICENSE("GPL"); | 33 | MODULE_LICENSE("GPL"); |
34 | 34 | ||
35 | struct vmcs { | ||
36 | u32 revision_id; | ||
37 | u32 abort; | ||
38 | char data[0]; | ||
39 | }; | ||
40 | |||
41 | struct vcpu_vmx { | ||
42 | struct kvm_vcpu *vcpu; | ||
43 | int launched; | ||
44 | struct kvm_msr_entry *guest_msrs; | ||
45 | struct kvm_msr_entry *host_msrs; | ||
46 | int nmsrs; | ||
47 | int save_nmsrs; | ||
48 | int msr_offset_efer; | ||
49 | #ifdef CONFIG_X86_64 | ||
50 | int msr_offset_kernel_gs_base; | ||
51 | #endif | ||
52 | struct vmcs *vmcs; | ||
53 | struct { | ||
54 | int loaded; | ||
55 | u16 fs_sel, gs_sel, ldt_sel; | ||
56 | int fs_gs_ldt_reload_needed; | ||
57 | } host_state; | ||
58 | |||
59 | }; | ||
60 | |||
61 | static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) | ||
62 | { | ||
63 | return (struct vcpu_vmx *)vcpu->_priv; | ||
64 | } | ||
65 | |||
35 | static int init_rmode_tss(struct kvm *kvm); | 66 | static int init_rmode_tss(struct kvm *kvm); |
36 | 67 | ||
37 | static DEFINE_PER_CPU(struct vmcs *, vmxarea); | 68 | static DEFINE_PER_CPU(struct vmcs *, vmxarea); |
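This hunk is the core of the VMX conversion: everything VMX-only moves off the shared struct and into vcpu_vmx, reachable solely through the opaque `_priv` pointer via to_vmx(). Every handler below now opens the same way; a hypothetical example of the pattern:

	static void vmx_example(struct kvm_vcpu *vcpu)
	{
		struct vcpu_vmx *vmx = to_vmx(vcpu);

		if (vmx->launched)		/* VMX state lives here now, */
			vmcs_clear(vmx->vmcs);	/* not on struct kvm_vcpu */
	}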
@@ -89,16 +120,33 @@ static const u32 vmx_msr_index[] = { | |||
89 | }; | 120 | }; |
90 | #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index) | 121 | #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index) |
91 | 122 | ||
92 | static inline u64 msr_efer_save_restore_bits(struct vmx_msr_entry msr) | 123 | static void load_msrs(struct kvm_msr_entry *e, int n) |
124 | { | ||
125 | int i; | ||
126 | |||
127 | for (i = 0; i < n; ++i) | ||
128 | wrmsrl(e[i].index, e[i].data); | ||
129 | } | ||
130 | |||
131 | static void save_msrs(struct kvm_msr_entry *e, int n) | ||
132 | { | ||
133 | int i; | ||
134 | |||
135 | for (i = 0; i < n; ++i) | ||
136 | rdmsrl(e[i].index, e[i].data); | ||
137 | } | ||
138 | |||
139 | static inline u64 msr_efer_save_restore_bits(struct kvm_msr_entry msr) | ||
93 | { | 140 | { |
94 | return (u64)msr.data & EFER_SAVE_RESTORE_BITS; | 141 | return (u64)msr.data & EFER_SAVE_RESTORE_BITS; |
95 | } | 142 | } |
96 | 143 | ||
97 | static inline int msr_efer_need_save_restore(struct kvm_vcpu *vcpu) | 144 | static inline int msr_efer_need_save_restore(struct kvm_vcpu *vcpu) |
98 | { | 145 | { |
99 | int efer_offset = vcpu->msr_offset_efer; | 146 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
100 | return msr_efer_save_restore_bits(vcpu->host_msrs[efer_offset]) != | 147 | int efer_offset = vmx->msr_offset_efer; |
101 | msr_efer_save_restore_bits(vcpu->guest_msrs[efer_offset]); | 148 | return msr_efer_save_restore_bits(vmx->host_msrs[efer_offset]) != |
149 | msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]); | ||
102 | } | 150 | } |
103 | 151 | ||
104 | static inline int is_page_fault(u32 intr_info) | 152 | static inline int is_page_fault(u32 intr_info) |
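msr_efer_need_save_restore() compares only EFER_SAVE_RESTORE_BITS, so the EFER write on world switch is skipped whenever host and guest already agree in those bits. The transition value built by load_transition_efer() in the next hunk is simply the host EFER with the guest's save/restore bits spliced in:

	u64 host  = vmx->host_msrs[efer_offset].data;
	u64 guest = vmx->guest_msrs[efer_offset].data;
	u64 trans = (host & ~EFER_SAVE_RESTORE_BITS) |
		    (guest & EFER_SAVE_RESTORE_BITS);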
@@ -123,21 +171,23 @@ static inline int is_external_interrupt(u32 intr_info) | |||
123 | 171 | ||
124 | static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr) | 172 | static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr) |
125 | { | 173 | { |
174 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
126 | int i; | 175 | int i; |
127 | 176 | ||
128 | for (i = 0; i < vcpu->nmsrs; ++i) | 177 | for (i = 0; i < vmx->nmsrs; ++i) |
129 | if (vcpu->guest_msrs[i].index == msr) | 178 | if (vmx->guest_msrs[i].index == msr) |
130 | return i; | 179 | return i; |
131 | return -1; | 180 | return -1; |
132 | } | 181 | } |
133 | 182 | ||
134 | static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr) | 183 | static struct kvm_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr) |
135 | { | 184 | { |
185 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
136 | int i; | 186 | int i; |
137 | 187 | ||
138 | i = __find_msr_index(vcpu, msr); | 188 | i = __find_msr_index(vcpu, msr); |
139 | if (i >= 0) | 189 | if (i >= 0) |
140 | return &vcpu->guest_msrs[i]; | 190 | return &vmx->guest_msrs[i]; |
141 | return NULL; | 191 | return NULL; |
142 | } | 192 | } |
143 | 193 | ||
@@ -157,11 +207,12 @@ static void vmcs_clear(struct vmcs *vmcs) | |||
157 | static void __vcpu_clear(void *arg) | 207 | static void __vcpu_clear(void *arg) |
158 | { | 208 | { |
159 | struct kvm_vcpu *vcpu = arg; | 209 | struct kvm_vcpu *vcpu = arg; |
210 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
160 | int cpu = raw_smp_processor_id(); | 211 | int cpu = raw_smp_processor_id(); |
161 | 212 | ||
162 | if (vcpu->cpu == cpu) | 213 | if (vcpu->cpu == cpu) |
163 | vmcs_clear(vcpu->vmcs); | 214 | vmcs_clear(vmx->vmcs); |
164 | if (per_cpu(current_vmcs, cpu) == vcpu->vmcs) | 215 | if (per_cpu(current_vmcs, cpu) == vmx->vmcs) |
165 | per_cpu(current_vmcs, cpu) = NULL; | 216 | per_cpu(current_vmcs, cpu) = NULL; |
166 | rdtscll(vcpu->host_tsc); | 217 | rdtscll(vcpu->host_tsc); |
167 | } | 218 | } |
@@ -172,7 +223,7 @@ static void vcpu_clear(struct kvm_vcpu *vcpu) | |||
172 | smp_call_function_single(vcpu->cpu, __vcpu_clear, vcpu, 0, 1); | 223 | smp_call_function_single(vcpu->cpu, __vcpu_clear, vcpu, 0, 1); |
173 | else | 224 | else |
174 | __vcpu_clear(vcpu); | 225 | __vcpu_clear(vcpu); |
175 | vcpu->launched = 0; | 226 | to_vmx(vcpu)->launched = 0; |
176 | } | 227 | } |
177 | 228 | ||
178 | static unsigned long vmcs_readl(unsigned long field) | 229 | static unsigned long vmcs_readl(unsigned long field) |
@@ -285,80 +336,81 @@ static void reload_tss(void) | |||
285 | static void load_transition_efer(struct kvm_vcpu *vcpu) | 336 | static void load_transition_efer(struct kvm_vcpu *vcpu) |
286 | { | 337 | { |
287 | u64 trans_efer; | 338 | u64 trans_efer; |
288 | int efer_offset = vcpu->msr_offset_efer; | 339 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
340 | int efer_offset = vmx->msr_offset_efer; | ||
289 | 341 | ||
290 | trans_efer = vcpu->host_msrs[efer_offset].data; | 342 | trans_efer = vmx->host_msrs[efer_offset].data; |
291 | trans_efer &= ~EFER_SAVE_RESTORE_BITS; | 343 | trans_efer &= ~EFER_SAVE_RESTORE_BITS; |
292 | trans_efer |= msr_efer_save_restore_bits( | 344 | trans_efer |= msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]); |
293 | vcpu->guest_msrs[efer_offset]); | ||
294 | wrmsrl(MSR_EFER, trans_efer); | 345 | wrmsrl(MSR_EFER, trans_efer); |
295 | vcpu->stat.efer_reload++; | 346 | vcpu->stat.efer_reload++; |
296 | } | 347 | } |
297 | 348 | ||
298 | static void vmx_save_host_state(struct kvm_vcpu *vcpu) | 349 | static void vmx_save_host_state(struct kvm_vcpu *vcpu) |
299 | { | 350 | { |
300 | struct vmx_host_state *hs = &vcpu->vmx_host_state; | 351 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
301 | 352 | ||
302 | if (hs->loaded) | 353 | if (vmx->host_state.loaded) |
303 | return; | 354 | return; |
304 | 355 | ||
305 | hs->loaded = 1; | 356 | vmx->host_state.loaded = 1; |
306 | /* | 357 | /* |
307 | * Set host fs and gs selectors. Unfortunately, 22.2.3 does not | 358 | * Set host fs and gs selectors. Unfortunately, 22.2.3 does not |
308 | * allow segment selectors with cpl > 0 or ti == 1. | 359 | * allow segment selectors with cpl > 0 or ti == 1. |
309 | */ | 360 | */ |
310 | hs->ldt_sel = read_ldt(); | 361 | vmx->host_state.ldt_sel = read_ldt(); |
311 | hs->fs_gs_ldt_reload_needed = hs->ldt_sel; | 362 | vmx->host_state.fs_gs_ldt_reload_needed = vmx->host_state.ldt_sel; |
312 | hs->fs_sel = read_fs(); | 363 | vmx->host_state.fs_sel = read_fs(); |
313 | if (!(hs->fs_sel & 7)) | 364 | if (!(vmx->host_state.fs_sel & 7)) |
314 | vmcs_write16(HOST_FS_SELECTOR, hs->fs_sel); | 365 | vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); |
315 | else { | 366 | else { |
316 | vmcs_write16(HOST_FS_SELECTOR, 0); | 367 | vmcs_write16(HOST_FS_SELECTOR, 0); |
317 | hs->fs_gs_ldt_reload_needed = 1; | 368 | vmx->host_state.fs_gs_ldt_reload_needed = 1; |
318 | } | 369 | } |
319 | hs->gs_sel = read_gs(); | 370 | vmx->host_state.gs_sel = read_gs(); |
320 | if (!(hs->gs_sel & 7)) | 371 | if (!(vmx->host_state.gs_sel & 7)) |
321 | vmcs_write16(HOST_GS_SELECTOR, hs->gs_sel); | 372 | vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel); |
322 | else { | 373 | else { |
323 | vmcs_write16(HOST_GS_SELECTOR, 0); | 374 | vmcs_write16(HOST_GS_SELECTOR, 0); |
324 | hs->fs_gs_ldt_reload_needed = 1; | 375 | vmx->host_state.fs_gs_ldt_reload_needed = 1; |
325 | } | 376 | } |
326 | 377 | ||
327 | #ifdef CONFIG_X86_64 | 378 | #ifdef CONFIG_X86_64 |
328 | vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE)); | 379 | vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE)); |
329 | vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE)); | 380 | vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE)); |
330 | #else | 381 | #else |
331 | vmcs_writel(HOST_FS_BASE, segment_base(hs->fs_sel)); | 382 | vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel)); |
332 | vmcs_writel(HOST_GS_BASE, segment_base(hs->gs_sel)); | 383 | vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel)); |
333 | #endif | 384 | #endif |
334 | 385 | ||
335 | #ifdef CONFIG_X86_64 | 386 | #ifdef CONFIG_X86_64 |
336 | if (is_long_mode(vcpu)) { | 387 | if (is_long_mode(vcpu)) { |
337 | save_msrs(vcpu->host_msrs + vcpu->msr_offset_kernel_gs_base, 1); | 388 | save_msrs(vmx->host_msrs + |
389 | vmx->msr_offset_kernel_gs_base, 1); | ||
338 | } | 390 | } |
339 | #endif | 391 | #endif |
340 | load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs); | 392 | load_msrs(vmx->guest_msrs, vmx->save_nmsrs); |
341 | if (msr_efer_need_save_restore(vcpu)) | 393 | if (msr_efer_need_save_restore(vcpu)) |
342 | load_transition_efer(vcpu); | 394 | load_transition_efer(vcpu); |
343 | } | 395 | } |
344 | 396 | ||
345 | static void vmx_load_host_state(struct kvm_vcpu *vcpu) | 397 | static void vmx_load_host_state(struct kvm_vcpu *vcpu) |
346 | { | 398 | { |
347 | struct vmx_host_state *hs = &vcpu->vmx_host_state; | 399 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
348 | 400 | ||
349 | if (!hs->loaded) | 401 | if (!vmx->host_state.loaded) |
350 | return; | 402 | return; |
351 | 403 | ||
352 | hs->loaded = 0; | 404 | vmx->host_state.loaded = 0; |
353 | if (hs->fs_gs_ldt_reload_needed) { | 405 | if (vmx->host_state.fs_gs_ldt_reload_needed) { |
354 | load_ldt(hs->ldt_sel); | 406 | load_ldt(vmx->host_state.ldt_sel); |
355 | load_fs(hs->fs_sel); | 407 | load_fs(vmx->host_state.fs_sel); |
356 | /* | 408 | /* |
357 | * If we have to reload gs, we must take care to | 409 | * If we have to reload gs, we must take care to |
358 | * preserve our gs base. | 410 | * preserve our gs base. |
359 | */ | 411 | */ |
360 | local_irq_disable(); | 412 | local_irq_disable(); |
361 | load_gs(hs->gs_sel); | 413 | load_gs(vmx->host_state.gs_sel); |
362 | #ifdef CONFIG_X86_64 | 414 | #ifdef CONFIG_X86_64 |
363 | wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE)); | 415 | wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE)); |
364 | #endif | 416 | #endif |
@@ -366,10 +418,10 @@ static void vmx_load_host_state(struct kvm_vcpu *vcpu) | |||
366 | 418 | ||
367 | reload_tss(); | 419 | reload_tss(); |
368 | } | 420 | } |
369 | save_msrs(vcpu->guest_msrs, vcpu->save_nmsrs); | 421 | save_msrs(vmx->guest_msrs, vmx->save_nmsrs); |
370 | load_msrs(vcpu->host_msrs, vcpu->save_nmsrs); | 422 | load_msrs(vmx->host_msrs, vmx->save_nmsrs); |
371 | if (msr_efer_need_save_restore(vcpu)) | 423 | if (msr_efer_need_save_restore(vcpu)) |
372 | load_msrs(vcpu->host_msrs + vcpu->msr_offset_efer, 1); | 424 | load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1); |
373 | } | 425 | } |
374 | 426 | ||
375 | /* | 427 | /* |
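The `& 7` tests in vmx_save_host_state() isolate a selector's low three bits: RPL in bits 0-1 and TI in bit 2. HOST_FS_SELECTOR/HOST_GS_SELECTOR accept only RPL-0 GDT selectors, which is exactly the (sel & 7) == 0 case; anything else is written as 0 into the VMCS and flagged for a manual reload on the way back to the host. As a sketch (hypothetical helper name):

	static inline int vmcs_host_sel_ok(u16 sel)
	{
		return (sel & 7) == 0;	/* RPL == 0 and TI == 0 (GDT) */
	}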
@@ -378,7 +430,8 @@ static void vmx_load_host_state(struct kvm_vcpu *vcpu) | |||
378 | */ | 430 | */ |
379 | static void vmx_vcpu_load(struct kvm_vcpu *vcpu) | 431 | static void vmx_vcpu_load(struct kvm_vcpu *vcpu) |
380 | { | 432 | { |
381 | u64 phys_addr = __pa(vcpu->vmcs); | 433 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
434 | u64 phys_addr = __pa(vmx->vmcs); | ||
382 | int cpu; | 435 | int cpu; |
383 | u64 tsc_this, delta; | 436 | u64 tsc_this, delta; |
384 | 437 | ||
@@ -387,16 +440,16 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu) | |||
387 | if (vcpu->cpu != cpu) | 440 | if (vcpu->cpu != cpu) |
388 | vcpu_clear(vcpu); | 441 | vcpu_clear(vcpu); |
389 | 442 | ||
390 | if (per_cpu(current_vmcs, cpu) != vcpu->vmcs) { | 443 | if (per_cpu(current_vmcs, cpu) != vmx->vmcs) { |
391 | u8 error; | 444 | u8 error; |
392 | 445 | ||
393 | per_cpu(current_vmcs, cpu) = vcpu->vmcs; | 446 | per_cpu(current_vmcs, cpu) = vmx->vmcs; |
394 | asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0" | 447 | asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0" |
395 | : "=g"(error) : "a"(&phys_addr), "m"(phys_addr) | 448 | : "=g"(error) : "a"(&phys_addr), "m"(phys_addr) |
396 | : "cc"); | 449 | : "cc"); |
397 | if (error) | 450 | if (error) |
398 | printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n", | 451 | printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n", |
399 | vcpu->vmcs, phys_addr); | 452 | vmx->vmcs, phys_addr); |
400 | } | 453 | } |
401 | 454 | ||
402 | if (vcpu->cpu != cpu) { | 455 | if (vcpu->cpu != cpu) { |
@@ -503,13 +556,15 @@ static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code) | |||
503 | */ | 556 | */ |
504 | void move_msr_up(struct kvm_vcpu *vcpu, int from, int to) | 557 | void move_msr_up(struct kvm_vcpu *vcpu, int from, int to) |
505 | { | 558 | { |
506 | struct vmx_msr_entry tmp; | 559 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
507 | tmp = vcpu->guest_msrs[to]; | 560 | struct kvm_msr_entry tmp; |
508 | vcpu->guest_msrs[to] = vcpu->guest_msrs[from]; | 561 | |
509 | vcpu->guest_msrs[from] = tmp; | 562 | tmp = vmx->guest_msrs[to]; |
510 | tmp = vcpu->host_msrs[to]; | 563 | vmx->guest_msrs[to] = vmx->guest_msrs[from]; |
511 | vcpu->host_msrs[to] = vcpu->host_msrs[from]; | 564 | vmx->guest_msrs[from] = tmp; |
512 | vcpu->host_msrs[from] = tmp; | 565 | tmp = vmx->host_msrs[to]; |
566 | vmx->host_msrs[to] = vmx->host_msrs[from]; | ||
567 | vmx->host_msrs[from] = tmp; | ||
513 | } | 568 | } |
514 | 569 | ||
515 | /* | 570 | /* |
@@ -519,6 +574,7 @@ void move_msr_up(struct kvm_vcpu *vcpu, int from, int to) | |||
519 | */ | 574 | */ |
520 | static void setup_msrs(struct kvm_vcpu *vcpu) | 575 | static void setup_msrs(struct kvm_vcpu *vcpu) |
521 | { | 576 | { |
577 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
522 | int save_nmsrs; | 578 | int save_nmsrs; |
523 | 579 | ||
524 | save_nmsrs = 0; | 580 | save_nmsrs = 0; |
@@ -547,13 +603,13 @@ static void setup_msrs(struct kvm_vcpu *vcpu) | |||
547 | move_msr_up(vcpu, index, save_nmsrs++); | 603 | move_msr_up(vcpu, index, save_nmsrs++); |
548 | } | 604 | } |
549 | #endif | 605 | #endif |
550 | vcpu->save_nmsrs = save_nmsrs; | 606 | vmx->save_nmsrs = save_nmsrs; |
551 | 607 | ||
552 | #ifdef CONFIG_X86_64 | 608 | #ifdef CONFIG_X86_64 |
553 | vcpu->msr_offset_kernel_gs_base = | 609 | vmx->msr_offset_kernel_gs_base = |
554 | __find_msr_index(vcpu, MSR_KERNEL_GS_BASE); | 610 | __find_msr_index(vcpu, MSR_KERNEL_GS_BASE); |
555 | #endif | 611 | #endif |
556 | vcpu->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER); | 612 | vmx->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER); |
557 | } | 613 | } |
558 | 614 | ||
559 | /* | 615 | /* |
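setup_msrs() uses move_msr_up() to compact the MSRs that truly need per-switch handling to the front of both arrays and records the count in save_nmsrs; the hot paths then touch only that prefix. The invariant the rest of the file relies on, as a sketch:

	/* after setup_msrs(): entries [0, save_nmsrs) are the per-switch set */
	load_msrs(vmx->guest_msrs, vmx->save_nmsrs);	/* entering the guest */
	save_msrs(vmx->guest_msrs, vmx->save_nmsrs);	/* returning to host */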
@@ -589,7 +645,7 @@ static void guest_write_tsc(u64 guest_tsc) | |||
589 | static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) | 645 | static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) |
590 | { | 646 | { |
591 | u64 data; | 647 | u64 data; |
592 | struct vmx_msr_entry *msr; | 648 | struct kvm_msr_entry *msr; |
593 | 649 | ||
594 | if (!pdata) { | 650 | if (!pdata) { |
595 | printk(KERN_ERR "BUG: get_msr called with NULL pdata\n"); | 651 | printk(KERN_ERR "BUG: get_msr called with NULL pdata\n"); |
@@ -639,14 +695,15 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) | |||
639 | */ | 695 | */ |
640 | static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) | 696 | static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) |
641 | { | 697 | { |
642 | struct vmx_msr_entry *msr; | 698 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
699 | struct kvm_msr_entry *msr; | ||
643 | int ret = 0; | 700 | int ret = 0; |
644 | 701 | ||
645 | switch (msr_index) { | 702 | switch (msr_index) { |
646 | #ifdef CONFIG_X86_64 | 703 | #ifdef CONFIG_X86_64 |
647 | case MSR_EFER: | 704 | case MSR_EFER: |
648 | ret = kvm_set_msr_common(vcpu, msr_index, data); | 705 | ret = kvm_set_msr_common(vcpu, msr_index, data); |
649 | if (vcpu->vmx_host_state.loaded) | 706 | if (vmx->host_state.loaded) |
650 | load_transition_efer(vcpu); | 707 | load_transition_efer(vcpu); |
651 | break; | 708 | break; |
652 | case MSR_FS_BASE: | 709 | case MSR_FS_BASE: |
@@ -672,8 +729,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) | |||
672 | msr = find_msr_entry(vcpu, msr_index); | 729 | msr = find_msr_entry(vcpu, msr_index); |
673 | if (msr) { | 730 | if (msr) { |
674 | msr->data = data; | 731 | msr->data = data; |
675 | if (vcpu->vmx_host_state.loaded) | 732 | if (vmx->host_state.loaded) |
676 | load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs); | 733 | load_msrs(vmx->guest_msrs, vmx->save_nmsrs); |
677 | break; | 734 | break; |
678 | } | 735 | } |
679 | ret = kvm_set_msr_common(vcpu, msr_index, data); | 736 | ret = kvm_set_msr_common(vcpu, msr_index, data); |
@@ -1053,7 +1110,7 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) | |||
1053 | 1110 | ||
1054 | static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) | 1111 | static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) |
1055 | { | 1112 | { |
1056 | struct vmx_msr_entry *msr = find_msr_entry(vcpu, MSR_EFER); | 1113 | struct kvm_msr_entry *msr = find_msr_entry(vcpu, MSR_EFER); |
1057 | 1114 | ||
1058 | vcpu->shadow_efer = efer; | 1115 | vcpu->shadow_efer = efer; |
1059 | if (efer & EFER_LMA) { | 1116 | if (efer & EFER_LMA) { |
@@ -1244,6 +1301,7 @@ static void seg_setup(int seg) | |||
1244 | */ | 1301 | */ |
1245 | static int vmx_vcpu_setup(struct kvm_vcpu *vcpu) | 1302 | static int vmx_vcpu_setup(struct kvm_vcpu *vcpu) |
1246 | { | 1303 | { |
1304 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
1247 | u32 host_sysenter_cs; | 1305 | u32 host_sysenter_cs; |
1248 | u32 junk; | 1306 | u32 junk; |
1249 | unsigned long a; | 1307 | unsigned long a; |
@@ -1385,18 +1443,18 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu) | |||
1385 | u32 index = vmx_msr_index[i]; | 1443 | u32 index = vmx_msr_index[i]; |
1386 | u32 data_low, data_high; | 1444 | u32 data_low, data_high; |
1387 | u64 data; | 1445 | u64 data; |
1388 | int j = vcpu->nmsrs; | 1446 | int j = vmx->nmsrs; |
1389 | 1447 | ||
1390 | if (rdmsr_safe(index, &data_low, &data_high) < 0) | 1448 | if (rdmsr_safe(index, &data_low, &data_high) < 0) |
1391 | continue; | 1449 | continue; |
1392 | if (wrmsr_safe(index, data_low, data_high) < 0) | 1450 | if (wrmsr_safe(index, data_low, data_high) < 0) |
1393 | continue; | 1451 | continue; |
1394 | data = data_low | ((u64)data_high << 32); | 1452 | data = data_low | ((u64)data_high << 32); |
1395 | vcpu->host_msrs[j].index = index; | 1453 | vmx->host_msrs[j].index = index; |
1396 | vcpu->host_msrs[j].reserved = 0; | 1454 | vmx->host_msrs[j].reserved = 0; |
1397 | vcpu->host_msrs[j].data = data; | 1455 | vmx->host_msrs[j].data = data; |
1398 | vcpu->guest_msrs[j] = vcpu->host_msrs[j]; | 1456 | vmx->guest_msrs[j] = vmx->host_msrs[j]; |
1399 | ++vcpu->nmsrs; | 1457 | ++vmx->nmsrs; |
1400 | } | 1458 | } |
1401 | 1459 | ||
1402 | setup_msrs(vcpu); | 1460 | setup_msrs(vcpu); |
@@ -1999,6 +2057,7 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu) | |||
1999 | 2057 | ||
2000 | static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 2058 | static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
2001 | { | 2059 | { |
2060 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
2002 | u8 fail; | 2061 | u8 fail; |
2003 | int r; | 2062 | int r; |
2004 | 2063 | ||
@@ -2123,7 +2182,7 @@ again: | |||
2123 | #endif | 2182 | #endif |
2124 | "setbe %0 \n\t" | 2183 | "setbe %0 \n\t" |
2125 | : "=q" (fail) | 2184 | : "=q" (fail) |
2126 | : "r"(vcpu->launched), "d"((unsigned long)HOST_RSP), | 2185 | : "r"(vmx->launched), "d"((unsigned long)HOST_RSP), |
2127 | "c"(vcpu), | 2186 | "c"(vcpu), |
2128 | [rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])), | 2187 | [rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])), |
2129 | [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])), | 2188 | [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])), |
@@ -2167,7 +2226,7 @@ again: | |||
2167 | if (unlikely(prof_on == KVM_PROFILING)) | 2226 | if (unlikely(prof_on == KVM_PROFILING)) |
2168 | profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP)); | 2227 | profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP)); |
2169 | 2228 | ||
2170 | vcpu->launched = 1; | 2229 | vmx->launched = 1; |
2171 | r = kvm_handle_exit(kvm_run, vcpu); | 2230 | r = kvm_handle_exit(kvm_run, vcpu); |
2172 | if (r > 0) { | 2231 | if (r > 0) { |
2173 | /* Give scheduler a chance to reschedule. */ | 2232 | /* Give scheduler a chance to reschedule. */ |
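`launched` also moves into vcpu_vmx. The (elided) entry asm uses it to choose between VMLAUNCH, which the architecture requires the first time a cleared VMCS is run, and VMRESUME for every entry after that; vcpu_clear() resetting it to 0 is what forces a fresh VMLAUNCH once the VMCS has been cleared or migrated. A C-level sketch of the choice (illustrative only; the real dispatch sits inside the inline asm):

	static const char *vmx_entry_insn(struct vcpu_vmx *vmx)
	{
		return vmx->launched ? "vmresume" : "vmlaunch";
	}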
@@ -2232,10 +2291,12 @@ static void vmx_inject_page_fault(struct kvm_vcpu *vcpu, | |||
2232 | 2291 | ||
2233 | static void vmx_free_vmcs(struct kvm_vcpu *vcpu) | 2292 | static void vmx_free_vmcs(struct kvm_vcpu *vcpu) |
2234 | { | 2293 | { |
2235 | if (vcpu->vmcs) { | 2294 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
2295 | |||
2296 | if (vmx->vmcs) { | ||
2236 | on_each_cpu(__vcpu_clear, vcpu, 0, 1); | 2297 | on_each_cpu(__vcpu_clear, vcpu, 0, 1); |
2237 | free_vmcs(vcpu->vmcs); | 2298 | free_vmcs(vmx->vmcs); |
2238 | vcpu->vmcs = NULL; | 2299 | vmx->vmcs = NULL; |
2239 | } | 2300 | } |
2240 | } | 2301 | } |
2241 | 2302 | ||
@@ -2246,33 +2307,39 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu) | |||
2246 | 2307 | ||
2247 | static int vmx_create_vcpu(struct kvm_vcpu *vcpu) | 2308 | static int vmx_create_vcpu(struct kvm_vcpu *vcpu) |
2248 | { | 2309 | { |
2249 | struct vmcs *vmcs; | 2310 | struct vcpu_vmx *vmx; |
2250 | 2311 | ||
2251 | vcpu->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); | 2312 | vmx = kzalloc(sizeof(*vmx), GFP_KERNEL); |
2252 | if (!vcpu->guest_msrs) | 2313 | if (!vmx) |
2253 | return -ENOMEM; | 2314 | return -ENOMEM; |
2254 | 2315 | ||
2255 | vcpu->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); | 2316 | vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); |
2256 | if (!vcpu->host_msrs) | 2317 | if (!vmx->guest_msrs) |
2257 | goto out_free_guest_msrs; | 2318 | goto out_free; |
2258 | 2319 | ||
2259 | vmcs = alloc_vmcs(); | 2320 | vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); |
2260 | if (!vmcs) | 2321 | if (!vmx->host_msrs) |
2261 | goto out_free_msrs; | 2322 | goto out_free; |
2262 | 2323 | ||
2263 | vmcs_clear(vmcs); | 2324 | vmx->vmcs = alloc_vmcs(); |
2264 | vcpu->vmcs = vmcs; | 2325 | if (!vmx->vmcs) |
2265 | vcpu->launched = 0; | 2326 | goto out_free; |
2327 | |||
2328 | vmcs_clear(vmx->vmcs); | ||
2329 | |||
2330 | vmx->vcpu = vcpu; | ||
2331 | vcpu->_priv = vmx; | ||
2266 | 2332 | ||
2267 | return 0; | 2333 | return 0; |
2268 | 2334 | ||
2269 | out_free_msrs: | 2335 | out_free: |
2270 | kfree(vcpu->host_msrs); | 2336 | if (vmx->host_msrs) |
2271 | vcpu->host_msrs = NULL; | 2337 | kfree(vmx->host_msrs); |
2338 | |||
2339 | if (vmx->guest_msrs) | ||
2340 | kfree(vmx->guest_msrs); | ||
2272 | 2341 | ||
2273 | out_free_guest_msrs: | 2342 | kfree(vmx); |
2274 | kfree(vcpu->guest_msrs); | ||
2275 | vcpu->guest_msrs = NULL; | ||
2276 | 2343 | ||
2277 | return -ENOMEM; | 2344 | return -ENOMEM; |
2278 | } | 2345 | } |
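The rewritten vmx_create_vcpu() leans on kzalloc(): every pointer in a fresh vcpu_vmx starts out NULL, so one out_free label can release whatever subset was actually allocated. The `if` guards before kfree() are redundant but harmless, since kfree(NULL) is a no-op; the error path could equally read:

	out_free:
		kfree(vmx->host_msrs);	/* kfree(NULL) is a no-op */
		kfree(vmx->guest_msrs);
		kfree(vmx);
		return -ENOMEM;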