Diffstat (limited to 'drivers/kvm/svm.c')
-rw-r--r--	drivers/kvm/svm.c	394
1 file changed, 229 insertions, 165 deletions
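The patch below threads one idea through the whole file: the SVM-private vcpu state moves off the generic `struct kvm_vcpu` (the old `vcpu->svm` pointer) and behind an opaque `_priv` pointer, recovered everywhere through a new `to_svm()` accessor. A minimal sketch of the pattern, with the surrounding structures reduced to stand-ins for the fields this diff actually touches (the real definitions in drivers/kvm/ carry many more members):

	/* Sketch only: trimmed-down stand-ins for the kvm structures. */
	struct vmcb;

	struct kvm_vcpu {
		void *_priv;            /* arch-private state, set at vcpu creation */
		/* ... generic registers, stats, mmu state ... */
	};

	struct vcpu_svm {
		struct vmcb *vmcb;      /* guest control block */
		unsigned long vmcb_pa;
		struct kvm_vcpu *vcpu;  /* back-pointer installed by svm_create_vcpu() */
	};

	static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
	{
		return (struct vcpu_svm *)vcpu->_priv;
	}

Handlers that touch the VMCB more than once cache the result in a local (`struct vcpu_svm *svm = to_svm(vcpu);`); one-shot accessors simply call `to_svm(vcpu)` inline.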
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index 850a1b1d86c5..32481876d98b 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -49,6 +49,11 @@ MODULE_LICENSE("GPL");
 #define SVM_FEATURE_LBRV (1 << 1)
 #define SVM_DEATURE_SVML (1 << 2)
 
+static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
+{
+	return (struct vcpu_svm*)vcpu->_priv;
+}
+
 unsigned long iopm_base;
 unsigned long msrpm_base;
 
@@ -95,7 +100,7 @@ static inline u32 svm_has(u32 feat)
 
 static unsigned get_addr_size(struct kvm_vcpu *vcpu)
 {
-	struct vmcb_save_area *sa = &vcpu->svm->vmcb->save;
+	struct vmcb_save_area *sa = &to_svm(vcpu)->vmcb->save;
 	u16 cs_attrib;
 
 	if (!(sa->cr0 & X86_CR0_PE) || (sa->rflags & X86_EFLAGS_VM))
@@ -181,7 +186,7 @@ static inline void write_dr7(unsigned long val)
 
 static inline void force_new_asid(struct kvm_vcpu *vcpu)
 {
-	vcpu->svm->asid_generation--;
+	to_svm(vcpu)->asid_generation--;
 }
 
 static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
@@ -194,22 +199,24 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	if (!(efer & KVM_EFER_LMA))
 		efer &= ~KVM_EFER_LME;
 
-	vcpu->svm->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
+	to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
 	vcpu->shadow_efer = efer;
 }
 
 static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
 {
-	vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
						SVM_EVTINJ_VALID_ERR |
						SVM_EVTINJ_TYPE_EXEPT |
						GP_VECTOR;
-	vcpu->svm->vmcb->control.event_inj_err = error_code;
+	svm->vmcb->control.event_inj_err = error_code;
 }
 
 static void inject_ud(struct kvm_vcpu *vcpu)
 {
-	vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
+	to_svm(vcpu)->vmcb->control.event_inj = SVM_EVTINJ_VALID |
						SVM_EVTINJ_TYPE_EXEPT |
						UD_VECTOR;
 }
@@ -228,19 +235,21 @@ static int is_external_interrupt(u32 info)
 
 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->svm->next_rip) {
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (!svm->next_rip) {
 		printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__);
 		return;
 	}
-	if (vcpu->svm->next_rip - vcpu->svm->vmcb->save.rip > 15) {
+	if (svm->next_rip - svm->vmcb->save.rip > 15) {
 		printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
 		       __FUNCTION__,
-		       vcpu->svm->vmcb->save.rip,
-		       vcpu->svm->next_rip);
+		       svm->vmcb->save.rip,
+		       svm->next_rip);
 	}
 
-	vcpu->rip = vcpu->svm->vmcb->save.rip = vcpu->svm->next_rip;
-	vcpu->svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
+	vcpu->rip = svm->vmcb->save.rip = svm->next_rip;
+	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
 
 	vcpu->interrupt_window_open = 1;
 }
@@ -569,23 +578,27 @@ static void init_vmcb(struct vmcb *vmcb)
 
 static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm;
 	struct page *page;
 	int r;
 
 	r = -ENOMEM;
-	vcpu->svm = kzalloc(sizeof *vcpu->svm, GFP_KERNEL);
-	if (!vcpu->svm)
+	svm = kzalloc(sizeof *svm, GFP_KERNEL);
+	if (!svm)
 		goto out1;
 	page = alloc_page(GFP_KERNEL);
 	if (!page)
 		goto out2;
 
-	vcpu->svm->vmcb = page_address(page);
-	clear_page(vcpu->svm->vmcb);
-	vcpu->svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
-	vcpu->svm->asid_generation = 0;
-	memset(vcpu->svm->db_regs, 0, sizeof(vcpu->svm->db_regs));
-	init_vmcb(vcpu->svm->vmcb);
+	svm->vmcb = page_address(page);
+	clear_page(svm->vmcb);
+	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
+	svm->asid_generation = 0;
+	memset(svm->db_regs, 0, sizeof(svm->db_regs));
+	init_vmcb(svm->vmcb);
+
+	svm->vcpu = vcpu;
+	vcpu->_priv = svm;
 
 	fx_init(vcpu);
 	vcpu->fpu_active = 1;
@@ -596,22 +609,26 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 	return 0;
 
 out2:
-	kfree(vcpu->svm);
+	kfree(svm);
out1:
 	return r;
 }
 
 static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->svm)
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (!svm)
 		return;
-	if (vcpu->svm->vmcb)
-		__free_page(pfn_to_page(vcpu->svm->vmcb_pa >> PAGE_SHIFT));
-	kfree(vcpu->svm);
+	if (svm->vmcb)
+		__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
+	kfree(svm);
+	vcpu->_priv = NULL;
 }
 
 static void svm_vcpu_load(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	int cpu, i;
 
 	cpu = get_cpu();
@@ -624,20 +641,21 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu)
 	 */
 		rdtscll(tsc_this);
 		delta = vcpu->host_tsc - tsc_this;
-		vcpu->svm->vmcb->control.tsc_offset += delta;
+		svm->vmcb->control.tsc_offset += delta;
 		vcpu->cpu = cpu;
 	}
 
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
-		rdmsrl(host_save_user_msrs[i], vcpu->svm->host_user_msrs[i]);
+		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 }
 
 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	int i;
 
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
-		wrmsrl(host_save_user_msrs[i], vcpu->svm->host_user_msrs[i]);
+		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 
 	rdtscll(vcpu->host_tsc);
 	put_cpu();
@@ -649,31 +667,34 @@ static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
 
 static void svm_cache_regs(struct kvm_vcpu *vcpu)
 {
-	vcpu->regs[VCPU_REGS_RAX] = vcpu->svm->vmcb->save.rax;
-	vcpu->regs[VCPU_REGS_RSP] = vcpu->svm->vmcb->save.rsp;
-	vcpu->rip = vcpu->svm->vmcb->save.rip;
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	vcpu->regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
+	vcpu->regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
+	vcpu->rip = svm->vmcb->save.rip;
 }
 
 static void svm_decache_regs(struct kvm_vcpu *vcpu)
 {
-	vcpu->svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
-	vcpu->svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
-	vcpu->svm->vmcb->save.rip = vcpu->rip;
+	struct vcpu_svm *svm = to_svm(vcpu);
+	svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
+	svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
+	svm->vmcb->save.rip = vcpu->rip;
 }
 
 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
 {
-	return vcpu->svm->vmcb->save.rflags;
+	return to_svm(vcpu)->vmcb->save.rflags;
 }
 
 static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
-	vcpu->svm->vmcb->save.rflags = rflags;
+	to_svm(vcpu)->vmcb->save.rflags = rflags;
 }
 
 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
 {
-	struct vmcb_save_area *save = &vcpu->svm->vmcb->save;
+	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
 
 	switch (seg) {
 	case VCPU_SREG_CS: return &save->cs;
@@ -725,26 +746,34 @@ static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
 
 static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
 {
-	dt->limit = vcpu->svm->vmcb->save.idtr.limit;
-	dt->base = vcpu->svm->vmcb->save.idtr.base;
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	dt->limit = svm->vmcb->save.idtr.limit;
+	dt->base = svm->vmcb->save.idtr.base;
 }
 
 static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
 {
-	vcpu->svm->vmcb->save.idtr.limit = dt->limit;
-	vcpu->svm->vmcb->save.idtr.base = dt->base ;
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	svm->vmcb->save.idtr.limit = dt->limit;
+	svm->vmcb->save.idtr.base = dt->base ;
 }
 
 static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
 {
-	dt->limit = vcpu->svm->vmcb->save.gdtr.limit;
-	dt->base = vcpu->svm->vmcb->save.gdtr.base;
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	dt->limit = svm->vmcb->save.gdtr.limit;
+	dt->base = svm->vmcb->save.gdtr.base;
 }
 
 static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
 {
-	vcpu->svm->vmcb->save.gdtr.limit = dt->limit;
-	vcpu->svm->vmcb->save.gdtr.base = dt->base ;
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	svm->vmcb->save.gdtr.limit = dt->limit;
+	svm->vmcb->save.gdtr.base = dt->base ;
 }
 
 static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
@@ -753,39 +782,42 @@ static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
+
 #ifdef CONFIG_X86_64
 	if (vcpu->shadow_efer & KVM_EFER_LME) {
 		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
 			vcpu->shadow_efer |= KVM_EFER_LMA;
-			vcpu->svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
+			svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
 		}
 
 		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG) ) {
 			vcpu->shadow_efer &= ~KVM_EFER_LMA;
-			vcpu->svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
+			svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
 		}
 	}
 #endif
 	if ((vcpu->cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
-		vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
+		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
 		vcpu->fpu_active = 1;
 	}
 
 	vcpu->cr0 = cr0;
 	cr0 |= X86_CR0_PG | X86_CR0_WP;
 	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
-	vcpu->svm->vmcb->save.cr0 = cr0;
+	svm->vmcb->save.cr0 = cr0;
 }
 
 static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	vcpu->cr4 = cr4;
-	vcpu->svm->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
+	to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
 }
 
 static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb_seg *s = svm_seg(vcpu, seg);
 
 	s->base = var->base;
@@ -804,16 +836,16 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
 	}
 	if (seg == VCPU_SREG_CS)
-		vcpu->svm->vmcb->save.cpl
-			= (vcpu->svm->vmcb->save.cs.attrib
+		svm->vmcb->save.cpl
+			= (svm->vmcb->save.cs.attrib
			   >> SVM_SELECTOR_DPL_SHIFT) & 3;
 
 }
 
 /* FIXME:
 
-	vcpu->svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
-	vcpu->svm->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);
+	svm(vcpu)->vmcb->control.int_ctl &= ~V_TPR_MASK;
+	svm(vcpu)->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);
 
 */
 
@@ -825,55 +857,59 @@ static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
 static void load_host_msrs(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_X86_64
-	wrmsrl(MSR_GS_BASE, vcpu->svm->host_gs_base);
+	wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
 #endif
 }
 
 static void save_host_msrs(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_X86_64
-	rdmsrl(MSR_GS_BASE, vcpu->svm->host_gs_base);
+	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
 #endif
 }
 
 static void new_asid(struct kvm_vcpu *vcpu, struct svm_cpu_data *svm_data)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
+
 	if (svm_data->next_asid > svm_data->max_asid) {
 		++svm_data->asid_generation;
 		svm_data->next_asid = 1;
-		vcpu->svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
+		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
 	}
 
 	vcpu->cpu = svm_data->cpu;
-	vcpu->svm->asid_generation = svm_data->asid_generation;
-	vcpu->svm->vmcb->control.asid = svm_data->next_asid++;
+	svm->asid_generation = svm_data->asid_generation;
+	svm->vmcb->control.asid = svm_data->next_asid++;
 }
 
 static void svm_invlpg(struct kvm_vcpu *vcpu, gva_t address)
 {
-	invlpga(address, vcpu->svm->vmcb->control.asid); // is needed?
+	invlpga(address, to_svm(vcpu)->vmcb->control.asid); // is needed?
 }
 
 static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
 {
-	return vcpu->svm->db_regs[dr];
+	return to_svm(vcpu)->db_regs[dr];
 }
 
 static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
+
 	*exception = 0;
 
-	if (vcpu->svm->vmcb->save.dr7 & DR7_GD_MASK) {
-		vcpu->svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
-		vcpu->svm->vmcb->save.dr6 |= DR6_BD_MASK;
+	if (svm->vmcb->save.dr7 & DR7_GD_MASK) {
+		svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
+		svm->vmcb->save.dr6 |= DR6_BD_MASK;
 		*exception = DB_VECTOR;
 		return;
 	}
 
 	switch (dr) {
 	case 0 ... 3:
-		vcpu->svm->db_regs[dr] = value;
+		svm->db_regs[dr] = value;
 		return;
 	case 4 ... 5:
 		if (vcpu->cr4 & X86_CR4_DE) {
@@ -885,7 +921,7 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
			*exception = GP_VECTOR;
			return;
		}
-		vcpu->svm->vmcb->save.dr7 = value;
+		svm->vmcb->save.dr7 = value;
		return;
	}
	default:
@@ -898,7 +934,8 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
 
 static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	u32 exit_int_info = vcpu->svm->vmcb->control.exit_int_info;
+	struct vcpu_svm *svm = to_svm(vcpu);
+	u32 exit_int_info = svm->vmcb->control.exit_int_info;
 	u64 fault_address;
 	u32 error_code;
 	enum emulation_result er;
@@ -909,8 +946,8 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	spin_lock(&vcpu->kvm->lock);
 
-	fault_address = vcpu->svm->vmcb->control.exit_info_2;
-	error_code = vcpu->svm->vmcb->control.exit_info_1;
+	fault_address = svm->vmcb->control.exit_info_2;
+	error_code = svm->vmcb->control.exit_info_1;
 	r = kvm_mmu_page_fault(vcpu, fault_address, error_code);
 	if (r < 0) {
 		spin_unlock(&vcpu->kvm->lock);
@@ -942,22 +979,25 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 static int nm_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
-	if (!(vcpu->cr0 & X86_CR0_TS))
-		vcpu->svm->vmcb->save.cr0 &= ~X86_CR0_TS;
-	vcpu->fpu_active = 1;
+	struct vcpu_svm *svm = to_svm(vcpu);
 
-	return 1;
+	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
+	if (!(vcpu->cr0 & X86_CR0_TS))
+		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
+	vcpu->fpu_active = 1;
+
+	return 1;
 }
 
 static int shutdown_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	/*
	 * VMCB is undefined after a SHUTDOWN intercept
	 * so reinitialize it.
	 */
-	clear_page(vcpu->svm->vmcb);
-	init_vmcb(vcpu->svm->vmcb);
+	clear_page(svm->vmcb);
+	init_vmcb(svm->vmcb);
 
 	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
 	return 0;
@@ -967,23 +1007,24 @@ static int io_get_override(struct kvm_vcpu *vcpu,
			  struct vmcb_seg **seg,
			  int *addr_override)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	u8 inst[MAX_INST_SIZE];
 	unsigned ins_length;
 	gva_t rip;
 	int i;
 
-	rip = vcpu->svm->vmcb->save.rip;
-	ins_length = vcpu->svm->next_rip - rip;
-	rip += vcpu->svm->vmcb->save.cs.base;
+	rip = svm->vmcb->save.rip;
+	ins_length = svm->next_rip - rip;
+	rip += svm->vmcb->save.cs.base;
 
 	if (ins_length > MAX_INST_SIZE)
		printk(KERN_DEBUG
		       "%s: inst length err, cs base 0x%llx rip 0x%llx "
		       "next rip 0x%llx ins_length %u\n",
		       __FUNCTION__,
-		       vcpu->svm->vmcb->save.cs.base,
-		       vcpu->svm->vmcb->save.rip,
-		       vcpu->svm->vmcb->control.exit_info_2,
+		       svm->vmcb->save.cs.base,
+		       svm->vmcb->save.rip,
+		       svm->vmcb->control.exit_info_2,
		       ins_length);
 
 	if (kvm_read_guest(vcpu, rip, ins_length, inst) != ins_length)
@@ -1003,22 +1044,22 @@ static int io_get_override(struct kvm_vcpu *vcpu,
			*addr_override = 1;
			continue;
		case 0x2e:
-			*seg = &vcpu->svm->vmcb->save.cs;
+			*seg = &svm->vmcb->save.cs;
			continue;
		case 0x36:
-			*seg = &vcpu->svm->vmcb->save.ss;
+			*seg = &svm->vmcb->save.ss;
			continue;
		case 0x3e:
-			*seg = &vcpu->svm->vmcb->save.ds;
+			*seg = &svm->vmcb->save.ds;
			continue;
		case 0x26:
-			*seg = &vcpu->svm->vmcb->save.es;
+			*seg = &svm->vmcb->save.es;
			continue;
		case 0x64:
-			*seg = &vcpu->svm->vmcb->save.fs;
+			*seg = &svm->vmcb->save.fs;
			continue;
		case 0x65:
-			*seg = &vcpu->svm->vmcb->save.gs;
+			*seg = &svm->vmcb->save.gs;
			continue;
		default:
			return 1;
@@ -1033,7 +1074,8 @@ static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, gva_t *address)
 	unsigned long *reg;
 	struct vmcb_seg *seg;
 	int addr_override;
-	struct vmcb_save_area *save_area = &vcpu->svm->vmcb->save;
+	struct vcpu_svm *svm = to_svm(vcpu);
+	struct vmcb_save_area *save_area = &svm->vmcb->save;
 	u16 cs_attrib = save_area->cs.attrib;
 	unsigned addr_size = get_addr_size(vcpu);
 
@@ -1045,16 +1087,16 @@ static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, gva_t *address)
 
 	if (ins) {
 		reg = &vcpu->regs[VCPU_REGS_RDI];
-		seg = &vcpu->svm->vmcb->save.es;
+		seg = &svm->vmcb->save.es;
 	} else {
 		reg = &vcpu->regs[VCPU_REGS_RSI];
-		seg = (seg) ? seg : &vcpu->svm->vmcb->save.ds;
+		seg = (seg) ? seg : &svm->vmcb->save.ds;
 	}
 
 	addr_mask = ~0ULL >> (64 - (addr_size * 8));
 
 	if ((cs_attrib & SVM_SELECTOR_L_MASK) &&
-	    !(vcpu->svm->vmcb->save.rflags & X86_EFLAGS_VM)) {
+	    !(svm->vmcb->save.rflags & X86_EFLAGS_VM)) {
 		*address = (*reg & addr_mask);
 		return addr_mask;
 	}
@@ -1070,7 +1112,8 @@ static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, gva_t *address)
 
 static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	u32 io_info = vcpu->svm->vmcb->control.exit_info_1; //address size bug?
+	struct vcpu_svm *svm = to_svm(vcpu);
+	u32 io_info = svm->vmcb->control.exit_info_1; //address size bug?
 	int size, down, in, string, rep;
 	unsigned port;
 	unsigned long count;
@@ -1078,7 +1121,7 @@ static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	++vcpu->stat.io_exits;
 
-	vcpu->svm->next_rip = vcpu->svm->vmcb->control.exit_info_2;
+	svm->next_rip = svm->vmcb->control.exit_info_2;
 
 	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
 	port = io_info >> 16;
@@ -1086,7 +1129,7 @@ static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	string = (io_info & SVM_IOIO_STR_MASK) != 0;
 	rep = (io_info & SVM_IOIO_REP_MASK) != 0;
 	count = 1;
-	down = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;
+	down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;
 
 	if (string) {
 		unsigned addr_mask;
@@ -1112,14 +1155,18 @@ static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1;
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	svm->next_rip = svm->vmcb->save.rip + 1;
 	skip_emulated_instruction(vcpu);
 	return kvm_emulate_halt(vcpu);
 }
 
 static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 3;
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	svm->next_rip = svm->vmcb->save.rip + 3;
 	skip_emulated_instruction(vcpu);
 	return kvm_hypercall(vcpu, kvm_run);
 }
@@ -1139,7 +1186,9 @@ static int task_switch_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 static int cpuid_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	svm->next_rip = svm->vmcb->save.rip + 2;
 	kvm_emulate_cpuid(vcpu);
 	return 1;
 }
@@ -1153,39 +1202,41 @@ static int emulate_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
+
 	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;
 
		rdtscll(tsc);
-		*data = vcpu->svm->vmcb->control.tsc_offset + tsc;
+		*data = svm->vmcb->control.tsc_offset + tsc;
		break;
	}
	case MSR_K6_STAR:
-		*data = vcpu->svm->vmcb->save.star;
+		*data = svm->vmcb->save.star;
		break;
 #ifdef CONFIG_X86_64
	case MSR_LSTAR:
-		*data = vcpu->svm->vmcb->save.lstar;
+		*data = svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
-		*data = vcpu->svm->vmcb->save.cstar;
+		*data = svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
-		*data = vcpu->svm->vmcb->save.kernel_gs_base;
+		*data = svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
-		*data = vcpu->svm->vmcb->save.sfmask;
+		*data = svm->vmcb->save.sfmask;
		break;
 #endif
	case MSR_IA32_SYSENTER_CS:
-		*data = vcpu->svm->vmcb->save.sysenter_cs;
+		*data = svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
-		*data = vcpu->svm->vmcb->save.sysenter_eip;
+		*data = svm->vmcb->save.sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
-		*data = vcpu->svm->vmcb->save.sysenter_esp;
+		*data = svm->vmcb->save.sysenter_esp;
		break;
	default:
		return kvm_get_msr_common(vcpu, ecx, data);
@@ -1195,15 +1246,16 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 
 static int rdmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
 	u64 data;
 
 	if (svm_get_msr(vcpu, ecx, &data))
		svm_inject_gp(vcpu, 0);
	else {
-		vcpu->svm->vmcb->save.rax = data & 0xffffffff;
+		svm->vmcb->save.rax = data & 0xffffffff;
		vcpu->regs[VCPU_REGS_RDX] = data >> 32;
-		vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
+		svm->next_rip = svm->vmcb->save.rip + 2;
		skip_emulated_instruction(vcpu);
	}
 	return 1;
@@ -1211,39 +1263,41 @@ static int rdmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
+
 	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;
 
		rdtscll(tsc);
-		vcpu->svm->vmcb->control.tsc_offset = data - tsc;
+		svm->vmcb->control.tsc_offset = data - tsc;
		break;
	}
	case MSR_K6_STAR:
-		vcpu->svm->vmcb->save.star = data;
+		svm->vmcb->save.star = data;
		break;
 #ifdef CONFIG_X86_64
	case MSR_LSTAR:
-		vcpu->svm->vmcb->save.lstar = data;
+		svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
-		vcpu->svm->vmcb->save.cstar = data;
+		svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
-		vcpu->svm->vmcb->save.kernel_gs_base = data;
+		svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
-		vcpu->svm->vmcb->save.sfmask = data;
+		svm->vmcb->save.sfmask = data;
		break;
 #endif
	case MSR_IA32_SYSENTER_CS:
-		vcpu->svm->vmcb->save.sysenter_cs = data;
+		svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
-		vcpu->svm->vmcb->save.sysenter_eip = data;
+		svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
-		vcpu->svm->vmcb->save.sysenter_esp = data;
+		svm->vmcb->save.sysenter_esp = data;
		break;
	default:
		return kvm_set_msr_common(vcpu, ecx, data);
@@ -1253,10 +1307,11 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 
 static int wrmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
-	u64 data = (vcpu->svm->vmcb->save.rax & -1u)
+	u64 data = (svm->vmcb->save.rax & -1u)
		| ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
-	vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2;
+	svm->next_rip = svm->vmcb->save.rip + 2;
 	if (svm_set_msr(vcpu, ecx, data))
		svm_inject_gp(vcpu, 0);
	else
@@ -1266,7 +1321,7 @@ static int wrmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	if (vcpu->svm->vmcb->control.exit_info_1)
+	if (to_svm(vcpu)->vmcb->control.exit_info_1)
		return wrmsr_interception(vcpu, kvm_run);
	else
		return rdmsr_interception(vcpu, kvm_run);
@@ -1338,13 +1393,14 @@ static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
 
 static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	u32 exit_code = vcpu->svm->vmcb->control.exit_code;
+	struct vcpu_svm *svm = to_svm(vcpu);
+	u32 exit_code = svm->vmcb->control.exit_code;
 
-	if (is_external_interrupt(vcpu->svm->vmcb->control.exit_int_info) &&
+	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
		printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x "
		       "exit_code 0x%x\n",
-		       __FUNCTION__, vcpu->svm->vmcb->control.exit_int_info,
+		       __FUNCTION__, svm->vmcb->control.exit_int_info,
		       exit_code);
 
 	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
@@ -1368,13 +1424,14 @@ static void reload_tss(struct kvm_vcpu *vcpu)
 
 static void pre_svm_run(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	int cpu = raw_smp_processor_id();
 
 	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
 
-	vcpu->svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
+	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
 	if (vcpu->cpu != cpu ||
-	    vcpu->svm->asid_generation != svm_data->asid_generation)
+	    svm->asid_generation != svm_data->asid_generation)
		new_asid(vcpu, svm_data);
 }
 
@@ -1383,7 +1440,7 @@ static inline void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
 {
 	struct vmcb_control_area *control;
 
-	control = &vcpu->svm->vmcb->control;
+	control = &to_svm(vcpu)->vmcb->control;
 	control->int_vector = pop_irq(vcpu);
 	control->int_ctl &= ~V_INTR_PRIO_MASK;
 	control->int_ctl |= V_IRQ_MASK |
@@ -1392,7 +1449,7 @@ static inline void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
 
 static void kvm_reput_irq(struct kvm_vcpu *vcpu)
 {
-	struct vmcb_control_area *control = &vcpu->svm->vmcb->control;
+	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
 
 	if (control->int_ctl & V_IRQ_MASK) {
		control->int_ctl &= ~V_IRQ_MASK;
@@ -1406,11 +1463,12 @@ static void kvm_reput_irq(struct kvm_vcpu *vcpu)
 static void do_interrupt_requests(struct kvm_vcpu *vcpu,
				       struct kvm_run *kvm_run)
 {
-	struct vmcb_control_area *control = &vcpu->svm->vmcb->control;
+	struct vcpu_svm *svm = to_svm(vcpu);
+	struct vmcb_control_area *control = &svm->vmcb->control;
 
 	vcpu->interrupt_window_open =
		(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
-		 (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));
+		 (svm->vmcb->save.rflags & X86_EFLAGS_IF));
 
 	if (vcpu->interrupt_window_open && vcpu->irq_summary)
		/*
@@ -1431,9 +1489,11 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
 static void post_kvm_run_save(struct kvm_vcpu *vcpu,
			      struct kvm_run *kvm_run)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
+
 	kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
						  vcpu->irq_summary == 0);
-	kvm_run->if_flag = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
+	kvm_run->if_flag = (svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
 	kvm_run->cr8 = vcpu->cr8;
 	kvm_run->apic_base = vcpu->apic_base;
 }
@@ -1450,7 +1510,7 @@ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
 	return (!vcpu->irq_summary &&
		kvm_run->request_interrupt_window &&
		vcpu->interrupt_window_open &&
-		(vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));
+		(to_svm(vcpu)->vmcb->save.rflags & X86_EFLAGS_IF));
 }
 
 static void save_db_regs(unsigned long *db_regs)
@@ -1476,6 +1536,7 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu)
 
 static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	u16 fs_selector;
 	u16 gs_selector;
 	u16 ldt_selector;
@@ -1502,15 +1563,15 @@ again:
 	fs_selector = read_fs();
 	gs_selector = read_gs();
 	ldt_selector = read_ldt();
-	vcpu->svm->host_cr2 = kvm_read_cr2();
-	vcpu->svm->host_dr6 = read_dr6();
-	vcpu->svm->host_dr7 = read_dr7();
-	vcpu->svm->vmcb->save.cr2 = vcpu->cr2;
+	svm->host_cr2 = kvm_read_cr2();
+	svm->host_dr6 = read_dr6();
+	svm->host_dr7 = read_dr7();
+	svm->vmcb->save.cr2 = vcpu->cr2;
 
-	if (vcpu->svm->vmcb->save.dr7 & 0xff) {
+	if (svm->vmcb->save.dr7 & 0xff) {
		write_dr7(0);
-		save_db_regs(vcpu->svm->host_db_regs);
-		load_db_regs(vcpu->svm->db_regs);
+		save_db_regs(svm->host_db_regs);
+		load_db_regs(svm->db_regs);
	}
 
 	if (vcpu->fpu_active) {
@@ -1607,7 +1668,7 @@ again:
 #endif
		:
		: [vcpu]"a"(vcpu),
-		  [svm]"i"(offsetof(struct kvm_vcpu, svm)),
+		  [svm]"i"(offsetof(struct kvm_vcpu, _priv)),
		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
		  [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
		  [rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])),
@@ -1634,14 +1695,14 @@ again:
		fx_restore(vcpu->host_fx_image);
	}
 
-	if ((vcpu->svm->vmcb->save.dr7 & 0xff))
-		load_db_regs(vcpu->svm->host_db_regs);
+	if ((svm->vmcb->save.dr7 & 0xff))
+		load_db_regs(svm->host_db_regs);
 
-	vcpu->cr2 = vcpu->svm->vmcb->save.cr2;
+	vcpu->cr2 = svm->vmcb->save.cr2;
 
-	write_dr6(vcpu->svm->host_dr6);
-	write_dr7(vcpu->svm->host_dr7);
-	kvm_write_cr2(vcpu->svm->host_cr2);
+	write_dr6(svm->host_dr6);
+	write_dr7(svm->host_dr7);
+	kvm_write_cr2(svm->host_cr2);
 
 	load_fs(fs_selector);
 	load_gs(gs_selector);
@@ -1655,18 +1716,18 @@ again:
	 */
 	if (unlikely(prof_on == KVM_PROFILING))
		profile_hit(KVM_PROFILING,
-			(void *)(unsigned long)vcpu->svm->vmcb->save.rip);
+			(void *)(unsigned long)svm->vmcb->save.rip);
 
 	stgi();
 
 	kvm_reput_irq(vcpu);
 
-	vcpu->svm->next_rip = 0;
+	svm->next_rip = 0;
 
-	if (vcpu->svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
+	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
-			= vcpu->svm->vmcb->control.exit_code;
+			= svm->vmcb->control.exit_code;
		post_kvm_run_save(vcpu, kvm_run);
		return 0;
	}
@@ -1695,12 +1756,14 @@ again:
 
 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
 {
-	vcpu->svm->vmcb->save.cr3 = root;
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	svm->vmcb->save.cr3 = root;
 	force_new_asid(vcpu);
 
 	if (vcpu->fpu_active) {
-		vcpu->svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
-		vcpu->svm->vmcb->save.cr0 |= X86_CR0_TS;
+		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
+		svm->vmcb->save.cr0 |= X86_CR0_TS;
		vcpu->fpu_active = 0;
	}
 }
@@ -1709,26 +1772,27 @@ static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
				  unsigned long addr,
				  uint32_t err_code)
 {
-	uint32_t exit_int_info = vcpu->svm->vmcb->control.exit_int_info;
+	struct vcpu_svm *svm = to_svm(vcpu);
+	uint32_t exit_int_info = svm->vmcb->control.exit_int_info;
 
 	++vcpu->stat.pf_guest;
 
 	if (is_page_fault(exit_int_info)) {
 
-		vcpu->svm->vmcb->control.event_inj_err = 0;
-		vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
+		svm->vmcb->control.event_inj_err = 0;
+		svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
						SVM_EVTINJ_VALID_ERR |
						SVM_EVTINJ_TYPE_EXEPT |
						DF_VECTOR;
		return;
	}
 	vcpu->cr2 = addr;
-	vcpu->svm->vmcb->save.cr2 = addr;
-	vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
+	svm->vmcb->save.cr2 = addr;
+	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
					SVM_EVTINJ_VALID_ERR |
					SVM_EVTINJ_TYPE_EXEPT |
					PF_VECTOR;
-	vcpu->svm->vmcb->control.event_inj_err = err_code;
+	svm->vmcb->control.event_inj_err = err_code;
 }
 
 
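The `_priv` indirection keeps `struct kvm_vcpu` free of architecture details, at the cost of an untyped pointer and a separate allocation. A hypothetical alternative (not part of this patch) is to embed the generic vcpu inside the SVM structure and recover the container by pointer arithmetic, which makes the relationship explicit in the type and drops one allocation; a sketch, reusing the stand-in definitions from the note above:

	/* Hypothetical variant, not what this diff does: embed rather than point. */
	#include <stddef.h>

	struct vcpu_svm_alt {
		struct kvm_vcpu vcpu;   /* the generic vcpu is the embedded object */
		struct vmcb *vmcb;
	};

	static inline struct vcpu_svm_alt *to_svm_alt(struct kvm_vcpu *vcpu)
	{
		/* open-coded container_of(): step back from the member
		 * to the start of the enclosing structure */
		return (struct vcpu_svm_alt *)((char *)vcpu
				- offsetof(struct vcpu_svm_alt, vcpu));
	}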