author		Zhang Xiantao <xiantao.zhang@intel.com>	2007-12-13 10:50:52 -0500
committer	Avi Kivity <avi@qumranet.com>	2008-01-30 10:58:09 -0500
commit		ad312c7c79f781c822e37effe41307503a2bb85b
tree		d979bfb70e76ada58b79b456c61a0507a8f0847d /drivers/kvm/svm.c
parent		682c59a3f3f211ed555b17144f2d82eb8286a1db
KVM: Portability: Introduce kvm_vcpu_arch
Move all the architecture-specific fields in kvm_vcpu into a new struct
kvm_vcpu_arch.
Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Acked-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/svm.c')
-rw-r--r--	drivers/kvm/svm.c	122
1 file changed, 61 insertions(+), 61 deletions(-)
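
Before the diff itself, a minimal sketch of the new layout. The field list is inferred only from the accesses in this patch, not copied from drivers/kvm/kvm.h, so the exact types and array bounds are approximations and the real structs carry many more members:

struct kvm_vcpu_arch {
	unsigned long regs[NR_VCPU_REGS];	/* indexed by VCPU_REGS_RAX ... */
	unsigned long rip;
	unsigned long cr0, cr2, cr4;
	u64 shadow_efer;
	u64 apic_base;
	u64 host_tsc;
	unsigned long irq_summary;	/* one bit per non-zero irq_pending word */
	unsigned long irq_pending[NR_IRQ_WORDS];
	int interrupt_window_open;
	int sipi_vector;
};

struct kvm_vcpu {
	int vcpu_id;
	int cpu;
	int fpu_active;			/* architecture-neutral fields stay here */
	struct kvm_vcpu_arch arch;	/* x86-specific state moves in here */
	/* ... */
};

Each hunk below is then a mechanical rename of vcpu->field (or svm->vcpu.field) to vcpu->arch.field.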
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index ef21804a5c5c..7888638c02e8 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -99,20 +99,20 @@ static inline u32 svm_has(u32 feat)
 
 static inline u8 pop_irq(struct kvm_vcpu *vcpu)
 {
-	int word_index = __ffs(vcpu->irq_summary);
-	int bit_index = __ffs(vcpu->irq_pending[word_index]);
+	int word_index = __ffs(vcpu->arch.irq_summary);
+	int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
 	int irq = word_index * BITS_PER_LONG + bit_index;
 
-	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
-	if (!vcpu->irq_pending[word_index])
-		clear_bit(word_index, &vcpu->irq_summary);
+	clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
+	if (!vcpu->arch.irq_pending[word_index])
+		clear_bit(word_index, &vcpu->arch.irq_summary);
 	return irq;
 }
 
 static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
 {
-	set_bit(irq, vcpu->irq_pending);
-	set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
+	set_bit(irq, vcpu->arch.irq_pending);
+	set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
 }
 
 static inline void clgi(void)
@@ -185,7 +185,7 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 		efer &= ~EFER_LME;
 
 	to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
-	vcpu->shadow_efer = efer;
+	vcpu->arch.shadow_efer = efer;
 }
 
 static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
@@ -227,10 +227,10 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 		       svm->vmcb->save.rip,
 		       svm->next_rip);
 
-	vcpu->rip = svm->vmcb->save.rip = svm->next_rip;
+	vcpu->arch.rip = svm->vmcb->save.rip = svm->next_rip;
 	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
 
-	vcpu->interrupt_window_open = 1;
+	vcpu->arch.interrupt_window_open = 1;
 }
 
 static int has_svm(void)
@@ -559,8 +559,8 @@ static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
 
 	if (vcpu->vcpu_id != 0) {
 		svm->vmcb->save.rip = 0;
-		svm->vmcb->save.cs.base = svm->vcpu.sipi_vector << 12;
-		svm->vmcb->save.cs.selector = svm->vcpu.sipi_vector << 8;
+		svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
+		svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
 	}
 
 	return 0;
@@ -597,9 +597,9 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 
 	fx_init(&svm->vcpu);
 	svm->vcpu.fpu_active = 1;
-	svm->vcpu.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+	svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
 	if (svm->vcpu.vcpu_id == 0)
-		svm->vcpu.apic_base |= MSR_IA32_APICBASE_BSP;
+		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
 
 	return &svm->vcpu;
 
@@ -633,7 +633,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		 * increasing TSC.
 		 */
 		rdtscll(tsc_this);
-		delta = vcpu->host_tsc - tsc_this;
+		delta = vcpu->arch.host_tsc - tsc_this;
 		svm->vmcb->control.tsc_offset += delta;
 		vcpu->cpu = cpu;
 		kvm_migrate_apic_timer(vcpu);
@@ -652,7 +652,7 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
 		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 
-	rdtscll(vcpu->host_tsc);
+	rdtscll(vcpu->arch.host_tsc);
 }
 
 static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
@@ -663,17 +663,17 @@ static void svm_cache_regs(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	vcpu->regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
-	vcpu->regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
-	vcpu->rip = svm->vmcb->save.rip;
+	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
+	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
+	vcpu->arch.rip = svm->vmcb->save.rip;
 }
 
 static void svm_decache_regs(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
-	svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
-	svm->vmcb->save.rip = vcpu->rip;
+	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
+	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+	svm->vmcb->save.rip = vcpu->arch.rip;
 }
 
 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
@@ -771,24 +771,24 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 #ifdef CONFIG_X86_64
-	if (vcpu->shadow_efer & EFER_LME) {
+	if (vcpu->arch.shadow_efer & EFER_LME) {
 		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
-			vcpu->shadow_efer |= EFER_LMA;
+			vcpu->arch.shadow_efer |= EFER_LMA;
 			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
 		}
 
 		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
-			vcpu->shadow_efer &= ~EFER_LMA;
+			vcpu->arch.shadow_efer &= ~EFER_LMA;
 			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
 		}
 	}
 #endif
-	if ((vcpu->cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
+	if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
 		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
 		vcpu->fpu_active = 1;
 	}
 
-	vcpu->cr0 = cr0;
+	vcpu->arch.cr0 = cr0;
 	cr0 |= X86_CR0_PG | X86_CR0_WP;
 	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
 	svm->vmcb->save.cr0 = cr0;
@@ -796,7 +796,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
 static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
-	vcpu->cr4 = cr4;
+	vcpu->arch.cr4 = cr4;
 	to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
 }
 
@@ -901,7 +901,7 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
 		svm->db_regs[dr] = value;
 		return;
 	case 4 ... 5:
-		if (vcpu->cr4 & X86_CR4_DE) {
+		if (vcpu->arch.cr4 & X86_CR4_DE) {
 			*exception = UD_VECTOR;
 			return;
 		}
@@ -950,7 +950,7 @@ static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
 	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
-	if (!(svm->vcpu.cr0 & X86_CR0_TS))
+	if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
 		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
 	svm->vcpu.fpu_active = 1;
 
@@ -1103,14 +1103,14 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 
 static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
+	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
 	u64 data;
 
 	if (svm_get_msr(&svm->vcpu, ecx, &data))
 		kvm_inject_gp(&svm->vcpu, 0);
 	else {
 		svm->vmcb->save.rax = data & 0xffffffff;
-		svm->vcpu.regs[VCPU_REGS_RDX] = data >> 32;
+		svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
 		svm->next_rip = svm->vmcb->save.rip + 2;
 		skip_emulated_instruction(&svm->vcpu);
 	}
@@ -1176,9 +1176,9 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 
 static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
+	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
 	u64 data = (svm->vmcb->save.rax & -1u)
-		| ((u64)(svm->vcpu.regs[VCPU_REGS_RDX] & -1u) << 32);
+		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 	svm->next_rip = svm->vmcb->save.rip + 2;
 	if (svm_set_msr(&svm->vcpu, ecx, data))
 		kvm_inject_gp(&svm->vcpu, 0);
@@ -1205,7 +1205,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm,
 	 * possible
 	 */
 	if (kvm_run->request_interrupt_window &&
-	    !svm->vcpu.irq_summary) {
+	    !svm->vcpu.arch.irq_summary) {
 		++svm->vcpu.stat.irq_window_exits;
 		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
 		return 0;
@@ -1382,20 +1382,20 @@ static void kvm_reput_irq(struct vcpu_svm *svm)
 		push_irq(&svm->vcpu, control->int_vector);
 	}
 
-	svm->vcpu.interrupt_window_open =
+	svm->vcpu.arch.interrupt_window_open =
 		!(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
 }
 
 static void svm_do_inject_vector(struct vcpu_svm *svm)
 {
 	struct kvm_vcpu *vcpu = &svm->vcpu;
-	int word_index = __ffs(vcpu->irq_summary);
-	int bit_index = __ffs(vcpu->irq_pending[word_index]);
+	int word_index = __ffs(vcpu->arch.irq_summary);
+	int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
 	int irq = word_index * BITS_PER_LONG + bit_index;
 
-	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
-	if (!vcpu->irq_pending[word_index])
-		clear_bit(word_index, &vcpu->irq_summary);
+	clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
+	if (!vcpu->arch.irq_pending[word_index])
+		clear_bit(word_index, &vcpu->arch.irq_summary);
 	svm_inject_irq(svm, irq);
 }
 
@@ -1405,11 +1405,11 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb_control_area *control = &svm->vmcb->control;
 
-	svm->vcpu.interrupt_window_open =
+	svm->vcpu.arch.interrupt_window_open =
 		(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
 		 (svm->vmcb->save.rflags & X86_EFLAGS_IF));
 
-	if (svm->vcpu.interrupt_window_open && svm->vcpu.irq_summary)
+	if (svm->vcpu.arch.interrupt_window_open && svm->vcpu.arch.irq_summary)
 		/*
 		 * If interrupts enabled, and not blocked by sti or mov ss. Good.
 		 */
@@ -1418,8 +1418,8 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
 	/*
 	 * Interrupts blocked. Wait for unblock.
 	 */
-	if (!svm->vcpu.interrupt_window_open &&
-	    (svm->vcpu.irq_summary || kvm_run->request_interrupt_window))
+	if (!svm->vcpu.arch.interrupt_window_open &&
+	    (svm->vcpu.arch.irq_summary || kvm_run->request_interrupt_window))
 		control->intercept |= 1ULL << INTERCEPT_VINTR;
 	else
 		control->intercept &= ~(1ULL << INTERCEPT_VINTR);
@@ -1471,7 +1471,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	svm->host_cr2 = kvm_read_cr2();
 	svm->host_dr6 = read_dr6();
 	svm->host_dr7 = read_dr7();
-	svm->vmcb->save.cr2 = vcpu->cr2;
+	svm->vmcb->save.cr2 = vcpu->arch.cr2;
 
 	if (svm->vmcb->save.dr7 & 0xff) {
 		write_dr7(0);
@@ -1563,21 +1563,21 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		:
 		: [svm]"a"(svm),
 		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
-		  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RBX])),
-		  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RCX])),
-		  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RDX])),
-		  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RSI])),
-		  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RDI])),
-		  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RBP]))
+		  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
+		  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
+		  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
+		  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
+		  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
+		  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
 #ifdef CONFIG_X86_64
-		, [r8]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R8])),
-		  [r9]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R9])),
-		  [r10]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R10])),
-		  [r11]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R11])),
-		  [r12]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R12])),
-		  [r13]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R13])),
-		  [r14]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R14])),
-		  [r15]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R15]))
+		, [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
+		  [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
+		  [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
+		  [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
+		  [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
+		  [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
+		  [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
+		  [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
 #endif
 		: "cc", "memory"
 #ifdef CONFIG_X86_64
@@ -1591,7 +1591,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if ((svm->vmcb->save.dr7 & 0xff))
 		load_db_regs(svm->host_db_regs);
 
-	vcpu->cr2 = svm->vmcb->save.cr2;
+	vcpu->arch.cr2 = svm->vmcb->save.cr2;
 
 	write_dr6(svm->host_dr6);
 	write_dr7(svm->host_dr7);
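
The only non-obvious hunk is the inline asm in svm_vcpu_run(): the guest register file is loaded and stored through compile-time "i" (immediate) offsetof() constraints, so each constraint must follow its field into the nested struct. A self-contained illustration of why the rename is safe, using hypothetical stand-in structs rather than the kernel's definitions:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for kvm_vcpu/vcpu_svm, only to show that
 * offsetof() composes through nested members and array indices. */
struct arch_regs {
	unsigned long regs[16];		/* like vcpu->arch.regs[] */
};

struct vcpu_old {
	int vcpu_id;
	unsigned long regs[16];		/* registers at the top level */
};

struct vcpu_new {
	int vcpu_id;
	void *stat;			/* some arch-neutral member */
	struct arch_regs arch;		/* registers moved into ->arch */
};

int main(void)
{
	/* The asm template is untouched; only these compile-time
	 * constants, baked in via "i" constraints, are recomputed. */
	printf("old RBX offset: %zu\n", offsetof(struct vcpu_old, regs[3]));
	printf("new RBX offset: %zu\n", offsetof(struct vcpu_new, arch.regs[3]));
	return 0;
}

Because the offsets are recomputed at compile time, moving the register array changes the constants without any change to the asm body itself.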