author     Zhang Xiantao <xiantao.zhang@intel.com>    2007-12-13 10:50:52 -0500
committer  Avi Kivity <avi@qumranet.com>              2008-01-30 10:58:09 -0500
commit     ad312c7c79f781c822e37effe41307503a2bb85b
tree       d979bfb70e76ada58b79b456c61a0507a8f0847d /drivers/kvm/vmx.c
parent     682c59a3f3f211ed555b17144f2d82eb8286a1db
KVM: Portability: Introduce kvm_vcpu_arch
Move all the architecture-specific fields in kvm_vcpu into a new struct
kvm_vcpu_arch.
Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Acked-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
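
For orientation, here is a minimal sketch of the split this patch introduces. It is illustrative only, not the actual kvm headers: the real struct kvm_vcpu_arch carries many more members, and only fields that appear in the vmx.c hunks below are shown.

	/* Illustrative only -- a cut-down view of the layout this patch creates. */
	struct kvm_vcpu_arch {				/* x86-private state */
		unsigned long cr0;
		unsigned long cr2;
		unsigned long cr3;
		unsigned long cr4;
		u64 shadow_efer;
		u64 host_tsc;
		unsigned long regs[NR_VCPU_REGS];	/* guest GPRs, indexed by VCPU_REGS_* */
		unsigned long rip;
		int interrupt_window_open;
		/* plus real-mode save state, pending-irq bitmaps, apic pointer, ... */
	};

	struct kvm_vcpu {
		struct kvm *kvm;			/* common, architecture-neutral part */
		int vcpu_id;
		struct kvm_vcpu_arch arch;		/* all x86-specific state now lives here */
	};

Common code keeps dereferencing only the outer struct; vmx.c is converted mechanically from vcpu->field to vcpu->arch.field, as the diff below shows.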
Diffstat (limited to 'drivers/kvm/vmx.c')
-rw-r--r--  drivers/kvm/vmx.c | 213
1 file changed, 107 insertions, 106 deletions
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 83084348581a..cf78ebb2f36e 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -247,7 +247,7 @@ static void __vcpu_clear(void *arg)
 	vmcs_clear(vmx->vmcs);
 	if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
 		per_cpu(current_vmcs, cpu) = NULL;
-	rdtscll(vmx->vcpu.host_tsc);
+	rdtscll(vmx->vcpu.arch.host_tsc);
 }
 
 static void vcpu_clear(struct vcpu_vmx *vmx)
@@ -343,7 +343,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 		eb |= 1u << NM_VECTOR;
 	if (vcpu->guest_debug.enabled)
 		eb |= 1u << 1;
-	if (vcpu->rmode.active)
+	if (vcpu->arch.rmode.active)
 		eb = ~0;
 	vmcs_write32(EXCEPTION_BITMAP, eb);
 }
@@ -528,7 +528,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		 * Make sure the time stamp counter is monotonous.
 		 */
 		rdtscll(tsc_this);
-		delta = vcpu->host_tsc - tsc_this;
+		delta = vcpu->arch.host_tsc - tsc_this;
 		vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
 	}
 }
@@ -544,7 +544,7 @@ static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
 		return;
 	vcpu->fpu_active = 1;
 	vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
-	if (vcpu->cr0 & X86_CR0_TS)
+	if (vcpu->arch.cr0 & X86_CR0_TS)
 		vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
 	update_exception_bitmap(vcpu);
 }
@@ -570,7 +570,7 @@ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
 
 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
-	if (vcpu->rmode.active)
+	if (vcpu->arch.rmode.active)
 		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
 	vmcs_writel(GUEST_RFLAGS, rflags);
 }
@@ -592,7 +592,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	if (interruptibility & 3)
 		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
 			     interruptibility & ~3);
-	vcpu->interrupt_window_open = 1;
+	vcpu->arch.interrupt_window_open = 1;
 }
 
 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
@@ -661,7 +661,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 		 * if efer.sce is enabled.
 		 */
 		index = __find_msr_index(vmx, MSR_K6_STAR);
-		if ((index >= 0) && (vmx->vcpu.shadow_efer & EFER_SCE))
+		if ((index >= 0) && (vmx->vcpu.arch.shadow_efer & EFER_SCE))
 			move_msr_up(vmx, index, save_nmsrs++);
 	}
 #endif
@@ -805,12 +805,12 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 
 /*
  * Sync the rsp and rip registers into the vcpu structure. This allows
- * registers to be accessed by indexing vcpu->regs.
+ * registers to be accessed by indexing vcpu->arch.regs.
  */
 static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
 {
-	vcpu->regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
-	vcpu->rip = vmcs_readl(GUEST_RIP);
+	vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
+	vcpu->arch.rip = vmcs_readl(GUEST_RIP);
 }
 
 /*
@@ -819,8 +819,8 @@ static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
  */
 static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
 {
-	vmcs_writel(GUEST_RSP, vcpu->regs[VCPU_REGS_RSP]);
-	vmcs_writel(GUEST_RIP, vcpu->rip);
+	vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
+	vmcs_writel(GUEST_RIP, vcpu->arch.rip);
 }
 
 static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
@@ -1111,15 +1111,15 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
 {
 	unsigned long flags;
 
-	vcpu->rmode.active = 0;
+	vcpu->arch.rmode.active = 0;
 
-	vmcs_writel(GUEST_TR_BASE, vcpu->rmode.tr.base);
-	vmcs_write32(GUEST_TR_LIMIT, vcpu->rmode.tr.limit);
-	vmcs_write32(GUEST_TR_AR_BYTES, vcpu->rmode.tr.ar);
+	vmcs_writel(GUEST_TR_BASE, vcpu->arch.rmode.tr.base);
+	vmcs_write32(GUEST_TR_LIMIT, vcpu->arch.rmode.tr.limit);
+	vmcs_write32(GUEST_TR_AR_BYTES, vcpu->arch.rmode.tr.ar);
 
 	flags = vmcs_readl(GUEST_RFLAGS);
 	flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
-	flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT);
+	flags |= (vcpu->arch.rmode.save_iopl << IOPL_SHIFT);
 	vmcs_writel(GUEST_RFLAGS, flags);
 
 	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
@@ -1127,10 +1127,10 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
 
 	update_exception_bitmap(vcpu);
 
-	fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->rmode.es);
-	fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->rmode.ds);
-	fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->rmode.gs);
-	fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->rmode.fs);
+	fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->arch.rmode.es);
+	fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->arch.rmode.ds);
+	fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
+	fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);
 
 	vmcs_write16(GUEST_SS_SELECTOR, 0);
 	vmcs_write32(GUEST_SS_AR_BYTES, 0x93);
@@ -1168,19 +1168,20 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 {
 	unsigned long flags;
 
-	vcpu->rmode.active = 1;
+	vcpu->arch.rmode.active = 1;
 
-	vcpu->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
+	vcpu->arch.rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
 	vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
 
-	vcpu->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
+	vcpu->arch.rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
 	vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
 
-	vcpu->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
+	vcpu->arch.rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
 	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
 
 	flags = vmcs_readl(GUEST_RFLAGS);
-	vcpu->rmode.save_iopl = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
+	vcpu->arch.rmode.save_iopl
+		= (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
 
 	flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
 
@@ -1198,10 +1199,10 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 	vmcs_writel(GUEST_CS_BASE, 0xf0000);
 	vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);
 
-	fix_rmode_seg(VCPU_SREG_ES, &vcpu->rmode.es);
-	fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds);
-	fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
-	fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
+	fix_rmode_seg(VCPU_SREG_ES, &vcpu->arch.rmode.es);
+	fix_rmode_seg(VCPU_SREG_DS, &vcpu->arch.rmode.ds);
+	fix_rmode_seg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
+	fix_rmode_seg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);
 
 	kvm_mmu_reset_context(vcpu);
 	init_rmode_tss(vcpu->kvm);
@@ -1222,7 +1223,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
 			     | AR_TYPE_BUSY_64_TSS);
 	}
 
-	vcpu->shadow_efer |= EFER_LMA;
+	vcpu->arch.shadow_efer |= EFER_LMA;
 
 	find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
 	vmcs_write32(VM_ENTRY_CONTROLS,
@@ -1232,7 +1233,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
 
 static void exit_lmode(struct kvm_vcpu *vcpu)
 {
-	vcpu->shadow_efer &= ~EFER_LMA;
+	vcpu->arch.shadow_efer &= ~EFER_LMA;
 
 	vmcs_write32(VM_ENTRY_CONTROLS,
 		     vmcs_read32(VM_ENTRY_CONTROLS)
@@ -1243,22 +1244,22 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
 
 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
-	vcpu->cr4 &= KVM_GUEST_CR4_MASK;
-	vcpu->cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
+	vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK;
+	vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
 }
 
 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 	vmx_fpu_deactivate(vcpu);
 
-	if (vcpu->rmode.active && (cr0 & X86_CR0_PE))
+	if (vcpu->arch.rmode.active && (cr0 & X86_CR0_PE))
 		enter_pmode(vcpu);
 
-	if (!vcpu->rmode.active && !(cr0 & X86_CR0_PE))
+	if (!vcpu->arch.rmode.active && !(cr0 & X86_CR0_PE))
 		enter_rmode(vcpu);
 
 #ifdef CONFIG_X86_64
-	if (vcpu->shadow_efer & EFER_LME) {
+	if (vcpu->arch.shadow_efer & EFER_LME) {
 		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
 			enter_lmode(vcpu);
 		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
@@ -1269,7 +1270,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	vmcs_writel(CR0_READ_SHADOW, cr0);
 	vmcs_writel(GUEST_CR0,
 		    (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
-	vcpu->cr0 = cr0;
+	vcpu->arch.cr0 = cr0;
 
 	if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
 		vmx_fpu_activate(vcpu);
@@ -1278,16 +1279,16 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
 	vmcs_writel(GUEST_CR3, cr3);
-	if (vcpu->cr0 & X86_CR0_PE)
+	if (vcpu->arch.cr0 & X86_CR0_PE)
 		vmx_fpu_deactivate(vcpu);
 }
 
 static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	vmcs_writel(CR4_READ_SHADOW, cr4);
-	vmcs_writel(GUEST_CR4, cr4 | (vcpu->rmode.active ?
+	vmcs_writel(GUEST_CR4, cr4 | (vcpu->arch.rmode.active ?
 		    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON));
-	vcpu->cr4 = cr4;
+	vcpu->arch.cr4 = cr4;
 }
 
 #ifdef CONFIG_X86_64
@@ -1297,7 +1298,7 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
 
-	vcpu->shadow_efer = efer;
+	vcpu->arch.shadow_efer = efer;
 	if (efer & EFER_LMA) {
 		vmcs_write32(VM_ENTRY_CONTROLS,
 			     vmcs_read32(VM_ENTRY_CONTROLS) |
@@ -1374,17 +1375,17 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
 	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
 	u32 ar;
 
-	if (vcpu->rmode.active && seg == VCPU_SREG_TR) {
-		vcpu->rmode.tr.selector = var->selector;
-		vcpu->rmode.tr.base = var->base;
-		vcpu->rmode.tr.limit = var->limit;
-		vcpu->rmode.tr.ar = vmx_segment_access_rights(var);
+	if (vcpu->arch.rmode.active && seg == VCPU_SREG_TR) {
+		vcpu->arch.rmode.tr.selector = var->selector;
+		vcpu->arch.rmode.tr.base = var->base;
+		vcpu->arch.rmode.tr.limit = var->limit;
+		vcpu->arch.rmode.tr.ar = vmx_segment_access_rights(var);
 		return;
 	}
 	vmcs_writel(sf->base, var->base);
 	vmcs_write32(sf->limit, var->limit);
 	vmcs_write16(sf->selector, var->selector);
-	if (vcpu->rmode.active && var->s) {
+	if (vcpu->arch.rmode.active && var->s) {
 		/*
 		 * Hack real-mode segments into vm86 compatibility.
 		 */
@@ -1613,9 +1614,9 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 		goto out;
 	}
 
-	vmx->vcpu.rmode.active = 0;
+	vmx->vcpu.arch.rmode.active = 0;
 
-	vmx->vcpu.regs[VCPU_REGS_RDX] = get_rdx_init_val();
+	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
 	set_cr8(&vmx->vcpu, 0);
 	msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
 	if (vmx->vcpu.vcpu_id == 0)
@@ -1632,8 +1633,8 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 		vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
 		vmcs_writel(GUEST_CS_BASE, 0x000f0000);
 	} else {
-		vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.sipi_vector << 8);
-		vmcs_writel(GUEST_CS_BASE, vmx->vcpu.sipi_vector << 12);
+		vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8);
+		vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12);
 	}
 	vmcs_write32(GUEST_CS_LIMIT, 0xffff);
 	vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
@@ -1691,7 +1692,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 		vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
 		if (vm_need_tpr_shadow(vmx->vcpu.kvm))
 			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
-				     page_to_phys(vmx->vcpu.apic->regs_page));
+				     page_to_phys(vmx->vcpu.arch.apic->regs_page));
 		vmcs_write32(TPR_THRESHOLD, 0);
 	}
 
@@ -1699,8 +1700,8 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 		vmcs_write64(APIC_ACCESS_ADDR,
 			     page_to_phys(vmx->vcpu.kvm->apic_access_page));
 
-	vmx->vcpu.cr0 = 0x60000010;
-	vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cr0); /* enter rmode */
+	vmx->vcpu.arch.cr0 = 0x60000010;
+	vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
 	vmx_set_cr4(&vmx->vcpu, 0);
 #ifdef CONFIG_X86_64
 	vmx_set_efer(&vmx->vcpu, 0);
@@ -1718,7 +1719,7 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-	if (vcpu->rmode.active) {
+	if (vcpu->arch.rmode.active) {
 		vmx->rmode.irq.pending = true;
 		vmx->rmode.irq.vector = irq;
 		vmx->rmode.irq.rip = vmcs_readl(GUEST_RIP);
@@ -1734,13 +1735,13 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
 
 static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
 {
-	int word_index = __ffs(vcpu->irq_summary);
-	int bit_index = __ffs(vcpu->irq_pending[word_index]);
+	int word_index = __ffs(vcpu->arch.irq_summary);
+	int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
 	int irq = word_index * BITS_PER_LONG + bit_index;
 
-	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
-	if (!vcpu->irq_pending[word_index])
-		clear_bit(word_index, &vcpu->irq_summary);
+	clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
+	if (!vcpu->arch.irq_pending[word_index])
+		clear_bit(word_index, &vcpu->arch.irq_summary);
 	vmx_inject_irq(vcpu, irq);
 }
 
@@ -1750,12 +1751,12 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
 {
 	u32 cpu_based_vm_exec_control;
 
-	vcpu->interrupt_window_open =
+	vcpu->arch.interrupt_window_open =
 		((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
 		 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
 
-	if (vcpu->interrupt_window_open &&
-	    vcpu->irq_summary &&
+	if (vcpu->arch.interrupt_window_open &&
+	    vcpu->arch.irq_summary &&
 	    !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
 		/*
 		 * If interrupts enabled, and not blocked by sti or mov ss. Good.
@@ -1763,8 +1764,8 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
 		kvm_do_inject_irq(vcpu);
 
 	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
-	if (!vcpu->interrupt_window_open &&
-	    (vcpu->irq_summary || kvm_run->request_interrupt_window))
+	if (!vcpu->arch.interrupt_window_open &&
+	    (vcpu->arch.irq_summary || kvm_run->request_interrupt_window))
 		/*
 		 * Interrupts blocked. Wait for unblock.
 		 */
@@ -1812,7 +1813,7 @@ static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
 static int handle_rmode_exception(struct kvm_vcpu *vcpu,
 				  int vec, u32 err_code)
 {
-	if (!vcpu->rmode.active)
+	if (!vcpu->arch.rmode.active)
 		return 0;
 
 	/*
@@ -1843,8 +1844,8 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) {
 		int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
-		set_bit(irq, vcpu->irq_pending);
-		set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
+		set_bit(irq, vcpu->arch.irq_pending);
+		set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
 	}
 
 	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
@@ -1871,11 +1872,11 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		return kvm_mmu_page_fault(vcpu, cr2, error_code);
 	}
 
-	if (vcpu->rmode.active &&
+	if (vcpu->arch.rmode.active &&
 	    handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
 				   error_code)) {
-		if (vcpu->halt_request) {
-			vcpu->halt_request = 0;
+		if (vcpu->arch.halt_request) {
+			vcpu->arch.halt_request = 0;
 			return kvm_emulate_halt(vcpu);
 		}
 		return 1;
@@ -1956,22 +1957,22 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		switch (cr) {
 		case 0:
 			vcpu_load_rsp_rip(vcpu);
-			set_cr0(vcpu, vcpu->regs[reg]);
+			set_cr0(vcpu, vcpu->arch.regs[reg]);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 3:
 			vcpu_load_rsp_rip(vcpu);
-			set_cr3(vcpu, vcpu->regs[reg]);
+			set_cr3(vcpu, vcpu->arch.regs[reg]);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 4:
 			vcpu_load_rsp_rip(vcpu);
-			set_cr4(vcpu, vcpu->regs[reg]);
+			set_cr4(vcpu, vcpu->arch.regs[reg]);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 8:
 			vcpu_load_rsp_rip(vcpu);
-			set_cr8(vcpu, vcpu->regs[reg]);
+			set_cr8(vcpu, vcpu->arch.regs[reg]);
 			skip_emulated_instruction(vcpu);
 			if (irqchip_in_kernel(vcpu->kvm))
 				return 1;
@@ -1982,8 +1983,8 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	case 2: /* clts */
 		vcpu_load_rsp_rip(vcpu);
 		vmx_fpu_deactivate(vcpu);
-		vcpu->cr0 &= ~X86_CR0_TS;
-		vmcs_writel(CR0_READ_SHADOW, vcpu->cr0);
+		vcpu->arch.cr0 &= ~X86_CR0_TS;
+		vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
 		vmx_fpu_activate(vcpu);
 		skip_emulated_instruction(vcpu);
 		return 1;
@@ -1991,13 +1992,13 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		switch (cr) {
 		case 3:
 			vcpu_load_rsp_rip(vcpu);
-			vcpu->regs[reg] = vcpu->cr3;
+			vcpu->arch.regs[reg] = vcpu->arch.cr3;
 			vcpu_put_rsp_rip(vcpu);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 8:
 			vcpu_load_rsp_rip(vcpu);
-			vcpu->regs[reg] = get_cr8(vcpu);
+			vcpu->arch.regs[reg] = get_cr8(vcpu);
 			vcpu_put_rsp_rip(vcpu);
 			skip_emulated_instruction(vcpu);
 			return 1;
@@ -2043,7 +2044,7 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		default:
 			val = 0;
 		}
-		vcpu->regs[reg] = val;
+		vcpu->arch.regs[reg] = val;
 	} else {
 		/* mov to dr */
 	}
@@ -2060,7 +2061,7 @@ static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
+	u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
 	u64 data;
 
 	if (vmx_get_msr(vcpu, ecx, &data)) {
@@ -2069,17 +2070,17 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	}
 
 	/* FIXME: handling of bits 32:63 of rax, rdx */
-	vcpu->regs[VCPU_REGS_RAX] = data & -1u;
-	vcpu->regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
+	vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
+	vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
 	skip_emulated_instruction(vcpu);
 	return 1;
 }
 
 static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
-	u64 data = (vcpu->regs[VCPU_REGS_RAX] & -1u)
-		| ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
+	u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
+	u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
+		| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 
 	if (vmx_set_msr(vcpu, ecx, data) != 0) {
 		kvm_inject_gp(vcpu, 0);
@@ -2110,7 +2111,7 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu,
 	 * possible
 	 */
 	if (kvm_run->request_interrupt_window &&
-	    !vcpu->irq_summary) {
+	    !vcpu->arch.irq_summary) {
 		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
 		++vcpu->stat.irq_window_exits;
 		return 0;
@@ -2270,7 +2271,7 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 	if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) {
 		if ((idtv_info_field & VECTORING_INFO_TYPE_MASK)
 		    == INTR_TYPE_EXT_INTR
-		    && vcpu->rmode.active) {
+		    && vcpu->arch.rmode.active) {
 			u8 vect = idtv_info_field & VECTORING_INFO_VECTOR_MASK;
 
 			vmx_inject_irq(vcpu, vect);
@@ -2424,24 +2425,24 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	      : : "c"(vmx), "d"((unsigned long)HOST_RSP),
 		[launched]"i"(offsetof(struct vcpu_vmx, launched)),
 		[fail]"i"(offsetof(struct vcpu_vmx, fail)),
-		[rax]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RAX])),
-		[rbx]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RBX])),
-		[rcx]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RCX])),
-		[rdx]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RDX])),
-		[rsi]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RSI])),
-		[rdi]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RDI])),
-		[rbp]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RBP])),
+		[rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
+		[rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
+		[rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
+		[rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
+		[rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
+		[rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
+		[rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
 #ifdef CONFIG_X86_64
-		[r8]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R8])),
-		[r9]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R9])),
-		[r10]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R10])),
-		[r11]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R11])),
-		[r12]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R12])),
-		[r13]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R13])),
-		[r14]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R14])),
-		[r15]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R15])),
+		[r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
+		[r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
+		[r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
+		[r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
+		[r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
+		[r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
+		[r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
+		[r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
 #endif
-		[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.cr2))
+		[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
 	      : "cc", "memory"
 #ifdef CONFIG_X86_64
 		, "rbx", "rdi", "rsi"
@@ -2455,7 +2456,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if (vmx->rmode.irq.pending)
 		fixup_rmode_irq(vmx);
 
-	vcpu->interrupt_window_open =
+	vcpu->arch.interrupt_window_open =
 		(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
 
 	asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
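
To round off, a hypothetical helper (not part of this patch or the kernel tree) showing the access pattern the updated vcpu_load_rsp_rip() comment describes: once rsp/rip have been synced out of the VMCS, any guest register is reachable by indexing vcpu->arch.regs. The VCPU_REGS_* indices are kvm's real ones; print_guest_gprs() itself is invented for this sketch.

	/* Hypothetical debug helper, assuming it sits in vmx.c next to
	 * vcpu_load_rsp_rip(); illustrative only. */
	static void print_guest_gprs(struct kvm_vcpu *vcpu)
	{
		vcpu_load_rsp_rip(vcpu);	/* pull GUEST_RSP/GUEST_RIP out of the VMCS */
		printk(KERN_DEBUG "rax=%lx rsp=%lx rip=%lx\n",
		       vcpu->arch.regs[VCPU_REGS_RAX],
		       vcpu->arch.regs[VCPU_REGS_RSP],
		       vcpu->arch.rip);
	}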