author     Rusty Russell <rusty@rustcorp.com.au>    2007-07-30 02:31:43 -0400
committer  Avi Kivity <avi@qumranet.com>            2007-10-13 04:18:21 -0400
commit     8b9cf98cc7ea7354d6d4cbc4ffdb18a26a1129d3 (patch)
tree       e9bb48a799538c24c2ccd6b5023446d18076c881 /drivers
parent     9bd01506ee551689b90ba5822c28ef55207146af (diff)
KVM: VMX: pass vcpu_vmx internally
container_of is wonderful, but not casting at all is better.  This patch
changes vmx.c's internal functions to pass "struct vcpu_vmx" instead of
passing "struct kvm_vcpu" and using container_of.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Avi Kivity <avi@qumranet.com>
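For readers unfamiliar with the idiom being removed, below is a minimal, self-contained sketch of the container_of pattern and of the calling convention the patch switches to. The struct layouts and the helpers vcpu_clear_sketch/decache_sketch are simplified illustrations, not the real kernel definitions; only to_vmx and container_of mirror the shape of the actual code.

    #include <stddef.h>

    /* Simplified form of the kernel's container_of(): recover a pointer to
     * the enclosing structure from a pointer to one of its members. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Struct layouts abbreviated for illustration only. */
    struct kvm_vcpu {
            int cpu;                        /* generic, arch-independent state */
    };

    struct vcpu_vmx {
            struct kvm_vcpu vcpu;           /* generic part embedded as a member */
            int launched;                   /* VMX-specific state */
    };

    /* Before the patch: internal helpers took a kvm_vcpu and cast back up. */
    static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
    {
            return container_of(vcpu, struct vcpu_vmx, vcpu);
    }

    /* After the patch: internal helpers take vcpu_vmx directly; the generic
     * state is reached by plain member access instead of a cast. */
    static void vcpu_clear_sketch(struct vcpu_vmx *vmx)
    {
            if (vmx->vcpu.cpu != -1)        /* generic field via embedded member */
                    vmx->launched = 0;      /* VMX-specific field directly */
    }

    /* Callers that only hold the generic vcpu convert once at the boundary;
     * everything below that point stays cast-free. */
    static void decache_sketch(struct kvm_vcpu *vcpu)
    {
            vcpu_clear_sketch(to_vmx(vcpu));
    }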
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/kvm/vmx.c  140
1 file changed, 65 insertions(+), 75 deletions(-)
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 5b77d9b7b1ac..cc7ee3d484fb 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -140,9 +140,8 @@ static inline u64 msr_efer_save_restore_bits(struct kvm_msr_entry msr)
 	return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
 }
 
-static inline int msr_efer_need_save_restore(struct kvm_vcpu *vcpu)
+static inline int msr_efer_need_save_restore(struct vcpu_vmx *vmx)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int efer_offset = vmx->msr_offset_efer;
 	return msr_efer_save_restore_bits(vmx->host_msrs[efer_offset]) !=
 		msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
@@ -168,9 +167,8 @@ static inline int is_external_interrupt(u32 intr_info)
 		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
 }
 
-static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr)
+static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int i;
 
 	for (i = 0; i < vmx->nmsrs; ++i)
@@ -179,12 +177,11 @@ static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr)
 	return -1;
 }
 
-static struct kvm_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
+static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int i;
 
-	i = __find_msr_index(vcpu, msr);
+	i = __find_msr_index(vmx, msr);
 	if (i >= 0)
 		return &vmx->guest_msrs[i];
 	return NULL;
@@ -205,24 +202,24 @@ static void vmcs_clear(struct vmcs *vmcs)
 
 static void __vcpu_clear(void *arg)
 {
-	struct kvm_vcpu *vcpu = arg;
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct vcpu_vmx *vmx = arg;
 	int cpu = raw_smp_processor_id();
 
-	if (vcpu->cpu == cpu)
+	if (vmx->vcpu.cpu == cpu)
 		vmcs_clear(vmx->vmcs);
 	if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
 		per_cpu(current_vmcs, cpu) = NULL;
-	rdtscll(vcpu->host_tsc);
+	rdtscll(vmx->vcpu.host_tsc);
 }
 
-static void vcpu_clear(struct kvm_vcpu *vcpu)
+static void vcpu_clear(struct vcpu_vmx *vmx)
 {
-	if (vcpu->cpu != raw_smp_processor_id() && vcpu->cpu != -1)
-		smp_call_function_single(vcpu->cpu, __vcpu_clear, vcpu, 0, 1);
+	if (vmx->vcpu.cpu != raw_smp_processor_id() && vmx->vcpu.cpu != -1)
+		smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear,
+					 vmx, 0, 1);
 	else
-		__vcpu_clear(vcpu);
-	to_vmx(vcpu)->launched = 0;
+		__vcpu_clear(vmx);
+	vmx->launched = 0;
 }
 
 static unsigned long vmcs_readl(unsigned long field)
@@ -332,23 +329,20 @@ static void reload_tss(void)
 #endif
 }
 
-static void load_transition_efer(struct kvm_vcpu *vcpu)
+static void load_transition_efer(struct vcpu_vmx *vmx)
 {
 	u64 trans_efer;
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int efer_offset = vmx->msr_offset_efer;
 
 	trans_efer = vmx->host_msrs[efer_offset].data;
 	trans_efer &= ~EFER_SAVE_RESTORE_BITS;
 	trans_efer |= msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
 	wrmsrl(MSR_EFER, trans_efer);
-	vcpu->stat.efer_reload++;
+	vmx->vcpu.stat.efer_reload++;
 }
 
-static void vmx_save_host_state(struct kvm_vcpu *vcpu)
+static void vmx_save_host_state(struct vcpu_vmx *vmx)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-
 	if (vmx->host_state.loaded)
 		return;
 
@@ -383,19 +377,18 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 #endif
 
 #ifdef CONFIG_X86_64
-	if (is_long_mode(vcpu)) {
+	if (is_long_mode(&vmx->vcpu)) {
 		save_msrs(vmx->host_msrs +
 			  vmx->msr_offset_kernel_gs_base, 1);
 	}
 #endif
 	load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
-	if (msr_efer_need_save_restore(vcpu))
-		load_transition_efer(vcpu);
+	if (msr_efer_need_save_restore(vmx))
+		load_transition_efer(vmx);
 }
 
-static void vmx_load_host_state(struct kvm_vcpu *vcpu)
+static void vmx_load_host_state(struct vcpu_vmx *vmx)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long flags;
 
 	if (!vmx->host_state.loaded)
@@ -420,7 +413,7 @@ static void vmx_load_host_state(struct kvm_vcpu *vcpu)
 	}
 	save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
 	load_msrs(vmx->host_msrs, vmx->save_nmsrs);
-	if (msr_efer_need_save_restore(vcpu))
+	if (msr_efer_need_save_restore(vmx))
 		load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
 }
 
@@ -435,7 +428,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	u64 tsc_this, delta;
 
 	if (vcpu->cpu != cpu)
-		vcpu_clear(vcpu);
+		vcpu_clear(vmx);
 
 	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
 		u8 error;
@@ -476,7 +469,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	vmx_load_host_state(vcpu);
+	vmx_load_host_state(to_vmx(vcpu));
 	kvm_put_guest_fpu(vcpu);
 }
 
@@ -502,7 +495,7 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
 
 static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
 {
-	vcpu_clear(vcpu);
+	vcpu_clear(to_vmx(vcpu));
 }
 
 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
@@ -550,9 +543,8 @@ static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
 /*
  * Swap MSR entry in host/guest MSR entry array.
  */
-void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
+static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct kvm_msr_entry tmp;
 
 	tmp = vmx->guest_msrs[to];
@@ -568,44 +560,43 @@ void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
  * msrs. Don't touch the 64-bit msrs if the guest is in legacy
  * mode, as fiddling with msrs is very expensive.
  */
-static void setup_msrs(struct kvm_vcpu *vcpu)
+static void setup_msrs(struct vcpu_vmx *vmx)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int save_nmsrs;
 
 	save_nmsrs = 0;
 #ifdef CONFIG_X86_64
-	if (is_long_mode(vcpu)) {
+	if (is_long_mode(&vmx->vcpu)) {
 		int index;
 
-		index = __find_msr_index(vcpu, MSR_SYSCALL_MASK);
+		index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
 		if (index >= 0)
-			move_msr_up(vcpu, index, save_nmsrs++);
-		index = __find_msr_index(vcpu, MSR_LSTAR);
+			move_msr_up(vmx, index, save_nmsrs++);
+		index = __find_msr_index(vmx, MSR_LSTAR);
 		if (index >= 0)
-			move_msr_up(vcpu, index, save_nmsrs++);
-		index = __find_msr_index(vcpu, MSR_CSTAR);
+			move_msr_up(vmx, index, save_nmsrs++);
+		index = __find_msr_index(vmx, MSR_CSTAR);
 		if (index >= 0)
-			move_msr_up(vcpu, index, save_nmsrs++);
-		index = __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
+			move_msr_up(vmx, index, save_nmsrs++);
+		index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
 		if (index >= 0)
-			move_msr_up(vcpu, index, save_nmsrs++);
+			move_msr_up(vmx, index, save_nmsrs++);
 		/*
 		 * MSR_K6_STAR is only needed on long mode guests, and only
 		 * if efer.sce is enabled.
 		 */
-		index = __find_msr_index(vcpu, MSR_K6_STAR);
-		if ((index >= 0) && (vcpu->shadow_efer & EFER_SCE))
-			move_msr_up(vcpu, index, save_nmsrs++);
+		index = __find_msr_index(vmx, MSR_K6_STAR);
+		if ((index >= 0) && (vmx->vcpu.shadow_efer & EFER_SCE))
+			move_msr_up(vmx, index, save_nmsrs++);
 	}
 #endif
 	vmx->save_nmsrs = save_nmsrs;
 
 #ifdef CONFIG_X86_64
 	vmx->msr_offset_kernel_gs_base =
-		__find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
+		__find_msr_index(vmx, MSR_KERNEL_GS_BASE);
 #endif
-	vmx->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
+	vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
 }
 
 /*
@@ -672,7 +663,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 		data = vmcs_readl(GUEST_SYSENTER_ESP);
 		break;
 	default:
-		msr = find_msr_entry(vcpu, msr_index);
+		msr = find_msr_entry(to_vmx(vcpu), msr_index);
 		if (msr) {
 			data = msr->data;
 			break;
@@ -700,7 +691,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 	case MSR_EFER:
 		ret = kvm_set_msr_common(vcpu, msr_index, data);
 		if (vmx->host_state.loaded)
-			load_transition_efer(vcpu);
+			load_transition_efer(vmx);
 		break;
 	case MSR_FS_BASE:
 		vmcs_writel(GUEST_FS_BASE, data);
@@ -722,7 +713,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 		guest_write_tsc(data);
 		break;
 	default:
-		msr = find_msr_entry(vcpu, msr_index);
+		msr = find_msr_entry(vmx, msr_index);
 		if (msr) {
 			msr->data = data;
 			if (vmx->host_state.loaded)
@@ -1116,7 +1107,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
 
 	vcpu->shadow_efer |= EFER_LMA;
 
-	find_msr_entry(vcpu, MSR_EFER)->data |= EFER_LMA | EFER_LME;
+	find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
 	vmcs_write32(VM_ENTRY_CONTROLS,
 		     vmcs_read32(VM_ENTRY_CONTROLS)
 		     | VM_ENTRY_CONTROLS_IA32E_MASK);
@@ -1186,7 +1177,8 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 
 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
-	struct kvm_msr_entry *msr = find_msr_entry(vcpu, MSR_EFER);
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
 
 	vcpu->shadow_efer = efer;
 	if (efer & EFER_LMA) {
@@ -1202,7 +1194,7 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
 		msr->data = efer & ~EFER_LME;
 	}
-	setup_msrs(vcpu);
+	setup_msrs(vmx);
 }
 
 #endif
@@ -1364,9 +1356,8 @@ static void seg_setup(int seg)
 /*
  * Sets up the vmcs for emulated real mode.
  */
-static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
+static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 host_sysenter_cs;
 	u32 junk;
 	unsigned long a;
@@ -1375,19 +1366,18 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 	int ret = 0;
 	unsigned long kvm_vmx_return;
 
-	if (!init_rmode_tss(vcpu->kvm)) {
+	if (!init_rmode_tss(vmx->vcpu.kvm)) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	memset(vcpu->regs, 0, sizeof(vcpu->regs));
-	vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val();
-	vcpu->cr8 = 0;
-	vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
-	if (vcpu->vcpu_id == 0)
-		vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
+	vmx->vcpu.regs[VCPU_REGS_RDX] = get_rdx_init_val();
+	vmx->vcpu.cr8 = 0;
+	vmx->vcpu.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+	if (vmx->vcpu.vcpu_id == 0)
+		vmx->vcpu.apic_base |= MSR_IA32_APICBASE_BSP;
 
-	fx_init(vcpu);
+	fx_init(&vmx->vcpu);
 
 	/*
 	 * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
@@ -1512,7 +1502,7 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 		++vmx->nmsrs;
 	}
 
-	setup_msrs(vcpu);
+	setup_msrs(vmx);
 
 	vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
 
@@ -1529,14 +1519,14 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
 	vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
 
-	vcpu->cr0 = 0x60000010;
-	vmx_set_cr0(vcpu, vcpu->cr0); // enter rmode
-	vmx_set_cr4(vcpu, 0);
+	vmx->vcpu.cr0 = 0x60000010;
+	vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cr0); // enter rmode
+	vmx_set_cr4(&vmx->vcpu, 0);
 #ifdef CONFIG_X86_64
-	vmx_set_efer(vcpu, 0);
+	vmx_set_efer(&vmx->vcpu, 0);
 #endif
-	vmx_fpu_activate(vcpu);
-	update_exception_bitmap(vcpu);
+	vmx_fpu_activate(&vmx->vcpu);
+	update_exception_bitmap(&vmx->vcpu);
 
 	return 0;
 
@@ -2129,7 +2119,7 @@ again:
 	if (!vcpu->mmio_read_completed)
 		do_interrupt_requests(vcpu, kvm_run);
 
-	vmx_save_host_state(vcpu);
+	vmx_save_host_state(vmx);
 	kvm_load_guest_fpu(vcpu);
 
 	/*
@@ -2352,7 +2342,7 @@ static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	if (vmx->vmcs) {
-		on_each_cpu(__vcpu_clear, vcpu, 0, 1);
+		on_each_cpu(__vcpu_clear, vmx, 0, 1);
 		free_vmcs(vmx->vmcs);
 		vmx->vmcs = NULL;
 	}
@@ -2400,7 +2390,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 
 	cpu = get_cpu();
 	vmx_vcpu_load(&vmx->vcpu, cpu);
-	err = vmx_vcpu_setup(&vmx->vcpu);
+	err = vmx_vcpu_setup(vmx);
 	vmx_vcpu_put(&vmx->vcpu);
 	put_cpu();
 	if (err)