path: root/drivers/kvm/vmx.c
author	Gregory Haskins <ghaskins@novell.com>	2007-07-27 08:13:10 -0400
committer	Avi Kivity <avi@qumranet.com>	2007-10-13 04:18:20 -0400
commit	a2fa3e9f52d875f7d4ca98434603b8756be71ba8 (patch)
tree	915c13bfedc867d4d2e4b98c4d3b10b6ef25d451 /drivers/kvm/vmx.c
parent	c820c2aa27bb5b6069aa708b0a0b44b59a16bfa7 (diff)
KVM: Remove arch specific components from the general code
struct kvm_vcpu has vmx-specific members; remove them to a private structure.

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/vmx.c')
-rw-r--r--	drivers/kvm/vmx.c	249
1 file changed, 158 insertions(+), 91 deletions(-)
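The whole change follows one pattern: struct kvm_vcpu keeps only an opaque _priv pointer, the new struct vcpu_vmx holds everything VMX-specific (the VMCS pointer, the guest/host MSR arrays, launched, the cached host segment state), vmx_create_vcpu() allocates it with kzalloc() and hangs it off vcpu->_priv, and every former vcpu->vmcs / vcpu->guest_msrs access goes through to_vmx(vcpu) instead. The stand-alone sketch below only illustrates that pattern; it is not kernel code, and every name prefixed with sample_ is invented for the example.

/*
 * Minimal sketch of the "opaque private pointer" pattern this patch
 * introduces (illustration only, not kernel code; all sample_ names
 * are invented): the generic vcpu never looks behind _priv, and the
 * arch module reaches its own state through a small accessor.
 */
#include <stdio.h>
#include <stdlib.h>

struct sample_vcpu {
	int cpu;
	void *_priv;			/* arch-specific state, opaque to generic code */
};

struct sample_vcpu_vmx {
	struct sample_vcpu *vcpu;
	int launched;			/* VMX-only bookkeeping lives here now */
};

static inline struct sample_vcpu_vmx *to_vmx(struct sample_vcpu *vcpu)
{
	return (struct sample_vcpu_vmx *)vcpu->_priv;
}

static int sample_create_vcpu(struct sample_vcpu *vcpu)
{
	struct sample_vcpu_vmx *vmx = calloc(1, sizeof(*vmx));

	if (!vmx)
		return -1;
	vmx->vcpu = vcpu;
	vcpu->_priv = vmx;		/* generic code only stores the pointer */
	return 0;
}

int main(void)
{
	struct sample_vcpu vcpu = { .cpu = 0 };

	if (sample_create_vcpu(&vcpu))
		return 1;
	to_vmx(&vcpu)->launched = 1;	/* arch code reaches its own state */
	printf("launched = %d\n", to_vmx(&vcpu)->launched);
	free(vcpu._priv);
	return 0;
}

Running the sketch prints "launched = 1": the arch module owns and reaches its private state through the accessor while the generic structure stays arch-agnostic, which is exactly what the diff below does for the VMX fields.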
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index dac2f93d1a07..96837d6ed50b 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -32,6 +32,37 @@
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
+struct vmcs {
+	u32 revision_id;
+	u32 abort;
+	char data[0];
+};
+
+struct vcpu_vmx {
+	struct kvm_vcpu *vcpu;
+	int launched;
+	struct kvm_msr_entry *guest_msrs;
+	struct kvm_msr_entry *host_msrs;
+	int nmsrs;
+	int save_nmsrs;
+	int msr_offset_efer;
+#ifdef CONFIG_X86_64
+	int msr_offset_kernel_gs_base;
+#endif
+	struct vmcs *vmcs;
+	struct {
+		int loaded;
+		u16 fs_sel, gs_sel, ldt_sel;
+		int fs_gs_ldt_reload_needed;
+	}host_state;
+
+};
+
+static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
+{
+	return (struct vcpu_vmx*)vcpu->_priv;
+}
+
 static int init_rmode_tss(struct kvm *kvm);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
@@ -89,16 +120,33 @@ static const u32 vmx_msr_index[] = {
 };
 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
 
-static inline u64 msr_efer_save_restore_bits(struct vmx_msr_entry msr)
+static void load_msrs(struct kvm_msr_entry *e, int n)
+{
+	int i;
+
+	for (i = 0; i < n; ++i)
+		wrmsrl(e[i].index, e[i].data);
+}
+
+static void save_msrs(struct kvm_msr_entry *e, int n)
+{
+	int i;
+
+	for (i = 0; i < n; ++i)
+		rdmsrl(e[i].index, e[i].data);
+}
+
+static inline u64 msr_efer_save_restore_bits(struct kvm_msr_entry msr)
 {
 	return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
 }
 
 static inline int msr_efer_need_save_restore(struct kvm_vcpu *vcpu)
 {
-	int efer_offset = vcpu->msr_offset_efer;
-	return msr_efer_save_restore_bits(vcpu->host_msrs[efer_offset]) !=
-		msr_efer_save_restore_bits(vcpu->guest_msrs[efer_offset]);
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	int efer_offset = vmx->msr_offset_efer;
+	return msr_efer_save_restore_bits(vmx->host_msrs[efer_offset]) !=
+		msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
 }
 
 static inline int is_page_fault(u32 intr_info)
@@ -123,21 +171,23 @@ static inline int is_external_interrupt(u32 intr_info)
 
 static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr)
 {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int i;
 
-	for (i = 0; i < vcpu->nmsrs; ++i)
-		if (vcpu->guest_msrs[i].index == msr)
+	for (i = 0; i < vmx->nmsrs; ++i)
+		if (vmx->guest_msrs[i].index == msr)
 			return i;
 	return -1;
 }
 
-static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
+static struct kvm_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
 {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int i;
 
 	i = __find_msr_index(vcpu, msr);
 	if (i >= 0)
-		return &vcpu->guest_msrs[i];
+		return &vmx->guest_msrs[i];
 	return NULL;
 }
 
@@ -157,11 +207,12 @@ static void vmcs_clear(struct vmcs *vmcs)
 static void __vcpu_clear(void *arg)
 {
 	struct kvm_vcpu *vcpu = arg;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int cpu = raw_smp_processor_id();
 
 	if (vcpu->cpu == cpu)
-		vmcs_clear(vcpu->vmcs);
-	if (per_cpu(current_vmcs, cpu) == vcpu->vmcs)
+		vmcs_clear(vmx->vmcs);
+	if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
 		per_cpu(current_vmcs, cpu) = NULL;
 	rdtscll(vcpu->host_tsc);
 }
@@ -172,7 +223,7 @@ static void vcpu_clear(struct kvm_vcpu *vcpu)
 		smp_call_function_single(vcpu->cpu, __vcpu_clear, vcpu, 0, 1);
 	else
 		__vcpu_clear(vcpu);
-	vcpu->launched = 0;
+	to_vmx(vcpu)->launched = 0;
 }
 
 static unsigned long vmcs_readl(unsigned long field)
@@ -285,80 +336,81 @@ static void reload_tss(void)
 static void load_transition_efer(struct kvm_vcpu *vcpu)
 {
 	u64 trans_efer;
-	int efer_offset = vcpu->msr_offset_efer;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	int efer_offset = vmx->msr_offset_efer;
 
-	trans_efer = vcpu->host_msrs[efer_offset].data;
+	trans_efer = vmx->host_msrs[efer_offset].data;
 	trans_efer &= ~EFER_SAVE_RESTORE_BITS;
-	trans_efer |= msr_efer_save_restore_bits(
-		vcpu->guest_msrs[efer_offset]);
+	trans_efer |= msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
 	wrmsrl(MSR_EFER, trans_efer);
 	vcpu->stat.efer_reload++;
 }
 
 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 {
-	struct vmx_host_state *hs = &vcpu->vmx_host_state;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-	if (hs->loaded)
+	if (vmx->host_state.loaded)
 		return;
 
-	hs->loaded = 1;
+	vmx->host_state.loaded = 1;
 	/*
 	 * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
 	 * allow segment selectors with cpl > 0 or ti == 1.
 	 */
-	hs->ldt_sel = read_ldt();
-	hs->fs_gs_ldt_reload_needed = hs->ldt_sel;
-	hs->fs_sel = read_fs();
-	if (!(hs->fs_sel & 7))
-		vmcs_write16(HOST_FS_SELECTOR, hs->fs_sel);
+	vmx->host_state.ldt_sel = read_ldt();
+	vmx->host_state.fs_gs_ldt_reload_needed = vmx->host_state.ldt_sel;
+	vmx->host_state.fs_sel = read_fs();
+	if (!(vmx->host_state.fs_sel & 7))
+		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
 	else {
 		vmcs_write16(HOST_FS_SELECTOR, 0);
-		hs->fs_gs_ldt_reload_needed = 1;
+		vmx->host_state.fs_gs_ldt_reload_needed = 1;
 	}
-	hs->gs_sel = read_gs();
-	if (!(hs->gs_sel & 7))
-		vmcs_write16(HOST_GS_SELECTOR, hs->gs_sel);
+	vmx->host_state.gs_sel = read_gs();
+	if (!(vmx->host_state.gs_sel & 7))
+		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
 	else {
 		vmcs_write16(HOST_GS_SELECTOR, 0);
-		hs->fs_gs_ldt_reload_needed = 1;
+		vmx->host_state.fs_gs_ldt_reload_needed = 1;
 	}
 
 #ifdef CONFIG_X86_64
 	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
 	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
 #else
-	vmcs_writel(HOST_FS_BASE, segment_base(hs->fs_sel));
-	vmcs_writel(HOST_GS_BASE, segment_base(hs->gs_sel));
+	vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
+	vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
 #endif
 
 #ifdef CONFIG_X86_64
 	if (is_long_mode(vcpu)) {
-		save_msrs(vcpu->host_msrs + vcpu->msr_offset_kernel_gs_base, 1);
+		save_msrs(vmx->host_msrs +
+			  vmx->msr_offset_kernel_gs_base, 1);
 	}
 #endif
-	load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
+	load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
 	if (msr_efer_need_save_restore(vcpu))
 		load_transition_efer(vcpu);
 }
 
 static void vmx_load_host_state(struct kvm_vcpu *vcpu)
 {
-	struct vmx_host_state *hs = &vcpu->vmx_host_state;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-	if (!hs->loaded)
+	if (!vmx->host_state.loaded)
 		return;
 
-	hs->loaded = 0;
-	if (hs->fs_gs_ldt_reload_needed) {
-		load_ldt(hs->ldt_sel);
-		load_fs(hs->fs_sel);
+	vmx->host_state.loaded = 0;
+	if (vmx->host_state.fs_gs_ldt_reload_needed) {
+		load_ldt(vmx->host_state.ldt_sel);
+		load_fs(vmx->host_state.fs_sel);
 		/*
 		 * If we have to reload gs, we must take care to
 		 * preserve our gs base.
 		 */
 		local_irq_disable();
-		load_gs(hs->gs_sel);
+		load_gs(vmx->host_state.gs_sel);
 #ifdef CONFIG_X86_64
 		wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
 #endif
@@ -366,10 +418,10 @@ static void vmx_load_host_state(struct kvm_vcpu *vcpu)
 
 		reload_tss();
 	}
-	save_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
-	load_msrs(vcpu->host_msrs, vcpu->save_nmsrs);
+	save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
+	load_msrs(vmx->host_msrs, vmx->save_nmsrs);
 	if (msr_efer_need_save_restore(vcpu))
-		load_msrs(vcpu->host_msrs + vcpu->msr_offset_efer, 1);
+		load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
 }
 
 /*
@@ -378,7 +430,8 @@ static void vmx_load_host_state(struct kvm_vcpu *vcpu)
  */
 static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
 {
-	u64 phys_addr = __pa(vcpu->vmcs);
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	u64 phys_addr = __pa(vmx->vmcs);
 	int cpu;
 	u64 tsc_this, delta;
 
@@ -387,16 +440,16 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
 	if (vcpu->cpu != cpu)
 		vcpu_clear(vcpu);
 
-	if (per_cpu(current_vmcs, cpu) != vcpu->vmcs) {
+	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
 		u8 error;
 
-		per_cpu(current_vmcs, cpu) = vcpu->vmcs;
+		per_cpu(current_vmcs, cpu) = vmx->vmcs;
 		asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
 			      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
 			      : "cc");
 		if (error)
 			printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
-			       vcpu->vmcs, phys_addr);
+			       vmx->vmcs, phys_addr);
 	}
 
 	if (vcpu->cpu != cpu) {
@@ -503,13 +556,15 @@ static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
  */
 void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
 {
-	struct vmx_msr_entry tmp;
-	tmp = vcpu->guest_msrs[to];
-	vcpu->guest_msrs[to] = vcpu->guest_msrs[from];
-	vcpu->guest_msrs[from] = tmp;
-	tmp = vcpu->host_msrs[to];
-	vcpu->host_msrs[to] = vcpu->host_msrs[from];
-	vcpu->host_msrs[from] = tmp;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct kvm_msr_entry tmp;
+
+	tmp = vmx->guest_msrs[to];
+	vmx->guest_msrs[to] = vmx->guest_msrs[from];
+	vmx->guest_msrs[from] = tmp;
+	tmp = vmx->host_msrs[to];
+	vmx->host_msrs[to] = vmx->host_msrs[from];
+	vmx->host_msrs[from] = tmp;
 }
 
 /*
@@ -519,6 +574,7 @@ void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
  */
 static void setup_msrs(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int save_nmsrs;
 
 	save_nmsrs = 0;
@@ -547,13 +603,13 @@ static void setup_msrs(struct kvm_vcpu *vcpu)
 			move_msr_up(vcpu, index, save_nmsrs++);
 	}
 #endif
-	vcpu->save_nmsrs = save_nmsrs;
+	vmx->save_nmsrs = save_nmsrs;
 
 #ifdef CONFIG_X86_64
-	vcpu->msr_offset_kernel_gs_base =
+	vmx->msr_offset_kernel_gs_base =
 		__find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
 #endif
-	vcpu->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
+	vmx->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
 }
 
 /*
@@ -589,7 +645,7 @@ static void guest_write_tsc(u64 guest_tsc)
 static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 {
 	u64 data;
-	struct vmx_msr_entry *msr;
+	struct kvm_msr_entry *msr;
 
 	if (!pdata) {
 		printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
@@ -639,14 +695,15 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
  */
 static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 {
-	struct vmx_msr_entry *msr;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct kvm_msr_entry *msr;
 	int ret = 0;
 
 	switch (msr_index) {
 #ifdef CONFIG_X86_64
 	case MSR_EFER:
 		ret = kvm_set_msr_common(vcpu, msr_index, data);
-		if (vcpu->vmx_host_state.loaded)
+		if (vmx->host_state.loaded)
 			load_transition_efer(vcpu);
 		break;
 	case MSR_FS_BASE:
@@ -672,8 +729,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 		msr = find_msr_entry(vcpu, msr_index);
 		if (msr) {
 			msr->data = data;
-			if (vcpu->vmx_host_state.loaded)
-				load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
+			if (vmx->host_state.loaded)
+				load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
 			break;
 		}
 		ret = kvm_set_msr_common(vcpu, msr_index, data);
@@ -1053,7 +1110,7 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 
 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
-	struct vmx_msr_entry *msr = find_msr_entry(vcpu, MSR_EFER);
+	struct kvm_msr_entry *msr = find_msr_entry(vcpu, MSR_EFER);
 
 	vcpu->shadow_efer = efer;
 	if (efer & EFER_LMA) {
@@ -1244,6 +1301,7 @@ static void seg_setup(int seg)
  */
 static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 host_sysenter_cs;
 	u32 junk;
 	unsigned long a;
@@ -1385,18 +1443,18 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 		u32 index = vmx_msr_index[i];
 		u32 data_low, data_high;
 		u64 data;
-		int j = vcpu->nmsrs;
+		int j = vmx->nmsrs;
 
 		if (rdmsr_safe(index, &data_low, &data_high) < 0)
 			continue;
 		if (wrmsr_safe(index, data_low, data_high) < 0)
 			continue;
 		data = data_low | ((u64)data_high << 32);
-		vcpu->host_msrs[j].index = index;
-		vcpu->host_msrs[j].reserved = 0;
-		vcpu->host_msrs[j].data = data;
-		vcpu->guest_msrs[j] = vcpu->host_msrs[j];
-		++vcpu->nmsrs;
+		vmx->host_msrs[j].index = index;
+		vmx->host_msrs[j].reserved = 0;
+		vmx->host_msrs[j].data = data;
+		vmx->guest_msrs[j] = vmx->host_msrs[j];
+		++vmx->nmsrs;
 	}
 
 	setup_msrs(vcpu);
@@ -1999,6 +2057,7 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
 
 static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u8 fail;
 	int r;
 
@@ -2123,7 +2182,7 @@ again:
 #endif
 		"setbe %0 \n\t"
 	      : "=q" (fail)
-	      : "r"(vcpu->launched), "d"((unsigned long)HOST_RSP),
+	      : "r"(vmx->launched), "d"((unsigned long)HOST_RSP),
 		"c"(vcpu),
 		[rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])),
 		[rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
@@ -2167,7 +2226,7 @@ again:
 	if (unlikely(prof_on == KVM_PROFILING))
 		profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
 
-	vcpu->launched = 1;
+	vmx->launched = 1;
 	r = kvm_handle_exit(kvm_run, vcpu);
 	if (r > 0) {
 		/* Give scheduler a change to reschedule. */
@@ -2232,10 +2291,12 @@ static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
 
 static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->vmcs) {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (vmx->vmcs) {
 		on_each_cpu(__vcpu_clear, vcpu, 0, 1);
-		free_vmcs(vcpu->vmcs);
-		vcpu->vmcs = NULL;
+		free_vmcs(vmx->vmcs);
+		vmx->vmcs = NULL;
 	}
 }
 
@@ -2246,33 +2307,39 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 
 static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
 {
-	struct vmcs *vmcs;
+	struct vcpu_vmx *vmx;
 
-	vcpu->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!vcpu->guest_msrs)
+	vmx = kzalloc(sizeof(*vmx), GFP_KERNEL);
+	if (!vmx)
 		return -ENOMEM;
 
-	vcpu->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!vcpu->host_msrs)
-		goto out_free_guest_msrs;
+	vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!vmx->guest_msrs)
+		goto out_free;
 
-	vmcs = alloc_vmcs();
-	if (!vmcs)
-		goto out_free_msrs;
+	vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!vmx->host_msrs)
+		goto out_free;
 
-	vmcs_clear(vmcs);
-	vcpu->vmcs = vmcs;
-	vcpu->launched = 0;
+	vmx->vmcs = alloc_vmcs();
+	if (!vmx->vmcs)
+		goto out_free;
+
+	vmcs_clear(vmx->vmcs);
+
+	vmx->vcpu = vcpu;
+	vcpu->_priv = vmx;
 
 	return 0;
 
-out_free_msrs:
-	kfree(vcpu->host_msrs);
-	vcpu->host_msrs = NULL;
+out_free:
+	if (vmx->host_msrs)
+		kfree(vmx->host_msrs);
+
+	if (vmx->guest_msrs)
+		kfree(vmx->guest_msrs);
 
-out_free_guest_msrs:
-	kfree(vcpu->guest_msrs);
-	vcpu->guest_msrs = NULL;
+	kfree(vmx);
 
 	return -ENOMEM;
 }