about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
-rw-r--r--  arch/x86/kvm/svm.c          | 12
-rw-r--r--  arch/x86/kvm/vmx.c          | 24
-rw-r--r--  arch/x86/kvm/x86.c          | 18
-rw-r--r--  include/asm-x86/kvm_host.h  | 26
4 files changed, 39 insertions, 41 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 858e29702232..b756e876dce3 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1710,9 +1710,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	sync_lapic_to_cr8(vcpu);
 
 	save_host_msrs(vcpu);
-	fs_selector = read_fs();
-	gs_selector = read_gs();
-	ldt_selector = read_ldt();
+	fs_selector = kvm_read_fs();
+	gs_selector = kvm_read_gs();
+	ldt_selector = kvm_read_ldt();
 	svm->host_cr2 = kvm_read_cr2();
 	svm->host_dr6 = read_dr6();
 	svm->host_dr7 = read_dr7();
@@ -1845,9 +1845,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	write_dr7(svm->host_dr7);
 	kvm_write_cr2(svm->host_cr2);
 
-	load_fs(fs_selector);
-	load_gs(gs_selector);
-	load_ldt(ldt_selector);
+	kvm_load_fs(fs_selector);
+	kvm_load_gs(gs_selector);
+	kvm_load_ldt(ldt_selector);
 	load_host_msrs(vcpu);
 
 	reload_tss(vcpu);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index fff3b490976e..0cac63701719 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -484,7 +484,7 @@ static void reload_tss(void)
 	struct descriptor_table gdt;
 	struct desc_struct *descs;
 
-	get_gdt(&gdt);
+	kvm_get_gdt(&gdt);
 	descs = (void *)gdt.base;
 	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
 	load_TR_desc();
@@ -540,9 +540,9 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
 	 * allow segment selectors with cpl > 0 or ti == 1.
 	 */
-	vmx->host_state.ldt_sel = read_ldt();
+	vmx->host_state.ldt_sel = kvm_read_ldt();
 	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
-	vmx->host_state.fs_sel = read_fs();
+	vmx->host_state.fs_sel = kvm_read_fs();
 	if (!(vmx->host_state.fs_sel & 7)) {
 		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
 		vmx->host_state.fs_reload_needed = 0;
@@ -550,7 +550,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 		vmcs_write16(HOST_FS_SELECTOR, 0);
 		vmx->host_state.fs_reload_needed = 1;
 	}
-	vmx->host_state.gs_sel = read_gs();
+	vmx->host_state.gs_sel = kvm_read_gs();
 	if (!(vmx->host_state.gs_sel & 7))
 		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
 	else {
@@ -586,15 +586,15 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 	++vmx->vcpu.stat.host_state_reload;
 	vmx->host_state.loaded = 0;
 	if (vmx->host_state.fs_reload_needed)
-		load_fs(vmx->host_state.fs_sel);
+		kvm_load_fs(vmx->host_state.fs_sel);
 	if (vmx->host_state.gs_ldt_reload_needed) {
-		load_ldt(vmx->host_state.ldt_sel);
+		kvm_load_ldt(vmx->host_state.ldt_sel);
 		/*
 		 * If we have to reload gs, we must take care to
 		 * preserve our gs base.
 		 */
 		local_irq_save(flags);
-		load_gs(vmx->host_state.gs_sel);
+		kvm_load_gs(vmx->host_state.gs_sel);
 #ifdef CONFIG_X86_64
 		wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
 #endif
@@ -654,8 +654,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		 * Linux uses per-cpu TSS and GDT, so set these when switching
 		 * processors.
 		 */
-		vmcs_writel(HOST_TR_BASE, read_tr_base()); /* 22.2.4 */
-		get_gdt(&dt);
+		vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
+		kvm_get_gdt(&dt);
 		vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */
 
 		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
@@ -1943,8 +1943,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
 	vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
 	vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
-	vmcs_write16(HOST_FS_SELECTOR, read_fs());    /* 22.2.4 */
-	vmcs_write16(HOST_GS_SELECTOR, read_gs());    /* 22.2.4 */
+	vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs());    /* 22.2.4 */
+	vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs());    /* 22.2.4 */
 	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
 #ifdef CONFIG_X86_64
 	rdmsrl(MSR_FS_BASE, a);
@@ -1958,7 +1958,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 
 	vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
 
-	get_idt(&dt);
+	kvm_get_idt(&dt);
 	vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */
 
 	asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 89fc8565edee..b131f3c0cf64 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3767,14 +3767,14 @@ void fx_init(struct kvm_vcpu *vcpu)
 	 * allocate ram with GFP_KERNEL.
 	 */
 	if (!used_math())
-		fx_save(&vcpu->arch.host_fx_image);
+		kvm_fx_save(&vcpu->arch.host_fx_image);
 
 	/* Initialize guest FPU by resetting ours and saving into guest's */
 	preempt_disable();
-	fx_save(&vcpu->arch.host_fx_image);
-	fx_finit();
-	fx_save(&vcpu->arch.guest_fx_image);
-	fx_restore(&vcpu->arch.host_fx_image);
+	kvm_fx_save(&vcpu->arch.host_fx_image);
+	kvm_fx_finit();
+	kvm_fx_save(&vcpu->arch.guest_fx_image);
+	kvm_fx_restore(&vcpu->arch.host_fx_image);
 	preempt_enable();
 
 	vcpu->arch.cr0 |= X86_CR0_ET;
@@ -3791,8 +3791,8 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 		return;
 
 	vcpu->guest_fpu_loaded = 1;
-	fx_save(&vcpu->arch.host_fx_image);
-	fx_restore(&vcpu->arch.guest_fx_image);
+	kvm_fx_save(&vcpu->arch.host_fx_image);
+	kvm_fx_restore(&vcpu->arch.guest_fx_image);
 }
 EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
 
@@ -3802,8 +3802,8 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 		return;
 
 	vcpu->guest_fpu_loaded = 0;
-	fx_save(&vcpu->arch.guest_fx_image);
-	fx_restore(&vcpu->arch.host_fx_image);
+	kvm_fx_save(&vcpu->arch.guest_fx_image);
+	kvm_fx_restore(&vcpu->arch.host_fx_image);
 	++vcpu->stat.fpu_reload;
 }
 EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index c64d1242762b..f995783b1fdb 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -567,55 +567,53 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
 	return (struct kvm_mmu_page *)page_private(page);
 }
 
-static inline u16 read_fs(void)
+static inline u16 kvm_read_fs(void)
 {
 	u16 seg;
 	asm("mov %%fs, %0" : "=g"(seg));
 	return seg;
 }
 
-static inline u16 read_gs(void)
+static inline u16 kvm_read_gs(void)
 {
 	u16 seg;
 	asm("mov %%gs, %0" : "=g"(seg));
 	return seg;
 }
 
-static inline u16 read_ldt(void)
+static inline u16 kvm_read_ldt(void)
 {
 	u16 ldt;
 	asm("sldt %0" : "=g"(ldt));
 	return ldt;
 }
 
-static inline void load_fs(u16 sel)
+static inline void kvm_load_fs(u16 sel)
 {
 	asm("mov %0, %%fs" : : "rm"(sel));
 }
 
-static inline void load_gs(u16 sel)
+static inline void kvm_load_gs(u16 sel)
 {
 	asm("mov %0, %%gs" : : "rm"(sel));
 }
 
-#ifndef load_ldt
-static inline void load_ldt(u16 sel)
+static inline void kvm_load_ldt(u16 sel)
 {
 	asm("lldt %0" : : "rm"(sel));
 }
-#endif
 
-static inline void get_idt(struct descriptor_table *table)
+static inline void kvm_get_idt(struct descriptor_table *table)
 {
 	asm("sidt %0" : "=m"(*table));
 }
 
-static inline void get_gdt(struct descriptor_table *table)
+static inline void kvm_get_gdt(struct descriptor_table *table)
 {
 	asm("sgdt %0" : "=m"(*table));
 }
 
-static inline unsigned long read_tr_base(void)
+static inline unsigned long kvm_read_tr_base(void)
 {
 	u16 tr;
 	asm("str %0" : "=g"(tr));
@@ -632,17 +630,17 @@ static inline unsigned long read_msr(unsigned long msr)
 }
 #endif
 
-static inline void fx_save(struct i387_fxsave_struct *image)
+static inline void kvm_fx_save(struct i387_fxsave_struct *image)
 {
 	asm("fxsave (%0)":: "r" (image));
 }
 
-static inline void fx_restore(struct i387_fxsave_struct *image)
+static inline void kvm_fx_restore(struct i387_fxsave_struct *image)
 {
 	asm("fxrstor (%0)":: "r" (image));
 }
 
-static inline void fx_finit(void)
+static inline void kvm_fx_finit(void)
 {
 	asm("finit");
 }