author    Avi Kivity <avi@redhat.com>            2010-10-19 10:46:55 -0400
committer Marcelo Tosatti <mtosatti@redhat.com>  2010-10-19 12:21:45 -0400
commit    9581d442b9058d3699b4be568b6e5eae38a41493
tree      76d1b596d873514fdb9b3bf75d6d7b3cbfada85d
parent    2b666ca4a68cbc22483b0f2e1ba3c0e59b01ae9e
KVM: Fix fs/gs reload oops with invalid ldt
kvm reloads the host's fs and gs blindly; however, the underlying segment
descriptors may be invalid because the user can modify the ldt after they
were loaded.

Fix by using the safe accessors (loadsegment() and load_gs_index())
instead of home-grown unsafe versions.

This is CVE-2010-3698.

KVM-Stable-Tag.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
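Why the raw loads oops: a mov into %fs or %gs using a selector whose LDT
descriptor has since disappeared raises #GP in kernel mode, and an
unhandled kernel-mode #GP is an oops. The safe accessors route the same
mov through an exception-table fixup that falls back to the null
selector. A minimal sketch of the two patterns, assuming 2.6.36-era
headers (illustrative, not the kernel's verbatim macro text; the real
loadsegment() lives in arch/x86/include/asm/system.h):

    /* Unsafe (the removed KVM helpers): a stale selector faults. */
    static inline void unsafe_load_fs(u16 sel)
    {
            asm("mov %0, %%fs" : : "rm"(sel));  /* #GP -> kernel oops */
    }

    /* Safe pattern: the fixup catches the fault and loads the null
     * selector instead, so a hostile ldt cannot crash the host. */
    #define loadsegment_sketch(seg, value)                      \
            asm volatile("1:  movl %k0, %%" #seg "\n"           \
                         "2:\n"                                 \
                         ".section .fixup,\"ax\"\n"             \
                         "3:  movl %k1, %%" #seg "\n"           \
                         "    jmp 2b\n"                         \
                         ".previous\n"                          \
                         _ASM_EXTABLE(1b, 3b)                   \
                         : : "r" (value), "r" (0) : "memory")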
-rw-r--r--  arch/x86/include/asm/kvm_host.h  24
-rw-r--r--  arch/x86/kvm/svm.c               15
-rw-r--r--  arch/x86/kvm/vmx.c               24
3 files changed, 19 insertions(+), 44 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 502e53f999cf..c52e2eb40a1e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -652,20 +652,6 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
 	return (struct kvm_mmu_page *)page_private(page);
 }
 
-static inline u16 kvm_read_fs(void)
-{
-	u16 seg;
-	asm("mov %%fs, %0" : "=g"(seg));
-	return seg;
-}
-
-static inline u16 kvm_read_gs(void)
-{
-	u16 seg;
-	asm("mov %%gs, %0" : "=g"(seg));
-	return seg;
-}
-
 static inline u16 kvm_read_ldt(void)
 {
 	u16 ldt;
@@ -673,16 +659,6 @@ static inline u16 kvm_read_ldt(void)
 	return ldt;
 }
 
-static inline void kvm_load_fs(u16 sel)
-{
-	asm("mov %0, %%fs" : : "rm"(sel));
-}
-
-static inline void kvm_load_gs(u16 sel)
-{
-	asm("mov %0, %%gs" : : "rm"(sel));
-}
-
 static inline void kvm_load_ldt(u16 sel)
 {
 	asm("lldt %0" : : "rm"(sel));
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 81ed28cb36e6..8a3f9f64f86f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3163,8 +3163,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	sync_lapic_to_cr8(vcpu);
 
 	save_host_msrs(vcpu);
-	fs_selector = kvm_read_fs();
-	gs_selector = kvm_read_gs();
+	savesegment(fs, fs_selector);
+	savesegment(gs, gs_selector);
 	ldt_selector = kvm_read_ldt();
 	svm->vmcb->save.cr2 = vcpu->arch.cr2;
 	/* required for live migration with NPT */
@@ -3251,10 +3251,15 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
 	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 
-	kvm_load_fs(fs_selector);
-	kvm_load_gs(gs_selector);
-	kvm_load_ldt(ldt_selector);
 	load_host_msrs(vcpu);
+	loadsegment(fs, fs_selector);
+#ifdef CONFIG_X86_64
+	load_gs_index(gs_selector);
+	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+#else
+	loadsegment(gs, gs_selector);
+#endif
+	kvm_load_ldt(ldt_selector);
 
 	reload_tss(vcpu);
 
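One subtlety in the 64-bit branch above: any load of %gs resets the
in-use gs base, so load_gs_index() swaps to the inactive user base
before the load -- meaning the user's saved base is what gets clobbered,
and the wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs) line re-seeds it
from the thread struct afterwards. A hedged C-level sketch of what
load_gs_index() does internally (the real routine is assembly with its
own exception fixup that falls back to selector 0):

    static void load_gs_index_sketch(unsigned gs)
    {
            unsigned long flags;

            local_irq_save(flags);  /* no IRQ between the swapgs pair */
            native_swapgs();        /* expose the inactive user gs base */
            asm volatile("movl %0, %%gs" : : "r"(gs)); /* clobbers it */
            native_swapgs();        /* kernel percpu base back in place */
            local_irq_restore(flags);
    }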
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 49b25eee25ac..7bddfab12013 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -803,7 +803,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 	 */
 	vmx->host_state.ldt_sel = kvm_read_ldt();
 	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
-	vmx->host_state.fs_sel = kvm_read_fs();
+	savesegment(fs, vmx->host_state.fs_sel);
 	if (!(vmx->host_state.fs_sel & 7)) {
 		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
 		vmx->host_state.fs_reload_needed = 0;
@@ -811,7 +811,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 		vmcs_write16(HOST_FS_SELECTOR, 0);
 		vmx->host_state.fs_reload_needed = 1;
 	}
-	vmx->host_state.gs_sel = kvm_read_gs();
+	savesegment(gs, vmx->host_state.gs_sel);
 	if (!(vmx->host_state.gs_sel & 7))
 		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
 	else {
@@ -841,27 +841,21 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 
 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 {
-	unsigned long flags;
-
 	if (!vmx->host_state.loaded)
 		return;
 
 	++vmx->vcpu.stat.host_state_reload;
 	vmx->host_state.loaded = 0;
 	if (vmx->host_state.fs_reload_needed)
-		kvm_load_fs(vmx->host_state.fs_sel);
+		loadsegment(fs, vmx->host_state.fs_sel);
 	if (vmx->host_state.gs_ldt_reload_needed) {
 		kvm_load_ldt(vmx->host_state.ldt_sel);
-		/*
-		 * If we have to reload gs, we must take care to
-		 * preserve our gs base.
-		 */
-		local_irq_save(flags);
-		kvm_load_gs(vmx->host_state.gs_sel);
 #ifdef CONFIG_X86_64
-		wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
+		load_gs_index(vmx->host_state.gs_sel);
+		wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+#else
+		loadsegment(gs, vmx->host_state.gs_sel);
 #endif
-		local_irq_restore(flags);
 	}
 	reload_tss();
 #ifdef CONFIG_X86_64
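The vmx copy of the same dance simplifies for the same reason:
load_gs_index() already disables interrupts and preserves the kernel gs
base internally (see the sketch above), so the hand-rolled
local_irq_save()/kvm_load_gs()/wrmsrl(MSR_GS_BASE, ...) sequence and its
"preserve our gs base" comment can go; as in svm.c, only the user base
still needs re-seeding via MSR_KERNEL_GS_BASE.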
@@ -2589,8 +2583,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
 	vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
 	vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
-	vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs());    /* 22.2.4 */
-	vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs());    /* 22.2.4 */
+	vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
+	vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
 	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
 #ifdef CONFIG_X86_64
 	rdmsrl(MSR_FS_BASE, a);
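The setup-time HOST_FS_SELECTOR/HOST_GS_SELECTOR values can become a
plain 0 because vmx_save_host_state() above rewrites those fields with
the live selectors before every entry, and it already falls back to 0
whenever a selector has TI/RPL bits set (the "& 7" checks); a selector
that is not a zero-RPL GDT entry is not a valid VMCS host-state value
in any case.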