author    Gleb Natapov <gleb@redhat.com>  2010-02-18 05:15:01 -0500
committer Marcelo Tosatti <mtosatti@redhat.com>  2010-03-01 10:36:14 -0500
commit    c697518a861e6c43b92b848895f9926580ee63c3 (patch)
tree      23c0b9169dfe783bd4615795cd5495f369d42122 /arch
parent    6f550484a15ea1b468665cdf59f020bf08ccb292 (diff)
KVM: Fix segment descriptor loading
Add proper error and permission checking. This patch also changes the
task switching code to load segment selectors before segment
descriptors, as the SDM requires; otherwise permission checking during
segment descriptor loading will be incorrect.

Cc: stable@kernel.org (2.6.33, 2.6.32)
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
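To make the new selector rules concrete: one of the checks this patch introduces is that a null selector (values 0x0000-0x0003) may be loaded into a data segment register, where it merely renders the register unusable, but never into CS, SS or TR. A minimal standalone sketch of that rule, with illustrative names rather than KVM's:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative enum; mirrors the x86 segment registers. */
	enum seg { SEG_ES, SEG_CS, SEG_SS, SEG_DS, SEG_FS, SEG_GS, SEG_TR };

	static bool null_selector_ok(uint16_t selector, enum seg seg)
	{
		bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */

		if (!null_selector)
			return true;
		/* null is never valid for CS, SS or TR */
		return seg != SEG_CS && seg != SEG_SS && seg != SEG_TR;
	}

	int main(void)
	{
		printf("null into DS: %s\n", null_selector_ok(0x0000, SEG_DS) ? "ok" : "#GP");
		printf("null into SS: %s\n", null_selector_ok(0x0003, SEG_SS) ? "ok" : "#GP");
		return 0;
	}

In the patch itself this shows up as the null_selector test near the top of the rewritten kvm_load_segment_descriptor() below.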
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/kvm_host.h	3
-rw-r--r--	arch/x86/kvm/emulate.c	30
-rw-r--r--	arch/x86/kvm/x86.c	177
3 files changed, 151 insertions(+), 59 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f9a2f66530cf..06d9e79ca37d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -614,8 +614,7 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
 			    unsigned long value);
 
 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
-int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
-				int type_bits, int seg);
+int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
 
 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
 
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 2db760ff887c..a1a7b27adf41 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1309,7 +1309,7 @@ static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
 	if (rc != 0)
 		return rc;
 
-	rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)selector, 1, seg);
+	rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)selector, seg);
 	return rc;
 }
 
@@ -1491,7 +1491,7 @@ static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
 	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
 	if (rc)
 		return rc;
-	rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, 1, VCPU_SREG_CS);
+	rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, VCPU_SREG_CS);
 	return rc;
 }
 
@@ -2122,12 +2122,11 @@ special_insn:
 		break;
 	case 0x8e: { /* mov seg, r/m16 */
 		uint16_t sel;
-		int type_bits;
-		int err;
 
 		sel = c->src.val;
 
-		if (c->modrm_reg == VCPU_SREG_CS) {
+		if (c->modrm_reg == VCPU_SREG_CS ||
+		    c->modrm_reg > VCPU_SREG_GS) {
 			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
 			goto done;
 		}
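The new c->modrm_reg > VCPU_SREG_GS test relies on the ordering of KVM's segment-register constants: the sreg field of the modrm byte uses the same ES..GS numbering, so any value past GS cannot name a register that mov-to-sreg may target. Assuming the declaration in kvm_host.h of this era looks like the sketch below, TR, LDTR and out-of-range encodings all sort above VCPU_SREG_GS and now raise #UD instead of aborting emulation:

	/* Assumed layout, shown for reference only. */
	enum {
		VCPU_SREG_ES,
		VCPU_SREG_CS,
		VCPU_SREG_SS,
		VCPU_SREG_DS,
		VCPU_SREG_FS,
		VCPU_SREG_GS,
		VCPU_SREG_TR,	/* not a valid mov-to-sreg target */
		VCPU_SREG_LDTR,	/* not a valid mov-to-sreg target */
	};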
@@ -2135,18 +2134,7 @@ special_insn:
 		if (c->modrm_reg == VCPU_SREG_SS)
 			toggle_interruptibility(ctxt, X86_SHADOW_INT_MOV_SS);
 
-		if (c->modrm_reg <= 5) {
-			type_bits = (c->modrm_reg == 1) ? 9 : 1;
-			err = kvm_load_segment_descriptor(ctxt->vcpu, sel,
-							  type_bits, c->modrm_reg);
-		} else {
-			printk(KERN_INFO "Invalid segreg in modrm byte 0x%02x\n",
-			       c->modrm);
-			goto cannot_emulate;
-		}
-
-		if (err < 0)
-			goto cannot_emulate;
+		rc = kvm_load_segment_descriptor(ctxt->vcpu, sel, c->modrm_reg);
 
 		c->dst.type = OP_NONE;  /* Disable writeback. */
 		break;
@@ -2320,11 +2308,9 @@ special_insn:
 	case 0xe9: /* jmp rel */
 		goto jmp;
 	case 0xea: /* jmp far */
-		if (kvm_load_segment_descriptor(ctxt->vcpu, c->src2.val, 9,
-						VCPU_SREG_CS) < 0) {
-			DPRINTF("jmp far: Failed to load CS descriptor\n");
-			goto cannot_emulate;
-		}
+		if (kvm_load_segment_descriptor(ctxt->vcpu, c->src2.val,
+						VCPU_SREG_CS))
+			goto done;
 
 		c->eip = c->src.val;
 		break;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 203ee7d0ed58..c3d2acbbb91b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4787,7 +4787,7 @@ static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int se
 		.unusable = 0,
 	};
 	kvm_x86_ops->set_segment(vcpu, &segvar, seg);
-	return 0;
+	return X86EMUL_CONTINUE;
 }
 
 static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
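The switch from a bare 0 to X86EMUL_CONTINUE is cosmetic here, but it matters for the rewritten kvm_load_segment_descriptor() below, which can now also return X86EMUL_PROPAGATE_FAULT after queueing an exception. A sketch of the convention, assuming the emulator-header definitions of this period:

	/* Assumed values; callers test the return for nonzero, so success
	 * must stay 0 for the "if (kvm_load_segment_descriptor(...))"
	 * call sites to keep working.
	 */
	#define X86EMUL_CONTINUE        0	/* emulation proceeded normally */
	#define X86EMUL_UNHANDLEABLE    1	/* punt to userspace */
	#define X86EMUL_PROPAGATE_FAULT 2	/* an exception was queued for the guest */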
@@ -4797,43 +4797,112 @@ static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
 		(kvm_get_rflags(vcpu) & X86_EFLAGS_VM);
 }
 
-static void kvm_check_segment_descriptor(struct kvm_vcpu *vcpu, int seg,
-					 u16 selector)
-{
-	/* NULL selector is not valid for CS and SS */
-	if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
-		if (!selector)
-			kvm_queue_exception_e(vcpu, TS_VECTOR, selector >> 3);
-}
-
-int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
-				int type_bits, int seg)
+int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg)
 {
 	struct kvm_segment kvm_seg;
 	struct desc_struct seg_desc;
+	u8 dpl, rpl, cpl;
+	unsigned err_vec = GP_VECTOR;
+	u32 err_code = 0;
+	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
+	int ret;
 
 	if (is_vm86_segment(vcpu, seg) || !is_protmode(vcpu))
 		return kvm_load_realmode_segment(vcpu, selector, seg);
 
-	if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
-		return 1;
+	/* NULL selector is not valid for TR, CS and SS */
+	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
+	    && null_selector)
+		goto exception;
+
+	/* TR should be in GDT only */
+	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
+		goto exception;
+
+	ret = load_guest_segment_descriptor(vcpu, selector, &seg_desc);
+	if (ret)
+		return ret;
+
 	seg_desct_to_kvm_desct(&seg_desc, selector, &kvm_seg);
 
-	kvm_check_segment_descriptor(vcpu, seg, selector);
-	kvm_seg.type |= type_bits;
+	if (null_selector) { /* for NULL selector skip all following checks */
+		kvm_seg.unusable = 1;
+		goto load;
+	}
 
-	if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
-	    seg != VCPU_SREG_LDTR)
-		if (!kvm_seg.s)
-			kvm_seg.unusable = 1;
+	err_code = selector & 0xfffc;
+	err_vec = GP_VECTOR;
 
-	kvm_set_segment(vcpu, &kvm_seg, seg);
-	if (selector && !kvm_seg.unusable && kvm_seg.s) {
+	/* can't load system descriptor into segment selector */
+	if (seg <= VCPU_SREG_GS && !kvm_seg.s)
+		goto exception;
+
+	if (!kvm_seg.present) {
+		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
+		goto exception;
+	}
+
+	rpl = selector & 3;
+	dpl = kvm_seg.dpl;
+	cpl = kvm_x86_ops->get_cpl(vcpu);
+
+	switch (seg) {
+	case VCPU_SREG_SS:
+		/*
+		 * segment is not a writable data segment, or segment
+		 * selector's RPL != CPL, or descriptor's DPL != CPL
+		 */
+		if (rpl != cpl || (kvm_seg.type & 0xa) != 0x2 || dpl != cpl)
+			goto exception;
+		break;
+	case VCPU_SREG_CS:
+		if (!(kvm_seg.type & 8))
+			goto exception;
+
+		if (kvm_seg.type & 4) {
+			/* conforming */
+			if (dpl > cpl)
+				goto exception;
+		} else {
+			/* nonconforming */
+			if (rpl > cpl || dpl != cpl)
+				goto exception;
+		}
+		/* CS(RPL) <- CPL */
+		selector = (selector & 0xfffc) | cpl;
+		break;
+	case VCPU_SREG_TR:
+		if (kvm_seg.s || (kvm_seg.type != 1 && kvm_seg.type != 9))
+			goto exception;
+		break;
+	case VCPU_SREG_LDTR:
+		if (kvm_seg.s || kvm_seg.type != 2)
+			goto exception;
+		break;
+	default: /* DS, ES, FS, or GS */
+		/*
+		 * segment is not a data or readable code segment or
+		 * ((segment is a data or nonconforming code segment)
+		 * and (both RPL and CPL > DPL))
+		 */
+		if ((kvm_seg.type & 0xa) == 0x8 ||
+		    (((kvm_seg.type & 0xc) != 0xc) && (rpl > dpl && cpl > dpl)))
+			goto exception;
+		break;
+	}
+
+	if (!kvm_seg.unusable && kvm_seg.s) {
 		/* mark segment as accessed */
+		kvm_seg.type |= 1;
 		seg_desc.type |= 1;
 		save_guest_segment_descriptor(vcpu, selector, &seg_desc);
 	}
-	return 0;
+load:
+	kvm_set_segment(vcpu, &kvm_seg, seg);
+	return X86EMUL_CONTINUE;
+exception:
+	kvm_queue_exception_e(vcpu, err_vec, err_code);
+	return X86EMUL_PROPAGATE_FAULT;
 }
 
 static void save_state_to_tss32(struct kvm_vcpu *vcpu,
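The privilege checks in the switch above follow the SDM's rules for loading segment registers. A standalone sketch that mirrors the SS and DS/ES/FS/GS cases, using simplified fields and illustrative names rather than KVM's structures:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-in for the descriptor fields the checks consult. */
	struct desc {
		uint8_t type;	/* descriptor type bits; bit 3 = code, bit 1 = W/R */
		uint8_t dpl;	/* descriptor privilege level */
	};

	/* SS rule from the patch: writable data segment, RPL == CPL == DPL. */
	static bool ss_ok(const struct desc *d, uint8_t rpl, uint8_t cpl)
	{
		return rpl == cpl && (d->type & 0xa) == 0x2 && d->dpl == cpl;
	}

	/* DS/ES/FS/GS rule: not execute-only, and for data or nonconforming
	 * code segments the load fails only when both RPL and CPL exceed DPL.
	 */
	static bool data_ok(const struct desc *d, uint8_t rpl, uint8_t cpl)
	{
		if ((d->type & 0xa) == 0x8)	/* execute-only code segment */
			return false;
		if (((d->type & 0xc) != 0xc) &&	/* not conforming code */
		    (rpl > d->dpl && cpl > d->dpl))
			return false;
		return true;
	}

	int main(void)
	{
		struct desc user_data = { .type = 0x3, .dpl = 3 };	/* RW data */
		struct desc kern_data = { .type = 0x3, .dpl = 0 };

		printf("user data as SS at CPL3: %d\n", ss_ok(&user_data, 3, 3));	/* 1 */
		printf("kernel data as DS at CPL3: %d\n", data_ok(&kern_data, 3, 3));	/* 0 */
		return 0;
	}

A failing check maps to kvm_queue_exception_e() with #GP, #SS or #NP and the selector's index as the error code, so the guest sees the architectural fault instead of emulation silently succeeding.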
@@ -4859,6 +4928,14 @@ static void save_state_to_tss32(struct kvm_vcpu *vcpu,
 	tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
 }
 
+static void kvm_load_segment_selector(struct kvm_vcpu *vcpu, u16 sel, int seg)
+{
+	struct kvm_segment kvm_seg;
+	kvm_get_segment(vcpu, &kvm_seg, seg);
+	kvm_seg.selector = sel;
+	kvm_set_segment(vcpu, &kvm_seg, seg);
+}
+
 static int load_state_from_tss32(struct kvm_vcpu *vcpu,
 				 struct tss_segment_32 *tss)
 {
@@ -4876,25 +4953,41 @@ static int load_state_from_tss32(struct kvm_vcpu *vcpu,
 	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
 	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
 
-	if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
+	/*
+	 * SDM says that segment selectors are loaded before segment
+	 * descriptors
+	 */
+	kvm_load_segment_selector(vcpu, tss->ldt_selector, VCPU_SREG_LDTR);
+	kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES);
+	kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS);
+	kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS);
+	kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS);
+	kvm_load_segment_selector(vcpu, tss->fs, VCPU_SREG_FS);
+	kvm_load_segment_selector(vcpu, tss->gs, VCPU_SREG_GS);
+
+	/*
+	 * Now load segment descriptors. If a fault happens at this stage
+	 * it is handled in the context of the new task
+	 */
+	if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, VCPU_SREG_LDTR))
 		return 1;
 
-	if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
+	if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES))
 		return 1;
 
-	if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
+	if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS))
 		return 1;
 
-	if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
+	if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS))
 		return 1;
 
-	if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
+	if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS))
 		return 1;
 
-	if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
+	if (kvm_load_segment_descriptor(vcpu, tss->fs, VCPU_SREG_FS))
 		return 1;
 
-	if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
+	if (kvm_load_segment_descriptor(vcpu, tss->gs, VCPU_SREG_GS))
 		return 1;
 	return 0;
 }
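The selector-then-descriptor split above is the core of the ordering fix: CPL, which the descriptor checks consult through get_cpl(), is derived from already-loaded segment state, so every selector of the new task must be in place before any descriptor is validated. The shape of the pattern, sketched with illustrative stubs in place of the real helpers:

	#include <stdint.h>

	/* Illustrative stubs only; the real helpers are
	 * kvm_load_segment_selector() and kvm_load_segment_descriptor().
	 */
	static void set_selector(int seg, uint16_t sel) { (void)seg; (void)sel; }
	static int load_descriptor(int seg, uint16_t sel) { (void)seg; (void)sel; return 0; }

	static int switch_segments(const uint16_t sel[], int nsegs)
	{
		int i;

		/* Pass 1: make the new task's selectors visible first,
		 * in the order the SDM prescribes. */
		for (i = 0; i < nsegs; i++)
			set_selector(i, sel[i]);

		/* Pass 2: run the full descriptor permission checks; a
		 * fault raised here is delivered in the new task's context. */
		for (i = 0; i < nsegs; i++)
			if (load_descriptor(i, sel[i]))
				return 1;
		return 0;
	}

The 16-bit TSS path below applies the same two-pass scheme, just without FS and GS, which do not exist in a tss_segment_16.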
@@ -4934,19 +5027,33 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu,
 	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
 	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
 
-	if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
+	/*
+	 * SDM says that segment selectors are loaded before segment
+	 * descriptors
+	 */
+	kvm_load_segment_selector(vcpu, tss->ldt, VCPU_SREG_LDTR);
+	kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES);
+	kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS);
+	kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS);
+	kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS);
+
+	/*
+	 * Now load segment descriptors. If a fault happens at this stage
+	 * it is handled in the context of the new task
+	 */
+	if (kvm_load_segment_descriptor(vcpu, tss->ldt, VCPU_SREG_LDTR))
 		return 1;
 
-	if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
+	if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES))
 		return 1;
 
-	if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
+	if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS))
 		return 1;
 
-	if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
+	if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS))
 		return 1;
 
-	if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
+	if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS))
 		return 1;
 	return 0;
 }