author		Gleb Natapov <gleb@redhat.com>		2010-03-18 09:20:19 -0400
committer	Avi Kivity <avi@redhat.com>		2010-05-17 05:16:18 -0400
commit		ceffb4597253b2420d2f171d8b1cdf2cd3137989 (patch)
tree		474379bfd74cb0e3c81063f7bc24d4b51560ad46
parent		2e873022f511b82a5318c7af179f588f08d68cb9 (diff)
KVM: Use task switch from emulator.c
Remove old task switch code from x86.c.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
-rw-r--r--	arch/x86/kvm/emulate.c	  6
-rw-r--r--	arch/x86/kvm/x86.c	561
2 files changed, 22 insertions(+), 545 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 702bffffd27f..8225ec26efed 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2291,6 +2291,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 	u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
 	ulong old_tss_base =
 		get_cached_descriptor_base(ctxt, ops, VCPU_SREG_TR);
+	u32 desc_limit;
 
 	/* FIXME: old_tss_base == ~0 ? */
 
@@ -2311,7 +2312,10 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 		}
 	}
 
-	if (!next_tss_desc.p || desc_limit_scaled(&next_tss_desc) < 0x67) {
+	desc_limit = desc_limit_scaled(&next_tss_desc);
+	if (!next_tss_desc.p ||
+	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
+	     desc_limit < 0x2b)) {
 		kvm_queue_exception_e(ctxt->vcpu, TS_VECTOR,
 				      tss_selector & 0xfffc);
 		return X86EMUL_PROPAGATE_FAULT;
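
Note: the widened check above carries over the validity test from the kvm_task_switch() code being removed from x86.c below: a present 32-bit TSS descriptor (type bit 3 set) must have a scaled limit of at least 0x67 (the 104-byte TSS), while a 16-bit TSS only needs 0x2b (44 bytes); anything smaller raises #TS. A minimal standalone sketch of that predicate (illustrative only, not kernel code; tss_desc_valid is a made-up name):

#include <stdbool.h>
#include <stdio.h>

/*
 * Validity check for a TSS descriptor during a task switch, mirroring the
 * hunk above: not-present descriptors fault; a 32-bit TSS (type bit 3 set)
 * must cover at least 0x67 bytes, and even a 16-bit TSS needs 0x2b.
 */
static bool tss_desc_valid(bool present, unsigned int type, unsigned int desc_limit)
{
	if (!present)
		return false;
	if (desc_limit < 0x67 && (type & 8))	/* 32-bit TSS too small */
		return false;
	if (desc_limit < 0x2b)			/* too small even for a 16-bit TSS */
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", tss_desc_valid(true, 9, 0x2b)); /* 32-bit TSS, limit 0x2b -> 0, #TS */
	printf("%d\n", tss_desc_valid(true, 1, 0x2b)); /* 16-bit TSS, limit 0x2b -> 1, ok  */
	return 0;
}
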
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index fbee8fbb33b5..f69854c8f339 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4832,557 +4832,30 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
-				   struct kvm_segment *kvm_desct)
-{
-	kvm_desct->base = get_desc_base(seg_desc);
-	kvm_desct->limit = get_desc_limit(seg_desc);
-	if (seg_desc->g) {
-		kvm_desct->limit <<= 12;
-		kvm_desct->limit |= 0xfff;
-	}
-	kvm_desct->selector = selector;
-	kvm_desct->type = seg_desc->type;
-	kvm_desct->present = seg_desc->p;
-	kvm_desct->dpl = seg_desc->dpl;
-	kvm_desct->db = seg_desc->d;
-	kvm_desct->s = seg_desc->s;
-	kvm_desct->l = seg_desc->l;
-	kvm_desct->g = seg_desc->g;
-	kvm_desct->avl = seg_desc->avl;
-	if (!selector)
-		kvm_desct->unusable = 1;
-	else
-		kvm_desct->unusable = 0;
-	kvm_desct->padding = 0;
-}
-
-static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
-					  u16 selector,
-					  struct desc_ptr *dtable)
-{
-	if (selector & 1 << 2) {
-		struct kvm_segment kvm_seg;
-
-		kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
-
-		if (kvm_seg.unusable)
-			dtable->size = 0;
-		else
-			dtable->size = kvm_seg.limit;
-		dtable->address = kvm_seg.base;
-	}
-	else
-		kvm_x86_ops->get_gdt(vcpu, dtable);
-}
-
-/* allowed just for 8 bytes segments */
-static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
-					 struct desc_struct *seg_desc)
-{
-	struct desc_ptr dtable;
-	u16 index = selector >> 3;
-	int ret;
-	u32 err;
-	gva_t addr;
-
-	get_segment_descriptor_dtable(vcpu, selector, &dtable);
-
-	if (dtable.size < index * 8 + 7) {
-		kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
-	addr = dtable.address + index * 8;
-	ret = kvm_read_guest_virt_system(addr, seg_desc, sizeof(*seg_desc),
-					 vcpu, &err);
-	if (ret == X86EMUL_PROPAGATE_FAULT)
-		kvm_inject_page_fault(vcpu, addr, err);
-
-	return ret;
-}
-
-/* allowed just for 8 bytes segments */
-static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
-					 struct desc_struct *seg_desc)
-{
-	struct desc_ptr dtable;
-	u16 index = selector >> 3;
-
-	get_segment_descriptor_dtable(vcpu, selector, &dtable);
-
-	if (dtable.size < index * 8 + 7)
-		return 1;
-	return kvm_write_guest_virt(dtable.address + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
-}
-
-static gpa_t get_tss_base_addr_write(struct kvm_vcpu *vcpu,
-				     struct desc_struct *seg_desc)
-{
-	u32 base_addr = get_desc_base(seg_desc);
-
-	return kvm_mmu_gva_to_gpa_write(vcpu, base_addr, NULL);
-}
-
-static gpa_t get_tss_base_addr_read(struct kvm_vcpu *vcpu,
-				    struct desc_struct *seg_desc)
-{
-	u32 base_addr = get_desc_base(seg_desc);
-
-	return kvm_mmu_gva_to_gpa_read(vcpu, base_addr, NULL);
-}
-
-static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
-{
-	struct kvm_segment kvm_seg;
-
-	kvm_get_segment(vcpu, &kvm_seg, seg);
-	return kvm_seg.selector;
-}
-
-static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
-{
-	struct kvm_segment segvar = {
-		.base = selector << 4,
-		.limit = 0xffff,
-		.selector = selector,
-		.type = 3,
-		.present = 1,
-		.dpl = 3,
-		.db = 0,
-		.s = 1,
-		.l = 0,
-		.g = 0,
-		.avl = 0,
-		.unusable = 0,
-	};
-	kvm_x86_ops->set_segment(vcpu, &segvar, seg);
-	return X86EMUL_CONTINUE;
-}
-
-static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
-{
-	return (seg != VCPU_SREG_LDTR) &&
-	       (seg != VCPU_SREG_TR) &&
-	       (kvm_get_rflags(vcpu) & X86_EFLAGS_VM);
-}
-
-int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg)
-{
-	struct kvm_segment kvm_seg;
-	struct desc_struct seg_desc;
-	u8 dpl, rpl, cpl;
-	unsigned err_vec = GP_VECTOR;
-	u32 err_code = 0;
-	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
-	int ret;
-
-	if (is_vm86_segment(vcpu, seg) || !is_protmode(vcpu))
-		return kvm_load_realmode_segment(vcpu, selector, seg);
-
-	/* NULL selector is not valid for TR, CS and SS */
-	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
-	    && null_selector)
-		goto exception;
-
-	/* TR should be in GDT only */
-	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
-		goto exception;
-
-	ret = load_guest_segment_descriptor(vcpu, selector, &seg_desc);
-	if (ret)
-		return ret;
-
-	seg_desct_to_kvm_desct(&seg_desc, selector, &kvm_seg);
-
-	if (null_selector) { /* for NULL selector skip all following checks */
-		kvm_seg.unusable = 1;
-		goto load;
-	}
-
-	err_code = selector & 0xfffc;
-	err_vec = GP_VECTOR;
-
-	/* can't load a system descriptor into a segment selector */
-	if (seg <= VCPU_SREG_GS && !kvm_seg.s)
-		goto exception;
-
-	if (!kvm_seg.present) {
-		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
-		goto exception;
-	}
-
-	rpl = selector & 3;
-	dpl = kvm_seg.dpl;
-	cpl = kvm_x86_ops->get_cpl(vcpu);
-
-	switch (seg) {
-	case VCPU_SREG_SS:
-		/*
-		 * segment is not a writable data segment, or segment
-		 * selector's RPL != CPL, or segment DPL != CPL
-		 */
-		if (rpl != cpl || (kvm_seg.type & 0xa) != 0x2 || dpl != cpl)
-			goto exception;
-		break;
-	case VCPU_SREG_CS:
-		if (!(kvm_seg.type & 8))
-			goto exception;
-
-		if (kvm_seg.type & 4) {
-			/* conforming */
-			if (dpl > cpl)
-				goto exception;
-		} else {
-			/* nonconforming */
-			if (rpl > cpl || dpl != cpl)
-				goto exception;
-		}
-		/* CS(RPL) <- CPL */
-		selector = (selector & 0xfffc) | cpl;
-		break;
-	case VCPU_SREG_TR:
-		if (kvm_seg.s || (kvm_seg.type != 1 && kvm_seg.type != 9))
-			goto exception;
-		break;
-	case VCPU_SREG_LDTR:
-		if (kvm_seg.s || kvm_seg.type != 2)
-			goto exception;
-		break;
-	default: /* DS, ES, FS, or GS */
-		/*
-		 * segment is not a data or readable code segment or
-		 * ((segment is a data or nonconforming code segment)
-		 * and (both RPL and CPL > DPL))
-		 */
-		if ((kvm_seg.type & 0xa) == 0x8 ||
-		    (((kvm_seg.type & 0xc) != 0xc) && (rpl > dpl && cpl > dpl)))
-			goto exception;
-		break;
-	}
-
-	if (!kvm_seg.unusable && kvm_seg.s) {
-		/* mark segment as accessed */
-		kvm_seg.type |= 1;
-		seg_desc.type |= 1;
-		save_guest_segment_descriptor(vcpu, selector, &seg_desc);
-	}
-load:
-	kvm_set_segment(vcpu, &kvm_seg, seg);
-	return X86EMUL_CONTINUE;
-exception:
-	kvm_queue_exception_e(vcpu, err_vec, err_code);
-	return X86EMUL_PROPAGATE_FAULT;
-}
-
-static void save_state_to_tss32(struct kvm_vcpu *vcpu,
-				struct tss_segment_32 *tss)
-{
-	tss->cr3 = vcpu->arch.cr3;
-	tss->eip = kvm_rip_read(vcpu);
-	tss->eflags = kvm_get_rflags(vcpu);
-	tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
-	tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
-	tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
-	tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
-	tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
-	tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
-	tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
-	tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
-	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
-	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
-	tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
-	tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
-	tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
-	tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
-	tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
-}
-
-static void kvm_load_segment_selector(struct kvm_vcpu *vcpu, u16 sel, int seg)
-{
-	struct kvm_segment kvm_seg;
-	kvm_get_segment(vcpu, &kvm_seg, seg);
-	kvm_seg.selector = sel;
-	kvm_set_segment(vcpu, &kvm_seg, seg);
-}
-
-static int load_state_from_tss32(struct kvm_vcpu *vcpu,
-				 struct tss_segment_32 *tss)
-{
-	kvm_set_cr3(vcpu, tss->cr3);
-
-	kvm_rip_write(vcpu, tss->eip);
-	kvm_set_rflags(vcpu, tss->eflags | 2);
-
-	kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
-	kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
-	kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
-	kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
-	kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
-	kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
-	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
-	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
-
-	/*
-	 * SDM says that segment selectors are loaded before segment
-	 * descriptors
-	 */
-	kvm_load_segment_selector(vcpu, tss->ldt_selector, VCPU_SREG_LDTR);
-	kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES);
-	kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS);
-	kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS);
-	kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS);
-	kvm_load_segment_selector(vcpu, tss->fs, VCPU_SREG_FS);
-	kvm_load_segment_selector(vcpu, tss->gs, VCPU_SREG_GS);
-
-	/*
-	 * Now load segment descriptors. If a fault happens at this stage,
-	 * it is handled in the context of the new task.
-	 */
-	if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, VCPU_SREG_LDTR))
-		return 1;
-
-	if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES))
-		return 1;
-
-	if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS))
-		return 1;
-
-	if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS))
-		return 1;
-
-	if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS))
-		return 1;
-
-	if (kvm_load_segment_descriptor(vcpu, tss->fs, VCPU_SREG_FS))
-		return 1;
-
-	if (kvm_load_segment_descriptor(vcpu, tss->gs, VCPU_SREG_GS))
-		return 1;
-	return 0;
-}
-
-static void save_state_to_tss16(struct kvm_vcpu *vcpu,
-				struct tss_segment_16 *tss)
-{
-	tss->ip = kvm_rip_read(vcpu);
-	tss->flag = kvm_get_rflags(vcpu);
-	tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
-	tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
-	tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
-	tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
-	tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
-	tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
-	tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
-	tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);
-
-	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
-	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
-	tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
-	tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
-	tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
-}
-
-static int load_state_from_tss16(struct kvm_vcpu *vcpu,
-				 struct tss_segment_16 *tss)
-{
-	kvm_rip_write(vcpu, tss->ip);
-	kvm_set_rflags(vcpu, tss->flag | 2);
-	kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
-	kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
-	kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
-	kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
-	kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
-	kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
-	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
-	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
-
-	/*
-	 * SDM says that segment selectors are loaded before segment
-	 * descriptors
-	 */
-	kvm_load_segment_selector(vcpu, tss->ldt, VCPU_SREG_LDTR);
-	kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES);
-	kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS);
-	kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS);
-	kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS);
-
-	/*
-	 * Now load segment descriptors. If a fault happens at this stage,
-	 * it is handled in the context of the new task.
-	 */
-	if (kvm_load_segment_descriptor(vcpu, tss->ldt, VCPU_SREG_LDTR))
-		return 1;
-
-	if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES))
-		return 1;
-
-	if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS))
-		return 1;
-
-	if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS))
-		return 1;
-
-	if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS))
-		return 1;
-	return 0;
-}
-
-static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
-			      u16 old_tss_sel, u32 old_tss_base,
-			      struct desc_struct *nseg_desc)
-{
-	struct tss_segment_16 tss_segment_16;
-	int ret = 0;
-
-	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
-			   sizeof tss_segment_16))
-		goto out;
-
-	save_state_to_tss16(vcpu, &tss_segment_16);
-
-	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
-			    sizeof tss_segment_16))
-		goto out;
-
-	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
-			   &tss_segment_16, sizeof tss_segment_16))
-		goto out;
-
-	if (old_tss_sel != 0xffff) {
-		tss_segment_16.prev_task_link = old_tss_sel;
-
-		if (kvm_write_guest(vcpu->kvm,
-				    get_tss_base_addr_write(vcpu, nseg_desc),
-				    &tss_segment_16.prev_task_link,
-				    sizeof tss_segment_16.prev_task_link))
-			goto out;
-	}
-
-	if (load_state_from_tss16(vcpu, &tss_segment_16))
-		goto out;
-
-	ret = 1;
-out:
-	return ret;
-}
-
-static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
-			      u16 old_tss_sel, u32 old_tss_base,
-			      struct desc_struct *nseg_desc)
-{
-	struct tss_segment_32 tss_segment_32;
-	int ret = 0;
-
-	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
-			   sizeof tss_segment_32))
-		goto out;
-
-	save_state_to_tss32(vcpu, &tss_segment_32);
-
-	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
-			    sizeof tss_segment_32))
-		goto out;
-
-	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
-			   &tss_segment_32, sizeof tss_segment_32))
-		goto out;
-
-	if (old_tss_sel != 0xffff) {
-		tss_segment_32.prev_task_link = old_tss_sel;
-
-		if (kvm_write_guest(vcpu->kvm,
-				    get_tss_base_addr_write(vcpu, nseg_desc),
-				    &tss_segment_32.prev_task_link,
-				    sizeof tss_segment_32.prev_task_link))
-			goto out;
-	}
-
-	if (load_state_from_tss32(vcpu, &tss_segment_32))
-		goto out;
-
-	ret = 1;
-out:
-	return ret;
-}
-
 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 {
-	struct kvm_segment tr_seg;
-	struct desc_struct cseg_desc;
-	struct desc_struct nseg_desc;
-	int ret = 0;
-	u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
-	u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
-	u32 desc_limit;
-
-	old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);
-
-	/* FIXME: Handle errors. Failure to read either TSS or their
-	 * descriptors should generate a page fault.
-	 */
-	if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
-		goto out;
-
-	if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
-		goto out;
-
-	if (reason != TASK_SWITCH_IRET) {
-		int cpl;
-
-		cpl = kvm_x86_ops->get_cpl(vcpu);
-		if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
-			kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
-			return 1;
-		}
-	}
-
-	desc_limit = get_desc_limit(&nseg_desc);
-	if (!nseg_desc.p ||
-	    ((desc_limit < 0x67 && (nseg_desc.type & 8)) ||
-	     desc_limit < 0x2b)) {
-		kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
-		return 1;
-	}
-
-	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
-		cseg_desc.type &= ~(1 << 1); /* clear the B flag */
-		save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
-	}
-
-	if (reason == TASK_SWITCH_IRET) {
-		u32 eflags = kvm_get_rflags(vcpu);
-		kvm_set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
-	}
-
-	/* Set the back link to the previous task only if the NT bit is set
-	 * in eflags; note that old_tss_sel is not used after this point. */
-	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
-		old_tss_sel = 0xffff;
-
-	if (nseg_desc.type & 8)
-		ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_sel,
-					 old_tss_base, &nseg_desc);
-	else
-		ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_sel,
-					 old_tss_base, &nseg_desc);
-
-	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
-		u32 eflags = kvm_get_rflags(vcpu);
-		kvm_set_rflags(vcpu, eflags | X86_EFLAGS_NT);
-	}
-
-	if (reason != TASK_SWITCH_IRET) {
-		nseg_desc.type |= (1 << 1);
-		save_guest_segment_descriptor(vcpu, tss_selector,
-					      &nseg_desc);
-	}
-
-	kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0(vcpu) | X86_CR0_TS);
-	seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
-	tr_seg.type = 11;
-	kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
-out:
-	return ret;
+	int cs_db, cs_l, ret;
+	cache_all_regs(vcpu);
+
+	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+
+	vcpu->arch.emulate_ctxt.vcpu = vcpu;
+	vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
+	vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
+	vcpu->arch.emulate_ctxt.mode =
+		(!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
+		(vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
+		? X86EMUL_MODE_VM86 : cs_l
+		? X86EMUL_MODE_PROT64 : cs_db
+		? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
+
+	ret = emulator_task_switch(&vcpu->arch.emulate_ctxt, &emulate_ops,
+				   tss_selector, reason);
+
+	if (ret == X86EMUL_CONTINUE)
+		kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+
+	return (ret != X86EMUL_CONTINUE);
 }
 EXPORT_SYMBOL_GPL(kvm_task_switch);
 
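Note: the nested ternary that picks emulate_ctxt.mode in the replacement kvm_task_switch() encodes a priority order: real mode (CR0.PE clear) wins, then vm86 (EFLAGS.VM), then CS.L selects 64-bit, and CS.D distinguishes 32-bit from 16-bit protected mode. A standalone sketch of the same decision as an if/else ladder (pick_mode and the enum names are illustrative stand-ins, not the kernel's X86EMUL_MODE_* constants):

#include <stdio.h>

enum mode { MODE_REAL, MODE_VM86, MODE_PROT16, MODE_PROT32, MODE_PROT64 };

/*
 * Same decision as the ternary chain in the new kvm_task_switch():
 * CR0.PE clear -> real mode; EFLAGS.VM -> vm86; otherwise CS.L picks
 * 64-bit and CS.D picks 32-bit vs. 16-bit protected mode.
 */
static enum mode pick_mode(int protmode, int eflags_vm, int cs_l, int cs_db)
{
	if (!protmode)
		return MODE_REAL;
	if (eflags_vm)
		return MODE_VM86;
	if (cs_l)
		return MODE_PROT64;
	return cs_db ? MODE_PROT32 : MODE_PROT16;
}

int main(void)
{
	/* 32-bit protected-mode guest: PE=1, VM=0, CS.L=0, CS.D=1 */
	printf("%d\n", pick_mode(1, 0, 0, 1) == MODE_PROT32); /* prints 1 */
	return 0;
}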