author     Izik Eidus <izike@qumranet.com>    2008-03-24 17:14:53 -0400
committer  Avi Kivity <avi@qumranet.com>      2008-04-27 05:00:39 -0400
commit     37817f2982d0f559f90cecc66e150dd9d2c2df05 (patch)
tree       45114b5720d7a13bdbe48cc6a75dc6de03d6fcd2
parent     2e4d2653497856b102c90153f970c9e344ba96c6 (diff)
KVM: x86: hardware task switching support
This emulates the x86 hardware task switch mechanism in software, as it
is unsupported by either vmx or svm. It allows operating systems which
use it, like freedos, to run as kvm guests.

Signed-off-by: Izik Eidus <izike@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
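For context, the hardware mechanism being emulated is entered by a far jump or call through a TSS selector, by a task gate, or by IRET with EFLAGS.NT set. As a hypothetical illustration (not part of this patch), guest code like the following would exercise the new path, assuming selector 0x28 indexes a TSS descriptor in the guest's GDT:

	/* Far jump through a TSS selector; the offset is ignored for task
	 * switches. The resulting exit is forwarded to kvm_task_switch()
	 * with reason TASK_SWITCH_JMP. Selector 0x28 is an assumption. */
	asm volatile("ljmp $0x28, $0" : : : "memory");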
-rw-r--r--  arch/x86/kvm/svm.c            15
-rw-r--r--  arch/x86/kvm/svm.h             3
-rw-r--r--  arch/x86/kvm/tss.h            59
-rw-r--r--  arch/x86/kvm/vmx.c            15
-rw-r--r--  arch/x86/kvm/x86.c           409
-rw-r--r--  include/asm-x86/kvm_host.h     9
6 files changed, 507 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c1c1b973e80a..ad273468c08a 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1112,9 +1112,18 @@ static int invalid_op_interception(struct vcpu_svm *svm,
 static int task_switch_interception(struct vcpu_svm *svm,
 				    struct kvm_run *kvm_run)
 {
-	pr_unimpl(&svm->vcpu, "%s: task switch is unsupported\n", __func__);
-	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
-	return 0;
+	u16 tss_selector;
+
+	tss_selector = (u16)svm->vmcb->control.exit_info_1;
+	if (svm->vmcb->control.exit_info_2 &
+	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
+		return kvm_task_switch(&svm->vcpu, tss_selector,
+				       TASK_SWITCH_IRET);
+	if (svm->vmcb->control.exit_info_2 &
+	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
+		return kvm_task_switch(&svm->vcpu, tss_selector,
+				       TASK_SWITCH_JMP);
+	return kvm_task_switch(&svm->vcpu, tss_selector, TASK_SWITCH_CALL);
 }
 
 static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
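On SVM, exit_info_1 carries the target TSS selector and exit_info_2 encodes how the switch was initiated; the handler above distinguishes only IRET and JMP, letting everything else (CALL, and interrupts or exceptions through a task gate) fall through to TASK_SWITCH_CALL. A minimal standalone sketch of that decode, using only the defines this patch adds:

	/* hedged sketch, not part of the patch */
	static int svm_ts_reason(u64 exit_info_2)
	{
		if (exit_info_2 & (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
			return TASK_SWITCH_IRET;
		if (exit_info_2 & (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
			return TASK_SWITCH_JMP;
		return TASK_SWITCH_CALL; /* CALL or task gate */
	}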
diff --git a/arch/x86/kvm/svm.h b/arch/x86/kvm/svm.h
index 5fd50491b555..1b8afa78e869 100644
--- a/arch/x86/kvm/svm.h
+++ b/arch/x86/kvm/svm.h
@@ -238,6 +238,9 @@ struct __attribute__ ((__packed__)) vmcb {
 #define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID
 #define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR
 
+#define SVM_EXITINFOSHIFT_TS_REASON_IRET 36
+#define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
+
 #define SVM_EXIT_READ_CR0 	0x000
 #define SVM_EXIT_READ_CR3 	0x003
 #define SVM_EXIT_READ_CR4 	0x004
diff --git a/arch/x86/kvm/tss.h b/arch/x86/kvm/tss.h
new file mode 100644
index 000000000000..622aa10f692f
--- /dev/null
+++ b/arch/x86/kvm/tss.h
@@ -0,0 +1,59 @@
+#ifndef __TSS_SEGMENT_H
+#define __TSS_SEGMENT_H
+
+struct tss_segment_32 {
+	u32 prev_task_link;
+	u32 esp0;
+	u32 ss0;
+	u32 esp1;
+	u32 ss1;
+	u32 esp2;
+	u32 ss2;
+	u32 cr3;
+	u32 eip;
+	u32 eflags;
+	u32 eax;
+	u32 ecx;
+	u32 edx;
+	u32 ebx;
+	u32 esp;
+	u32 ebp;
+	u32 esi;
+	u32 edi;
+	u32 es;
+	u32 cs;
+	u32 ss;
+	u32 ds;
+	u32 fs;
+	u32 gs;
+	u32 ldt_selector;
+	u16 t;
+	u16 io_map;
+};
+
+struct tss_segment_16 {
+	u16 prev_task_link;
+	u16 sp0;
+	u16 ss0;
+	u16 sp1;
+	u16 ss1;
+	u16 sp2;
+	u16 ss2;
+	u16 ip;
+	u16 flag;
+	u16 ax;
+	u16 cx;
+	u16 dx;
+	u16 bx;
+	u16 sp;
+	u16 bp;
+	u16 si;
+	u16 di;
+	u16 es;
+	u16 cs;
+	u16 ss;
+	u16 ds;
+	u16 ldt;
+};
+
+#endif
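These layouts must match the architectural TSS formats byte for byte: a 32-bit TSS is 0x68 (104) bytes, which is why kvm_task_switch() below rejects a descriptor limit under 0x67, and a 16-bit TSS is 0x2c (44) bytes. A compile-time check along these lines (sketched here, not in the patch) would pin the invariant down:

	#include <linux/kernel.h>	/* for BUILD_BUG_ON */

	static inline void tss_layout_check(void)
	{
		BUILD_BUG_ON(sizeof(struct tss_segment_32) != 0x68);
		BUILD_BUG_ON(sizeof(struct tss_segment_16) != 0x2c);
	}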
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9b560325b127..cbca46acfac3 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2249,6 +2249,20 @@ static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	return 1;
 }
 
+static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+	unsigned long exit_qualification;
+	u16 tss_selector;
+	int reason;
+
+	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+
+	reason = (u32)exit_qualification >> 30;
+	tss_selector = exit_qualification;
+
+	return kvm_task_switch(vcpu, tss_selector, reason);
+}
+
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume. Otherwise they set the kvm_run parameter to indicate what needs
@@ -2271,6 +2285,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
 	[EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
 	[EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
 	[EXIT_REASON_WBINVD]                  = handle_wbinvd,
+	[EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
 };
 
 static const int kvm_vmx_max_exit_handlers =
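handle_task_switch() relies on the VMX exit-qualification layout for task-switch exits: bits 15:0 hold the target TSS selector and bits 31:30 the switch source. The TASK_SWITCH_* enum added to kvm_host.h uses the values 0..3 precisely so that this two-bit field can be handed to kvm_task_switch() unchanged. A hedged sketch of the decode the code performs implicitly (helper names are illustrative, not part of the patch):

	static inline u16 vmx_ts_selector(unsigned long qual)
	{
		return qual & 0xffff;		/* bits 15:0 */
	}

	static inline int vmx_ts_reason(unsigned long qual)
	{
		return ((u32)qual >> 30) & 3;	/* bits 31:30 */
	}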
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 63afca1c295f..32d910044f85 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -18,6 +18,7 @@
 #include "irq.h"
 #include "mmu.h"
 #include "i8254.h"
+#include "tss.h"
 
 #include <linux/clocksource.h>
 #include <linux/kvm.h>
@@ -3077,6 +3078,414 @@ static void set_segment(struct kvm_vcpu *vcpu,
 	kvm_x86_ops->set_segment(vcpu, var, seg);
 }
 
+static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
+				   struct kvm_segment *kvm_desct)
+{
+	kvm_desct->base = seg_desc->base0;
+	kvm_desct->base |= seg_desc->base1 << 16;
+	kvm_desct->base |= seg_desc->base2 << 24;
+	kvm_desct->limit = seg_desc->limit0;
+	kvm_desct->limit |= seg_desc->limit << 16;
+	kvm_desct->selector = selector;
+	kvm_desct->type = seg_desc->type;
+	kvm_desct->present = seg_desc->p;
+	kvm_desct->dpl = seg_desc->dpl;
+	kvm_desct->db = seg_desc->d;
+	kvm_desct->s = seg_desc->s;
+	kvm_desct->l = seg_desc->l;
+	kvm_desct->g = seg_desc->g;
+	kvm_desct->avl = seg_desc->avl;
+	if (!selector)
+		kvm_desct->unusable = 1;
+	else
+		kvm_desct->unusable = 0;
+	kvm_desct->padding = 0;
+}
+
+static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu,
+					   u16 selector,
+					   struct descriptor_table *dtable)
+{
+	if (selector & 1 << 2) {
+		struct kvm_segment kvm_seg;
+
+		get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
+
+		if (kvm_seg.unusable)
+			dtable->limit = 0;
+		else
+			dtable->limit = kvm_seg.limit;
+		dtable->base = kvm_seg.base;
+	}
+	else
+		kvm_x86_ops->get_gdt(vcpu, dtable);
+}
+
+/* allowed only for 8-byte segment descriptors */
+static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
+					 struct desc_struct *seg_desc)
+{
+	struct descriptor_table dtable;
+	u16 index = selector >> 3;
+
+	get_segment_descritptor_dtable(vcpu, selector, &dtable);
+
+	if (dtable.limit < index * 8 + 7) {
+		kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
+		return 1;
+	}
+	return kvm_read_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
+}
+
+/* allowed only for 8-byte segment descriptors */
+static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
+					 struct desc_struct *seg_desc)
+{
+	struct descriptor_table dtable;
+	u16 index = selector >> 3;
+
+	get_segment_descritptor_dtable(vcpu, selector, &dtable);
+
+	if (dtable.limit < index * 8 + 7)
+		return 1;
+	return kvm_write_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
+}
+
+static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
+			     struct desc_struct *seg_desc)
+{
+	u32 base_addr;
+
+	base_addr = seg_desc->base0;
+	base_addr |= (seg_desc->base1 << 16);
+	base_addr |= (seg_desc->base2 << 24);
+
+	return base_addr;
+}
+
+static int load_tss_segment32(struct kvm_vcpu *vcpu,
+			      struct desc_struct *seg_desc,
+			      struct tss_segment_32 *tss)
+{
+	u32 base_addr;
+
+	base_addr = get_tss_base_addr(vcpu, seg_desc);
+
+	return kvm_read_guest(vcpu->kvm, base_addr, tss,
+			      sizeof(struct tss_segment_32));
+}
+
+static int save_tss_segment32(struct kvm_vcpu *vcpu,
+			      struct desc_struct *seg_desc,
+			      struct tss_segment_32 *tss)
+{
+	u32 base_addr;
+
+	base_addr = get_tss_base_addr(vcpu, seg_desc);
+
+	return kvm_write_guest(vcpu->kvm, base_addr, tss,
+			       sizeof(struct tss_segment_32));
+}
+
+static int load_tss_segment16(struct kvm_vcpu *vcpu,
+			      struct desc_struct *seg_desc,
+			      struct tss_segment_16 *tss)
+{
+	u32 base_addr;
+
+	base_addr = get_tss_base_addr(vcpu, seg_desc);
+
+	return kvm_read_guest(vcpu->kvm, base_addr, tss,
+			      sizeof(struct tss_segment_16));
+}
+
+static int save_tss_segment16(struct kvm_vcpu *vcpu,
+			      struct desc_struct *seg_desc,
+			      struct tss_segment_16 *tss)
+{
+	u32 base_addr;
+
+	base_addr = get_tss_base_addr(vcpu, seg_desc);
+
+	return kvm_write_guest(vcpu->kvm, base_addr, tss,
+			       sizeof(struct tss_segment_16));
+}
+
+static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
+{
+	struct kvm_segment kvm_seg;
+
+	get_segment(vcpu, &kvm_seg, seg);
+	return kvm_seg.selector;
+}
+
+static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
+						u16 selector,
+						struct kvm_segment *kvm_seg)
+{
+	struct desc_struct seg_desc;
+
+	if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
+		return 1;
+	seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
+	return 0;
+}
+
+static int load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
+				   int type_bits, int seg)
+{
+	struct kvm_segment kvm_seg;
+
+	if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
+		return 1;
+	kvm_seg.type |= type_bits;
+
+	if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
+	    seg != VCPU_SREG_LDTR)
+		if (!kvm_seg.s)
+			kvm_seg.unusable = 1;
+
+	set_segment(vcpu, &kvm_seg, seg);
+	return 0;
+}
+
+static void save_state_to_tss32(struct kvm_vcpu *vcpu,
+				struct tss_segment_32 *tss)
+{
+	tss->cr3 = vcpu->arch.cr3;
+	tss->eip = vcpu->arch.rip;
+	tss->eflags = kvm_x86_ops->get_rflags(vcpu);
+	tss->eax = vcpu->arch.regs[VCPU_REGS_RAX];
+	tss->ecx = vcpu->arch.regs[VCPU_REGS_RCX];
+	tss->edx = vcpu->arch.regs[VCPU_REGS_RDX];
+	tss->ebx = vcpu->arch.regs[VCPU_REGS_RBX];
+	tss->esp = vcpu->arch.regs[VCPU_REGS_RSP];
+	tss->ebp = vcpu->arch.regs[VCPU_REGS_RBP];
+	tss->esi = vcpu->arch.regs[VCPU_REGS_RSI];
+	tss->edi = vcpu->arch.regs[VCPU_REGS_RDI];
+
+	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
+	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
+	tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
+	tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
+	tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
+	tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
+	tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
+	tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
+}
+
+static int load_state_from_tss32(struct kvm_vcpu *vcpu,
+				 struct tss_segment_32 *tss)
+{
+	kvm_set_cr3(vcpu, tss->cr3);
+
+	vcpu->arch.rip = tss->eip;
+	kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);
+
+	vcpu->arch.regs[VCPU_REGS_RAX] = tss->eax;
+	vcpu->arch.regs[VCPU_REGS_RCX] = tss->ecx;
+	vcpu->arch.regs[VCPU_REGS_RDX] = tss->edx;
+	vcpu->arch.regs[VCPU_REGS_RBX] = tss->ebx;
+	vcpu->arch.regs[VCPU_REGS_RSP] = tss->esp;
+	vcpu->arch.regs[VCPU_REGS_RBP] = tss->ebp;
+	vcpu->arch.regs[VCPU_REGS_RSI] = tss->esi;
+	vcpu->arch.regs[VCPU_REGS_RDI] = tss->edi;
+
+	if (load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
+		return 1;
+
+	if (load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
+		return 1;
+
+	if (load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
+		return 1;
+
+	if (load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
+		return 1;
+
+	if (load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
+		return 1;
+
+	if (load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
+		return 1;
+
+	if (load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
+		return 1;
+	return 0;
+}
+
+static void save_state_to_tss16(struct kvm_vcpu *vcpu,
+				struct tss_segment_16 *tss)
+{
+	tss->ip = vcpu->arch.rip;
+	tss->flag = kvm_x86_ops->get_rflags(vcpu);
+	tss->ax = vcpu->arch.regs[VCPU_REGS_RAX];
+	tss->cx = vcpu->arch.regs[VCPU_REGS_RCX];
+	tss->dx = vcpu->arch.regs[VCPU_REGS_RDX];
+	tss->bx = vcpu->arch.regs[VCPU_REGS_RBX];
+	tss->sp = vcpu->arch.regs[VCPU_REGS_RSP];
+	tss->bp = vcpu->arch.regs[VCPU_REGS_RBP];
+	tss->si = vcpu->arch.regs[VCPU_REGS_RSI];
+	tss->di = vcpu->arch.regs[VCPU_REGS_RDI];
+
+	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
+	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
+	tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
+	tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
+	tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
+	tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
+}
+
+static int load_state_from_tss16(struct kvm_vcpu *vcpu,
+				 struct tss_segment_16 *tss)
+{
+	vcpu->arch.rip = tss->ip;
+	kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
+	vcpu->arch.regs[VCPU_REGS_RAX] = tss->ax;
+	vcpu->arch.regs[VCPU_REGS_RCX] = tss->cx;
+	vcpu->arch.regs[VCPU_REGS_RDX] = tss->dx;
+	vcpu->arch.regs[VCPU_REGS_RBX] = tss->bx;
+	vcpu->arch.regs[VCPU_REGS_RSP] = tss->sp;
+	vcpu->arch.regs[VCPU_REGS_RBP] = tss->bp;
+	vcpu->arch.regs[VCPU_REGS_RSI] = tss->si;
+	vcpu->arch.regs[VCPU_REGS_RDI] = tss->di;
+
+	if (load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
+		return 1;
+
+	if (load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
+		return 1;
+
+	if (load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
+		return 1;
+
+	if (load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
+		return 1;
+
+	if (load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
+		return 1;
+	return 0;
+}
+
+int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
+		       struct desc_struct *cseg_desc,
+		       struct desc_struct *nseg_desc)
+{
+	struct tss_segment_16 tss_segment_16;
+	int ret = 0;
+
+	if (load_tss_segment16(vcpu, cseg_desc, &tss_segment_16))
+		goto out;
+
+	save_state_to_tss16(vcpu, &tss_segment_16);
+	save_tss_segment16(vcpu, cseg_desc, &tss_segment_16);
+
+	if (load_tss_segment16(vcpu, nseg_desc, &tss_segment_16))
+		goto out;
+	if (load_state_from_tss16(vcpu, &tss_segment_16))
+		goto out;
+
+	ret = 1;
+out:
+	return ret;
+}
+
+int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
+		       struct desc_struct *cseg_desc,
+		       struct desc_struct *nseg_desc)
+{
+	struct tss_segment_32 tss_segment_32;
+	int ret = 0;
+
+	if (load_tss_segment32(vcpu, cseg_desc, &tss_segment_32))
+		goto out;
+
+	save_state_to_tss32(vcpu, &tss_segment_32);
+	save_tss_segment32(vcpu, cseg_desc, &tss_segment_32);
+
+	if (load_tss_segment32(vcpu, nseg_desc, &tss_segment_32))
+		goto out;
+	if (load_state_from_tss32(vcpu, &tss_segment_32))
+		goto out;
+
+	ret = 1;
+out:
+	return ret;
+}
+
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
+{
+	struct kvm_segment tr_seg;
+	struct desc_struct cseg_desc;
+	struct desc_struct nseg_desc;
+	int ret = 0;
+
+	get_segment(vcpu, &tr_seg, VCPU_SREG_TR);
+
+	if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
+		goto out;
+
+	if (load_guest_segment_descriptor(vcpu, tr_seg.selector, &cseg_desc))
+		goto out;
+
+
+	if (reason != TASK_SWITCH_IRET) {
+		int cpl;
+
+		cpl = kvm_x86_ops->get_cpl(vcpu);
+		if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
+			kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+			return 1;
+		}
+	}
+
+	if (!nseg_desc.p || (nseg_desc.limit0 | nseg_desc.limit << 16) < 0x67) {
+		kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
+		return 1;
+	}
+
+	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
+		cseg_desc.type &= ~(1 << 1); /* clear the busy (B) flag */
+		save_guest_segment_descriptor(vcpu, tr_seg.selector,
+					      &cseg_desc);
+	}
+
+	if (reason == TASK_SWITCH_IRET) {
+		u32 eflags = kvm_x86_ops->get_rflags(vcpu);
+		kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
+	}
+
+	kvm_x86_ops->skip_emulated_instruction(vcpu);
+	kvm_x86_ops->cache_regs(vcpu);
+
+	if (nseg_desc.type & 8)
+		ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc,
+					 &nseg_desc);
+	else
+		ret = kvm_task_switch_16(vcpu, tss_selector, &cseg_desc,
+					 &nseg_desc);
+
+	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
+		u32 eflags = kvm_x86_ops->get_rflags(vcpu);
+		kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT);
+	}
+
+	if (reason != TASK_SWITCH_IRET) {
+		nseg_desc.type |= (1 << 1); /* set the busy (B) flag */
+		save_guest_segment_descriptor(vcpu, tss_selector,
+					      &nseg_desc);
+	}
+
+	kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
+	seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
+	tr_seg.type = 11;
+	set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
+out:
+	kvm_x86_ops->decache_regs(vcpu);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_task_switch);
+
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 				  struct kvm_sregs *sregs)
 {
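Taken together, kvm_task_switch() follows the architectural task-switch sequence fairly closely; a hedged summary of the flow implemented above:

	/*
	 * kvm_task_switch(vcpu, tss_selector, reason):
	 *  1. fetch the new and current TSS descriptors from the guest GDT/LDT
	 *  2. unless IRET: #GP if the selector's RPL or the current CPL
	 *     exceeds the new descriptor's DPL
	 *  3. #TS if the new TSS is not present or its limit is below 0x67
	 *  4. for IRET/JMP: clear the busy bit in the old TSS descriptor;
	 *     for IRET: also clear EFLAGS.NT
	 *  5. save CPU state into the old TSS and load state from the new
	 *     one (kvm_task_switch_16/32, chosen by descriptor type bit 3)
	 *  6. for CALL/GATE: set EFLAGS.NT so a later IRET can return through
	 *     prev_task_link; unless IRET: mark the new TSS descriptor busy
	 *  7. set CR0.TS and load TR with the new selector (type 11, busy TSS)
	 */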
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 93e809c251ef..7b28cf949d55 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -492,6 +492,8 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
 		    unsigned long value);
 
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
+
 void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr0);
 void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr0);
@@ -657,4 +659,11 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
 #define RMODE_TSS_SIZE \
 	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
 
+enum {
+	TASK_SWITCH_CALL = 0,
+	TASK_SWITCH_IRET = 1,
+	TASK_SWITCH_JMP = 2,
+	TASK_SWITCH_GATE = 3,
+};
+
 #endif