 arch/x86/kvm/x86.c         | 78 ++++++++++++++++++++-----------------------
 include/asm-x86/kvm_host.h |  4 ++++
 2 files changed, 43 insertions(+), 39 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5f67a7c54e82..4c94fad7f01e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3100,8 +3100,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	return 0;
 }
 
-static void get_segment(struct kvm_vcpu *vcpu,
-			struct kvm_segment *var, int seg)
+void kvm_get_segment(struct kvm_vcpu *vcpu,
+		     struct kvm_segment *var, int seg)
 {
 	kvm_x86_ops->get_segment(vcpu, var, seg);
 }
@@ -3110,7 +3110,7 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
 {
 	struct kvm_segment cs;
 
-	get_segment(vcpu, &cs, VCPU_SREG_CS);
+	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
 	*db = cs.db;
 	*l = cs.l;
 }
@@ -3124,15 +3124,15 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 
 	vcpu_load(vcpu);
 
-	get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
-	get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
-	get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
-	get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
-	get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
-	get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
+	kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
+	kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
+	kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
+	kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
+	kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
+	kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
 
-	get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
-	get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
+	kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
+	kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
 
 	kvm_x86_ops->get_idt(vcpu, &dt);
 	sregs->idt.limit = dt.limit;
@@ -3184,7 +3184,7 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-static void set_segment(struct kvm_vcpu *vcpu,
+static void kvm_set_segment(struct kvm_vcpu *vcpu,
 			struct kvm_segment *var, int seg)
 {
 	kvm_x86_ops->set_segment(vcpu, var, seg);
@@ -3221,7 +3221,7 @@ static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu,
 	if (selector & 1 << 2) {
 		struct kvm_segment kvm_seg;
 
-		get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
+		kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
 
 		if (kvm_seg.unusable)
 			dtable->limit = 0;
@@ -3327,7 +3327,7 @@ static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
 {
 	struct kvm_segment kvm_seg;
 
-	get_segment(vcpu, &kvm_seg, seg);
+	kvm_get_segment(vcpu, &kvm_seg, seg);
 	return kvm_seg.selector;
 }
 
@@ -3343,8 +3343,8 @@ static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-static int load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
-				   int type_bits, int seg)
+int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
+				int type_bits, int seg)
 {
 	struct kvm_segment kvm_seg;
 
@@ -3357,7 +3357,7 @@ static int load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 	if (!kvm_seg.s)
 		kvm_seg.unusable = 1;
 
-	set_segment(vcpu, &kvm_seg, seg);
+	kvm_set_segment(vcpu, &kvm_seg, seg);
 	return 0;
 }
 
@@ -3403,25 +3403,25 @@ static int load_state_from_tss32(struct kvm_vcpu *vcpu,
 	vcpu->arch.regs[VCPU_REGS_RSI] = tss->esi;
 	vcpu->arch.regs[VCPU_REGS_RDI] = tss->edi;
 
-	if (load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
+	if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
 		return 1;
 
-	if (load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
+	if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
 		return 1;
 
-	if (load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
+	if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
 		return 1;
 
-	if (load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
+	if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
 		return 1;
 
-	if (load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
+	if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
 		return 1;
 
-	if (load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
+	if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
 		return 1;
 
-	if (load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
+	if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
 		return 1;
 	return 0;
 }
@@ -3462,19 +3462,19 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu,
 	vcpu->arch.regs[VCPU_REGS_RSI] = tss->si;
 	vcpu->arch.regs[VCPU_REGS_RDI] = tss->di;
 
-	if (load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
+	if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
 		return 1;
 
-	if (load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
+	if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
 		return 1;
 
-	if (load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
+	if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
 		return 1;
 
-	if (load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
+	if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
 		return 1;
 
-	if (load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
+	if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
 		return 1;
 	return 0;
 }
@@ -3532,7 +3532,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 	struct desc_struct nseg_desc;
 	int ret = 0;
 
-	get_segment(vcpu, &tr_seg, VCPU_SREG_TR);
+	kvm_get_segment(vcpu, &tr_seg, VCPU_SREG_TR);
 
 	if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
 		goto out;
@@ -3591,7 +3591,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 	kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
 	seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
 	tr_seg.type = 11;
-	set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
+	kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
 out:
 	kvm_x86_ops->decache_regs(vcpu);
 	return ret;
@@ -3658,15 +3658,15 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 		}
 	}
 
-	set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
-	set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
-	set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
-	set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
-	set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
-	set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
+	kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
+	kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
+	kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
+	kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
+	kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
+	kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
 
-	set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
-	set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
+	kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
+	kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
 
 	vcpu_put(vcpu);
 
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index fc72bad878ed..cd6a4bb8c8e8 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -503,6 +503,10 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
 		    unsigned long value);
 
+void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
+int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
+				int type_bits, int seg);
+
 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
 
 void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
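
With the prototypes exported in kvm_host.h, code outside x86.c can call the renamed helpers directly. The snippet below is a minimal illustrative sketch, not part of this patch: the function example_reload_guest_ss is hypothetical, and it only assumes the two exported helpers shown in the hunks above.

/*
 * Hypothetical caller outside arch/x86/kvm/x86.c (e.g. vendor-specific
 * code) reloading the guest SS register by selector. Only
 * kvm_get_segment() and kvm_load_segment_descriptor() come from this
 * patch; everything else here is for illustration.
 */
#include <asm/kvm_host.h>

static int example_reload_guest_ss(struct kvm_vcpu *vcpu, u16 new_ss)
{
	struct kvm_segment old_ss;

	/* Snapshot the current SS state via the exported accessor. */
	kvm_get_segment(vcpu, &old_ss, VCPU_SREG_SS);

	/* type_bits 1 requests a data segment, as in load_state_from_tss32(). */
	if (kvm_load_segment_descriptor(vcpu, new_ss, 1, VCPU_SREG_SS))
		return 1;	/* descriptor load failed; caller handles it */

	return 0;
}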