author     Marcelo Tosatti <mtosatti@redhat.com>    2008-07-16 18:07:11 -0400
committer  Avi Kivity <avi@qumranet.com>            2008-07-27 04:34:09 -0400
commit     34198bf8426276a2ce1e97056a0f02d43637e5ae (patch)
tree       2f116fe9d97f602359f45a55832cb6621d451b76 /arch/x86
parent     98899aa0e0bf5de05850082be0eb837058c09ea5 (diff)
KVM: task switch: use seg regs provided by subarch instead of reading from GDT
There is no guarantee that the old TSS descriptor in the GDT contains
the proper base address. This is the case, for example, when a Windows
installation reboots via triple fault.
Use the guest segment registers provided by the subarch instead, and
translate the base address properly.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
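
In short: the old TSS base is now taken from the guest's TR segment register (as cached by the subarch) and translated from a guest virtual to a guest physical address, instead of being re-read from the old TSS descriptor in the GDT. A condensed sketch of the 32-bit path introduced below; the wrapper name and the flat early-return style are illustrative only, the real code lives in kvm_task_switch()/kvm_task_switch_32() and uses a goto-out pattern plus busy-flag and privilege checks (see the diff):

/*
 * Simplified sketch of the 32-bit task-switch path after this patch.
 * The helper name is hypothetical; see kvm_task_switch_32() below for
 * the actual implementation.
 */
static int task_switch_32_sketch(struct kvm_vcpu *vcpu,
				 struct desc_struct *nseg_desc)
{
	struct tss_segment_32 tss;
	u32 old_tss_base;

	/* Old TSS base comes from the TR segment register provided by the
	 * subarch, not from a (possibly stale) descriptor in the GDT. */
	old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);

	/* The segment base is a guest virtual address; translate it to a
	 * guest physical address before touching guest memory. */
	old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);

	/* Save the outgoing task's state into the old TSS... */
	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss, sizeof tss))
		return 0;
	save_state_to_tss32(vcpu, &tss);
	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss, sizeof tss))
		return 0;

	/* ...then load the incoming task's state from the new TSS, which
	 * is still located via its descriptor. */
	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
			   &tss, sizeof tss))
		return 0;
	if (load_state_from_tss32(vcpu, &tss))
		return 0;

	return 1;
}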
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kvm/x86.c  93
1 file changed, 30 insertions(+), 63 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cd687395e4e7..27c6ece91da6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3267,54 +3267,6 @@ static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
 	return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
 }
 
-static int load_tss_segment32(struct kvm_vcpu *vcpu,
-			      struct desc_struct *seg_desc,
-			      struct tss_segment_32 *tss)
-{
-	u32 base_addr;
-
-	base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-	return kvm_read_guest(vcpu->kvm, base_addr, tss,
-			      sizeof(struct tss_segment_32));
-}
-
-static int save_tss_segment32(struct kvm_vcpu *vcpu,
-			      struct desc_struct *seg_desc,
-			      struct tss_segment_32 *tss)
-{
-	u32 base_addr;
-
-	base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-	return kvm_write_guest(vcpu->kvm, base_addr, tss,
-			       sizeof(struct tss_segment_32));
-}
-
-static int load_tss_segment16(struct kvm_vcpu *vcpu,
-			      struct desc_struct *seg_desc,
-			      struct tss_segment_16 *tss)
-{
-	u32 base_addr;
-
-	base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-	return kvm_read_guest(vcpu->kvm, base_addr, tss,
-			      sizeof(struct tss_segment_16));
-}
-
-static int save_tss_segment16(struct kvm_vcpu *vcpu,
-			      struct desc_struct *seg_desc,
-			      struct tss_segment_16 *tss)
-{
-	u32 base_addr;
-
-	base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-	return kvm_write_guest(vcpu->kvm, base_addr, tss,
-			       sizeof(struct tss_segment_16));
-}
-
 static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
 {
 	struct kvm_segment kvm_seg;
@@ -3472,20 +3424,26 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu,
 }
 
 static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
-		       struct desc_struct *cseg_desc,
+		       u32 old_tss_base,
 		       struct desc_struct *nseg_desc)
 {
 	struct tss_segment_16 tss_segment_16;
 	int ret = 0;
 
-	if (load_tss_segment16(vcpu, cseg_desc, &tss_segment_16))
+	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
+			   sizeof tss_segment_16))
 		goto out;
 
 	save_state_to_tss16(vcpu, &tss_segment_16);
-	save_tss_segment16(vcpu, cseg_desc, &tss_segment_16);
 
-	if (load_tss_segment16(vcpu, nseg_desc, &tss_segment_16))
+	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
+			    sizeof tss_segment_16))
 		goto out;
+
+	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+			   &tss_segment_16, sizeof tss_segment_16))
+		goto out;
+
 	if (load_state_from_tss16(vcpu, &tss_segment_16))
 		goto out;
 
@@ -3495,20 +3453,26 @@ out:
 }
 
 static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
-		       struct desc_struct *cseg_desc,
+		       u32 old_tss_base,
 		       struct desc_struct *nseg_desc)
 {
 	struct tss_segment_32 tss_segment_32;
 	int ret = 0;
 
-	if (load_tss_segment32(vcpu, cseg_desc, &tss_segment_32))
+	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
+			   sizeof tss_segment_32))
 		goto out;
 
 	save_state_to_tss32(vcpu, &tss_segment_32);
-	save_tss_segment32(vcpu, cseg_desc, &tss_segment_32);
 
-	if (load_tss_segment32(vcpu, nseg_desc, &tss_segment_32))
+	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
+			    sizeof tss_segment_32))
 		goto out;
+
+	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+			   &tss_segment_32, sizeof tss_segment_32))
+		goto out;
+
 	if (load_state_from_tss32(vcpu, &tss_segment_32))
 		goto out;
 
@@ -3523,16 +3487,20 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 	struct desc_struct cseg_desc;
 	struct desc_struct nseg_desc;
 	int ret = 0;
+	u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
+	u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
 
-	kvm_get_segment(vcpu, &tr_seg, VCPU_SREG_TR);
+	old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
 
+	/* FIXME: Handle errors. Failure to read either TSS or their
+	 * descriptors should generate a pagefault.
+	 */
 	if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
 		goto out;
 
-	if (load_guest_segment_descriptor(vcpu, tr_seg.selector, &cseg_desc))
+	if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
 		goto out;
 
-
 	if (reason != TASK_SWITCH_IRET) {
 		int cpl;
 
@@ -3550,8 +3518,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 
 	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
 		cseg_desc.type &= ~(1 << 1); //clear the B flag
-		save_guest_segment_descriptor(vcpu, tr_seg.selector,
-					      &cseg_desc);
+		save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
 	}
 
 	if (reason == TASK_SWITCH_IRET) {
@@ -3563,10 +3530,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 	kvm_x86_ops->cache_regs(vcpu);
 
 	if (nseg_desc.type & 8)
-		ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc,
+		ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base,
 					 &nseg_desc);
 	else
-		ret = kvm_task_switch_16(vcpu, tss_selector, &cseg_desc,
+		ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_base,
 					 &nseg_desc);
 
 	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {