author     David S. Miller <davem@davemloft.net>  2008-07-27 19:51:21 -0400
committer  David S. Miller <davem@davemloft.net>  2008-07-27 19:51:21 -0400
commit     281c7413ed914623d3245299a4761b6b27ab9fdb (patch)
tree       182b5222a7ad4b77c32f7845ea777ca665d7def2 /arch/x86
parent     2ab61b01110aa04cd853c619a74881e3225a5e24 (diff)
parent     c9272c4f9fbe2087beb3392f526dc5b19efaa56b (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/head_32.S |   8
-rw-r--r--  arch/x86/kvm/mmu.c        |   7
-rw-r--r--  arch/x86/kvm/svm.c        |  10
-rw-r--r--  arch/x86/kvm/vmx.c        |  22
-rw-r--r--  arch/x86/kvm/x86.c        | 109
5 files changed, 75 insertions, 81 deletions
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index f67e93441caf..a7010c3a377a 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -456,9 +456,6 @@ is386:	movl $2,%ecx		# set MP
 1:
 #endif /* CONFIG_SMP */
 	jmp *(initial_code)
-	.align 4
-ENTRY(initial_code)
-	.long i386_start_kernel
 
 /*
  * We depend on ET to be correct. This checks for 287/387.
@@ -601,6 +598,11 @@ ignore_int:
 #endif
 	iret
 
+	.section .cpuinit.data,"wa"
+	.align 4
+ENTRY(initial_code)
+	.long i386_start_kernel
+
 .section .text
 /*
  * Real beginning of normal "text" segment
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b0e4ddca6c18..2fa231923cf7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1814,6 +1814,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	return r;
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
@@ -1870,6 +1871,12 @@ void kvm_enable_tdp(void)
 }
 EXPORT_SYMBOL_GPL(kvm_enable_tdp);
 
+void kvm_disable_tdp(void)
+{
+	tdp_enabled = false;
+}
+EXPORT_SYMBOL_GPL(kvm_disable_tdp);
+
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu_page *sp;
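Note: the mmu.c hunks do two things. They export kvm_mmu_unprotect_page_virt() so the svm.c and vmx.c page-fault handlers below can call it, and they add kvm_disable_tdp() as the counterpart to the existing kvm_enable_tdp(). A minimal standalone sketch of the toggle pair follows; tdp_enabled models the file-scope flag in mmu.c, the kvm_disable_tdp() body is verbatim from the hunk, and the kvm_enable_tdp() body is an inference, not shown in this diff.

#include <stdbool.h>

/* tdp_enabled models the flag in arch/x86/kvm/mmu.c that selects
 * two-dimensional paging (NPT/EPT) over shadow paging. */
static bool tdp_enabled = false;

void kvm_enable_tdp(void)	/* body inferred; not part of this diff */
{
	tdp_enabled = true;
}

void kvm_disable_tdp(void)	/* body as added by the hunk above */
{
	tdp_enabled = false;
}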
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b756e876dce3..e2ee264740c7 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -453,7 +453,8 @@ static __init int svm_hardware_setup(void)
 	if (npt_enabled) {
 		printk(KERN_INFO "kvm: Nested Paging enabled\n");
 		kvm_enable_tdp();
-	}
+	} else
+		kvm_disable_tdp();
 
 	return 0;
 
@@ -1007,10 +1008,13 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	struct kvm *kvm = svm->vcpu.kvm;
 	u64 fault_address;
 	u32 error_code;
+	bool event_injection = false;
 
 	if (!irqchip_in_kernel(kvm) &&
-	    is_external_interrupt(exit_int_info))
+	    is_external_interrupt(exit_int_info)) {
+		event_injection = true;
 		push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
+	}
 
 	fault_address = svm->vmcb->control.exit_info_2;
 	error_code = svm->vmcb->control.exit_info_1;
@@ -1024,6 +1028,8 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 		    (u32)fault_address, (u32)(fault_address >> 32),
 		    handler);
 
+	if (event_injection)
+		kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
 	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
 }
 
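Note: the pf_interception() change records whether an interrupt was being pushed when the page fault arrived. If so, the faulting page is unprotected before the fault is handled: the fault may have hit a guest page the shadow MMU write-protected (for instance one holding the IDT or the guest stack), and the injection cannot complete while the page stays protected. A reduced, compilable model of that control flow; every helper here is a stub standing in for the KVM internals, and the bit masks are placeholders:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_external_interrupt(uint32_t info)
{
	return info & 0x100;	/* placeholder mask, not SVM's real one */
}

static void push_irq(uint32_t vec)
{
	printf("inject vector %u\n", vec);
}

static void unprotect_page(uint64_t addr)	/* kvm_mmu_unprotect_page_virt() stand-in */
{
	printf("unprotect page at %#llx\n", (unsigned long long)addr);
}

static int handle_page_fault(uint64_t addr)	/* kvm_mmu_page_fault() stand-in */
{
	printf("handle fault at %#llx\n", (unsigned long long)addr);
	return 1;
}

static int pf_interception(uint32_t exit_int_info, uint64_t fault_address)
{
	bool event_injection = false;

	if (is_external_interrupt(exit_int_info)) {
		event_injection = true;
		push_irq(exit_int_info & 0xff);
	}

	/* A fault taken mid-injection may mean the target page is
	 * shadow write-protected; unprotect it so injection can finish. */
	if (event_injection)
		unprotect_page(fault_address);
	return handle_page_fault(fault_address);
}

int main(void)
{
	return pf_interception(0x1a0, 0xdeadb000) ? 0 : 1;
}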
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 0cac63701719..2a69773e3b26 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2298,6 +2298,8 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		cr2 = vmcs_readl(EXIT_QUALIFICATION);
 		KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2,
 			    (u32)((u64)cr2 >> 32), handler);
+		if (vect_info & VECTORING_INFO_VALID_MASK)
+			kvm_mmu_unprotect_page_virt(vcpu, cr2);
 		return kvm_mmu_page_fault(vcpu, cr2, error_code);
 	}
 
@@ -3116,15 +3118,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 		return ERR_PTR(-ENOMEM);
 
 	allocate_vpid(vmx);
-	if (id == 0 && vm_need_ept()) {
-		kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
-			VMX_EPT_WRITABLE_MASK |
-			VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
-		kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK,
-			VMX_EPT_FAKE_DIRTY_MASK, 0ull,
-			VMX_EPT_EXECUTABLE_MASK);
-		kvm_enable_tdp();
-	}
 
 	err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
 	if (err)
@@ -3303,8 +3296,17 @@ static int __init vmx_init(void)
 	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP);
 	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP);
 
-	if (cpu_has_vmx_ept())
+	if (vm_need_ept()) {
 		bypass_guest_pf = 0;
+		kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
+			VMX_EPT_WRITABLE_MASK |
+			VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
+		kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK,
+			VMX_EPT_FAKE_DIRTY_MASK, 0ull,
+			VMX_EPT_EXECUTABLE_MASK);
+		kvm_enable_tdp();
+	} else
+		kvm_disable_tdp();
 
 	if (bypass_guest_pf)
 		kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
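Note: the vmx.c hunks mirror the svm.c fix (unprotect the faulting page when the fault occurred while an event was being vectored) and also move the EPT PTE-mask setup out of vmx_create_vcpu(), where it only ran for vcpu 0, into vmx_init(), where it runs exactly once at module load and is paired with kvm_disable_tdp() when EPT is not usable. A toy model of that "configure once at init, not per vcpu" refactor, with stand-in names throughout:

#include <stdbool.h>
#include <stdio.h>

static bool tdp_enabled;

static bool ept_usable(void)	/* stand-in for vm_need_ept() */
{
	return true;
}

static void set_pte_masks(void)	/* stand-in for kvm_mmu_set_*_ptes() */
{
	printf("EPT PTE masks configured\n");
}

static void module_init(void)
{
	/* One-time global setup belongs here, before any vcpu exists. */
	if (ept_usable()) {
		set_pte_masks();
		tdp_enabled = true;
	} else
		tdp_enabled = false;
}

static void create_vcpu(int id)
{
	/* The per-vcpu path no longer does global MMU setup. */
	printf("vcpu %d created (tdp=%d)\n", id, tdp_enabled);
}

int main(void)
{
	module_init();
	create_vcpu(0);
	create_vcpu(1);
	return 0;
}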
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9f1cdb011cff..5916191420c7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3184,6 +3184,10 @@ static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
 	kvm_desct->base |= seg_desc->base2 << 24;
 	kvm_desct->limit = seg_desc->limit0;
 	kvm_desct->limit |= seg_desc->limit << 16;
+	if (seg_desc->g) {
+		kvm_desct->limit <<= 12;
+		kvm_desct->limit |= 0xfff;
+	}
 	kvm_desct->selector = selector;
 	kvm_desct->type = seg_desc->type;
 	kvm_desct->present = seg_desc->p;
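Note: the seg_desct_to_kvm_desct() hunk starts honoring the descriptor's granularity (G) bit. With G set, the 20-bit limit field counts 4 KiB pages rather than bytes, so the byte-granular limit is (limit << 12) | 0xfff. A standalone check of that scaling, assuming raw_limit is the already-assembled 20-bit field:

#include <stdint.h>
#include <stdio.h>

static uint32_t effective_limit(uint32_t raw_limit, int g_bit)
{
	uint32_t limit = raw_limit;

	if (g_bit) {		/* limit counts 4 KiB pages */
		limit <<= 12;	/* scale pages to bytes */
		limit |= 0xfff;	/* include the last byte of the last page */
	}
	return limit;
}

int main(void)
{
	/* A flat 4 GiB segment: 0xfffff pages -> 0xffffffff bytes. */
	printf("%#x\n", effective_limit(0xfffff, 1));
	/* Without G, the same field is a 1 MiB byte limit. */
	printf("%#x\n", effective_limit(0xfffff, 0));
	return 0;
}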
@@ -3223,6 +3227,7 @@ static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu,
 static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 					 struct desc_struct *seg_desc)
 {
+	gpa_t gpa;
 	struct descriptor_table dtable;
 	u16 index = selector >> 3;
 
@@ -3232,13 +3237,16 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 		kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
 		return 1;
 	}
-	return kvm_read_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
+	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
+	gpa += index * 8;
+	return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
 }
 
 /* allowed just for 8 bytes segments */
 static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 					 struct desc_struct *seg_desc)
 {
+	gpa_t gpa;
 	struct descriptor_table dtable;
 	u16 index = selector >> 3;
 
@@ -3246,7 +3254,9 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 
 	if (dtable.limit < index * 8 + 7)
 		return 1;
-	return kvm_write_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
+	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
+	gpa += index * 8;
+	return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
 }
 
 static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
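Note: kvm_read_guest() and kvm_write_guest() take guest-physical addresses, while dtable.base is guest-virtual, so both descriptor accessors now translate the table base through vcpu->arch.mmu.gva_to_gpa() before indexing. A minimal sketch of the pattern; gva_to_gpa() here is an identity-map stub (the real callback walks the guest page tables), and like the patch it translates the base once and then adds index * 8:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gva_t;
typedef uint64_t gpa_t;

static gpa_t gva_to_gpa(gva_t gva)	/* identity map for the demo */
{
	return (gpa_t)gva;
}

/* Descriptor-table entries are 8 bytes wide; the low 3 selector bits
 * (TI and RPL) are not part of the index. */
static gpa_t descriptor_gpa(gva_t dtable_base, uint16_t selector)
{
	uint16_t index = selector >> 3;

	return gva_to_gpa(dtable_base) + index * 8;
}

int main(void)
{
	/* selector 0x23 -> index 4 -> base + 32 = 0x1020 */
	printf("%#llx\n", (unsigned long long)descriptor_gpa(0x1000, 0x23));
	return 0;
}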
@@ -3258,55 +3268,7 @@ static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
 	base_addr |= (seg_desc->base1 << 16);
 	base_addr |= (seg_desc->base2 << 24);
 
-	return base_addr;
+	return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
-}
-
-static int load_tss_segment32(struct kvm_vcpu *vcpu,
-			      struct desc_struct *seg_desc,
-			      struct tss_segment_32 *tss)
-{
-	u32 base_addr;
-
-	base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-	return kvm_read_guest(vcpu->kvm, base_addr, tss,
-			      sizeof(struct tss_segment_32));
-}
-
-static int save_tss_segment32(struct kvm_vcpu *vcpu,
-			      struct desc_struct *seg_desc,
-			      struct tss_segment_32 *tss)
-{
-	u32 base_addr;
-
-	base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-	return kvm_write_guest(vcpu->kvm, base_addr, tss,
-			       sizeof(struct tss_segment_32));
-}
-
-static int load_tss_segment16(struct kvm_vcpu *vcpu,
-			      struct desc_struct *seg_desc,
-			      struct tss_segment_16 *tss)
-{
-	u32 base_addr;
-
-	base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-	return kvm_read_guest(vcpu->kvm, base_addr, tss,
-			      sizeof(struct tss_segment_16));
-}
-
-static int save_tss_segment16(struct kvm_vcpu *vcpu,
-			      struct desc_struct *seg_desc,
-			      struct tss_segment_16 *tss)
-{
-	u32 base_addr;
-
-	base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-	return kvm_write_guest(vcpu->kvm, base_addr, tss,
-			       sizeof(struct tss_segment_16));
 }
 
 static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
@@ -3466,20 +3428,26 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu,
 }
 
 static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
-			      struct desc_struct *cseg_desc,
+			      u32 old_tss_base,
 			      struct desc_struct *nseg_desc)
 {
 	struct tss_segment_16 tss_segment_16;
 	int ret = 0;
 
-	if (load_tss_segment16(vcpu, cseg_desc, &tss_segment_16))
+	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
+			   sizeof tss_segment_16))
 		goto out;
 
 	save_state_to_tss16(vcpu, &tss_segment_16);
-	save_tss_segment16(vcpu, cseg_desc, &tss_segment_16);
 
-	if (load_tss_segment16(vcpu, nseg_desc, &tss_segment_16))
+	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
+			    sizeof tss_segment_16))
 		goto out;
+
+	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+			   &tss_segment_16, sizeof tss_segment_16))
+		goto out;
+
 	if (load_state_from_tss16(vcpu, &tss_segment_16))
 		goto out;
 
@@ -3489,20 +3457,26 @@ out:
 }
 
 static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
-			      struct desc_struct *cseg_desc,
+			      u32 old_tss_base,
 			      struct desc_struct *nseg_desc)
 {
 	struct tss_segment_32 tss_segment_32;
 	int ret = 0;
 
-	if (load_tss_segment32(vcpu, cseg_desc, &tss_segment_32))
+	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
+			   sizeof tss_segment_32))
 		goto out;
 
 	save_state_to_tss32(vcpu, &tss_segment_32);
-	save_tss_segment32(vcpu, cseg_desc, &tss_segment_32);
 
-	if (load_tss_segment32(vcpu, nseg_desc, &tss_segment_32))
+	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
+			    sizeof tss_segment_32))
 		goto out;
+
+	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+			   &tss_segment_32, sizeof tss_segment_32))
+		goto out;
+
 	if (load_state_from_tss32(vcpu, &tss_segment_32))
 		goto out;
 
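Note: with the load/save_tss_segment16/32() wrappers removed, the task-switch helpers operate on guest-physical addresses directly: the outgoing TSS is read and written back at old_tss_base (translated to a GPA by the caller), and the incoming TSS is fetched at get_tss_base_addr(), which now returns a GPA as well. A reduced model of that read/save/write/read/load sequence against a toy flat guest memory; the 104-byte size is an assumption matching a 26-word 32-bit TSS:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TSS_SIZE 104	/* assumed sizeof(struct tss_segment_32) */

static uint8_t guest_ram[1 << 16];	/* toy guest-physical memory */

static int kvm_read_guest(uint64_t gpa, void *buf, size_t len)
{
	memcpy(buf, guest_ram + gpa, len);
	return 0;	/* 0 = success, as in KVM */
}

static int kvm_write_guest(uint64_t gpa, const void *buf, size_t len)
{
	memcpy(guest_ram + gpa, buf, len);
	return 0;
}

static int task_switch_32(uint64_t old_tss_base, uint64_t new_tss_base)
{
	uint8_t tss[TSS_SIZE];

	if (kvm_read_guest(old_tss_base, tss, sizeof tss))
		return 1;
	/* save_state_to_tss32() would refresh tss from vcpu state here */
	if (kvm_write_guest(old_tss_base, tss, sizeof tss))
		return 1;
	if (kvm_read_guest(new_tss_base, tss, sizeof tss))
		return 1;
	/* load_state_from_tss32() would install tss into the vcpu here */
	return 0;
}

int main(void)
{
	return task_switch_32(0x1000, 0x2000);
}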
@@ -3517,16 +3491,20 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 	struct desc_struct cseg_desc;
 	struct desc_struct nseg_desc;
 	int ret = 0;
+	u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
+	u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
 
-	kvm_get_segment(vcpu, &tr_seg, VCPU_SREG_TR);
+	old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
 
+	/* FIXME: Handle errors. Failure to read either TSS or their
+	 * descriptors should generate a pagefault.
+	 */
 	if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
 		goto out;
 
-	if (load_guest_segment_descriptor(vcpu, tr_seg.selector, &cseg_desc))
+	if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
 		goto out;
 
-
 	if (reason != TASK_SWITCH_IRET) {
 		int cpl;
 
@@ -3544,8 +3522,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 
 	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
 		cseg_desc.type &= ~(1 << 1); //clear the B flag
-		save_guest_segment_descriptor(vcpu, tr_seg.selector,
-					      &cseg_desc);
+		save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
 	}
 
 	if (reason == TASK_SWITCH_IRET) {
@@ -3557,10 +3534,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 	kvm_x86_ops->cache_regs(vcpu);
 
 	if (nseg_desc.type & 8)
-		ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc,
-					 &nseg_desc);
+		ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base,
+					 &nseg_desc);
 	else
-		ret = kvm_task_switch_16(vcpu, tss_selector, &cseg_desc,
-					 &nseg_desc);
+		ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_base,
+					 &nseg_desc);
 
 	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {