Diffstat (limited to 'arch/x86/kvm/x86.c')
 arch/x86/kvm/x86.c | 48 ++++++++++++++++--------------------------------
 1 file changed, 16 insertions(+), 32 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 21b9b6aa3e88..73d854c36e39 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -434,8 +434,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
 #ifdef CONFIG_X86_64
 	if (cr0 & 0xffffffff00000000UL) {
-		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
-		       cr0, kvm_read_cr0(vcpu));
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
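
This hunk and the ones that follow consistently drop KERN_DEBUG printks from the #GP-injection paths while keeping the silent kvm_inject_gp(). These messages sit on paths an unprivileged guest can hit at will, so presumably the point is to stop a guest from flooding the host kernel log. A hypothetical guest-side (ring 0) sketch of the problem; write_cr0_raw() and the guest #GP handler that resumes past the faulting instruction are assumptions, not part of this patch:

#include <stdint.h>

/* Writing a reserved CR0 bit makes KVM inject #GP instead of doing the
 * write; before this patch each attempt also printk()ed into the host
 * log, so a guest whose #GP handler simply resumes could grow the
 * host's log without bound. */
static inline void write_cr0_raw(uint64_t val)
{
	asm volatile("mov %0, %%cr0" : : "r"(val) : "memory");
}

static void spam_host_log(void)
{
	for (;;)
		write_cr0_raw(1ULL << 32);	/* CR0[63:32] are reserved */
}
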
@@ -444,14 +442,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	cr0 &= ~CR0_RESERVED_BITS;
 
 	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
-		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
-		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
-		       "and a clear PE flag\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -462,15 +457,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 		int cs_db, cs_l;
 
 		if (!is_pae(vcpu)) {
-			printk(KERN_DEBUG "set_cr0: #GP, start paging "
-			       "in long mode while PAE is disabled\n");
 			kvm_inject_gp(vcpu, 0);
 			return;
 		}
 		kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 		if (cs_l) {
-			printk(KERN_DEBUG "set_cr0: #GP, start paging "
-			       "in long mode while CS.L == 1\n");
 			kvm_inject_gp(vcpu, 0);
 			return;
 
@@ -478,8 +469,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	} else
 #endif
 	if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
-		printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
-		       "reserved bits\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -506,28 +495,23 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
 
 	if (cr4 & CR4_RESERVED_BITS) {
-		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if (is_long_mode(vcpu)) {
 		if (!(cr4 & X86_CR4_PAE)) {
-			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
-			       "in long mode\n");
 			kvm_inject_gp(vcpu, 0);
 			return;
 		}
 	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
 		   && ((cr4 ^ old_cr4) & pdptr_bits)
 		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
-		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if (cr4 & X86_CR4_VMXE) {
-		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
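
A note on the else-if chain above: PDPTEs are re-read and revalidated only when the guest is paging with PAE enabled and one of the bits that affect the paging structures actually flips. The same condition as a standalone predicate (illustrative only, not kernel API):

#include <stdbool.h>

#define X86_CR4_PSE	(1UL << 4)
#define X86_CR4_PAE	(1UL << 5)
#define X86_CR4_PGE	(1UL << 7)

/* Reload PDPTEs only for a PAE-paging guest whose PGE/PSE/PAE bits
 * change; writes that leave those bits untouched never force the
 * reload. */
static bool cr4_needs_pdptr_reload(unsigned long old_cr4,
				   unsigned long new_cr4, bool paging)
{
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;

	return paging && (new_cr4 & X86_CR4_PAE) &&
	       ((new_cr4 ^ old_cr4) & pdptr_bits) != 0;
}
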
@@ -548,21 +532,16 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 
 	if (is_long_mode(vcpu)) {
 		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
-			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
 			kvm_inject_gp(vcpu, 0);
 			return;
 		}
 	} else {
 		if (is_pae(vcpu)) {
 			if (cr3 & CR3_PAE_RESERVED_BITS) {
-				printk(KERN_DEBUG
-				       "set_cr3: #GP, reserved bits\n");
 				kvm_inject_gp(vcpu, 0);
 				return;
 			}
 			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
-				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
-				       "reserved bits\n");
 				kvm_inject_gp(vcpu, 0);
 				return;
 			}
@@ -594,7 +573,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr3);
 void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
 	if (cr8 & CR8_RESERVED_BITS) {
-		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -650,15 +628,12 @@ static u32 emulated_msrs[] = {
 static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	if (efer & efer_reserved_bits) {
-		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
-		       efer);
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if (is_paging(vcpu)
 	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
-		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -668,7 +643,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
 		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
 		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
-			printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
 			kvm_inject_gp(vcpu, 0);
 			return;
 		}
@@ -679,7 +653,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
 		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
 		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
-			printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
 			kvm_inject_gp(vcpu, 0);
 			return;
 		}
@@ -968,9 +941,13 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		if (msr >= MSR_IA32_MC0_CTL &&
 		    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
 			u32 offset = msr - MSR_IA32_MC0_CTL;
-			/* only 0 or all 1s can be written to IA32_MCi_CTL */
+			/* only 0 or all 1s can be written to IA32_MCi_CTL
+			 * some Linux kernels though clear bit 10 in bank 4 to
+			 * workaround a BIOS/GART TBL issue on AMD K8s, ignore
+			 * this to avoid an uncatched #GP in the guest
+			 */
 			if ((offset & 0x3) == 0 &&
-			    data != 0 && data != ~(u64)0)
+			    data != 0 && (data | (1 << 10)) != ~(u64)0)
 				return -1;
 			vcpu->arch.mce_banks[offset] = data;
 			break;
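
The new mask in this hunk admits exactly three values, matching the added comment: all 0s, all 1s, and all 1s with only bit 10 clear (what Linux guests write to the bank 4 control MSR on AMD K8 to work around the BIOS/GART TBL issue). A small self-contained check of the expression (illustrative, not kernel code):

#include <assert.h>
#include <stdint.h>

static int mci_ctl_write_ok(uint64_t data)
{
	/* mirrors: !(data != 0 && (data | (1 << 10)) != ~(u64)0) */
	return data == 0 || (data | (1ULL << 10)) == ~(uint64_t)0;
}

int main(void)
{
	assert(mci_ctl_write_ok(0));				/* all 0s */
	assert(mci_ctl_write_ok(~(uint64_t)0));			/* all 1s */
	assert(mci_ctl_write_ok(~(uint64_t)0 & ~(1ULL << 10)));	/* bit 10 clear */
	assert(!mci_ctl_write_ok(1));				/* anything else fails */
	return 0;
}
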
@@ -2636,8 +2613,9 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 				      struct kvm_dirty_log *log)
 {
-	int r, n, i;
+	int r, i;
 	struct kvm_memory_slot *memslot;
+	unsigned long n;
 	unsigned long is_dirty = 0;
 	unsigned long *dirty_bitmap = NULL;
 
@@ -2652,7 +2630,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	if (!memslot->dirty_bitmap)
 		goto out;
 
-	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+	n = kvm_dirty_bitmap_bytes(memslot);
 
 	r = -ENOMEM;
 	dirty_bitmap = vmalloc(n);
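
Together with the int -> unsigned long change for n in the previous hunk, this replaces the open-coded size computation with a shared helper, so every user of the dirty bitmap agrees on its size and very large slots cannot overflow an int. The helper itself is not part of this file; presumably it centralizes the same expression it replaces, roughly:

/* sketch only: one dirty bit per page, rounded up to whole longs,
 * using the kernel's ALIGN() and BITS_PER_LONG */
static unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
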
@@ -4533,7 +4511,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	kvm_set_cr8(vcpu, kvm_run->cr8);
 
 	if (vcpu->arch.pio.cur_count) {
+		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 		r = complete_pio(vcpu);
+		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 		if (r)
 			goto out;
 	}
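
complete_pio() ends up dereferencing the SRCU-protected memslots, so the call is now bracketed by an SRCU read-side critical section. The general pattern (read_side() is an illustrative name, not a kernel function):

#include <linux/srcu.h>

/* The index returned by srcu_read_lock() must be handed back to the
 * matching srcu_read_unlock(); writers wait for readers to drain in
 * synchronize_srcu(). */
static void read_side(struct srcu_struct *sp)
{
	int idx;

	idx = srcu_read_lock(sp);
	/* ... safely dereference SRCU-protected pointers here ... */
	srcu_read_unlock(sp, idx);
}
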
@@ -5196,6 +5176,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 	int ret = 0;
 	u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
 	u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
+	u32 desc_limit;
 
 	old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);
 
@@ -5218,7 +5199,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 		}
 	}
 
-	if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) {
+	desc_limit = get_desc_limit(&nseg_desc);
+	if (!nseg_desc.p ||
+	    ((desc_limit < 0x67 && (nseg_desc.type & 8)) ||
+	     desc_limit < 0x2b)) {
 		kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
 		return 1;
 	}
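
The old check demanded the 32-bit TSS minimum from every task switch target. Architecturally a 32-bit TSS is 0x68 bytes (limit at least 0x67) while a 16-bit TSS is only 0x2c bytes (limit at least 0x2b), and bit 3 of the descriptor type tells them apart, which is what the new condition encodes. Restated as a standalone predicate (illustrative, not kernel API):

#include <stdbool.h>
#include <stdint.h>

static bool tss_limit_too_small(uint8_t type, uint32_t desc_limit)
{
	bool is_32bit = type & 8;	/* type bit 3 set: 32-bit TSS */

	return (is_32bit && desc_limit < 0x67) || desc_limit < 0x2b;
}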