author     Avi Kivity <avi@redhat.com>  2010-03-11 05:20:03 -0500
committer  Avi Kivity <avi@redhat.com>  2010-04-20 05:55:05 -0400
commit     d6a23895aa82353788a1cc5a1d9a1c963465463e
tree       ad60622fed081c401fd30cfb59171f328cb5412e /arch
parent     b7af40433870aa0636932ad39b0c48a0cb319057
KVM: Don't spam kernel log when injecting exceptions due to bad cr writes
These are guest-triggerable, so a misbehaving guest could flood the host kernel log at will.
Signed-off-by: Avi Kivity <avi@redhat.com>
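For context, every printk removed below sits immediately in front of a kvm_inject_gp() call, so the guest still receives the architectural #GP on a bad control register write; only the host-side log message disappears. A minimal sketch of the helper these paths keep using, roughly as it read in arch/x86/kvm/x86.c of this era (treat the body as illustrative, not quoted from this commit):

	/* Queue a #GP with an error code for injection into the guest
	 * on the next VM entry.  GP_VECTOR is the architectural
	 * general-protection vector, 13. */
	void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
	{
		kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
	}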
Diffstat (limited to 'arch')
 arch/x86/kvm/x86.c | 27 ---------------------------
 1 file changed, 0 insertions(+), 27 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2eb999dc9774..8f9b08d72c4d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -433,8 +433,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
 #ifdef CONFIG_X86_64
 	if (cr0 & 0xffffffff00000000UL) {
-		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
-		       cr0, kvm_read_cr0(vcpu));
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -443,14 +441,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	cr0 &= ~CR0_RESERVED_BITS;
 
 	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
-		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
-		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
-		       "and a clear PE flag\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -461,15 +456,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 			int cs_db, cs_l;
 
 			if (!is_pae(vcpu)) {
-				printk(KERN_DEBUG "set_cr0: #GP, start paging "
-				       "in long mode while PAE is disabled\n");
 				kvm_inject_gp(vcpu, 0);
 				return;
 			}
 			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 			if (cs_l) {
-				printk(KERN_DEBUG "set_cr0: #GP, start paging "
-				       "in long mode while CS.L == 1\n");
 				kvm_inject_gp(vcpu, 0);
 				return;
 
@@ -477,8 +468,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 		} else
 #endif
 		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
-			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
-			       "reserved bits\n");
 			kvm_inject_gp(vcpu, 0);
 			return;
 		}
@@ -505,28 +494,23 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
 
 	if (cr4 & CR4_RESERVED_BITS) {
-		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if (is_long_mode(vcpu)) {
 		if (!(cr4 & X86_CR4_PAE)) {
-			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
-			       "in long mode\n");
 			kvm_inject_gp(vcpu, 0);
 			return;
 		}
 	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
 		   && ((cr4 ^ old_cr4) & pdptr_bits)
 		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
-		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if (cr4 & X86_CR4_VMXE) {
-		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -547,21 +531,16 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 
 	if (is_long_mode(vcpu)) {
 		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
-			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
 			kvm_inject_gp(vcpu, 0);
 			return;
 		}
 	} else {
 		if (is_pae(vcpu)) {
 			if (cr3 & CR3_PAE_RESERVED_BITS) {
-				printk(KERN_DEBUG
-				       "set_cr3: #GP, reserved bits\n");
 				kvm_inject_gp(vcpu, 0);
 				return;
 			}
 			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
-				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
-				       "reserved bits\n");
 				kvm_inject_gp(vcpu, 0);
 				return;
 			}
@@ -593,7 +572,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr3);
 void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
 	if (cr8 & CR8_RESERVED_BITS) {
-		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -649,15 +627,12 @@ static u32 emulated_msrs[] = {
 static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	if (efer & efer_reserved_bits) {
-		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
-		       efer);
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if (is_paging(vcpu)
 	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
-		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -667,7 +642,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
 		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
 		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
-			printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
 			kvm_inject_gp(vcpu, 0);
 			return;
 		}
@@ -678,7 +652,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
 		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
 		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
-			printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
 			kvm_inject_gp(vcpu, 0);
 			return;
 		}
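If some diagnostic output were still wanted on these paths, the kernel's rate-limited logging would curb the spam while keeping a trace of the first few offenses. A hedged sketch only, assuming printk_ratelimited() is available in the target kernel; the commit itself simply drops the messages rather than throttling them:

	if (cr0 & 0xffffffff00000000UL) {
		/* Rate-limited: a guest looping on bad CR writes can no
		 * longer flood the host log, but the first occurrences
		 * still leave a trace for debugging. */
		printk_ratelimited(KERN_DEBUG
				   "set_cr0: 0x%lx #GP, reserved bits\n", cr0);
		kvm_inject_gp(vcpu, 0);
		return;
	}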