author	Avi Kivity <avi@redhat.com>	2010-03-11 05:20:03 -0500
committer	Greg Kroah-Hartman <gregkh@suse.de>	2010-04-26 10:48:04 -0400
commit	34ae17fafc868b82dbc3314a046d654feff43254 (patch)
tree	4882967bdc11066fe7cb691d51f99bbb790f1d87 /arch
parent	dee49b4925d922229656f4d3c15da33ae5d036ed (diff)
KVM: Don't spam kernel log when injecting exceptions due to bad cr writes
(Cherry-picked from commit d6a23895aa82353788a1cc5a1d9a1c963465463e)

These are guest-triggerable.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
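The removed printk()s fire on guest-controlled input, so a guest can flood the host's kernel log at will. A minimal hypothetical sketch of the guest side (not part of this patch; it assumes the guest kernel installs a #GP handler that skips the faulting instruction so the loop survives):

	/*
	 * Hypothetical guest kernel code: each iteration makes KVM's
	 * mov-to-CR0 emulation inject #GP into the guest -- and, before
	 * this patch, also write a KERN_DEBUG line to the *host* log.
	 */
	static void flood_host_log(void)
	{
		unsigned long cr0;

		for (;;) {
			asm volatile("mov %%cr0, %0" : "=r" (cr0));
			cr0 |= X86_CR0_NW;	/* NW=1 with CD=0 is invalid */
			cr0 &= ~X86_CR0_CD;
			asm volatile("mov %0, %%cr0" : : "r" (cr0));
		}
	}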
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kvm/x86.c	27
1 file changed, 0 insertions, 27 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e900908174fa..1b880f8720d2 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -384,21 +384,16 @@ out:
 void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 	if (cr0 & CR0_RESERVED_BITS) {
-		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
-		       cr0, vcpu->arch.cr0);
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
-		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
-		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
-		       "and a clear PE flag\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -409,15 +404,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 		int cs_db, cs_l;
 
 		if (!is_pae(vcpu)) {
-			printk(KERN_DEBUG "set_cr0: #GP, start paging "
-			       "in long mode while PAE is disabled\n");
 			kvm_inject_gp(vcpu, 0);
 			return;
 		}
 		kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 		if (cs_l) {
-			printk(KERN_DEBUG "set_cr0: #GP, start paging "
-			       "in long mode while CS.L == 1\n");
 			kvm_inject_gp(vcpu, 0);
 			return;
 
@@ -425,8 +416,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	} else
 #endif
 	if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
-		printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
-		       "reserved bits\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -453,28 +442,23 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
 
 	if (cr4 & CR4_RESERVED_BITS) {
-		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if (is_long_mode(vcpu)) {
 		if (!(cr4 & X86_CR4_PAE)) {
-			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
-			       "in long mode\n");
 			kvm_inject_gp(vcpu, 0);
 			return;
 		}
 	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
 		   && ((cr4 ^ old_cr4) & pdptr_bits)
 		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
-		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if (cr4 & X86_CR4_VMXE) {
-		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -495,21 +479,16 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 
 	if (is_long_mode(vcpu)) {
 		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
-			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
 			kvm_inject_gp(vcpu, 0);
 			return;
 		}
 	} else {
 		if (is_pae(vcpu)) {
 			if (cr3 & CR3_PAE_RESERVED_BITS) {
-				printk(KERN_DEBUG
-				       "set_cr3: #GP, reserved bits\n");
 				kvm_inject_gp(vcpu, 0);
 				return;
 			}
 			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
-				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
-				       "reserved bits\n");
 				kvm_inject_gp(vcpu, 0);
 				return;
 			}
@@ -541,7 +520,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr3);
 void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
 	if (cr8 & CR8_RESERVED_BITS) {
-		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -595,15 +573,12 @@ static u32 emulated_msrs[] = {
 static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	if (efer & efer_reserved_bits) {
-		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
-		       efer);
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if (is_paging(vcpu)
 	    && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
-		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -613,7 +588,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
 	feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
 	if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
-		printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
@@ -624,7 +598,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
 	feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
 	if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
-		printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
 		kvm_inject_gp(vcpu, 0);
 		return;
 	}
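The patch deletes the messages outright rather than keeping them. If the diagnostics had been worth preserving, one alternative would have been to rate-limit them with the kernel's long-standing printk_ratelimit() helper; a sketch of what the first check in kvm_set_cr0() could have looked like under that approach (not what was merged):

	if (cr0 & CR0_RESERVED_BITS) {
		/* Alternative (not merged): keep the diagnostic, cap its rate */
		if (printk_ratelimit())
			printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
			       cr0, vcpu->arch.cr0);
		kvm_inject_gp(vcpu, 0);
		return;
	}

Rate limiting would still let a guest dirty the host log, just more slowly, which is presumably why the messages were dropped entirely.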