author		Gleb Natapov <gleb@redhat.com>	2010-04-28 12:15:31 -0400
committer	Avi Kivity <avi@redhat.com>	2010-08-01 03:35:31 -0400
commit		0f12244fe70e8a94a491f6cd7ed70a352ab6c26c (patch)
tree		a42fa2fd198e187c90abbed62ef35cd8acc9ff7e /arch/x86/kvm
parent		79168fd1a307ffee46ee03b7f8711559241738c7 (diff)
KVM: x86 emulator: make set_cr() callback return error if it fails
Make the set_cr() callback return an error when it fails, instead of
injecting #GP behind the emulator's back.
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
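
The shape of the change is the same for every control register: the
validation logic moves into a helper that returns a status code, and the
old void entry point becomes a thin wrapper that injects #GP on failure,
while emulator callers propagate the fault themselves. A minimal sketch
of that pattern follows; the names (__set_crn_checked, set_crn,
CRN_RESERVED_BITS) are illustrative placeholders, not code from this patch:

/*
 * Sketch of the split applied to each kvm_set_crN() below.
 * CRN_RESERVED_BITS stands in for the register-specific checks.
 */
static int __set_crn_checked(struct kvm_vcpu *vcpu, unsigned long val)
{
	if (val & CRN_RESERVED_BITS)	/* validation failed */
		return 1;		/* report the error, inject nothing */
	/* ... commit the new register value ... */
	return 0;
}

/* Non-emulator callers keep the old inject-on-failure behaviour. */
void set_crn(struct kvm_vcpu *vcpu, unsigned long val)
{
	if (__set_crn_checked(vcpu, val))
		kvm_inject_gp(vcpu, 0);
}

The emulator instead calls the __ variant through its set_cr() callback
and decides for itself how to surface the fault (see emulate.c below).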
Diffstat (limited to 'arch/x86/kvm')
 -rw-r--r--  arch/x86/kvm/emulate.c |  10
 -rw-r--r--  arch/x86/kvm/x86.c     | 148
 2 files changed, 83 insertions, 75 deletions
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index f56ec486393e..061f7d37c9f7 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2272,7 +2272,10 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
 	struct decode_cache *c = &ctxt->decode;
 	int ret;
 
-	ops->set_cr(3, tss->cr3, ctxt->vcpu);
+	if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) {
+		kvm_inject_gp(ctxt->vcpu, 0);
+		return X86EMUL_PROPAGATE_FAULT;
+	}
 	c->eip = tss->eip;
 	ctxt->eflags = tss->eflags | 2;
 	c->regs[VCPU_REGS_RAX] = tss->eax;
@@ -3135,7 +3138,10 @@ twobyte_insn:
 		c->dst.type = OP_NONE;	/* no writeback */
 		break;
 	case 0x22: /* mov reg, cr */
-		ops->set_cr(c->modrm_reg, c->modrm_val, ctxt->vcpu);
+		if (ops->set_cr(c->modrm_reg, c->modrm_val, ctxt->vcpu)) {
+			kvm_inject_gp(ctxt->vcpu, 0);
+			goto done;
+		}
 		c->dst.type = OP_NONE;
 		break;
 	case 0x23: /* mov from reg to dr */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9a469df6011c..64c6e7a31411 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -414,57 +414,49 @@ out:
 	return changed;
 }
 
-void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+static int __kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 	cr0 |= X86_CR0_ET;
 
 #ifdef CONFIG_X86_64
-	if (cr0 & 0xffffffff00000000UL) {
-		kvm_inject_gp(vcpu, 0);
-		return;
-	}
+	if (cr0 & 0xffffffff00000000UL)
+		return 1;
 #endif
 
 	cr0 &= ~CR0_RESERVED_BITS;
 
-	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
-		kvm_inject_gp(vcpu, 0);
-		return;
-	}
+	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
+		return 1;
 
-	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
-		kvm_inject_gp(vcpu, 0);
-		return;
-	}
+	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
+		return 1;
 
 	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
 #ifdef CONFIG_X86_64
 		if ((vcpu->arch.efer & EFER_LME)) {
 			int cs_db, cs_l;
 
-			if (!is_pae(vcpu)) {
-				kvm_inject_gp(vcpu, 0);
-				return;
-			}
+			if (!is_pae(vcpu))
+				return 1;
 			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
-			if (cs_l) {
-				kvm_inject_gp(vcpu, 0);
-				return;
-
-			}
+			if (cs_l)
+				return 1;
 		} else
 #endif
-		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
-			kvm_inject_gp(vcpu, 0);
-			return;
-		}
-
+		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3))
+			return 1;
 	}
 
 	kvm_x86_ops->set_cr0(vcpu, cr0);
 
 	kvm_mmu_reset_context(vcpu);
-	return;
+	return 0;
+}
+
+void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+{
+	if (__kvm_set_cr0(vcpu, cr0))
+		kvm_inject_gp(vcpu, 0);
 }
 EXPORT_SYMBOL_GPL(kvm_set_cr0);
 
@@ -474,61 +466,56 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
-void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+int __kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	unsigned long old_cr4 = kvm_read_cr4(vcpu);
 	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
 
-	if (cr4 & CR4_RESERVED_BITS) {
-		kvm_inject_gp(vcpu, 0);
-		return;
-	}
+	if (cr4 & CR4_RESERVED_BITS)
+		return 1;
 
 	if (is_long_mode(vcpu)) {
-		if (!(cr4 & X86_CR4_PAE)) {
-			kvm_inject_gp(vcpu, 0);
-			return;
-		}
+		if (!(cr4 & X86_CR4_PAE))
+			return 1;
 	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
 		   && ((cr4 ^ old_cr4) & pdptr_bits)
-		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
-		kvm_inject_gp(vcpu, 0);
-		return;
-	}
+		   && !load_pdptrs(vcpu, vcpu->arch.cr3))
+		return 1;
+
+	if (cr4 & X86_CR4_VMXE)
+		return 1;
 
-	if (cr4 & X86_CR4_VMXE) {
-		kvm_inject_gp(vcpu, 0);
-		return;
-	}
 	kvm_x86_ops->set_cr4(vcpu, cr4);
 	vcpu->arch.cr4 = cr4;
 	kvm_mmu_reset_context(vcpu);
+
+	return 0;
+}
+
+void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+	if (__kvm_set_cr4(vcpu, cr4))
+		kvm_inject_gp(vcpu, 0);
 }
 EXPORT_SYMBOL_GPL(kvm_set_cr4);
 
-void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+static int __kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
 	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
 		kvm_mmu_sync_roots(vcpu);
 		kvm_mmu_flush_tlb(vcpu);
-		return;
+		return 0;
 	}
 
 	if (is_long_mode(vcpu)) {
-		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
-			kvm_inject_gp(vcpu, 0);
-			return;
-		}
+		if (cr3 & CR3_L_MODE_RESERVED_BITS)
+			return 1;
 	} else {
 		if (is_pae(vcpu)) {
-			if (cr3 & CR3_PAE_RESERVED_BITS) {
-				kvm_inject_gp(vcpu, 0);
-				return;
-			}
-			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
-				kvm_inject_gp(vcpu, 0);
-				return;
-			}
+			if (cr3 & CR3_PAE_RESERVED_BITS)
+				return 1;
+			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3))
+				return 1;
 		}
 		/*
 		 * We don't check reserved bits in nonpae mode, because
@@ -546,24 +533,34 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	 * to debug) behavior on the guest side.
 	 */
 	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
+		return 1;
+	vcpu->arch.cr3 = cr3;
+	vcpu->arch.mmu.new_cr3(vcpu);
+	return 0;
+}
+
+void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+{
+	if (__kvm_set_cr3(vcpu, cr3))
 		kvm_inject_gp(vcpu, 0);
-	else {
-		vcpu->arch.cr3 = cr3;
-		vcpu->arch.mmu.new_cr3(vcpu);
-	}
 }
 EXPORT_SYMBOL_GPL(kvm_set_cr3);
 
-void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
+int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
-	if (cr8 & CR8_RESERVED_BITS) {
-		kvm_inject_gp(vcpu, 0);
-		return;
-	}
+	if (cr8 & CR8_RESERVED_BITS)
+		return 1;
 	if (irqchip_in_kernel(vcpu->kvm))
 		kvm_lapic_set_tpr(vcpu, cr8);
 	else
 		vcpu->arch.cr8 = cr8;
+	return 0;
+}
+
+void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
+{
+	if (__kvm_set_cr8(vcpu, cr8))
+		kvm_inject_gp(vcpu, 0);
 }
 EXPORT_SYMBOL_GPL(kvm_set_cr8);
 
@@ -3681,27 +3678,32 @@ static unsigned long emulator_get_cr(int cr, struct kvm_vcpu *vcpu)
 	return value;
 }
 
-static void emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
+static int emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
 {
+	int res = 0;
+
 	switch (cr) {
 	case 0:
-		kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
+		res = __kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
 		break;
 	case 2:
 		vcpu->arch.cr2 = val;
 		break;
 	case 3:
-		kvm_set_cr3(vcpu, val);
+		res = __kvm_set_cr3(vcpu, val);
 		break;
 	case 4:
-		kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
+		res = __kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
 		break;
 	case 8:
-		kvm_set_cr8(vcpu, val & 0xfUL);
+		res = __kvm_set_cr8(vcpu, val & 0xfUL);
 		break;
 	default:
 		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
+		res = -1;
 	}
+
+	return res;
 }
 
 static int emulator_get_cpl(struct kvm_vcpu *vcpu)