Diffstat (limited to 'drivers/kvm/kvm_main.c')
 drivers/kvm/kvm_main.c | 155 ++++++++++++++++++++++++++++++++++--------
 1 file changed, 124 insertions(+), 31 deletions(-)
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index ce7fe640f18d..67c1154960f0 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -58,6 +58,9 @@ static struct kvm_stats_debugfs_item {
         { "io_exits", &kvm_stat.io_exits },
         { "mmio_exits", &kvm_stat.mmio_exits },
         { "signal_exits", &kvm_stat.signal_exits },
+        { "irq_window", &kvm_stat.irq_window_exits },
+        { "halt_exits", &kvm_stat.halt_exits },
+        { "request_irq", &kvm_stat.request_irq_exits },
         { "irq_exits", &kvm_stat.irq_exits },
         { 0, 0 }
 };
@@ -227,6 +230,7 @@ static int kvm_dev_open(struct inode *inode, struct file *filp)
                 struct kvm_vcpu *vcpu = &kvm->vcpus[i];
 
                 mutex_init(&vcpu->mutex);
+                vcpu->kvm = kvm;
                 vcpu->mmu.root_hpa = INVALID_PAGE;
                 INIT_LIST_HEAD(&vcpu->free_pages);
         }
@@ -268,8 +272,8 @@ static void kvm_free_physmem(struct kvm *kvm)
 
 static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
 {
-        kvm_arch_ops->vcpu_free(vcpu);
         kvm_mmu_destroy(vcpu);
+        kvm_arch_ops->vcpu_free(vcpu);
 }
 
 static void kvm_free_vcpus(struct kvm *kvm)
@@ -295,14 +299,17 @@ static void inject_gp(struct kvm_vcpu *vcpu)
         kvm_arch_ops->inject_gp(vcpu, 0);
 }
 
-static int pdptrs_have_reserved_bits_set(struct kvm_vcpu *vcpu,
-                                         unsigned long cr3)
+/*
+ * Load the pae pdptrs.  Return true if they are all valid.
+ */
+static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
         gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
-        unsigned offset = (cr3 & (PAGE_SIZE-1)) >> 5;
+        unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
         int i;
         u64 pdpte;
         u64 *pdpt;
+        int ret;
         struct kvm_memory_slot *memslot;
 
         spin_lock(&vcpu->kvm->lock);
@@ -310,16 +317,23 @@ static int pdptrs_have_reserved_bits_set(struct kvm_vcpu *vcpu,
         /* FIXME: !memslot - emulate? 0xff? */
         pdpt = kmap_atomic(gfn_to_page(memslot, pdpt_gfn), KM_USER0);
 
+        ret = 1;
         for (i = 0; i < 4; ++i) {
                 pdpte = pdpt[offset + i];
-                if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull))
-                        break;
+                if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull)) {
+                        ret = 0;
+                        goto out;
+                }
         }
 
+        for (i = 0; i < 4; ++i)
+                vcpu->pdptrs[i] = pdpt[offset + i];
+
+out:
         kunmap_atomic(pdpt, KM_USER0);
         spin_unlock(&vcpu->kvm->lock);
 
-        return i != 4;
+        return ret;
 }
 
 void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
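Note: the "<< 2" added to the offset computation is the heart of this hunk. In PAE mode cr3 points at a 32-byte-aligned page-directory-pointer table of four u64 entries, so bits 5-11 of cr3 select a 32-byte slot within the page; the old code used that slot number directly as a u64 array index instead of scaling it by four. A minimal standalone sketch of the arithmetic (ordinary userspace C, not kernel code; the cr3 value is made up):

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
            uint64_t cr3 = 0x12340;  /* hypothetical, 32-byte aligned */
            /* 32-byte slot within the page, scaled to a u64 index */
            unsigned offset = ((cr3 & (PAGE_SIZE - 1)) >> 5) << 2;

            /* index * sizeof(u64) must recover the byte offset */
            assert(offset * 8 == (cr3 & (PAGE_SIZE - 1)));
            return 0;
    }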
@@ -365,8 +379,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                 }
         } else
 #endif
-        if (is_pae(vcpu) &&
-            pdptrs_have_reserved_bits_set(vcpu, vcpu->cr3)) {
+        if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
                 printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
                        "reserved bits\n");
                 inject_gp(vcpu);
@@ -387,6 +400,7 @@ EXPORT_SYMBOL_GPL(set_cr0);
 
 void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 {
+        kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
         set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
 }
 EXPORT_SYMBOL_GPL(lmsw);
@@ -407,7 +421,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                         return;
                 }
         } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & CR4_PAE_MASK)
-                   && pdptrs_have_reserved_bits_set(vcpu, vcpu->cr3)) {
+                   && !load_pdptrs(vcpu, vcpu->cr3)) {
                 printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
                 inject_gp(vcpu);
         }
@@ -439,7 +453,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
                         return;
                 }
                 if (is_paging(vcpu) && is_pae(vcpu) &&
-                    pdptrs_have_reserved_bits_set(vcpu, cr3)) {
+                    !load_pdptrs(vcpu, cr3)) {
                         printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
                                "reserved bits\n");
                         inject_gp(vcpu);
@@ -449,7 +463,19 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 
         vcpu->cr3 = cr3;
         spin_lock(&vcpu->kvm->lock);
-        vcpu->mmu.new_cr3(vcpu);
+        /*
+         * Does the new cr3 value map to physical memory? (Note, we
+         * catch an invalid cr3 even in real-mode, because it would
+         * cause trouble later on when we turn on paging anyway.)
+         *
+         * A real CPU would silently accept an invalid cr3 and would
+         * attempt to use it - with largely undefined (and often hard
+         * to debug) behavior on the guest side.
+         */
+        if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
+                inject_gp(vcpu);
+        else
+                vcpu->mmu.new_cr3(vcpu);
         spin_unlock(&vcpu->kvm->lock);
 }
 EXPORT_SYMBOL_GPL(set_cr3);
@@ -517,7 +543,6 @@ static int kvm_dev_ioctl_create_vcpu(struct kvm *kvm, int n)
         vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
 
         vcpu->cpu = -1;  /* First load will set up TR */
-        vcpu->kvm = kvm;
         r = kvm_arch_ops->vcpu_create(vcpu);
         if (r < 0)
                 goto out_free_vcpus;
@@ -634,6 +659,7 @@ raced:
                                          | __GFP_ZERO);
                         if (!new.phys_mem[i])
                                 goto out_free;
+                        new.phys_mem[i]->private = 0;
                 }
         }
 
@@ -688,6 +714,13 @@ out:
         return r;
 }
 
+static void do_remove_write_access(struct kvm_vcpu *vcpu, int slot)
+{
+        spin_lock(&vcpu->kvm->lock);
+        kvm_mmu_slot_remove_write_access(vcpu, slot);
+        spin_unlock(&vcpu->kvm->lock);
+}
+
 /*
  * Get (and clear) the dirty memory log for a memory slot.
  */
@@ -697,6 +730,7 @@ static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm,
         struct kvm_memory_slot *memslot;
         int r, i;
         int n;
+        int cleared;
         unsigned long any = 0;
 
         spin_lock(&kvm->lock);
@@ -727,15 +761,17 @@ static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm,
 
 
         if (any) {
-                spin_lock(&kvm->lock);
-                kvm_mmu_slot_remove_write_access(kvm, log->slot);
-                spin_unlock(&kvm->lock);
-                memset(memslot->dirty_bitmap, 0, n);
+                cleared = 0;
                 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                         struct kvm_vcpu *vcpu = vcpu_load(kvm, i);
 
                         if (!vcpu)
                                 continue;
+                        if (!cleared) {
+                                do_remove_write_access(vcpu, log->slot);
+                                memset(memslot->dirty_bitmap, 0, n);
+                                cleared = 1;
+                        }
                         kvm_arch_ops->tlb_flush(vcpu);
                         vcpu_put(vcpu);
                 }
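Note: kvm_mmu_slot_remove_write_access() now takes a vcpu rather than a kvm pointer, so the write-protection pass and the dirty-bitmap clear move inside the vcpu loop. The cleared flag makes them run exactly once, under the first vcpu that loads successfully, while every vcpu still gets its TLB flushed afterwards.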
@@ -863,6 +899,27 @@ static int emulator_read_emulated(unsigned long addr,
         }
 }
 
+static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
+                               unsigned long val, int bytes)
+{
+        struct kvm_memory_slot *m;
+        struct page *page;
+        void *virt;
+
+        if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
+                return 0;
+        m = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT);
+        if (!m)
+                return 0;
+        page = gfn_to_page(m, gpa >> PAGE_SHIFT);
+        kvm_mmu_pre_write(vcpu, gpa, bytes);
+        virt = kmap_atomic(page, KM_USER0);
+        memcpy(virt + offset_in_page(gpa), &val, bytes);
+        kunmap_atomic(virt, KM_USER0);
+        kvm_mmu_post_write(vcpu, gpa, bytes);
+        return 1;
+}
+
 static int emulator_write_emulated(unsigned long addr,
                                    unsigned long val,
                                    unsigned int bytes,
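Note: emulator_write_phys() only maps a single page, so it refuses writes that straddle a page boundary and lets the caller fall back to the mmio path. A small standalone illustration of the guard (plain C; the addresses are made up):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* 1 if [gpa, gpa + bytes) lies within one page, 0 otherwise */
    static int fits_in_one_page(uint64_t gpa, int bytes)
    {
            return ((gpa + bytes - 1) >> PAGE_SHIFT) == (gpa >> PAGE_SHIFT);
    }

    int main(void)
    {
            printf("%d\n", fits_in_one_page(0xffe, 2)); /* 1: ends at 0xfff */
            printf("%d\n", fits_in_one_page(0xfff, 2)); /* 0: crosses a page */
            return 0;
    }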
@@ -874,6 +931,9 @@ static int emulator_write_emulated(unsigned long addr,
         if (gpa == UNMAPPED_GVA)
                 return X86EMUL_PROPAGATE_FAULT;
 
+        if (emulator_write_phys(vcpu, gpa, val, bytes))
+                return X86EMUL_CONTINUE;
+
         vcpu->mmio_needed = 1;
         vcpu->mmio_phys_addr = gpa;
         vcpu->mmio_size = bytes;
@@ -898,6 +958,30 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
         return emulator_write_emulated(addr, new, bytes, ctxt);
 }
 
+#ifdef CONFIG_X86_32
+
+static int emulator_cmpxchg8b_emulated(unsigned long addr,
+                                       unsigned long old_lo,
+                                       unsigned long old_hi,
+                                       unsigned long new_lo,
+                                       unsigned long new_hi,
+                                       struct x86_emulate_ctxt *ctxt)
+{
+        static int reported;
+        int r;
+
+        if (!reported) {
+                reported = 1;
+                printk(KERN_WARNING "kvm: emulating exchange8b as write\n");
+        }
+        r = emulator_write_emulated(addr, new_lo, 4, ctxt);
+        if (r != X86EMUL_CONTINUE)
+                return r;
+        return emulator_write_emulated(addr+4, new_hi, 4, ctxt);
+}
+
+#endif
+
 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
 {
         return kvm_arch_ops->get_segment_base(vcpu, seg);
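Note: on a 32-bit host the emulator cannot perform a true atomic 8-byte compare-and-exchange, so this fallback simply stores the new value as two 4-byte writes and warns once. A guest relying on cmpxchg8b for cross-CPU synchronization could, in principle, observe the non-atomic update; the one-time KERN_WARNING flags that trade-off.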
@@ -905,18 +989,15 @@ static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
 
 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
 {
-        spin_lock(&vcpu->kvm->lock);
-        vcpu->mmu.inval_page(vcpu, address);
-        spin_unlock(&vcpu->kvm->lock);
-        kvm_arch_ops->invlpg(vcpu, address);
         return X86EMUL_CONTINUE;
 }
 
 int emulate_clts(struct kvm_vcpu *vcpu)
 {
-        unsigned long cr0 = vcpu->cr0;
+        unsigned long cr0;
 
-        cr0 &= ~CR0_TS_MASK;
+        kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
+        cr0 = vcpu->cr0 & ~CR0_TS_MASK;
         kvm_arch_ops->set_cr0(vcpu, cr0);
         return X86EMUL_CONTINUE;
 }
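Note: the decache_cr0_cr4_guest_bits() calls added here and in lmsw(), realmode_get_cr() and the sregs ioctls follow one pattern: the name suggests the arch code may shadow some guest cr0/cr4 bits in hardware rather than in the vcpu structure, so they are refreshed into vcpu->cr0/cr4 before those fields are read.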
@@ -975,6 +1056,9 @@ struct x86_emulate_ops emulate_ops = {
         .read_emulated = emulator_read_emulated,
         .write_emulated = emulator_write_emulated,
         .cmpxchg_emulated = emulator_cmpxchg_emulated,
+#ifdef CONFIG_X86_32
+        .cmpxchg8b_emulated = emulator_cmpxchg8b_emulated,
+#endif
 };
 
 int emulate_instruction(struct kvm_vcpu *vcpu,
@@ -1024,6 +1108,8 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
         }
 
         if (r) {
+                if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
+                        return EMULATE_DONE;
                 if (!vcpu->mmio_needed) {
                         report_emulation_failure(&emulate_ctxt);
                         return EMULATE_FAIL;
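Note: presumably the common emulation-failure case is an instruction writing to a guest page the shadow MMU has write-protected (a guest page table, say); kvm_mmu_unprotect_page_virt() drops that protection, and returning EMULATE_DONE lets the guest re-execute the faulting instruction natively instead of failing outright.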
@@ -1069,6 +1155,7 @@ void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
 
 unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
 {
+        kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
         switch (cr) {
         case 0:
                 return vcpu->cr0;
@@ -1403,6 +1490,7 @@ static int kvm_dev_ioctl_get_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
         sregs->gdt.limit = dt.limit;
         sregs->gdt.base = dt.base;
 
+        kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
         sregs->cr0 = vcpu->cr0;
         sregs->cr2 = vcpu->cr2;
         sregs->cr3 = vcpu->cr3;
@@ -1467,11 +1555,15 @@ static int kvm_dev_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
 #endif
         vcpu->apic_base = sregs->apic_base;
 
+        kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
+
         mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
         kvm_arch_ops->set_cr0_no_modeswitch(vcpu, sregs->cr0);
 
         mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
         kvm_arch_ops->set_cr4(vcpu, sregs->cr4);
+        if (!is_long_mode(vcpu) && is_pae(vcpu))
+                load_pdptrs(vcpu, vcpu->cr3);
 
         if (mmu_reset_needed)
                 kvm_mmu_reset_context(vcpu);
@@ -1693,12 +1785,12 @@ static long kvm_dev_ioctl(struct file *filp,
                 if (copy_from_user(&kvm_run, (void *)arg, sizeof kvm_run))
                         goto out;
                 r = kvm_dev_ioctl_run(kvm, &kvm_run);
-                if (r < 0)
+                if (r < 0 && r != -EINTR)
                         goto out;
-                r = -EFAULT;
-                if (copy_to_user((void *)arg, &kvm_run, sizeof kvm_run))
+                if (copy_to_user((void *)arg, &kvm_run, sizeof kvm_run)) {
+                        r = -EFAULT;
                         goto out;
-                r = 0;
+                }
                 break;
         }
         case KVM_GET_REGS: {
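Note: kvm_dev_ioctl_run() reports -EINTR when a signal interrupts guest execution. KVM_RUN now copies the kvm_run structure back to userspace in that case too, so userspace sees the up-to-date exit state rather than stale data; -EFAULT is set only when the copy itself fails.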
@@ -1842,6 +1934,7 @@ static long kvm_dev_ioctl(struct file *filp,
                                  num_msrs_to_save * sizeof(u32)))
                         goto out;
                 r = 0;
+                break;
         }
         default:
                 ;
@@ -1944,17 +2037,17 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
                 return -EEXIST;
         }
 
-        kvm_arch_ops = ops;
-
-        if (!kvm_arch_ops->cpu_has_kvm_support()) {
+        if (!ops->cpu_has_kvm_support()) {
                 printk(KERN_ERR "kvm: no hardware support\n");
                 return -EOPNOTSUPP;
         }
-        if (kvm_arch_ops->disabled_by_bios()) {
+        if (ops->disabled_by_bios()) {
                 printk(KERN_ERR "kvm: disabled by bios\n");
                 return -EOPNOTSUPP;
         }
 
+        kvm_arch_ops = ops;
+
         r = kvm_arch_ops->hardware_setup();
         if (r < 0)
                 return r;
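Note: kvm_arch_ops is now assigned only after the hardware-support and BIOS checks succeed, so a failed kvm_init_arch() no longer leaves the global pointer set behind an error return.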
