Diffstat (limited to 'arch/x86/kvm/x86.c')
 -rw-r--r--	arch/x86/kvm/x86.c | 120
 1 file changed, 101 insertions(+), 19 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f1f8ff2f1fa2..cc17546a2406 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -34,11 +34,13 @@
 #include <linux/module.h>
 #include <linux/mman.h>
 #include <linux/highmem.h>
+#include <linux/iommu.h>
 #include <linux/intel-iommu.h>
 
 #include <asm/uaccess.h>
 #include <asm/msr.h>
 #include <asm/desc.h>
+#include <asm/mtrr.h>
 
 #define MAX_IO_MSRS 256
 #define CR0_RESERVED_BITS \
@@ -86,6 +88,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
 	{ "hypercalls", VCPU_STAT(hypercalls) },
 	{ "request_irq", VCPU_STAT(request_irq_exits) },
+	{ "request_nmi", VCPU_STAT(request_nmi_exits) },
 	{ "irq_exits", VCPU_STAT(irq_exits) },
 	{ "host_state_reload", VCPU_STAT(host_state_reload) },
 	{ "efer_reload", VCPU_STAT(efer_reload) },
@@ -93,6 +96,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "insn_emulation", VCPU_STAT(insn_emulation) },
 	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
 	{ "irq_injections", VCPU_STAT(irq_injections) },
+	{ "nmi_injections", VCPU_STAT(nmi_injections) },
 	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
 	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
 	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
@@ -101,6 +105,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "mmu_recycled", VM_STAT(mmu_recycled) },
 	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
 	{ "mmu_unsync", VM_STAT(mmu_unsync) },
+	{ "mmu_unsync_global", VM_STAT(mmu_unsync_global) },
 	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
 	{ "largepages", VM_STAT(lpages) },
 	{ NULL }
@@ -312,6 +317,7 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	kvm_x86_ops->set_cr0(vcpu, cr0);
 	vcpu->arch.cr0 = cr0;
 
+	kvm_mmu_sync_global(vcpu);
 	kvm_mmu_reset_context(vcpu);
 	return;
 }
@@ -355,6 +361,7 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	}
 	kvm_x86_ops->set_cr4(vcpu, cr4);
 	vcpu->arch.cr4 = cr4;
+	kvm_mmu_sync_global(vcpu);
 	kvm_mmu_reset_context(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_set_cr4);
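Note: the two kvm_mmu_sync_global() calls above are taken at face value here; the apparent intent (consistent with the oos_global_pages list added in kvm_arch_create_vm() further down) is that shadow pages for guest-global mappings may stay out of sync across CR3 switches, so a mode change through CR0 or CR4 has to sync them explicitly before the MMU context is rebuilt. The diff itself does not state this rationale.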
@@ -449,7 +456,7 @@ static u32 msrs_to_save[] = {
 	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
 #endif
 	MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
-	MSR_IA32_PERF_STATUS,
+	MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT
 };
 
 static unsigned num_msrs_to_save;
@@ -648,10 +655,38 @@ static bool msr_mtrr_valid(unsigned msr)
 
 static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
+	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
+
 	if (!msr_mtrr_valid(msr))
 		return 1;
 
-	vcpu->arch.mtrr[msr - 0x200] = data;
+	if (msr == MSR_MTRRdefType) {
+		vcpu->arch.mtrr_state.def_type = data;
+		vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
+	} else if (msr == MSR_MTRRfix64K_00000)
+		p[0] = data;
+	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
+		p[1 + msr - MSR_MTRRfix16K_80000] = data;
+	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
+		p[3 + msr - MSR_MTRRfix4K_C0000] = data;
+	else if (msr == MSR_IA32_CR_PAT)
+		vcpu->arch.pat = data;
+	else {	/* Variable MTRRs */
+		int idx, is_mtrr_mask;
+		u64 *pt;
+
+		idx = (msr - 0x200) / 2;
+		is_mtrr_mask = msr - 0x200 - 2 * idx;
+		if (!is_mtrr_mask)
+			pt =
+			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
+		else
+			pt =
+			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
+		*pt = data;
+	}
+
+	kvm_mmu_reset_context(vcpu);
 	return 0;
 }
 
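Note on the variable-range arithmetic in the hunk above: the variable MTRR MSRs come in base/mask pairs starting at 0x200 (MTRRphysBase0, MTRRphysMask0, MTRRphysBase1, ...), so subtracting 0x200 and halving yields the range index while the remainder picks base or mask. A standalone sketch of the same decode; the helper is illustrative, not part of the patch:

	/* Decode a variable-range MTRR MSR number into (range index, base-or-mask).
	 * 0x200/0x201 -> range 0, 0x202/0x203 -> range 1, and so on; the parity
	 * test is equivalent to the patch's msr - 0x200 - 2 * idx. */
	static void decode_var_mtrr(unsigned int msr, int *idx, int *is_mask)
	{
		*idx = (msr - 0x200) / 2;	/* which base/mask pair */
		*is_mask = (msr - 0x200) & 1;	/* even = PhysBase, odd = PhysMask */
	}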
@@ -747,10 +782,37 @@ int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 
 static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 {
+	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
+
 	if (!msr_mtrr_valid(msr))
 		return 1;
 
-	*pdata = vcpu->arch.mtrr[msr - 0x200];
+	if (msr == MSR_MTRRdefType)
+		*pdata = vcpu->arch.mtrr_state.def_type +
+			 (vcpu->arch.mtrr_state.enabled << 10);
+	else if (msr == MSR_MTRRfix64K_00000)
+		*pdata = p[0];
+	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
+		*pdata = p[1 + msr - MSR_MTRRfix16K_80000];
+	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
+		*pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
+	else if (msr == MSR_IA32_CR_PAT)
+		*pdata = vcpu->arch.pat;
+	else {	/* Variable MTRRs */
+		int idx, is_mtrr_mask;
+		u64 *pt;
+
+		idx = (msr - 0x200) / 2;
+		is_mtrr_mask = msr - 0x200 - 2 * idx;
+		if (!is_mtrr_mask)
+			pt =
+			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
+		else
+			pt =
+			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
+		*pdata = *pt;
+	}
+
 	return 0;
 }
 
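The read side mirrors the write side exactly; in particular MSR_MTRRdefType is reassembled from the two stored fields. Its layout (per the IA-32 SDM) is bits 7:0 default memory type, bit 10 fixed-range enable, bit 11 MTRR enable, which is why both enable bits travel together as `(data & 0xc00) >> 10`. A worked round trip under those assumptions:

	u64 data     = 0xc06;			/* WB default, both enables set */
	u8  def_type = data & 0xff;		/* 0x06, stored in mtrr_state.def_type */
	u8  enabled  = (data & 0xc00) >> 10;	/* 0x3, stored in mtrr_state.enabled */
	u64 readback = def_type + ((u64)enabled << 10);	/* 0xc06 again */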
@@ -903,7 +965,6 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_IRQCHIP:
 	case KVM_CAP_HLT:
 	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
-	case KVM_CAP_USER_MEMORY:
 	case KVM_CAP_SET_TSS_ADDR:
 	case KVM_CAP_EXT_CPUID:
 	case KVM_CAP_CLOCKSOURCE:
@@ -929,7 +990,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 		r = !tdp_enabled;
 		break;
 	case KVM_CAP_IOMMU:
-		r = intel_iommu_found();
+		r = iommu_found();
 		break;
 	default:
 		r = 0;
@@ -1188,6 +1249,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 		int t, times = entry->eax & 0xff;
 
 		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
+		entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
 		for (t = 1; t < times && *nent < maxnent; ++t) {
 			do_cpuid_1_ent(&entry[t], function, 0);
 			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
@@ -1218,7 +1280,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
 		/* read more entries until level_type is zero */
 		for (i = 1; *nent < maxnent; ++i) {
-			level_type = entry[i - 1].ecx & 0xff;
+			level_type = entry[i - 1].ecx & 0xff00;
 			if (!level_type)
 				break;
 			do_cpuid_1_ent(&entry[i], function, i);
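The 0xff -> 0xff00 change above fixes which ECX field terminates extended-topology enumeration. For CPUID leaf 0x0b, ECX[7:0] merely echoes the requested sub-leaf number while ECX[15:8] holds the level type (0 meaning an invalid level), so masking with 0xff tested a field that is non-zero for every sub-leaf after the first. Only zero versus non-zero matters here, which is why the unshifted 0xff00 mask suffices:

	u32 level_number = ecx & 0xff;		/* echo of the input sub-leaf */
	u32 level_type   = (ecx >> 8) & 0xff;	/* 0 terminates the list */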
@@ -1318,6 +1380,15 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
+{
+	vcpu_load(vcpu);
+	kvm_inject_nmi(vcpu);
+	vcpu_put(vcpu);
+
+	return 0;
+}
+
 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
 					   struct kvm_tpr_access_ctl *tac)
 {
@@ -1377,6 +1448,13 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		r = 0;
 		break;
 	}
+	case KVM_NMI: {
+		r = kvm_vcpu_ioctl_nmi(vcpu);
+		if (r)
+			goto out;
+		r = 0;
+		break;
+	}
 	case KVM_SET_CPUID: {
 		struct kvm_cpuid __user *cpuid_arg = argp;
 		struct kvm_cpuid cpuid;
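KVM_NMI is a new vcpu ioctl with no argument payload. A minimal userspace sketch, assuming a vcpu fd already obtained through KVM_CREATE_VCPU (the wrapper name is mine, not part of the patch):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Ask KVM to inject an NMI into this vcpu; returns 0 on success,
	 * -1 with errno set on failure. */
	static int inject_guest_nmi(int vcpu_fd)
	{
		return ioctl(vcpu_fd, KVM_NMI);
	}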
@@ -1968,7 +2046,7 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
 	if (ret < 0)
 		return 0;
-	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
+	kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
 	return 1;
 }
 
@@ -2404,8 +2482,6 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 	val = kvm_register_read(vcpu, VCPU_REGS_RAX);
 	memcpy(vcpu->arch.pio_data, &val, 4);
 
-	kvm_x86_ops->skip_emulated_instruction(vcpu);
-
 	pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in);
 	if (pio_dev) {
 		kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
@@ -2541,7 +2617,7 @@ int kvm_arch_init(void *opaque)
 	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
 	kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
 	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
-			PT_DIRTY_MASK, PT64_NX_MASK, 0);
+			PT_DIRTY_MASK, PT64_NX_MASK, 0, 0);
 	return 0;
 
 out:
@@ -2729,7 +2805,7 @@ static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
 
 	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
 	/* when no next entry is found, the current entry[i] is reselected */
-	for (j = i + 1; j == i; j = (j + 1) % nent) {
+	for (j = i + 1; ; j = (j + 1) % nent) {
 		struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
 		if (ej->function == e->function) {
 			ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
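The loop fix above is worth spelling out: with the old condition, j starts at i + 1, so j == i is false on the very first test and the body never ran, meaning a stateful CPUID function with several leaves could never advance to its next entry. The rewritten loop runs until it finds the next entry with the same function number (wrapping modulo nent) and always terminates, since entry[i] itself matches when nothing else does, which is exactly the reselection the comment describes.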
@@ -2973,7 +3049,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		pr_debug("vcpu %d received sipi with vector # %x\n",
 			 vcpu->vcpu_id, vcpu->arch.sipi_vector);
 		kvm_lapic_reset(vcpu);
-		r = kvm_x86_ops->vcpu_reset(vcpu);
+		r = kvm_arch_vcpu_reset(vcpu);
 		if (r)
 			return r;
 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
@@ -3275,9 +3351,9 @@ static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
 	kvm_desct->padding = 0;
 }
 
-static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu,
-					   u16 selector,
-					   struct descriptor_table *dtable)
+static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
+					  u16 selector,
+					  struct descriptor_table *dtable)
 {
 	if (selector & 1 << 2) {
 		struct kvm_segment kvm_seg;
@@ -3302,7 +3378,7 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 	struct descriptor_table dtable;
 	u16 index = selector >> 3;
 
-	get_segment_descritptor_dtable(vcpu, selector, &dtable);
+	get_segment_descriptor_dtable(vcpu, selector, &dtable);
 
 	if (dtable.limit < index * 8 + 7) {
 		kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
@@ -3321,7 +3397,7 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 	struct descriptor_table dtable;
 	u16 index = selector >> 3;
 
-	get_segment_descritptor_dtable(vcpu, selector, &dtable);
+	get_segment_descriptor_dtable(vcpu, selector, &dtable);
 
 	if (dtable.limit < index * 8 + 7)
 		return 1;
@@ -3900,6 +3976,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	/* We do fxsave: this must be aligned. */
 	BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
 
+	vcpu->arch.mtrr_state.have_fixed = 1;
 	vcpu_load(vcpu);
 	r = kvm_arch_vcpu_reset(vcpu);
 	if (r == 0)
@@ -3925,6 +4002,9 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
 {
+	vcpu->arch.nmi_pending = false;
+	vcpu->arch.nmi_injected = false;
+
 	return kvm_x86_ops->vcpu_reset(vcpu);
 }
 
@@ -4012,6 +4092,7 @@ struct kvm *kvm_arch_create_vm(void)
 		return ERR_PTR(-ENOMEM);
 
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
+	INIT_LIST_HEAD(&kvm->arch.oos_global_pages);
 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
 
 	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
@@ -4048,8 +4129,8 @@ static void kvm_free_vcpus(struct kvm *kvm)
 
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
-	kvm_iommu_unmap_guest(kvm);
 	kvm_free_all_assigned_devices(kvm);
+	kvm_iommu_unmap_guest(kvm);
 	kvm_free_pit(kvm);
 	kfree(kvm->arch.vpic);
 	kfree(kvm->arch.vioapic);
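The teardown reordering here looks deliberate: kvm_free_all_assigned_devices() detaches assigned devices from the IOMMU domain, and doing that before kvm_iommu_unmap_guest() tears the domain down avoids pulling mappings out from under devices that are still attached. That reading is inferred from the call names; the diff itself does not state the rationale.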
@@ -4127,7 +4208,8 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
-	       || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED;
+	       || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
+	       || vcpu->arch.nmi_pending;
 }
 
 static void vcpu_kick_intr(void *info)
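The extra disjunct makes a pending NMI count as a wake-up condition: kvm_arch_vcpu_runnable() is what kvm_vcpu_block() polls, so a halted vcpu becomes runnable again when an NMI is queued, for instance through the new KVM_NMI ioctl above.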