Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--  arch/x86/kvm/x86.c | 140
1 file changed, 85 insertions(+), 55 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5d004da1e35d..39c28f09dfd5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -94,6 +94,9 @@ EXPORT_SYMBOL_GPL(kvm_x86_ops);
 static bool ignore_msrs = 0;
 module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
 
+unsigned int min_timer_period_us = 500;
+module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
+
 bool kvm_has_tsc_control;
 EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
 u32 kvm_max_guest_tsc_khz;
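The new min_timer_period_us knob caps how short a guest may program its local APIC timer period; the consumer lives in the lapic timer code and is outside this diff. A minimal sketch of the clamping it implies, with clamp_timer_period_ns as a hypothetical helper name:

#include <stdint.h>

/* Hypothetical helper (not in this diff): clamp a requested APIC timer
 * period, in nanoseconds, to the floor implied by min_timer_period_us. */
static inline uint64_t clamp_timer_period_ns(uint64_t period_ns,
                                             unsigned int min_us)
{
        uint64_t min_ns = (uint64_t)min_us * 1000;

        return period_ns < min_ns ? min_ns : period_ns;
}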
@@ -254,10 +257,26 @@ u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_get_apic_base);
 
-void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
+int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
-	/* TODO: reserve bits check */
-	kvm_lapic_set_base(vcpu, data);
+	u64 old_state = vcpu->arch.apic_base &
+		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
+	u64 new_state = msr_info->data &
+		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
+	u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) |
+		0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE);
+
+	if (!msr_info->host_initiated &&
+	    ((msr_info->data & reserved_bits) != 0 ||
+	     new_state == X2APIC_ENABLE ||
+	     (new_state == MSR_IA32_APICBASE_ENABLE &&
+	      old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) ||
+	     (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) &&
+	      old_state == 0)))
+		return 1;
+
+	kvm_lapic_set_base(vcpu, msr_info->data);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 
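kvm_set_apic_base() now validates guest-initiated writes instead of trusting them: the reserved mask covers bits 0-7 and 9 (0x2ff), every bit above the guest's physical address width, and the x2APIC enable bit when CPUID does not advertise x2APIC. The transition checks encode that x2APIC mode may only be entered from xAPIC mode and only left by disabling the APIC. A standalone sketch of that transition rule, with APICBASE_EXTD standing in for X2APIC_ENABLE and the usual SDM bit positions assumed:

#include <stdbool.h>
#include <stdint.h>

#define APICBASE_ENABLE (1ULL << 11)    /* MSR_IA32_APICBASE_ENABLE */
#define APICBASE_EXTD   (1ULL << 10)    /* X2APIC_ENABLE */

/* Sketch of the transition rule enforced above, for guest-initiated
 * writes that already passed the reserved-bits check. */
static bool apic_base_transition_ok(uint64_t old_base, uint64_t new_base)
{
        uint64_t old_state = old_base & (APICBASE_ENABLE | APICBASE_EXTD);
        uint64_t new_state = new_base & (APICBASE_ENABLE | APICBASE_EXTD);

        if (new_state == APICBASE_EXTD)
                return false;   /* x2APIC bit without the enable bit */
        if (new_state == APICBASE_ENABLE &&
            old_state == (APICBASE_ENABLE | APICBASE_EXTD))
                return false;   /* x2APIC -> xAPIC without disabling first */
        if (new_state == (APICBASE_ENABLE | APICBASE_EXTD) && old_state == 0)
                return false;   /* disabled -> x2APIC without an xAPIC step */
        return true;
}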
@@ -719,6 +738,12 @@ unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_get_cr8);
 
+static void kvm_update_dr6(struct kvm_vcpu *vcpu)
+{
+	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
+		kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6);
+}
+
 static void kvm_update_dr7(struct kvm_vcpu *vcpu)
 {
 	unsigned long dr7;
@@ -747,6 +772,7 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
 		if (val & 0xffffffff00000000ULL)
 			return -1; /* #GP */
 		vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
+		kvm_update_dr6(vcpu);
 		break;
 	case 5:
 		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
@@ -788,7 +814,10 @@ static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
 			return 1;
 		/* fall through */
 	case 6:
-		*val = vcpu->arch.dr6;
+		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
+			*val = vcpu->arch.dr6;
+		else
+			*val = kvm_x86_ops->get_dr6(vcpu);
 		break;
 	case 5:
 		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
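Together with kvm_update_dr6() above, this makes ownership of DR6 explicit: while userspace debugs the guest with hardware breakpoints (KVM_GUESTDBG_USE_HW_BP), the software shadow vcpu->arch.dr6 is authoritative; otherwise reads and writes are routed through the new vendor hooks. A toy model of that routing, with invented names standing in for the vcpu state and the kvm_x86_ops callbacks:

#include <stdbool.h>
#include <stdint.h>

struct toy_vcpu {
        bool host_owns_hw_bp;   /* KVM_GUESTDBG_USE_HW_BP is set */
        uint64_t shadow_dr6;    /* vcpu->arch.dr6 */
        uint64_t hw_dr6;        /* what kvm_x86_ops->{get,set}_dr6 touch */
};

static uint64_t toy_get_dr6(const struct toy_vcpu *v)
{
        return v->host_owns_hw_bp ? v->shadow_dr6 : v->hw_dr6;
}

static void toy_set_dr6(struct toy_vcpu *v, uint64_t val)
{
        v->shadow_dr6 = val;
        if (!v->host_owns_hw_bp)
                v->hw_dr6 = val;        /* mirrors kvm_update_dr6() */
}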
@@ -836,11 +865,12 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc);
  * kvm-specific. Those are put in the beginning of the list.
  */
 
-#define KVM_SAVE_MSRS_BEGIN	10
+#define KVM_SAVE_MSRS_BEGIN	12
 static u32 msrs_to_save[] = {
 	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
 	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
 	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
+	HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
 	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
 	MSR_KVM_PV_EOI_EN,
 	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
@@ -1275,8 +1305,6 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	kvm->arch.last_tsc_write = data;
 	kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
 
-	/* Reset of TSC must disable overshoot protection below */
-	vcpu->arch.hv_clock.tsc_timestamp = 0;
 	vcpu->arch.last_guest_tsc = data;
 
 	/* Keep track of which generation this VCPU has synchronized to */
@@ -1484,7 +1512,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	unsigned long flags, this_tsc_khz;
 	struct kvm_vcpu_arch *vcpu = &v->arch;
 	struct kvm_arch *ka = &v->kvm->arch;
-	s64 kernel_ns, max_kernel_ns;
+	s64 kernel_ns;
 	u64 tsc_timestamp, host_tsc;
 	struct pvclock_vcpu_time_info guest_hv_clock;
 	u8 pvclock_flags;
@@ -1543,37 +1571,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	if (!vcpu->pv_time_enabled)
 		return 0;
 
-	/*
-	 * Time as measured by the TSC may go backwards when resetting the base
-	 * tsc_timestamp. The reason for this is that the TSC resolution is
-	 * higher than the resolution of the other clock scales. Thus, many
-	 * possible measurments of the TSC correspond to one measurement of any
-	 * other clock, and so a spread of values is possible. This is not a
-	 * problem for the computation of the nanosecond clock; with TSC rates
-	 * around 1GHZ, there can only be a few cycles which correspond to one
-	 * nanosecond value, and any path through this code will inevitably
-	 * take longer than that. However, with the kernel_ns value itself,
-	 * the precision may be much lower, down to HZ granularity. If the
-	 * first sampling of TSC against kernel_ns ends in the low part of the
-	 * range, and the second in the high end of the range, we can get:
-	 *
-	 * (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
-	 *
-	 * As the sampling errors potentially range in the thousands of cycles,
-	 * it is possible such a time value has already been observed by the
-	 * guest. To protect against this, we must compute the system time as
-	 * observed by the guest and ensure the new system time is greater.
-	 */
-	max_kernel_ns = 0;
-	if (vcpu->hv_clock.tsc_timestamp) {
-		max_kernel_ns = vcpu->last_guest_tsc -
-			vcpu->hv_clock.tsc_timestamp;
-		max_kernel_ns = pvclock_scale_delta(max_kernel_ns,
-				    vcpu->hv_clock.tsc_to_system_mul,
-				    vcpu->hv_clock.tsc_shift);
-		max_kernel_ns += vcpu->last_kernel_ns;
-	}
-
 	if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
 		kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
 				   &vcpu->hv_clock.tsc_shift,
@@ -1581,14 +1578,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 		vcpu->hw_tsc_khz = this_tsc_khz;
 	}
 
-	/* with a master <monotonic time, tsc value> tuple,
-	 * pvclock clock reads always increase at the (scaled) rate
-	 * of guest TSC - no need to deal with sampling errors.
-	 */
-	if (!use_master_clock) {
-		if (max_kernel_ns > kernel_ns)
-			kernel_ns = max_kernel_ns;
-	}
 	/* With all the info we got, fill in the values */
 	vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
 	vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
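The two hunks above drop the max_kernel_ns clamp whose removed comment documents the hazard: kernel_ns can be sampled at much coarser resolution than the TSC, so rebasing tsc_timestamp can produce a pvclock value the guest has already observed. A worked instance of the removed inequality, assuming a 1 GHz TSC (scale S = 1 ns/cycle) and illustrative sample values only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Two <tsc offset, kernel_ns> samplings where kernel_ns advanced
         * only 4 ns while the TSC advanced 8000 cycles. */
        uint64_t tsc = 10000;                           /* guest read point */
        uint64_t off_low = 1000,  kns_old = 5000000;    /* first sampling */
        uint64_t off_high = 9000, kns_new = 5000004;    /* second sampling */

        uint64_t t_old = (tsc - off_low) + kns_old;     /* 5009000 ns */
        uint64_t t_new = (tsc - off_high) + kns_new;    /* 5001004 ns */

        printf("old=%llu new=%llu backwards by %llu ns\n",
               (unsigned long long)t_old, (unsigned long long)t_new,
               (unsigned long long)(t_old - t_new));
        return 0;
}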
@@ -1826,6 +1815,8 @@ static bool kvm_hv_msr_partition_wide(u32 msr)
 	switch (msr) {
 	case HV_X64_MSR_GUEST_OS_ID:
 	case HV_X64_MSR_HYPERCALL:
+	case HV_X64_MSR_REFERENCE_TSC:
+	case HV_X64_MSR_TIME_REF_COUNT:
 		r = true;
 		break;
 	}
@@ -1865,6 +1856,21 @@ static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		if (__copy_to_user((void __user *)addr, instructions, 4))
 			return 1;
 		kvm->arch.hv_hypercall = data;
+		mark_page_dirty(kvm, gfn);
+		break;
+	}
+	case HV_X64_MSR_REFERENCE_TSC: {
+		u64 gfn;
+		HV_REFERENCE_TSC_PAGE tsc_ref;
+		memset(&tsc_ref, 0, sizeof(tsc_ref));
+		kvm->arch.hv_tsc_page = data;
+		if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+			break;
+		gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
+		if (kvm_write_guest(kvm, data,
+			&tsc_ref, sizeof(tsc_ref)))
+			return 1;
+		mark_page_dirty(kvm, gfn);
 		break;
 	}
 	default:
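HV_X64_MSR_REFERENCE_TSC packs an enable bit and a page frame number into one value; KVM records it in kvm->arch.hv_tsc_page and, when enabled, zero-fills the guest page. As I read the Hyper-V spec, a zeroed page carries an invalid sequence number, which steers guests to the HV_X64_MSR_TIME_REF_COUNT fallback path. A sketch of the decoding, assuming the conventional bit layout (enable in bit 0, address shifted by 12):

#include <stdbool.h>
#include <stdint.h>

#define TSC_REFERENCE_ENABLE        (1ULL << 0)     /* assumed bit layout */
#define TSC_REFERENCE_ADDRESS_SHIFT 12

struct tsc_page_msr {
        bool enabled;
        uint64_t gfn;   /* guest page frame holding the reference TSC page */
};

/* Decode an HV_X64_MSR_REFERENCE_TSC write as the handler above does. */
static struct tsc_page_msr decode_tsc_page_msr(uint64_t data)
{
        struct tsc_page_msr m = {
                .enabled = (data & TSC_REFERENCE_ENABLE) != 0,
                .gfn = data >> TSC_REFERENCE_ADDRESS_SHIFT,
        };
        return m;
}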
@@ -1879,19 +1885,21 @@ static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
 	switch (msr) {
 	case HV_X64_MSR_APIC_ASSIST_PAGE: {
+		u64 gfn;
 		unsigned long addr;
 
 		if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
 			vcpu->arch.hv_vapic = data;
 			break;
 		}
-		addr = gfn_to_hva(vcpu->kvm, data >>
-				  HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
+		gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
+		addr = gfn_to_hva(vcpu->kvm, gfn);
 		if (kvm_is_error_hva(addr))
 			return 1;
 		if (__clear_user((void __user *)addr, PAGE_SIZE))
 			return 1;
 		vcpu->arch.hv_vapic = data;
+		mark_page_dirty(vcpu->kvm, gfn);
 		break;
 	}
 	case HV_X64_MSR_EOI:
@@ -2017,8 +2025,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case 0x200 ... 0x2ff:
 		return set_msr_mtrr(vcpu, msr, data);
 	case MSR_IA32_APICBASE:
-		kvm_set_apic_base(vcpu, data);
-		break;
+		return kvm_set_apic_base(vcpu, msr_info);
 	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
 		return kvm_x2apic_msr_write(vcpu, msr, data);
 	case MSR_IA32_TSCDEADLINE:
@@ -2291,6 +2298,14 @@ static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	case HV_X64_MSR_HYPERCALL:
 		data = kvm->arch.hv_hypercall;
 		break;
+	case HV_X64_MSR_TIME_REF_COUNT: {
+		data =
+		     div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100);
+		break;
+	}
+	case HV_X64_MSR_REFERENCE_TSC:
+		data = kvm->arch.hv_tsc_page;
+		break;
 	default:
 		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
 		return 1;
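The Hyper-V reference counter ticks in 100 ns units, hence the div_u64 of kvmclock nanoseconds by 100 above. A minimal model, assuming now_ns already includes kvm->arch.kvmclock_offset:

#include <stdint.h>

/* One reference tick per 100 ns, mirroring div_u64(ns, 100) above. */
static uint64_t hv_ref_count_from_ns(uint64_t now_ns)
{
        return now_ns / 100;
}

One second of kvmclock time (1,000,000,000 ns) therefore reads back as 10,000,000 reference ticks.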
@@ -2601,6 +2616,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_GET_TSC_KHZ:
 	case KVM_CAP_KVMCLOCK_CTRL:
 	case KVM_CAP_READONLY_MEM:
+	case KVM_CAP_HYPERV_TIME:
 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
 	case KVM_CAP_ASSIGN_DEV_IRQ:
 	case KVM_CAP_PCI_2_3:
@@ -2972,8 +2988,11 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
 					     struct kvm_debugregs *dbgregs)
 {
+	unsigned long val;
+
 	memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
-	dbgregs->dr6 = vcpu->arch.dr6;
+	_kvm_get_dr(vcpu, 6, &val);
+	dbgregs->dr6 = val;
 	dbgregs->dr7 = vcpu->arch.dr7;
 	dbgregs->flags = 0;
 	memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
@@ -2987,7 +3006,9 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
 
 	memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
 	vcpu->arch.dr6 = dbgregs->dr6;
+	kvm_update_dr6(vcpu);
 	vcpu->arch.dr7 = dbgregs->dr7;
+	kvm_update_dr7(vcpu);
 
 	return 0;
 }
@@ -5834,6 +5855,11 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 	kvm_apic_update_tmr(vcpu, tmr);
 }
 
+/*
+ * Returns 1 to let __vcpu_run() continue the guest execution loop without
+ * exiting to the userspace. Otherwise, the value will be returned to the
+ * userspace.
+ */
 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 {
 	int r;
@@ -6089,7 +6115,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 		}
 		if (need_resched()) {
 			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
-			kvm_resched(vcpu);
+			cond_resched();
 			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 		}
 	}
@@ -6401,6 +6427,7 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 				  struct kvm_sregs *sregs)
 {
+	struct msr_data apic_base_msr;
 	int mmu_reset_needed = 0;
 	int pending_vec, max_bits, idx;
 	struct desc_ptr dt;
@@ -6424,7 +6451,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
 	mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
 	kvm_x86_ops->set_efer(vcpu, sregs->efer);
-	kvm_set_apic_base(vcpu, sregs->apic_base);
+	apic_base_msr.data = sregs->apic_base;
+	apic_base_msr.host_initiated = true;
+	kvm_set_apic_base(vcpu, &apic_base_msr);
 
 	mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
 	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
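Because kvm_set_apic_base() now rejects invalid guest writes, the SET_SREGS path marks its write host_initiated so that restoring saved state bypasses those checks. A sketch of the shape of the argument as used here (msr_data_sketch is an invented name; the kernel's struct msr_data also carries the MSR index, which this hunk does not set):

#include <stdbool.h>
#include <stdint.h>

struct msr_data_sketch {
        bool host_initiated;    /* true: ioctl-driven restore, skip checks */
        uint64_t data;          /* raw MSR value, here sregs->apic_base */
};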
@@ -6717,6 +6746,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu)
 
 	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
 	vcpu->arch.dr6 = DR6_FIXED_1;
+	kvm_update_dr6(vcpu);
 	vcpu->arch.dr7 = DR7_FIXED_1;
 	kvm_update_dr7(vcpu);
 