author    Linus Torvalds <torvalds@linux-foundation.org>  2013-03-19 21:24:12 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-03-19 21:24:12 -0400
commit    ea4a0ce11160200410abbabd44ec9e75e93a95be (patch)
tree      20ceced4ddc4b34d78ce1318d2bc5a58debf08aa /arch
parent    10b38669d64c757cfd927e3820292c580ed70aae (diff)
parent    a2c118bfab8bc6b8bb213abfc35201e441693d55 (diff)
Merge git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm fixes from Marcelo Tosatti.

* git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: Fix bounds checking in ioapic indirect register reads (CVE-2013-1798)
  KVM: x86: Convert MSR_KVM_SYSTEM_TIME to use gfn_to_hva_cache functions (CVE-2013-1797)
  KVM: x86: fix for buffer overflow in handling of MSR_KVM_SYSTEM_TIME (CVE-2013-1796)
  KVM: x86: fix deadlock in clock-in-progress request handling
  KVM: allow host header to be included even for !CONFIG_KVM
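
The core of the CVE-2013-1796 fix is the alignment check added to the MSR_KVM_SYSTEM_TIME handler in kvm_set_msr_common() (see the x86.c hunk below): a pvclock_vcpu_time_info structure whose in-page offset is not 32-byte aligned can straddle the page boundary, and the old single-page kmap_atomic()/memcpy() path could write past the mapped page. What follows is a minimal userspace sketch of that check, assuming a 4 KiB page and the 32-byte structure size; system_time_gpa_ok() and main() are illustrative helpers, not kernel code.

/*
 * Minimal userspace sketch of the MSR_KVM_SYSTEM_TIME alignment check
 * (CVE-2013-1796).  PAGE_SIZE and the 32-byte pvclock structure size
 * mirror the patch; system_time_gpa_ok() and main() are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  4096ULL
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PVCLOCK_SZ 32ULL	/* sizeof(struct pvclock_vcpu_time_info) */

/* Returns 1 if the guest-written MSR value passes the new check. */
static int system_time_gpa_ok(uint64_t data)
{
	/* Bit 0 enables kvmclock; the rest is the guest physical address. */
	uint64_t gpa_offset = data & ~(PAGE_MASK | 1);

	/*
	 * An unaligned structure can cross the page boundary; the old code
	 * mapped only one page, so the copy could run past its end.  An
	 * offset that is a multiple of 32 always fits within the page.
	 */
	return (gpa_offset & (PVCLOCK_SZ - 1)) == 0;
}

int main(void)
{
	/* Offset 0xff0 leaves only 16 bytes before the page end: rejected. */
	printf("offset 0xff0: %s\n",
	       system_time_gpa_ok(0x1000ULL | 0xff0 | 1) ? "ok" : "rejected");
	/* Offset 0xfe0 is 32-byte aligned and leaves exactly 32 bytes: accepted. */
	printf("offset 0xfe0: %s\n",
	       system_time_gpa_ok(0x1000ULL | 0xfe0 | 1) ? "ok" : "rejected");
	return 0;
}
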
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/kvm_host.h   4
-rw-r--r--  arch/x86/kvm/x86.c               64
2 files changed, 33 insertions, 35 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 635a74d22409..4979778cc7fb 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -414,8 +414,8 @@ struct kvm_vcpu_arch {
 	gpa_t time;
 	struct pvclock_vcpu_time_info hv_clock;
 	unsigned int hw_tsc_khz;
-	unsigned int time_offset;
-	struct page *time_page;
+	struct gfn_to_hva_cache pv_time;
+	bool pv_time_enabled;
 	/* set guest stopped flag in pvclock flags field */
 	bool pvclock_set_guest_stopped_request;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f71500af1f81..f19ac0aca60d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1406,25 +1406,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	unsigned long flags, this_tsc_khz;
 	struct kvm_vcpu_arch *vcpu = &v->arch;
 	struct kvm_arch *ka = &v->kvm->arch;
-	void *shared_kaddr;
 	s64 kernel_ns, max_kernel_ns;
 	u64 tsc_timestamp, host_tsc;
-	struct pvclock_vcpu_time_info *guest_hv_clock;
+	struct pvclock_vcpu_time_info guest_hv_clock;
 	u8 pvclock_flags;
 	bool use_master_clock;
 
 	kernel_ns = 0;
 	host_tsc = 0;
 
-	/* Keep irq disabled to prevent changes to the clock */
-	local_irq_save(flags);
-	this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
-	if (unlikely(this_tsc_khz == 0)) {
-		local_irq_restore(flags);
-		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
-		return 1;
-	}
-
 	/*
 	 * If the host uses TSC clock, then passthrough TSC as stable
 	 * to the guest.
@@ -1436,6 +1426,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 		kernel_ns = ka->master_kernel_ns;
 	}
 	spin_unlock(&ka->pvclock_gtod_sync_lock);
+
+	/* Keep irq disabled to prevent changes to the clock */
+	local_irq_save(flags);
+	this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
+	if (unlikely(this_tsc_khz == 0)) {
+		local_irq_restore(flags);
+		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
+		return 1;
+	}
 	if (!use_master_clock) {
 		host_tsc = native_read_tsc();
 		kernel_ns = get_kernel_ns();
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 
 	local_irq_restore(flags);
 
-	if (!vcpu->time_page)
+	if (!vcpu->pv_time_enabled)
 		return 0;
 
 	/*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	 */
 	vcpu->hv_clock.version += 2;
 
-	shared_kaddr = kmap_atomic(vcpu->time_page);
-
-	guest_hv_clock = shared_kaddr + vcpu->time_offset;
+	if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+		&guest_hv_clock, sizeof(guest_hv_clock))))
+		return 0;
 
 	/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
-	pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+	pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
 
 	if (vcpu->pvclock_set_guest_stopped_request) {
 		pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 
 	vcpu->hv_clock.flags = pvclock_flags;
 
-	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
-	       sizeof(vcpu->hv_clock));
-
-	kunmap_atomic(shared_kaddr);
-
-	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+				&vcpu->hv_clock,
+				sizeof(vcpu->hv_clock));
 	return 0;
 }
 
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 
 static void kvmclock_reset(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.time_page) {
-		kvm_release_page_dirty(vcpu->arch.time_page);
-		vcpu->arch.time_page = NULL;
-	}
+	vcpu->arch.pv_time_enabled = false;
 }
 
 static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	case MSR_KVM_SYSTEM_TIME_NEW:
 	case MSR_KVM_SYSTEM_TIME: {
+		u64 gpa_offset;
 		kvmclock_reset(vcpu);
 
 		vcpu->arch.time = data;
@@ -1956,14 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (!(data & 1))
 			break;
 
-		/* ...but clean it before doing the actual write */
-		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+		gpa_offset = data & ~(PAGE_MASK | 1);
 
-		vcpu->arch.time_page =
-				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
+		/* Check that the address is 32-byte aligned. */
+		if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
+			break;
 
-		if (is_error_page(vcpu->arch.time_page))
-			vcpu->arch.time_page = NULL;
+		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+		     &vcpu->arch.pv_time, data & ~1ULL))
+			vcpu->arch.pv_time_enabled = false;
+		else
+			vcpu->arch.pv_time_enabled = true;
 
 		break;
 	}
@@ -2967,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
  */
 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->arch.time_page)
+	if (!vcpu->arch.pv_time_enabled)
 		return -EINVAL;
 	vcpu->arch.pvclock_set_guest_stopped_request = true;
 	kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6718,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 		goto fail_free_wbinvd_dirty_mask;
 
 	vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+	vcpu->arch.pv_time_enabled = false;
 	kvm_async_pf_hash_reset(vcpu);
 	kvm_pmu_init(vcpu);
 