author     Glauber Costa <glommer@redhat.com>      2011-07-11 15:28:14 -0400
committer  Avi Kivity <avi@redhat.com>             2011-07-14 05:59:14 -0400
commit     c9aaa8957f203bd6df83b002fb40b98390bed078 (patch)
tree       96930ff2cbf73f6b0a3f4973e3933d036af13544 /arch
parent     9ddabbe72e41ca6794cb4947c70929c9410e6752 (diff)
KVM: Steal time implementation
To implement steal time, we need the hypervisor to pass the guest information about how much time was spent running other processes outside the VM, while the vcpu had meaningful work to do - halt time does not count.

This information is acquired through the run_delay field of the delayacct/schedstats infrastructure, which counts time spent in a runqueue but not running.

Steal time is per-cpu information, so the traditional MSR-based infrastructure is used. A new MSR, MSR_KVM_STEAL_TIME, holds the address of the memory area containing the steal time information.

This patch contains the hypervisor part of the steal time infrastructure, and can be backported independently of the guest portion.

[avi, yongjie: export delayacct_on, to avoid build failures in some configs]

Signed-off-by: Glauber Costa <glommer@redhat.com>
Tested-by: Eric B Munson <emunson@mgebm.net>
CC: Rik van Riel <riel@redhat.com>
CC: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
CC: Peter Zijlstra <peterz@infradead.org>
CC: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Yongjie Ren <yongjie.ren@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
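Reviewer note: the guest-side counterpart is merged separately; it registers a per-cpu kvm_steal_time area by writing the area's physical address, with KVM_MSR_ENABLED (bit 0) set, into MSR_KVM_STEAL_TIME. A minimal guest-side sketch, with names assumed from the companion guest patch rather than from this one:

/* Guest-side sketch, not part of this patch. The 64-byte alignment
 * keeps the address intact under the host's KVM_STEAL_VALID_BITS
 * mask (see the kvm_para.h hunk below). */
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	wrmsrl(MSR_KVM_STEAL_TIME, (__pa(st) | KVM_MSR_ENABLED));
}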
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/kvm_host.h  |  9
-rw-r--r--  arch/x86/include/asm/kvm_para.h  |  4
-rw-r--r--  arch/x86/kvm/Kconfig             |  1
-rw-r--r--  arch/x86/kvm/x86.c               | 74
4 files changed, 86 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index da6bbee878c..59086a77ff1 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -389,6 +389,15 @@ struct kvm_vcpu_arch {
 	unsigned int hw_tsc_khz;
 	unsigned int time_offset;
 	struct page *time_page;
+
+	struct {
+		u64 msr_val;
+		u64 last_steal;
+		u64 accum_steal;
+		struct gfn_to_hva_cache stime;
+		struct kvm_steal_time steal;
+	} st;
+
 	u64 last_guest_tsc;
 	u64 last_kernel_ns;
 	u64 last_tsc_nsec;
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 65f8bb9279e..c484ba8e05e 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -45,6 +45,10 @@ struct kvm_steal_time {
 	__u32 pad[12];
 };
 
+#define KVM_STEAL_ALIGNMENT_BITS 5
+#define KVM_STEAL_VALID_BITS ((-1ULL << (KVM_STEAL_ALIGNMENT_BITS + 1)))
+#define KVM_STEAL_RESERVED_MASK (((1 << KVM_STEAL_ALIGNMENT_BITS) - 1 ) << 1)
+
 #define KVM_MAX_MMU_OP_BATCH           32
 
 #define KVM_ASYNC_PF_ENABLED			(1 << 0)
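Taken together, the three masks partition the MSR value: bit 0 is the enable flag (KVM_MSR_ENABLED, added earlier in this series), bits 1-5 (KVM_STEAL_RESERVED_MASK) are reserved and must be zero, and bits 6-63 (KVM_STEAL_VALID_BITS) carry the guest physical address of the steal time area, which must therefore be 64-byte aligned. A decoding sketch; the helper is hypothetical and exists only to illustrate the layout:

/* Hypothetical helper, not in the patch: split a value written to
 * MSR_KVM_STEAL_TIME according to the masks defined above. */
static int steal_msr_decode(u64 data, u64 *gpa, bool *enabled)
{
	if (data & KVM_STEAL_RESERVED_MASK)
		return -EINVAL;			/* bits 1-5 must be clear */
	*enabled = !!(data & KVM_MSR_ENABLED);	/* bit 0 */
	*gpa = data & KVM_STEAL_VALID_BITS;	/* 64-byte aligned address */
	return 0;
}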
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 50f63648ce1..99c3f0589fa 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -31,6 +31,7 @@ config KVM
 	select KVM_ASYNC_PF
 	select USER_RETURN_NOTIFIER
 	select KVM_MMIO
+	select TASK_DELAY_ACCT
 	---help---
 	  Support hosting fully virtualized guest machines using hardware
 	  virtualization extensions. You will need a fairly recent
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0b803f04bde..c96cdc09248 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -808,12 +808,12 @@ EXPORT_SYMBOL_GPL(kvm_get_dr);
  * kvm-specific. Those are put in the beginning of the list.
  */
 
-#define KVM_SAVE_MSRS_BEGIN	8
+#define KVM_SAVE_MSRS_BEGIN	9
 static u32 msrs_to_save[] = {
 	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
 	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
 	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
-	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN,
+	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
 	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
 	MSR_STAR,
 #ifdef CONFIG_X86_64
@@ -1488,6 +1488,35 @@ static void kvmclock_reset(struct kvm_vcpu *vcpu)
 	}
 }
 
+static void accumulate_steal_time(struct kvm_vcpu *vcpu)
+{
+	u64 delta;
+
+	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
+		return;
+
+	delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
+	vcpu->arch.st.last_steal = current->sched_info.run_delay;
+	vcpu->arch.st.accum_steal = delta;
+}
+
+static void record_steal_time(struct kvm_vcpu *vcpu)
+{
+	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
+		return;
+
+	if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
+		return;
+
+	vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal;
+	vcpu->arch.st.steal.version += 2;
+	vcpu->arch.st.accum_steal = 0;
+
+	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+}
+
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
 	switch (msr) {
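The version field gives the guest a lock-free way to take a consistent snapshot: record_steal_time() always advances it by two, so it stays even while the area is stable, and a reader retries if it observes the value change across the read. A sketch of the guest-side reader, with names assumed from the companion guest patch (the odd-value check also covers protocols that mark an update in progress):

/* Guest-side reader sketch, not part of this patch. */
static u64 kvm_steal_clock(int cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
	u64 steal;
	int version;

	do {
		version = src->version;
		rmb();		/* read version before the payload */
		steal = src->steal;
		rmb();		/* read payload before the re-check */
	} while ((version & 1) || (version != src->version));

	return steal;
}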
@@ -1570,6 +1599,33 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		if (kvm_pv_enable_async_pf(vcpu, data))
 			return 1;
 		break;
+	case MSR_KVM_STEAL_TIME:
+
+		if (unlikely(!sched_info_on()))
+			return 1;
+
+		if (data & KVM_STEAL_RESERVED_MASK)
+			return 1;
+
+		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
+						data & KVM_STEAL_VALID_BITS))
+			return 1;
+
+		vcpu->arch.st.msr_val = data;
+
+		if (!(data & KVM_MSR_ENABLED))
+			break;
+
+		vcpu->arch.st.last_steal = current->sched_info.run_delay;
+
+		preempt_disable();
+		accumulate_steal_time(vcpu);
+		preempt_enable();
+
+		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+
+		break;
+
 	case MSR_IA32_MCG_CTL:
 	case MSR_IA32_MCG_STATUS:
 	case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
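Note that KVM_REQ_STEAL_UPDATE is defined nowhere in this diff: the view is limited to 'arch', and in the full commit the request bit lives in include/linux/kvm_host.h. Presumably it is a new vcpu request flag along these lines (the exact bit number here is an assumption for illustration):

/* In include/linux/kvm_host.h, outside this diffstat; the bit
 * number shown is illustrative, not confirmed by this page. */
#define KVM_REQ_STEAL_UPDATE	13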
@@ -1855,6 +1911,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	case MSR_KVM_ASYNC_PF_EN:
 		data = vcpu->arch.apf.msr_val;
 		break;
+	case MSR_KVM_STEAL_TIME:
+		data = vcpu->arch.st.msr_val;
+		break;
 	case MSR_IA32_P5_MC_ADDR:
 	case MSR_IA32_P5_MC_TYPE:
 	case MSR_IA32_MCG_CAP:
@@ -2166,6 +2225,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		kvm_migrate_timers(vcpu);
 		vcpu->cpu = cpu;
 	}
+
+	accumulate_steal_time(vcpu);
+	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -2487,6 +2549,10 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
 			     (1 << KVM_FEATURE_ASYNC_PF) |
 			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
+
+		if (sched_info_on())
+			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
+
 		entry->ebx = 0;
 		entry->ecx = 0;
 		entry->edx = 0;
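On the guest side, this feature bit surfaces in the KVM_CPUID_FEATURES leaf, which the existing kvm_para_has_feature() helper reads. A probe sketch (the wrapper function name is hypothetical, not part of this patch):

/* Guest-side probe sketch; kvm_steal_time_supported() is a
 * hypothetical name for illustration. */
static bool kvm_steal_time_supported(void)
{
	return kvm_para_available() &&
	       kvm_para_has_feature(KVM_FEATURE_STEAL_TIME);
}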
@@ -5470,6 +5536,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 			r = 1;
 			goto out;
 		}
+		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
+			record_steal_time(vcpu);
+
 	}
 
 	r = kvm_mmu_reload(vcpu);
@@ -6206,6 +6275,7 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
 
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	vcpu->arch.apf.msr_val = 0;
+	vcpu->arch.st.msr_val = 0;
 
 	kvmclock_reset(vcpu);
 