author	Glauber Costa <glommer@redhat.com>	2011-07-11 15:28:19 -0400
committer	Avi Kivity <avi@redhat.com>	2011-07-24 04:49:36 -0400
commit	d910f5c1064d7ff09c31b0191564f9f99e210f91 (patch)
tree	fcc977e4b0b6f1fc9dbdcc485753a7422147a40e /arch/x86
parent	095c0aa83e52d6c3dd7168610746703921f570af (diff)
KVM guest: KVM Steal time registration
This patch implements the kvm bits of the steal time infrastructure. The
most important part of it is the steal time clock. It is a continuous clock
that shows the accumulated amount of steal time since vcpu creation. It is
supposed to survive cpu offlining/onlining.

[marcelo: fix build with CONFIG_KVM_GUEST=n]

Signed-off-by: Glauber Costa <glommer@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Tested-by: Eric B Munson <emunson@mgebm.net>
CC: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
CC: Peter Zijlstra <peterz@infradead.org>
CC: Avi Kivity <avi@redhat.com>
CC: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
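The guest publishes a per-vcpu record to the host by writing its physical address to MSR_KVM_STEAL_TIME, then samples it locklessly from kvm_steal_clock(). For reference, the shared record is defined by the host-side half of this series; a sketch of its layout (not part of this diff):

struct kvm_steal_time {
	__u64 steal;	/* cumulative steal time, in nanoseconds */
	__u32 version;	/* seqcount-style: odd while an update is in flight */
	__u32 flags;
	__u32 pad[12];	/* pads the record to 64 bytes */
};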
Diffstat (limited to 'arch/x86')
 arch/x86/include/asm/kvm_para.h |  6 ++++++
 arch/x86/kernel/kvm.c           | 72 +++++++++++++++++++++++++++++++++++++++
 arch/x86/kernel/kvmclock.c      |  2 ++
 3 files changed, 80 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index c484ba8e05ea..734c3767cfac 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -192,6 +192,7 @@ void __init kvm_guest_init(void);
 void kvm_async_pf_task_wait(u32 token);
 void kvm_async_pf_task_wake(u32 token);
 u32 kvm_read_and_reset_pf_reason(void);
+extern void kvm_disable_steal_time(void);
 #else
 #define kvm_guest_init() do { } while (0)
 #define kvm_async_pf_task_wait(T) do {} while(0)
@@ -200,6 +201,11 @@ static inline u32 kvm_read_and_reset_pf_reason(void)
 {
 	return 0;
 }
+
+static inline void kvm_disable_steal_time(void)
+{
+	return;
+}
 #endif
 
 #endif /* __KERNEL__ */
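The empty inline stub under #else exists so that code built with CONFIG_KVM_GUEST=n (kvmclock.c is gated by the separate CONFIG_KVM_CLOCK option) can still call kvm_disable_steal_time() unconditionally; this is the build fix credited to Marcelo in the changelog. The pattern, sketched with a hypothetical CONFIG_FOO:

#ifdef CONFIG_FOO
extern void foo_disable(void);	/* real implementation compiled in */
#else
static inline void foo_disable(void)
{
	/* no-op, so callers need no #ifdef guards of their own */
}
#endif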
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 33c07b0b122e..a9c2116001d6 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -51,6 +51,15 @@ static int parse_no_kvmapf(char *arg)
 
 early_param("no-kvmapf", parse_no_kvmapf);
 
+static int steal_acc = 1;
+static int parse_no_stealacc(char *arg)
+{
+	steal_acc = 0;
+	return 0;
+}
+
+early_param("no-steal-acc", parse_no_stealacc);
+
 struct kvm_para_state {
 	u8 mmu_queue[MMU_QUEUE_SIZE];
 	int mmu_queue_len;
@@ -58,6 +67,8 @@ struct kvm_para_state {
 
 static DEFINE_PER_CPU(struct kvm_para_state, para_state);
 static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
+static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
+static int has_steal_clock = 0;
 
 static struct kvm_para_state *kvm_para_state(void)
 {
@@ -441,6 +452,21 @@ static void __init paravirt_ops_setup(void)
 #endif
 }
 
+static void kvm_register_steal_time(void)
+{
+	int cpu = smp_processor_id();
+	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);
+
+	if (!has_steal_clock)
+		return;
+
+	memset(st, 0, sizeof(*st));
+
+	wrmsrl(MSR_KVM_STEAL_TIME, (__pa(st) | KVM_MSR_ENABLED));
+	printk(KERN_INFO "kvm-stealtime: cpu %d, msr %lx\n",
+		cpu, __pa(st));
+}
+
 void __cpuinit kvm_guest_cpu_init(void)
 {
 	if (!kvm_para_available())
@@ -457,6 +483,9 @@ void __cpuinit kvm_guest_cpu_init(void)
 		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
 		       smp_processor_id());
 	}
+
+	if (has_steal_clock)
+		kvm_register_steal_time();
 }
 
 static void kvm_pv_disable_apf(void *unused)
@@ -483,6 +512,31 @@ static struct notifier_block kvm_pv_reboot_nb = {
 	.notifier_call = kvm_pv_reboot_notify,
 };
 
+static u64 kvm_steal_clock(int cpu)
+{
+	u64 steal;
+	struct kvm_steal_time *src;
+	int version;
+
+	src = &per_cpu(steal_time, cpu);
+	do {
+		version = src->version;
+		rmb();
+		steal = src->steal;
+		rmb();
+	} while ((version & 1) || (version != src->version));
+
+	return steal;
+}
+
+void kvm_disable_steal_time(void)
+{
+	if (!has_steal_clock)
+		return;
+
+	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
+}
+
 #ifdef CONFIG_SMP
 static void __init kvm_smp_prepare_boot_cpu(void)
 {
@@ -500,6 +554,7 @@ static void __cpuinit kvm_guest_cpu_online(void *dummy)
 
 static void kvm_guest_cpu_offline(void *dummy)
 {
+	kvm_disable_steal_time();
 	kvm_pv_disable_apf(NULL);
 	apf_task_wake_all();
 }
@@ -548,6 +603,11 @@ void __init kvm_guest_init(void)
 	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
 		x86_init.irqs.trap_init = kvm_apf_trap_init;
 
+	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+		has_steal_clock = 1;
+		pv_time_ops.steal_clock = kvm_steal_clock;
+	}
+
 #ifdef CONFIG_SMP
 	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
 	register_cpu_notifier(&kvm_cpu_notifier);
@@ -555,3 +615,15 @@ void __init kvm_guest_init(void)
 	kvm_guest_cpu_init();
 #endif
 }
+
+static __init int activate_jump_labels(void)
+{
+	if (has_steal_clock) {
+		jump_label_inc(&paravirt_steal_enabled);
+		if (steal_acc)
+			jump_label_inc(&paravirt_steal_rq_enabled);
+	}
+
+	return 0;
+}
+arch_initcall(activate_jump_labels);
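The loop in kvm_steal_clock() is the reader half of a seqcount-style protocol: version is odd while the host is mid-update, so the guest retries on an odd version or on a version change across the read. A writer that satisfies this protocol looks roughly like the sketch below (illustrative only; the real update is done by the host-side KVM patch in this series):

static void write_steal_time(struct kvm_steal_time *st, u64 delta)
{
	st->version += 1;	/* odd: an update is in flight, readers retry */
	smp_wmb();		/* order the version bump before the data write */
	st->steal += delta;
	smp_wmb();		/* order the data write before the final bump */
	st->version += 1;	/* even: readers may trust what they sampled */
}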
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 6389a6bca11b..c1a0188e29ae 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -160,6 +160,7 @@ static void __cpuinit kvm_setup_secondary_clock(void)
 static void kvm_crash_shutdown(struct pt_regs *regs)
 {
 	native_write_msr(msr_kvm_system_time, 0, 0);
+	kvm_disable_steal_time();
 	native_machine_crash_shutdown(regs);
 }
 #endif
@@ -167,6 +168,7 @@ static void kvm_crash_shutdown(struct pt_regs *regs)
 static void kvm_shutdown(void)
 {
 	native_write_msr(msr_kvm_system_time, 0, 0);
+	kvm_disable_steal_time();
 	native_machine_shutdown();
 }
 
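Once activate_jump_labels() has flipped the static keys, consumers reach the clock through the paravirt hook rather than calling kvm_steal_clock() directly; the clock is cumulative, so callers diff successive samples. A simplified sketch of the consumer-side pattern from this series (names as of the 3.1-era jump label API):

	if (static_branch(&paravirt_steal_enabled)) {
		u64 steal = paravirt_steal_clock(smp_processor_id());
		/* account steal - prev_steal, then remember steal */
	}

Booting the guest with no-steal-acc keeps the steal clock registered but leaves paravirt_steal_rq_enabled off, so the scheduler skips the run-queue steal accounting.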