diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-07-24 12:07:03 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-07-24 12:07:03 -0400 |
commit | 5fabc487c96819dd12ddb9414835d170fd9cd6d5 (patch) | |
tree | 01532d492e5074b0d3add29bf92ebf9a9d161e9e /arch/x86/kernel | |
parent | c61264f98c1a974ee6f545f61a4ab33b141d6bda (diff) | |
parent | 3f68b0318bbbd61bf08478ab99a149f0d9e5156e (diff) |
Merge branch 'kvm-updates/3.1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/3.1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (143 commits)
KVM: IOMMU: Disable device assignment without interrupt remapping
KVM: MMU: trace mmio page fault
KVM: MMU: mmio page fault support
KVM: MMU: reorganize struct kvm_shadow_walk_iterator
KVM: MMU: lockless walking shadow page table
KVM: MMU: do not need atomically to set/clear spte
KVM: MMU: introduce the rules to modify shadow page table
KVM: MMU: abstract some functions to handle fault pfn
KVM: MMU: filter out the mmio pfn from the fault pfn
KVM: MMU: remove bypass_guest_pf
KVM: MMU: split kvm_mmu_free_page
KVM: MMU: count used shadow pages on preparing path
KVM: MMU: rename 'pt_write' to 'emulate'
KVM: MMU: cleanup for FNAME(fetch)
KVM: MMU: optimize to handle dirty bit
KVM: MMU: cache mmio info on page fault path
KVM: x86: introduce vcpu_mmio_gva_to_gpa to cleanup the code
KVM: MMU: do not update slot bitmap if spte is nonpresent
KVM: MMU: fix walking shadow page table
KVM guest: KVM Steal time registration
...
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r-- | arch/x86/kernel/kvm.c | 72 | ||||
-rw-r--r-- | arch/x86/kernel/kvmclock.c | 2 | ||||
-rw-r--r-- | arch/x86/kernel/paravirt.c | 9 |
3 files changed, 83 insertions, 0 deletions
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 33c07b0b122e..a9c2116001d6 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c | |||
@@ -51,6 +51,15 @@ static int parse_no_kvmapf(char *arg) | |||
51 | 51 | ||
52 | early_param("no-kvmapf", parse_no_kvmapf); | 52 | early_param("no-kvmapf", parse_no_kvmapf); |
53 | 53 | ||
54 | static int steal_acc = 1; | ||
55 | static int parse_no_stealacc(char *arg) | ||
56 | { | ||
57 | steal_acc = 0; | ||
58 | return 0; | ||
59 | } | ||
60 | |||
61 | early_param("no-steal-acc", parse_no_stealacc); | ||
62 | |||
54 | struct kvm_para_state { | 63 | struct kvm_para_state { |
55 | u8 mmu_queue[MMU_QUEUE_SIZE]; | 64 | u8 mmu_queue[MMU_QUEUE_SIZE]; |
56 | int mmu_queue_len; | 65 | int mmu_queue_len; |
@@ -58,6 +67,8 @@ struct kvm_para_state { | |||
58 | 67 | ||
59 | static DEFINE_PER_CPU(struct kvm_para_state, para_state); | 68 | static DEFINE_PER_CPU(struct kvm_para_state, para_state); |
60 | static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64); | 69 | static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64); |
70 | static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64); | ||
71 | static int has_steal_clock = 0; | ||
61 | 72 | ||
62 | static struct kvm_para_state *kvm_para_state(void) | 73 | static struct kvm_para_state *kvm_para_state(void) |
63 | { | 74 | { |
@@ -441,6 +452,21 @@ static void __init paravirt_ops_setup(void) | |||
441 | #endif | 452 | #endif |
442 | } | 453 | } |
443 | 454 | ||
455 | static void kvm_register_steal_time(void) | ||
456 | { | ||
457 | int cpu = smp_processor_id(); | ||
458 | struct kvm_steal_time *st = &per_cpu(steal_time, cpu); | ||
459 | |||
460 | if (!has_steal_clock) | ||
461 | return; | ||
462 | |||
463 | memset(st, 0, sizeof(*st)); | ||
464 | |||
465 | wrmsrl(MSR_KVM_STEAL_TIME, (__pa(st) | KVM_MSR_ENABLED)); | ||
466 | printk(KERN_INFO "kvm-stealtime: cpu %d, msr %lx\n", | ||
467 | cpu, __pa(st)); | ||
468 | } | ||
469 | |||
444 | void __cpuinit kvm_guest_cpu_init(void) | 470 | void __cpuinit kvm_guest_cpu_init(void) |
445 | { | 471 | { |
446 | if (!kvm_para_available()) | 472 | if (!kvm_para_available()) |
@@ -457,6 +483,9 @@ void __cpuinit kvm_guest_cpu_init(void) | |||
457 | printk(KERN_INFO"KVM setup async PF for cpu %d\n", | 483 | printk(KERN_INFO"KVM setup async PF for cpu %d\n", |
458 | smp_processor_id()); | 484 | smp_processor_id()); |
459 | } | 485 | } |
486 | |||
487 | if (has_steal_clock) | ||
488 | kvm_register_steal_time(); | ||
460 | } | 489 | } |
461 | 490 | ||
462 | static void kvm_pv_disable_apf(void *unused) | 491 | static void kvm_pv_disable_apf(void *unused) |
@@ -483,6 +512,31 @@ static struct notifier_block kvm_pv_reboot_nb = { | |||
483 | .notifier_call = kvm_pv_reboot_notify, | 512 | .notifier_call = kvm_pv_reboot_notify, |
484 | }; | 513 | }; |
485 | 514 | ||
515 | static u64 kvm_steal_clock(int cpu) | ||
516 | { | ||
517 | u64 steal; | ||
518 | struct kvm_steal_time *src; | ||
519 | int version; | ||
520 | |||
521 | src = &per_cpu(steal_time, cpu); | ||
522 | do { | ||
523 | version = src->version; | ||
524 | rmb(); | ||
525 | steal = src->steal; | ||
526 | rmb(); | ||
527 | } while ((version & 1) || (version != src->version)); | ||
528 | |||
529 | return steal; | ||
530 | } | ||
531 | |||
532 | void kvm_disable_steal_time(void) | ||
533 | { | ||
534 | if (!has_steal_clock) | ||
535 | return; | ||
536 | |||
537 | wrmsr(MSR_KVM_STEAL_TIME, 0, 0); | ||
538 | } | ||
539 | |||
486 | #ifdef CONFIG_SMP | 540 | #ifdef CONFIG_SMP |
487 | static void __init kvm_smp_prepare_boot_cpu(void) | 541 | static void __init kvm_smp_prepare_boot_cpu(void) |
488 | { | 542 | { |
@@ -500,6 +554,7 @@ static void __cpuinit kvm_guest_cpu_online(void *dummy) | |||
500 | 554 | ||
501 | static void kvm_guest_cpu_offline(void *dummy) | 555 | static void kvm_guest_cpu_offline(void *dummy) |
502 | { | 556 | { |
557 | kvm_disable_steal_time(); | ||
503 | kvm_pv_disable_apf(NULL); | 558 | kvm_pv_disable_apf(NULL); |
504 | apf_task_wake_all(); | 559 | apf_task_wake_all(); |
505 | } | 560 | } |
@@ -548,6 +603,11 @@ void __init kvm_guest_init(void) | |||
548 | if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF)) | 603 | if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF)) |
549 | x86_init.irqs.trap_init = kvm_apf_trap_init; | 604 | x86_init.irqs.trap_init = kvm_apf_trap_init; |
550 | 605 | ||
606 | if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { | ||
607 | has_steal_clock = 1; | ||
608 | pv_time_ops.steal_clock = kvm_steal_clock; | ||
609 | } | ||
610 | |||
551 | #ifdef CONFIG_SMP | 611 | #ifdef CONFIG_SMP |
552 | smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu; | 612 | smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu; |
553 | register_cpu_notifier(&kvm_cpu_notifier); | 613 | register_cpu_notifier(&kvm_cpu_notifier); |
@@ -555,3 +615,15 @@ void __init kvm_guest_init(void) | |||
555 | kvm_guest_cpu_init(); | 615 | kvm_guest_cpu_init(); |
556 | #endif | 616 | #endif |
557 | } | 617 | } |
618 | |||
619 | static __init int activate_jump_labels(void) | ||
620 | { | ||
621 | if (has_steal_clock) { | ||
622 | jump_label_inc(¶virt_steal_enabled); | ||
623 | if (steal_acc) | ||
624 | jump_label_inc(¶virt_steal_rq_enabled); | ||
625 | } | ||
626 | |||
627 | return 0; | ||
628 | } | ||
629 | arch_initcall(activate_jump_labels); | ||
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 6389a6bca11b..c1a0188e29ae 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c | |||
@@ -160,6 +160,7 @@ static void __cpuinit kvm_setup_secondary_clock(void) | |||
160 | static void kvm_crash_shutdown(struct pt_regs *regs) | 160 | static void kvm_crash_shutdown(struct pt_regs *regs) |
161 | { | 161 | { |
162 | native_write_msr(msr_kvm_system_time, 0, 0); | 162 | native_write_msr(msr_kvm_system_time, 0, 0); |
163 | kvm_disable_steal_time(); | ||
163 | native_machine_crash_shutdown(regs); | 164 | native_machine_crash_shutdown(regs); |
164 | } | 165 | } |
165 | #endif | 166 | #endif |
@@ -167,6 +168,7 @@ static void kvm_crash_shutdown(struct pt_regs *regs) | |||
167 | static void kvm_shutdown(void) | 168 | static void kvm_shutdown(void) |
168 | { | 169 | { |
169 | native_write_msr(msr_kvm_system_time, 0, 0); | 170 | native_write_msr(msr_kvm_system_time, 0, 0); |
171 | kvm_disable_steal_time(); | ||
170 | native_machine_shutdown(); | 172 | native_machine_shutdown(); |
171 | } | 173 | } |
172 | 174 | ||
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 869e1aeeb71b..613a7931ecc1 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c | |||
@@ -202,6 +202,14 @@ static void native_flush_tlb_single(unsigned long addr) | |||
202 | __native_flush_tlb_single(addr); | 202 | __native_flush_tlb_single(addr); |
203 | } | 203 | } |
204 | 204 | ||
205 | struct jump_label_key paravirt_steal_enabled; | ||
206 | struct jump_label_key paravirt_steal_rq_enabled; | ||
207 | |||
208 | static u64 native_steal_clock(int cpu) | ||
209 | { | ||
210 | return 0; | ||
211 | } | ||
212 | |||
205 | /* These are in entry.S */ | 213 | /* These are in entry.S */ |
206 | extern void native_iret(void); | 214 | extern void native_iret(void); |
207 | extern void native_irq_enable_sysexit(void); | 215 | extern void native_irq_enable_sysexit(void); |
@@ -307,6 +315,7 @@ struct pv_init_ops pv_init_ops = { | |||
307 | 315 | ||
308 | struct pv_time_ops pv_time_ops = { | 316 | struct pv_time_ops pv_time_ops = { |
309 | .sched_clock = native_sched_clock, | 317 | .sched_clock = native_sched_clock, |
318 | .steal_clock = native_steal_clock, | ||
310 | }; | 319 | }; |
311 | 320 | ||
312 | struct pv_irq_ops pv_irq_ops = { | 321 | struct pv_irq_ops pv_irq_ops = { |