diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-28 17:35:31 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-28 17:35:31 -0400 |
commit | 2e7580b0e75d771d93e24e681031a165b1d31071 (patch) | |
tree | d9449702609eeaab28913a43b5a4434667e09d43 /include/linux/kvm_host.h | |
parent | d25413efa9536e2f425ea45c7720598035c597bc (diff) | |
parent | cf9eeac46350b8b43730b7dc5e999757bed089a4 (diff) |
Merge branch 'kvm-updates/3.4' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Avi Kivity:
"Changes include timekeeping improvements, support for assigning host
PCI devices that share interrupt lines, s390 user-controlled guests, a
large ppc update, and random fixes."
This is with the sign-offs fixed; hopefully next merge window we won't
have rebased commits.
* 'kvm-updates/3.4' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (130 commits)
KVM: Convert intx_mask_lock to spin lock
KVM: x86: fix kvm_write_tsc() TSC matching thinko
x86: kvmclock: abstract save/restore sched_clock_state
KVM: nVMX: Fix erroneous exception bitmap check
KVM: Ignore the writes to MSR_K7_HWCR(3)
KVM: MMU: make use of ->root_level in reset_rsvds_bits_mask
KVM: PMU: add proper support for fixed counter 2
KVM: PMU: Fix raw event check
KVM: PMU: warn when pin control is set in eventsel msr
KVM: VMX: Fix delayed load of shared MSRs
KVM: use correct tlbs dirty type in cmpxchg
KVM: Allow host IRQ sharing for assigned PCI 2.3 devices
KVM: Ensure all vcpus are consistent with in-kernel irqchip settings
KVM: x86 emulator: Allow PM/VM86 switch during task switch
KVM: SVM: Fix CPL updates
KVM: x86 emulator: VM86 segments must have DPL 3
KVM: x86 emulator: Fix task switch privilege checks
arch/powerpc/kvm/book3s_hv.c: included linux/sched.h twice
KVM: x86 emulator: correctly mask pmc index bits in RDPMC instruction emulation
KVM: mmu_notifier: Flush TLBs before releasing mmu_lock
...
Diffstat (limited to 'include/linux/kvm_host.h')
-rw-r--r-- | include/linux/kvm_host.h | 69 |
1 file changed, 57 insertions, 12 deletions
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index ca1b153585d..665a260c7e0 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -172,11 +172,6 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) | |||
172 | */ | 172 | */ |
173 | #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1) | 173 | #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1) |
174 | 174 | ||
175 | struct kvm_lpage_info { | ||
176 | unsigned long rmap_pde; | ||
177 | int write_count; | ||
178 | }; | ||
179 | |||
180 | struct kvm_memory_slot { | 175 | struct kvm_memory_slot { |
181 | gfn_t base_gfn; | 176 | gfn_t base_gfn; |
182 | unsigned long npages; | 177 | unsigned long npages; |
@@ -185,7 +180,7 @@ struct kvm_memory_slot { | |||
185 | unsigned long *dirty_bitmap; | 180 | unsigned long *dirty_bitmap; |
186 | unsigned long *dirty_bitmap_head; | 181 | unsigned long *dirty_bitmap_head; |
187 | unsigned long nr_dirty_pages; | 182 | unsigned long nr_dirty_pages; |
188 | struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1]; | 183 | struct kvm_arch_memory_slot arch; |
189 | unsigned long userspace_addr; | 184 | unsigned long userspace_addr; |
190 | int user_alloc; | 185 | int user_alloc; |
191 | int id; | 186 | int id; |
@@ -377,6 +372,9 @@ int kvm_set_memory_region(struct kvm *kvm, | |||
377 | int __kvm_set_memory_region(struct kvm *kvm, | 372 | int __kvm_set_memory_region(struct kvm *kvm, |
378 | struct kvm_userspace_memory_region *mem, | 373 | struct kvm_userspace_memory_region *mem, |
379 | int user_alloc); | 374 | int user_alloc); |
375 | void kvm_arch_free_memslot(struct kvm_memory_slot *free, | ||
376 | struct kvm_memory_slot *dont); | ||
377 | int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages); | ||
380 | int kvm_arch_prepare_memory_region(struct kvm *kvm, | 378 | int kvm_arch_prepare_memory_region(struct kvm *kvm, |
381 | struct kvm_memory_slot *memslot, | 379 | struct kvm_memory_slot *memslot, |
382 | struct kvm_memory_slot old, | 380 | struct kvm_memory_slot old, |
@@ -386,6 +384,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, | |||
386 | struct kvm_userspace_memory_region *mem, | 384 | struct kvm_userspace_memory_region *mem, |
387 | struct kvm_memory_slot old, | 385 | struct kvm_memory_slot old, |
388 | int user_alloc); | 386 | int user_alloc); |
387 | bool kvm_largepages_enabled(void); | ||
389 | void kvm_disable_largepages(void); | 388 | void kvm_disable_largepages(void); |
390 | void kvm_arch_flush_shadow(struct kvm *kvm); | 389 | void kvm_arch_flush_shadow(struct kvm *kvm); |
391 | 390 | ||
@@ -451,6 +450,7 @@ long kvm_arch_dev_ioctl(struct file *filp, | |||
451 | unsigned int ioctl, unsigned long arg); | 450 | unsigned int ioctl, unsigned long arg); |
452 | long kvm_arch_vcpu_ioctl(struct file *filp, | 451 | long kvm_arch_vcpu_ioctl(struct file *filp, |
453 | unsigned int ioctl, unsigned long arg); | 452 | unsigned int ioctl, unsigned long arg); |
453 | int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf); | ||
454 | 454 | ||
455 | int kvm_dev_ioctl_check_extension(long ext); | 455 | int kvm_dev_ioctl_check_extension(long ext); |
456 | 456 | ||
@@ -521,7 +521,7 @@ static inline void kvm_arch_free_vm(struct kvm *kvm) | |||
521 | } | 521 | } |
522 | #endif | 522 | #endif |
523 | 523 | ||
524 | int kvm_arch_init_vm(struct kvm *kvm); | 524 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type); |
525 | void kvm_arch_destroy_vm(struct kvm *kvm); | 525 | void kvm_arch_destroy_vm(struct kvm *kvm); |
526 | void kvm_free_all_assigned_devices(struct kvm *kvm); | 526 | void kvm_free_all_assigned_devices(struct kvm *kvm); |
527 | void kvm_arch_sync_events(struct kvm *kvm); | 527 | void kvm_arch_sync_events(struct kvm *kvm); |
@@ -547,6 +547,7 @@ struct kvm_assigned_dev_kernel { | |||
547 | unsigned int entries_nr; | 547 | unsigned int entries_nr; |
548 | int host_irq; | 548 | int host_irq; |
549 | bool host_irq_disabled; | 549 | bool host_irq_disabled; |
550 | bool pci_2_3; | ||
550 | struct msix_entry *host_msix_entries; | 551 | struct msix_entry *host_msix_entries; |
551 | int guest_irq; | 552 | int guest_irq; |
552 | struct msix_entry *guest_msix_entries; | 553 | struct msix_entry *guest_msix_entries; |
@@ -556,6 +557,7 @@ struct kvm_assigned_dev_kernel { | |||
556 | struct pci_dev *dev; | 557 | struct pci_dev *dev; |
557 | struct kvm *kvm; | 558 | struct kvm *kvm; |
558 | spinlock_t intx_lock; | 559 | spinlock_t intx_lock; |
560 | spinlock_t intx_mask_lock; | ||
559 | char irq_name[32]; | 561 | char irq_name[32]; |
560 | struct pci_saved_state *pci_saved_state; | 562 | struct pci_saved_state *pci_saved_state; |
561 | }; | 563 | }; |
@@ -651,11 +653,43 @@ static inline void kvm_guest_exit(void) | |||
651 | current->flags &= ~PF_VCPU; | 653 | current->flags &= ~PF_VCPU; |
652 | } | 654 | } |
653 | 655 | ||
656 | /* | ||
657 | * search_memslots() and __gfn_to_memslot() are here because they are | ||
658 | * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c. | ||
659 | * gfn_to_memslot() itself isn't here as an inline because that would | ||
660 | * bloat other code too much. | ||
661 | */ | ||
662 | static inline struct kvm_memory_slot * | ||
663 | search_memslots(struct kvm_memslots *slots, gfn_t gfn) | ||
664 | { | ||
665 | struct kvm_memory_slot *memslot; | ||
666 | |||
667 | kvm_for_each_memslot(memslot, slots) | ||
668 | if (gfn >= memslot->base_gfn && | ||
669 | gfn < memslot->base_gfn + memslot->npages) | ||
670 | return memslot; | ||
671 | |||
672 | return NULL; | ||
673 | } | ||
674 | |||
675 | static inline struct kvm_memory_slot * | ||
676 | __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) | ||
677 | { | ||
678 | return search_memslots(slots, gfn); | ||
679 | } | ||
680 | |||
654 | static inline int memslot_id(struct kvm *kvm, gfn_t gfn) | 681 | static inline int memslot_id(struct kvm *kvm, gfn_t gfn) |
655 | { | 682 | { |
656 | return gfn_to_memslot(kvm, gfn)->id; | 683 | return gfn_to_memslot(kvm, gfn)->id; |
657 | } | 684 | } |
658 | 685 | ||
686 | static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level) | ||
687 | { | ||
688 | /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */ | ||
689 | return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) - | ||
690 | (base_gfn >> KVM_HPAGE_GFN_SHIFT(level)); | ||
691 | } | ||
692 | |||
659 | static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, | 693 | static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, |
660 | gfn_t gfn) | 694 | gfn_t gfn) |
661 | { | 695 | { |
@@ -702,12 +736,16 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se | |||
702 | if (unlikely(vcpu->kvm->mmu_notifier_count)) | 736 | if (unlikely(vcpu->kvm->mmu_notifier_count)) |
703 | return 1; | 737 | return 1; |
704 | /* | 738 | /* |
705 | * Both reads happen under the mmu_lock and both values are | 739 | * Ensure the read of mmu_notifier_count happens before the read |
706 | * modified under mmu_lock, so there's no need of smb_rmb() | 740 | * of mmu_notifier_seq. This interacts with the smp_wmb() in |
707 | * here in between, otherwise mmu_notifier_count should be | 741 | * mmu_notifier_invalidate_range_end to make sure that the caller |
708 | * read before mmu_notifier_seq, see | 742 | * either sees the old (non-zero) value of mmu_notifier_count or |
709 | * mmu_notifier_invalidate_range_end write side. | 743 | * the new (incremented) value of mmu_notifier_seq. |
744 | * PowerPC Book3s HV KVM calls this under a per-page lock | ||
745 | * rather than under kvm->mmu_lock, for scalability, so | ||
746 | * can't rely on kvm->mmu_lock to keep things ordered. | ||
710 | */ | 747 | */ |
748 | smp_rmb(); | ||
711 | if (vcpu->kvm->mmu_notifier_seq != mmu_seq) | 749 | if (vcpu->kvm->mmu_notifier_seq != mmu_seq) |
712 | return 1; | 750 | return 1; |
713 | return 0; | 751 | return 0; |
@@ -770,6 +808,13 @@ static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) | |||
770 | { | 808 | { |
771 | return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id; | 809 | return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id; |
772 | } | 810 | } |
811 | |||
812 | bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu); | ||
813 | |||
814 | #else | ||
815 | |||
816 | static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; } | ||
817 | |||
773 | #endif | 818 | #endif |
774 | 819 | ||
775 | #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT | 820 | #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT |