Diffstat (limited to 'include/linux/kvm_host.h')
 include/linux/kvm_host.h | 76 ++++++++++++++++++++++++++++++++++------
 1 file changed, 64 insertions(+), 12 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 900c76337e8f..72cbf08d45fb 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -13,6 +13,7 @@
 #include <linux/spinlock.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
+#include <linux/bug.h>
 #include <linux/mm.h>
 #include <linux/mmu_notifier.h>
 #include <linux/preempt.h>
@@ -171,11 +172,6 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
  */
 #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
 
-struct kvm_lpage_info {
-	unsigned long rmap_pde;
-	int write_count;
-};
-
 struct kvm_memory_slot {
 	gfn_t base_gfn;
 	unsigned long npages;
@@ -184,7 +180,7 @@ struct kvm_memory_slot {
 	unsigned long *dirty_bitmap;
 	unsigned long *dirty_bitmap_head;
 	unsigned long nr_dirty_pages;
-	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
+	struct kvm_arch_memory_slot arch;
 	unsigned long userspace_addr;
 	int user_alloc;
 	int id;
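The two hunks above move large-page tracking out of the generic memslot: the old kvm_lpage_info array becomes the opaque, architecture-owned kvm_arch_memory_slot. A rough sketch of what an architecture that keeps the old fields might now declare in its own kvm_host.h; the layout follows the deleted generic struct and is illustrative, not quoted from this diff:

/* Hypothetical arch-side definition; the generic header only sees
 * the opaque struct kvm_arch_memory_slot embedded in the memslot. */
struct kvm_lpage_info {
	unsigned long rmap_pde;	/* reverse-map entry for the huge PDE */
	int write_count;	/* >0 forces base-size pages in this range */
};

struct kvm_arch_memory_slot {
	/* one tracking array per huge-page size above the base size */
	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
};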
@@ -376,6 +372,9 @@ int kvm_set_memory_region(struct kvm *kvm,
 int __kvm_set_memory_region(struct kvm *kvm,
 			    struct kvm_userspace_memory_region *mem,
 			    int user_alloc);
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+			   struct kvm_memory_slot *dont);
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
 				   struct kvm_memory_slot old,
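kvm_arch_create_memslot() and kvm_arch_free_memslot() are the allocation hooks that pair with the new arch field: generic code asks the architecture to populate or tear down its per-slot metadata. A minimal sketch of the expected calling convention; the example_* names are local to this sketch, and the real call sites live in virt/kvm/kvm_main.c:

/* Illustrative callers only; error handling trimmed. */
static int example_install_slot(struct kvm_memory_slot *new,
				unsigned long npages)
{
	new->npages = npages;
	/* Architecture allocates its per-slot tracking data. */
	return kvm_arch_create_memslot(new, npages);
}

static void example_remove_slot(struct kvm_memory_slot *free,
				struct kvm_memory_slot *dont)
{
	/* Frees arch metadata in 'free' that 'dont' does not share;
	 * dont == NULL means free everything. */
	kvm_arch_free_memslot(free, dont);
}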
@@ -385,6 +384,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				   struct kvm_userspace_memory_region *mem,
 				   struct kvm_memory_slot old,
 				   int user_alloc);
+bool kvm_largepages_enabled(void);
 void kvm_disable_largepages(void);
 void kvm_arch_flush_shadow(struct kvm *kvm);
 
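kvm_largepages_enabled() is the read side of the switch that kvm_disable_largepages() flips. A plausible consumer, where every name other than the declared function is hypothetical:

/* Sketch: a fault path picking a mapping size. */
static int example_mapping_level(struct kvm *kvm, gfn_t gfn)
{
	if (!kvm_largepages_enabled())
		return 1;	/* base (4K) level only */
	return example_host_level(kvm, gfn);	/* hypothetical helper */
}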
@@ -450,6 +450,7 @@ long kvm_arch_dev_ioctl(struct file *filp,
 			  unsigned int ioctl, unsigned long arg);
 long kvm_arch_vcpu_ioctl(struct file *filp,
 			 unsigned int ioctl, unsigned long arg);
+int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
 
 int kvm_dev_ioctl_check_extension(long ext);
 
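kvm_arch_vcpu_fault() lets an architecture satisfy page faults on the vcpu's mmap region beyond the generically mapped pages (kvm_run and friends). A minimal sketch for an architecture with no extra vcpu pages, which is a plausible common case rather than any particular port's code:

#include <linux/kvm_host.h>
#include <linux/mm.h>

/* Sketch: refuse faults that the generic handler did not cover. */
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}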
@@ -520,7 +521,7 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
 }
 #endif
 
-int kvm_arch_init_vm(struct kvm *kvm);
+int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
 void kvm_arch_destroy_vm(struct kvm *kvm);
 void kvm_free_all_assigned_devices(struct kvm *kvm);
 void kvm_arch_sync_events(struct kvm *kvm);
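The new unsigned long argument to kvm_arch_init_vm() carries a VM type selector down from userspace. Assuming it follows the KVM_CREATE_VM ioctl argument, which is how the generic code is usually wired, the plumbing looks roughly like this simplified sketch:

/* Sketch of the flow in virt/kvm/kvm_main.c; details elided. */
static struct kvm *kvm_create_vm(unsigned long type)
{
	struct kvm *kvm = kvm_arch_alloc_vm();
	int r;

	if (!kvm)
		return ERR_PTR(-ENOMEM);
	/* The architecture validates the type and may reject it. */
	r = kvm_arch_init_vm(kvm, type);
	if (r) {
		kvm_arch_free_vm(kvm);
		return ERR_PTR(r);
	}
	/* ... remaining VM setup elided ... */
	return kvm;
}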
@@ -546,6 +547,7 @@ struct kvm_assigned_dev_kernel {
 	unsigned int entries_nr;
 	int host_irq;
 	bool host_irq_disabled;
+	bool pci_2_3;
 	struct msix_entry *host_msix_entries;
 	int guest_irq;
 	struct msix_entry *guest_msix_entries;
@@ -555,6 +557,7 @@ struct kvm_assigned_dev_kernel {
 	struct pci_dev *dev;
 	struct kvm *kvm;
 	spinlock_t intx_lock;
+	spinlock_t intx_mask_lock;
 	char irq_name[32];
 	struct pci_saved_state *pci_saved_state;
 };
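The pci_2_3 flag and intx_mask_lock support masking INTx at the device, a PCI 2.3 feature, instead of disabling the whole host IRQ line. A hedged sketch of the kind of handler this enables, using the pci_check_and_mask_intx() helper from the PCI core; the handler name and exact locking are illustrative:

static irqreturn_t example_intx_handler(int irq, void *dev_id)
{
	struct kvm_assigned_dev_kernel *adev = dev_id;
	int handled = 1;

	spin_lock(&adev->intx_lock);
	if (adev->pci_2_3)
		/* Mask INTx in the command register, but only if this
		 * device actually raised the interrupt (shared lines). */
		handled = pci_check_and_mask_intx(adev->dev);
	else
		disable_irq_nosync(irq);
	spin_unlock(&adev->intx_lock);

	return handled ? IRQ_WAKE_THREAD : IRQ_NONE;
}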
@@ -593,6 +596,7 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
 
 #ifdef CONFIG_IOMMU_API
 int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
+void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
 int kvm_iommu_map_guest(struct kvm *kvm);
 int kvm_iommu_unmap_guest(struct kvm *kvm);
 int kvm_assign_device(struct kvm *kvm,
@@ -606,6 +610,11 @@ static inline int kvm_iommu_map_pages(struct kvm *kvm,
 	return 0;
 }
 
+static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
+					 struct kvm_memory_slot *slot)
+{
+}
+
 static inline int kvm_iommu_map_guest(struct kvm *kvm)
 {
 	return -ENODEV;
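With the empty stub above, slot-teardown paths can call kvm_iommu_unmap_pages() unconditionally rather than hiding the call behind #ifdef CONFIG_IOMMU_API. An illustrative caller, not part of this diff:

static void example_slot_teardown(struct kvm *kvm,
				  struct kvm_memory_slot *slot)
{
	/* Compiles to nothing without CONFIG_IOMMU_API. */
	kvm_iommu_unmap_pages(kvm, slot);
}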
@@ -650,11 +659,43 @@ static inline void kvm_guest_exit(void)
 	current->flags &= ~PF_VCPU;
 }
 
+/*
+ * search_memslots() and __gfn_to_memslot() are here because they are
+ * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
+ * gfn_to_memslot() itself isn't here as an inline because that would
+ * bloat other code too much.
+ */
+static inline struct kvm_memory_slot *
+search_memslots(struct kvm_memslots *slots, gfn_t gfn)
+{
+	struct kvm_memory_slot *memslot;
+
+	kvm_for_each_memslot(memslot, slots)
+		if (gfn >= memslot->base_gfn &&
+		    gfn < memslot->base_gfn + memslot->npages)
+			return memslot;
+
+	return NULL;
+}
+
+static inline struct kvm_memory_slot *
+__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
+{
+	return search_memslots(slots, gfn);
+}
+
 static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
 {
 	return gfn_to_memslot(kvm, gfn)->id;
 }
 
+static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
+{
+	/* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
+	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
+	       (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
+}
+
 static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
 					       gfn_t gfn)
 {
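gfn_to_index() maps a gfn to its position within a slot's per-level tracking array by shifting both the gfn and the slot base down to huge-page granularity before subtracting. A self-contained userspace illustration, assuming the x86-style shift of 9 bits per level above the base; the macro value here is an assumption, not part of this header:

#include <stdio.h>

/* Assumed x86-style definition: level 1 = 4K, each level adds 9 bits. */
#define KVM_HPAGE_GFN_SHIFT(level)	(((level) - 1) * 9)

static unsigned long gfn_to_index(unsigned long gfn,
				  unsigned long base_gfn, int level)
{
	/* At level 1 the shift is 0, so the index is just the gfn's
	 * offset within the slot. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
	       (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

int main(void)
{
	/* Slot based at gfn 0x1000: gfn 0x1a00 falls in 2M-page slot
	 * index 5 (0x1a00 >> 9 = 13, 0x1000 >> 9 = 8, 13 - 8 = 5). */
	printf("%lu\n", gfn_to_index(0x1a00, 0x1000, 2));
	return 0;
}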
@@ -701,12 +742,16 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
 	if (unlikely(vcpu->kvm->mmu_notifier_count))
 		return 1;
 	/*
-	 * Both reads happen under the mmu_lock and both values are
-	 * modified under mmu_lock, so there's no need of smb_rmb()
-	 * here in between, otherwise mmu_notifier_count should be
-	 * read before mmu_notifier_seq, see
-	 * mmu_notifier_invalidate_range_end write side.
+	 * Ensure the read of mmu_notifier_count happens before the read
+	 * of mmu_notifier_seq. This interacts with the smp_wmb() in
+	 * mmu_notifier_invalidate_range_end to make sure that the caller
+	 * either sees the old (non-zero) value of mmu_notifier_count or
+	 * the new (incremented) value of mmu_notifier_seq.
+	 * PowerPC Book3s HV KVM calls this under a per-page lock
+	 * rather than under kvm->mmu_lock, for scalability, so
+	 * can't rely on kvm->mmu_lock to keep things ordered.
 	 */
+	smp_rmb();
 	if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
 		return 1;
 	return 0;
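The added smp_rmb() is what makes mmu_notifier_retry() usable without holding kvm->mmu_lock across both reads. The canonical caller samples mmu_notifier_seq before the possibly sleeping pfn lookup and re-checks under the lock; a simplified sketch of that retry protocol, with gfn_to_pfn() and kvm_release_pfn_clean() as the only real KVM calls assumed:

/* Sketch of a fault handler using the retry protocol. */
static int example_map_fault(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	unsigned long mmu_seq;
	pfn_t pfn;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();	/* read seq before doing the pfn lookup */

	pfn = gfn_to_pfn(vcpu->kvm, gfn);	/* may sleep */

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq)) {
		/* An invalidation raced with the lookup: give the
		 * page back and let the caller fault again. */
		spin_unlock(&vcpu->kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		return -EAGAIN;
	}
	/* ... install the translation under mmu_lock ... */
	spin_unlock(&vcpu->kvm->mmu_lock);
	return 0;
}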
@@ -769,6 +814,13 @@ static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
 {
 	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
 }
+
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
+
+#else
+
+static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
+
 #endif
 
 #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT