path: root/include
author	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-14 23:51:36 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-14 23:51:36 -0500
commit	f080480488028bcc25357f85e8ae54ccc3bb7173 (patch)
tree	8fcc943f16d26c795b3b6324b478af2d5a30285d /include
parent	eda670c626a4f53eb8ac5f20d8c10d3f0b54c583 (diff)
parent	e504c9098ed6acd9e1079c5e10e4910724ad429f (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM changes from Paolo Bonzini:
 "Here are the 3.13 KVM changes.  There was a lot of work on the PPC
  side: that the HV and emulation flavors can now coexist in a single
  kernel is probably the most interesting change from a user point of
  view.

  On the x86 side there are nested virtualization improvements and a
  few bugfixes.  ARM got transparent huge page support, improved
  overcommit, and support for big endian guests.

  Finally, there is a new interface to connect KVM with VFIO.  This
  helps with devices that use NoSnoop PCI transactions, letting the
  driver in the guest execute WBINVD instructions.  This includes some
  nVidia cards on Windows that fail to start without these patches and
  the corresponding userspace changes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (146 commits)
  kvm, vmx: Fix lazy FPU on nested guest
  arm/arm64: KVM: PSCI: propagate caller endianness to the incoming vcpu
  arm/arm64: KVM: MMIO support for BE guest
  kvm, cpuid: Fix sparse warning
  kvm: Delete prototype for non-existent function kvm_check_iopl
  kvm: Delete prototype for non-existent function complete_pio
  hung_task: add method to reset detector
  pvclock: detect watchdog reset at pvclock read
  kvm: optimize out smp_mb after srcu_read_unlock
  srcu: API for barrier after srcu read unlock
  KVM: remove vm mmap method
  KVM: IOMMU: hva align mapping page size
  KVM: x86: trace cpuid emulation when called from emulator
  KVM: emulator: cleanup decode_register_operand() a bit
  KVM: emulator: check rex prefix inside decode_register()
  KVM: x86: fix emulation of "movzbl %bpl, %eax"
  kvm_host: typo fix
  KVM: x86: emulate SAHF instruction
  MAINTAINERS: add tree for kvm.git
  Documentation/kvm: add a 00-INDEX file
  ...
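The KVM-VFIO bridge described above is driven from userspace through the generic KVM device API rather than a new ioctl. The fragment below is only an illustrative sketch of that flow, built on the KVM_DEV_TYPE_VFIO and KVM_DEV_VFIO_GROUP* constants added in this merge; the function name kvm_attach_vfio_group and the vm_fd/vfio_group_fd parameters are hypothetical and assume the caller has already opened a VM fd and a VFIO group fd.

/*
 * Illustrative sketch only (not part of this merge): hand a VFIO group fd
 * to KVM via the new VFIO pseudo-device so the kernel can track
 * noncoherent DMA.  vm_fd and vfio_group_fd are assumed to be open already.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int kvm_attach_vfio_group(int vm_fd, int vfio_group_fd)
{
	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
	struct kvm_device_attr attr = {
		.group = KVM_DEV_VFIO_GROUP,
		.attr  = KVM_DEV_VFIO_GROUP_ADD,
		.addr  = (__u64)(unsigned long)&vfio_group_fd,
	};

	/* Create the per-VM VFIO device; KVM fills in cd.fd on success. */
	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
		return -1;

	/* Register the VFIO group with the device. */
	if (ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
		return -1;

	return cd.fd;	/* keep open for the lifetime of the VM */
}

With a group registered this way, KVM can learn whether the device's DMA bypasses the cache and let the guest's WBINVD behave as the commit message describes.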
Diffstat (limited to 'include')
-rw-r--r--	include/linux/kvm_host.h	42
-rw-r--r--	include/linux/sched.h	8
-rw-r--r--	include/linux/srcu.h	14
-rw-r--r--	include/trace/events/kvm.h	10
-rw-r--r--	include/uapi/linux/kvm.h	11
5 files changed, 63 insertions, 22 deletions
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0fbbc7aa02cb..9523d2ad7535 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -142,7 +142,7 @@ struct kvm;
 struct kvm_vcpu;
 extern struct kmem_cache *kvm_vcpu_cache;
 
-extern raw_spinlock_t kvm_lock;
+extern spinlock_t kvm_lock;
 extern struct list_head vm_list;
 
 struct kvm_io_range {
@@ -189,8 +189,7 @@ struct kvm_async_pf {
 	gva_t gva;
 	unsigned long addr;
 	struct kvm_arch_async_pf arch;
-	struct page *page;
-	bool done;
+	bool wakeup_all;
 };
 
 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
@@ -508,9 +507,10 @@ int kvm_set_memory_region(struct kvm *kvm,
 			  struct kvm_userspace_memory_region *mem);
 int __kvm_set_memory_region(struct kvm *kvm,
 			    struct kvm_userspace_memory_region *mem);
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont);
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+			    unsigned long npages);
 void kvm_arch_memslots_updated(struct kvm *kvm);
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
@@ -671,6 +671,25 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
 }
 #endif
 
+#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
+void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
+void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
+bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
+#else
+static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
+{
+}
+
+static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
+{
+}
+
+static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
+{
+	return false;
+}
+#endif
+
 static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
 {
 #ifdef __KVM_HAVE_ARCH_WQP
@@ -747,9 +766,6 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
 int kvm_request_irq_source_id(struct kvm *kvm);
 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
 
-/* For vcpu->arch.iommu_flags */
-#define KVM_IOMMU_CACHE_COHERENCY	0x1
-
 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
 int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
 void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
@@ -789,7 +805,7 @@ static inline void kvm_guest_enter(void)
 
 	/* KVM does not hold any references to rcu protected data when it
 	 * switches CPU into a guest mode. In fact switching to a guest mode
-	 * is very similar to exiting to userspase from rcu point of view. In
+	 * is very similar to exiting to userspace from rcu point of view. In
 	 * addition CPU may stay in a guest mode for quite a long time (up to
 	 * one time slice). Lets treat guest mode as quiescent state, just like
 	 * we do with user-mode execution.
@@ -842,13 +858,6 @@ static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
 	return gfn_to_memslot(kvm, gfn)->id;
 }
 
-static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
-{
-	/* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
-	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
-		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
-}
-
 static inline gfn_t
 hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
 {
@@ -1066,6 +1075,7 @@ struct kvm_device *kvm_device_from_filp(struct file *filp);
 
 extern struct kvm_device_ops kvm_mpic_ops;
 extern struct kvm_device_ops kvm_xics_ops;
+extern struct kvm_device_ops kvm_vfio_ops;
 
 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
 
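The __KVM_HAVE_ARCH_NONCOHERENT_DMA hooks added above are only declarations plus no-op fallbacks; an architecture that opts in has to supply real bodies. A minimal sketch of such a backing, assuming a hypothetical per-VM atomic counter named noncoherent_dma_count (roughly the shape x86 appears to use in this series, not copied from it), could look like this:

/* Sketch under the stated assumption of a kvm->arch.noncoherent_dma_count field. */
void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
	/* One more assigned device whose DMA may bypass the cache. */
	atomic_inc(&kvm->arch.noncoherent_dma_count);
}

void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
	atomic_dec(&kvm->arch.noncoherent_dma_count);
}

bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	/* True while at least one noncoherent device is attached. */
	return atomic_read(&kvm->arch.noncoherent_dma_count) > 0;
}

The expected callers are the new kvm_vfio_ops device declared just above and the legacy device-assignment path; this per-VM tracking appears to supersede the KVM_IOMMU_CACHE_COHERENCY flag removed earlier in this file.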
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f7efc8604652..6f7ffa460089 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -286,6 +286,14 @@ static inline void lockup_detector_init(void)
 }
 #endif
 
+#ifdef CONFIG_DETECT_HUNG_TASK
+void reset_hung_task_detector(void);
+#else
+static inline void reset_hung_task_detector(void)
+{
+}
+#endif
+
 /* Attach to any functions which should be ignored in wchan output. */
 #define __sched		__attribute__((__section__(".sched.text")))
 
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index c114614ed172..9b058eecd403 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -237,4 +237,18 @@ static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
 	__srcu_read_unlock(sp, idx);
 }
 
+/**
+ * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
+ *
+ * Converts the preceding srcu_read_unlock into a two-way memory barrier.
+ *
+ * Call this after srcu_read_unlock, to guarantee that all memory operations
+ * that occur after smp_mb__after_srcu_read_unlock will appear to happen after
+ * the preceding srcu_read_unlock.
+ */
+static inline void smp_mb__after_srcu_read_unlock(void)
+{
+	/* __srcu_read_unlock has smp_mb() internally so nothing to do here. */
+}
+
 #endif
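The helper above exists so that a caller which used to follow srcu_read_unlock() with an explicit smp_mb() (as KVM did, per the "optimize out smp_mb after srcu_read_unlock" commit in this pull) can document the required ordering without paying for a second barrier. A usage sketch with purely illustrative names (my_srcu, wakeup_needed, do_wakeup):

/* Usage sketch only; my_srcu, wakeup_needed and do_wakeup are illustrative. */
static struct srcu_struct my_srcu;
static int wakeup_needed;

static void reader(void)
{
	int idx = srcu_read_lock(&my_srcu);
	/* ... access SRCU-protected data ... */
	srcu_read_unlock(&my_srcu, idx);

	/*
	 * No extra barrier is emitted here: __srcu_read_unlock() already
	 * ran smp_mb(), and this call records that we rely on it.
	 */
	smp_mb__after_srcu_read_unlock();

	if (wakeup_needed)	/* this load is ordered after the unlock */
		do_wakeup();
}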
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 7005d1109ec9..131a0bda7aec 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -296,23 +296,21 @@ DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
 
 TRACE_EVENT(
 	kvm_async_pf_completed,
-	TP_PROTO(unsigned long address, struct page *page, u64 gva),
-	TP_ARGS(address, page, gva),
+	TP_PROTO(unsigned long address, u64 gva),
+	TP_ARGS(address, gva),
 
 	TP_STRUCT__entry(
 		__field(unsigned long, address)
-		__field(pfn_t, pfn)
 		__field(u64, gva)
 		),
 
 	TP_fast_assign(
 		__entry->address = address;
-		__entry->pfn = page ? page_to_pfn(page) : 0;
 		__entry->gva = gva;
 		),
 
-	TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
-		  __entry->address, __entry->pfn)
+	TP_printk("gva %#llx address %#lx", __entry->gva,
+		  __entry->address)
 );
 
 #endif
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 99c25338ede8..902f12461873 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -518,6 +518,10 @@ struct kvm_ppc_smmu_info {
 /* machine type bits, to be used as argument to KVM_CREATE_VM */
 #define KVM_VM_S390_UCONTROL	1
 
+/* on ppc, 0 indicate default, 1 should force HV and 2 PR */
+#define KVM_VM_PPC_HV 1
+#define KVM_VM_PPC_PR 2
+
 #define KVM_S390_SIE_PAGE_OFFSET 1
 
 /*
@@ -541,6 +545,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_TRACE_ENABLE          __KVM_DEPRECATED_MAIN_W_0x06
 #define KVM_TRACE_PAUSE           __KVM_DEPRECATED_MAIN_0x07
 #define KVM_TRACE_DISABLE         __KVM_DEPRECATED_MAIN_0x08
+#define KVM_GET_EMULATED_CPUID	  _IOWR(KVMIO, 0x09, struct kvm_cpuid2)
 
 /*
  * Extension capability list.
@@ -668,6 +673,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_IRQ_XICS 92
 #define KVM_CAP_ARM_EL1_32BIT 93
 #define KVM_CAP_SPAPR_MULTITCE 94
+#define KVM_CAP_EXT_EMUL_CPUID 95
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -843,6 +849,10 @@ struct kvm_device_attr {
 #define KVM_DEV_TYPE_FSL_MPIC_20	1
 #define KVM_DEV_TYPE_FSL_MPIC_42	2
 #define KVM_DEV_TYPE_XICS		3
+#define KVM_DEV_TYPE_VFIO		4
+#define  KVM_DEV_VFIO_GROUP		1
+#define   KVM_DEV_VFIO_GROUP_ADD	1
+#define   KVM_DEV_VFIO_GROUP_DEL	2
 
 /*
  * ioctls for VM fds
@@ -1012,6 +1022,7 @@ struct kvm_s390_ucas_mapping {
 /* VM is being stopped by host */
 #define KVM_KVMCLOCK_CTRL	  _IO(KVMIO,   0xad)
 #define KVM_ARM_VCPU_INIT	  _IOW(KVMIO,  0xae, struct kvm_vcpu_init)
+#define KVM_ARM_PREFERRED_TARGET  _IOR(KVMIO,  0xaf, struct kvm_vcpu_init)
 #define KVM_GET_REG_LIST	  _IOWR(KVMIO, 0xb0, struct kvm_reg_list)
 
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU	(1 << 0)