Diffstat (limited to 'include')
-rw-r--r--   include/linux/kvm_host.h    | 166
-rw-r--r--   include/trace/events/kvm.h  |  12
-rw-r--r--   include/uapi/linux/kvm.h    |  45
3 files changed, 161 insertions(+), 62 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c13958251927..f0eea07d2c2b 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -117,14 +117,13 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQ_APF_HALT           12
 #define KVM_REQ_STEAL_UPDATE       13
 #define KVM_REQ_NMI                14
-#define KVM_REQ_IMMEDIATE_EXIT     15
-#define KVM_REQ_PMU                16
-#define KVM_REQ_PMI                17
-#define KVM_REQ_WATCHDOG           18
-#define KVM_REQ_MASTERCLOCK_UPDATE 19
-#define KVM_REQ_MCLOCK_INPROGRESS  20
-#define KVM_REQ_EPR_EXIT           21
-#define KVM_REQ_EOIBITMAP          22
+#define KVM_REQ_PMU                15
+#define KVM_REQ_PMI                16
+#define KVM_REQ_WATCHDOG           17
+#define KVM_REQ_MASTERCLOCK_UPDATE 18
+#define KVM_REQ_MCLOCK_INPROGRESS  19
+#define KVM_REQ_EPR_EXIT           20
+#define KVM_REQ_SCAN_IOAPIC        21
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID		0
 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1
@@ -133,6 +132,9 @@ struct kvm;
 struct kvm_vcpu;
 extern struct kmem_cache *kvm_vcpu_cache;
 
+extern raw_spinlock_t kvm_lock;
+extern struct list_head vm_list;
+
 struct kvm_io_range {
 	gpa_t addr;
 	int len;
@@ -149,6 +151,7 @@ struct kvm_io_bus {
 enum kvm_bus {
 	KVM_MMIO_BUS,
 	KVM_PIO_BUS,
+	KVM_VIRTIO_CCW_NOTIFY_BUS,
 	KVM_NR_BUSES
 };
 
@@ -252,6 +255,7 @@ struct kvm_vcpu {
 		bool dy_eligible;
 	} spin_loop;
 #endif
+	bool preempted;
 	struct kvm_vcpu_arch arch;
 };
 
@@ -285,7 +289,8 @@ struct kvm_kernel_irq_routing_entry {
 	u32 gsi;
 	u32 type;
 	int (*set)(struct kvm_kernel_irq_routing_entry *e,
-		   struct kvm *kvm, int irq_source_id, int level);
+		   struct kvm *kvm, int irq_source_id, int level,
+		   bool line_status);
 	union {
 		struct {
 			unsigned irqchip;
@@ -296,10 +301,10 @@ struct kvm_kernel_irq_routing_entry {
 	struct hlist_node link;
 };
 
-#ifdef __KVM_HAVE_IOAPIC
+#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
 
 struct kvm_irq_routing_table {
-	int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS];
+	int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
 	struct kvm_kernel_irq_routing_entry *rt_entries;
 	u32 nr_rt_entries;
 	/*
@@ -385,6 +390,7 @@ struct kvm {
 	long mmu_notifier_count;
 #endif
 	long tlbs_dirty;
+	struct list_head devices;
 };
 
 #define kvm_err(fmt, ...) \
@@ -424,6 +430,19 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
 int __must_check vcpu_load(struct kvm_vcpu *vcpu);
 void vcpu_put(struct kvm_vcpu *vcpu);
 
+#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
+int kvm_irqfd_init(void);
+void kvm_irqfd_exit(void);
+#else
+static inline int kvm_irqfd_init(void)
+{
+	return 0;
+}
+
+static inline void kvm_irqfd_exit(void)
+{
+}
+#endif
 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 		  struct module *module);
 void kvm_exit(void);
@@ -452,24 +471,39 @@ id_to_memslot(struct kvm_memslots *slots, int id)
 	return slot;
 }
 
+/*
+ * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
+ * - create a new memory slot
+ * - delete an existing memory slot
+ * - modify an existing memory slot
+ *   -- move it in the guest physical memory space
+ *   -- just change its flags
+ *
+ * Since flags can be changed by some of these operations, the following
+ * differentiation is the best we can do for __kvm_set_memory_region():
+ */
+enum kvm_mr_change {
+	KVM_MR_CREATE,
+	KVM_MR_DELETE,
+	KVM_MR_MOVE,
+	KVM_MR_FLAGS_ONLY,
+};
+
 int kvm_set_memory_region(struct kvm *kvm,
-			  struct kvm_userspace_memory_region *mem,
-			  bool user_alloc);
+			  struct kvm_userspace_memory_region *mem);
 int __kvm_set_memory_region(struct kvm *kvm,
-			    struct kvm_userspace_memory_region *mem,
-			    bool user_alloc);
+			    struct kvm_userspace_memory_region *mem);
 void kvm_arch_free_memslot(struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont);
 int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot *memslot,
-				struct kvm_memory_slot old,
 				struct kvm_userspace_memory_region *mem,
-				bool user_alloc);
+				enum kvm_mr_change change);
 void kvm_arch_commit_memory_region(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem,
-				struct kvm_memory_slot old,
-				bool user_alloc);
+				const struct kvm_memory_slot *old,
+				enum kvm_mr_change change);
 bool kvm_largepages_enabled(void);
 void kvm_disable_largepages(void);
 /* flush all memory translations */
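Note (illustration only, not part of this patch): the new enum kvm_mr_change replaces the old (struct kvm_memory_slot old, bool user_alloc) parameters in the arch memory-region hooks, so each architecture can branch on what kind of slot update is happening. A minimal sketch of how such a hook could dispatch on the change type, with placeholder bodies:

/* Hypothetical sketch -- not taken from this commit. */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	switch (change) {
	case KVM_MR_CREATE:
	case KVM_MR_MOVE:
		/* allocate or validate per-slot metadata for the new range */
		break;
	case KVM_MR_DELETE:
	case KVM_MR_FLAGS_ONLY:
		/* nothing to prepare; the commit hook sees the old slot */
		break;
	}
	return 0;
}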
@@ -539,7 +573,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_flush_remote_tlbs(struct kvm *kvm);
 void kvm_reload_remote_mmus(struct kvm *kvm);
 void kvm_make_mclock_inprogress_request(struct kvm *kvm);
-void kvm_make_update_eoibitmap_request(struct kvm *kvm);
+void kvm_make_scan_ioapic_request(struct kvm *kvm);
 
 long kvm_arch_dev_ioctl(struct file *filp,
 			unsigned int ioctl, unsigned long arg);
@@ -555,10 +589,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 				struct kvm_dirty_log *log);
 
 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
-				   struct
-				   kvm_userspace_memory_region *mem,
-				   bool user_alloc);
-int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level);
+				   struct kvm_userspace_memory_region *mem);
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
+			  bool line_status);
 long kvm_arch_vm_ioctl(struct file *filp,
 		       unsigned int ioctl, unsigned long arg);
 
@@ -632,7 +665,6 @@ static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
 
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
 void kvm_arch_destroy_vm(struct kvm *kvm);
-void kvm_free_all_assigned_devices(struct kvm *kvm);
 void kvm_arch_sync_events(struct kvm *kvm);
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
@@ -684,15 +716,11 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
 void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
 			     bool mask);
 
-#ifdef __KVM_HAVE_IOAPIC
-void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
-				   union kvm_ioapic_redirect_entry *entry,
-				   unsigned long *deliver_bitmask);
-#endif
-int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
+int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+		bool line_status);
 int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
-		int irq_source_id, int level);
+		int irq_source_id, int level, bool line_status);
 bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
@@ -705,7 +733,7 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
 /* For vcpu->arch.iommu_flags */
 #define KVM_IOMMU_CACHE_COHERENCY	0x1
 
-#ifdef CONFIG_IOMMU_API
+#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
 int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
 void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
 int kvm_iommu_map_guest(struct kvm *kvm);
@@ -714,7 +742,7 @@ int kvm_assign_device(struct kvm *kvm,
 		      struct kvm_assigned_dev_kernel *assigned_dev);
 int kvm_deassign_device(struct kvm *kvm,
 			struct kvm_assigned_dev_kernel *assigned_dev);
-#else /* CONFIG_IOMMU_API */
+#else
 static inline int kvm_iommu_map_pages(struct kvm *kvm,
 				      struct kvm_memory_slot *slot)
 {
@@ -726,28 +754,11 @@ static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
 {
 }
 
-static inline int kvm_iommu_map_guest(struct kvm *kvm)
-{
-	return -ENODEV;
-}
-
 static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
 {
 	return 0;
 }
-
-static inline int kvm_assign_device(struct kvm *kvm,
-				    struct kvm_assigned_dev_kernel *assigned_dev)
-{
-	return 0;
-}
-
-static inline int kvm_deassign_device(struct kvm *kvm,
-				      struct kvm_assigned_dev_kernel *assigned_dev)
-{
-	return 0;
-}
-#endif /* CONFIG_IOMMU_API */
+#endif
 
 static inline void __guest_enter(void)
 {
@@ -921,7 +932,7 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
 }
 #endif
 
-#ifdef KVM_CAP_IRQ_ROUTING
+#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
 
 #define KVM_MAX_IRQ_ROUTES 1024
 
@@ -930,6 +941,9 @@ int kvm_set_irq_routing(struct kvm *kvm,
 			const struct kvm_irq_routing_entry *entries,
 			unsigned nr,
 			unsigned flags);
+int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
+			  struct kvm_kernel_irq_routing_entry *e,
+			  const struct kvm_irq_routing_entry *ue);
 void kvm_free_irq_routing(struct kvm *kvm);
 
 int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
@@ -998,11 +1012,13 @@ static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
 
 #endif
 
-#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT
+#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
 
 long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
 				  unsigned long arg);
 
+void kvm_free_all_assigned_devices(struct kvm *kvm);
+
 #else
 
 static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
@@ -1011,6 +1027,8 @@ static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
 	return -ENOTTY;
 }
 
+static inline void kvm_free_all_assigned_devices(struct kvm *kvm) {}
+
 #endif
 
 static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
@@ -1028,6 +1046,46 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
 	}
 }
 
+extern bool kvm_rebooting;
+
+struct kvm_device_ops;
+
+struct kvm_device {
+	struct kvm_device_ops *ops;
+	struct kvm *kvm;
+	void *private;
+	struct list_head vm_node;
+};
+
+/* create, destroy, and name are mandatory */
+struct kvm_device_ops {
+	const char *name;
+	int (*create)(struct kvm_device *dev, u32 type);
+
+	/*
+	 * Destroy is responsible for freeing dev.
+	 *
+	 * Destroy may be called before or after destructors are called
+	 * on emulated I/O regions, depending on whether a reference is
+	 * held by a vcpu or other kvm component that gets destroyed
+	 * after the emulated I/O.
+	 */
+	void (*destroy)(struct kvm_device *dev);
+
+	int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
+	int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
+	int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
+	long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
+		      unsigned long arg);
+};
+
+void kvm_device_get(struct kvm_device *dev);
+void kvm_device_put(struct kvm_device *dev);
+struct kvm_device *kvm_device_from_filp(struct file *filp);
+
+extern struct kvm_device_ops kvm_mpic_ops;
+extern struct kvm_device_ops kvm_xics_ops;
+
 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
 
 static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
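For context, a minimal sketch of how the kvm_device / kvm_device_ops interface added above is meant to be implemented by an in-kernel device backend. The "demo" device, its private state, and its single attribute group are invented for illustration; they are not part of this commit (the real users here are the MPIC and XICS backends).

/* Hypothetical sketch -- names and attribute layout are placeholders. */
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

struct demo_device_state {
	u64 reg;				/* invented device register */
};

static int demo_create(struct kvm_device *dev, u32 type)
{
	struct demo_device_state *s;

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;
	dev->private = s;			/* per-device state */
	return 0;
}

static void demo_destroy(struct kvm_device *dev)
{
	/* destroy must free dev itself, per the comment in kvm_host.h */
	kfree(dev->private);
	kfree(dev);
}

static int demo_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct demo_device_state *s = dev->private;
	u64 __user *uaddr = (u64 __user *)(long)attr->addr;

	if (attr->group != 0)			/* single, invented group */
		return -ENXIO;
	return get_user(s->reg, uaddr) ? -EFAULT : 0;
}

struct kvm_device_ops demo_device_ops = {
	.name     = "kvm-demo",
	.create   = demo_create,
	.destroy  = demo_destroy,
	.set_attr = demo_set_attr,
};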
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 19911dddaeb7..7005d1109ec9 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -37,7 +37,7 @@ TRACE_EVENT(kvm_userspace_exit,
 		  __entry->errno < 0 ? -__entry->errno : __entry->reason)
 );
 
-#if defined(__KVM_HAVE_IRQ_LINE)
+#if defined(CONFIG_HAVE_KVM_IRQCHIP)
 TRACE_EVENT(kvm_set_irq,
 	TP_PROTO(unsigned int gsi, int level, int irq_source_id),
 	TP_ARGS(gsi, level, irq_source_id),
@@ -122,6 +122,10 @@ TRACE_EVENT(kvm_msi_set_irq,
 	{KVM_IRQCHIP_PIC_SLAVE,		"PIC slave"},		\
 	{KVM_IRQCHIP_IOAPIC,		"IOAPIC"}
 
+#endif /* defined(__KVM_HAVE_IOAPIC) */
+
+#if defined(CONFIG_HAVE_KVM_IRQCHIP)
+
 TRACE_EVENT(kvm_ack_irq,
 	TP_PROTO(unsigned int irqchip, unsigned int pin),
 	TP_ARGS(irqchip, pin),
@@ -136,14 +140,18 @@ TRACE_EVENT(kvm_ack_irq,
 		__entry->pin		= pin;
 	),
 
+#ifdef kvm_irqchips
 	TP_printk("irqchip %s pin %u",
 		  __print_symbolic(__entry->irqchip, kvm_irqchips),
 		  __entry->pin)
+#else
+	TP_printk("irqchip %d pin %u", __entry->irqchip, __entry->pin)
+#endif
 );
 
+#endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */
 
 
-#endif /* defined(__KVM_HAVE_IOAPIC) */
 
 #define KVM_TRACE_MMIO_READ_UNSATISFIED 0
 #define KVM_TRACE_MMIO_READ 1
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 3c56ba3d80c1..a5c86fc34a37 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -449,12 +449,15 @@ enum {
 	kvm_ioeventfd_flag_nr_datamatch,
 	kvm_ioeventfd_flag_nr_pio,
 	kvm_ioeventfd_flag_nr_deassign,
+	kvm_ioeventfd_flag_nr_virtio_ccw_notify,
 	kvm_ioeventfd_flag_nr_max,
 };
 
 #define KVM_IOEVENTFD_FLAG_DATAMATCH (1 << kvm_ioeventfd_flag_nr_datamatch)
 #define KVM_IOEVENTFD_FLAG_PIO       (1 << kvm_ioeventfd_flag_nr_pio)
 #define KVM_IOEVENTFD_FLAG_DEASSIGN  (1 << kvm_ioeventfd_flag_nr_deassign)
+#define KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY \
+	(1 << kvm_ioeventfd_flag_nr_virtio_ccw_notify)
 
 #define KVM_IOEVENTFD_VALID_FLAG_MASK  ((1 << kvm_ioeventfd_flag_nr_max) - 1)
 
@@ -558,9 +561,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_MP_STATE 14
 #define KVM_CAP_COALESCED_MMIO 15
 #define KVM_CAP_SYNC_MMU 16  /* Changes to host mmap are reflected in guest */
-#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT
 #define KVM_CAP_DEVICE_ASSIGNMENT 17
-#endif
 #define KVM_CAP_IOMMU 18
 #ifdef __KVM_HAVE_MSI
 #define KVM_CAP_DEVICE_MSI 20
@@ -576,13 +577,9 @@ struct kvm_ppc_smmu_info {
 #ifdef __KVM_HAVE_PIT
 #define KVM_CAP_REINJECT_CONTROL 24
 #endif
-#ifdef __KVM_HAVE_IOAPIC
 #define KVM_CAP_IRQ_ROUTING 25
-#endif
 #define KVM_CAP_IRQ_INJECT_STATUS 26
-#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT
 #define KVM_CAP_DEVICE_DEASSIGNMENT 27
-#endif
 #ifdef __KVM_HAVE_MSIX
 #define KVM_CAP_DEVICE_MSIX 28
 #endif
@@ -665,6 +662,10 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_PPC_EPR 86
 #define KVM_CAP_ARM_PSCI 87
 #define KVM_CAP_ARM_SET_DEVICE_ADDR 88
+#define KVM_CAP_DEVICE_CTRL 89
+#define KVM_CAP_IRQ_MPIC 90
+#define KVM_CAP_PPC_RTAS 91
+#define KVM_CAP_IRQ_XICS 92
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -818,6 +819,28 @@ struct kvm_arm_device_addr {
 };
 
 /*
+ * Device control API, available with KVM_CAP_DEVICE_CTRL
+ */
+#define KVM_CREATE_DEVICE_TEST		1
+
+struct kvm_create_device {
+	__u32	type;	/* in: KVM_DEV_TYPE_xxx */
+	__u32	fd;	/* out: device handle */
+	__u32	flags;	/* in: KVM_CREATE_DEVICE_xxx */
+};
+
+struct kvm_device_attr {
+	__u32	flags;		/* no flags currently defined */
+	__u32	group;		/* device-defined */
+	__u64	attr;		/* group-defined */
+	__u64	addr;		/* userspace address of attr data */
+};
+
+#define KVM_DEV_TYPE_FSL_MPIC_20	1
+#define KVM_DEV_TYPE_FSL_MPIC_42	2
+#define KVM_DEV_TYPE_XICS		3
+
+/*
  * ioctls for VM fds
  */
 #define KVM_SET_MEMORY_REGION     _IOW(KVMIO, 0x40, struct kvm_memory_region)
@@ -904,6 +927,16 @@ struct kvm_s390_ucas_mapping {
 #define KVM_PPC_GET_HTAB_FD	  _IOW(KVMIO,  0xaa, struct kvm_get_htab_fd)
 /* Available with KVM_CAP_ARM_SET_DEVICE_ADDR */
 #define KVM_ARM_SET_DEVICE_ADDR	  _IOW(KVMIO,  0xab, struct kvm_arm_device_addr)
+/* Available with KVM_CAP_PPC_RTAS */
+#define KVM_PPC_RTAS_DEFINE_TOKEN _IOW(KVMIO,  0xac, struct kvm_rtas_token_args)
+
+/* ioctl for vm fd */
+#define KVM_CREATE_DEVICE	  _IOWR(KVMIO,  0xe0, struct kvm_create_device)
+
+/* ioctls for fds returned by KVM_CREATE_DEVICE */
+#define KVM_SET_DEVICE_ATTR	  _IOW(KVMIO,  0xe1, struct kvm_device_attr)
+#define KVM_GET_DEVICE_ATTR	  _IOW(KVMIO,  0xe2, struct kvm_device_attr)
+#define KVM_HAS_DEVICE_ATTR	  _IOW(KVMIO,  0xe3, struct kvm_device_attr)
 
 /*
  * ioctls for vcpu fds
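Finally, a hedged userspace sketch of how the device-control ioctls added above are intended to be driven from a VM fd: KVM_CREATE_DEVICE returns a device fd in kvm_create_device.fd, and attributes are then programmed through KVM_SET_DEVICE_ATTR on that fd. The attribute group/number and the helper name are placeholders, not values defined by this patch.

/* Hypothetical userspace sketch -- not part of this commit. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int create_mpic_device(int vm_fd)
{
	struct kvm_create_device cd;
	struct kvm_device_attr attr;
	uint32_t val = 0;			/* placeholder attribute data */

	memset(&cd, 0, sizeof(cd));
	cd.type = KVM_DEV_TYPE_FSL_MPIC_20;	/* in: device type */
	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
		return -1;

	/* cd.fd now refers to the new device; program one attribute. */
	memset(&attr, 0, sizeof(attr));
	attr.group = 0;				/* placeholder group */
	attr.attr  = 0;				/* placeholder attribute */
	attr.addr  = (uint64_t)(unsigned long)&val;
	if (ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
		return -1;

	return cd.fd;				/* caller owns the device fd */
}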