diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-05-05 17:47:31 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-05-05 17:47:31 -0400 |
commit | 01227a889ed56ae53aeebb9f93be9d54dd8b2de8 (patch) | |
tree | d5eba9359a9827e84d4112b84d48c54df5c5acde /include/linux/kvm_host.h | |
parent | 9e6879460c8edb0cd3c24c09b83d06541b5af0dc (diff) | |
parent | db6ae6158186a17165ef990bda2895ae7594b039 (diff) |
Merge tag 'kvm-3.10-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Gleb Natapov:
"Highlights of the updates are:
general:
- new emulated device API
- legacy device assignment is now optional
- irqfd interface is more generic and can be shared between arches
x86:
- VMCS shadow support and other nested VMX improvements
- APIC virtualization and Posted Interrupt hardware support
- Optimize mmio spte zapping
ppc:
- BookE: in-kernel MPIC emulation with irqfd support
- Book3S: in-kernel XICS emulation (incomplete)
- Book3S: HV: migration fixes
- BookE: more debug support preparation
- BookE: e6500 support
ARM:
- reworking of Hyp idmaps
s390:
- ioeventfd for virtio-ccw
And many other bug fixes, cleanups and improvements"
* tag 'kvm-3.10-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (204 commits)
kvm: Add compat_ioctl for device control API
KVM: x86: Account for failing enable_irq_window for NMI window request
KVM: PPC: Book3S: Add API for in-kernel XICS emulation
kvm/ppc/mpic: fix missing unlock in set_base_addr()
kvm/ppc: Hold srcu lock when calling kvm_io_bus_read/write
kvm/ppc/mpic: remove users
kvm/ppc/mpic: fix mmio region lists when multiple guests used
kvm/ppc/mpic: remove default routes from documentation
kvm: KVM_CAP_IOMMU only available with device assignment
ARM: KVM: iterate over all CPUs for CPU compatibility check
KVM: ARM: Fix spelling in error message
ARM: KVM: define KVM_ARM_MAX_VCPUS unconditionally
KVM: ARM: Fix API documentation for ONE_REG encoding
ARM: KVM: promote vfp_host pointer to generic host cpu context
ARM: KVM: add architecture specific hook for capabilities
ARM: KVM: perform HYP initialization for hotplugged CPUs
ARM: KVM: switch to a dual-step HYP init code
ARM: KVM: rework HYP page table freeing
ARM: KVM: enforce maximum size for identity mapped code
ARM: KVM: move to a KVM provided HYP idmap
...
Diffstat (limited to 'include/linux/kvm_host.h')
-rw-r--r-- | include/linux/kvm_host.h | 166 |
1 files changed, 112 insertions, 54 deletions
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index c13958251927..f0eea07d2c2b 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -117,14 +117,13 @@ static inline bool is_error_page(struct page *page) | |||
117 | #define KVM_REQ_APF_HALT 12 | 117 | #define KVM_REQ_APF_HALT 12 |
118 | #define KVM_REQ_STEAL_UPDATE 13 | 118 | #define KVM_REQ_STEAL_UPDATE 13 |
119 | #define KVM_REQ_NMI 14 | 119 | #define KVM_REQ_NMI 14 |
120 | #define KVM_REQ_IMMEDIATE_EXIT 15 | 120 | #define KVM_REQ_PMU 15 |
121 | #define KVM_REQ_PMU 16 | 121 | #define KVM_REQ_PMI 16 |
122 | #define KVM_REQ_PMI 17 | 122 | #define KVM_REQ_WATCHDOG 17 |
123 | #define KVM_REQ_WATCHDOG 18 | 123 | #define KVM_REQ_MASTERCLOCK_UPDATE 18 |
124 | #define KVM_REQ_MASTERCLOCK_UPDATE 19 | 124 | #define KVM_REQ_MCLOCK_INPROGRESS 19 |
125 | #define KVM_REQ_MCLOCK_INPROGRESS 20 | 125 | #define KVM_REQ_EPR_EXIT 20 |
126 | #define KVM_REQ_EPR_EXIT 21 | 126 | #define KVM_REQ_SCAN_IOAPIC 21 |
127 | #define KVM_REQ_EOIBITMAP 22 | ||
128 | 127 | ||
129 | #define KVM_USERSPACE_IRQ_SOURCE_ID 0 | 128 | #define KVM_USERSPACE_IRQ_SOURCE_ID 0 |
130 | #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 | 129 | #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 |
@@ -133,6 +132,9 @@ struct kvm; | |||
133 | struct kvm_vcpu; | 132 | struct kvm_vcpu; |
134 | extern struct kmem_cache *kvm_vcpu_cache; | 133 | extern struct kmem_cache *kvm_vcpu_cache; |
135 | 134 | ||
135 | extern raw_spinlock_t kvm_lock; | ||
136 | extern struct list_head vm_list; | ||
137 | |||
136 | struct kvm_io_range { | 138 | struct kvm_io_range { |
137 | gpa_t addr; | 139 | gpa_t addr; |
138 | int len; | 140 | int len; |
@@ -149,6 +151,7 @@ struct kvm_io_bus { | |||
149 | enum kvm_bus { | 151 | enum kvm_bus { |
150 | KVM_MMIO_BUS, | 152 | KVM_MMIO_BUS, |
151 | KVM_PIO_BUS, | 153 | KVM_PIO_BUS, |
154 | KVM_VIRTIO_CCW_NOTIFY_BUS, | ||
152 | KVM_NR_BUSES | 155 | KVM_NR_BUSES |
153 | }; | 156 | }; |
154 | 157 | ||
@@ -252,6 +255,7 @@ struct kvm_vcpu { | |||
252 | bool dy_eligible; | 255 | bool dy_eligible; |
253 | } spin_loop; | 256 | } spin_loop; |
254 | #endif | 257 | #endif |
258 | bool preempted; | ||
255 | struct kvm_vcpu_arch arch; | 259 | struct kvm_vcpu_arch arch; |
256 | }; | 260 | }; |
257 | 261 | ||
@@ -285,7 +289,8 @@ struct kvm_kernel_irq_routing_entry { | |||
285 | u32 gsi; | 289 | u32 gsi; |
286 | u32 type; | 290 | u32 type; |
287 | int (*set)(struct kvm_kernel_irq_routing_entry *e, | 291 | int (*set)(struct kvm_kernel_irq_routing_entry *e, |
288 | struct kvm *kvm, int irq_source_id, int level); | 292 | struct kvm *kvm, int irq_source_id, int level, |
293 | bool line_status); | ||
289 | union { | 294 | union { |
290 | struct { | 295 | struct { |
291 | unsigned irqchip; | 296 | unsigned irqchip; |
@@ -296,10 +301,10 @@ struct kvm_kernel_irq_routing_entry { | |||
296 | struct hlist_node link; | 301 | struct hlist_node link; |
297 | }; | 302 | }; |
298 | 303 | ||
299 | #ifdef __KVM_HAVE_IOAPIC | 304 | #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING |
300 | 305 | ||
301 | struct kvm_irq_routing_table { | 306 | struct kvm_irq_routing_table { |
302 | int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS]; | 307 | int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS]; |
303 | struct kvm_kernel_irq_routing_entry *rt_entries; | 308 | struct kvm_kernel_irq_routing_entry *rt_entries; |
304 | u32 nr_rt_entries; | 309 | u32 nr_rt_entries; |
305 | /* | 310 | /* |
@@ -385,6 +390,7 @@ struct kvm { | |||
385 | long mmu_notifier_count; | 390 | long mmu_notifier_count; |
386 | #endif | 391 | #endif |
387 | long tlbs_dirty; | 392 | long tlbs_dirty; |
393 | struct list_head devices; | ||
388 | }; | 394 | }; |
389 | 395 | ||
390 | #define kvm_err(fmt, ...) \ | 396 | #define kvm_err(fmt, ...) \ |
@@ -424,6 +430,19 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); | |||
424 | int __must_check vcpu_load(struct kvm_vcpu *vcpu); | 430 | int __must_check vcpu_load(struct kvm_vcpu *vcpu); |
425 | void vcpu_put(struct kvm_vcpu *vcpu); | 431 | void vcpu_put(struct kvm_vcpu *vcpu); |
426 | 432 | ||
433 | #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING | ||
434 | int kvm_irqfd_init(void); | ||
435 | void kvm_irqfd_exit(void); | ||
436 | #else | ||
437 | static inline int kvm_irqfd_init(void) | ||
438 | { | ||
439 | return 0; | ||
440 | } | ||
441 | |||
442 | static inline void kvm_irqfd_exit(void) | ||
443 | { | ||
444 | } | ||
445 | #endif | ||
427 | int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, | 446 | int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, |
428 | struct module *module); | 447 | struct module *module); |
429 | void kvm_exit(void); | 448 | void kvm_exit(void); |
@@ -452,24 +471,39 @@ id_to_memslot(struct kvm_memslots *slots, int id) | |||
452 | return slot; | 471 | return slot; |
453 | } | 472 | } |
454 | 473 | ||
474 | /* | ||
475 | * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations: | ||
476 | * - create a new memory slot | ||
477 | * - delete an existing memory slot | ||
478 | * - modify an existing memory slot | ||
479 | * -- move it in the guest physical memory space | ||
480 | * -- just change its flags | ||
481 | * | ||
482 | * Since flags can be changed by some of these operations, the following | ||
483 | * differentiation is the best we can do for __kvm_set_memory_region(): | ||
484 | */ | ||
485 | enum kvm_mr_change { | ||
486 | KVM_MR_CREATE, | ||
487 | KVM_MR_DELETE, | ||
488 | KVM_MR_MOVE, | ||
489 | KVM_MR_FLAGS_ONLY, | ||
490 | }; | ||
491 | |||
455 | int kvm_set_memory_region(struct kvm *kvm, | 492 | int kvm_set_memory_region(struct kvm *kvm, |
456 | struct kvm_userspace_memory_region *mem, | 493 | struct kvm_userspace_memory_region *mem); |
457 | bool user_alloc); | ||
458 | int __kvm_set_memory_region(struct kvm *kvm, | 494 | int __kvm_set_memory_region(struct kvm *kvm, |
459 | struct kvm_userspace_memory_region *mem, | 495 | struct kvm_userspace_memory_region *mem); |
460 | bool user_alloc); | ||
461 | void kvm_arch_free_memslot(struct kvm_memory_slot *free, | 496 | void kvm_arch_free_memslot(struct kvm_memory_slot *free, |
462 | struct kvm_memory_slot *dont); | 497 | struct kvm_memory_slot *dont); |
463 | int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages); | 498 | int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages); |
464 | int kvm_arch_prepare_memory_region(struct kvm *kvm, | 499 | int kvm_arch_prepare_memory_region(struct kvm *kvm, |
465 | struct kvm_memory_slot *memslot, | 500 | struct kvm_memory_slot *memslot, |
466 | struct kvm_memory_slot old, | ||
467 | struct kvm_userspace_memory_region *mem, | 501 | struct kvm_userspace_memory_region *mem, |
468 | bool user_alloc); | 502 | enum kvm_mr_change change); |
469 | void kvm_arch_commit_memory_region(struct kvm *kvm, | 503 | void kvm_arch_commit_memory_region(struct kvm *kvm, |
470 | struct kvm_userspace_memory_region *mem, | 504 | struct kvm_userspace_memory_region *mem, |
471 | struct kvm_memory_slot old, | 505 | const struct kvm_memory_slot *old, |
472 | bool user_alloc); | 506 | enum kvm_mr_change change); |
473 | bool kvm_largepages_enabled(void); | 507 | bool kvm_largepages_enabled(void); |
474 | void kvm_disable_largepages(void); | 508 | void kvm_disable_largepages(void); |
475 | /* flush all memory translations */ | 509 | /* flush all memory translations */ |
@@ -539,7 +573,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); | |||
539 | void kvm_flush_remote_tlbs(struct kvm *kvm); | 573 | void kvm_flush_remote_tlbs(struct kvm *kvm); |
540 | void kvm_reload_remote_mmus(struct kvm *kvm); | 574 | void kvm_reload_remote_mmus(struct kvm *kvm); |
541 | void kvm_make_mclock_inprogress_request(struct kvm *kvm); | 575 | void kvm_make_mclock_inprogress_request(struct kvm *kvm); |
542 | void kvm_make_update_eoibitmap_request(struct kvm *kvm); | 576 | void kvm_make_scan_ioapic_request(struct kvm *kvm); |
543 | 577 | ||
544 | long kvm_arch_dev_ioctl(struct file *filp, | 578 | long kvm_arch_dev_ioctl(struct file *filp, |
545 | unsigned int ioctl, unsigned long arg); | 579 | unsigned int ioctl, unsigned long arg); |
@@ -555,10 +589,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | |||
555 | struct kvm_dirty_log *log); | 589 | struct kvm_dirty_log *log); |
556 | 590 | ||
557 | int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, | 591 | int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, |
558 | struct | 592 | struct kvm_userspace_memory_region *mem); |
559 | kvm_userspace_memory_region *mem, | 593 | int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, |
560 | bool user_alloc); | 594 | bool line_status); |
561 | int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level); | ||
562 | long kvm_arch_vm_ioctl(struct file *filp, | 595 | long kvm_arch_vm_ioctl(struct file *filp, |
563 | unsigned int ioctl, unsigned long arg); | 596 | unsigned int ioctl, unsigned long arg); |
564 | 597 | ||
@@ -632,7 +665,6 @@ static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) | |||
632 | 665 | ||
633 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type); | 666 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type); |
634 | void kvm_arch_destroy_vm(struct kvm *kvm); | 667 | void kvm_arch_destroy_vm(struct kvm *kvm); |
635 | void kvm_free_all_assigned_devices(struct kvm *kvm); | ||
636 | void kvm_arch_sync_events(struct kvm *kvm); | 668 | void kvm_arch_sync_events(struct kvm *kvm); |
637 | 669 | ||
638 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); | 670 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); |
@@ -684,15 +716,11 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq, | |||
684 | void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin, | 716 | void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin, |
685 | bool mask); | 717 | bool mask); |
686 | 718 | ||
687 | #ifdef __KVM_HAVE_IOAPIC | 719 | int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, |
688 | void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic, | 720 | bool line_status); |
689 | union kvm_ioapic_redirect_entry *entry, | ||
690 | unsigned long *deliver_bitmask); | ||
691 | #endif | ||
692 | int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level); | ||
693 | int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level); | 721 | int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level); |
694 | int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm, | 722 | int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm, |
695 | int irq_source_id, int level); | 723 | int irq_source_id, int level, bool line_status); |
696 | bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin); | 724 | bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin); |
697 | void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin); | 725 | void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin); |
698 | void kvm_register_irq_ack_notifier(struct kvm *kvm, | 726 | void kvm_register_irq_ack_notifier(struct kvm *kvm, |
@@ -705,7 +733,7 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); | |||
705 | /* For vcpu->arch.iommu_flags */ | 733 | /* For vcpu->arch.iommu_flags */ |
706 | #define KVM_IOMMU_CACHE_COHERENCY 0x1 | 734 | #define KVM_IOMMU_CACHE_COHERENCY 0x1 |
707 | 735 | ||
708 | #ifdef CONFIG_IOMMU_API | 736 | #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT |
709 | int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot); | 737 | int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot); |
710 | void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot); | 738 | void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot); |
711 | int kvm_iommu_map_guest(struct kvm *kvm); | 739 | int kvm_iommu_map_guest(struct kvm *kvm); |
@@ -714,7 +742,7 @@ int kvm_assign_device(struct kvm *kvm, | |||
714 | struct kvm_assigned_dev_kernel *assigned_dev); | 742 | struct kvm_assigned_dev_kernel *assigned_dev); |
715 | int kvm_deassign_device(struct kvm *kvm, | 743 | int kvm_deassign_device(struct kvm *kvm, |
716 | struct kvm_assigned_dev_kernel *assigned_dev); | 744 | struct kvm_assigned_dev_kernel *assigned_dev); |
717 | #else /* CONFIG_IOMMU_API */ | 745 | #else |
718 | static inline int kvm_iommu_map_pages(struct kvm *kvm, | 746 | static inline int kvm_iommu_map_pages(struct kvm *kvm, |
719 | struct kvm_memory_slot *slot) | 747 | struct kvm_memory_slot *slot) |
720 | { | 748 | { |
@@ -726,28 +754,11 @@ static inline void kvm_iommu_unmap_pages(struct kvm *kvm, | |||
726 | { | 754 | { |
727 | } | 755 | } |
728 | 756 | ||
729 | static inline int kvm_iommu_map_guest(struct kvm *kvm) | ||
730 | { | ||
731 | return -ENODEV; | ||
732 | } | ||
733 | |||
734 | static inline int kvm_iommu_unmap_guest(struct kvm *kvm) | 757 | static inline int kvm_iommu_unmap_guest(struct kvm *kvm) |
735 | { | 758 | { |
736 | return 0; | 759 | return 0; |
737 | } | 760 | } |
738 | 761 | #endif | |
739 | static inline int kvm_assign_device(struct kvm *kvm, | ||
740 | struct kvm_assigned_dev_kernel *assigned_dev) | ||
741 | { | ||
742 | return 0; | ||
743 | } | ||
744 | |||
745 | static inline int kvm_deassign_device(struct kvm *kvm, | ||
746 | struct kvm_assigned_dev_kernel *assigned_dev) | ||
747 | { | ||
748 | return 0; | ||
749 | } | ||
750 | #endif /* CONFIG_IOMMU_API */ | ||
751 | 762 | ||
752 | static inline void __guest_enter(void) | 763 | static inline void __guest_enter(void) |
753 | { | 764 | { |
@@ -921,7 +932,7 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq) | |||
921 | } | 932 | } |
922 | #endif | 933 | #endif |
923 | 934 | ||
924 | #ifdef KVM_CAP_IRQ_ROUTING | 935 | #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING |
925 | 936 | ||
926 | #define KVM_MAX_IRQ_ROUTES 1024 | 937 | #define KVM_MAX_IRQ_ROUTES 1024 |
927 | 938 | ||
@@ -930,6 +941,9 @@ int kvm_set_irq_routing(struct kvm *kvm, | |||
930 | const struct kvm_irq_routing_entry *entries, | 941 | const struct kvm_irq_routing_entry *entries, |
931 | unsigned nr, | 942 | unsigned nr, |
932 | unsigned flags); | 943 | unsigned flags); |
944 | int kvm_set_routing_entry(struct kvm_irq_routing_table *rt, | ||
945 | struct kvm_kernel_irq_routing_entry *e, | ||
946 | const struct kvm_irq_routing_entry *ue); | ||
933 | void kvm_free_irq_routing(struct kvm *kvm); | 947 | void kvm_free_irq_routing(struct kvm *kvm); |
934 | 948 | ||
935 | int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi); | 949 | int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi); |
@@ -998,11 +1012,13 @@ static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; } | |||
998 | 1012 | ||
999 | #endif | 1013 | #endif |
1000 | 1014 | ||
1001 | #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT | 1015 | #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT |
1002 | 1016 | ||
1003 | long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, | 1017 | long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, |
1004 | unsigned long arg); | 1018 | unsigned long arg); |
1005 | 1019 | ||
1020 | void kvm_free_all_assigned_devices(struct kvm *kvm); | ||
1021 | |||
1006 | #else | 1022 | #else |
1007 | 1023 | ||
1008 | static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, | 1024 | static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, |
@@ -1011,6 +1027,8 @@ static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, | |||
1011 | return -ENOTTY; | 1027 | return -ENOTTY; |
1012 | } | 1028 | } |
1013 | 1029 | ||
1030 | static inline void kvm_free_all_assigned_devices(struct kvm *kvm) {} | ||
1031 | |||
1014 | #endif | 1032 | #endif |
1015 | 1033 | ||
1016 | static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) | 1034 | static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) |
@@ -1028,6 +1046,46 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu) | |||
1028 | } | 1046 | } |
1029 | } | 1047 | } |
1030 | 1048 | ||
1049 | extern bool kvm_rebooting; | ||
1050 | |||
1051 | struct kvm_device_ops; | ||
1052 | |||
1053 | struct kvm_device { | ||
1054 | struct kvm_device_ops *ops; | ||
1055 | struct kvm *kvm; | ||
1056 | void *private; | ||
1057 | struct list_head vm_node; | ||
1058 | }; | ||
1059 | |||
1060 | /* create, destroy, and name are mandatory */ | ||
1061 | struct kvm_device_ops { | ||
1062 | const char *name; | ||
1063 | int (*create)(struct kvm_device *dev, u32 type); | ||
1064 | |||
1065 | /* | ||
1066 | * Destroy is responsible for freeing dev. | ||
1067 | * | ||
1068 | * Destroy may be called before or after destructors are called | ||
1069 | * on emulated I/O regions, depending on whether a reference is | ||
1070 | * held by a vcpu or other kvm component that gets destroyed | ||
1071 | * after the emulated I/O. | ||
1072 | */ | ||
1073 | void (*destroy)(struct kvm_device *dev); | ||
1074 | |||
1075 | int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); | ||
1076 | int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); | ||
1077 | int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); | ||
1078 | long (*ioctl)(struct kvm_device *dev, unsigned int ioctl, | ||
1079 | unsigned long arg); | ||
1080 | }; | ||
1081 | |||
1082 | void kvm_device_get(struct kvm_device *dev); | ||
1083 | void kvm_device_put(struct kvm_device *dev); | ||
1084 | struct kvm_device *kvm_device_from_filp(struct file *filp); | ||
1085 | |||
1086 | extern struct kvm_device_ops kvm_mpic_ops; | ||
1087 | extern struct kvm_device_ops kvm_xics_ops; | ||
1088 | |||
1031 | #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT | 1089 | #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT |
1032 | 1090 | ||
1033 | static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val) | 1091 | static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val) |