Diffstat (limited to 'virt')
 virt/kvm/arm/vgic.c | 157
 virt/kvm/ioapic.c   |  46
 virt/kvm/ioapic.h   |   2
 virt/kvm/kvm_main.c | 101
 virt/kvm/vfio.c     |  22
 5 files changed, 195 insertions(+), 133 deletions(-)
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index efe6eee2e7eb..eeb23b37f87c 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1522,83 +1522,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static void vgic_init_maintenance_interrupt(void *info)
-{
-	enable_percpu_irq(vgic->maint_irq, 0);
-}
-
-static int vgic_cpu_notify(struct notifier_block *self,
-			   unsigned long action, void *cpu)
-{
-	switch (action) {
-	case CPU_STARTING:
-	case CPU_STARTING_FROZEN:
-		vgic_init_maintenance_interrupt(NULL);
-		break;
-	case CPU_DYING:
-	case CPU_DYING_FROZEN:
-		disable_percpu_irq(vgic->maint_irq);
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block vgic_cpu_nb = {
-	.notifier_call = vgic_cpu_notify,
-};
-
-static const struct of_device_id vgic_ids[] = {
-	{ .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
-	{ .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
-	{},
-};
-
-int kvm_vgic_hyp_init(void)
-{
-	const struct of_device_id *matched_id;
-	const int (*vgic_probe)(struct device_node *,const struct vgic_ops **,
-				const struct vgic_params **);
-	struct device_node *vgic_node;
-	int ret;
-
-	vgic_node = of_find_matching_node_and_match(NULL,
-						    vgic_ids, &matched_id);
-	if (!vgic_node) {
-		kvm_err("error: no compatible GIC node found\n");
-		return -ENODEV;
-	}
-
-	vgic_probe = matched_id->data;
-	ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
-	if (ret)
-		return ret;
-
-	ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
-				 "vgic", kvm_get_running_vcpus());
-	if (ret) {
-		kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
-		return ret;
-	}
-
-	ret = __register_cpu_notifier(&vgic_cpu_nb);
-	if (ret) {
-		kvm_err("Cannot register vgic CPU notifier\n");
-		goto out_free_irq;
-	}
-
-	/* Callback into for arch code for setup */
-	vgic_arch_setup(vgic);
-
-	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
-
-	return 0;
-
-out_free_irq:
-	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
-	return ret;
-}
-
 /**
  * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
  * @kvm: pointer to the kvm struct
@@ -2062,7 +1985,7 @@ static int vgic_create(struct kvm_device *dev, u32 type)
 	return kvm_vgic_create(dev->kvm);
 }
 
-struct kvm_device_ops kvm_arm_vgic_v2_ops = {
+static struct kvm_device_ops kvm_arm_vgic_v2_ops = {
 	.name = "kvm-arm-vgic",
 	.create = vgic_create,
 	.destroy = vgic_destroy,
@@ -2070,3 +1993,81 @@ struct kvm_device_ops kvm_arm_vgic_v2_ops = {
 	.get_attr = vgic_get_attr,
 	.has_attr = vgic_has_attr,
 };
+
+static void vgic_init_maintenance_interrupt(void *info)
+{
+	enable_percpu_irq(vgic->maint_irq, 0);
+}
+
+static int vgic_cpu_notify(struct notifier_block *self,
+			   unsigned long action, void *cpu)
+{
+	switch (action) {
+	case CPU_STARTING:
+	case CPU_STARTING_FROZEN:
+		vgic_init_maintenance_interrupt(NULL);
+		break;
+	case CPU_DYING:
+	case CPU_DYING_FROZEN:
+		disable_percpu_irq(vgic->maint_irq);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block vgic_cpu_nb = {
+	.notifier_call = vgic_cpu_notify,
+};
+
+static const struct of_device_id vgic_ids[] = {
+	{ .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
+	{ .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
+	{},
+};
+
+int kvm_vgic_hyp_init(void)
+{
+	const struct of_device_id *matched_id;
+	const int (*vgic_probe)(struct device_node *,const struct vgic_ops **,
+				const struct vgic_params **);
+	struct device_node *vgic_node;
+	int ret;
+
+	vgic_node = of_find_matching_node_and_match(NULL,
+						    vgic_ids, &matched_id);
+	if (!vgic_node) {
+		kvm_err("error: no compatible GIC node found\n");
+		return -ENODEV;
+	}
+
+	vgic_probe = matched_id->data;
+	ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
+	if (ret)
+		return ret;
+
+	ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
+				 "vgic", kvm_get_running_vcpus());
+	if (ret) {
+		kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
+		return ret;
+	}
+
+	ret = __register_cpu_notifier(&vgic_cpu_nb);
+	if (ret) {
+		kvm_err("Cannot register vgic CPU notifier\n");
+		goto out_free_irq;
+	}
+
+	/* Callback into for arch code for setup */
+	vgic_arch_setup(vgic);
+
+	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
+
+	return kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
+				       KVM_DEV_TYPE_ARM_VGIC_V2);
+
+out_free_irq:
+	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
+	return ret;
+}
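
A side note on the vgic_ids[] idiom above: each device-tree compatible string carries its probe routine in .data, so kvm_vgic_hyp_init() selects GICv2 vs GICv3 support with a single table walk. Below is a minimal, runnable userspace sketch of that match-table pattern; struct of_id, hyp_init() and the probe stubs are illustrative stand-ins, and the real device-tree lookup is done by of_find_matching_node_and_match():

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for struct of_device_id: .data holds a probe callback. */
struct of_id {
	const char *compatible;
	int (*probe)(void);
};

static int v2_probe(void) { puts("vgic_v2_probe selected"); return 0; }
static int v3_probe(void) { puts("vgic_v3_probe selected"); return 0; }

static const struct of_id ids[] = {
	{ "arm,cortex-a15-gic", v2_probe },
	{ "arm,gic-v3",         v3_probe },
	{ NULL,                 NULL },      /* sentinel, like the {} above */
};

/* Walks the table the way of_find_matching_node_and_match() resolves
 * vgic_ids[] against the device tree. */
static int hyp_init(const char *dt_compatible)
{
	for (const struct of_id *id = ids; id->compatible; id++)
		if (strcmp(id->compatible, dt_compatible) == 0)
			return id->probe();
	return -ENODEV;   /* "no compatible GIC node found" */
}

int main(void)
{
	return hyp_init("arm,gic-v3") ? 1 : 0;
}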
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index e8ce34c9db32..0ba4057d271b 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -405,6 +405,26 @@ void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id)
 	spin_unlock(&ioapic->lock);
 }
 
+static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
+{
+	int i;
+	struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic,
+						 eoi_inject.work);
+	spin_lock(&ioapic->lock);
+	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
+
+		if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG)
+			continue;
+
+		if (ioapic->irr & (1 << i) && !ent->fields.remote_irr)
+			ioapic_service(ioapic, i, false);
+	}
+	spin_unlock(&ioapic->lock);
+}
+
+#define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000
+
 static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
 		struct kvm_ioapic *ioapic, int vector, int trigger_mode)
 {
@@ -435,8 +455,26 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
 
 	ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
 	ent->fields.remote_irr = 0;
-	if (ioapic->irr & (1 << i))
-		ioapic_service(ioapic, i, false);
+	if (!ent->fields.mask && (ioapic->irr & (1 << i))) {
+		++ioapic->irq_eoi[i];
+		if (ioapic->irq_eoi[i] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
+			/*
+			 * Real hardware does not deliver the interrupt
+			 * immediately during eoi broadcast, and this
+			 * lets a buggy guest make slow progress
+			 * even if it does not correctly handle a
+			 * level-triggered interrupt. Emulate this
+			 * behavior if we detect an interrupt storm.
+			 */
+			schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
+			ioapic->irq_eoi[i] = 0;
+			trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
+		} else {
+			ioapic_service(ioapic, i, false);
+		}
+	} else {
+		ioapic->irq_eoi[i] = 0;
+	}
 	}
 }
 
@@ -565,12 +603,14 @@ static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
 {
 	int i;
 
+	cancel_delayed_work_sync(&ioapic->eoi_inject);
 	for (i = 0; i < IOAPIC_NUM_PINS; i++)
 		ioapic->redirtbl[i].fields.mask = 1;
 	ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
 	ioapic->ioregsel = 0;
 	ioapic->irr = 0;
 	ioapic->id = 0;
+	memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS);
 	rtc_irq_eoi_tracking_reset(ioapic);
 	update_handled_vectors(ioapic);
 }
@@ -589,6 +629,7 @@ int kvm_ioapic_init(struct kvm *kvm)
 	if (!ioapic)
 		return -ENOMEM;
 	spin_lock_init(&ioapic->lock);
+	INIT_DELAYED_WORK(&ioapic->eoi_inject, kvm_ioapic_eoi_inject_work);
 	kvm->arch.vioapic = ioapic;
 	kvm_ioapic_reset(ioapic);
 	kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
@@ -609,6 +650,7 @@ void kvm_ioapic_destroy(struct kvm *kvm)
 {
 	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
 
+	cancel_delayed_work_sync(&ioapic->eoi_inject);
 	if (ioapic) {
 		kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
 		kvm->arch.vioapic = NULL;
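
The irq_eoi[] counters added above implement a simple storm detector: while a level-triggered pin stays unmasked and pending, each EOI bumps its counter, and on the 10000th successive EOI the re-injection is handed to delayed work (HZ / 100 jiffies, roughly 10 ms) instead of being serviced inline, mimicking the delivery latency of real hardware. The following is a toy, runnable model of just that counting rule; eoi_should_service_now() is an illustrative stand-in with no locking or real timers:

#include <stdbool.h>
#include <stdio.h>

#define SUCCESSIVE_IRQ_MAX_COUNT 10000  /* mirrors IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT */

static unsigned irq_eoi;   /* one per-pin counter, as in struct kvm_ioapic */

/* Returns true when the interrupt should be re-injected immediately,
 * false when it should be deferred (storm detected) or is not pending. */
static bool eoi_should_service_now(bool pin_pending)
{
	if (!pin_pending) {
		irq_eoi = 0;           /* any quiet EOI resets the streak */
		return false;
	}
	if (++irq_eoi == SUCCESSIVE_IRQ_MAX_COUNT) {
		irq_eoi = 0;           /* defer, like schedule_delayed_work(HZ / 100) */
		return false;
	}
	return true;
}

int main(void)
{
	unsigned deferred = 0, serviced = 0;

	for (int i = 0; i < 25000; i++)    /* a buggy guest EOIs forever */
		eoi_should_service_now(true) ? serviced++ : deferred++;

	printf("serviced=%u deferred=%u\n", serviced, deferred);  /* 24998 / 2 */
	return 0;
}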
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 90d43e95dcf8..e23b70634f1e 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -59,6 +59,8 @@ struct kvm_ioapic {
 	spinlock_t lock;
 	DECLARE_BITMAP(handled_vectors, 256);
 	struct rtc_status rtc_status;
+	struct delayed_work eoi_inject;
+	u32 irq_eoi[IOAPIC_NUM_PINS];
 };
 
 #ifdef DEBUG
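
One sizing detail worth flagging: irq_eoi is a u32 array, so it occupies IOAPIC_NUM_PINS * 4 bytes, while the memset() in the kvm_ioapic_reset() hunk passes a length of only IOAPIC_NUM_PINS bytes and so appears to clear just the first quarter of the counters. A small userspace demonstration of the difference (illustrative, not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IOAPIC_NUM_PINS 24   /* the x86 IOAPIC pin count used by KVM */

int main(void)
{
	uint32_t irq_eoi[IOAPIC_NUM_PINS];

	memset(irq_eoi, 0xff, sizeof(irq_eoi));      /* dirty all 96 bytes */

	/* Same length as the kvm_ioapic_reset() hunk: 24 bytes, i.e. only
	 * the first six u32 counters. */
	memset(irq_eoi, 0x00, IOAPIC_NUM_PINS);

	printf("irq_eoi[5]  = %#x\n", (unsigned)irq_eoi[5]);   /* 0 */
	printf("irq_eoi[23] = %#x\n", (unsigned)irq_eoi[23]);  /* 0xffffffff, untouched */

	/* Covering the whole array needs a byte count, not a pin count: */
	memset(irq_eoi, 0x00, sizeof(irq_eoi));
	return 0;
}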
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 76c92a7249c4..278232025129 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -95,8 +95,6 @@ static int hardware_enable_all(void);
 static void hardware_disable_all(void);
 
 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
-static void update_memslots(struct kvm_memslots *slots,
-			    struct kvm_memory_slot *new, u64 last_generation);
 
 static void kvm_release_pfn_dirty(pfn_t pfn);
 static void mark_page_dirty_in_slot(struct kvm *kvm,
@@ -477,6 +475,13 @@ static struct kvm *kvm_create_vm(unsigned long type)
 	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
 	if (!kvm->memslots)
 		goto out_err_no_srcu;
+
+	/*
+	 * Init kvm generation close to the maximum to easily test the
+	 * code of handling generation number wrap-around.
+	 */
+	kvm->memslots->generation = -150;
+
 	kvm_init_memslots_id(kvm);
 	if (init_srcu_struct(&kvm->srcu))
 		goto out_err_no_srcu;
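
Starting the generation at -150 only makes sense for an unsigned counter: assuming the field is an unsigned 64-bit value (as in struct kvm_memslots of this period), the assignment yields 2^64 - 150, so the wrap-around path gets exercised within the first few dozen memslot updates instead of essentially never. A tiny demonstration:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* What "generation = -150" means for an unsigned 64-bit field: */
	uint64_t generation = (uint64_t)-150;

	printf("initial:   %" PRIu64 "\n", generation);   /* 2^64 - 150 */

	/* Each memslot update bumps the generation (twice, after this
	 * patch), so the counter wraps almost immediately. */
	for (int i = 0; i < 150; i++)
		generation++;

	printf("after 150: %" PRIu64 "\n", generation);   /* wrapped to 0 */
	return 0;
}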
@@ -688,8 +693,7 @@ static void sort_memslots(struct kvm_memslots *slots)
 }
 
 static void update_memslots(struct kvm_memslots *slots,
-			    struct kvm_memory_slot *new,
-			    u64 last_generation)
+			    struct kvm_memory_slot *new)
 {
 	if (new) {
 		int id = new->id;
@@ -700,15 +704,13 @@ static void update_memslots(struct kvm_memslots *slots,
 		if (new->npages != npages)
 			sort_memslots(slots);
 	}
-
-	slots->generation = last_generation + 1;
 }
 
 static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
 {
 	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
 
-#ifdef KVM_CAP_READONLY_MEM
+#ifdef __KVM_HAVE_READONLY_MEM
 	valid_flags |= KVM_MEM_READONLY;
 #endif
 
@@ -723,10 +725,24 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
 {
 	struct kvm_memslots *old_memslots = kvm->memslots;
 
-	update_memslots(slots, new, kvm->memslots->generation);
+	/*
+	 * Set the low bit in the generation, which disables SPTE caching
+	 * until the end of synchronize_srcu_expedited.
+	 */
+	WARN_ON(old_memslots->generation & 1);
+	slots->generation = old_memslots->generation + 1;
+
+	update_memslots(slots, new);
 	rcu_assign_pointer(kvm->memslots, slots);
 	synchronize_srcu_expedited(&kvm->srcu);
 
+	/*
+	 * Increment the new memslot generation a second time. This prevents
+	 * vm exits that race with memslot updates from caching a memslot
+	 * generation that will (potentially) be valid forever.
+	 */
+	slots->generation++;
+
 	kvm_arch_memslots_updated(kvm);
 
 	return old_memslots;
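
The two increments above bracket the SRCU update so that the generation is odd exactly while a memslot change is in flight, and no value observed mid-update can ever match a post-update generation. Here is a runnable userspace sketch of that odd/even protocol; can_cache() and the simplified install_new_memslots() are illustrative, and the real consumers are the SPTE/MMIO caches:

#include <stdbool.h>
#include <stdio.h>

static unsigned long long generation;  /* even = stable, odd = update in flight */

/* The caching rule the two comments above describe: nothing tagged with
 * an odd (in-progress) generation may be cached. */
static bool can_cache(unsigned long long snap)
{
	return (snap & 1) == 0;
}

/* Simplified install_new_memslots(): bump to odd, publish and synchronize,
 * then bump again so a mid-update value never matches a stable one. */
static void install_new_memslots(void)
{
	generation++;   /* odd: caching disabled */
	/* ... swap slot array, synchronize_srcu_expedited() ... */
	generation++;   /* even again, and greater than any mid-update value */
}

int main(void)
{
	printf("stable %llu: cacheable=%d\n", generation, can_cache(generation));
	install_new_memslots();
	printf("mid-update 1: cacheable=%d\n", can_cache(1ULL));   /* 0 */
	printf("stable %llu: cacheable=%d\n", generation, can_cache(generation));
	return 0;
}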
@@ -777,7 +793,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
 	npages = mem->memory_size >> PAGE_SHIFT;
 
-	r = -EINVAL;
 	if (npages > KVM_MEM_MAX_NR_PAGES)
 		goto out;
 
@@ -791,7 +806,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	new.npages = npages;
 	new.flags = mem->flags;
 
-	r = -EINVAL;
 	if (npages) {
 		if (!old.npages)
 			change = KVM_MR_CREATE;
@@ -847,7 +861,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	}
 
 	if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
-		r = -ENOMEM;
 		slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
 				GFP_KERNEL);
 		if (!slots)
@@ -1776,8 +1789,7 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
 	bool eligible;
 
 	eligible = !vcpu->spin_loop.in_spin_loop ||
-		    (vcpu->spin_loop.in_spin_loop &&
-		     vcpu->spin_loop.dy_eligible);
+		    vcpu->spin_loop.dy_eligible;
 
 	if (vcpu->spin_loop.in_spin_loop)
 		kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
@@ -2267,6 +2279,29 @@ struct kvm_device *kvm_device_from_filp(struct file *filp)
 	return filp->private_data;
 }
 
+static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
+#ifdef CONFIG_KVM_MPIC
+	[KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops,
+	[KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops,
+#endif
+
+#ifdef CONFIG_KVM_XICS
+	[KVM_DEV_TYPE_XICS] = &kvm_xics_ops,
+#endif
+};
+
+int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type)
+{
+	if (type >= ARRAY_SIZE(kvm_device_ops_table))
+		return -ENOSPC;
+
+	if (kvm_device_ops_table[type] != NULL)
+		return -EEXIST;
+
+	kvm_device_ops_table[type] = ops;
+	return 0;
+}
+
 static int kvm_ioctl_create_device(struct kvm *kvm,
 				   struct kvm_create_device *cd)
 {
@@ -2275,36 +2310,12 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
 	bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
 	int ret;
 
-	switch (cd->type) {
-#ifdef CONFIG_KVM_MPIC
-	case KVM_DEV_TYPE_FSL_MPIC_20:
-	case KVM_DEV_TYPE_FSL_MPIC_42:
-		ops = &kvm_mpic_ops;
-		break;
-#endif
-#ifdef CONFIG_KVM_XICS
-	case KVM_DEV_TYPE_XICS:
-		ops = &kvm_xics_ops;
-		break;
-#endif
-#ifdef CONFIG_KVM_VFIO
-	case KVM_DEV_TYPE_VFIO:
-		ops = &kvm_vfio_ops;
-		break;
-#endif
-#ifdef CONFIG_KVM_ARM_VGIC
-	case KVM_DEV_TYPE_ARM_VGIC_V2:
-		ops = &kvm_arm_vgic_v2_ops;
-		break;
-#endif
-#ifdef CONFIG_S390
-	case KVM_DEV_TYPE_FLIC:
-		ops = &kvm_flic_ops;
-		break;
-#endif
-	default:
+	if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
+		return -ENODEV;
+
+	ops = kvm_device_ops_table[cd->type];
+	if (ops == NULL)
 		return -ENODEV;
-	}
 
 	if (test)
 		return 0;
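
Together with kvm_device_ops_table above, device creation reduces to a bounds check plus a NULL check, with -ENODEV covering both an out-of-range type and a backend that never registered. A compact userspace model of that lookup follows; ops_table, its slot numbers and lookup() are illustrative stand-ins, not the real KVM_DEV_TYPE_* values:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct device_ops { const char *name; };

static struct device_ops mpic_ops = { "kvm-mpic" };

/* Sparse, type-indexed table, like kvm_device_ops_table above. */
static struct device_ops *ops_table[8] = {
	[1] = &mpic_ops,
	[2] = &mpic_ops,
};

/* The two checks that replaced the #ifdef'd switch: out-of-range and
 * unregistered types both fail with -ENODEV. */
static int lookup(unsigned type, struct device_ops **out)
{
	if (type >= sizeof(ops_table) / sizeof(ops_table[0]))
		return -ENODEV;
	if (ops_table[type] == NULL)
		return -ENODEV;
	*out = ops_table[type];
	return 0;
}

int main(void)
{
	struct device_ops *ops;

	printf("type 1:  %d\n", lookup(1, &ops));    /* 0, found */
	printf("type 3:  %d\n", lookup(3, &ops));    /* -ENODEV, unregistered */
	printf("type 42: %d\n", lookup(42, &ops));   /* -ENODEV, out of range */
	return 0;
}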
@@ -2619,7 +2630,6 @@ static long kvm_dev_ioctl(struct file *filp,
 
 	switch (ioctl) {
 	case KVM_GET_API_VERSION:
-		r = -EINVAL;
 		if (arg)
 			goto out;
 		r = KVM_API_VERSION;
@@ -2631,7 +2641,6 @@ static long kvm_dev_ioctl(struct file *filp,
 		r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
 		break;
 	case KVM_GET_VCPU_MMAP_SIZE:
-		r = -EINVAL;
 		if (arg)
 			goto out;
 		r = PAGE_SIZE;     /* struct kvm_run */
@@ -2676,7 +2685,7 @@ static void hardware_enable_nolock(void *junk)
 
 	cpumask_set_cpu(cpu, cpus_hardware_enabled);
 
-	r = kvm_arch_hardware_enable(NULL);
+	r = kvm_arch_hardware_enable();
 
 	if (r) {
 		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
@@ -2701,7 +2710,7 @@ static void hardware_disable_nolock(void *junk)
 	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
 		return;
 	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
-	kvm_arch_hardware_disable(NULL);
+	kvm_arch_hardware_disable();
 }
 
 static void hardware_disable(void)
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index ba1a93f935c7..bb11b36ee8a2 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -246,6 +246,16 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
 	kfree(dev); /* alloc by kvm_ioctl_create_device, free by .destroy */
 }
 
+static int kvm_vfio_create(struct kvm_device *dev, u32 type);
+
+static struct kvm_device_ops kvm_vfio_ops = {
+	.name = "kvm-vfio",
+	.create = kvm_vfio_create,
+	.destroy = kvm_vfio_destroy,
+	.set_attr = kvm_vfio_set_attr,
+	.has_attr = kvm_vfio_has_attr,
+};
+
 static int kvm_vfio_create(struct kvm_device *dev, u32 type)
 {
 	struct kvm_device *tmp;
@@ -268,10 +278,8 @@ static int kvm_vfio_create(struct kvm_device *dev, u32 type)
 	return 0;
 }
 
-struct kvm_device_ops kvm_vfio_ops = {
-	.name = "kvm-vfio",
-	.create = kvm_vfio_create,
-	.destroy = kvm_vfio_destroy,
-	.set_attr = kvm_vfio_set_attr,
-	.has_attr = kvm_vfio_has_attr,
-};
+static int __init kvm_vfio_ops_init(void)
+{
+	return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO);
+}
+module_init(kvm_vfio_ops_init);
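
Because kvm_vfio_ops now sits above kvm_vfio_create() and points at it, the first hunk has to forward-declare that function; registration itself moves to an initcall via kvm_register_device_ops(). A standalone illustration of that declaration-before-definition ordering (all names below are stand-ins, not the kernel's):

#include <stdio.h>

struct device;                               /* illustrative types only */
static int vfio_create(struct device *dev);  /* forward declaration */

struct device_ops {
	const char *name;
	int (*create)(struct device *);
};

/* The initializer may name vfio_create before its body appears; this is
 * exactly why the hunk adds "static int kvm_vfio_create(...);" above
 * the ops struct. */
static struct device_ops vfio_ops = {
	.name   = "kvm-vfio",
	.create = vfio_create,
};

static int vfio_create(struct device *dev)
{
	(void)dev;
	puts("create called through vfio_ops");
	return 0;
}

int main(void)
{
	return vfio_ops.create(NULL);
}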