author		Marcelo Tosatti <mtosatti@redhat.com>	2009-12-23 11:35:24 -0500
committer	Marcelo Tosatti <mtosatti@redhat.com>	2010-03-01 10:35:45 -0500
commit		e93f8a0f821e290ac5149830110a5f704db7a1fc
tree		f88a5e97286031a8105d63a069a4131b74dab113 /virt
parent		a983fb238728e1123177e8058d4f644b949a7d05
KVM: convert io_bus to SRCU
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'virt')

-rw-r--r--	virt/kvm/coalesced_mmio.c	  4
-rw-r--r--	virt/kvm/eventfd.c		  8
-rw-r--r--	virt/kvm/ioapic.c		  4
-rw-r--r--	virt/kvm/kvm_main.c		106

4 files changed, 73 insertions, 49 deletions
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index d68e6c68e0ff..a736a93ca7b7 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -110,7 +110,9 @@ int kvm_coalesced_mmio_init(struct kvm *kvm)
 	dev->kvm = kvm;
 	kvm->coalesced_mmio_dev = dev;
 
-	ret = kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &dev->dev);
+	down_write(&kvm->slots_lock);
+	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
+	up_write(&kvm->slots_lock);
 	if (ret < 0)
 		goto out_free_dev;
 
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index a9d3fc6c681c..315a586ec4d5 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -463,7 +463,7 @@ static int
 kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 {
 	int pio = args->flags & KVM_IOEVENTFD_FLAG_PIO;
-	struct kvm_io_bus *bus = pio ? &kvm->pio_bus : &kvm->mmio_bus;
+	enum kvm_bus bus_idx = pio ? KVM_PIO_BUS : KVM_MMIO_BUS;
 	struct _ioeventfd *p;
 	struct eventfd_ctx *eventfd;
 	int ret;
@@ -518,7 +518,7 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 
 	kvm_iodevice_init(&p->dev, &ioeventfd_ops);
 
-	ret = __kvm_io_bus_register_dev(bus, &p->dev);
+	ret = kvm_io_bus_register_dev(kvm, bus_idx, &p->dev);
 	if (ret < 0)
 		goto unlock_fail;
 
@@ -542,7 +542,7 @@ static int
 kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 {
 	int pio = args->flags & KVM_IOEVENTFD_FLAG_PIO;
-	struct kvm_io_bus *bus = pio ? &kvm->pio_bus : &kvm->mmio_bus;
+	enum kvm_bus bus_idx = pio ? KVM_PIO_BUS : KVM_MMIO_BUS;
 	struct _ioeventfd *p, *tmp;
 	struct eventfd_ctx *eventfd;
 	int ret = -ENOENT;
@@ -565,7 +565,7 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 		if (!p->wildcard && p->datamatch != args->datamatch)
 			continue;
 
-		__kvm_io_bus_unregister_dev(bus, &p->dev);
+		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
 		ioeventfd_release(p);
 		ret = 0;
 		break;
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 38a2d20b89de..f326a6f301cc 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -372,7 +372,9 @@ int kvm_ioapic_init(struct kvm *kvm)
 	kvm_ioapic_reset(ioapic);
 	kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
 	ioapic->kvm = kvm;
-	ret = kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &ioapic->dev);
+	down_write(&kvm->slots_lock);
+	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
+	up_write(&kvm->slots_lock);
 	if (ret < 0)
 		kfree(ioapic);
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c680f7b64c6f..659bc12ad16a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -85,6 +85,8 @@ static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
 static int hardware_enable_all(void);
 static void hardware_disable_all(void);
 
+static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
+
 static bool kvm_rebooting;
 
 static bool largepages_enabled = true;
@@ -367,7 +369,7 @@ static int kvm_init_mmu_notifier(struct kvm *kvm)
 
 static struct kvm *kvm_create_vm(void)
 {
-	int r = 0;
+	int r = 0, i;
 	struct kvm *kvm = kvm_arch_create_vm();
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
 	struct page *page;
@@ -391,6 +393,14 @@ static struct kvm *kvm_create_vm(void)
 		goto out_err;
 	if (init_srcu_struct(&kvm->srcu))
 		goto out_err;
+	for (i = 0; i < KVM_NR_BUSES; i++) {
+		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
+					GFP_KERNEL);
+		if (!kvm->buses[i]) {
+			cleanup_srcu_struct(&kvm->srcu);
+			goto out_err;
+		}
+	}
 
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
@@ -416,11 +426,9 @@ static struct kvm *kvm_create_vm(void)
 	atomic_inc(&kvm->mm->mm_count);
 	spin_lock_init(&kvm->mmu_lock);
 	spin_lock_init(&kvm->requests_lock);
-	kvm_io_bus_init(&kvm->pio_bus);
 	kvm_eventfd_init(kvm);
 	mutex_init(&kvm->lock);
 	mutex_init(&kvm->irq_lock);
-	kvm_io_bus_init(&kvm->mmio_bus);
 	init_rwsem(&kvm->slots_lock);
 	atomic_set(&kvm->users_count, 1);
 	spin_lock(&kvm_lock);
@@ -435,6 +443,8 @@ out:
 out_err:
 	hardware_disable_all();
 out_err_nodisable:
+	for (i = 0; i < KVM_NR_BUSES; i++)
+		kfree(kvm->buses[i]);
 	kfree(kvm->memslots);
 	kfree(kvm);
 	return ERR_PTR(r);
@@ -480,6 +490,7 @@ void kvm_free_physmem(struct kvm *kvm)
 
 static void kvm_destroy_vm(struct kvm *kvm)
 {
+	int i;
 	struct mm_struct *mm = kvm->mm;
 
 	kvm_arch_sync_events(kvm);
@@ -487,8 +498,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	list_del(&kvm->vm_list);
 	spin_unlock(&kvm_lock);
 	kvm_free_irq_routing(kvm);
-	kvm_io_bus_destroy(&kvm->pio_bus);
-	kvm_io_bus_destroy(&kvm->mmio_bus);
+	for (i = 0; i < KVM_NR_BUSES; i++)
+		kvm_io_bus_destroy(kvm->buses[i]);
 	kvm_coalesced_mmio_free(kvm);
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
@@ -1949,12 +1960,7 @@ static struct notifier_block kvm_reboot_notifier = {
 	.priority = 0,
 };
 
-void kvm_io_bus_init(struct kvm_io_bus *bus)
-{
-	memset(bus, 0, sizeof(*bus));
-}
-
-void kvm_io_bus_destroy(struct kvm_io_bus *bus)
+static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
 {
 	int i;
 
@@ -1963,13 +1969,15 @@ void kvm_io_bus_destroy(struct kvm_io_bus *bus)
 
 		kvm_iodevice_destructor(pos);
 	}
+	kfree(bus);
 }
 
 /* kvm_io_bus_write - called under kvm->slots_lock */
-int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
+int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 		     int len, const void *val)
 {
 	int i;
+	struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);
 	for (i = 0; i < bus->dev_count; i++)
 		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
 			return 0;
@@ -1977,59 +1985,71 @@ int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
 }
 
 /* kvm_io_bus_read - called under kvm->slots_lock */
-int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len, void *val)
+int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+		    int len, void *val)
 {
 	int i;
+	struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);
+
 	for (i = 0; i < bus->dev_count; i++)
 		if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
 			return 0;
 	return -EOPNOTSUPP;
 }
 
-int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
-			    struct kvm_io_device *dev)
+/* Caller must have write lock on slots_lock. */
+int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+			    struct kvm_io_device *dev)
 {
-	int ret;
-
-	down_write(&kvm->slots_lock);
-	ret = __kvm_io_bus_register_dev(bus, dev);
-	up_write(&kvm->slots_lock);
+	struct kvm_io_bus *new_bus, *bus;
 
-	return ret;
-}
-
-/* An unlocked version. Caller must have write lock on slots_lock. */
-int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
-			      struct kvm_io_device *dev)
-{
+	bus = kvm->buses[bus_idx];
 	if (bus->dev_count > NR_IOBUS_DEVS-1)
 		return -ENOSPC;
 
-	bus->devs[bus->dev_count++] = dev;
+	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
+	if (!new_bus)
+		return -ENOMEM;
+	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
+	new_bus->devs[new_bus->dev_count++] = dev;
+	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
+	synchronize_srcu_expedited(&kvm->srcu);
+	kfree(bus);
 
 	return 0;
 }
 
-void kvm_io_bus_unregister_dev(struct kvm *kvm,
-			       struct kvm_io_bus *bus,
-			       struct kvm_io_device *dev)
+/* Caller must have write lock on slots_lock. */
+int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+			      struct kvm_io_device *dev)
 {
-	down_write(&kvm->slots_lock);
-	__kvm_io_bus_unregister_dev(bus, dev);
-	up_write(&kvm->slots_lock);
-}
+	int i, r;
+	struct kvm_io_bus *new_bus, *bus;
 
-/* An unlocked version. Caller must have write lock on slots_lock. */
-void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
-				 struct kvm_io_device *dev)
-{
-	int i;
+	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
+	if (!new_bus)
+		return -ENOMEM;
 
-	for (i = 0; i < bus->dev_count; i++)
-		if (bus->devs[i] == dev) {
-			bus->devs[i] = bus->devs[--bus->dev_count];
+	bus = kvm->buses[bus_idx];
+	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
+
+	r = -ENOENT;
+	for (i = 0; i < new_bus->dev_count; i++)
+		if (new_bus->devs[i] == dev) {
+			r = 0;
+			new_bus->devs[i] = new_bus->devs[--new_bus->dev_count];
 			break;
 		}
+
+	if (r) {
+		kfree(new_bus);
+		return r;
+	}
+
+	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
+	synchronize_srcu_expedited(&kvm->srcu);
+	kfree(bus);
+	return r;
 }
 
 static struct notifier_block kvm_cpu_notifier = {
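
The read side of the conversion is not visible in this diff: kvm_io_bus_write() and kvm_io_bus_read() only rcu_dereference() the published bus copy, and their callers must keep that copy alive, either by holding slots_lock for read (as the comments above still say) or, as the series moves those paths over, by entering an SRCU read-side critical section on kvm->srcu. A minimal sketch of the SRCU-protected caller follows; only the SRCU primitives and the new kvm_io_bus_write() signature come from the kernel, the wrapper function name is invented for illustration.

/*
 * Illustrative sketch, not part of this patch.  A caller brackets the bus
 * access with an SRCU read-side critical section so that the copy published
 * by rcu_assign_pointer() above cannot be kfree()d until
 * synchronize_srcu_expedited() has seen this reader finish.
 */
static int example_deliver_mmio_write(struct kvm *kvm, gpa_t addr,
				      int len, const void *val)
{
	int idx, ret;

	idx = srcu_read_lock(&kvm->srcu);	/* pin the current buses[] copy */
	ret = kvm_io_bus_write(kvm, KVM_MMIO_BUS, addr, len, val);
	srcu_read_unlock(&kvm->srcu, idx);	/* old copy may now be freed */

	return ret;
}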