Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--   virt/kvm/kvm_main.c   63
1 file changed, 31 insertions(+), 32 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c82ae2492634..f032806a212f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -422,9 +422,6 @@ static struct kvm *kvm_create_vm(void)
         spin_lock(&kvm_lock);
         list_add(&kvm->vm_list, &vm_list);
         spin_unlock(&kvm_lock);
-#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
-        kvm_coalesced_mmio_init(kvm);
-#endif
 out:
         return kvm;
 
@@ -560,6 +557,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
         base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
         npages = mem->memory_size >> PAGE_SHIFT;
 
+        r = -EINVAL;
+        if (npages > KVM_MEM_MAX_NR_PAGES)
+                goto out;
+
         if (!npages)
                 mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
 
@@ -833,7 +834,7 @@ EXPORT_SYMBOL_GPL(kvm_is_error_hva);
 struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
 {
         int i;
-        struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
+        struct kvm_memslots *slots = kvm_memslots(kvm);
 
         for (i = 0; i < slots->nmemslots; ++i) {
                 struct kvm_memory_slot *memslot = &slots->memslots[i];
@@ -855,7 +856,7 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 {
         int i;
-        struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
+        struct kvm_memslots *slots = kvm_memslots(kvm);
 
         gfn = unalias_gfn_instantiation(kvm, gfn);
         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
@@ -899,7 +900,7 @@ out:
 int memslot_id(struct kvm *kvm, gfn_t gfn)
 {
         int i;
-        struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
+        struct kvm_memslots *slots = kvm_memslots(kvm);
         struct kvm_memory_slot *memslot = NULL;
 
         gfn = unalias_gfn(kvm, gfn);
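The three hunks above replace the open-coded rcu_dereference(kvm->memslots)
with a kvm_memslots() accessor whose definition lives outside this file, so it
does not appear in this diff. As a sketch of the assumed shape (the real
helper, presumably in include/linux/kvm_host.h, may carry additional
SRCU/lockdep checking):

/* Assumed shape of the kvm_memslots() accessor used above (sketch only). */
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
        return rcu_dereference(kvm->memslots);
}

Funneling every reader through one accessor lets the protection scheme change
in a single place instead of at each call site.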
@@ -914,6 +915,11 @@ int memslot_id(struct kvm *kvm, gfn_t gfn)
         return memslot - slots->memslots;
 }
 
+static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
+{
+        return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
+}
+
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 {
         struct kvm_memory_slot *slot;
@@ -922,7 +928,7 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
         slot = gfn_to_memslot_unaliased(kvm, gfn);
         if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
                 return bad_hva();
-        return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
+        return gfn_to_hva_memslot(slot, gfn);
 }
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 
@@ -972,11 +978,6 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn);
 
-static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
-{
-        return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
-}
-
 pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
                          struct kvm_memory_slot *slot, gfn_t gfn)
 {
@@ -1190,13 +1191,8 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
         memslot = gfn_to_memslot_unaliased(kvm, gfn);
         if (memslot && memslot->dirty_bitmap) {
                 unsigned long rel_gfn = gfn - memslot->base_gfn;
-                unsigned long *p = memslot->dirty_bitmap +
-                                        rel_gfn / BITS_PER_LONG;
-                int offset = rel_gfn % BITS_PER_LONG;
 
-                /* avoid RMW */
-                if (!generic_test_le_bit(offset, p))
-                        generic___set_le_bit(offset, p);
+                generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
         }
 }
 
@@ -1609,7 +1605,6 @@ static long kvm_vm_ioctl(struct file *filp,
                 r = -EFAULT;
                 if (copy_from_user(&zone, argp, sizeof zone))
                         goto out;
-                r = -ENXIO;
                 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
                 if (r)
                         goto out;
@@ -1621,7 +1616,6 @@ static long kvm_vm_ioctl(struct file *filp,
                 r = -EFAULT;
                 if (copy_from_user(&zone, argp, sizeof zone))
                         goto out;
-                r = -ENXIO;
                 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
                 if (r)
                         goto out;
@@ -1755,12 +1749,19 @@ static struct file_operations kvm_vm_fops = {
 
 static int kvm_dev_ioctl_create_vm(void)
 {
-        int fd;
+        int fd, r;
         struct kvm *kvm;
 
         kvm = kvm_create_vm();
         if (IS_ERR(kvm))
                 return PTR_ERR(kvm);
+#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
+        r = kvm_coalesced_mmio_init(kvm);
+        if (r < 0) {
+                kvm_put_kvm(kvm);
+                return r;
+        }
+#endif
         fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
         if (fd < 0)
                 kvm_put_kvm(kvm);
@@ -1928,11 +1929,6 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
                        cpu);
                 hardware_disable(NULL);
                 break;
-        case CPU_UP_CANCELED:
-                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
-                       cpu);
-                smp_call_function_single(cpu, hardware_disable, NULL, 1);
-                break;
         case CPU_ONLINE:
                 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
                        cpu);
@@ -1991,7 +1987,9 @@ int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                      int len, const void *val)
 {
         int i;
-        struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);
+        struct kvm_io_bus *bus;
+
+        bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
         for (i = 0; i < bus->dev_count; i++)
                 if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
                         return 0;
@@ -2003,8 +2001,9 @@ int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                     int len, void *val)
 {
         int i;
-        struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);
+        struct kvm_io_bus *bus;
 
+        bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
         for (i = 0; i < bus->dev_count; i++)
                 if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
                         return 0;
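These two hunks change only the reader side. For context, the update side that
the srcu_dereference() calls pair with presumably publishes a new bus table and
then waits out in-flight readers of kvm->srcu; a sketch under that assumption
(new_bus and old_bus are illustrative names, not from this diff):

/* Assumed write side (sketch): publish the new table, synchronize, free. */
rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
synchronize_srcu_expedited(&kvm->srcu);
kfree(old_bus);

srcu_dereference() documents, and under CONFIG_PROVE_RCU can verify, that the
access is protected by the kvm->srcu domain rather than by an ordinary
rcu_read_lock() section, which plain rcu_dereference() would imply.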
@@ -2179,7 +2178,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
         kvm_arch_vcpu_put(vcpu);
 }
 
-int kvm_init(void *opaque, unsigned int vcpu_size,
+int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
              struct module *module)
 {
         int r;
@@ -2229,8 +2228,9 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
                 goto out_free_4;
 
         /* A kmem cache lets us meet the alignment requirements of fx_save. */
-        kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
-                                           __alignof__(struct kvm_vcpu),
+        if (!vcpu_align)
+                vcpu_align = __alignof__(struct kvm_vcpu);
+        kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
                                            0, NULL);
         if (!kvm_vcpu_cache) {
                 r = -ENOMEM;
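With the widened signature, every kvm_init() caller now supplies vcpu_align;
passing 0 falls back to __alignof__(struct kvm_vcpu), preserving the old
behaviour. A hypothetical arch-side call (vcpu_foo and foo_kvm_ops are
invented names for illustration):

/* Hypothetical caller: vcpu_foo embeds struct kvm_vcpu and may need
 * stricter alignment than the generic structure provides. */
r = kvm_init(&foo_kvm_ops, sizeof(struct vcpu_foo),
             __alignof__(struct vcpu_foo), THIS_MODULE);
if (r)
        return r;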
@@ -2279,7 +2279,6 @@ EXPORT_SYMBOL_GPL(kvm_init);
 
 void kvm_exit(void)
 {
-        tracepoint_synchronize_unregister();
         kvm_exit_debug();
         misc_deregister(&kvm_dev);
         kmem_cache_destroy(kvm_vcpu_cache);