Diffstat (limited to 'virt/kvm/kvm_main.c')
 virt/kvm/kvm_main.c | 103 +++++++++++++++++++++++++----------------------
 1 file changed, 54 insertions(+), 49 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d237d3350a99..f25aa98a94df 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -81,6 +81,11 @@ unsigned int halt_poll_ns_grow = 2;
 module_param(halt_poll_ns_grow, uint, 0644);
 EXPORT_SYMBOL_GPL(halt_poll_ns_grow);
 
+/* The start value to grow halt_poll_ns from */
+unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
+module_param(halt_poll_ns_grow_start, uint, 0644);
+EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);
+
 /* Default resets per-vcpu halt_poll_ns . */
 unsigned int halt_poll_ns_shrink;
 module_param(halt_poll_ns_shrink, uint, 0644);
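(Note: with 0644 permissions the new knob should be tunable at runtime.
Assuming the usual module-parameter sysfs layout for kvm — a path this
patch does not itself state — it would appear as
/sys/module/kvm/parameters/halt_poll_ns_grow_start.)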
@@ -525,7 +530,7 @@ static struct kvm_memslots *kvm_alloc_memslots(void)
 	int i;
 	struct kvm_memslots *slots;
 
-	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
 	if (!slots)
 		return NULL;
 
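(Note: GFP_KERNEL_ACCOUNT, used throughout this patch in place of
GFP_KERNEL, makes the allocation chargeable to the caller's memory
cgroup. For reference — from include/linux/gfp.h, not part of this
patch:

	#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)

so these allocations behave exactly like GFP_KERNEL plus memcg
accounting.)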
@@ -601,12 +606,12 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
 
 	kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
 					 sizeof(*kvm->debugfs_stat_data),
-					 GFP_KERNEL);
+					 GFP_KERNEL_ACCOUNT);
 	if (!kvm->debugfs_stat_data)
 		return -ENOMEM;
 
 	for (p = debugfs_entries; p->name; p++) {
-		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL);
+		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
 		if (!stat_data)
 			return -ENOMEM;
 
@@ -656,12 +661,8 @@ static struct kvm *kvm_create_vm(unsigned long type)
 		struct kvm_memslots *slots = kvm_alloc_memslots();
 		if (!slots)
 			goto out_err_no_srcu;
-		/*
-		 * Generations must be different for each address space.
-		 * Init kvm generation close to the maximum to easily test the
-		 * code of handling generation number wrap-around.
-		 */
-		slots->generation = i * 2 - 150;
+		/* Generations must be different for each address space. */
+		slots->generation = i;
 		rcu_assign_pointer(kvm->memslots[i], slots);
 	}
 
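(Note: with the wrap-around test value gone, address space i now simply
starts at generation i; combined with the KVM_ADDRESS_SPACE_NUM stride
applied in install_new_memslots() below, each address space keeps a
disjoint generation sequence.)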
@@ -671,7 +672,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
 		goto out_err_no_irq_srcu;
 	for (i = 0; i < KVM_NR_BUSES; i++) {
 		rcu_assign_pointer(kvm->buses[i],
-			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL));
+			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
 		if (!kvm->buses[i])
 			goto out_err;
 	}
@@ -789,7 +790,7 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
 {
 	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
 
-	memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL);
+	memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL_ACCOUNT);
 	if (!memslot->dirty_bitmap)
 		return -ENOMEM;
 
@@ -874,31 +875,34 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
 				   int as_id, struct kvm_memslots *slots)
 {
 	struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
+	u64 gen = old_memslots->generation;
 
-	/*
-	 * Set the low bit in the generation, which disables SPTE caching
-	 * until the end of synchronize_srcu_expedited.
-	 */
-	WARN_ON(old_memslots->generation & 1);
-	slots->generation = old_memslots->generation + 1;
+	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
+	slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
 
 	rcu_assign_pointer(kvm->memslots[as_id], slots);
 	synchronize_srcu_expedited(&kvm->srcu);
 
 	/*
-	 * Increment the new memslot generation a second time. This prevents
-	 * vm exits that race with memslot updates from caching a memslot
-	 * generation that will (potentially) be valid forever.
-	 *
+	 * Increment the new memslot generation a second time, dropping the
+	 * update in-progress flag and incrementing the generation based on
+	 * the number of address spaces. This provides a unique and easily
+	 * identifiable generation number while the memslots are in flux.
+	 */
+	gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
+
+	/*
 	 * Generations must be unique even across address spaces. We do not need
 	 * a global counter for that, instead the generation space is evenly split
 	 * across address spaces. For example, with two address spaces, address
-	 * space 0 will use generations 0, 4, 8, ... while * address space 1 will
-	 * use generations 2, 6, 10, 14, ...
+	 * space 0 will use generations 0, 2, 4, ... while address space 1 will
+	 * use generations 1, 3, 5, ...
 	 */
-	slots->generation += KVM_ADDRESS_SPACE_NUM * 2 - 1;
+	gen += KVM_ADDRESS_SPACE_NUM;
+
+	kvm_arch_memslots_updated(kvm, gen);
 
-	kvm_arch_memslots_updated(kvm, slots);
+	slots->generation = gen;
 
 	return old_memslots;
 }
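(Illustrative walk-through of the update above, assuming two address
spaces and an in-progress flag bit well above the counter range — the
flag's exact position is defined in kvm_host.h, outside this file:

	u64 gen = 4;					/* stable generation, address space 0 */
	gen |= KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;	/* readers see "update in flight" */
	/* ... synchronize_srcu_expedited() ... */
	gen &= ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;	/* back to 4 */
	gen += KVM_ADDRESS_SPACE_NUM;			/* 6: next generation for space 0 */

A generation cached while the flag is set can therefore never collide
with a stable generation handed to kvm_arch_memslots_updated().)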
@@ -1018,7 +1022,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		goto out_free;
 	}
 
-	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
 	if (!slots)
 		goto out_free;
 	memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));
@@ -1201,11 +1205,9 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
 			mask = xchg(&dirty_bitmap[i], 0);
 			dirty_bitmap_buffer[i] = mask;
 
-			if (mask) {
-				offset = i * BITS_PER_LONG;
-				kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
-									offset, mask);
-			}
+			offset = i * BITS_PER_LONG;
+			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
+								offset, mask);
 		}
 		spin_unlock(&kvm->mmu_lock);
 	}
@@ -2185,20 +2187,23 @@ void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
 
 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
 {
-	unsigned int old, val, grow;
+	unsigned int old, val, grow, grow_start;
 
 	old = val = vcpu->halt_poll_ns;
+	grow_start = READ_ONCE(halt_poll_ns_grow_start);
 	grow = READ_ONCE(halt_poll_ns_grow);
-	/* 10us base */
-	if (val == 0 && grow)
-		val = 10000;
-	else
-		val *= grow;
+	if (!grow)
+		goto out;
+
+	val *= grow;
+	if (val < grow_start)
+		val = grow_start;
 
 	if (val > halt_poll_ns)
 		val = halt_poll_ns;
 
 	vcpu->halt_poll_ns = val;
+out:
 	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
 }
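(Worked example of the new grow path, assuming the defaults in this
file — grow = 2, grow_start = 10000 — and a halt_poll_ns cap of
200000 ns, which is an assumption, not taken from this hunk:

	/* val = 0:      0 * 2 = 0       -> raised to grow_start (10000) */
	/* val = 10000:  10000 * 2       -> 20000                        */
	/* val = 160000: 160000 * 2      -> 320000, capped to 200000     */

The grow_start floor replaces the old hard-coded "10us base" special
case for val == 0.)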
2204 2209
@@ -2683,7 +2688,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
 		struct kvm_regs *kvm_regs;
 
 		r = -ENOMEM;
-		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
+		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
 		if (!kvm_regs)
 			goto out;
 		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
@@ -2711,7 +2716,8 @@ out_free1:
 		break;
 	}
 	case KVM_GET_SREGS: {
-		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
+		kvm_sregs = kzalloc(sizeof(struct kvm_sregs),
+				    GFP_KERNEL_ACCOUNT);
 		r = -ENOMEM;
 		if (!kvm_sregs)
 			goto out;
@@ -2803,7 +2809,7 @@ out_free1:
 		break;
 	}
 	case KVM_GET_FPU: {
-		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
+		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT);
 		r = -ENOMEM;
 		if (!fpu)
 			goto out;
@@ -2980,7 +2986,7 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
 	if (test)
 		return 0;
 
-	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
 	if (!dev)
 		return -ENOMEM;
 
@@ -3625,6 +3631,7 @@ int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
 	r = __kvm_io_bus_write(vcpu, bus, &range, val);
 	return r < 0 ? r : 0;
 }
+EXPORT_SYMBOL_GPL(kvm_io_bus_write);
 
 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */
 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
@@ -3675,7 +3682,6 @@ static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
 
 	return -EOPNOTSUPP;
 }
-EXPORT_SYMBOL_GPL(kvm_io_bus_write);
 
 /* kvm_io_bus_read - called under kvm->slots_lock */
 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
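(Note: this hunk and the one above move EXPORT_SYMBOL_GPL(kvm_io_bus_write)
from after the static helper __kvm_io_bus_read(), where it was misplaced,
to directly after the kvm_io_bus_write() definition it exports.)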
@@ -3697,7 +3703,6 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
 	return r < 0 ? r : 0;
 }
 
-
 /* Caller must hold slots_lock. */
 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 			    int len, struct kvm_io_device *dev)
@@ -3714,8 +3719,8 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 	if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
 		return -ENOSPC;
 
-	new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count + 1) *
-			  sizeof(struct kvm_io_range)), GFP_KERNEL);
+	new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
+			  GFP_KERNEL_ACCOUNT);
 	if (!new_bus)
 		return -ENOMEM;
 
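(Note: struct_size() is the helper from include/linux/overflow.h; it
computes the size of *bus plus (bus->dev_count + 1) trailing elements of
bus->range[] and saturates instead of wrapping on overflow, which is why
it replaces the open-coded arithmetic:

	/* roughly equivalent, minus the overflow checking: */
	size_t sz = sizeof(*bus) + (bus->dev_count + 1) * sizeof(bus->range[0]);
)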
@@ -3760,8 +3765,8 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 	if (i == bus->dev_count)
 		return;
 
-	new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
-			  sizeof(struct kvm_io_range)), GFP_KERNEL);
+	new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
+			  GFP_KERNEL_ACCOUNT);
 	if (!new_bus) {
 		pr_err("kvm: failed to shrink bus, removing it completely\n");
 		goto broken;
@@ -4029,7 +4034,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
 	active = kvm_active_vms;
 	spin_unlock(&kvm_lock);
 
-	env = kzalloc(sizeof(*env), GFP_KERNEL);
+	env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
 	if (!env)
 		return;
 
@@ -4045,7 +4050,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
 	add_uevent_var(env, "PID=%d", kvm->userspace_pid);
 
 	if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) {
-		char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL);
+		char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
 
 		if (p) {
 			tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);