Diffstat (limited to 'arch/x86/kvm/x86.c')
 arch/x86/kvm/x86.c | 32 ++++++++++++++++++++------------
1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 941f932373d0..65e4559eef2f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3879,7 +3879,8 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		r = -EINVAL;
 		if (!lapic_in_kernel(vcpu))
 			goto out;
-		u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
+		u.lapic = kzalloc(sizeof(struct kvm_lapic_state),
+				  GFP_KERNEL_ACCOUNT);
 
 		r = -ENOMEM;
 		if (!u.lapic)
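The GFP_KERNEL to GFP_KERNEL_ACCOUNT conversions in this hunk and the ones below make these vCPU- and VM-related allocations chargeable to the kernel memory controller of the cgroup owning the calling task. GFP_KERNEL_ACCOUNT is GFP_KERNEL with __GFP_ACCOUNT set; a minimal illustrative sketch (using the kvm_lapic_state buffer above as a stand-in for any of the converted allocations):

	/* These two requests are equivalent: both charge the memory to the memcg. */
	u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL_ACCOUNT);

	u.lapic = kzalloc(sizeof(struct kvm_lapic_state),
			  GFP_KERNEL | __GFP_ACCOUNT);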
@@ -4066,7 +4067,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		break;
 	}
 	case KVM_GET_XSAVE: {
-		u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
+		u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL_ACCOUNT);
 		r = -ENOMEM;
 		if (!u.xsave)
 			break;
@@ -4090,7 +4091,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		break;
 	}
 	case KVM_GET_XCRS: {
-		u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
+		u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL_ACCOUNT);
 		r = -ENOMEM;
 		if (!u.xcrs)
 			break;
@@ -7055,6 +7056,13 @@ static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
 
 void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
 {
+	if (!lapic_in_kernel(vcpu)) {
+		WARN_ON_ONCE(vcpu->arch.apicv_active);
+		return;
+	}
+	if (!vcpu->arch.apicv_active)
+		return;
+
 	vcpu->arch.apicv_active = false;
 	kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
 }
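With the added guards, deactivation becomes a no-op for vCPUs without an in-kernel local APIC (where APICv should never have been marked active, hence the WARN_ON_ONCE) and for vCPUs whose APICv is already off, so refresh_apicv_exec_ctrl() is only called on a genuine active-to-inactive transition. This pairs with the kvm_arch_vcpu_init() hunks below, which only initialize apicv_active when the irqchip is in the kernel. As reconstructed from this hunk, the function now reads:

	void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
	{
		if (!lapic_in_kernel(vcpu)) {
			WARN_ON_ONCE(vcpu->arch.apicv_active);
			return;
		}
		if (!vcpu->arch.apicv_active)
			return;

		vcpu->arch.apicv_active = false;
		kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
	}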
@@ -9005,7 +9013,6 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	struct page *page;
 	int r;
 
-	vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu);
 	vcpu->arch.emulate_ctxt.ops = &emulate_ops;
 	if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
@@ -9026,6 +9033,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 		goto fail_free_pio_data;
 
 	if (irqchip_in_kernel(vcpu->kvm)) {
+		vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu);
 		r = kvm_create_lapic(vcpu);
 		if (r < 0)
 			goto fail_mmu_destroy;
@@ -9033,14 +9041,15 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 		static_key_slow_inc(&kvm_no_apic_vcpu);
 
 	vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
-				       GFP_KERNEL);
+				       GFP_KERNEL_ACCOUNT);
 	if (!vcpu->arch.mce_banks) {
 		r = -ENOMEM;
 		goto fail_free_lapic;
 	}
 	vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
 
-	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) {
+	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask,
+				GFP_KERNEL_ACCOUNT)) {
 		r = -ENOMEM;
 		goto fail_free_mce_banks;
 	}
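For context on the size computation kept intact above: mce_banks is a flat u64 array with four registers per MCE bank (the per-bank CTL, STATUS, ADDR and MISC MSRs), hence KVM_MAX_MCE_BANKS * sizeof(u64) * 4. A rough sketch of how such a layout can be indexed; the helper and its names are ours for illustration, not part of this diff:

	/* Hypothetical helper: locate one MCE register inside the flat array. */
	static inline u64 *mce_bank_reg(struct kvm_vcpu *vcpu,
					unsigned int bank, unsigned int reg)
	{
		/* reg: 0 = CTL, 1 = STATUS, 2 = ADDR, 3 = MISC */
		return &vcpu->arch.mce_banks[bank * 4 + reg];
	}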
@@ -9104,7 +9113,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 	INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
-	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
 	atomic_set(&kvm->arch.noncoherent_dma_count, 0);
 
@@ -9299,13 +9307,13 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 
 		slot->arch.rmap[i] =
 			kvcalloc(lpages, sizeof(*slot->arch.rmap[i]),
-				 GFP_KERNEL);
+				 GFP_KERNEL_ACCOUNT);
 		if (!slot->arch.rmap[i])
 			goto out_free;
 		if (i == 0)
 			continue;
 
-		linfo = kvcalloc(lpages, sizeof(*linfo), GFP_KERNEL);
+		linfo = kvcalloc(lpages, sizeof(*linfo), GFP_KERNEL_ACCOUNT);
 		if (!linfo)
 			goto out_free;
 
@@ -9348,13 +9356,13 @@ out_free:
 	return -ENOMEM;
 }
 
-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
+void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
 {
 	/*
 	 * memslots->generation has been incremented.
 	 * mmio generation may have reached its maximum value.
 	 */
-	kvm_mmu_invalidate_mmio_sptes(kvm, slots);
+	kvm_mmu_invalidate_mmio_sptes(kvm, gen);
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
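The hunk above reflects an interface change rather than a behavioral one in this file: instead of handing the architecture code the whole struct kvm_memslots just so it can read the generation, the generic memslot code now passes the already-incremented generation value directly, and kvm_mmu_invalidate_mmio_sptes() is presumably updated in mmu.c (outside this diff) to take the same argument. The prototypes implied by the hunk, as a sketch:

	void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
	void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);	/* assumed, defined in mmu.c */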
@@ -9462,7 +9470,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 
 void kvm_arch_flush_shadow_all(struct kvm *kvm)
 {
-	kvm_mmu_invalidate_zap_all_pages(kvm);
+	kvm_mmu_zap_all(kvm);
 }
 
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,