Diffstat (limited to 'virt')
 -rw-r--r--  virt/kvm/Kconfig    |  3
 -rw-r--r--  virt/kvm/arm/arm.c  | 90
 -rw-r--r--  virt/kvm/kvm_main.c | 50
 3 files changed, 88 insertions, 55 deletions
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index 70691c08e1ed..cca7e065a075 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -51,3 +51,6 @@ config KVM_COMPAT
 
 config HAVE_KVM_IRQ_BYPASS
        bool
+
+config HAVE_KVM_VCPU_ASYNC_IOCTL
+       bool
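The new HAVE_KVM_VCPU_ASYNC_IOCTL symbol pairs with header plumbing in include/linux/kvm_host.h, which lies outside virt/ and therefore does not appear in this diffstat. A minimal sketch of what that fallback presumably looks like, inferred from the kvm_arch_vcpu_async_ioctl() call added to kvm_vcpu_ioctl() below (the exact header text is an assumption, not part of this diff):

/* Sketch of the presumed kvm_host.h side (not shown in this virt/-only diff). */
#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
/* Architectures that select HAVE_KVM_VCPU_ASYNC_IOCTL provide this hook. */
long kvm_arch_vcpu_async_ioctl(struct file *filp,
                               unsigned int ioctl, unsigned long arg);
#else
/* Default: no asynchronous ioctls; everything takes the locked path. */
static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
                                             unsigned int ioctl,
                                             unsigned long arg)
{
        return -ENOIOCTLCMD;
}
#endif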
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 04ee7a327870..92b95ae9a2ca 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -384,17 +384,24 @@ static void vcpu_power_off(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                     struct kvm_mp_state *mp_state)
 {
+        vcpu_load(vcpu);
+
         if (vcpu->arch.power_off)
                 mp_state->mp_state = KVM_MP_STATE_STOPPED;
         else
                 mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
 
+        vcpu_put(vcpu);
         return 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                     struct kvm_mp_state *mp_state)
 {
+        int ret = 0;
+
+        vcpu_load(vcpu);
+
         switch (mp_state->mp_state) {
         case KVM_MP_STATE_RUNNABLE:
                 vcpu->arch.power_off = false;
@@ -403,10 +410,11 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                 vcpu_power_off(vcpu);
                 break;
         default:
-                return -EINVAL;
+                ret = -EINVAL;
         }
 
-        return 0;
+        vcpu_put(vcpu);
+        return ret;
 }
 
 /**
@@ -630,21 +638,27 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
         if (unlikely(!kvm_vcpu_initialized(vcpu)))
                 return -ENOEXEC;
 
+        vcpu_load(vcpu);
+
         ret = kvm_vcpu_first_run_init(vcpu);
         if (ret)
-                return ret;
+                goto out;
 
         if (run->exit_reason == KVM_EXIT_MMIO) {
                 ret = kvm_handle_mmio_return(vcpu, vcpu->run);
                 if (ret)
-                        return ret;
-                if (kvm_arm_handle_step_debug(vcpu, vcpu->run))
-                        return 0;
+                        goto out;
+                if (kvm_arm_handle_step_debug(vcpu, vcpu->run)) {
+                        ret = 0;
+                        goto out;
+                }
 
         }
 
-        if (run->immediate_exit)
-                return -EINTR;
+        if (run->immediate_exit) {
+                ret = -EINTR;
+                goto out;
+        }
 
         kvm_sigset_activate(vcpu);
 
@@ -796,6 +810,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
         kvm_sigset_deactivate(vcpu);
 
+out:
+        vcpu_put(vcpu);
         return ret;
 }
 
@@ -1011,66 +1027,88 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
         struct kvm_vcpu *vcpu = filp->private_data;
         void __user *argp = (void __user *)arg;
         struct kvm_device_attr attr;
+        long r;
+
+        vcpu_load(vcpu);
 
         switch (ioctl) {
         case KVM_ARM_VCPU_INIT: {
                 struct kvm_vcpu_init init;
 
+                r = -EFAULT;
                 if (copy_from_user(&init, argp, sizeof(init)))
-                        return -EFAULT;
+                        break;
 
-                return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
+                r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
+                break;
         }
         case KVM_SET_ONE_REG:
         case KVM_GET_ONE_REG: {
                 struct kvm_one_reg reg;
 
+                r = -ENOEXEC;
                 if (unlikely(!kvm_vcpu_initialized(vcpu)))
-                        return -ENOEXEC;
+                        break;
 
+                r = -EFAULT;
                 if (copy_from_user(&reg, argp, sizeof(reg)))
-                        return -EFAULT;
+                        break;
+
                 if (ioctl == KVM_SET_ONE_REG)
-                        return kvm_arm_set_reg(vcpu, &reg);
+                        r = kvm_arm_set_reg(vcpu, &reg);
                 else
-                        return kvm_arm_get_reg(vcpu, &reg);
+                        r = kvm_arm_get_reg(vcpu, &reg);
+                break;
         }
         case KVM_GET_REG_LIST: {
                 struct kvm_reg_list __user *user_list = argp;
                 struct kvm_reg_list reg_list;
                 unsigned n;
 
+                r = -ENOEXEC;
                 if (unlikely(!kvm_vcpu_initialized(vcpu)))
-                        return -ENOEXEC;
+                        break;
 
+                r = -EFAULT;
                 if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
-                        return -EFAULT;
+                        break;
                 n = reg_list.n;
                 reg_list.n = kvm_arm_num_regs(vcpu);
                 if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
-                        return -EFAULT;
+                        break;
+                r = -E2BIG;
                 if (n < reg_list.n)
-                        return -E2BIG;
-                return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
+                        break;
+                r = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
+                break;
         }
         case KVM_SET_DEVICE_ATTR: {
+                r = -EFAULT;
                 if (copy_from_user(&attr, argp, sizeof(attr)))
-                        return -EFAULT;
-                return kvm_arm_vcpu_set_attr(vcpu, &attr);
+                        break;
+                r = kvm_arm_vcpu_set_attr(vcpu, &attr);
+                break;
         }
         case KVM_GET_DEVICE_ATTR: {
+                r = -EFAULT;
                 if (copy_from_user(&attr, argp, sizeof(attr)))
-                        return -EFAULT;
-                return kvm_arm_vcpu_get_attr(vcpu, &attr);
+                        break;
+                r = kvm_arm_vcpu_get_attr(vcpu, &attr);
+                break;
         }
         case KVM_HAS_DEVICE_ATTR: {
+                r = -EFAULT;
                 if (copy_from_user(&attr, argp, sizeof(attr)))
-                        return -EFAULT;
-                return kvm_arm_vcpu_has_attr(vcpu, &attr);
+                        break;
+                r = kvm_arm_vcpu_has_attr(vcpu, &attr);
+                break;
         }
         default:
-                return -EINVAL;
+                r = -EINVAL;
         }
+
+        vcpu_put(vcpu);
+        return r;
 }
 
 /**
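Taken together with the kvm_main.c changes below, the locking split becomes: the generic kvm_vcpu_ioctl() takes vcpu->mutex, and each arch ioctl brackets only the work that needs the vCPU context with vcpu_load()/vcpu_put(). A purely illustrative sketch of that pattern for a hypothetical architecture (KVM_SOME_IOCTL and kvm_arch_do_something() are invented, not part of this diff):

/* Illustrative only: KVM_SOME_IOCTL and kvm_arch_do_something() are made up. */
long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
                         unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        long r;

        vcpu_load(vcpu);                /* caller already holds vcpu->mutex */

        switch (ioctl) {
        case KVM_SOME_IOCTL:            /* hypothetical ioctl */
                r = kvm_arch_do_something(vcpu);
                break;
        default:
                r = -EINVAL;
        }

        vcpu_put(vcpu);
        return r;
}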
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 210bf820385a..b4414842b023 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -151,17 +151,12 @@ bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
 /*
  * Switches to specified vcpu, until a matching vcpu_put()
  */
-int vcpu_load(struct kvm_vcpu *vcpu)
+void vcpu_load(struct kvm_vcpu *vcpu)
 {
-        int cpu;
-
-        if (mutex_lock_killable(&vcpu->mutex))
-                return -EINTR;
-        cpu = get_cpu();
+        int cpu = get_cpu();
         preempt_notifier_register(&vcpu->preempt_notifier);
         kvm_arch_vcpu_load(vcpu, cpu);
         put_cpu();
-        return 0;
 }
 EXPORT_SYMBOL_GPL(vcpu_load);
 
@@ -171,7 +166,6 @@ void vcpu_put(struct kvm_vcpu *vcpu)
         kvm_arch_vcpu_put(vcpu);
         preempt_notifier_unregister(&vcpu->preempt_notifier);
         preempt_enable();
-        mutex_unlock(&vcpu->mutex);
 }
 EXPORT_SYMBOL_GPL(vcpu_put);
 
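With the mutex handling moved out, vcpu_load() can no longer fail, so it changes from int to void. The matching declaration update lives in include/linux/kvm_host.h, outside virt/ and therefore not in this diff; presumably it now reads:

/* Presumed prototypes after this change (header not part of this diff). */
void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);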
@@ -2550,19 +2544,16 @@ static long kvm_vcpu_ioctl(struct file *filp,
         if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
                 return -EINVAL;
 
-#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
         /*
-         * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
-         * so vcpu_load() would break it.
+         * Some architectures have vcpu ioctls that are asynchronous to vcpu
+         * execution; mutex_lock() would break them.
          */
-        if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_S390_IRQ || ioctl == KVM_INTERRUPT)
-                return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
-#endif
-
-
-        r = vcpu_load(vcpu);
-        if (r)
+        r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
+        if (r != -ENOIOCTLCMD)
                 return r;
+
+        if (mutex_lock_killable(&vcpu->mutex))
+                return -EINTR;
         switch (ioctl) {
         case KVM_RUN: {
                 struct pid *oldpid;
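An architecture that still needs the asynchronous behaviour (the interrupt-injection ioctls that the removed #if block special-cased for s390, PPC and MIPS) selects HAVE_KVM_VCPU_ASYNC_IOCTL and handles those ioctls before the mutex is taken. A hedged sketch of the shape such a handler might take; the ioctl name, struct and helper below are invented for illustration, and the real arch implementations are outside virt/ and not shown here:

/* Hypothetical arch-side handler: KVM_INJECT_FOO, struct foo_irq and
 * foo_inject_irq() are invented for illustration. */
long kvm_arch_vcpu_async_ioctl(struct file *filp,
                               unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;

        if (ioctl == KVM_INJECT_FOO) {
                struct foo_irq irq;

                if (copy_from_user(&irq, argp, sizeof(irq)))
                        return -EFAULT;
                /* Interrupt injection is safe without holding vcpu->mutex. */
                return foo_inject_irq(vcpu, &irq);
        }

        /* Anything else falls back to the normal, mutex-protected path. */
        return -ENOIOCTLCMD;
}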
@@ -2734,7 +2725,7 @@ out_free1:
                 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
         }
 out:
-        vcpu_put(vcpu);
+        mutex_unlock(&vcpu->mutex);
         kfree(fpu);
         kfree(kvm_sregs);
         return r;
@@ -3186,21 +3177,18 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
                 return PTR_ERR(kvm);
 #ifdef CONFIG_KVM_MMIO
         r = kvm_coalesced_mmio_init(kvm);
-        if (r < 0) {
-                kvm_put_kvm(kvm);
-                return r;
-        }
+        if (r < 0)
+                goto put_kvm;
 #endif
         r = get_unused_fd_flags(O_CLOEXEC);
-        if (r < 0) {
-                kvm_put_kvm(kvm);
-                return r;
-        }
+        if (r < 0)
+                goto put_kvm;
+
         file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
         if (IS_ERR(file)) {
                 put_unused_fd(r);
-                kvm_put_kvm(kvm);
-                return PTR_ERR(file);
+                r = PTR_ERR(file);
+                goto put_kvm;
         }
 
         /*
@@ -3218,6 +3206,10 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
 
         fd_install(r, file);
         return r;
+
+put_kvm:
+        kvm_put_kvm(kvm);
+        return r;
 }
 
 static long kvm_dev_ioctl(struct file *filp,