aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMichael S. Tsirkin <mst@redhat.com>2012-09-16 04:50:30 -0400
committerMarcelo Tosatti <mtosatti@redhat.com>2012-09-17 12:46:32 -0400
commit9fc77441e5e1bf80b794cc546d2243ee9f4afb75 (patch)
tree4661976f4fea7606a54b3f5276c0ecb45760979c
parent7454766f7bead388251aedee35a478356a7f4e72 (diff)
KVM: make processes waiting on vcpu mutex killable
The vcpu mutex can be held for an unlimited time, so taking it with mutex_lock on an ioctl is wrong: one process could be passed a vcpu fd and call this ioctl on the vcpu used by another process; it will then be unkillable until the owner exits. Call mutex_lock_killable instead and return the status. Note: mutex_lock_interruptible would be even nicer, but I am not sure all users are prepared to handle EINTR from these ioctls; they might misinterpret it as an error. Cleanup paths expect a vcpu that can't be used by any userspace, so this will always succeed — catch bugs by calling BUG_ON. Catch callers that don't check the return state by adding __must_check. Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
-rw-r--r--arch/x86/kvm/x86.c12
-rw-r--r--include/linux/kvm_host.h2
-rw-r--r--virt/kvm/kvm_main.c10
3 files changed, 17 insertions, 7 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c4d451ed1573..19047eafa38d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6016,7 +6016,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
6016 int r; 6016 int r;
6017 6017
6018 vcpu->arch.mtrr_state.have_fixed = 1; 6018 vcpu->arch.mtrr_state.have_fixed = 1;
6019 vcpu_load(vcpu); 6019 r = vcpu_load(vcpu);
6020 if (r)
6021 return r;
6020 r = kvm_arch_vcpu_reset(vcpu); 6022 r = kvm_arch_vcpu_reset(vcpu);
6021 if (r == 0) 6023 if (r == 0)
6022 r = kvm_mmu_setup(vcpu); 6024 r = kvm_mmu_setup(vcpu);
@@ -6027,9 +6029,11 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
6027 6029
6028void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) 6030void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
6029{ 6031{
6032 int r;
6030 vcpu->arch.apf.msr_val = 0; 6033 vcpu->arch.apf.msr_val = 0;
6031 6034
6032 vcpu_load(vcpu); 6035 r = vcpu_load(vcpu);
6036 BUG_ON(r);
6033 kvm_mmu_unload(vcpu); 6037 kvm_mmu_unload(vcpu);
6034 vcpu_put(vcpu); 6038 vcpu_put(vcpu);
6035 6039
@@ -6275,7 +6279,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
6275 6279
6276static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) 6280static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
6277{ 6281{
6278 vcpu_load(vcpu); 6282 int r;
6283 r = vcpu_load(vcpu);
6284 BUG_ON(r);
6279 kvm_mmu_unload(vcpu); 6285 kvm_mmu_unload(vcpu);
6280 vcpu_put(vcpu); 6286 vcpu_put(vcpu);
6281} 6287}
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 40791930bc15..80bfc880921e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -408,7 +408,7 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
408int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id); 408int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
409void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); 409void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
410 410
411void vcpu_load(struct kvm_vcpu *vcpu); 411int __must_check vcpu_load(struct kvm_vcpu *vcpu);
412void vcpu_put(struct kvm_vcpu *vcpu); 412void vcpu_put(struct kvm_vcpu *vcpu);
413 413
414int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, 414int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4fe02d900810..cc3f6dc506e4 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -131,11 +131,12 @@ bool kvm_is_mmio_pfn(pfn_t pfn)
131/* 131/*
132 * Switches to specified vcpu, until a matching vcpu_put() 132 * Switches to specified vcpu, until a matching vcpu_put()
133 */ 133 */
134void vcpu_load(struct kvm_vcpu *vcpu) 134int vcpu_load(struct kvm_vcpu *vcpu)
135{ 135{
136 int cpu; 136 int cpu;
137 137
138 mutex_lock(&vcpu->mutex); 138 if (mutex_lock_killable(&vcpu->mutex))
139 return -EINTR;
139 if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) { 140 if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
140 /* The thread running this VCPU changed. */ 141 /* The thread running this VCPU changed. */
141 struct pid *oldpid = vcpu->pid; 142 struct pid *oldpid = vcpu->pid;
@@ -148,6 +149,7 @@ void vcpu_load(struct kvm_vcpu *vcpu)
148 preempt_notifier_register(&vcpu->preempt_notifier); 149 preempt_notifier_register(&vcpu->preempt_notifier);
149 kvm_arch_vcpu_load(vcpu, cpu); 150 kvm_arch_vcpu_load(vcpu, cpu);
150 put_cpu(); 151 put_cpu();
152 return 0;
151} 153}
152 154
153void vcpu_put(struct kvm_vcpu *vcpu) 155void vcpu_put(struct kvm_vcpu *vcpu)
@@ -1891,7 +1893,9 @@ static long kvm_vcpu_ioctl(struct file *filp,
1891#endif 1893#endif
1892 1894
1893 1895
1894 vcpu_load(vcpu); 1896 r = vcpu_load(vcpu);
1897 if (r)
1898 return r;
1895 switch (ioctl) { 1899 switch (ioctl) {
1896 case KVM_RUN: 1900 case KVM_RUN:
1897 r = -EINVAL; 1901 r = -EINVAL;