about summary refs log tree commit diff stats
path: root/virt
diff options
context:
space:
mode:
authorMarcelo Tosatti <mtosatti@redhat.com>2009-05-07 16:55:13 -0400
committerAvi Kivity <avi@redhat.com>2009-06-10 04:48:53 -0400
commit547de29e5b1662deb05b5f90917902dc0e9ac182 (patch)
treec8c8d913729f4577251d3bceb9fe52b676f7a5a8 /virt
parent32f8840064d88cc3f6e85203aec7b6b57bebcb97 (diff)
KVM: protect assigned dev workqueue, int handler and irq acker
kvm_assigned_dev_ack_irq is vulnerable to a race condition with the interrupt handler function. It does: if (dev->host_irq_disabled) { enable_irq(dev->host_irq); dev->host_irq_disabled = false; } If an interrupt triggers before the dev->host_irq_disabled assignment, it will disable the interrupt and set dev->host_irq_disabled to true. On return to kvm_assigned_dev_ack_irq, dev->host_irq_disabled is set to false, and the next kvm_assigned_dev_ack_irq call will fail to reenable it. Other than that, having the interrupt handler and work handlers run in parallel sounds like asking for trouble (could not spot any obvious problem, but better not have to, it's fragile). CC: sheng.yang@intel.com Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'virt')
-rw-r--r--virt/kvm/kvm_main.c13
1 file changed, 12 insertions, 1 deletion
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 29c0afb064da..687d113a3e5e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -42,6 +42,7 @@
42#include <linux/mman.h> 42#include <linux/mman.h>
43#include <linux/swap.h> 43#include <linux/swap.h>
44#include <linux/bitops.h> 44#include <linux/bitops.h>
45#include <linux/spinlock.h>
45 46
46#include <asm/processor.h> 47#include <asm/processor.h>
47#include <asm/io.h> 48#include <asm/io.h>
@@ -130,6 +131,7 @@ static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
130 * finer-grained lock, update this 131 * finer-grained lock, update this
131 */ 132 */
132 mutex_lock(&kvm->lock); 133 mutex_lock(&kvm->lock);
134 spin_lock_irq(&assigned_dev->assigned_dev_lock);
133 if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) { 135 if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
134 struct kvm_guest_msix_entry *guest_entries = 136 struct kvm_guest_msix_entry *guest_entries =
135 assigned_dev->guest_msix_entries; 137 assigned_dev->guest_msix_entries;
@@ -156,18 +158,21 @@ static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
156 } 158 }
157 } 159 }
158 160
161 spin_unlock_irq(&assigned_dev->assigned_dev_lock);
159 mutex_unlock(&assigned_dev->kvm->lock); 162 mutex_unlock(&assigned_dev->kvm->lock);
160} 163}
161 164
162static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id) 165static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
163{ 166{
167 unsigned long flags;
164 struct kvm_assigned_dev_kernel *assigned_dev = 168 struct kvm_assigned_dev_kernel *assigned_dev =
165 (struct kvm_assigned_dev_kernel *) dev_id; 169 (struct kvm_assigned_dev_kernel *) dev_id;
166 170
171 spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags);
167 if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) { 172 if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
168 int index = find_index_from_host_irq(assigned_dev, irq); 173 int index = find_index_from_host_irq(assigned_dev, irq);
169 if (index < 0) 174 if (index < 0)
170 return IRQ_HANDLED; 175 goto out;
171 assigned_dev->guest_msix_entries[index].flags |= 176 assigned_dev->guest_msix_entries[index].flags |=
172 KVM_ASSIGNED_MSIX_PENDING; 177 KVM_ASSIGNED_MSIX_PENDING;
173 } 178 }
@@ -177,6 +182,8 @@ static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
177 disable_irq_nosync(irq); 182 disable_irq_nosync(irq);
178 assigned_dev->host_irq_disabled = true; 183 assigned_dev->host_irq_disabled = true;
179 184
185out:
186 spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags);
180 return IRQ_HANDLED; 187 return IRQ_HANDLED;
181} 188}
182 189
@@ -184,6 +191,7 @@ static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
184static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian) 191static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
185{ 192{
186 struct kvm_assigned_dev_kernel *dev; 193 struct kvm_assigned_dev_kernel *dev;
194 unsigned long flags;
187 195
188 if (kian->gsi == -1) 196 if (kian->gsi == -1)
189 return; 197 return;
@@ -196,10 +204,12 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
196 /* The guest irq may be shared so this ack may be 204 /* The guest irq may be shared so this ack may be
197 * from another device. 205 * from another device.
198 */ 206 */
207 spin_lock_irqsave(&dev->assigned_dev_lock, flags);
199 if (dev->host_irq_disabled) { 208 if (dev->host_irq_disabled) {
200 enable_irq(dev->host_irq); 209 enable_irq(dev->host_irq);
201 dev->host_irq_disabled = false; 210 dev->host_irq_disabled = false;
202 } 211 }
212 spin_unlock_irqrestore(&dev->assigned_dev_lock, flags);
203} 213}
204 214
205static void deassign_guest_irq(struct kvm *kvm, 215static void deassign_guest_irq(struct kvm *kvm,
@@ -615,6 +625,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
615 match->host_devfn = assigned_dev->devfn; 625 match->host_devfn = assigned_dev->devfn;
616 match->flags = assigned_dev->flags; 626 match->flags = assigned_dev->flags;
617 match->dev = dev; 627 match->dev = dev;
628 spin_lock_init(&match->assigned_dev_lock);
618 match->irq_source_id = -1; 629 match->irq_source_id = -1;
619 match->kvm = kvm; 630 match->kvm = kvm;
620 match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq; 631 match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;