author	Jan Kiszka <jan.kiszka@siemens.com>	2010-11-16 16:30:03 -0500
committer	Avi Kivity <avi@redhat.com>	2011-01-12 04:29:20 -0500
commit	0645211c43df0b96c51e12980066b3227e10b164 (patch)
tree	2c23ea65e07b9481681eb6bb7cc5d7d2fa109917 /virt
parent	0c106b5aaa727c7f508828e94cff4a98874f930c (diff)
KVM: Switch assigned device IRQ forwarding to threaded handler
This improves the IRQ forwarding for assigned devices: by using the kernel's threaded IRQ scheme, we can get rid of the latency-prone work queue and simplify the code at the same time. Moreover, we no longer have to hold assigned_dev_lock while raising the guest IRQ, which can be a lengthy operation as we may have to iterate over all VCPUs. The lock is now only used for synchronizing masking vs. unmasking of INTx-type IRQs, and is thus renamed to intx_lock.

Acked-by: Alex Williamson <alex.williamson@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
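For context, the threaded-IRQ pattern the patch switches to looks like this in isolation. This is a minimal sketch, not code from this patch: struct my_dev, my_irq_thread() and my_dev_setup_irq() are invented for illustration, while request_threaded_irq(), IRQF_ONESHOT, disable_irq_nosync() and free_irq() are the actual kernel APIs.

#include <linux/interrupt.h>
#include <linux/spinlock.h>

/* Illustrative device state; not a structure from this patch. */
struct my_dev {
	int irq;
	bool irq_disabled;
	spinlock_t intx_lock;	/* masking vs. unmasking of INTx */
};

/*
 * The thread function runs in process context in a dedicated kernel
 * thread, so it may sleep or take a long time (e.g. iterate over all
 * VCPUs) without blocking other interrupts.
 */
static irqreturn_t my_irq_thread(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	/* For a level-triggered INTx line, leave the interrupt masked
	 * until the guest acks; only the bookkeeping needs the lock. */
	spin_lock(&dev->intx_lock);
	disable_irq_nosync(irq);
	dev->irq_disabled = true;
	spin_unlock(&dev->intx_lock);

	/* ... forward the interrupt, potentially lengthy ... */

	return IRQ_HANDLED;
}

static int my_dev_setup_irq(struct my_dev *dev)
{
	spin_lock_init(&dev->intx_lock);
	/*
	 * NULL primary handler: the core installs a stub that merely
	 * wakes my_irq_thread(). IRQF_ONESHOT keeps the line masked
	 * until the thread returns.
	 */
	return request_threaded_irq(dev->irq, NULL, my_irq_thread,
				    IRQF_ONESHOT, "my_dev", dev);
}

Teardown gets simpler too, as the deassign_host_irq() changes below show: disable_irq() synchronizes against a still-running threaded handler, so the old disable_irq_nosync() plus cancel_work_sync() pair collapses into a single call before free_irq().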
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/assigned-dev.c	107
1 file changed, 34 insertions(+), 73 deletions(-)
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index ecc4419fadca..1d77ce16360a 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -55,58 +55,31 @@ static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
 	return index;
 }
 
-static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
+static irqreturn_t kvm_assigned_dev_thread(int irq, void *dev_id)
 {
-	struct kvm_assigned_dev_kernel *assigned_dev;
-	int i;
+	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
+	u32 vector;
+	int index;
 
-	assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
-				    interrupt_work);
+	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_INTX) {
+		spin_lock(&assigned_dev->intx_lock);
+		disable_irq_nosync(irq);
+		assigned_dev->host_irq_disabled = true;
+		spin_unlock(&assigned_dev->intx_lock);
+	}
 
-	spin_lock_irq(&assigned_dev->assigned_dev_lock);
 	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
-		struct kvm_guest_msix_entry *guest_entries =
-			assigned_dev->guest_msix_entries;
-		for (i = 0; i < assigned_dev->entries_nr; i++) {
-			if (!(guest_entries[i].flags &
-			      KVM_ASSIGNED_MSIX_PENDING))
-				continue;
-			guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING;
+		index = find_index_from_host_irq(assigned_dev, irq);
+		if (index >= 0) {
+			vector = assigned_dev->
+				guest_msix_entries[index].vector;
 			kvm_set_irq(assigned_dev->kvm,
-				    assigned_dev->irq_source_id,
-				    guest_entries[i].vector, 1);
+				    assigned_dev->irq_source_id, vector, 1);
 		}
 	} else
 		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
 			    assigned_dev->guest_irq, 1);
 
-	spin_unlock_irq(&assigned_dev->assigned_dev_lock);
-}
-
-static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
-{
-	unsigned long flags;
-	struct kvm_assigned_dev_kernel *assigned_dev =
-		(struct kvm_assigned_dev_kernel *) dev_id;
-
-	spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags);
-	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
-		int index = find_index_from_host_irq(assigned_dev, irq);
-		if (index < 0)
-			goto out;
-		assigned_dev->guest_msix_entries[index].flags |=
-			KVM_ASSIGNED_MSIX_PENDING;
-	}
-
-	schedule_work(&assigned_dev->interrupt_work);
-
-	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
-		disable_irq_nosync(irq);
-		assigned_dev->host_irq_disabled = true;
-	}
-
-out:
-	spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags);
 	return IRQ_HANDLED;
 }
 
@@ -114,7 +87,6 @@ out:
 static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
 {
 	struct kvm_assigned_dev_kernel *dev;
-	unsigned long flags;
 
 	if (kian->gsi == -1)
 		return;
@@ -127,12 +99,12 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
 	/* The guest irq may be shared so this ack may be
 	 * from another device.
 	 */
-	spin_lock_irqsave(&dev->assigned_dev_lock, flags);
+	spin_lock(&dev->intx_lock);
 	if (dev->host_irq_disabled) {
 		enable_irq(dev->host_irq);
 		dev->host_irq_disabled = false;
 	}
-	spin_unlock_irqrestore(&dev->assigned_dev_lock, flags);
+	spin_unlock(&dev->intx_lock);
 }
 
 static void deassign_guest_irq(struct kvm *kvm,
@@ -155,28 +127,19 @@ static void deassign_host_irq(struct kvm *kvm,
 			      struct kvm_assigned_dev_kernel *assigned_dev)
 {
 	/*
-	 * In kvm_free_device_irq, cancel_work_sync return true if:
-	 * 1. work is scheduled, and then cancelled.
-	 * 2. work callback is executed.
-	 *
-	 * The first one ensured that the irq is disabled and no more events
-	 * would happen. But for the second one, the irq may be enabled (e.g.
-	 * for MSI). So we disable irq here to prevent further events.
+	 * We disable irq here to prevent further events.
 	 *
 	 * Notice this maybe result in nested disable if the interrupt type is
 	 * INTx, but it's OK for we are going to free it.
 	 *
 	 * If this function is a part of VM destroy, please ensure that till
 	 * now, the kvm state is still legal for probably we also have to wait
-	 * interrupt_work done.
+	 * on a currently running IRQ handler.
 	 */
 	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
 		int i;
 		for (i = 0; i < assigned_dev->entries_nr; i++)
-			disable_irq_nosync(assigned_dev->
-					   host_msix_entries[i].vector);
-
-		cancel_work_sync(&assigned_dev->interrupt_work);
+			disable_irq(assigned_dev->host_msix_entries[i].vector);
 
 		for (i = 0; i < assigned_dev->entries_nr; i++)
 			free_irq(assigned_dev->host_msix_entries[i].vector,
@@ -188,8 +151,7 @@ static void deassign_host_irq(struct kvm *kvm,
 		pci_disable_msix(assigned_dev->dev);
 	} else {
 		/* Deal with MSI and INTx */
-		disable_irq_nosync(assigned_dev->host_irq);
-		cancel_work_sync(&assigned_dev->interrupt_work);
+		disable_irq(assigned_dev->host_irq);
 
 		free_irq(assigned_dev->host_irq, (void *)assigned_dev);
 
@@ -268,8 +230,9 @@ static int assigned_device_enable_host_intx(struct kvm *kvm,
 	 * on the same interrupt line is not a happy situation: there
 	 * are going to be long delays in accepting, acking, etc.
 	 */
-	if (request_irq(dev->host_irq, kvm_assigned_dev_intr,
-			0, "kvm_assigned_intx_device", (void *)dev))
+	if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
+				 IRQF_ONESHOT, "kvm_assigned_intx_device",
+				 (void *)dev))
 		return -EIO;
 	return 0;
 }
@@ -287,8 +250,8 @@ static int assigned_device_enable_host_msi(struct kvm *kvm,
 	}
 
 	dev->host_irq = dev->dev->irq;
-	if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0,
-			"kvm_assigned_msi_device", (void *)dev)) {
+	if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
+				 0, "kvm_assigned_msi_device", (void *)dev)) {
 		pci_disable_msi(dev->dev);
 		return -EIO;
 	}
@@ -313,10 +276,10 @@ static int assigned_device_enable_host_msix(struct kvm *kvm,
 		return r;
 
 	for (i = 0; i < dev->entries_nr; i++) {
-		r = request_irq(dev->host_msix_entries[i].vector,
-				kvm_assigned_dev_intr, 0,
-				"kvm_assigned_msix_device",
-				(void *)dev);
+		r = request_threaded_irq(dev->host_msix_entries[i].vector,
+					 NULL, kvm_assigned_dev_thread,
+					 0, "kvm_assigned_msix_device",
+					 (void *)dev);
 		if (r)
 			goto err;
 	}
@@ -557,12 +520,10 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 	match->host_devfn = assigned_dev->devfn;
 	match->flags = assigned_dev->flags;
 	match->dev = dev;
-	spin_lock_init(&match->assigned_dev_lock);
+	spin_lock_init(&match->intx_lock);
 	match->irq_source_id = -1;
 	match->kvm = kvm;
 	match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
-	INIT_WORK(&match->interrupt_work,
-		  kvm_assigned_dev_interrupt_work_handler);
 
 	list_add(&match->list, &kvm->arch.assigned_dev_head);
 
@@ -654,9 +615,9 @@ static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
 		r = -ENOMEM;
 		goto msix_nr_out;
 	}
-	adev->guest_msix_entries = kzalloc(
-		sizeof(struct kvm_guest_msix_entry) *
-			entry_nr->entry_nr, GFP_KERNEL);
+	adev->guest_msix_entries =
+		kzalloc(sizeof(struct msix_entry) * entry_nr->entry_nr,
+			GFP_KERNEL);
 	if (!adev->guest_msix_entries) {
 		kfree(adev->host_msix_entries);
 		r = -ENOMEM;