path: root/virt/kvm/assigned-dev.c
Diffstat (limited to 'virt/kvm/assigned-dev.c')
-rw-r--r--  virt/kvm/assigned-dev.c  |  62
1 file changed, 33 insertions(+), 29 deletions(-)
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index eaf3a50f976..3ad0925d23a 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -58,8 +58,6 @@ static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
 static irqreturn_t kvm_assigned_dev_thread(int irq, void *dev_id)
 {
 	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
-	u32 vector;
-	int index;
 
 	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_INTX) {
 		spin_lock(&assigned_dev->intx_lock);
@@ -68,31 +66,35 @@ static irqreturn_t kvm_assigned_dev_thread(int irq, void *dev_id)
 		spin_unlock(&assigned_dev->intx_lock);
 	}
 
-	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
-		index = find_index_from_host_irq(assigned_dev, irq);
-		if (index >= 0) {
-			vector = assigned_dev->
-				guest_msix_entries[index].vector;
-			kvm_set_irq(assigned_dev->kvm,
-				    assigned_dev->irq_source_id, vector, 1);
-		}
-	} else
-		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
-			    assigned_dev->guest_irq, 1);
+	kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
+		    assigned_dev->guest_irq, 1);
 
 	return IRQ_HANDLED;
 }
 
+#ifdef __KVM_HAVE_MSIX
+static irqreturn_t kvm_assigned_dev_thread_msix(int irq, void *dev_id)
+{
+	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
+	int index = find_index_from_host_irq(assigned_dev, irq);
+	u32 vector;
+
+	if (index >= 0) {
+		vector = assigned_dev->guest_msix_entries[index].vector;
+		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
+			    vector, 1);
+	}
+
+	return IRQ_HANDLED;
+}
+#endif
+
 /* Ack the irq line for an assigned device */
 static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
 {
-	struct kvm_assigned_dev_kernel *dev;
-
-	if (kian->gsi == -1)
-		return;
-
-	dev = container_of(kian, struct kvm_assigned_dev_kernel,
-			   ack_notifier);
+	struct kvm_assigned_dev_kernel *dev =
+		container_of(kian, struct kvm_assigned_dev_kernel,
+			     ack_notifier);
 
 	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
 
@@ -110,8 +112,9 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
 static void deassign_guest_irq(struct kvm *kvm,
 			       struct kvm_assigned_dev_kernel *assigned_dev)
 {
-	kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
-	assigned_dev->ack_notifier.gsi = -1;
+	if (assigned_dev->ack_notifier.gsi != -1)
+		kvm_unregister_irq_ack_notifier(kvm,
+						&assigned_dev->ack_notifier);
 
 	kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
 		    assigned_dev->guest_irq, 0);
@@ -143,7 +146,7 @@ static void deassign_host_irq(struct kvm *kvm,
 
 	for (i = 0; i < assigned_dev->entries_nr; i++)
 		free_irq(assigned_dev->host_msix_entries[i].vector,
-			 (void *)assigned_dev);
+			 assigned_dev);
 
 	assigned_dev->entries_nr = 0;
 	kfree(assigned_dev->host_msix_entries);
@@ -153,7 +156,7 @@ static void deassign_host_irq(struct kvm *kvm,
 	/* Deal with MSI and INTx */
 	disable_irq(assigned_dev->host_irq);
 
-	free_irq(assigned_dev->host_irq, (void *)assigned_dev);
+	free_irq(assigned_dev->host_irq, assigned_dev);
 
 	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
 		pci_disable_msi(assigned_dev->dev);
@@ -239,7 +242,7 @@ static int assigned_device_enable_host_intx(struct kvm *kvm,
 	 * are going to be long delays in accepting, acking, etc.
 	 */
 	if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
-				 IRQF_ONESHOT, dev->irq_name, (void *)dev))
+				 IRQF_ONESHOT, dev->irq_name, dev))
 		return -EIO;
 	return 0;
 }
@@ -258,7 +261,7 @@ static int assigned_device_enable_host_msi(struct kvm *kvm,
 
 	dev->host_irq = dev->dev->irq;
 	if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
-				 0, dev->irq_name, (void *)dev)) {
+				 0, dev->irq_name, dev)) {
 		pci_disable_msi(dev->dev);
 		return -EIO;
 	}
@@ -284,8 +287,8 @@ static int assigned_device_enable_host_msix(struct kvm *kvm,
 
 	for (i = 0; i < dev->entries_nr; i++) {
 		r = request_threaded_irq(dev->host_msix_entries[i].vector,
-					 NULL, kvm_assigned_dev_thread,
-					 0, dev->irq_name, (void *)dev);
+					 NULL, kvm_assigned_dev_thread_msix,
+					 0, dev->irq_name, dev);
 		if (r)
 			goto err;
 	}
@@ -293,7 +296,7 @@ static int assigned_device_enable_host_msix(struct kvm *kvm,
 	return 0;
 err:
 	for (i -= 1; i >= 0; i--)
-		free_irq(dev->host_msix_entries[i].vector, (void *)dev);
+		free_irq(dev->host_msix_entries[i].vector, dev);
 	pci_disable_msix(dev->dev);
 	return r;
 }
@@ -406,7 +409,8 @@ static int assign_guest_irq(struct kvm *kvm,
 
 	if (!r) {
 		dev->irq_requested_type |= guest_irq_type;
-		kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
+		if (dev->ack_notifier.gsi != -1)
+			kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
 	} else
 		kvm_free_irq_source_id(kvm, dev->irq_source_id);
 
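
For orientation only, not part of the patch: after this change INTx and MSI interrupts keep the generic kvm_assigned_dev_thread() handler, while each MSI-X vector is registered with the new kvm_assigned_dev_thread_msix() handler. A condensed sketch of the registration calls, taken from the hunks above with surrounding setup and error cleanup omitted:

	/* INTx: single shared host IRQ, generic threaded handler.
	 * MSI is registered the same way, but with flags 0 instead of
	 * IRQF_ONESHOT. */
	if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
				 IRQF_ONESHOT, dev->irq_name, dev))
		return -EIO;

	/* MSI-X: one host IRQ per vector, each wired to the dedicated
	 * MSI-X handler that looks up the guest vector by host IRQ. */
	for (i = 0; i < dev->entries_nr; i++) {
		r = request_threaded_irq(dev->host_msix_entries[i].vector,
					 NULL, kvm_assigned_dev_thread_msix,
					 0, dev->irq_name, dev);
		if (r)
			goto err;
	}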