Diffstat (limited to 'virt/kvm/irq_comm.c')
-rw-r--r--	virt/kvm/irq_comm.c	88
1 file changed, 53 insertions(+), 35 deletions(-)
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 9783f5c43dae..81950f6f6fd9 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -144,10 +144,12 @@ static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
  * = 0   Interrupt was coalesced (previous irq is still pending)
  * > 0   Number of CPUs interrupt was delivered to
  */
-int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
+int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
 {
 	struct kvm_kernel_irq_routing_entry *e;
 	int ret = -1;
+	struct kvm_irq_routing_table *irq_rt;
+	struct hlist_node *n;
 
 	trace_kvm_set_irq(irq, level, irq_source_id);
 
@@ -157,8 +159,9 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
 	 * IOAPIC. So set the bit in both. The guest will ignore
 	 * writes to the unused one.
 	 */
-	list_for_each_entry(e, &kvm->irq_routing, link)
-		if (e->gsi == irq) {
+	irq_rt = kvm->irq_routing;
+	if (irq < irq_rt->nr_rt_entries)
+		hlist_for_each_entry(e, n, &irq_rt->map[irq], link) {
 			int r = e->set(e, kvm, irq_source_id, level);
 			if (r < 0)
 				continue;
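The new lookup in kvm_set_irq() indexes a per-GSI hash-list map instead of walking one global routing list. The table definition itself lives in include/linux/kvm_host.h and is not part of this diff; the sketch below is only an approximation inferred from the fields used in this file (nr_rt_entries, map[] and rt_entries):

/*
 * Approximate shape of struct kvm_irq_routing_table, inferred from the
 * accesses in this file; the authoritative definition is in kvm_host.h.
 */
struct kvm_irq_routing_table {
	/* nr routing entries, allocated together with the table header */
	struct kvm_kernel_irq_routing_entry *rt_entries;
	u32 nr_rt_entries;
	/*
	 * map[gsi] chains every routing entry whose gsi field equals gsi,
	 * so kvm_set_irq() finds all targets of a GSI with one array
	 * index plus a short hlist walk.
	 */
	struct hlist_head map[0];
};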
@@ -170,20 +173,23 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
 
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
 {
-	struct kvm_kernel_irq_routing_entry *e;
 	struct kvm_irq_ack_notifier *kian;
 	struct hlist_node *n;
 	unsigned gsi = pin;
+	int i;
 
 	trace_kvm_ack_irq(irqchip, pin);
 
-	list_for_each_entry(e, &kvm->irq_routing, link)
+	for (i = 0; i < kvm->irq_routing->nr_rt_entries; i++) {
+		struct kvm_kernel_irq_routing_entry *e;
+		e = &kvm->irq_routing->rt_entries[i];
 		if (e->type == KVM_IRQ_ROUTING_IRQCHIP &&
 		    e->irqchip.irqchip == irqchip &&
 		    e->irqchip.pin == pin) {
 			gsi = e->gsi;
 			break;
 		}
+	}
 
 	hlist_for_each_entry(kian, n, &kvm->arch.irq_ack_notifier_list, link)
 		if (kian->gsi == gsi)
@@ -280,26 +286,30 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
 			kimn->func(kimn, mask);
 }
 
-static void __kvm_free_irq_routing(struct list_head *irq_routing)
-{
-	struct kvm_kernel_irq_routing_entry *e, *n;
-
-	list_for_each_entry_safe(e, n, irq_routing, link)
-		kfree(e);
-}
-
 void kvm_free_irq_routing(struct kvm *kvm)
 {
 	mutex_lock(&kvm->irq_lock);
-	__kvm_free_irq_routing(&kvm->irq_routing);
+	kfree(kvm->irq_routing);
 	mutex_unlock(&kvm->irq_lock);
 }
 
-static int setup_routing_entry(struct kvm_kernel_irq_routing_entry *e,
+static int setup_routing_entry(struct kvm_irq_routing_table *rt,
+			       struct kvm_kernel_irq_routing_entry *e,
 			       const struct kvm_irq_routing_entry *ue)
 {
 	int r = -EINVAL;
 	int delta;
+	struct kvm_kernel_irq_routing_entry *ei;
+	struct hlist_node *n;
+
+	/*
+	 * Do not allow GSI to be mapped to the same irqchip more than once.
+	 * Allow only one to one mapping between GSI and MSI.
+	 */
+	hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link)
+		if (ei->type == KVM_IRQ_ROUTING_MSI ||
+		    ue->u.irqchip.irqchip == ei->irqchip.irqchip)
+			return r;
 
 	e->gsi = ue->gsi;
 	e->type = ue->type;
@@ -332,6 +342,8 @@ static int setup_routing_entry(struct kvm_kernel_irq_routing_entry *e,
 	default:
 		goto out;
 	}
+
+	hlist_add_head(&e->link, &rt->map[e->gsi]);
 	r = 0;
 out:
 	return r;
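The check added at the top of setup_routing_entry() tightens what KVM_SET_GSI_ROUTING accepts: a GSI may still fan out to different irqchips (the x86 default routing points the low GSIs at both the PIC and the IOAPIC), but it may not hit the same chip twice, and an MSI route must have its GSI to itself. A purely illustrative routing array that still passes the new check, using field names and constants from the uapi struct kvm_irq_routing_entry:

/*
 * Illustrative only: GSI 1 delivered to both PIC master pin 1 and
 * IOAPIC pin 1 remains valid.  A second entry for the same chip on
 * GSI 1, or an MSI entry sharing GSI 1, would now make
 * setup_routing_entry() return -EINVAL.
 */
struct kvm_irq_routing_entry routes[] = {
	{ .gsi = 1, .type = KVM_IRQ_ROUTING_IRQCHIP,
	  .u.irqchip = { .irqchip = KVM_IRQCHIP_PIC_MASTER, .pin = 1 } },
	{ .gsi = 1, .type = KVM_IRQ_ROUTING_IRQCHIP,
	  .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = 1 } },
};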
@@ -343,43 +355,49 @@ int kvm_set_irq_routing(struct kvm *kvm,
 			unsigned nr,
 			unsigned flags)
 {
-	struct list_head irq_list = LIST_HEAD_INIT(irq_list);
-	struct list_head tmp = LIST_HEAD_INIT(tmp);
-	struct kvm_kernel_irq_routing_entry *e = NULL;
-	unsigned i;
+	struct kvm_irq_routing_table *new, *old;
+	u32 i, nr_rt_entries = 0;
 	int r;
 
 	for (i = 0; i < nr; ++i) {
+		if (ue[i].gsi >= KVM_MAX_IRQ_ROUTES)
+			return -EINVAL;
+		nr_rt_entries = max(nr_rt_entries, ue[i].gsi);
+	}
+
+	nr_rt_entries += 1;
+
+	new = kzalloc(sizeof(*new) + (nr_rt_entries * sizeof(struct hlist_head))
+		      + (nr * sizeof(struct kvm_kernel_irq_routing_entry)),
+		      GFP_KERNEL);
+
+	if (!new)
+		return -ENOMEM;
+
+	new->rt_entries = (void *)&new->map[nr_rt_entries];
+
+	new->nr_rt_entries = nr_rt_entries;
+
+	for (i = 0; i < nr; ++i) {
 		r = -EINVAL;
-		if (ue->gsi >= KVM_MAX_IRQ_ROUTES)
-			goto out;
 		if (ue->flags)
 			goto out;
-		r = -ENOMEM;
-		e = kzalloc(sizeof(*e), GFP_KERNEL);
-		if (!e)
-			goto out;
-		r = setup_routing_entry(e, ue);
+		r = setup_routing_entry(new, &new->rt_entries[i], ue);
 		if (r)
 			goto out;
 		++ue;
-		list_add(&e->link, &irq_list);
-		e = NULL;
 	}
 
 	mutex_lock(&kvm->irq_lock);
-	list_splice(&kvm->irq_routing, &tmp);
-	INIT_LIST_HEAD(&kvm->irq_routing);
-	list_splice(&irq_list, &kvm->irq_routing);
-	INIT_LIST_HEAD(&irq_list);
-	list_splice(&tmp, &irq_list);
+	old = kvm->irq_routing;
+	kvm->irq_routing = new;
 	mutex_unlock(&kvm->irq_lock);
 
+	new = old;
 	r = 0;
 
 out:
-	kfree(e);
-	__kvm_free_irq_routing(&irq_list);
+	kfree(new);
 	return r;
 }
 
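kvm_set_irq_routing() now builds the whole table with a single kzalloc() that holds the header, nr_rt_entries hlist heads and nr routing entries back to back; new->rt_entries simply points just past the last list head. Once the new table is published under irq_lock, the old pointer is moved into new, so the shared kfree(new) at the out: label frees the previous table on success and the half-built one on error. A minimal stand-alone sketch of the same layout arithmetic follows; the helper name is hypothetical, not code from this commit, and it assumes the approximate structure shown above:

/* Layout: [ header | map[0..nr_rt_entries-1] | rt_entries[0..nr-1] ] */
static struct kvm_irq_routing_table *alloc_irq_routing_table(u32 nr_rt_entries,
							      unsigned nr)
{
	struct kvm_irq_routing_table *rt;
	size_t sz = sizeof(*rt)
		    + nr_rt_entries * sizeof(struct hlist_head)
		    + nr * sizeof(struct kvm_kernel_irq_routing_entry);

	rt = kzalloc(sz, GFP_KERNEL);
	if (!rt)
		return NULL;

	/* Entries start immediately after the last per-GSI list head. */
	rt->rt_entries = (void *)&rt->map[nr_rt_entries];
	rt->nr_rt_entries = nr_rt_entries;
	return rt;
}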