aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--arch/x86/pci/xen.c22
-rw-r--r--arch/x86/xen/time.c4
-rw-r--r--drivers/xen/events.c255
3 files changed, 138 insertions, 143 deletions
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 25cd4a07d09f..2a12f3dbdd02 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -226,21 +226,27 @@ static int xen_pcifront_enable_irq(struct pci_dev *dev)
226{ 226{
227 int rc; 227 int rc;
228 int share = 1; 228 int share = 1;
229 u8 gsi;
229 230
230 dev_info(&dev->dev, "Xen PCI enabling IRQ: %d\n", dev->irq); 231 rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
231 232 if (rc < 0) {
232 if (dev->irq < 0) 233 dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
233 return -EINVAL; 234 rc);
235 return rc;
236 }
234 237
235 if (dev->irq < NR_IRQS_LEGACY) 238 if (gsi < NR_IRQS_LEGACY)
236 share = 0; 239 share = 0;
237 240
238 rc = xen_allocate_pirq(dev->irq, share, "pcifront"); 241 rc = xen_allocate_pirq(gsi, share, "pcifront");
239 if (rc < 0) { 242 if (rc < 0) {
240 dev_warn(&dev->dev, "Xen PCI IRQ: %d, failed to register:%d\n", 243 dev_warn(&dev->dev, "Xen PCI: failed to register GSI%d: %d\n",
241 dev->irq, rc); 244 gsi, rc);
242 return rc; 245 return rc;
243 } 246 }
247
248 dev->irq = rc;
249 dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq);
244 return 0; 250 return 0;
245} 251}
246 252
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 067759e3d6a5..2e2d370a47b1 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -397,7 +397,9 @@ void xen_setup_timer(int cpu)
397 name = "<timer kasprintf failed>"; 397 name = "<timer kasprintf failed>";
398 398
399 irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt, 399 irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
400 IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER, 400 IRQF_DISABLED|IRQF_PERCPU|
401 IRQF_NOBALANCING|IRQF_TIMER|
402 IRQF_FORCE_RESUME,
401 name, NULL); 403 name, NULL);
402 404
403 evt = &per_cpu(xen_clock_events, cpu); 405 evt = &per_cpu(xen_clock_events, cpu);
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 74681478100a..5aa422a3c3cd 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -277,7 +277,7 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
277 277
278 BUG_ON(irq == -1); 278 BUG_ON(irq == -1);
279#ifdef CONFIG_SMP 279#ifdef CONFIG_SMP
280 cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu)); 280 cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
281#endif 281#endif
282 282
283 clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq))); 283 clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
@@ -294,7 +294,7 @@ static void init_evtchn_cpu_bindings(void)
294 294
295 /* By default all event channels notify CPU#0. */ 295 /* By default all event channels notify CPU#0. */
296 for_each_irq_desc(i, desc) { 296 for_each_irq_desc(i, desc) {
297 cpumask_copy(desc->affinity, cpumask_of(0)); 297 cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
298 } 298 }
299#endif 299#endif
300 300
@@ -376,81 +376,69 @@ static void unmask_evtchn(int port)
376 put_cpu(); 376 put_cpu();
377} 377}
378 378
379static int get_nr_hw_irqs(void) 379static int xen_allocate_irq_dynamic(void)
380{ 380{
381 int ret = 1; 381 int first = 0;
382 int irq;
382 383
383#ifdef CONFIG_X86_IO_APIC 384#ifdef CONFIG_X86_IO_APIC
384 ret = get_nr_irqs_gsi(); 385 /*
386 * For an HVM guest or domain 0 which see "real" (emulated or
387	 * actual respectively) GSIs we allocate dynamic IRQs
388 * e.g. those corresponding to event channels or MSIs
389 * etc. from the range above those "real" GSIs to avoid
390 * collisions.
391 */
392 if (xen_initial_domain() || xen_hvm_domain())
393 first = get_nr_irqs_gsi();
385#endif 394#endif
386 395
387 return ret; 396retry:
388} 397 irq = irq_alloc_desc_from(first, -1);
389 398
390static int find_unbound_pirq(int type) 399 if (irq == -ENOMEM && first > NR_IRQS_LEGACY) {
391{ 400 printk(KERN_ERR "Out of dynamic IRQ space and eating into GSI space. You should increase nr_irqs\n");
392 int rc, i; 401 first = max(NR_IRQS_LEGACY, first - NR_IRQS_LEGACY);
393 struct physdev_get_free_pirq op_get_free_pirq; 402 goto retry;
394 op_get_free_pirq.type = type; 403 }
395 404
396 rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq); 405 if (irq < 0)
397 if (!rc) 406 panic("No available IRQ to bind to: increase nr_irqs!\n");
398 return op_get_free_pirq.pirq;
399 407
400 for (i = 0; i < nr_irqs; i++) { 408 return irq;
401 if (pirq_to_irq[i] < 0)
402 return i;
403 }
404 return -1;
405} 409}
406 410
407static int find_unbound_irq(void) 411static int xen_allocate_irq_gsi(unsigned gsi)
408{ 412{
409 struct irq_data *data; 413 int irq;
410 int irq, res;
411 int bottom = get_nr_hw_irqs();
412 int top = nr_irqs-1;
413
414 if (bottom == nr_irqs)
415 goto no_irqs;
416 414
417 /* This loop starts from the top of IRQ space and goes down. 415 /*
418 * We need this b/c if we have a PCI device in a Xen PV guest 416 * A PV guest has no concept of a GSI (since it has no ACPI
419 * we do not have an IO-APIC (though the backend might have them) 417 * nor access to/knowledge of the physical APICs). Therefore
420 * mapped in. To not have a collision of physical IRQs with the Xen 418 * all IRQs are dynamically allocated from the entire IRQ
421 * event channels start at the top of the IRQ space for virtual IRQs. 419 * space.
422 */ 420 */
423 for (irq = top; irq > bottom; irq--) { 421 if (xen_pv_domain() && !xen_initial_domain())
424 data = irq_get_irq_data(irq); 422 return xen_allocate_irq_dynamic();
425 /* only 15->0 have init'd desc; handle irq > 16 */
426 if (!data)
427 break;
428 if (data->chip == &no_irq_chip)
429 break;
430 if (data->chip != &xen_dynamic_chip)
431 continue;
432 if (irq_info[irq].type == IRQT_UNBOUND)
433 return irq;
434 }
435
436 if (irq == bottom)
437 goto no_irqs;
438 423
439 res = irq_alloc_desc_at(irq, -1); 424 /* Legacy IRQ descriptors are already allocated by the arch. */
425 if (gsi < NR_IRQS_LEGACY)
426 return gsi;
440 427
441 if (WARN_ON(res != irq)) 428 irq = irq_alloc_desc_at(gsi, -1);
442 return -1; 429 if (irq < 0)
430 panic("Unable to allocate to IRQ%d (%d)\n", gsi, irq);
443 431
444 return irq; 432 return irq;
445
446no_irqs:
447 panic("No available IRQ to bind to: increase nr_irqs!\n");
448} 433}
449 434
450static bool identity_mapped_irq(unsigned irq) 435static void xen_free_irq(unsigned irq)
451{ 436{
452 /* identity map all the hardware irqs */ 437 /* Legacy IRQ descriptors are managed by the arch. */
453 return irq < get_nr_hw_irqs(); 438 if (irq < NR_IRQS_LEGACY)
439 return;
440
441 irq_free_desc(irq);
454} 442}
455 443
456static void pirq_unmask_notify(int irq) 444static void pirq_unmask_notify(int irq)
@@ -486,7 +474,7 @@ static bool probing_irq(int irq)
486 return desc && desc->action == NULL; 474 return desc && desc->action == NULL;
487} 475}
488 476
489static unsigned int startup_pirq(unsigned int irq) 477static unsigned int __startup_pirq(unsigned int irq)
490{ 478{
491 struct evtchn_bind_pirq bind_pirq; 479 struct evtchn_bind_pirq bind_pirq;
492 struct irq_info *info = info_for_irq(irq); 480 struct irq_info *info = info_for_irq(irq);
@@ -524,9 +512,15 @@ out:
524 return 0; 512 return 0;
525} 513}
526 514
527static void shutdown_pirq(unsigned int irq) 515static unsigned int startup_pirq(struct irq_data *data)
516{
517 return __startup_pirq(data->irq);
518}
519
520static void shutdown_pirq(struct irq_data *data)
528{ 521{
529 struct evtchn_close close; 522 struct evtchn_close close;
523 unsigned int irq = data->irq;
530 struct irq_info *info = info_for_irq(irq); 524 struct irq_info *info = info_for_irq(irq);
531 int evtchn = evtchn_from_irq(irq); 525 int evtchn = evtchn_from_irq(irq);
532 526
@@ -546,20 +540,20 @@ static void shutdown_pirq(unsigned int irq)
546 info->evtchn = 0; 540 info->evtchn = 0;
547} 541}
548 542
549static void enable_pirq(unsigned int irq) 543static void enable_pirq(struct irq_data *data)
550{ 544{
551 startup_pirq(irq); 545 startup_pirq(data);
552} 546}
553 547
554static void disable_pirq(unsigned int irq) 548static void disable_pirq(struct irq_data *data)
555{ 549{
556} 550}
557 551
558static void ack_pirq(unsigned int irq) 552static void ack_pirq(struct irq_data *data)
559{ 553{
560 int evtchn = evtchn_from_irq(irq); 554 int evtchn = evtchn_from_irq(data->irq);
561 555
562 move_native_irq(irq); 556 move_native_irq(data->irq);
563 557
564 if (VALID_EVTCHN(evtchn)) { 558 if (VALID_EVTCHN(evtchn)) {
565 mask_evtchn(evtchn); 559 mask_evtchn(evtchn);
@@ -567,23 +561,6 @@ static void ack_pirq(unsigned int irq)
567 } 561 }
568} 562}
569 563
570static void end_pirq(unsigned int irq)
571{
572 int evtchn = evtchn_from_irq(irq);
573 struct irq_desc *desc = irq_to_desc(irq);
574
575 if (WARN_ON(!desc))
576 return;
577
578 if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
579 (IRQ_DISABLED|IRQ_PENDING)) {
580 shutdown_pirq(irq);
581 } else if (VALID_EVTCHN(evtchn)) {
582 unmask_evtchn(evtchn);
583 pirq_unmask_notify(irq);
584 }
585}
586
587static int find_irq_by_gsi(unsigned gsi) 564static int find_irq_by_gsi(unsigned gsi)
588{ 565{
589 int irq; 566 int irq;
@@ -638,14 +615,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
638 goto out; /* XXX need refcount? */ 615 goto out; /* XXX need refcount? */
639 } 616 }
640 617
641 /* If we are a PV guest, we don't have GSIs (no ACPI passed). Therefore 618 irq = xen_allocate_irq_gsi(gsi);
642 * we are using the !xen_initial_domain() to drop in the function.*/
643 if (identity_mapped_irq(gsi) || (!xen_initial_domain() &&
644 xen_pv_domain())) {
645 irq = gsi;
646 irq_alloc_desc_at(irq, -1);
647 } else
648 irq = find_unbound_irq();
649 619
650 set_irq_chip_and_handler_name(irq, &xen_pirq_chip, 620 set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
651 handle_level_irq, name); 621 handle_level_irq, name);
@@ -658,7 +628,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
658 * this in the priv domain. */ 628 * this in the priv domain. */
659 if (xen_initial_domain() && 629 if (xen_initial_domain() &&
660 HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) { 630 HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
661 irq_free_desc(irq); 631 xen_free_irq(irq);
662 irq = -ENOSPC; 632 irq = -ENOSPC;
663 goto out; 633 goto out;
664 } 634 }
@@ -677,12 +647,29 @@ out:
677#include <linux/msi.h> 647#include <linux/msi.h>
678#include "../pci/msi.h" 648#include "../pci/msi.h"
679 649
650static int find_unbound_pirq(int type)
651{
652 int rc, i;
653 struct physdev_get_free_pirq op_get_free_pirq;
654 op_get_free_pirq.type = type;
655
656 rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
657 if (!rc)
658 return op_get_free_pirq.pirq;
659
660 for (i = 0; i < nr_irqs; i++) {
661 if (pirq_to_irq[i] < 0)
662 return i;
663 }
664 return -1;
665}
666
680void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc) 667void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc)
681{ 668{
682 spin_lock(&irq_mapping_update_lock); 669 spin_lock(&irq_mapping_update_lock);
683 670
684 if (alloc & XEN_ALLOC_IRQ) { 671 if (alloc & XEN_ALLOC_IRQ) {
685 *irq = find_unbound_irq(); 672 *irq = xen_allocate_irq_dynamic();
686 if (*irq == -1) 673 if (*irq == -1)
687 goto out; 674 goto out;
688 } 675 }
@@ -732,7 +719,7 @@ int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type)
732 719
733 spin_lock(&irq_mapping_update_lock); 720 spin_lock(&irq_mapping_update_lock);
734 721
735 irq = find_unbound_irq(); 722 irq = xen_allocate_irq_dynamic();
736 723
737 if (irq == -1) 724 if (irq == -1)
738 goto out; 725 goto out;
@@ -741,7 +728,7 @@ int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type)
741 if (rc) { 728 if (rc) {
742 printk(KERN_WARNING "xen map irq failed %d\n", rc); 729 printk(KERN_WARNING "xen map irq failed %d\n", rc);
743 730
744 irq_free_desc(irq); 731 xen_free_irq(irq);
745 732
746 irq = -1; 733 irq = -1;
747 goto out; 734 goto out;
@@ -783,7 +770,7 @@ int xen_destroy_irq(int irq)
783 } 770 }
784 irq_info[irq] = mk_unbound_info(); 771 irq_info[irq] = mk_unbound_info();
785 772
786 irq_free_desc(irq); 773 xen_free_irq(irq);
787 774
788out: 775out:
789 spin_unlock(&irq_mapping_update_lock); 776 spin_unlock(&irq_mapping_update_lock);
@@ -814,7 +801,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
814 irq = evtchn_to_irq[evtchn]; 801 irq = evtchn_to_irq[evtchn];
815 802
816 if (irq == -1) { 803 if (irq == -1) {
817 irq = find_unbound_irq(); 804 irq = xen_allocate_irq_dynamic();
818 805
819 set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, 806 set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
820 handle_fasteoi_irq, "event"); 807 handle_fasteoi_irq, "event");
@@ -839,7 +826,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
839 irq = per_cpu(ipi_to_irq, cpu)[ipi]; 826 irq = per_cpu(ipi_to_irq, cpu)[ipi];
840 827
841 if (irq == -1) { 828 if (irq == -1) {
842 irq = find_unbound_irq(); 829 irq = xen_allocate_irq_dynamic();
843 if (irq < 0) 830 if (irq < 0)
844 goto out; 831 goto out;
845 832
@@ -875,7 +862,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
875 irq = per_cpu(virq_to_irq, cpu)[virq]; 862 irq = per_cpu(virq_to_irq, cpu)[virq];
876 863
877 if (irq == -1) { 864 if (irq == -1) {
878 irq = find_unbound_irq(); 865 irq = xen_allocate_irq_dynamic();
879 866
880 set_irq_chip_and_handler_name(irq, &xen_percpu_chip, 867 set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
881 handle_percpu_irq, "virq"); 868 handle_percpu_irq, "virq");
@@ -934,7 +921,7 @@ static void unbind_from_irq(unsigned int irq)
934 if (irq_info[irq].type != IRQT_UNBOUND) { 921 if (irq_info[irq].type != IRQT_UNBOUND) {
935 irq_info[irq] = mk_unbound_info(); 922 irq_info[irq] = mk_unbound_info();
936 923
937 irq_free_desc(irq); 924 xen_free_irq(irq);
938 } 925 }
939 926
940 spin_unlock(&irq_mapping_update_lock); 927 spin_unlock(&irq_mapping_update_lock);
@@ -1234,11 +1221,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
1234 return 0; 1221 return 0;
1235} 1222}
1236 1223
1237static int set_affinity_irq(unsigned irq, const struct cpumask *dest) 1224static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
1225 bool force)
1238{ 1226{
1239 unsigned tcpu = cpumask_first(dest); 1227 unsigned tcpu = cpumask_first(dest);
1240 1228
1241 return rebind_irq_to_cpu(irq, tcpu); 1229 return rebind_irq_to_cpu(data->irq, tcpu);
1242} 1230}
1243 1231
1244int resend_irq_on_evtchn(unsigned int irq) 1232int resend_irq_on_evtchn(unsigned int irq)
@@ -1257,35 +1245,35 @@ int resend_irq_on_evtchn(unsigned int irq)
1257 return 1; 1245 return 1;
1258} 1246}
1259 1247
1260static void enable_dynirq(unsigned int irq) 1248static void enable_dynirq(struct irq_data *data)
1261{ 1249{
1262 int evtchn = evtchn_from_irq(irq); 1250 int evtchn = evtchn_from_irq(data->irq);
1263 1251
1264 if (VALID_EVTCHN(evtchn)) 1252 if (VALID_EVTCHN(evtchn))
1265 unmask_evtchn(evtchn); 1253 unmask_evtchn(evtchn);
1266} 1254}
1267 1255
1268static void disable_dynirq(unsigned int irq) 1256static void disable_dynirq(struct irq_data *data)
1269{ 1257{
1270 int evtchn = evtchn_from_irq(irq); 1258 int evtchn = evtchn_from_irq(data->irq);
1271 1259
1272 if (VALID_EVTCHN(evtchn)) 1260 if (VALID_EVTCHN(evtchn))
1273 mask_evtchn(evtchn); 1261 mask_evtchn(evtchn);
1274} 1262}
1275 1263
1276static void ack_dynirq(unsigned int irq) 1264static void ack_dynirq(struct irq_data *data)
1277{ 1265{
1278 int evtchn = evtchn_from_irq(irq); 1266 int evtchn = evtchn_from_irq(data->irq);
1279 1267
1280 move_masked_irq(irq); 1268 move_masked_irq(data->irq);
1281 1269
1282 if (VALID_EVTCHN(evtchn)) 1270 if (VALID_EVTCHN(evtchn))
1283 unmask_evtchn(evtchn); 1271 unmask_evtchn(evtchn);
1284} 1272}
1285 1273
1286static int retrigger_dynirq(unsigned int irq) 1274static int retrigger_dynirq(struct irq_data *data)
1287{ 1275{
1288 int evtchn = evtchn_from_irq(irq); 1276 int evtchn = evtchn_from_irq(data->irq);
1289 struct shared_info *sh = HYPERVISOR_shared_info; 1277 struct shared_info *sh = HYPERVISOR_shared_info;
1290 int ret = 0; 1278 int ret = 0;
1291 1279
@@ -1334,7 +1322,7 @@ static void restore_cpu_pirqs(void)
1334 1322
1335 printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq); 1323 printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
1336 1324
1337 startup_pirq(irq); 1325 __startup_pirq(irq);
1338 } 1326 }
1339} 1327}
1340 1328
@@ -1486,45 +1474,44 @@ void xen_irq_resume(void)
1486} 1474}
1487 1475
1488static struct irq_chip xen_dynamic_chip __read_mostly = { 1476static struct irq_chip xen_dynamic_chip __read_mostly = {
1489 .name = "xen-dyn", 1477 .name = "xen-dyn",
1490 1478
1491 .disable = disable_dynirq, 1479 .irq_disable = disable_dynirq,
1492 .mask = disable_dynirq, 1480 .irq_mask = disable_dynirq,
1493 .unmask = enable_dynirq, 1481 .irq_unmask = enable_dynirq,
1494 1482
1495 .eoi = ack_dynirq, 1483 .irq_eoi = ack_dynirq,
1496 .set_affinity = set_affinity_irq, 1484 .irq_set_affinity = set_affinity_irq,
1497 .retrigger = retrigger_dynirq, 1485 .irq_retrigger = retrigger_dynirq,
1498}; 1486};
1499 1487
1500static struct irq_chip xen_pirq_chip __read_mostly = { 1488static struct irq_chip xen_pirq_chip __read_mostly = {
1501 .name = "xen-pirq", 1489 .name = "xen-pirq",
1502 1490
1503 .startup = startup_pirq, 1491 .irq_startup = startup_pirq,
1504 .shutdown = shutdown_pirq, 1492 .irq_shutdown = shutdown_pirq,
1505 1493
1506 .enable = enable_pirq, 1494 .irq_enable = enable_pirq,
1507 .unmask = enable_pirq, 1495 .irq_unmask = enable_pirq,
1508 1496
1509 .disable = disable_pirq, 1497 .irq_disable = disable_pirq,
1510 .mask = disable_pirq, 1498 .irq_mask = disable_pirq,
1511 1499
1512 .ack = ack_pirq, 1500 .irq_ack = ack_pirq,
1513 .end = end_pirq,
1514 1501
1515 .set_affinity = set_affinity_irq, 1502 .irq_set_affinity = set_affinity_irq,
1516 1503
1517 .retrigger = retrigger_dynirq, 1504 .irq_retrigger = retrigger_dynirq,
1518}; 1505};
1519 1506
1520static struct irq_chip xen_percpu_chip __read_mostly = { 1507static struct irq_chip xen_percpu_chip __read_mostly = {
1521 .name = "xen-percpu", 1508 .name = "xen-percpu",
1522 1509
1523 .disable = disable_dynirq, 1510 .irq_disable = disable_dynirq,
1524 .mask = disable_dynirq, 1511 .irq_mask = disable_dynirq,
1525 .unmask = enable_dynirq, 1512 .irq_unmask = enable_dynirq,
1526 1513
1527 .ack = ack_dynirq, 1514 .irq_ack = ack_dynirq,
1528}; 1515};
1529 1516
1530int xen_set_callback_via(uint64_t via) 1517int xen_set_callback_via(uint64_t via)