diff options
author | Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 2011-03-10 14:41:43 -0500 |
---|---|---|
committer | Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 2011-03-10 14:41:43 -0500 |
commit | 8054c3634cb3cb9d426c8ade934389213b857858 (patch) | |
tree | 30d4de58cdd6be0924bd96dde2de2a826bf27099 /drivers | |
parent | f5412be599602124d2bdd49947b231dd77c0bf99 (diff) | |
parent | 1aa0b51a033d4a1ec6d29d06487e053398afa21b (diff) |
Merge branch 'stable/irq.rework' into stable/irq.cleanup
* stable/irq.rework:
xen/irq: Cleanup up the pirq_to_irq for DomU PV PCI passthrough guests as well.
xen: Use IRQF_FORCE_RESUME
xen/timer: Missing IRQF_NO_SUSPEND in timer code broke suspend.
xen: Fix compile error introduced by "switch to new irq_chip functions"
xen: Switch to new irq_chip functions
xen: Remove stale irq_chip.end
xen: events: do not free legacy IRQs
xen: events: allocate GSIs and dynamic IRQs from separate IRQ ranges.
xen: events: add xen_allocate_irq_{dynamic, gsi} and xen_free_irq
xen:events: move find_unbound_irq inside CONFIG_PCI_MSI
xen: handled remapped IRQs when enabling a pcifront PCI device.
genirq: Add IRQF_FORCE_RESUME
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/xen/events.c | 278 |
1 files changed, 124 insertions, 154 deletions
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 74681478100a..89987a7bf26f 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -277,7 +277,7 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) | |||
277 | 277 | ||
278 | BUG_ON(irq == -1); | 278 | BUG_ON(irq == -1); |
279 | #ifdef CONFIG_SMP | 279 | #ifdef CONFIG_SMP |
280 | cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu)); | 280 | cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu)); |
281 | #endif | 281 | #endif |
282 | 282 | ||
283 | clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq))); | 283 | clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq))); |
@@ -294,7 +294,7 @@ static void init_evtchn_cpu_bindings(void) | |||
294 | 294 | ||
295 | /* By default all event channels notify CPU#0. */ | 295 | /* By default all event channels notify CPU#0. */ |
296 | for_each_irq_desc(i, desc) { | 296 | for_each_irq_desc(i, desc) { |
297 | cpumask_copy(desc->affinity, cpumask_of(0)); | 297 | cpumask_copy(desc->irq_data.affinity, cpumask_of(0)); |
298 | } | 298 | } |
299 | #endif | 299 | #endif |
300 | 300 | ||
@@ -376,81 +376,69 @@ static void unmask_evtchn(int port) | |||
376 | put_cpu(); | 376 | put_cpu(); |
377 | } | 377 | } |
378 | 378 | ||
379 | static int get_nr_hw_irqs(void) | 379 | static int xen_allocate_irq_dynamic(void) |
380 | { | 380 | { |
381 | int ret = 1; | 381 | int first = 0; |
382 | int irq; | ||
382 | 383 | ||
383 | #ifdef CONFIG_X86_IO_APIC | 384 | #ifdef CONFIG_X86_IO_APIC |
384 | ret = get_nr_irqs_gsi(); | 385 | /* |
386 | * For an HVM guest or domain 0 which see "real" (emulated or | ||
387 | * actual respectively) GSIs we allocate dynamic IRQs | ||
388 | * e.g. those corresponding to event channels or MSIs | ||
389 | * etc. from the range above those "real" GSIs to avoid | ||
390 | * collisions. | ||
391 | */ | ||
392 | if (xen_initial_domain() || xen_hvm_domain()) | ||
393 | first = get_nr_irqs_gsi(); | ||
385 | #endif | 394 | #endif |
386 | 395 | ||
387 | return ret; | 396 | retry: |
388 | } | 397 | irq = irq_alloc_desc_from(first, -1); |
389 | 398 | ||
390 | static int find_unbound_pirq(int type) | 399 | if (irq == -ENOMEM && first > NR_IRQS_LEGACY) { |
391 | { | 400 | printk(KERN_ERR "Out of dynamic IRQ space and eating into GSI space. You should increase nr_irqs\n"); |
392 | int rc, i; | 401 | first = max(NR_IRQS_LEGACY, first - NR_IRQS_LEGACY); |
393 | struct physdev_get_free_pirq op_get_free_pirq; | 402 | goto retry; |
394 | op_get_free_pirq.type = type; | 403 | } |
395 | 404 | ||
396 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq); | 405 | if (irq < 0) |
397 | if (!rc) | 406 | panic("No available IRQ to bind to: increase nr_irqs!\n"); |
398 | return op_get_free_pirq.pirq; | ||
399 | 407 | ||
400 | for (i = 0; i < nr_irqs; i++) { | 408 | return irq; |
401 | if (pirq_to_irq[i] < 0) | ||
402 | return i; | ||
403 | } | ||
404 | return -1; | ||
405 | } | 409 | } |
406 | 410 | ||
407 | static int find_unbound_irq(void) | 411 | static int xen_allocate_irq_gsi(unsigned gsi) |
408 | { | 412 | { |
409 | struct irq_data *data; | 413 | int irq; |
410 | int irq, res; | ||
411 | int bottom = get_nr_hw_irqs(); | ||
412 | int top = nr_irqs-1; | ||
413 | |||
414 | if (bottom == nr_irqs) | ||
415 | goto no_irqs; | ||
416 | 414 | ||
417 | /* This loop starts from the top of IRQ space and goes down. | 415 | /* |
418 | * We need this b/c if we have a PCI device in a Xen PV guest | 416 | * A PV guest has no concept of a GSI (since it has no ACPI |
419 | * we do not have an IO-APIC (though the backend might have them) | 417 | * nor access to/knowledge of the physical APICs). Therefore |
420 | * mapped in. To not have a collision of physical IRQs with the Xen | 418 | * all IRQs are dynamically allocated from the entire IRQ |
421 | * event channels start at the top of the IRQ space for virtual IRQs. | 419 | * space. |
422 | */ | 420 | */ |
423 | for (irq = top; irq > bottom; irq--) { | 421 | if (xen_pv_domain() && !xen_initial_domain()) |
424 | data = irq_get_irq_data(irq); | 422 | return xen_allocate_irq_dynamic(); |
425 | /* only 15->0 have init'd desc; handle irq > 16 */ | ||
426 | if (!data) | ||
427 | break; | ||
428 | if (data->chip == &no_irq_chip) | ||
429 | break; | ||
430 | if (data->chip != &xen_dynamic_chip) | ||
431 | continue; | ||
432 | if (irq_info[irq].type == IRQT_UNBOUND) | ||
433 | return irq; | ||
434 | } | ||
435 | |||
436 | if (irq == bottom) | ||
437 | goto no_irqs; | ||
438 | 423 | ||
439 | res = irq_alloc_desc_at(irq, -1); | 424 | /* Legacy IRQ descriptors are already allocated by the arch. */ |
425 | if (gsi < NR_IRQS_LEGACY) | ||
426 | return gsi; | ||
440 | 427 | ||
441 | if (WARN_ON(res != irq)) | 428 | irq = irq_alloc_desc_at(gsi, -1); |
442 | return -1; | 429 | if (irq < 0) |
430 | panic("Unable to allocate to IRQ%d (%d)\n", gsi, irq); | ||
443 | 431 | ||
444 | return irq; | 432 | return irq; |
445 | |||
446 | no_irqs: | ||
447 | panic("No available IRQ to bind to: increase nr_irqs!\n"); | ||
448 | } | 433 | } |
449 | 434 | ||
450 | static bool identity_mapped_irq(unsigned irq) | 435 | static void xen_free_irq(unsigned irq) |
451 | { | 436 | { |
452 | /* identity map all the hardware irqs */ | 437 | /* Legacy IRQ descriptors are managed by the arch. */ |
453 | return irq < get_nr_hw_irqs(); | 438 | if (irq < NR_IRQS_LEGACY) |
439 | return; | ||
440 | |||
441 | irq_free_desc(irq); | ||
454 | } | 442 | } |
455 | 443 | ||
456 | static void pirq_unmask_notify(int irq) | 444 | static void pirq_unmask_notify(int irq) |
@@ -486,7 +474,7 @@ static bool probing_irq(int irq) | |||
486 | return desc && desc->action == NULL; | 474 | return desc && desc->action == NULL; |
487 | } | 475 | } |
488 | 476 | ||
489 | static unsigned int startup_pirq(unsigned int irq) | 477 | static unsigned int __startup_pirq(unsigned int irq) |
490 | { | 478 | { |
491 | struct evtchn_bind_pirq bind_pirq; | 479 | struct evtchn_bind_pirq bind_pirq; |
492 | struct irq_info *info = info_for_irq(irq); | 480 | struct irq_info *info = info_for_irq(irq); |
@@ -524,9 +512,15 @@ out: | |||
524 | return 0; | 512 | return 0; |
525 | } | 513 | } |
526 | 514 | ||
527 | static void shutdown_pirq(unsigned int irq) | 515 | static unsigned int startup_pirq(struct irq_data *data) |
516 | { | ||
517 | return __startup_pirq(data->irq); | ||
518 | } | ||
519 | |||
520 | static void shutdown_pirq(struct irq_data *data) | ||
528 | { | 521 | { |
529 | struct evtchn_close close; | 522 | struct evtchn_close close; |
523 | unsigned int irq = data->irq; | ||
530 | struct irq_info *info = info_for_irq(irq); | 524 | struct irq_info *info = info_for_irq(irq); |
531 | int evtchn = evtchn_from_irq(irq); | 525 | int evtchn = evtchn_from_irq(irq); |
532 | 526 | ||
@@ -546,20 +540,20 @@ static void shutdown_pirq(unsigned int irq) | |||
546 | info->evtchn = 0; | 540 | info->evtchn = 0; |
547 | } | 541 | } |
548 | 542 | ||
549 | static void enable_pirq(unsigned int irq) | 543 | static void enable_pirq(struct irq_data *data) |
550 | { | 544 | { |
551 | startup_pirq(irq); | 545 | startup_pirq(data); |
552 | } | 546 | } |
553 | 547 | ||
554 | static void disable_pirq(unsigned int irq) | 548 | static void disable_pirq(struct irq_data *data) |
555 | { | 549 | { |
556 | } | 550 | } |
557 | 551 | ||
558 | static void ack_pirq(unsigned int irq) | 552 | static void ack_pirq(struct irq_data *data) |
559 | { | 553 | { |
560 | int evtchn = evtchn_from_irq(irq); | 554 | int evtchn = evtchn_from_irq(data->irq); |
561 | 555 | ||
562 | move_native_irq(irq); | 556 | move_native_irq(data->irq); |
563 | 557 | ||
564 | if (VALID_EVTCHN(evtchn)) { | 558 | if (VALID_EVTCHN(evtchn)) { |
565 | mask_evtchn(evtchn); | 559 | mask_evtchn(evtchn); |
@@ -567,23 +561,6 @@ static void ack_pirq(unsigned int irq) | |||
567 | } | 561 | } |
568 | } | 562 | } |
569 | 563 | ||
570 | static void end_pirq(unsigned int irq) | ||
571 | { | ||
572 | int evtchn = evtchn_from_irq(irq); | ||
573 | struct irq_desc *desc = irq_to_desc(irq); | ||
574 | |||
575 | if (WARN_ON(!desc)) | ||
576 | return; | ||
577 | |||
578 | if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) == | ||
579 | (IRQ_DISABLED|IRQ_PENDING)) { | ||
580 | shutdown_pirq(irq); | ||
581 | } else if (VALID_EVTCHN(evtchn)) { | ||
582 | unmask_evtchn(evtchn); | ||
583 | pirq_unmask_notify(irq); | ||
584 | } | ||
585 | } | ||
586 | |||
587 | static int find_irq_by_gsi(unsigned gsi) | 564 | static int find_irq_by_gsi(unsigned gsi) |
588 | { | 565 | { |
589 | int irq; | 566 | int irq; |
@@ -638,14 +615,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name) | |||
638 | goto out; /* XXX need refcount? */ | 615 | goto out; /* XXX need refcount? */ |
639 | } | 616 | } |
640 | 617 | ||
641 | /* If we are a PV guest, we don't have GSIs (no ACPI passed). Therefore | 618 | irq = xen_allocate_irq_gsi(gsi); |
642 | * we are using the !xen_initial_domain() to drop in the function.*/ | ||
643 | if (identity_mapped_irq(gsi) || (!xen_initial_domain() && | ||
644 | xen_pv_domain())) { | ||
645 | irq = gsi; | ||
646 | irq_alloc_desc_at(irq, -1); | ||
647 | } else | ||
648 | irq = find_unbound_irq(); | ||
649 | 619 | ||
650 | set_irq_chip_and_handler_name(irq, &xen_pirq_chip, | 620 | set_irq_chip_and_handler_name(irq, &xen_pirq_chip, |
651 | handle_level_irq, name); | 621 | handle_level_irq, name); |
@@ -658,7 +628,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name) | |||
658 | * this in the priv domain. */ | 628 | * this in the priv domain. */ |
659 | if (xen_initial_domain() && | 629 | if (xen_initial_domain() && |
660 | HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) { | 630 | HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) { |
661 | irq_free_desc(irq); | 631 | xen_free_irq(irq); |
662 | irq = -ENOSPC; | 632 | irq = -ENOSPC; |
663 | goto out; | 633 | goto out; |
664 | } | 634 | } |
@@ -677,12 +647,29 @@ out: | |||
677 | #include <linux/msi.h> | 647 | #include <linux/msi.h> |
678 | #include "../pci/msi.h" | 648 | #include "../pci/msi.h" |
679 | 649 | ||
650 | static int find_unbound_pirq(int type) | ||
651 | { | ||
652 | int rc, i; | ||
653 | struct physdev_get_free_pirq op_get_free_pirq; | ||
654 | op_get_free_pirq.type = type; | ||
655 | |||
656 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq); | ||
657 | if (!rc) | ||
658 | return op_get_free_pirq.pirq; | ||
659 | |||
660 | for (i = 0; i < nr_irqs; i++) { | ||
661 | if (pirq_to_irq[i] < 0) | ||
662 | return i; | ||
663 | } | ||
664 | return -1; | ||
665 | } | ||
666 | |||
680 | void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc) | 667 | void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc) |
681 | { | 668 | { |
682 | spin_lock(&irq_mapping_update_lock); | 669 | spin_lock(&irq_mapping_update_lock); |
683 | 670 | ||
684 | if (alloc & XEN_ALLOC_IRQ) { | 671 | if (alloc & XEN_ALLOC_IRQ) { |
685 | *irq = find_unbound_irq(); | 672 | *irq = xen_allocate_irq_dynamic(); |
686 | if (*irq == -1) | 673 | if (*irq == -1) |
687 | goto out; | 674 | goto out; |
688 | } | 675 | } |
@@ -732,7 +719,7 @@ int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type) | |||
732 | 719 | ||
733 | spin_lock(&irq_mapping_update_lock); | 720 | spin_lock(&irq_mapping_update_lock); |
734 | 721 | ||
735 | irq = find_unbound_irq(); | 722 | irq = xen_allocate_irq_dynamic(); |
736 | 723 | ||
737 | if (irq == -1) | 724 | if (irq == -1) |
738 | goto out; | 725 | goto out; |
@@ -741,7 +728,7 @@ int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type) | |||
741 | if (rc) { | 728 | if (rc) { |
742 | printk(KERN_WARNING "xen map irq failed %d\n", rc); | 729 | printk(KERN_WARNING "xen map irq failed %d\n", rc); |
743 | 730 | ||
744 | irq_free_desc(irq); | 731 | xen_free_irq(irq); |
745 | 732 | ||
746 | irq = -1; | 733 | irq = -1; |
747 | goto out; | 734 | goto out; |
@@ -779,11 +766,12 @@ int xen_destroy_irq(int irq) | |||
779 | printk(KERN_WARNING "unmap irq failed %d\n", rc); | 766 | printk(KERN_WARNING "unmap irq failed %d\n", rc); |
780 | goto out; | 767 | goto out; |
781 | } | 768 | } |
782 | pirq_to_irq[info->u.pirq.pirq] = -1; | ||
783 | } | 769 | } |
770 | pirq_to_irq[info->u.pirq.pirq] = -1; | ||
771 | |||
784 | irq_info[irq] = mk_unbound_info(); | 772 | irq_info[irq] = mk_unbound_info(); |
785 | 773 | ||
786 | irq_free_desc(irq); | 774 | xen_free_irq(irq); |
787 | 775 | ||
788 | out: | 776 | out: |
789 | spin_unlock(&irq_mapping_update_lock); | 777 | spin_unlock(&irq_mapping_update_lock); |
@@ -814,7 +802,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) | |||
814 | irq = evtchn_to_irq[evtchn]; | 802 | irq = evtchn_to_irq[evtchn]; |
815 | 803 | ||
816 | if (irq == -1) { | 804 | if (irq == -1) { |
817 | irq = find_unbound_irq(); | 805 | irq = xen_allocate_irq_dynamic(); |
818 | 806 | ||
819 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, | 807 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, |
820 | handle_fasteoi_irq, "event"); | 808 | handle_fasteoi_irq, "event"); |
@@ -839,7 +827,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) | |||
839 | irq = per_cpu(ipi_to_irq, cpu)[ipi]; | 827 | irq = per_cpu(ipi_to_irq, cpu)[ipi]; |
840 | 828 | ||
841 | if (irq == -1) { | 829 | if (irq == -1) { |
842 | irq = find_unbound_irq(); | 830 | irq = xen_allocate_irq_dynamic(); |
843 | if (irq < 0) | 831 | if (irq < 0) |
844 | goto out; | 832 | goto out; |
845 | 833 | ||
@@ -875,7 +863,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu) | |||
875 | irq = per_cpu(virq_to_irq, cpu)[virq]; | 863 | irq = per_cpu(virq_to_irq, cpu)[virq]; |
876 | 864 | ||
877 | if (irq == -1) { | 865 | if (irq == -1) { |
878 | irq = find_unbound_irq(); | 866 | irq = xen_allocate_irq_dynamic(); |
879 | 867 | ||
880 | set_irq_chip_and_handler_name(irq, &xen_percpu_chip, | 868 | set_irq_chip_and_handler_name(irq, &xen_percpu_chip, |
881 | handle_percpu_irq, "virq"); | 869 | handle_percpu_irq, "virq"); |
@@ -934,7 +922,7 @@ static void unbind_from_irq(unsigned int irq) | |||
934 | if (irq_info[irq].type != IRQT_UNBOUND) { | 922 | if (irq_info[irq].type != IRQT_UNBOUND) { |
935 | irq_info[irq] = mk_unbound_info(); | 923 | irq_info[irq] = mk_unbound_info(); |
936 | 924 | ||
937 | irq_free_desc(irq); | 925 | xen_free_irq(irq); |
938 | } | 926 | } |
939 | 927 | ||
940 | spin_unlock(&irq_mapping_update_lock); | 928 | spin_unlock(&irq_mapping_update_lock); |
@@ -990,7 +978,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi, | |||
990 | if (irq < 0) | 978 | if (irq < 0) |
991 | return irq; | 979 | return irq; |
992 | 980 | ||
993 | irqflags |= IRQF_NO_SUSPEND; | 981 | irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME; |
994 | retval = request_irq(irq, handler, irqflags, devname, dev_id); | 982 | retval = request_irq(irq, handler, irqflags, devname, dev_id); |
995 | if (retval != 0) { | 983 | if (retval != 0) { |
996 | unbind_from_irq(irq); | 984 | unbind_from_irq(irq); |
@@ -1234,11 +1222,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) | |||
1234 | return 0; | 1222 | return 0; |
1235 | } | 1223 | } |
1236 | 1224 | ||
1237 | static int set_affinity_irq(unsigned irq, const struct cpumask *dest) | 1225 | static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest, |
1226 | bool force) | ||
1238 | { | 1227 | { |
1239 | unsigned tcpu = cpumask_first(dest); | 1228 | unsigned tcpu = cpumask_first(dest); |
1240 | 1229 | ||
1241 | return rebind_irq_to_cpu(irq, tcpu); | 1230 | return rebind_irq_to_cpu(data->irq, tcpu); |
1242 | } | 1231 | } |
1243 | 1232 | ||
1244 | int resend_irq_on_evtchn(unsigned int irq) | 1233 | int resend_irq_on_evtchn(unsigned int irq) |
@@ -1257,35 +1246,35 @@ int resend_irq_on_evtchn(unsigned int irq) | |||
1257 | return 1; | 1246 | return 1; |
1258 | } | 1247 | } |
1259 | 1248 | ||
1260 | static void enable_dynirq(unsigned int irq) | 1249 | static void enable_dynirq(struct irq_data *data) |
1261 | { | 1250 | { |
1262 | int evtchn = evtchn_from_irq(irq); | 1251 | int evtchn = evtchn_from_irq(data->irq); |
1263 | 1252 | ||
1264 | if (VALID_EVTCHN(evtchn)) | 1253 | if (VALID_EVTCHN(evtchn)) |
1265 | unmask_evtchn(evtchn); | 1254 | unmask_evtchn(evtchn); |
1266 | } | 1255 | } |
1267 | 1256 | ||
1268 | static void disable_dynirq(unsigned int irq) | 1257 | static void disable_dynirq(struct irq_data *data) |
1269 | { | 1258 | { |
1270 | int evtchn = evtchn_from_irq(irq); | 1259 | int evtchn = evtchn_from_irq(data->irq); |
1271 | 1260 | ||
1272 | if (VALID_EVTCHN(evtchn)) | 1261 | if (VALID_EVTCHN(evtchn)) |
1273 | mask_evtchn(evtchn); | 1262 | mask_evtchn(evtchn); |
1274 | } | 1263 | } |
1275 | 1264 | ||
1276 | static void ack_dynirq(unsigned int irq) | 1265 | static void ack_dynirq(struct irq_data *data) |
1277 | { | 1266 | { |
1278 | int evtchn = evtchn_from_irq(irq); | 1267 | int evtchn = evtchn_from_irq(data->irq); |
1279 | 1268 | ||
1280 | move_masked_irq(irq); | 1269 | move_masked_irq(data->irq); |
1281 | 1270 | ||
1282 | if (VALID_EVTCHN(evtchn)) | 1271 | if (VALID_EVTCHN(evtchn)) |
1283 | unmask_evtchn(evtchn); | 1272 | unmask_evtchn(evtchn); |
1284 | } | 1273 | } |
1285 | 1274 | ||
1286 | static int retrigger_dynirq(unsigned int irq) | 1275 | static int retrigger_dynirq(struct irq_data *data) |
1287 | { | 1276 | { |
1288 | int evtchn = evtchn_from_irq(irq); | 1277 | int evtchn = evtchn_from_irq(data->irq); |
1289 | struct shared_info *sh = HYPERVISOR_shared_info; | 1278 | struct shared_info *sh = HYPERVISOR_shared_info; |
1290 | int ret = 0; | 1279 | int ret = 0; |
1291 | 1280 | ||
@@ -1334,7 +1323,7 @@ static void restore_cpu_pirqs(void) | |||
1334 | 1323 | ||
1335 | printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq); | 1324 | printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq); |
1336 | 1325 | ||
1337 | startup_pirq(irq); | 1326 | __startup_pirq(irq); |
1338 | } | 1327 | } |
1339 | } | 1328 | } |
1340 | 1329 | ||
@@ -1445,7 +1434,6 @@ void xen_poll_irq(int irq) | |||
1445 | void xen_irq_resume(void) | 1434 | void xen_irq_resume(void) |
1446 | { | 1435 | { |
1447 | unsigned int cpu, irq, evtchn; | 1436 | unsigned int cpu, irq, evtchn; |
1448 | struct irq_desc *desc; | ||
1449 | 1437 | ||
1450 | init_evtchn_cpu_bindings(); | 1438 | init_evtchn_cpu_bindings(); |
1451 | 1439 | ||
@@ -1465,66 +1453,48 @@ void xen_irq_resume(void) | |||
1465 | restore_cpu_ipis(cpu); | 1453 | restore_cpu_ipis(cpu); |
1466 | } | 1454 | } |
1467 | 1455 | ||
1468 | /* | ||
1469 | * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These | ||
1470 | * are not handled by the IRQ core. | ||
1471 | */ | ||
1472 | for_each_irq_desc(irq, desc) { | ||
1473 | if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND)) | ||
1474 | continue; | ||
1475 | if (desc->status & IRQ_DISABLED) | ||
1476 | continue; | ||
1477 | |||
1478 | evtchn = evtchn_from_irq(irq); | ||
1479 | if (evtchn == -1) | ||
1480 | continue; | ||
1481 | |||
1482 | unmask_evtchn(evtchn); | ||
1483 | } | ||
1484 | |||
1485 | restore_cpu_pirqs(); | 1456 | restore_cpu_pirqs(); |
1486 | } | 1457 | } |
1487 | 1458 | ||
1488 | static struct irq_chip xen_dynamic_chip __read_mostly = { | 1459 | static struct irq_chip xen_dynamic_chip __read_mostly = { |
1489 | .name = "xen-dyn", | 1460 | .name = "xen-dyn", |
1490 | 1461 | ||
1491 | .disable = disable_dynirq, | 1462 | .irq_disable = disable_dynirq, |
1492 | .mask = disable_dynirq, | 1463 | .irq_mask = disable_dynirq, |
1493 | .unmask = enable_dynirq, | 1464 | .irq_unmask = enable_dynirq, |
1494 | 1465 | ||
1495 | .eoi = ack_dynirq, | 1466 | .irq_eoi = ack_dynirq, |
1496 | .set_affinity = set_affinity_irq, | 1467 | .irq_set_affinity = set_affinity_irq, |
1497 | .retrigger = retrigger_dynirq, | 1468 | .irq_retrigger = retrigger_dynirq, |
1498 | }; | 1469 | }; |
1499 | 1470 | ||
1500 | static struct irq_chip xen_pirq_chip __read_mostly = { | 1471 | static struct irq_chip xen_pirq_chip __read_mostly = { |
1501 | .name = "xen-pirq", | 1472 | .name = "xen-pirq", |
1502 | 1473 | ||
1503 | .startup = startup_pirq, | 1474 | .irq_startup = startup_pirq, |
1504 | .shutdown = shutdown_pirq, | 1475 | .irq_shutdown = shutdown_pirq, |
1505 | 1476 | ||
1506 | .enable = enable_pirq, | 1477 | .irq_enable = enable_pirq, |
1507 | .unmask = enable_pirq, | 1478 | .irq_unmask = enable_pirq, |
1508 | 1479 | ||
1509 | .disable = disable_pirq, | 1480 | .irq_disable = disable_pirq, |
1510 | .mask = disable_pirq, | 1481 | .irq_mask = disable_pirq, |
1511 | 1482 | ||
1512 | .ack = ack_pirq, | 1483 | .irq_ack = ack_pirq, |
1513 | .end = end_pirq, | ||
1514 | 1484 | ||
1515 | .set_affinity = set_affinity_irq, | 1485 | .irq_set_affinity = set_affinity_irq, |
1516 | 1486 | ||
1517 | .retrigger = retrigger_dynirq, | 1487 | .irq_retrigger = retrigger_dynirq, |
1518 | }; | 1488 | }; |
1519 | 1489 | ||
1520 | static struct irq_chip xen_percpu_chip __read_mostly = { | 1490 | static struct irq_chip xen_percpu_chip __read_mostly = { |
1521 | .name = "xen-percpu", | 1491 | .name = "xen-percpu", |
1522 | 1492 | ||
1523 | .disable = disable_dynirq, | 1493 | .irq_disable = disable_dynirq, |
1524 | .mask = disable_dynirq, | 1494 | .irq_mask = disable_dynirq, |
1525 | .unmask = enable_dynirq, | 1495 | .irq_unmask = enable_dynirq, |
1526 | 1496 | ||
1527 | .ack = ack_dynirq, | 1497 | .irq_ack = ack_dynirq, |
1528 | }; | 1498 | }; |
1529 | 1499 | ||
1530 | int xen_set_callback_via(uint64_t via) | 1500 | int xen_set_callback_via(uint64_t via) |