diff options
Diffstat (limited to 'kernel/irq')
-rw-r--r-- | kernel/irq/Kconfig | 15 | ||||
-rw-r--r-- | kernel/irq/Makefile | 1 | ||||
-rw-r--r-- | kernel/irq/chip.c | 130 | ||||
-rw-r--r-- | kernel/irq/irqdomain.c | 567 | ||||
-rw-r--r-- | kernel/irq/manage.c | 2 | ||||
-rw-r--r-- | kernel/irq/msi.c | 330 |
6 files changed, 1024 insertions, 21 deletions
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index 225086b2652e..9a76e3beda54 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig | |||
@@ -55,6 +55,21 @@ config GENERIC_IRQ_CHIP | |||
55 | config IRQ_DOMAIN | 55 | config IRQ_DOMAIN |
56 | bool | 56 | bool |
57 | 57 | ||
58 | # Support for hierarchical irq domains | ||
59 | config IRQ_DOMAIN_HIERARCHY | ||
60 | bool | ||
61 | select IRQ_DOMAIN | ||
62 | |||
63 | # Generic MSI interrupt support | ||
64 | config GENERIC_MSI_IRQ | ||
65 | bool | ||
66 | |||
67 | # Generic MSI hierarchical interrupt domain support | ||
68 | config GENERIC_MSI_IRQ_DOMAIN | ||
69 | bool | ||
70 | select IRQ_DOMAIN_HIERARCHY | ||
71 | select GENERIC_MSI_IRQ | ||
72 | |||
58 | config HANDLE_DOMAIN_IRQ | 73 | config HANDLE_DOMAIN_IRQ |
59 | bool | 74 | bool |
60 | 75 | ||
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile index fff17381f0af..d12123526e2b 100644 --- a/kernel/irq/Makefile +++ b/kernel/irq/Makefile | |||
@@ -6,3 +6,4 @@ obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o | |||
6 | obj-$(CONFIG_PROC_FS) += proc.o | 6 | obj-$(CONFIG_PROC_FS) += proc.o |
7 | obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o | 7 | obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o |
8 | obj-$(CONFIG_PM_SLEEP) += pm.o | 8 | obj-$(CONFIG_PM_SLEEP) += pm.o |
9 | obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o | ||
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index e5202f00cabc..6f1c7a566b95 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
17 | #include <linux/kernel_stat.h> | 17 | #include <linux/kernel_stat.h> |
18 | #include <linux/irqdomain.h> | ||
18 | 19 | ||
19 | #include <trace/events/irq.h> | 20 | #include <trace/events/irq.h> |
20 | 21 | ||
@@ -178,6 +179,7 @@ int irq_startup(struct irq_desc *desc, bool resend) | |||
178 | irq_state_clr_disabled(desc); | 179 | irq_state_clr_disabled(desc); |
179 | desc->depth = 0; | 180 | desc->depth = 0; |
180 | 181 | ||
182 | irq_domain_activate_irq(&desc->irq_data); | ||
181 | if (desc->irq_data.chip->irq_startup) { | 183 | if (desc->irq_data.chip->irq_startup) { |
182 | ret = desc->irq_data.chip->irq_startup(&desc->irq_data); | 184 | ret = desc->irq_data.chip->irq_startup(&desc->irq_data); |
183 | irq_state_clr_masked(desc); | 185 | irq_state_clr_masked(desc); |
@@ -199,6 +201,7 @@ void irq_shutdown(struct irq_desc *desc) | |||
199 | desc->irq_data.chip->irq_disable(&desc->irq_data); | 201 | desc->irq_data.chip->irq_disable(&desc->irq_data); |
200 | else | 202 | else |
201 | desc->irq_data.chip->irq_mask(&desc->irq_data); | 203 | desc->irq_data.chip->irq_mask(&desc->irq_data); |
204 | irq_domain_deactivate_irq(&desc->irq_data); | ||
202 | irq_state_set_masked(desc); | 205 | irq_state_set_masked(desc); |
203 | } | 206 | } |
204 | 207 | ||
@@ -728,7 +731,30 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | |||
728 | if (!handle) { | 731 | if (!handle) { |
729 | handle = handle_bad_irq; | 732 | handle = handle_bad_irq; |
730 | } else { | 733 | } else { |
731 | if (WARN_ON(desc->irq_data.chip == &no_irq_chip)) | 734 | struct irq_data *irq_data = &desc->irq_data; |
735 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | ||
736 | /* | ||
737 | * With hierarchical domains we might run into a | ||
738 | * situation where the outermost chip is not yet set | ||
739 | * up, but the inner chips are there. Instead of | ||
740 | * bailing we install the handler, but obviously we | ||
741 | * cannot enable/startup the interrupt at this point. | ||
742 | */ | ||
743 | while (irq_data) { | ||
744 | if (irq_data->chip != &no_irq_chip) | ||
745 | break; | ||
746 | /* | ||
747 | * Bail out if the outer chip is not set up | ||
748 | * and the interrrupt supposed to be started | ||
749 | * right away. | ||
750 | */ | ||
751 | if (WARN_ON(is_chained)) | ||
752 | goto out; | ||
753 | /* Try the parent */ | ||
754 | irq_data = irq_data->parent_data; | ||
755 | } | ||
756 | #endif | ||
757 | if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip)) | ||
732 | goto out; | 758 | goto out; |
733 | } | 759 | } |
734 | 760 | ||
@@ -847,3 +873,105 @@ void irq_cpu_offline(void) | |||
847 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 873 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
848 | } | 874 | } |
849 | } | 875 | } |
876 | |||
877 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | ||
878 | /** | ||
879 | * irq_chip_ack_parent - Acknowledge the parent interrupt | ||
880 | * @data: Pointer to interrupt specific data | ||
881 | */ | ||
882 | void irq_chip_ack_parent(struct irq_data *data) | ||
883 | { | ||
884 | data = data->parent_data; | ||
885 | data->chip->irq_ack(data); | ||
886 | } | ||
887 | |||
888 | /** | ||
889 | * irq_chip_mask_parent - Mask the parent interrupt | ||
890 | * @data: Pointer to interrupt specific data | ||
891 | */ | ||
892 | void irq_chip_mask_parent(struct irq_data *data) | ||
893 | { | ||
894 | data = data->parent_data; | ||
895 | data->chip->irq_mask(data); | ||
896 | } | ||
897 | |||
898 | /** | ||
899 | * irq_chip_unmask_parent - Unmask the parent interrupt | ||
900 | * @data: Pointer to interrupt specific data | ||
901 | */ | ||
902 | void irq_chip_unmask_parent(struct irq_data *data) | ||
903 | { | ||
904 | data = data->parent_data; | ||
905 | data->chip->irq_unmask(data); | ||
906 | } | ||
907 | |||
908 | /** | ||
909 | * irq_chip_eoi_parent - Invoke EOI on the parent interrupt | ||
910 | * @data: Pointer to interrupt specific data | ||
911 | */ | ||
912 | void irq_chip_eoi_parent(struct irq_data *data) | ||
913 | { | ||
914 | data = data->parent_data; | ||
915 | data->chip->irq_eoi(data); | ||
916 | } | ||
917 | |||
918 | /** | ||
919 | * irq_chip_set_affinity_parent - Set affinity on the parent interrupt | ||
920 | * @data: Pointer to interrupt specific data | ||
921 | * @dest: The affinity mask to set | ||
922 | * @force: Flag to enforce setting (disable online checks) | ||
923 | * | ||
924 | * Conditinal, as the underlying parent chip might not implement it. | ||
925 | */ | ||
926 | int irq_chip_set_affinity_parent(struct irq_data *data, | ||
927 | const struct cpumask *dest, bool force) | ||
928 | { | ||
929 | data = data->parent_data; | ||
930 | if (data->chip->irq_set_affinity) | ||
931 | return data->chip->irq_set_affinity(data, dest, force); | ||
932 | |||
933 | return -ENOSYS; | ||
934 | } | ||
935 | |||
936 | /** | ||
937 | * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware | ||
938 | * @data: Pointer to interrupt specific data | ||
939 | * | ||
940 | * Iterate through the domain hierarchy of the interrupt and check | ||
941 | * whether a hw retrigger function exists. If yes, invoke it. | ||
942 | */ | ||
943 | int irq_chip_retrigger_hierarchy(struct irq_data *data) | ||
944 | { | ||
945 | for (data = data->parent_data; data; data = data->parent_data) | ||
946 | if (data->chip && data->chip->irq_retrigger) | ||
947 | return data->chip->irq_retrigger(data); | ||
948 | |||
949 | return -ENOSYS; | ||
950 | } | ||
951 | #endif | ||
952 | |||
953 | /** | ||
954 | * irq_chip_compose_msi_msg - Componse msi message for a irq chip | ||
955 | * @data: Pointer to interrupt specific data | ||
956 | * @msg: Pointer to the MSI message | ||
957 | * | ||
958 | * For hierarchical domains we find the first chip in the hierarchy | ||
959 | * which implements the irq_compose_msi_msg callback. For non | ||
960 | * hierarchical we use the top level chip. | ||
961 | */ | ||
962 | int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) | ||
963 | { | ||
964 | struct irq_data *pos = NULL; | ||
965 | |||
966 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | ||
967 | for (; data; data = data->parent_data) | ||
968 | #endif | ||
969 | if (data->chip && data->chip->irq_compose_msi_msg) | ||
970 | pos = data; | ||
971 | if (!pos) | ||
972 | return -ENOSYS; | ||
973 | |||
974 | pos->chip->irq_compose_msi_msg(pos, msg); | ||
975 | |||
976 | return 0; | ||
977 | } | ||
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 6534ff6ce02e..7fac311057b8 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c | |||
@@ -23,6 +23,10 @@ static DEFINE_MUTEX(irq_domain_mutex); | |||
23 | static DEFINE_MUTEX(revmap_trees_mutex); | 23 | static DEFINE_MUTEX(revmap_trees_mutex); |
24 | static struct irq_domain *irq_default_domain; | 24 | static struct irq_domain *irq_default_domain; |
25 | 25 | ||
26 | static int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, | ||
27 | irq_hw_number_t hwirq, int node); | ||
28 | static void irq_domain_check_hierarchy(struct irq_domain *domain); | ||
29 | |||
26 | /** | 30 | /** |
27 | * __irq_domain_add() - Allocate a new irq_domain data structure | 31 | * __irq_domain_add() - Allocate a new irq_domain data structure |
28 | * @of_node: optional device-tree node of the interrupt controller | 32 | * @of_node: optional device-tree node of the interrupt controller |
@@ -30,7 +34,7 @@ static struct irq_domain *irq_default_domain; | |||
30 | * @hwirq_max: Maximum number of interrupts supported by controller | 34 | * @hwirq_max: Maximum number of interrupts supported by controller |
31 | * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no | 35 | * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no |
32 | * direct mapping | 36 | * direct mapping |
33 | * @ops: map/unmap domain callbacks | 37 | * @ops: domain callbacks |
34 | * @host_data: Controller private data pointer | 38 | * @host_data: Controller private data pointer |
35 | * | 39 | * |
36 | * Allocates and initialize and irq_domain structure. | 40 | * Allocates and initialize and irq_domain structure. |
@@ -56,6 +60,7 @@ struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, | |||
56 | domain->hwirq_max = hwirq_max; | 60 | domain->hwirq_max = hwirq_max; |
57 | domain->revmap_size = size; | 61 | domain->revmap_size = size; |
58 | domain->revmap_direct_max_irq = direct_max; | 62 | domain->revmap_direct_max_irq = direct_max; |
63 | irq_domain_check_hierarchy(domain); | ||
59 | 64 | ||
60 | mutex_lock(&irq_domain_mutex); | 65 | mutex_lock(&irq_domain_mutex); |
61 | list_add(&domain->link, &irq_domain_list); | 66 | list_add(&domain->link, &irq_domain_list); |
@@ -109,7 +114,7 @@ EXPORT_SYMBOL_GPL(irq_domain_remove); | |||
109 | * @first_irq: first number of irq block assigned to the domain, | 114 | * @first_irq: first number of irq block assigned to the domain, |
110 | * pass zero to assign irqs on-the-fly. If first_irq is non-zero, then | 115 | * pass zero to assign irqs on-the-fly. If first_irq is non-zero, then |
111 | * pre-map all of the irqs in the domain to virqs starting at first_irq. | 116 | * pre-map all of the irqs in the domain to virqs starting at first_irq. |
112 | * @ops: map/unmap domain callbacks | 117 | * @ops: domain callbacks |
113 | * @host_data: Controller private data pointer | 118 | * @host_data: Controller private data pointer |
114 | * | 119 | * |
115 | * Allocates an irq_domain, and optionally if first_irq is positive then also | 120 | * Allocates an irq_domain, and optionally if first_irq is positive then also |
@@ -174,10 +179,8 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, | |||
174 | 179 | ||
175 | domain = __irq_domain_add(of_node, first_hwirq + size, | 180 | domain = __irq_domain_add(of_node, first_hwirq + size, |
176 | first_hwirq + size, 0, ops, host_data); | 181 | first_hwirq + size, 0, ops, host_data); |
177 | if (!domain) | 182 | if (domain) |
178 | return NULL; | 183 | irq_domain_associate_many(domain, first_irq, first_hwirq, size); |
179 | |||
180 | irq_domain_associate_many(domain, first_irq, first_hwirq, size); | ||
181 | 184 | ||
182 | return domain; | 185 | return domain; |
183 | } | 186 | } |
@@ -388,7 +391,6 @@ EXPORT_SYMBOL_GPL(irq_create_direct_mapping); | |||
388 | unsigned int irq_create_mapping(struct irq_domain *domain, | 391 | unsigned int irq_create_mapping(struct irq_domain *domain, |
389 | irq_hw_number_t hwirq) | 392 | irq_hw_number_t hwirq) |
390 | { | 393 | { |
391 | unsigned int hint; | ||
392 | int virq; | 394 | int virq; |
393 | 395 | ||
394 | pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq); | 396 | pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq); |
@@ -410,12 +412,8 @@ unsigned int irq_create_mapping(struct irq_domain *domain, | |||
410 | } | 412 | } |
411 | 413 | ||
412 | /* Allocate a virtual interrupt number */ | 414 | /* Allocate a virtual interrupt number */ |
413 | hint = hwirq % nr_irqs; | 415 | virq = irq_domain_alloc_descs(-1, 1, hwirq, |
414 | if (hint == 0) | 416 | of_node_to_nid(domain->of_node)); |
415 | hint++; | ||
416 | virq = irq_alloc_desc_from(hint, of_node_to_nid(domain->of_node)); | ||
417 | if (virq <= 0) | ||
418 | virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node)); | ||
419 | if (virq <= 0) { | 417 | if (virq <= 0) { |
420 | pr_debug("-> virq allocation failed\n"); | 418 | pr_debug("-> virq allocation failed\n"); |
421 | return 0; | 419 | return 0; |
@@ -471,7 +469,7 @@ unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data) | |||
471 | struct irq_domain *domain; | 469 | struct irq_domain *domain; |
472 | irq_hw_number_t hwirq; | 470 | irq_hw_number_t hwirq; |
473 | unsigned int type = IRQ_TYPE_NONE; | 471 | unsigned int type = IRQ_TYPE_NONE; |
474 | unsigned int virq; | 472 | int virq; |
475 | 473 | ||
476 | domain = irq_data->np ? irq_find_host(irq_data->np) : irq_default_domain; | 474 | domain = irq_data->np ? irq_find_host(irq_data->np) : irq_default_domain; |
477 | if (!domain) { | 475 | if (!domain) { |
@@ -489,10 +487,24 @@ unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data) | |||
489 | return 0; | 487 | return 0; |
490 | } | 488 | } |
491 | 489 | ||
492 | /* Create mapping */ | 490 | if (irq_domain_is_hierarchy(domain)) { |
493 | virq = irq_create_mapping(domain, hwirq); | 491 | /* |
494 | if (!virq) | 492 | * If we've already configured this interrupt, |
495 | return virq; | 493 | * don't do it again, or hell will break loose. |
494 | */ | ||
495 | virq = irq_find_mapping(domain, hwirq); | ||
496 | if (virq) | ||
497 | return virq; | ||
498 | |||
499 | virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, irq_data); | ||
500 | if (virq <= 0) | ||
501 | return 0; | ||
502 | } else { | ||
503 | /* Create mapping */ | ||
504 | virq = irq_create_mapping(domain, hwirq); | ||
505 | if (!virq) | ||
506 | return virq; | ||
507 | } | ||
496 | 508 | ||
497 | /* Set type if specified and different than the current one */ | 509 | /* Set type if specified and different than the current one */ |
498 | if (type != IRQ_TYPE_NONE && | 510 | if (type != IRQ_TYPE_NONE && |
@@ -540,8 +552,8 @@ unsigned int irq_find_mapping(struct irq_domain *domain, | |||
540 | return 0; | 552 | return 0; |
541 | 553 | ||
542 | if (hwirq < domain->revmap_direct_max_irq) { | 554 | if (hwirq < domain->revmap_direct_max_irq) { |
543 | data = irq_get_irq_data(hwirq); | 555 | data = irq_domain_get_irq_data(domain, hwirq); |
544 | if (data && (data->domain == domain) && (data->hwirq == hwirq)) | 556 | if (data && data->hwirq == hwirq) |
545 | return hwirq; | 557 | return hwirq; |
546 | } | 558 | } |
547 | 559 | ||
@@ -709,3 +721,518 @@ const struct irq_domain_ops irq_domain_simple_ops = { | |||
709 | .xlate = irq_domain_xlate_onetwocell, | 721 | .xlate = irq_domain_xlate_onetwocell, |
710 | }; | 722 | }; |
711 | EXPORT_SYMBOL_GPL(irq_domain_simple_ops); | 723 | EXPORT_SYMBOL_GPL(irq_domain_simple_ops); |
724 | |||
725 | static int irq_domain_alloc_descs(int virq, unsigned int cnt, | ||
726 | irq_hw_number_t hwirq, int node) | ||
727 | { | ||
728 | unsigned int hint; | ||
729 | |||
730 | if (virq >= 0) { | ||
731 | virq = irq_alloc_descs(virq, virq, cnt, node); | ||
732 | } else { | ||
733 | hint = hwirq % nr_irqs; | ||
734 | if (hint == 0) | ||
735 | hint++; | ||
736 | virq = irq_alloc_descs_from(hint, cnt, node); | ||
737 | if (virq <= 0 && hint > 1) | ||
738 | virq = irq_alloc_descs_from(1, cnt, node); | ||
739 | } | ||
740 | |||
741 | return virq; | ||
742 | } | ||
743 | |||
744 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | ||
745 | /** | ||
746 | * irq_domain_add_hierarchy - Add a irqdomain into the hierarchy | ||
747 | * @parent: Parent irq domain to associate with the new domain | ||
748 | * @flags: Irq domain flags associated to the domain | ||
749 | * @size: Size of the domain. See below | ||
750 | * @node: Optional device-tree node of the interrupt controller | ||
751 | * @ops: Pointer to the interrupt domain callbacks | ||
752 | * @host_data: Controller private data pointer | ||
753 | * | ||
754 | * If @size is 0 a tree domain is created, otherwise a linear domain. | ||
755 | * | ||
756 | * If successful the parent is associated to the new domain and the | ||
757 | * domain flags are set. | ||
758 | * Returns pointer to IRQ domain, or NULL on failure. | ||
759 | */ | ||
760 | struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, | ||
761 | unsigned int flags, | ||
762 | unsigned int size, | ||
763 | struct device_node *node, | ||
764 | const struct irq_domain_ops *ops, | ||
765 | void *host_data) | ||
766 | { | ||
767 | struct irq_domain *domain; | ||
768 | |||
769 | if (size) | ||
770 | domain = irq_domain_add_linear(node, size, ops, host_data); | ||
771 | else | ||
772 | domain = irq_domain_add_tree(node, ops, host_data); | ||
773 | if (domain) { | ||
774 | domain->parent = parent; | ||
775 | domain->flags |= flags; | ||
776 | } | ||
777 | |||
778 | return domain; | ||
779 | } | ||
780 | |||
781 | static void irq_domain_insert_irq(int virq) | ||
782 | { | ||
783 | struct irq_data *data; | ||
784 | |||
785 | for (data = irq_get_irq_data(virq); data; data = data->parent_data) { | ||
786 | struct irq_domain *domain = data->domain; | ||
787 | irq_hw_number_t hwirq = data->hwirq; | ||
788 | |||
789 | if (hwirq < domain->revmap_size) { | ||
790 | domain->linear_revmap[hwirq] = virq; | ||
791 | } else { | ||
792 | mutex_lock(&revmap_trees_mutex); | ||
793 | radix_tree_insert(&domain->revmap_tree, hwirq, data); | ||
794 | mutex_unlock(&revmap_trees_mutex); | ||
795 | } | ||
796 | |||
797 | /* If not already assigned, give the domain the chip's name */ | ||
798 | if (!domain->name && data->chip) | ||
799 | domain->name = data->chip->name; | ||
800 | } | ||
801 | |||
802 | irq_clear_status_flags(virq, IRQ_NOREQUEST); | ||
803 | } | ||
804 | |||
805 | static void irq_domain_remove_irq(int virq) | ||
806 | { | ||
807 | struct irq_data *data; | ||
808 | |||
809 | irq_set_status_flags(virq, IRQ_NOREQUEST); | ||
810 | irq_set_chip_and_handler(virq, NULL, NULL); | ||
811 | synchronize_irq(virq); | ||
812 | smp_mb(); | ||
813 | |||
814 | for (data = irq_get_irq_data(virq); data; data = data->parent_data) { | ||
815 | struct irq_domain *domain = data->domain; | ||
816 | irq_hw_number_t hwirq = data->hwirq; | ||
817 | |||
818 | if (hwirq < domain->revmap_size) { | ||
819 | domain->linear_revmap[hwirq] = 0; | ||
820 | } else { | ||
821 | mutex_lock(&revmap_trees_mutex); | ||
822 | radix_tree_delete(&domain->revmap_tree, hwirq); | ||
823 | mutex_unlock(&revmap_trees_mutex); | ||
824 | } | ||
825 | } | ||
826 | } | ||
827 | |||
828 | static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain, | ||
829 | struct irq_data *child) | ||
830 | { | ||
831 | struct irq_data *irq_data; | ||
832 | |||
833 | irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL, child->node); | ||
834 | if (irq_data) { | ||
835 | child->parent_data = irq_data; | ||
836 | irq_data->irq = child->irq; | ||
837 | irq_data->node = child->node; | ||
838 | irq_data->domain = domain; | ||
839 | } | ||
840 | |||
841 | return irq_data; | ||
842 | } | ||
843 | |||
844 | static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs) | ||
845 | { | ||
846 | struct irq_data *irq_data, *tmp; | ||
847 | int i; | ||
848 | |||
849 | for (i = 0; i < nr_irqs; i++) { | ||
850 | irq_data = irq_get_irq_data(virq + i); | ||
851 | tmp = irq_data->parent_data; | ||
852 | irq_data->parent_data = NULL; | ||
853 | irq_data->domain = NULL; | ||
854 | |||
855 | while (tmp) { | ||
856 | irq_data = tmp; | ||
857 | tmp = tmp->parent_data; | ||
858 | kfree(irq_data); | ||
859 | } | ||
860 | } | ||
861 | } | ||
862 | |||
863 | static int irq_domain_alloc_irq_data(struct irq_domain *domain, | ||
864 | unsigned int virq, unsigned int nr_irqs) | ||
865 | { | ||
866 | struct irq_data *irq_data; | ||
867 | struct irq_domain *parent; | ||
868 | int i; | ||
869 | |||
870 | /* The outermost irq_data is embedded in struct irq_desc */ | ||
871 | for (i = 0; i < nr_irqs; i++) { | ||
872 | irq_data = irq_get_irq_data(virq + i); | ||
873 | irq_data->domain = domain; | ||
874 | |||
875 | for (parent = domain->parent; parent; parent = parent->parent) { | ||
876 | irq_data = irq_domain_insert_irq_data(parent, irq_data); | ||
877 | if (!irq_data) { | ||
878 | irq_domain_free_irq_data(virq, i + 1); | ||
879 | return -ENOMEM; | ||
880 | } | ||
881 | } | ||
882 | } | ||
883 | |||
884 | return 0; | ||
885 | } | ||
886 | |||
887 | /** | ||
888 | * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain | ||
889 | * @domain: domain to match | ||
890 | * @virq: IRQ number to get irq_data | ||
891 | */ | ||
892 | struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, | ||
893 | unsigned int virq) | ||
894 | { | ||
895 | struct irq_data *irq_data; | ||
896 | |||
897 | for (irq_data = irq_get_irq_data(virq); irq_data; | ||
898 | irq_data = irq_data->parent_data) | ||
899 | if (irq_data->domain == domain) | ||
900 | return irq_data; | ||
901 | |||
902 | return NULL; | ||
903 | } | ||
904 | |||
905 | /** | ||
906 | * irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain | ||
907 | * @domain: Interrupt domain to match | ||
908 | * @virq: IRQ number | ||
909 | * @hwirq: The hwirq number | ||
910 | * @chip: The associated interrupt chip | ||
911 | * @chip_data: The associated chip data | ||
912 | */ | ||
913 | int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq, | ||
914 | irq_hw_number_t hwirq, struct irq_chip *chip, | ||
915 | void *chip_data) | ||
916 | { | ||
917 | struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq); | ||
918 | |||
919 | if (!irq_data) | ||
920 | return -ENOENT; | ||
921 | |||
922 | irq_data->hwirq = hwirq; | ||
923 | irq_data->chip = chip ? chip : &no_irq_chip; | ||
924 | irq_data->chip_data = chip_data; | ||
925 | |||
926 | return 0; | ||
927 | } | ||
928 | |||
929 | /** | ||
930 | * irq_domain_set_info - Set the complete data for a @virq in @domain | ||
931 | * @domain: Interrupt domain to match | ||
932 | * @virq: IRQ number | ||
933 | * @hwirq: The hardware interrupt number | ||
934 | * @chip: The associated interrupt chip | ||
935 | * @chip_data: The associated interrupt chip data | ||
936 | * @handler: The interrupt flow handler | ||
937 | * @handler_data: The interrupt flow handler data | ||
938 | * @handler_name: The interrupt handler name | ||
939 | */ | ||
940 | void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, | ||
941 | irq_hw_number_t hwirq, struct irq_chip *chip, | ||
942 | void *chip_data, irq_flow_handler_t handler, | ||
943 | void *handler_data, const char *handler_name) | ||
944 | { | ||
945 | irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data); | ||
946 | __irq_set_handler(virq, handler, 0, handler_name); | ||
947 | irq_set_handler_data(virq, handler_data); | ||
948 | } | ||
949 | |||
950 | /** | ||
951 | * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data | ||
952 | * @irq_data: The pointer to irq_data | ||
953 | */ | ||
954 | void irq_domain_reset_irq_data(struct irq_data *irq_data) | ||
955 | { | ||
956 | irq_data->hwirq = 0; | ||
957 | irq_data->chip = &no_irq_chip; | ||
958 | irq_data->chip_data = NULL; | ||
959 | } | ||
960 | |||
961 | /** | ||
962 | * irq_domain_free_irqs_common - Clear irq_data and free the parent | ||
963 | * @domain: Interrupt domain to match | ||
964 | * @virq: IRQ number to start with | ||
965 | * @nr_irqs: The number of irqs to free | ||
966 | */ | ||
967 | void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq, | ||
968 | unsigned int nr_irqs) | ||
969 | { | ||
970 | struct irq_data *irq_data; | ||
971 | int i; | ||
972 | |||
973 | for (i = 0; i < nr_irqs; i++) { | ||
974 | irq_data = irq_domain_get_irq_data(domain, virq + i); | ||
975 | if (irq_data) | ||
976 | irq_domain_reset_irq_data(irq_data); | ||
977 | } | ||
978 | irq_domain_free_irqs_parent(domain, virq, nr_irqs); | ||
979 | } | ||
980 | |||
981 | /** | ||
982 | * irq_domain_free_irqs_top - Clear handler and handler data, clear irqdata and free parent | ||
983 | * @domain: Interrupt domain to match | ||
984 | * @virq: IRQ number to start with | ||
985 | * @nr_irqs: The number of irqs to free | ||
986 | */ | ||
987 | void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq, | ||
988 | unsigned int nr_irqs) | ||
989 | { | ||
990 | int i; | ||
991 | |||
992 | for (i = 0; i < nr_irqs; i++) { | ||
993 | irq_set_handler_data(virq + i, NULL); | ||
994 | irq_set_handler(virq + i, NULL); | ||
995 | } | ||
996 | irq_domain_free_irqs_common(domain, virq, nr_irqs); | ||
997 | } | ||
998 | |||
999 | static bool irq_domain_is_auto_recursive(struct irq_domain *domain) | ||
1000 | { | ||
1001 | return domain->flags & IRQ_DOMAIN_FLAG_AUTO_RECURSIVE; | ||
1002 | } | ||
1003 | |||
1004 | static void irq_domain_free_irqs_recursive(struct irq_domain *domain, | ||
1005 | unsigned int irq_base, | ||
1006 | unsigned int nr_irqs) | ||
1007 | { | ||
1008 | domain->ops->free(domain, irq_base, nr_irqs); | ||
1009 | if (irq_domain_is_auto_recursive(domain)) { | ||
1010 | BUG_ON(!domain->parent); | ||
1011 | irq_domain_free_irqs_recursive(domain->parent, irq_base, | ||
1012 | nr_irqs); | ||
1013 | } | ||
1014 | } | ||
1015 | |||
1016 | static int irq_domain_alloc_irqs_recursive(struct irq_domain *domain, | ||
1017 | unsigned int irq_base, | ||
1018 | unsigned int nr_irqs, void *arg) | ||
1019 | { | ||
1020 | int ret = 0; | ||
1021 | struct irq_domain *parent = domain->parent; | ||
1022 | bool recursive = irq_domain_is_auto_recursive(domain); | ||
1023 | |||
1024 | BUG_ON(recursive && !parent); | ||
1025 | if (recursive) | ||
1026 | ret = irq_domain_alloc_irqs_recursive(parent, irq_base, | ||
1027 | nr_irqs, arg); | ||
1028 | if (ret >= 0) | ||
1029 | ret = domain->ops->alloc(domain, irq_base, nr_irqs, arg); | ||
1030 | if (ret < 0 && recursive) | ||
1031 | irq_domain_free_irqs_recursive(parent, irq_base, nr_irqs); | ||
1032 | |||
1033 | return ret; | ||
1034 | } | ||
1035 | |||
1036 | /** | ||
1037 | * __irq_domain_alloc_irqs - Allocate IRQs from domain | ||
1038 | * @domain: domain to allocate from | ||
1039 | * @irq_base: allocate specified IRQ nubmer if irq_base >= 0 | ||
1040 | * @nr_irqs: number of IRQs to allocate | ||
1041 | * @node: NUMA node id for memory allocation | ||
1042 | * @arg: domain specific argument | ||
1043 | * @realloc: IRQ descriptors have already been allocated if true | ||
1044 | * | ||
1045 | * Allocate IRQ numbers and initialized all data structures to support | ||
1046 | * hierarchy IRQ domains. | ||
1047 | * Parameter @realloc is mainly to support legacy IRQs. | ||
1048 | * Returns error code or allocated IRQ number | ||
1049 | * | ||
1050 | * The whole process to setup an IRQ has been split into two steps. | ||
1051 | * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ | ||
1052 | * descriptor and required hardware resources. The second step, | ||
1053 | * irq_domain_activate_irq(), is to program hardwares with preallocated | ||
1054 | * resources. In this way, it's easier to rollback when failing to | ||
1055 | * allocate resources. | ||
1056 | */ | ||
1057 | int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, | ||
1058 | unsigned int nr_irqs, int node, void *arg, | ||
1059 | bool realloc) | ||
1060 | { | ||
1061 | int i, ret, virq; | ||
1062 | |||
1063 | if (domain == NULL) { | ||
1064 | domain = irq_default_domain; | ||
1065 | if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n")) | ||
1066 | return -EINVAL; | ||
1067 | } | ||
1068 | |||
1069 | if (!domain->ops->alloc) { | ||
1070 | pr_debug("domain->ops->alloc() is NULL\n"); | ||
1071 | return -ENOSYS; | ||
1072 | } | ||
1073 | |||
1074 | if (realloc && irq_base >= 0) { | ||
1075 | virq = irq_base; | ||
1076 | } else { | ||
1077 | virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node); | ||
1078 | if (virq < 0) { | ||
1079 | pr_debug("cannot allocate IRQ(base %d, count %d)\n", | ||
1080 | irq_base, nr_irqs); | ||
1081 | return virq; | ||
1082 | } | ||
1083 | } | ||
1084 | |||
1085 | if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) { | ||
1086 | pr_debug("cannot allocate memory for IRQ%d\n", virq); | ||
1087 | ret = -ENOMEM; | ||
1088 | goto out_free_desc; | ||
1089 | } | ||
1090 | |||
1091 | mutex_lock(&irq_domain_mutex); | ||
1092 | ret = irq_domain_alloc_irqs_recursive(domain, virq, nr_irqs, arg); | ||
1093 | if (ret < 0) { | ||
1094 | mutex_unlock(&irq_domain_mutex); | ||
1095 | goto out_free_irq_data; | ||
1096 | } | ||
1097 | for (i = 0; i < nr_irqs; i++) | ||
1098 | irq_domain_insert_irq(virq + i); | ||
1099 | mutex_unlock(&irq_domain_mutex); | ||
1100 | |||
1101 | return virq; | ||
1102 | |||
1103 | out_free_irq_data: | ||
1104 | irq_domain_free_irq_data(virq, nr_irqs); | ||
1105 | out_free_desc: | ||
1106 | irq_free_descs(virq, nr_irqs); | ||
1107 | return ret; | ||
1108 | } | ||
1109 | |||
1110 | /** | ||
1111 | * irq_domain_free_irqs - Free IRQ number and associated data structures | ||
1112 | * @virq: base IRQ number | ||
1113 | * @nr_irqs: number of IRQs to free | ||
1114 | */ | ||
1115 | void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs) | ||
1116 | { | ||
1117 | struct irq_data *data = irq_get_irq_data(virq); | ||
1118 | int i; | ||
1119 | |||
1120 | if (WARN(!data || !data->domain || !data->domain->ops->free, | ||
1121 | "NULL pointer, cannot free irq\n")) | ||
1122 | return; | ||
1123 | |||
1124 | mutex_lock(&irq_domain_mutex); | ||
1125 | for (i = 0; i < nr_irqs; i++) | ||
1126 | irq_domain_remove_irq(virq + i); | ||
1127 | irq_domain_free_irqs_recursive(data->domain, virq, nr_irqs); | ||
1128 | mutex_unlock(&irq_domain_mutex); | ||
1129 | |||
1130 | irq_domain_free_irq_data(virq, nr_irqs); | ||
1131 | irq_free_descs(virq, nr_irqs); | ||
1132 | } | ||
1133 | |||
1134 | /** | ||
1135 | * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain | ||
1136 | * @irq_base: Base IRQ number | ||
1137 | * @nr_irqs: Number of IRQs to allocate | ||
1138 | * @arg: Allocation data (arch/domain specific) | ||
1139 | * | ||
1140 | * Check whether the domain has been setup recursive. If not allocate | ||
1141 | * through the parent domain. | ||
1142 | */ | ||
1143 | int irq_domain_alloc_irqs_parent(struct irq_domain *domain, | ||
1144 | unsigned int irq_base, unsigned int nr_irqs, | ||
1145 | void *arg) | ||
1146 | { | ||
1147 | /* irq_domain_alloc_irqs_recursive() has called parent's alloc() */ | ||
1148 | if (irq_domain_is_auto_recursive(domain)) | ||
1149 | return 0; | ||
1150 | |||
1151 | domain = domain->parent; | ||
1152 | if (domain) | ||
1153 | return irq_domain_alloc_irqs_recursive(domain, irq_base, | ||
1154 | nr_irqs, arg); | ||
1155 | return -ENOSYS; | ||
1156 | } | ||
1157 | |||
1158 | /** | ||
1159 | * irq_domain_free_irqs_parent - Free interrupts from parent domain | ||
1160 | * @irq_base: Base IRQ number | ||
1161 | * @nr_irqs: Number of IRQs to free | ||
1162 | * | ||
1163 | * Check whether the domain has been setup recursive. If not free | ||
1164 | * through the parent domain. | ||
1165 | */ | ||
1166 | void irq_domain_free_irqs_parent(struct irq_domain *domain, | ||
1167 | unsigned int irq_base, unsigned int nr_irqs) | ||
1168 | { | ||
1169 | /* irq_domain_free_irqs_recursive() will call parent's free */ | ||
1170 | if (!irq_domain_is_auto_recursive(domain) && domain->parent) | ||
1171 | irq_domain_free_irqs_recursive(domain->parent, irq_base, | ||
1172 | nr_irqs); | ||
1173 | } | ||
1174 | |||
1175 | /** | ||
1176 | * irq_domain_activate_irq - Call domain_ops->activate recursively to activate | ||
1177 | * interrupt | ||
1178 | * @irq_data: outermost irq_data associated with interrupt | ||
1179 | * | ||
1180 | * This is the second step to call domain_ops->activate to program interrupt | ||
1181 | * controllers, so the interrupt could actually get delivered. | ||
1182 | */ | ||
1183 | void irq_domain_activate_irq(struct irq_data *irq_data) | ||
1184 | { | ||
1185 | if (irq_data && irq_data->domain) { | ||
1186 | struct irq_domain *domain = irq_data->domain; | ||
1187 | |||
1188 | if (irq_data->parent_data) | ||
1189 | irq_domain_activate_irq(irq_data->parent_data); | ||
1190 | if (domain->ops->activate) | ||
1191 | domain->ops->activate(domain, irq_data); | ||
1192 | } | ||
1193 | } | ||
1194 | |||
1195 | /** | ||
1196 | * irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to | ||
1197 | * deactivate interrupt | ||
1198 | * @irq_data: outermost irq_data associated with interrupt | ||
1199 | * | ||
1200 | * It calls domain_ops->deactivate to program interrupt controllers to disable | ||
1201 | * interrupt delivery. | ||
1202 | */ | ||
1203 | void irq_domain_deactivate_irq(struct irq_data *irq_data) | ||
1204 | { | ||
1205 | if (irq_data && irq_data->domain) { | ||
1206 | struct irq_domain *domain = irq_data->domain; | ||
1207 | |||
1208 | if (domain->ops->deactivate) | ||
1209 | domain->ops->deactivate(domain, irq_data); | ||
1210 | if (irq_data->parent_data) | ||
1211 | irq_domain_deactivate_irq(irq_data->parent_data); | ||
1212 | } | ||
1213 | } | ||
1214 | |||
1215 | static void irq_domain_check_hierarchy(struct irq_domain *domain) | ||
1216 | { | ||
1217 | /* Hierarchy irq_domains must implement callback alloc() */ | ||
1218 | if (domain->ops->alloc) | ||
1219 | domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY; | ||
1220 | } | ||
1221 | #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ | ||
1222 | /** | ||
1223 | * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain | ||
1224 | * @domain: domain to match | ||
1225 | * @virq: IRQ number to get irq_data | ||
1226 | */ | ||
1227 | struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, | ||
1228 | unsigned int virq) | ||
1229 | { | ||
1230 | struct irq_data *irq_data = irq_get_irq_data(virq); | ||
1231 | |||
1232 | return (irq_data && irq_data->domain == domain) ? irq_data : NULL; | ||
1233 | } | ||
1234 | |||
1235 | static void irq_domain_check_hierarchy(struct irq_domain *domain) | ||
1236 | { | ||
1237 | } | ||
1238 | #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ | ||
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 0a9104b4608b..80692373abd6 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -183,6 +183,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, | |||
183 | ret = chip->irq_set_affinity(data, mask, force); | 183 | ret = chip->irq_set_affinity(data, mask, force); |
184 | switch (ret) { | 184 | switch (ret) { |
185 | case IRQ_SET_MASK_OK: | 185 | case IRQ_SET_MASK_OK: |
186 | case IRQ_SET_MASK_OK_DONE: | ||
186 | cpumask_copy(data->affinity, mask); | 187 | cpumask_copy(data->affinity, mask); |
187 | case IRQ_SET_MASK_OK_NOCOPY: | 188 | case IRQ_SET_MASK_OK_NOCOPY: |
188 | irq_set_thread_affinity(desc); | 189 | irq_set_thread_affinity(desc); |
@@ -600,6 +601,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | |||
600 | 601 | ||
601 | switch (ret) { | 602 | switch (ret) { |
602 | case IRQ_SET_MASK_OK: | 603 | case IRQ_SET_MASK_OK: |
604 | case IRQ_SET_MASK_OK_DONE: | ||
603 | irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); | 605 | irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); |
604 | irqd_set(&desc->irq_data, flags); | 606 | irqd_set(&desc->irq_data, flags); |
605 | 607 | ||
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c new file mode 100644 index 000000000000..3e18163f336f --- /dev/null +++ b/kernel/irq/msi.c | |||
@@ -0,0 +1,330 @@ | |||
1 | /* | ||
2 | * linux/kernel/irq/msi.c | ||
3 | * | ||
4 | * Copyright (C) 2014 Intel Corp. | ||
5 | * Author: Jiang Liu <jiang.liu@linux.intel.com> | ||
6 | * | ||
7 | * This file is licensed under GPLv2. | ||
8 | * | ||
9 | * This file contains common code to support Message Signalled Interrupt for | ||
10 | * PCI compatible and non PCI compatible devices. | ||
11 | */ | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/device.h> | ||
14 | #include <linux/irq.h> | ||
15 | #include <linux/irqdomain.h> | ||
16 | #include <linux/msi.h> | ||
17 | |||
18 | /* Temporary solution for building, will be removed later */ | ||
19 | #include <linux/pci.h> | ||
20 | |||
/**
 * __get_cached_msi_msg - Copy the cached MSI message of an MSI descriptor
 * @entry: Pointer to the MSI descriptor to read from
 * @msg: Storage the cached message is copied into
 */
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	*msg = entry->msg;
}
25 | |||
/**
 * get_cached_msi_msg - Copy the cached MSI message of an interrupt
 * @irq: Interrupt number to look up
 * @msg: Storage the cached message is copied into
 *
 * NOTE(review): assumes @irq has a valid MSI descriptor; if
 * irq_get_msi_desc() returned NULL, the copy below would dereference it -
 * confirm all callers pass MSI interrupts only.
 */
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__get_cached_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(get_cached_msi_msg);
33 | |||
34 | #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN | ||
/* Program @msg into the hardware via the irq chip of @data. */
static inline void irq_chip_write_msi_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	data->chip->irq_write_msi_msg(data, msg);
}
40 | |||
41 | /** | ||
42 | * msi_domain_set_affinity - Generic affinity setter function for MSI domains | ||
43 | * @irq_data: The irq data associated to the interrupt | ||
44 | * @mask: The affinity mask to set | ||
45 | * @force: Flag to enforce setting (disable online checks) | ||
46 | * | ||
47 | * Intended to be used by MSI interrupt controllers which are | ||
48 | * implemented with hierarchical domains. | ||
49 | */ | ||
50 | int msi_domain_set_affinity(struct irq_data *irq_data, | ||
51 | const struct cpumask *mask, bool force) | ||
52 | { | ||
53 | struct irq_data *parent = irq_data->parent_data; | ||
54 | struct msi_msg msg; | ||
55 | int ret; | ||
56 | |||
57 | ret = parent->chip->irq_set_affinity(parent, mask, force); | ||
58 | if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) { | ||
59 | BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg)); | ||
60 | irq_chip_write_msi_msg(irq_data, &msg); | ||
61 | } | ||
62 | |||
63 | return ret; | ||
64 | } | ||
65 | |||
/*
 * Compose the MSI message from the irq hierarchy and program it into the
 * hardware. A composition failure here is a programming error, hence BUG_ON.
 */
static void msi_domain_activate(struct irq_domain *domain,
				struct irq_data *irq_data)
{
	struct msi_msg msg;

	BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg));
	irq_chip_write_msi_msg(irq_data, &msg);
}
74 | |||
/* Write an all-zero message to shut off delivery of this MSI. */
static void msi_domain_deactivate(struct irq_domain *domain,
				  struct irq_data *irq_data)
{
	struct msi_msg msg;

	memset(&msg, 0, sizeof(msg));
	irq_chip_write_msi_msg(irq_data, &msg);
}
83 | |||
84 | static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq, | ||
85 | unsigned int nr_irqs, void *arg) | ||
86 | { | ||
87 | struct msi_domain_info *info = domain->host_data; | ||
88 | struct msi_domain_ops *ops = info->ops; | ||
89 | irq_hw_number_t hwirq = ops->get_hwirq(info, arg); | ||
90 | int i, ret; | ||
91 | |||
92 | if (irq_find_mapping(domain, hwirq) > 0) | ||
93 | return -EEXIST; | ||
94 | |||
95 | ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); | ||
96 | if (ret < 0) | ||
97 | return ret; | ||
98 | |||
99 | for (i = 0; i < nr_irqs; i++) { | ||
100 | ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg); | ||
101 | if (ret < 0) { | ||
102 | if (ops->msi_free) { | ||
103 | for (i--; i > 0; i--) | ||
104 | ops->msi_free(domain, info, virq + i); | ||
105 | } | ||
106 | irq_domain_free_irqs_top(domain, virq, nr_irqs); | ||
107 | return ret; | ||
108 | } | ||
109 | } | ||
110 | |||
111 | return 0; | ||
112 | } | ||
113 | |||
114 | static void msi_domain_free(struct irq_domain *domain, unsigned int virq, | ||
115 | unsigned int nr_irqs) | ||
116 | { | ||
117 | struct msi_domain_info *info = domain->host_data; | ||
118 | int i; | ||
119 | |||
120 | if (info->ops->msi_free) { | ||
121 | for (i = 0; i < nr_irqs; i++) | ||
122 | info->ops->msi_free(domain, info, virq + i); | ||
123 | } | ||
124 | irq_domain_free_irqs_top(domain, virq, nr_irqs); | ||
125 | } | ||
126 | |||
/* irq_domain callbacks shared by all generic MSI irq domains. */
static struct irq_domain_ops msi_domain_ops = {
	.alloc = msi_domain_alloc,
	.free = msi_domain_free,
	.activate = msi_domain_activate,
	.deactivate = msi_domain_deactivate,
};
133 | |||
#ifdef GENERIC_MSI_DOMAIN_OPS
/*
 * Default msi_domain_ops callbacks, used to fill hooks the caller left
 * unset. NOTE(review): GENERIC_MSI_DOMAIN_OPS has no CONFIG_ prefix -
 * presumably provided by arch headers rather than Kconfig; confirm.
 */
static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
						msi_alloc_info_t *arg)
{
	return arg->hwirq;
}

static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
				  int nvec, msi_alloc_info_t *arg)
{
	/* Start from a clean allocation descriptor. */
	memset(arg, 0, sizeof(*arg));
	return 0;
}

static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
				    struct msi_desc *desc)
{
	arg->desc = desc;
}
#else
#define msi_domain_ops_get_hwirq NULL
#define msi_domain_ops_prepare NULL
#define msi_domain_ops_set_desc NULL
#endif /* !GENERIC_MSI_DOMAIN_OPS */
158 | |||
159 | static int msi_domain_ops_init(struct irq_domain *domain, | ||
160 | struct msi_domain_info *info, | ||
161 | unsigned int virq, irq_hw_number_t hwirq, | ||
162 | msi_alloc_info_t *arg) | ||
163 | { | ||
164 | irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip, | ||
165 | info->chip_data); | ||
166 | if (info->handler && info->handler_name) { | ||
167 | __irq_set_handler(virq, info->handler, 0, info->handler_name); | ||
168 | if (info->handler_data) | ||
169 | irq_set_handler_data(virq, info->handler_data); | ||
170 | } | ||
171 | return 0; | ||
172 | } | ||
173 | |||
/* Default msi_check(): accept every device; callers may override. */
static int msi_domain_ops_check(struct irq_domain *domain,
				struct msi_domain_info *info,
				struct device *dev)
{
	return 0;
}
180 | |||
/* Fallback callback set installed by msi_domain_update_dom_ops(). */
static struct msi_domain_ops msi_domain_ops_default = {
	.get_hwirq = msi_domain_ops_get_hwirq,
	.msi_init = msi_domain_ops_init,
	.msi_check = msi_domain_ops_check,
	.msi_prepare = msi_domain_ops_prepare,
	.set_desc = msi_domain_ops_set_desc,
};
188 | |||
189 | static void msi_domain_update_dom_ops(struct msi_domain_info *info) | ||
190 | { | ||
191 | struct msi_domain_ops *ops = info->ops; | ||
192 | |||
193 | if (ops == NULL) { | ||
194 | info->ops = &msi_domain_ops_default; | ||
195 | return; | ||
196 | } | ||
197 | |||
198 | if (ops->get_hwirq == NULL) | ||
199 | ops->get_hwirq = msi_domain_ops_default.get_hwirq; | ||
200 | if (ops->msi_init == NULL) | ||
201 | ops->msi_init = msi_domain_ops_default.msi_init; | ||
202 | if (ops->msi_check == NULL) | ||
203 | ops->msi_check = msi_domain_ops_default.msi_check; | ||
204 | if (ops->msi_prepare == NULL) | ||
205 | ops->msi_prepare = msi_domain_ops_default.msi_prepare; | ||
206 | if (ops->set_desc == NULL) | ||
207 | ops->set_desc = msi_domain_ops_default.set_desc; | ||
208 | } | ||
209 | |||
/* Substitute default irq_chip callbacks for any hook left unset. */
static void msi_domain_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	/* A chip must be supplied when default chip ops are requested. */
	BUG_ON(!chip);
	/*
	 * NOTE(review): falling back to pci_msi_{mask,unmask}_irq couples
	 * this generic code to PCI - consistent with the "temporary"
	 * <linux/pci.h> include at the top of the file; to be revisited.
	 */
	if (!chip->irq_mask)
		chip->irq_mask = pci_msi_mask_irq;
	if (!chip->irq_unmask)
		chip->irq_unmask = pci_msi_unmask_irq;
	if (!chip->irq_set_affinity)
		chip->irq_set_affinity = msi_domain_set_affinity;
}
222 | |||
/**
 * msi_create_irq_domain - Create a MSI interrupt domain
 * @node: Optional device-tree node of the interrupt controller
 * @info: MSI domain info
 * @parent: Parent irq domain
 *
 * Return: the hierarchical irq domain created by irq_domain_add_hierarchy().
 */
struct irq_domain *msi_create_irq_domain(struct device_node *node,
					 struct msi_domain_info *info,
					 struct irq_domain *parent)
{
	/* Fill in default domain/chip callbacks where the caller asked for them. */
	if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
		msi_domain_update_dom_ops(info);
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		msi_domain_update_chip_ops(info);

	return irq_domain_add_hierarchy(parent, 0, 0, node, &msi_domain_ops,
					info);
}
241 | |||
/**
 * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain
 * @domain: The domain to allocate from
 * @dev: Pointer to device struct of the device for which the interrupts
 *	 are allocated
 * @nvec: The number of interrupts to allocate
 *
 * Returns 0 on success or an error code.
 */
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			  int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	msi_alloc_info_t arg;
	struct msi_desc *desc;
	int i, ret, virq = -1;

	/* Validate the device, then let the domain set up the alloc info. */
	ret = ops->msi_check(domain, info, dev);
	if (ret == 0)
		ret = ops->msi_prepare(domain, dev, nvec, &arg);
	if (ret)
		return ret;

	for_each_msi_entry(desc, dev) {
		ops->set_desc(&arg, desc);
		/* Identity map: request virq == hwirq; otherwise any virq. */
		if (info->flags & MSI_FLAG_IDENTITY_MAP)
			virq = (int)ops->get_hwirq(info, &arg);
		else
			virq = -1;

		virq = __irq_domain_alloc_irqs(domain, virq, desc->nvec_used,
					       dev_to_node(dev), &arg, false);
		if (virq < 0) {
			/*
			 * NOTE(review): interrupts allocated for earlier
			 * descriptors in this loop are not released on this
			 * error path - confirm callers clean up via
			 * msi_domain_free_irqs().
			 */
			ret = -ENOSPC;
			if (ops->handle_error)
				ret = ops->handle_error(domain, desc, ret);
			if (ops->msi_finish)
				ops->msi_finish(&arg, ret);
			return ret;
		}

		for (i = 0; i < desc->nvec_used; i++)
			irq_set_msi_desc_off(virq, i, desc);
	}

	if (ops->msi_finish)
		ops->msi_finish(&arg, 0);

	/*
	 * NOTE(review): virq still holds the base of the LAST allocation
	 * above, so this debug loop prints that value for every descriptor
	 * rather than each descriptor's own irq - debug output only, but
	 * worth fixing.
	 */
	for_each_msi_entry(desc, dev) {
		if (desc->nvec_used == 1)
			dev_dbg(dev, "irq %d for MSI\n", virq);
		else
			dev_dbg(dev, "irq [%d-%d] for MSI\n",
				virq, virq + desc->nvec_used - 1);
	}

	return 0;
}
301 | |||
/**
 * msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated to @dev
 * @domain: The domain managing the interrupts
 * @dev: Pointer to device struct of the device for which the interrupts
 *	 are freed
 *
 * NOTE(review): assumes every descriptor of @dev has a valid irq; a
 * descriptor with desc->irq == 0 would pass 0 to irq_domain_free_irqs() -
 * confirm callers only invoke this after a successful allocation.
 */
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	struct msi_desc *desc;

	for_each_msi_entry(desc, dev) {
		irq_domain_free_irqs(desc->irq, desc->nvec_used);
		desc->irq = 0;
	}
}
317 | |||
318 | /** | ||
319 | * msi_get_domain_info - Get the MSI interrupt domain info for @domain | ||
320 | * @domain: The interrupt domain to retrieve data from | ||
321 | * | ||
322 | * Returns the pointer to the msi_domain_info stored in | ||
323 | * @domain->host_data. | ||
324 | */ | ||
325 | struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain) | ||
326 | { | ||
327 | return (struct msi_domain_info *)domain->host_data; | ||
328 | } | ||
329 | |||
330 | #endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */ | ||