summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt7
-rw-r--r--Documentation/arm64/silicon-errata.txt1
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.txt36
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt4
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt3
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt3
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/socionext,synquacer-exiu.txt32
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt4
-rw-r--r--arch/arm/include/asm/arch_gicv3.h5
-rw-r--r--arch/arm64/Kconfig19
-rw-r--r--arch/arm64/Kconfig.platforms3
-rw-r--r--arch/arm64/include/asm/arch_gicv3.h5
-rw-r--r--arch/x86/include/asm/irqdomain.h4
-rw-r--r--arch/x86/kernel/apic/htirq.c5
-rw-r--r--arch/x86/kernel/apic/io_apic.c9
-rw-r--r--arch/x86/platform/uv/uv_irq.c5
-rw-r--r--drivers/gpio/gpio-xgene-sb.c8
-rw-r--r--drivers/iommu/amd_iommu.c5
-rw-r--r--drivers/iommu/intel_irq_remapping.c5
-rw-r--r--drivers/irqchip/Kconfig13
-rw-r--r--drivers/irqchip/Makefile2
-rw-r--r--drivers/irqchip/irq-aspeed-i2c-ic.c4
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c171
-rw-r--r--drivers/irqchip/irq-gic-common.c5
-rw-r--r--drivers/irqchip/irq-gic-common.h2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c390
-rw-r--r--drivers/irqchip/irq-gic-v3.c50
-rw-r--r--drivers/irqchip/irq-gic.c71
-rw-r--r--drivers/irqchip/irq-meson-gpio.c419
-rw-r--r--drivers/irqchip/irq-mips-gic.c226
-rw-r--r--drivers/irqchip/irq-omap-intc.c16
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c9
-rw-r--r--drivers/irqchip/irq-sni-exiu.c227
-rw-r--r--drivers/irqchip/irq-stm32-exti.c206
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c5
-rw-r--r--include/linux/cpuhotplug.h1
-rw-r--r--include/linux/irq.h22
-rw-r--r--include/linux/irqchip/arm-gic-v3.h4
-rw-r--r--include/linux/irqchip/arm-gic-v4.h9
-rw-r--r--include/linux/irqchip/irq-omap-intc.h2
-rw-r--r--include/linux/irqdesc.h1
-rw-r--r--include/linux/irqdomain.h20
-rw-r--r--include/linux/msi.h5
-rw-r--r--include/trace/events/irq_matrix.h201
-rw-r--r--kernel/irq/Kconfig3
-rw-r--r--kernel/irq/Makefile1
-rw-r--r--kernel/irq/autoprobe.c2
-rw-r--r--kernel/irq/chip.c35
-rw-r--r--kernel/irq/debugfs.c12
-rw-r--r--kernel/irq/internals.h19
-rw-r--r--kernel/irq/irqdesc.c9
-rw-r--r--kernel/irq/irqdomain.c60
-rw-r--r--kernel/irq/manage.c23
-rw-r--r--kernel/irq/matrix.c443
-rw-r--r--kernel/irq/msi.c32
-rw-r--r--kernel/irq/proc.c5
-rw-r--r--kernel/irq_work.c10
57 files changed, 2490 insertions, 408 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 38ed8787261b..b74e13312fdc 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1716,6 +1716,13 @@
1716 irqaffinity= [SMP] Set the default irq affinity mask 1716 irqaffinity= [SMP] Set the default irq affinity mask
1717 The argument is a cpu list, as described above. 1717 The argument is a cpu list, as described above.
1718 1718
1719 irqchip.gicv2_force_probe=
1720 [ARM, ARM64]
1721 Format: <bool>
1722 Force the kernel to look for the second 4kB page
1723 of a GICv2 controller even if the memory range
1724 exposed by the device tree is too small.
1725
1719 irqfixup [HW] 1726 irqfixup [HW]
1720 When an interrupt is not handled search all handlers 1727 When an interrupt is not handled search all handlers
1721 for it. Intended to get systems with badly broken 1728 for it. Intended to get systems with badly broken
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index 66e8ce14d23d..304bf22bb83c 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -70,6 +70,7 @@ stable kernels.
70| | | | | 70| | | | |
71| Hisilicon | Hip0{5,6,7} | #161010101 | HISILICON_ERRATUM_161010101 | 71| Hisilicon | Hip0{5,6,7} | #161010101 | HISILICON_ERRATUM_161010101 |
72| Hisilicon | Hip0{6,7} | #161010701 | N/A | 72| Hisilicon | Hip0{6,7} | #161010701 | N/A |
73| Hisilicon | Hip07 | #161600802 | HISILICON_ERRATUM_161600802 |
73| | | | | 74| | | | |
74| Qualcomm Tech. | Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 | 75| Qualcomm Tech. | Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 |
75| Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 | 76| Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 |
diff --git a/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.txt
new file mode 100644
index 000000000000..a83f9a5734ca
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.txt
@@ -0,0 +1,36 @@
1Amlogic meson GPIO interrupt controller
2
3Meson SoCs contain an interrupt controller which is able to watch the SoC
4pads and generate an interrupt on edge or level. The controller is essentially
5a 256 pads to 8 GIC interrupt multiplexer, with a filter block to select edge
6or level and polarity. It does not expose all 256 mux inputs because the
7documentation shows that the upper part is not mapped to any pad. The actual
8number of interrupts exposed depends on the SoC.
9
10Required properties:
11
12- compatible : must have "amlogic,meson-gpio-intc" and either
13 "amlogic,meson8-gpio-intc" for meson8 SoCs (S802) or
14 "amlogic,meson8b-gpio-intc" for meson8b SoCs (S805) or
15 "amlogic,meson-gxbb-gpio-intc" for GXBB SoCs (S905) or
16 "amlogic,meson-gxl-gpio-intc" for GXL SoCs (S905X, S912)
17- interrupt-parent : a phandle to the GIC the interrupts are routed to.
18 Usually this is provided at the root level of the device tree as it is
19 common to most of the SoC.
20- reg : Specifies base physical address and size of the registers.
21- interrupt-controller : Identifies the node as an interrupt controller.
22- #interrupt-cells : Specifies the number of cells needed to encode an
23 interrupt source. The value must be 2.
24- meson,channel-interrupts: Array with the 8 upstream hwirq numbers. These
25 are the hwirqs used on the parent interrupt controller.
26
27Example:
28
29gpio_interrupt: interrupt-controller@9880 {
30 compatible = "amlogic,meson-gxbb-gpio-intc",
31 "amlogic,meson-gpio-intc";
32 reg = <0x0 0x9880 0x0 0x10>;
33 interrupt-controller;
34 #interrupt-cells = <2>;
35 meson,channel-interrupts = <64 65 66 67 68 69 70 71>;
36};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index 5eb108e180fa..0a57f2f4167d 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
@@ -75,6 +75,10 @@ These nodes must have the following properties:
75- reg: Specifies the base physical address and size of the ITS 75- reg: Specifies the base physical address and size of the ITS
76 registers. 76 registers.
77 77
78Optional:
79- socionext,synquacer-pre-its: (u32, u32) tuple describing the untranslated
80 address and size of the pre-ITS window.
81
78The main GIC node must contain the appropriate #address-cells, 82The main GIC node must contain the appropriate #address-cells,
79#size-cells and ranges properties for the reg property of all ITS 83#size-cells and ranges properties for the reg property of all ITS
80nodes. 84nodes.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt
index 448273a30a11..36df06c5c567 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt
@@ -2,7 +2,8 @@ Broadcom Generic Level 2 Interrupt Controller
2 2
3Required properties: 3Required properties:
4 4
5- compatible: should be "brcm,l2-intc" 5- compatible: should be "brcm,l2-intc" for latched interrupt controllers
6 should be "brcm,bcm7271-l2-intc" for level interrupt controllers
6- reg: specifies the base physical address and size of the registers 7- reg: specifies the base physical address and size of the registers
7- interrupt-controller: identifies the node as an interrupt controller 8- interrupt-controller: identifies the node as an interrupt controller
8- #interrupt-cells: specifies the number of cells needed to encode an 9- #interrupt-cells: specifies the number of cells needed to encode an
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
index e3f052d8c11a..33c9a10fdc91 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
@@ -13,6 +13,9 @@ Required properties:
13 - "renesas,irqc-r8a7793" (R-Car M2-N) 13 - "renesas,irqc-r8a7793" (R-Car M2-N)
14 - "renesas,irqc-r8a7794" (R-Car E2) 14 - "renesas,irqc-r8a7794" (R-Car E2)
15 - "renesas,intc-ex-r8a7795" (R-Car H3) 15 - "renesas,intc-ex-r8a7795" (R-Car H3)
16 - "renesas,intc-ex-r8a7796" (R-Car M3-W)
17 - "renesas,intc-ex-r8a77970" (R-Car V3M)
18 - "renesas,intc-ex-r8a77995" (R-Car D3)
16- #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in 19- #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in
17 interrupts.txt in this directory 20 interrupts.txt in this directory
18- clocks: Must contain a reference to the functional clock. 21- clocks: Must contain a reference to the functional clock.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/socionext,synquacer-exiu.txt b/Documentation/devicetree/bindings/interrupt-controller/socionext,synquacer-exiu.txt
new file mode 100644
index 000000000000..8b2faefe29ca
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/socionext,synquacer-exiu.txt
@@ -0,0 +1,32 @@
1Socionext SynQuacer External Interrupt Unit (EXIU)
2
3The Socionext SynQuacer SoC has an external interrupt unit (EXIU)
4that forwards a block of 32 configurable input lines to 32 adjacent
5level-high type GICv3 SPIs.
6
7Required properties:
8
9- compatible : Should be "socionext,synquacer-exiu".
10- reg : Specifies base physical address and size of the
11 control registers.
12- interrupt-controller : Identifies the node as an interrupt controller.
13- #interrupt-cells : Specifies the number of cells needed to encode an
14 interrupt source. The value must be 3.
15- interrupt-parent : phandle of the GIC these interrupts are routed to.
16- socionext,spi-base : The SPI number of the first SPI of the 32 adjacent
17 ones the EXIU forwards its interrupts to.
18
19Notes:
20
21- Only SPIs can use the EXIU as an interrupt parent.
22
23Example:
24
25 exiu: interrupt-controller@510c0000 {
26 compatible = "socionext,synquacer-exiu";
27 reg = <0x0 0x510c0000 0x0 0x20>;
28 interrupt-controller;
29 interrupt-parent = <&gic>;
30 #interrupt-cells = <3>;
31 socionext,spi-base = <112>;
32 };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt
index 6e7703d4ff5b..edf03f09244b 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt
@@ -2,7 +2,9 @@ STM32 External Interrupt Controller
2 2
3Required properties: 3Required properties:
4 4
5- compatible: Should be "st,stm32-exti" 5- compatible: Should be:
6 "st,stm32-exti"
7 "st,stm32h7-exti"
6- reg: Specifies base physical address and size of the registers 8- reg: Specifies base physical address and size of the registers
7- interrupt-controller: Identifies the node as an interrupt controller 9- interrupt-controller: Identifies the node as an interrupt controller
8- #interrupt-cells: Specifies the number of cells to encode an interrupt 10- #interrupt-cells: Specifies the number of cells to encode an interrupt
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index eee269321923..1070044f5c3f 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -196,6 +196,11 @@ static inline void gic_write_ctlr(u32 val)
196 isb(); 196 isb();
197} 197}
198 198
199static inline u32 gic_read_ctlr(void)
200{
201 return read_sysreg(ICC_CTLR);
202}
203
199static inline void gic_write_grpen1(u32 val) 204static inline void gic_write_grpen1(u32 val)
200{ 205{
201 write_sysreg(val, ICC_IGRPEN1); 206 write_sysreg(val, ICC_IGRPEN1);
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index df02ad932020..6205f521b648 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -556,6 +556,25 @@ config QCOM_QDF2400_ERRATUM_0065
556 556
557 If unsure, say Y. 557 If unsure, say Y.
558 558
559
560config SOCIONEXT_SYNQUACER_PREITS
561 bool "Socionext Synquacer: Workaround for GICv3 pre-ITS"
562 default y
563 help
564 Socionext Synquacer SoCs implement a separate h/w block to generate
565 MSI doorbell writes with non-zero values for the device ID.
566
567 If unsure, say Y.
568
569config HISILICON_ERRATUM_161600802
570 bool "Hip07 161600802: Erroneous redistributor VLPI base"
571 default y
572 help
573 The HiSilicon Hip07 SoC uses the wrong redistributor base
574 when issuing ITS commands such as VMOVP and VMAPP, and requires
575 a 128kB offset to be applied to the target address in these commands.
576
577 If unsure, say Y.
559endmenu 578endmenu
560 579
561 580
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 6b54ee8c1262..1d03ef54295a 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -161,6 +161,9 @@ config ARCH_SEATTLE
161config ARCH_SHMOBILE 161config ARCH_SHMOBILE
162 bool 162 bool
163 163
164config ARCH_SYNQUACER
165 bool "Socionext SynQuacer SoC Family"
166
164config ARCH_RENESAS 167config ARCH_RENESAS
165 bool "Renesas SoC Platforms" 168 bool "Renesas SoC Platforms"
166 select ARCH_SHMOBILE 169 select ARCH_SHMOBILE
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index b7e3f74822da..9becba9ab392 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -87,6 +87,11 @@ static inline void gic_write_ctlr(u32 val)
87 isb(); 87 isb();
88} 88}
89 89
90static inline u32 gic_read_ctlr(void)
91{
92 return read_sysreg_s(SYS_ICC_CTLR_EL1);
93}
94
90static inline void gic_write_grpen1(u32 val) 95static inline void gic_write_grpen1(u32 val)
91{ 96{
92 write_sysreg_s(val, SYS_ICC_IGRPEN1_EL1); 97 write_sysreg_s(val, SYS_ICC_IGRPEN1_EL1);
diff --git a/arch/x86/include/asm/irqdomain.h b/arch/x86/include/asm/irqdomain.h
index 423e112c1e8f..e2a51ee1236f 100644
--- a/arch/x86/include/asm/irqdomain.h
+++ b/arch/x86/include/asm/irqdomain.h
@@ -42,8 +42,8 @@ extern int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
42 unsigned int nr_irqs, void *arg); 42 unsigned int nr_irqs, void *arg);
43extern void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq, 43extern void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
44 unsigned int nr_irqs); 44 unsigned int nr_irqs);
45extern void mp_irqdomain_activate(struct irq_domain *domain, 45extern int mp_irqdomain_activate(struct irq_domain *domain,
46 struct irq_data *irq_data); 46 struct irq_data *irq_data, bool early);
47extern void mp_irqdomain_deactivate(struct irq_domain *domain, 47extern void mp_irqdomain_deactivate(struct irq_domain *domain,
48 struct irq_data *irq_data); 48 struct irq_data *irq_data);
49extern int mp_irqdomain_ioapic_idx(struct irq_domain *domain); 49extern int mp_irqdomain_ioapic_idx(struct irq_domain *domain);
diff --git a/arch/x86/kernel/apic/htirq.c b/arch/x86/kernel/apic/htirq.c
index 56ccf9346b08..b07075dce8b7 100644
--- a/arch/x86/kernel/apic/htirq.c
+++ b/arch/x86/kernel/apic/htirq.c
@@ -112,8 +112,8 @@ static void htirq_domain_free(struct irq_domain *domain, unsigned int virq,
112 irq_domain_free_irqs_top(domain, virq, nr_irqs); 112 irq_domain_free_irqs_top(domain, virq, nr_irqs);
113} 113}
114 114
115static void htirq_domain_activate(struct irq_domain *domain, 115static int htirq_domain_activate(struct irq_domain *domain,
116 struct irq_data *irq_data) 116 struct irq_data *irq_data, bool early)
117{ 117{
118 struct ht_irq_msg msg; 118 struct ht_irq_msg msg;
119 struct irq_cfg *cfg = irqd_cfg(irq_data); 119 struct irq_cfg *cfg = irqd_cfg(irq_data);
@@ -132,6 +132,7 @@ static void htirq_domain_activate(struct irq_domain *domain,
132 HT_IRQ_LOW_MT_ARBITRATED) | 132 HT_IRQ_LOW_MT_ARBITRATED) |
133 HT_IRQ_LOW_IRQ_MASKED; 133 HT_IRQ_LOW_IRQ_MASKED;
134 write_ht_irq_msg(irq_data->irq, &msg); 134 write_ht_irq_msg(irq_data->irq, &msg);
135 return 0;
135} 136}
136 137
137static void htirq_domain_deactivate(struct irq_domain *domain, 138static void htirq_domain_deactivate(struct irq_domain *domain,
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 3b89b27945ff..18c8aca5bae7 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2097,7 +2097,7 @@ static inline void __init check_timer(void)
2097 unmask_ioapic_irq(irq_get_irq_data(0)); 2097 unmask_ioapic_irq(irq_get_irq_data(0));
2098 } 2098 }
2099 irq_domain_deactivate_irq(irq_data); 2099 irq_domain_deactivate_irq(irq_data);
2100 irq_domain_activate_irq(irq_data); 2100 irq_domain_activate_irq(irq_data, false);
2101 if (timer_irq_works()) { 2101 if (timer_irq_works()) {
2102 if (disable_timer_pin_1 > 0) 2102 if (disable_timer_pin_1 > 0)
2103 clear_IO_APIC_pin(0, pin1); 2103 clear_IO_APIC_pin(0, pin1);
@@ -2119,7 +2119,7 @@ static inline void __init check_timer(void)
2119 */ 2119 */
2120 replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2); 2120 replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
2121 irq_domain_deactivate_irq(irq_data); 2121 irq_domain_deactivate_irq(irq_data);
2122 irq_domain_activate_irq(irq_data); 2122 irq_domain_activate_irq(irq_data, false);
2123 legacy_pic->unmask(0); 2123 legacy_pic->unmask(0);
2124 if (timer_irq_works()) { 2124 if (timer_irq_works()) {
2125 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); 2125 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
@@ -2978,8 +2978,8 @@ void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
2978 irq_domain_free_irqs_top(domain, virq, nr_irqs); 2978 irq_domain_free_irqs_top(domain, virq, nr_irqs);
2979} 2979}
2980 2980
2981void mp_irqdomain_activate(struct irq_domain *domain, 2981int mp_irqdomain_activate(struct irq_domain *domain,
2982 struct irq_data *irq_data) 2982 struct irq_data *irq_data, bool early)
2983{ 2983{
2984 unsigned long flags; 2984 unsigned long flags;
2985 struct irq_pin_list *entry; 2985 struct irq_pin_list *entry;
@@ -2989,6 +2989,7 @@ void mp_irqdomain_activate(struct irq_domain *domain,
2989 for_each_irq_pin(entry, data->irq_2_pin) 2989 for_each_irq_pin(entry, data->irq_2_pin)
2990 __ioapic_write_entry(entry->apic, entry->pin, data->entry); 2990 __ioapic_write_entry(entry->apic, entry->pin, data->entry);
2991 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2991 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2992 return 0;
2992} 2993}
2993 2994
2994void mp_irqdomain_deactivate(struct irq_domain *domain, 2995void mp_irqdomain_deactivate(struct irq_domain *domain,
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index 03fc397335b7..5f6fd860820a 100644
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -127,10 +127,11 @@ static void uv_domain_free(struct irq_domain *domain, unsigned int virq,
127 * Re-target the irq to the specified CPU and enable the specified MMR located 127 * Re-target the irq to the specified CPU and enable the specified MMR located
128 * on the specified blade to allow the sending of MSIs to the specified CPU. 128 * on the specified blade to allow the sending of MSIs to the specified CPU.
129 */ 129 */
130static void uv_domain_activate(struct irq_domain *domain, 130static int uv_domain_activate(struct irq_domain *domain,
131 struct irq_data *irq_data) 131 struct irq_data *irq_data, bool early)
132{ 132{
133 uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data); 133 uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data);
134 return 0;
134} 135}
135 136
136/* 137/*
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index 033258634b8c..b5843fe6a44d 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -140,8 +140,9 @@ static int xgene_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio)
140 return irq_create_fwspec_mapping(&fwspec); 140 return irq_create_fwspec_mapping(&fwspec);
141} 141}
142 142
143static void xgene_gpio_sb_domain_activate(struct irq_domain *d, 143static int xgene_gpio_sb_domain_activate(struct irq_domain *d,
144 struct irq_data *irq_data) 144 struct irq_data *irq_data,
145 bool early)
145{ 146{
146 struct xgene_gpio_sb *priv = d->host_data; 147 struct xgene_gpio_sb *priv = d->host_data;
147 u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq); 148 u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
@@ -150,11 +151,12 @@ static void xgene_gpio_sb_domain_activate(struct irq_domain *d,
150 dev_err(priv->gc.parent, 151 dev_err(priv->gc.parent,
151 "Unable to configure XGene GPIO standby pin %d as IRQ\n", 152 "Unable to configure XGene GPIO standby pin %d as IRQ\n",
152 gpio); 153 gpio);
153 return; 154 return -ENOSPC;
154 } 155 }
155 156
156 xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO, 157 xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
157 gpio * 2, 1); 158 gpio * 2, 1);
159 return 0;
158} 160}
159 161
160static void xgene_gpio_sb_domain_deactivate(struct irq_domain *d, 162static void xgene_gpio_sb_domain_deactivate(struct irq_domain *d,
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 8e8874d23717..330856803e90 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -4173,8 +4173,8 @@ static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
4173 irq_domain_free_irqs_common(domain, virq, nr_irqs); 4173 irq_domain_free_irqs_common(domain, virq, nr_irqs);
4174} 4174}
4175 4175
4176static void irq_remapping_activate(struct irq_domain *domain, 4176static int irq_remapping_activate(struct irq_domain *domain,
4177 struct irq_data *irq_data) 4177 struct irq_data *irq_data, bool early)
4178{ 4178{
4179 struct amd_ir_data *data = irq_data->chip_data; 4179 struct amd_ir_data *data = irq_data->chip_data;
4180 struct irq_2_irte *irte_info = &data->irq_2_irte; 4180 struct irq_2_irte *irte_info = &data->irq_2_irte;
@@ -4183,6 +4183,7 @@ static void irq_remapping_activate(struct irq_domain *domain,
4183 if (iommu) 4183 if (iommu)
4184 iommu->irte_ops->activate(data->entry, irte_info->devid, 4184 iommu->irte_ops->activate(data->entry, irte_info->devid,
4185 irte_info->index); 4185 irte_info->index);
4186 return 0;
4186} 4187}
4187 4188
4188static void irq_remapping_deactivate(struct irq_domain *domain, 4189static void irq_remapping_deactivate(struct irq_domain *domain,
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 25842b566c39..324163330eaa 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -1390,12 +1390,13 @@ static void intel_irq_remapping_free(struct irq_domain *domain,
1390 irq_domain_free_irqs_common(domain, virq, nr_irqs); 1390 irq_domain_free_irqs_common(domain, virq, nr_irqs);
1391} 1391}
1392 1392
1393static void intel_irq_remapping_activate(struct irq_domain *domain, 1393static int intel_irq_remapping_activate(struct irq_domain *domain,
1394 struct irq_data *irq_data) 1394 struct irq_data *irq_data, bool early)
1395{ 1395{
1396 struct intel_ir_data *data = irq_data->chip_data; 1396 struct intel_ir_data *data = irq_data->chip_data;
1397 1397
1398 modify_irte(&data->irq_2_iommu, &data->irte_entry); 1398 modify_irte(&data->irq_2_iommu, &data->irte_entry);
1399 return 0;
1399} 1400}
1400 1401
1401static void intel_irq_remapping_deactivate(struct irq_domain *domain, 1402static void intel_irq_remapping_deactivate(struct irq_domain *domain,
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index a2ca82f6c2dd..53380bd72ea4 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -1,3 +1,5 @@
1menu "IRQ chip support"
2
1config IRQCHIP 3config IRQCHIP
2 def_bool y 4 def_bool y
3 depends on OF_IRQ 5 depends on OF_IRQ
@@ -307,6 +309,7 @@ config EZNPS_GIC
307config STM32_EXTI 309config STM32_EXTI
308 bool 310 bool
309 select IRQ_DOMAIN 311 select IRQ_DOMAIN
312 select GENERIC_IRQ_CHIP
310 313
311config QCOM_IRQ_COMBINER 314config QCOM_IRQ_COMBINER
312 bool "QCOM IRQ combiner support" 315 bool "QCOM IRQ combiner support"
@@ -324,3 +327,13 @@ config IRQ_UNIPHIER_AIDET
324 select IRQ_DOMAIN_HIERARCHY 327 select IRQ_DOMAIN_HIERARCHY
325 help 328 help
326 Support for the UniPhier AIDET (ARM Interrupt Detector). 329 Support for the UniPhier AIDET (ARM Interrupt Detector).
330
331config MESON_IRQ_GPIO
332 bool "Meson GPIO Interrupt Multiplexer"
333 depends on ARCH_MESON
334 select IRQ_DOMAIN
335 select IRQ_DOMAIN_HIERARCHY
336 help
337 Support Meson SoC Family GPIO Interrupt Multiplexer
338
339endmenu
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 046df81c402a..dae7282bfdef 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -81,3 +81,5 @@ obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o
81obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o 81obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o
82obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o 82obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
83obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o 83obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
84obj-$(CONFIG_ARCH_SYNQUACER) += irq-sni-exiu.o
85obj-$(CONFIG_MESON_IRQ_GPIO) += irq-meson-gpio.o
diff --git a/drivers/irqchip/irq-aspeed-i2c-ic.c b/drivers/irqchip/irq-aspeed-i2c-ic.c
index 815b88dd18f2..f20200af0992 100644
--- a/drivers/irqchip/irq-aspeed-i2c-ic.c
+++ b/drivers/irqchip/irq-aspeed-i2c-ic.c
@@ -76,8 +76,8 @@ static int __init aspeed_i2c_ic_of_init(struct device_node *node,
76 return -ENOMEM; 76 return -ENOMEM;
77 77
78 i2c_ic->base = of_iomap(node, 0); 78 i2c_ic->base = of_iomap(node, 0);
79 if (IS_ERR(i2c_ic->base)) { 79 if (!i2c_ic->base) {
80 ret = PTR_ERR(i2c_ic->base); 80 ret = -ENOMEM;
81 goto err_free_ic; 81 goto err_free_ic;
82 } 82 }
83 83
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index b009b916a292..691d20eb0bec 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Generic Broadcom Set Top Box Level 2 Interrupt controller driver 2 * Generic Broadcom Set Top Box Level 2 Interrupt controller driver
3 * 3 *
4 * Copyright (C) 2014 Broadcom Corporation 4 * Copyright (C) 2014-2017 Broadcom
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
@@ -31,35 +31,82 @@
31#include <linux/irqchip.h> 31#include <linux/irqchip.h>
32#include <linux/irqchip/chained_irq.h> 32#include <linux/irqchip/chained_irq.h>
33 33
34/* Register offsets in the L2 interrupt controller */ 34struct brcmstb_intc_init_params {
35#define CPU_STATUS 0x00 35 irq_flow_handler_t handler;
36#define CPU_SET 0x04 36 int cpu_status;
37#define CPU_CLEAR 0x08 37 int cpu_clear;
38#define CPU_MASK_STATUS 0x0c 38 int cpu_mask_status;
39#define CPU_MASK_SET 0x10 39 int cpu_mask_set;
40#define CPU_MASK_CLEAR 0x14 40 int cpu_mask_clear;
41};
42
43/* Register offsets in the L2 latched interrupt controller */
44static const struct brcmstb_intc_init_params l2_edge_intc_init = {
45 .handler = handle_edge_irq,
46 .cpu_status = 0x00,
47 .cpu_clear = 0x08,
48 .cpu_mask_status = 0x0c,
49 .cpu_mask_set = 0x10,
50 .cpu_mask_clear = 0x14
51};
52
53/* Register offsets in the L2 level interrupt controller */
54static const struct brcmstb_intc_init_params l2_lvl_intc_init = {
55 .handler = handle_level_irq,
56 .cpu_status = 0x00,
57 .cpu_clear = -1, /* Register not present */
58 .cpu_mask_status = 0x04,
59 .cpu_mask_set = 0x08,
60 .cpu_mask_clear = 0x0C
61};
41 62
42/* L2 intc private data structure */ 63/* L2 intc private data structure */
43struct brcmstb_l2_intc_data { 64struct brcmstb_l2_intc_data {
44 int parent_irq;
45 void __iomem *base;
46 struct irq_domain *domain; 65 struct irq_domain *domain;
66 struct irq_chip_generic *gc;
67 int status_offset;
68 int mask_offset;
47 bool can_wake; 69 bool can_wake;
48 u32 saved_mask; /* for suspend/resume */ 70 u32 saved_mask; /* for suspend/resume */
49}; 71};
50 72
73/**
74 * brcmstb_l2_mask_and_ack - Mask and ack pending interrupt
75 * @d: irq_data
76 *
77 * Chip has separate enable/disable registers instead of a single mask
78 * register and pending interrupt is acknowledged by setting a bit.
79 *
80 * Note: This function is generic and could easily be added to the
81 * generic irqchip implementation if there ever becomes a will to do so.
82 * Perhaps with a name like irq_gc_mask_disable_and_ack_set().
83 *
84 * e.g.: https://patchwork.kernel.org/patch/9831047/
85 */
86static void brcmstb_l2_mask_and_ack(struct irq_data *d)
87{
88 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
89 struct irq_chip_type *ct = irq_data_get_chip_type(d);
90 u32 mask = d->mask;
91
92 irq_gc_lock(gc);
93 irq_reg_writel(gc, mask, ct->regs.disable);
94 *ct->mask_cache &= ~mask;
95 irq_reg_writel(gc, mask, ct->regs.ack);
96 irq_gc_unlock(gc);
97}
98
51static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc) 99static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
52{ 100{
53 struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc); 101 struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc);
54 struct irq_chip_generic *gc = irq_get_domain_generic_chip(b->domain, 0);
55 struct irq_chip *chip = irq_desc_get_chip(desc); 102 struct irq_chip *chip = irq_desc_get_chip(desc);
56 unsigned int irq; 103 unsigned int irq;
57 u32 status; 104 u32 status;
58 105
59 chained_irq_enter(chip, desc); 106 chained_irq_enter(chip, desc);
60 107
61 status = irq_reg_readl(gc, CPU_STATUS) & 108 status = irq_reg_readl(b->gc, b->status_offset) &
62 ~(irq_reg_readl(gc, CPU_MASK_STATUS)); 109 ~(irq_reg_readl(b->gc, b->mask_offset));
63 110
64 if (status == 0) { 111 if (status == 0) {
65 raw_spin_lock(&desc->lock); 112 raw_spin_lock(&desc->lock);
@@ -70,10 +117,8 @@ static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
70 117
71 do { 118 do {
72 irq = ffs(status) - 1; 119 irq = ffs(status) - 1;
73 /* ack at our level */
74 irq_reg_writel(gc, 1 << irq, CPU_CLEAR);
75 status &= ~(1 << irq); 120 status &= ~(1 << irq);
76 generic_handle_irq(irq_find_mapping(b->domain, irq)); 121 generic_handle_irq(irq_linear_revmap(b->domain, irq));
77 } while (status); 122 } while (status);
78out: 123out:
79 chained_irq_exit(chip, desc); 124 chained_irq_exit(chip, desc);
@@ -82,16 +127,17 @@ out:
82static void brcmstb_l2_intc_suspend(struct irq_data *d) 127static void brcmstb_l2_intc_suspend(struct irq_data *d)
83{ 128{
84 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 129 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
130 struct irq_chip_type *ct = irq_data_get_chip_type(d);
85 struct brcmstb_l2_intc_data *b = gc->private; 131 struct brcmstb_l2_intc_data *b = gc->private;
86 132
87 irq_gc_lock(gc); 133 irq_gc_lock(gc);
88 /* Save the current mask */ 134 /* Save the current mask */
89 b->saved_mask = irq_reg_readl(gc, CPU_MASK_STATUS); 135 b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
90 136
91 if (b->can_wake) { 137 if (b->can_wake) {
92 /* Program the wakeup mask */ 138 /* Program the wakeup mask */
93 irq_reg_writel(gc, ~gc->wake_active, CPU_MASK_SET); 139 irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable);
94 irq_reg_writel(gc, gc->wake_active, CPU_MASK_CLEAR); 140 irq_reg_writel(gc, gc->wake_active, ct->regs.enable);
95 } 141 }
96 irq_gc_unlock(gc); 142 irq_gc_unlock(gc);
97} 143}
@@ -99,49 +145,56 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
99static void brcmstb_l2_intc_resume(struct irq_data *d) 145static void brcmstb_l2_intc_resume(struct irq_data *d)
100{ 146{
101 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 147 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
148 struct irq_chip_type *ct = irq_data_get_chip_type(d);
102 struct brcmstb_l2_intc_data *b = gc->private; 149 struct brcmstb_l2_intc_data *b = gc->private;
103 150
104 irq_gc_lock(gc); 151 irq_gc_lock(gc);
105 /* Clear unmasked non-wakeup interrupts */ 152 if (ct->chip.irq_ack) {
106 irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active, CPU_CLEAR); 153 /* Clear unmasked non-wakeup interrupts */
154 irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active,
155 ct->regs.ack);
156 }
107 157
108 /* Restore the saved mask */ 158 /* Restore the saved mask */
109 irq_reg_writel(gc, b->saved_mask, CPU_MASK_SET); 159 irq_reg_writel(gc, b->saved_mask, ct->regs.disable);
110 irq_reg_writel(gc, ~b->saved_mask, CPU_MASK_CLEAR); 160 irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable);
111 irq_gc_unlock(gc); 161 irq_gc_unlock(gc);
112} 162}
113 163
114static int __init brcmstb_l2_intc_of_init(struct device_node *np, 164static int __init brcmstb_l2_intc_of_init(struct device_node *np,
115 struct device_node *parent) 165 struct device_node *parent,
166 const struct brcmstb_intc_init_params
167 *init_params)
116{ 168{
117 unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; 169 unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
118 struct brcmstb_l2_intc_data *data; 170 struct brcmstb_l2_intc_data *data;
119 struct irq_chip_generic *gc;
120 struct irq_chip_type *ct; 171 struct irq_chip_type *ct;
121 int ret; 172 int ret;
122 unsigned int flags; 173 unsigned int flags;
174 int parent_irq;
175 void __iomem *base;
123 176
124 data = kzalloc(sizeof(*data), GFP_KERNEL); 177 data = kzalloc(sizeof(*data), GFP_KERNEL);
125 if (!data) 178 if (!data)
126 return -ENOMEM; 179 return -ENOMEM;
127 180
128 data->base = of_iomap(np, 0); 181 base = of_iomap(np, 0);
129 if (!data->base) { 182 if (!base) {
130 pr_err("failed to remap intc L2 registers\n"); 183 pr_err("failed to remap intc L2 registers\n");
131 ret = -ENOMEM; 184 ret = -ENOMEM;
132 goto out_free; 185 goto out_free;
133 } 186 }
134 187
135 /* Disable all interrupts by default */ 188 /* Disable all interrupts by default */
136 writel(0xffffffff, data->base + CPU_MASK_SET); 189 writel(0xffffffff, base + init_params->cpu_mask_set);
137 190
138 /* Wakeup interrupts may be retained from S5 (cold boot) */ 191 /* Wakeup interrupts may be retained from S5 (cold boot) */
139 data->can_wake = of_property_read_bool(np, "brcm,irq-can-wake"); 192 data->can_wake = of_property_read_bool(np, "brcm,irq-can-wake");
140 if (!data->can_wake) 193 if (!data->can_wake && (init_params->cpu_clear >= 0))
141 writel(0xffffffff, data->base + CPU_CLEAR); 194 writel(0xffffffff, base + init_params->cpu_clear);
142 195
143 data->parent_irq = irq_of_parse_and_map(np, 0); 196 parent_irq = irq_of_parse_and_map(np, 0);
144 if (!data->parent_irq) { 197 if (!parent_irq) {
145 pr_err("failed to find parent interrupt\n"); 198 pr_err("failed to find parent interrupt\n");
146 ret = -EINVAL; 199 ret = -EINVAL;
147 goto out_unmap; 200 goto out_unmap;
@@ -163,29 +216,39 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
163 216
164 /* Allocate a single Generic IRQ chip for this node */ 217 /* Allocate a single Generic IRQ chip for this node */
165 ret = irq_alloc_domain_generic_chips(data->domain, 32, 1, 218 ret = irq_alloc_domain_generic_chips(data->domain, 32, 1,
166 np->full_name, handle_edge_irq, clr, 0, flags); 219 np->full_name, init_params->handler, clr, 0, flags);
167 if (ret) { 220 if (ret) {
168 pr_err("failed to allocate generic irq chip\n"); 221 pr_err("failed to allocate generic irq chip\n");
169 goto out_free_domain; 222 goto out_free_domain;
170 } 223 }
171 224
172 /* Set the IRQ chaining logic */ 225 /* Set the IRQ chaining logic */
173 irq_set_chained_handler_and_data(data->parent_irq, 226 irq_set_chained_handler_and_data(parent_irq,
174 brcmstb_l2_intc_irq_handle, data); 227 brcmstb_l2_intc_irq_handle, data);
175 228
176 gc = irq_get_domain_generic_chip(data->domain, 0); 229 data->gc = irq_get_domain_generic_chip(data->domain, 0);
177 gc->reg_base = data->base; 230 data->gc->reg_base = base;
178 gc->private = data; 231 data->gc->private = data;
179 ct = gc->chip_types; 232 data->status_offset = init_params->cpu_status;
180 233 data->mask_offset = init_params->cpu_mask_status;
181 ct->chip.irq_ack = irq_gc_ack_set_bit; 234
182 ct->regs.ack = CPU_CLEAR; 235 ct = data->gc->chip_types;
236
237 if (init_params->cpu_clear >= 0) {
238 ct->regs.ack = init_params->cpu_clear;
239 ct->chip.irq_ack = irq_gc_ack_set_bit;
240 ct->chip.irq_mask_ack = brcmstb_l2_mask_and_ack;
241 } else {
242 /* No Ack - but still slightly more efficient to define this */
243 ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
244 }
183 245
184 ct->chip.irq_mask = irq_gc_mask_disable_reg; 246 ct->chip.irq_mask = irq_gc_mask_disable_reg;
185 ct->regs.disable = CPU_MASK_SET; 247 ct->regs.disable = init_params->cpu_mask_set;
248 ct->regs.mask = init_params->cpu_mask_status;
186 249
187 ct->chip.irq_unmask = irq_gc_unmask_enable_reg; 250 ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
188 ct->regs.enable = CPU_MASK_CLEAR; 251 ct->regs.enable = init_params->cpu_mask_clear;
189 252
190 ct->chip.irq_suspend = brcmstb_l2_intc_suspend; 253 ct->chip.irq_suspend = brcmstb_l2_intc_suspend;
191 ct->chip.irq_resume = brcmstb_l2_intc_resume; 254 ct->chip.irq_resume = brcmstb_l2_intc_resume;
@@ -195,21 +258,35 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
195 /* This IRQ chip can wake the system, set all child interrupts 258 /* This IRQ chip can wake the system, set all child interrupts
196 * in wake_enabled mask 259 * in wake_enabled mask
197 */ 260 */
198 gc->wake_enabled = 0xffffffff; 261 data->gc->wake_enabled = 0xffffffff;
199 ct->chip.irq_set_wake = irq_gc_set_wake; 262 ct->chip.irq_set_wake = irq_gc_set_wake;
200 } 263 }
201 264
202 pr_info("registered L2 intc (mem: 0x%p, parent irq: %d)\n", 265 pr_info("registered L2 intc (mem: 0x%p, parent irq: %d)\n",
203 data->base, data->parent_irq); 266 base, parent_irq);
204 267
205 return 0; 268 return 0;
206 269
207out_free_domain: 270out_free_domain:
208 irq_domain_remove(data->domain); 271 irq_domain_remove(data->domain);
209out_unmap: 272out_unmap:
210 iounmap(data->base); 273 iounmap(base);
211out_free: 274out_free:
212 kfree(data); 275 kfree(data);
213 return ret; 276 return ret;
214} 277}
215IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_intc_of_init); 278
279int __init brcmstb_l2_edge_intc_of_init(struct device_node *np,
280 struct device_node *parent)
281{
282 return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init);
283}
284IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_edge_intc_of_init);
285
286int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np,
287 struct device_node *parent)
288{
289 return brcmstb_l2_intc_of_init(np, parent, &l2_lvl_intc_init);
290}
291IRQCHIP_DECLARE(bcm7271_l2_intc, "brcm,bcm7271-l2-intc",
292 brcmstb_l2_lvl_intc_of_init);
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
index 9ae71804b5dd..30017df5b54c 100644
--- a/drivers/irqchip/irq-gic-common.c
+++ b/drivers/irqchip/irq-gic-common.c
@@ -40,8 +40,9 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
40 for (; quirks->desc; quirks++) { 40 for (; quirks->desc; quirks++) {
41 if (quirks->iidr != (quirks->mask & iidr)) 41 if (quirks->iidr != (quirks->mask & iidr))
42 continue; 42 continue;
43 quirks->init(data); 43 if (quirks->init(data))
44 pr_info("GIC: enabling workaround for %s\n", quirks->desc); 44 pr_info("GIC: enabling workaround for %s\n",
45 quirks->desc);
45 } 46 }
46} 47}
47 48
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
index 205e5fddf6da..3919cd7c5285 100644
--- a/drivers/irqchip/irq-gic-common.h
+++ b/drivers/irqchip/irq-gic-common.h
@@ -23,7 +23,7 @@
23 23
24struct gic_quirk { 24struct gic_quirk {
25 const char *desc; 25 const char *desc;
26 void (*init)(void *data); 26 bool (*init)(void *data);
27 u32 iidr; 27 u32 iidr;
28 u32 mask; 28 u32 mask;
29}; 29};
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index e88395605e32..4039e64cd342 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -83,6 +83,8 @@ struct its_baser {
83 u32 psz; 83 u32 psz;
84}; 84};
85 85
86struct its_device;
87
86/* 88/*
87 * The ITS structure - contains most of the infrastructure, with the 89 * The ITS structure - contains most of the infrastructure, with the
88 * top-level MSI domain, the command queue, the collections, and the 90 * top-level MSI domain, the command queue, the collections, and the
@@ -97,12 +99,18 @@ struct its_node {
97 struct its_cmd_block *cmd_write; 99 struct its_cmd_block *cmd_write;
98 struct its_baser tables[GITS_BASER_NR_REGS]; 100 struct its_baser tables[GITS_BASER_NR_REGS];
99 struct its_collection *collections; 101 struct its_collection *collections;
102 struct fwnode_handle *fwnode_handle;
103 u64 (*get_msi_base)(struct its_device *its_dev);
100 struct list_head its_device_list; 104 struct list_head its_device_list;
101 u64 flags; 105 u64 flags;
106 unsigned long list_nr;
102 u32 ite_size; 107 u32 ite_size;
103 u32 device_ids; 108 u32 device_ids;
104 int numa_node; 109 int numa_node;
110 unsigned int msi_domain_flags;
111 u32 pre_its_base; /* for Socionext Synquacer */
105 bool is_v4; 112 bool is_v4;
113 int vlpi_redist_offset;
106}; 114};
107 115
108#define ITS_ITT_ALIGN SZ_256 116#define ITS_ITT_ALIGN SZ_256
@@ -152,12 +160,6 @@ static DEFINE_SPINLOCK(its_lock);
152static struct rdists *gic_rdists; 160static struct rdists *gic_rdists;
153static struct irq_domain *its_parent; 161static struct irq_domain *its_parent;
154 162
155/*
156 * We have a maximum number of 16 ITSs in the whole system if we're
157 * using the ITSList mechanism
158 */
159#define ITS_LIST_MAX 16
160
161static unsigned long its_list_map; 163static unsigned long its_list_map;
162static u16 vmovp_seq_num; 164static u16 vmovp_seq_num;
163static DEFINE_RAW_SPINLOCK(vmovp_lock); 165static DEFINE_RAW_SPINLOCK(vmovp_lock);
@@ -272,10 +274,12 @@ struct its_cmd_block {
272#define ITS_CMD_QUEUE_SZ SZ_64K 274#define ITS_CMD_QUEUE_SZ SZ_64K
273#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block)) 275#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
274 276
275typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *, 277typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
278 struct its_cmd_block *,
276 struct its_cmd_desc *); 279 struct its_cmd_desc *);
277 280
278typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_cmd_block *, 281typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
282 struct its_cmd_block *,
279 struct its_cmd_desc *); 283 struct its_cmd_desc *);
280 284
281static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l) 285static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
@@ -379,7 +383,8 @@ static inline void its_fixup_cmd(struct its_cmd_block *cmd)
379 cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]); 383 cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
380} 384}
381 385
382static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd, 386static struct its_collection *its_build_mapd_cmd(struct its_node *its,
387 struct its_cmd_block *cmd,
383 struct its_cmd_desc *desc) 388 struct its_cmd_desc *desc)
384{ 389{
385 unsigned long itt_addr; 390 unsigned long itt_addr;
@@ -399,7 +404,8 @@ static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
399 return NULL; 404 return NULL;
400} 405}
401 406
402static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd, 407static struct its_collection *its_build_mapc_cmd(struct its_node *its,
408 struct its_cmd_block *cmd,
403 struct its_cmd_desc *desc) 409 struct its_cmd_desc *desc)
404{ 410{
405 its_encode_cmd(cmd, GITS_CMD_MAPC); 411 its_encode_cmd(cmd, GITS_CMD_MAPC);
@@ -412,7 +418,8 @@ static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
412 return desc->its_mapc_cmd.col; 418 return desc->its_mapc_cmd.col;
413} 419}
414 420
415static struct its_collection *its_build_mapti_cmd(struct its_cmd_block *cmd, 421static struct its_collection *its_build_mapti_cmd(struct its_node *its,
422 struct its_cmd_block *cmd,
416 struct its_cmd_desc *desc) 423 struct its_cmd_desc *desc)
417{ 424{
418 struct its_collection *col; 425 struct its_collection *col;
@@ -431,7 +438,8 @@ static struct its_collection *its_build_mapti_cmd(struct its_cmd_block *cmd,
431 return col; 438 return col;
432} 439}
433 440
434static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd, 441static struct its_collection *its_build_movi_cmd(struct its_node *its,
442 struct its_cmd_block *cmd,
435 struct its_cmd_desc *desc) 443 struct its_cmd_desc *desc)
436{ 444{
437 struct its_collection *col; 445 struct its_collection *col;
@@ -449,7 +457,8 @@ static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
449 return col; 457 return col;
450} 458}
451 459
452static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd, 460static struct its_collection *its_build_discard_cmd(struct its_node *its,
461 struct its_cmd_block *cmd,
453 struct its_cmd_desc *desc) 462 struct its_cmd_desc *desc)
454{ 463{
455 struct its_collection *col; 464 struct its_collection *col;
@@ -466,7 +475,8 @@ static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
466 return col; 475 return col;
467} 476}
468 477
469static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd, 478static struct its_collection *its_build_inv_cmd(struct its_node *its,
479 struct its_cmd_block *cmd,
470 struct its_cmd_desc *desc) 480 struct its_cmd_desc *desc)
471{ 481{
472 struct its_collection *col; 482 struct its_collection *col;
@@ -483,7 +493,8 @@ static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
483 return col; 493 return col;
484} 494}
485 495
486static struct its_collection *its_build_int_cmd(struct its_cmd_block *cmd, 496static struct its_collection *its_build_int_cmd(struct its_node *its,
497 struct its_cmd_block *cmd,
487 struct its_cmd_desc *desc) 498 struct its_cmd_desc *desc)
488{ 499{
489 struct its_collection *col; 500 struct its_collection *col;
@@ -500,7 +511,8 @@ static struct its_collection *its_build_int_cmd(struct its_cmd_block *cmd,
500 return col; 511 return col;
501} 512}
502 513
503static struct its_collection *its_build_clear_cmd(struct its_cmd_block *cmd, 514static struct its_collection *its_build_clear_cmd(struct its_node *its,
515 struct its_cmd_block *cmd,
504 struct its_cmd_desc *desc) 516 struct its_cmd_desc *desc)
505{ 517{
506 struct its_collection *col; 518 struct its_collection *col;
@@ -517,7 +529,8 @@ static struct its_collection *its_build_clear_cmd(struct its_cmd_block *cmd,
517 return col; 529 return col;
518} 530}
519 531
520static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd, 532static struct its_collection *its_build_invall_cmd(struct its_node *its,
533 struct its_cmd_block *cmd,
521 struct its_cmd_desc *desc) 534 struct its_cmd_desc *desc)
522{ 535{
523 its_encode_cmd(cmd, GITS_CMD_INVALL); 536 its_encode_cmd(cmd, GITS_CMD_INVALL);
@@ -528,7 +541,8 @@ static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
528 return NULL; 541 return NULL;
529} 542}
530 543
531static struct its_vpe *its_build_vinvall_cmd(struct its_cmd_block *cmd, 544static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
545 struct its_cmd_block *cmd,
532 struct its_cmd_desc *desc) 546 struct its_cmd_desc *desc)
533{ 547{
534 its_encode_cmd(cmd, GITS_CMD_VINVALL); 548 its_encode_cmd(cmd, GITS_CMD_VINVALL);
@@ -539,17 +553,20 @@ static struct its_vpe *its_build_vinvall_cmd(struct its_cmd_block *cmd,
539 return desc->its_vinvall_cmd.vpe; 553 return desc->its_vinvall_cmd.vpe;
540} 554}
541 555
542static struct its_vpe *its_build_vmapp_cmd(struct its_cmd_block *cmd, 556static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
557 struct its_cmd_block *cmd,
543 struct its_cmd_desc *desc) 558 struct its_cmd_desc *desc)
544{ 559{
545 unsigned long vpt_addr; 560 unsigned long vpt_addr;
561 u64 target;
546 562
547 vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); 563 vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
564 target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
548 565
549 its_encode_cmd(cmd, GITS_CMD_VMAPP); 566 its_encode_cmd(cmd, GITS_CMD_VMAPP);
550 its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id); 567 its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
551 its_encode_valid(cmd, desc->its_vmapp_cmd.valid); 568 its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
552 its_encode_target(cmd, desc->its_vmapp_cmd.col->target_address); 569 its_encode_target(cmd, target);
553 its_encode_vpt_addr(cmd, vpt_addr); 570 its_encode_vpt_addr(cmd, vpt_addr);
554 its_encode_vpt_size(cmd, LPI_NRBITS - 1); 571 its_encode_vpt_size(cmd, LPI_NRBITS - 1);
555 572
@@ -558,7 +575,8 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_cmd_block *cmd,
558 return desc->its_vmapp_cmd.vpe; 575 return desc->its_vmapp_cmd.vpe;
559} 576}
560 577
561static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd, 578static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
579 struct its_cmd_block *cmd,
562 struct its_cmd_desc *desc) 580 struct its_cmd_desc *desc)
563{ 581{
564 u32 db; 582 u32 db;
@@ -580,7 +598,8 @@ static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd,
580 return desc->its_vmapti_cmd.vpe; 598 return desc->its_vmapti_cmd.vpe;
581} 599}
582 600
583static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd, 601static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
602 struct its_cmd_block *cmd,
584 struct its_cmd_desc *desc) 603 struct its_cmd_desc *desc)
585{ 604{
586 u32 db; 605 u32 db;
@@ -602,14 +621,18 @@ static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd,
602 return desc->its_vmovi_cmd.vpe; 621 return desc->its_vmovi_cmd.vpe;
603} 622}
604 623
605static struct its_vpe *its_build_vmovp_cmd(struct its_cmd_block *cmd, 624static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
625 struct its_cmd_block *cmd,
606 struct its_cmd_desc *desc) 626 struct its_cmd_desc *desc)
607{ 627{
628 u64 target;
629
630 target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
608 its_encode_cmd(cmd, GITS_CMD_VMOVP); 631 its_encode_cmd(cmd, GITS_CMD_VMOVP);
609 its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num); 632 its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
610 its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list); 633 its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
611 its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id); 634 its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
612 its_encode_target(cmd, desc->its_vmovp_cmd.col->target_address); 635 its_encode_target(cmd, target);
613 636
614 its_fixup_cmd(cmd); 637 its_fixup_cmd(cmd);
615 638
@@ -688,9 +711,9 @@ static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
688 dsb(ishst); 711 dsb(ishst);
689} 712}
690 713
691static void its_wait_for_range_completion(struct its_node *its, 714static int its_wait_for_range_completion(struct its_node *its,
692 struct its_cmd_block *from, 715 struct its_cmd_block *from,
693 struct its_cmd_block *to) 716 struct its_cmd_block *to)
694{ 717{
695 u64 rd_idx, from_idx, to_idx; 718 u64 rd_idx, from_idx, to_idx;
696 u32 count = 1000000; /* 1s! */ 719 u32 count = 1000000; /* 1s! */
@@ -711,12 +734,15 @@ static void its_wait_for_range_completion(struct its_node *its,
711 734
712 count--; 735 count--;
713 if (!count) { 736 if (!count) {
714 pr_err_ratelimited("ITS queue timeout\n"); 737 pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n",
715 return; 738 from_idx, to_idx, rd_idx);
739 return -1;
716 } 740 }
717 cpu_relax(); 741 cpu_relax();
718 udelay(1); 742 udelay(1);
719 } 743 }
744
745 return 0;
720} 746}
721 747
722/* Warning, macro hell follows */ 748/* Warning, macro hell follows */
@@ -736,7 +762,7 @@ void name(struct its_node *its, \
736 raw_spin_unlock_irqrestore(&its->lock, flags); \ 762 raw_spin_unlock_irqrestore(&its->lock, flags); \
737 return; \ 763 return; \
738 } \ 764 } \
739 sync_obj = builder(cmd, desc); \ 765 sync_obj = builder(its, cmd, desc); \
740 its_flush_cmd(its, cmd); \ 766 its_flush_cmd(its, cmd); \
741 \ 767 \
742 if (sync_obj) { \ 768 if (sync_obj) { \
@@ -744,7 +770,7 @@ void name(struct its_node *its, \
744 if (!sync_cmd) \ 770 if (!sync_cmd) \
745 goto post; \ 771 goto post; \
746 \ 772 \
747 buildfn(sync_cmd, sync_obj); \ 773 buildfn(its, sync_cmd, sync_obj); \
748 its_flush_cmd(its, sync_cmd); \ 774 its_flush_cmd(its, sync_cmd); \
749 } \ 775 } \
750 \ 776 \
@@ -752,10 +778,12 @@ post: \
752 next_cmd = its_post_commands(its); \ 778 next_cmd = its_post_commands(its); \
753 raw_spin_unlock_irqrestore(&its->lock, flags); \ 779 raw_spin_unlock_irqrestore(&its->lock, flags); \
754 \ 780 \
755 its_wait_for_range_completion(its, cmd, next_cmd); \ 781 if (its_wait_for_range_completion(its, cmd, next_cmd)) \
782 pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
756} 783}
757 784
758static void its_build_sync_cmd(struct its_cmd_block *sync_cmd, 785static void its_build_sync_cmd(struct its_node *its,
786 struct its_cmd_block *sync_cmd,
759 struct its_collection *sync_col) 787 struct its_collection *sync_col)
760{ 788{
761 its_encode_cmd(sync_cmd, GITS_CMD_SYNC); 789 its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
@@ -767,7 +795,8 @@ static void its_build_sync_cmd(struct its_cmd_block *sync_cmd,
767static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t, 795static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
768 struct its_collection, its_build_sync_cmd) 796 struct its_collection, its_build_sync_cmd)
769 797
770static void its_build_vsync_cmd(struct its_cmd_block *sync_cmd, 798static void its_build_vsync_cmd(struct its_node *its,
799 struct its_cmd_block *sync_cmd,
771 struct its_vpe *sync_vpe) 800 struct its_vpe *sync_vpe)
772{ 801{
773 its_encode_cmd(sync_cmd, GITS_CMD_VSYNC); 802 its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
@@ -899,21 +928,16 @@ static void its_send_vmovi(struct its_device *dev, u32 id)
899 its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc); 928 its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
900} 929}
901 930
902static void its_send_vmapp(struct its_vpe *vpe, bool valid) 931static void its_send_vmapp(struct its_node *its,
932 struct its_vpe *vpe, bool valid)
903{ 933{
904 struct its_cmd_desc desc; 934 struct its_cmd_desc desc;
905 struct its_node *its;
906 935
907 desc.its_vmapp_cmd.vpe = vpe; 936 desc.its_vmapp_cmd.vpe = vpe;
908 desc.its_vmapp_cmd.valid = valid; 937 desc.its_vmapp_cmd.valid = valid;
938 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
909 939
910 list_for_each_entry(its, &its_nodes, entry) { 940 its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
911 if (!its->is_v4)
912 continue;
913
914 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
915 its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
916 }
917} 941}
918 942
919static void its_send_vmovp(struct its_vpe *vpe) 943static void its_send_vmovp(struct its_vpe *vpe)
@@ -951,6 +975,9 @@ static void its_send_vmovp(struct its_vpe *vpe)
951 if (!its->is_v4) 975 if (!its->is_v4)
952 continue; 976 continue;
953 977
978 if (!vpe->its_vm->vlpi_count[its->list_nr])
979 continue;
980
954 desc.its_vmovp_cmd.col = &its->collections[col_id]; 981 desc.its_vmovp_cmd.col = &its->collections[col_id];
955 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); 982 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
956 } 983 }
@@ -958,18 +985,12 @@ static void its_send_vmovp(struct its_vpe *vpe)
958 raw_spin_unlock_irqrestore(&vmovp_lock, flags); 985 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
959} 986}
960 987
961static void its_send_vinvall(struct its_vpe *vpe) 988static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
962{ 989{
963 struct its_cmd_desc desc; 990 struct its_cmd_desc desc;
964 struct its_node *its;
965 991
966 desc.its_vinvall_cmd.vpe = vpe; 992 desc.its_vinvall_cmd.vpe = vpe;
967 993 its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
968 list_for_each_entry(its, &its_nodes, entry) {
969 if (!its->is_v4)
970 continue;
971 its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
972 }
973} 994}
974 995
975/* 996/*
@@ -991,9 +1012,15 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
991 if (irqd_is_forwarded_to_vcpu(d)) { 1012 if (irqd_is_forwarded_to_vcpu(d)) {
992 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1013 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
993 u32 event = its_get_event_id(d); 1014 u32 event = its_get_event_id(d);
1015 struct its_vlpi_map *map;
994 1016
995 prop_page = its_dev->event_map.vm->vprop_page; 1017 prop_page = its_dev->event_map.vm->vprop_page;
996 hwirq = its_dev->event_map.vlpi_maps[event].vintid; 1018 map = &its_dev->event_map.vlpi_maps[event];
1019 hwirq = map->vintid;
1020
1021 /* Remember the updated property */
1022 map->properties &= ~clr;
1023 map->properties |= set | LPI_PROP_GROUP1;
997 } else { 1024 } else {
998 prop_page = gic_rdists->prop_page; 1025 prop_page = gic_rdists->prop_page;
999 hwirq = d->hwirq; 1026 hwirq = d->hwirq;
@@ -1099,6 +1126,13 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1099 return IRQ_SET_MASK_OK_DONE; 1126 return IRQ_SET_MASK_OK_DONE;
1100} 1127}
1101 1128
1129static u64 its_irq_get_msi_base(struct its_device *its_dev)
1130{
1131 struct its_node *its = its_dev->its;
1132
1133 return its->phys_base + GITS_TRANSLATER;
1134}
1135
1102static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) 1136static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
1103{ 1137{
1104 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1138 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
@@ -1106,7 +1140,7 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
1106 u64 addr; 1140 u64 addr;
1107 1141
1108 its = its_dev->its; 1142 its = its_dev->its;
1109 addr = its->phys_base + GITS_TRANSLATER; 1143 addr = its->get_msi_base(its_dev);
1110 1144
1111 msg->address_lo = lower_32_bits(addr); 1145 msg->address_lo = lower_32_bits(addr);
1112 msg->address_hi = upper_32_bits(addr); 1146 msg->address_hi = upper_32_bits(addr);
@@ -1133,6 +1167,60 @@ static int its_irq_set_irqchip_state(struct irq_data *d,
1133 return 0; 1167 return 0;
1134} 1168}
1135 1169
1170static void its_map_vm(struct its_node *its, struct its_vm *vm)
1171{
1172 unsigned long flags;
1173
1174 /* Not using the ITS list? Everything is always mapped. */
1175 if (!its_list_map)
1176 return;
1177
1178 raw_spin_lock_irqsave(&vmovp_lock, flags);
1179
1180 /*
1181 * If the VM wasn't mapped yet, iterate over the vpes and get
1182 * them mapped now.
1183 */
1184 vm->vlpi_count[its->list_nr]++;
1185
1186 if (vm->vlpi_count[its->list_nr] == 1) {
1187 int i;
1188
1189 for (i = 0; i < vm->nr_vpes; i++) {
1190 struct its_vpe *vpe = vm->vpes[i];
1191 struct irq_data *d = irq_get_irq_data(vpe->irq);
1192
1193 /* Map the VPE to the first possible CPU */
1194 vpe->col_idx = cpumask_first(cpu_online_mask);
1195 its_send_vmapp(its, vpe, true);
1196 its_send_vinvall(its, vpe);
1197 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
1198 }
1199 }
1200
1201 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1202}
1203
1204static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
1205{
1206 unsigned long flags;
1207
1208 /* Not using the ITS list? Everything is always mapped. */
1209 if (!its_list_map)
1210 return;
1211
1212 raw_spin_lock_irqsave(&vmovp_lock, flags);
1213
1214 if (!--vm->vlpi_count[its->list_nr]) {
1215 int i;
1216
1217 for (i = 0; i < vm->nr_vpes; i++)
1218 its_send_vmapp(its, vm->vpes[i], false);
1219 }
1220
1221 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1222}
1223
1136static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) 1224static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1137{ 1225{
1138 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1226 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
@@ -1168,12 +1256,23 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1168 /* Already mapped, move it around */ 1256 /* Already mapped, move it around */
1169 its_send_vmovi(its_dev, event); 1257 its_send_vmovi(its_dev, event);
1170 } else { 1258 } else {
1259 /* Ensure all the VPEs are mapped on this ITS */
1260 its_map_vm(its_dev->its, info->map->vm);
1261
1262 /*
1263 * Flag the interrupt as forwarded so that we can
1264 * start poking the virtual property table.
1265 */
1266 irqd_set_forwarded_to_vcpu(d);
1267
1268 /* Write out the property to the prop table */
1269 lpi_write_config(d, 0xff, info->map->properties);
1270
1171 /* Drop the physical mapping */ 1271 /* Drop the physical mapping */
1172 its_send_discard(its_dev, event); 1272 its_send_discard(its_dev, event);
1173 1273
1174 /* and install the virtual one */ 1274 /* and install the virtual one */
1175 its_send_vmapti(its_dev, event); 1275 its_send_vmapti(its_dev, event);
1176 irqd_set_forwarded_to_vcpu(d);
1177 1276
1178 /* Increment the number of VLPIs */ 1277 /* Increment the number of VLPIs */
1179 its_dev->event_map.nr_vlpis++; 1278 its_dev->event_map.nr_vlpis++;
@@ -1229,6 +1328,9 @@ static int its_vlpi_unmap(struct irq_data *d)
1229 LPI_PROP_ENABLED | 1328 LPI_PROP_ENABLED |
1230 LPI_PROP_GROUP1)); 1329 LPI_PROP_GROUP1));
1231 1330
1331 /* Potentially unmap the VM from this ITS */
1332 its_unmap_vm(its_dev->its, its_dev->event_map.vm);
1333
1232 /* 1334 /*
1233 * Drop the refcount and make the device available again if 1335 * Drop the refcount and make the device available again if
1234 * this was the last VLPI. 1336 * this was the last VLPI.
@@ -1669,23 +1771,14 @@ static void its_free_tables(struct its_node *its)
1669 1771
1670static int its_alloc_tables(struct its_node *its) 1772static int its_alloc_tables(struct its_node *its)
1671{ 1773{
1672 u64 typer = gic_read_typer(its->base + GITS_TYPER);
1673 u32 ids = GITS_TYPER_DEVBITS(typer);
1674 u64 shr = GITS_BASER_InnerShareable; 1774 u64 shr = GITS_BASER_InnerShareable;
1675 u64 cache = GITS_BASER_RaWaWb; 1775 u64 cache = GITS_BASER_RaWaWb;
1676 u32 psz = SZ_64K; 1776 u32 psz = SZ_64K;
1677 int err, i; 1777 int err, i;
1678 1778
1679 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) { 1779 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
1680 /* 1780 /* erratum 24313: ignore memory access type */
1681 * erratum 22375: only alloc 8MB table size 1781 cache = GITS_BASER_nCnB;
1682 * erratum 24313: ignore memory access type
1683 */
1684 cache = GITS_BASER_nCnB;
1685 ids = 0x14; /* 20 bits, 8MB */
1686 }
1687
1688 its->device_ids = ids;
1689 1782
1690 for (i = 0; i < GITS_BASER_NR_REGS; i++) { 1783 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
1691 struct its_baser *baser = its->tables + i; 1784 struct its_baser *baser = its->tables + i;
@@ -2209,8 +2302,8 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2209 return 0; 2302 return 0;
2210} 2303}
2211 2304
2212static void its_irq_domain_activate(struct irq_domain *domain, 2305static int its_irq_domain_activate(struct irq_domain *domain,
2213 struct irq_data *d) 2306 struct irq_data *d, bool early)
2214{ 2307{
2215 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 2308 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2216 u32 event = its_get_event_id(d); 2309 u32 event = its_get_event_id(d);
@@ -2228,6 +2321,7 @@ static void its_irq_domain_activate(struct irq_domain *domain,
2228 2321
2229 /* Map the GIC IRQ and event to the device */ 2322 /* Map the GIC IRQ and event to the device */
2230 its_send_mapti(its_dev, d->hwirq, event); 2323 its_send_mapti(its_dev, d->hwirq, event);
2324 return 0;
2231} 2325}
2232 2326
2233static void its_irq_domain_deactivate(struct irq_domain *domain, 2327static void its_irq_domain_deactivate(struct irq_domain *domain,
@@ -2394,6 +2488,8 @@ static int its_vpe_set_affinity(struct irq_data *d,
2394 its_vpe_db_proxy_move(vpe, from, cpu); 2488 its_vpe_db_proxy_move(vpe, from, cpu);
2395 } 2489 }
2396 2490
2491 irq_data_update_effective_affinity(d, cpumask_of(cpu));
2492
2397 return IRQ_SET_MASK_OK_DONE; 2493 return IRQ_SET_MASK_OK_DONE;
2398} 2494}
2399 2495
@@ -2461,6 +2557,26 @@ static void its_vpe_deschedule(struct its_vpe *vpe)
2461 } 2557 }
2462} 2558}
2463 2559
2560static void its_vpe_invall(struct its_vpe *vpe)
2561{
2562 struct its_node *its;
2563
2564 list_for_each_entry(its, &its_nodes, entry) {
2565 if (!its->is_v4)
2566 continue;
2567
2568 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
2569 continue;
2570
2571 /*
2572 * Sending a VINVALL to a single ITS is enough, as all
2573 * we need is to reach the redistributors.
2574 */
2575 its_send_vinvall(its, vpe);
2576 return;
2577 }
2578}
2579
2464static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) 2580static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
2465{ 2581{
2466 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 2582 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
@@ -2476,7 +2592,7 @@ static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
2476 return 0; 2592 return 0;
2477 2593
2478 case INVALL_VPE: 2594 case INVALL_VPE:
2479 its_send_vinvall(vpe); 2595 its_vpe_invall(vpe);
2480 return 0; 2596 return 0;
2481 2597
2482 default: 2598 default:
@@ -2701,23 +2817,51 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
2701 return err; 2817 return err;
2702} 2818}
2703 2819
2704static void its_vpe_irq_domain_activate(struct irq_domain *domain, 2820static int its_vpe_irq_domain_activate(struct irq_domain *domain,
2705 struct irq_data *d) 2821 struct irq_data *d, bool early)
2706{ 2822{
2707 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 2823 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2824 struct its_node *its;
2825
2826 /* If we use the list map, we issue VMAPP on demand... */
2827 if (its_list_map)
2828 return 0;
2708 2829
2709 /* Map the VPE to the first possible CPU */ 2830 /* Map the VPE to the first possible CPU */
2710 vpe->col_idx = cpumask_first(cpu_online_mask); 2831 vpe->col_idx = cpumask_first(cpu_online_mask);
2711 its_send_vmapp(vpe, true); 2832
2712 its_send_vinvall(vpe); 2833 list_for_each_entry(its, &its_nodes, entry) {
2834 if (!its->is_v4)
2835 continue;
2836
2837 its_send_vmapp(its, vpe, true);
2838 its_send_vinvall(its, vpe);
2839 }
2840
2841 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
2842
2843 return 0;
2713} 2844}
2714 2845
2715static void its_vpe_irq_domain_deactivate(struct irq_domain *domain, 2846static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
2716 struct irq_data *d) 2847 struct irq_data *d)
2717{ 2848{
2718 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 2849 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2850 struct its_node *its;
2851
2852 /*
2853 * If we use the list map, we unmap the VPE once no VLPIs are
2854 * associated with the VM.
2855 */
2856 if (its_list_map)
2857 return;
2719 2858
2720 its_send_vmapp(vpe, false); 2859 list_for_each_entry(its, &its_nodes, entry) {
2860 if (!its->is_v4)
2861 continue;
2862
2863 its_send_vmapp(its, vpe, false);
2864 }
2721} 2865}
2722 2866
2723static const struct irq_domain_ops its_vpe_domain_ops = { 2867static const struct irq_domain_ops its_vpe_domain_ops = {
@@ -2760,26 +2904,85 @@ static int its_force_quiescent(void __iomem *base)
2760 } 2904 }
2761} 2905}
2762 2906
2763static void __maybe_unused its_enable_quirk_cavium_22375(void *data) 2907static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
2764{ 2908{
2765 struct its_node *its = data; 2909 struct its_node *its = data;
2766 2910
2911 /* erratum 22375: only alloc 8MB table size */
2912 its->device_ids = 0x14; /* 20 bits, 8MB */
2767 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; 2913 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
2914
2915 return true;
2768} 2916}
2769 2917
2770static void __maybe_unused its_enable_quirk_cavium_23144(void *data) 2918static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
2771{ 2919{
2772 struct its_node *its = data; 2920 struct its_node *its = data;
2773 2921
2774 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; 2922 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
2923
2924 return true;
2775} 2925}
2776 2926
2777static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) 2927static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
2778{ 2928{
2779 struct its_node *its = data; 2929 struct its_node *its = data;
2780 2930
2781 /* On QDF2400, the size of the ITE is 16Bytes */ 2931 /* On QDF2400, the size of the ITE is 16Bytes */
2782 its->ite_size = 16; 2932 its->ite_size = 16;
2933
2934 return true;
2935}
2936
2937static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
2938{
2939 struct its_node *its = its_dev->its;
2940
2941 /*
2942 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
2943 * which maps 32-bit writes targeted at a separate window of
2944 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
2945 * with device ID taken from bits [device_id_bits + 1:2] of
2946 * the window offset.
2947 */
2948 return its->pre_its_base + (its_dev->device_id << 2);
2949}
2950
2951static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
2952{
2953 struct its_node *its = data;
2954 u32 pre_its_window[2];
2955 u32 ids;
2956
2957 if (!fwnode_property_read_u32_array(its->fwnode_handle,
2958 "socionext,synquacer-pre-its",
2959 pre_its_window,
2960 ARRAY_SIZE(pre_its_window))) {
2961
2962 its->pre_its_base = pre_its_window[0];
2963 its->get_msi_base = its_irq_get_msi_base_pre_its;
2964
2965 ids = ilog2(pre_its_window[1]) - 2;
2966 if (its->device_ids > ids)
2967 its->device_ids = ids;
2968
2969 /* the pre-ITS breaks isolation, so disable MSI remapping */
2970 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
2971 return true;
2972 }
2973 return false;
2974}
2975
2976static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
2977{
2978 struct its_node *its = data;
2979
2980 /*
2981 * Hip07 insists on using the wrong address for the VLPI
2982 * page. Trick it into doing the right thing...
2983 */
2984 its->vlpi_redist_offset = SZ_128K;
2985 return true;
2783} 2986}
2784 2987
2785static const struct gic_quirk its_quirks[] = { 2988static const struct gic_quirk its_quirks[] = {
@@ -2807,6 +3010,27 @@ static const struct gic_quirk its_quirks[] = {
2807 .init = its_enable_quirk_qdf2400_e0065, 3010 .init = its_enable_quirk_qdf2400_e0065,
2808 }, 3011 },
2809#endif 3012#endif
3013#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
3014 {
3015 /*
3016 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
3017 * implementation, but with a 'pre-ITS' added that requires
3018 * special handling in software.
3019 */
3020 .desc = "ITS: Socionext Synquacer pre-ITS",
3021 .iidr = 0x0001143b,
3022 .mask = 0xffffffff,
3023 .init = its_enable_quirk_socionext_synquacer,
3024 },
3025#endif
3026#ifdef CONFIG_HISILICON_ERRATUM_161600802
3027 {
3028 .desc = "ITS: Hip07 erratum 161600802",
3029 .iidr = 0x00000004,
3030 .mask = 0xffffffff,
3031 .init = its_enable_quirk_hip07_161600802,
3032 },
3033#endif
2810 { 3034 {
2811 } 3035 }
2812}; 3036};
@@ -2835,7 +3059,7 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
2835 3059
2836 inner_domain->parent = its_parent; 3060 inner_domain->parent = its_parent;
2837 irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); 3061 irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
2838 inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP; 3062 inner_domain->flags |= its->msi_domain_flags;
2839 info->ops = &its_msi_domain_ops; 3063 info->ops = &its_msi_domain_ops;
2840 info->data = its; 3064 info->data = its;
2841 inner_domain->host_data = info; 3065 inner_domain->host_data = info;
@@ -2896,8 +3120,8 @@ static int __init its_compute_its_list_map(struct resource *res,
2896 * locking. Should this change, we should address 3120 * locking. Should this change, we should address
2897 * this. 3121 * this.
2898 */ 3122 */
2899 its_number = find_first_zero_bit(&its_list_map, ITS_LIST_MAX); 3123 its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
2900 if (its_number >= ITS_LIST_MAX) { 3124 if (its_number >= GICv4_ITS_LIST_MAX) {
2901 pr_err("ITS@%pa: No ITSList entry available!\n", 3125 pr_err("ITS@%pa: No ITSList entry available!\n",
2902 &res->start); 3126 &res->start);
2903 return -EINVAL; 3127 return -EINVAL;
@@ -2965,6 +3189,7 @@ static int __init its_probe_one(struct resource *res,
2965 its->base = its_base; 3189 its->base = its_base;
2966 its->phys_base = res->start; 3190 its->phys_base = res->start;
2967 its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer); 3191 its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
3192 its->device_ids = GITS_TYPER_DEVBITS(typer);
2968 its->is_v4 = !!(typer & GITS_TYPER_VLPIS); 3193 its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
2969 if (its->is_v4) { 3194 if (its->is_v4) {
2970 if (!(typer & GITS_TYPER_VMOVP)) { 3195 if (!(typer & GITS_TYPER_VMOVP)) {
@@ -2972,6 +3197,8 @@ static int __init its_probe_one(struct resource *res,
2972 if (err < 0) 3197 if (err < 0)
2973 goto out_free_its; 3198 goto out_free_its;
2974 3199
3200 its->list_nr = err;
3201
2975 pr_info("ITS@%pa: Using ITS number %d\n", 3202 pr_info("ITS@%pa: Using ITS number %d\n",
2976 &res->start, err); 3203 &res->start, err);
2977 } else { 3204 } else {
@@ -2988,6 +3215,9 @@ static int __init its_probe_one(struct resource *res,
2988 goto out_free_its; 3215 goto out_free_its;
2989 } 3216 }
2990 its->cmd_write = its->cmd_base; 3217 its->cmd_write = its->cmd_base;
3218 its->fwnode_handle = handle;
3219 its->get_msi_base = its_irq_get_msi_base;
3220 its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;
2991 3221
2992 its_enable_quirks(its); 3222 its_enable_quirks(its);
2993 3223
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index b5df99c6f680..b54b55597ffb 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -55,6 +55,7 @@ struct gic_chip_data {
55 struct irq_domain *domain; 55 struct irq_domain *domain;
56 u64 redist_stride; 56 u64 redist_stride;
57 u32 nr_redist_regions; 57 u32 nr_redist_regions;
58 bool has_rss;
58 unsigned int irq_nr; 59 unsigned int irq_nr;
59 struct partition_desc *ppi_descs[16]; 60 struct partition_desc *ppi_descs[16];
60}; 61};
@@ -63,7 +64,9 @@ static struct gic_chip_data gic_data __read_mostly;
63static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE; 64static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;
64 65
65static struct gic_kvm_info gic_v3_kvm_info; 66static struct gic_kvm_info gic_v3_kvm_info;
67static DEFINE_PER_CPU(bool, has_rss);
66 68
69#define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4)
67#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist)) 70#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist))
68#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) 71#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
69#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K) 72#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
@@ -526,6 +529,10 @@ static void gic_update_vlpi_properties(void)
526 529
527static void gic_cpu_sys_reg_init(void) 530static void gic_cpu_sys_reg_init(void)
528{ 531{
532 int i, cpu = smp_processor_id();
533 u64 mpidr = cpu_logical_map(cpu);
534 u64 need_rss = MPIDR_RS(mpidr);
535
529 /* 536 /*
530 * Need to check that the SRE bit has actually been set. If 537 * Need to check that the SRE bit has actually been set. If
531 * not, it means that SRE is disabled at EL2. We're going to 538 * not, it means that SRE is disabled at EL2. We're going to
@@ -557,6 +564,30 @@ static void gic_cpu_sys_reg_init(void)
557 564
558 /* ... and let's hit the road... */ 565 /* ... and let's hit the road... */
559 gic_write_grpen1(1); 566 gic_write_grpen1(1);
567
568 /* Keep the RSS capability status in per_cpu variable */
569 per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);
570
571 /* Check all the CPUs have capable of sending SGIs to other CPUs */
572 for_each_online_cpu(i) {
573 bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);
574
575 need_rss |= MPIDR_RS(cpu_logical_map(i));
576 if (need_rss && (!have_rss))
577 pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
578 cpu, (unsigned long)mpidr,
579 i, (unsigned long)cpu_logical_map(i));
580 }
581
582 /**
583 * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
584 * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
585 * UNPREDICTABLE choice of :
586 * - The write is ignored.
587 * - The RS field is treated as 0.
588 */
589 if (need_rss && (!gic_data.has_rss))
590 pr_crit_once("RSS is required but GICD doesn't support it\n");
560} 591}
561 592
562static int gic_dist_supports_lpis(void) 593static int gic_dist_supports_lpis(void)
@@ -591,6 +622,9 @@ static void gic_cpu_init(void)
591 622
592#ifdef CONFIG_SMP 623#ifdef CONFIG_SMP
593 624
625#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
626#define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL)
627
594static int gic_starting_cpu(unsigned int cpu) 628static int gic_starting_cpu(unsigned int cpu)
595{ 629{
596 gic_cpu_init(); 630 gic_cpu_init();
@@ -605,13 +639,6 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
605 u16 tlist = 0; 639 u16 tlist = 0;
606 640
607 while (cpu < nr_cpu_ids) { 641 while (cpu < nr_cpu_ids) {
608 /*
609 * If we ever get a cluster of more than 16 CPUs, just
610 * scream and skip that CPU.
611 */
612 if (WARN_ON((mpidr & 0xff) >= 16))
613 goto out;
614
615 tlist |= 1 << (mpidr & 0xf); 642 tlist |= 1 << (mpidr & 0xf);
616 643
617 next_cpu = cpumask_next(cpu, mask); 644 next_cpu = cpumask_next(cpu, mask);
@@ -621,7 +648,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
621 648
622 mpidr = cpu_logical_map(cpu); 649 mpidr = cpu_logical_map(cpu);
623 650
624 if (cluster_id != (mpidr & ~0xffUL)) { 651 if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
625 cpu--; 652 cpu--;
626 goto out; 653 goto out;
627 } 654 }
@@ -643,6 +670,7 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
643 MPIDR_TO_SGI_AFFINITY(cluster_id, 2) | 670 MPIDR_TO_SGI_AFFINITY(cluster_id, 2) |
644 irq << ICC_SGI1R_SGI_ID_SHIFT | 671 irq << ICC_SGI1R_SGI_ID_SHIFT |
645 MPIDR_TO_SGI_AFFINITY(cluster_id, 1) | 672 MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
673 MPIDR_TO_SGI_RS(cluster_id) |
646 tlist << ICC_SGI1R_TARGET_LIST_SHIFT); 674 tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
647 675
648 pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); 676 pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
@@ -663,7 +691,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
663 smp_wmb(); 691 smp_wmb();
664 692
665 for_each_cpu(cpu, mask) { 693 for_each_cpu(cpu, mask) {
666 unsigned long cluster_id = cpu_logical_map(cpu) & ~0xffUL; 694 u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
667 u16 tlist; 695 u16 tlist;
668 696
669 tlist = gic_compute_target_list(&cpu, mask, cluster_id); 697 tlist = gic_compute_target_list(&cpu, mask, cluster_id);
@@ -1007,6 +1035,10 @@ static int __init gic_init_bases(void __iomem *dist_base,
1007 goto out_free; 1035 goto out_free;
1008 } 1036 }
1009 1037
1038 gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
1039 pr_info("Distributor has %sRange Selector support\n",
1040 gic_data.has_rss ? "" : "no ");
1041
1010 set_handle_irq(gic_handle_irq); 1042 set_handle_irq(gic_handle_irq);
1011 1043
1012 gic_update_vlpi_properties(); 1044 gic_update_vlpi_properties();
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 651d726e8b12..f641e8e2c78d 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1256,6 +1256,19 @@ static void gic_teardown(struct gic_chip_data *gic)
1256 1256
1257#ifdef CONFIG_OF 1257#ifdef CONFIG_OF
1258static int gic_cnt __initdata; 1258static int gic_cnt __initdata;
1259static bool gicv2_force_probe;
1260
1261static int __init gicv2_force_probe_cfg(char *buf)
1262{
1263 return strtobool(buf, &gicv2_force_probe);
1264}
1265early_param("irqchip.gicv2_force_probe", gicv2_force_probe_cfg);
1266
1267static bool gic_check_gicv2(void __iomem *base)
1268{
1269 u32 val = readl_relaxed(base + GIC_CPU_IDENT);
1270 return (val & 0xff0fff) == 0x02043B;
1271}
1259 1272
1260static bool gic_check_eoimode(struct device_node *node, void __iomem **base) 1273static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
1261{ 1274{
@@ -1265,20 +1278,60 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
1265 1278
1266 if (!is_hyp_mode_available()) 1279 if (!is_hyp_mode_available())
1267 return false; 1280 return false;
1268 if (resource_size(&cpuif_res) < SZ_8K) 1281 if (resource_size(&cpuif_res) < SZ_8K) {
1269 return false; 1282 void __iomem *alt;
1270 if (resource_size(&cpuif_res) == SZ_128K) { 1283 /*
1271 u32 val_low, val_high; 1284 * Check for a stupid firmware that only exposes the
1285 * first page of a GICv2.
1286 */
1287 if (!gic_check_gicv2(*base))
1288 return false;
1272 1289
1290 if (!gicv2_force_probe) {
1291 pr_warn("GIC: GICv2 detected, but range too small and irqchip.gicv2_force_probe not set\n");
1292 return false;
1293 }
1294
1295 alt = ioremap(cpuif_res.start, SZ_8K);
1296 if (!alt)
1297 return false;
1298 if (!gic_check_gicv2(alt + SZ_4K)) {
1299 /*
1300 * The first page was that of a GICv2, and
1301 * the second was *something*. Let's trust it
1302 * to be a GICv2, and update the mapping.
1303 */
1304 pr_warn("GIC: GICv2 at %pa, but range is too small (broken DT?), assuming 8kB\n",
1305 &cpuif_res.start);
1306 iounmap(*base);
1307 *base = alt;
1308 return true;
1309 }
1310
1311 /*
1312 * We detected *two* initial GICv2 pages in a
1313 * row. Could be a GICv2 aliased over two 64kB
1314 * pages. Update the resource, map the iospace, and
1315 * pray.
1316 */
1317 iounmap(alt);
1318 alt = ioremap(cpuif_res.start, SZ_128K);
1319 if (!alt)
1320 return false;
1321 pr_warn("GIC: Aliased GICv2 at %pa, trying to find the canonical range over 128kB\n",
1322 &cpuif_res.start);
1323 cpuif_res.end = cpuif_res.start + SZ_128K -1;
1324 iounmap(*base);
1325 *base = alt;
1326 }
1327 if (resource_size(&cpuif_res) == SZ_128K) {
1273 /* 1328 /*
1274 * Verify that we have the first 4kB of a GIC400 1329 * Verify that we have the first 4kB of a GICv2
1275 * aliased over the first 64kB by checking the 1330 * aliased over the first 64kB by checking the
1276 * GICC_IIDR register on both ends. 1331 * GICC_IIDR register on both ends.
1277 */ 1332 */
1278 val_low = readl_relaxed(*base + GIC_CPU_IDENT); 1333 if (!gic_check_gicv2(*base) ||
1279 val_high = readl_relaxed(*base + GIC_CPU_IDENT + 0xf000); 1334 !gic_check_gicv2(*base + 0xf000))
1280 if ((val_low & 0xffff0fff) != 0x0202043B ||
1281 val_low != val_high)
1282 return false; 1335 return false;
1283 1336
1284 /* 1337 /*
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
new file mode 100644
index 000000000000..a59bdbc0b9bb
--- /dev/null
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -0,0 +1,419 @@
1/*
2 * Copyright (c) 2015 Endless Mobile, Inc.
3 * Author: Carlo Caione <carlo@endlessm.com>
4 * Copyright (c) 2016 BayLibre, SAS.
5 * Author: Jerome Brunet <jbrunet@baylibre.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 * The full GNU General Public License is included in this distribution
19 * in the file called COPYING.
20 */
21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
24#include <linux/io.h>
25#include <linux/module.h>
26#include <linux/irq.h>
27#include <linux/irqdomain.h>
28#include <linux/irqchip.h>
29#include <linux/of.h>
30#include <linux/of_address.h>
31
32#define NUM_CHANNEL 8
33#define MAX_INPUT_MUX 256
34
35#define REG_EDGE_POL 0x00
36#define REG_PIN_03_SEL 0x04
37#define REG_PIN_47_SEL 0x08
38#define REG_FILTER_SEL 0x0c
39
40#define REG_EDGE_POL_MASK(x) (BIT(x) | BIT(16 + (x)))
41#define REG_EDGE_POL_EDGE(x) BIT(x)
42#define REG_EDGE_POL_LOW(x) BIT(16 + (x))
43#define REG_PIN_SEL_SHIFT(x) (((x) % 4) * 8)
44#define REG_FILTER_SEL_SHIFT(x) ((x) * 4)
45
46struct meson_gpio_irq_params {
47 unsigned int nr_hwirq;
48};
49
50static const struct meson_gpio_irq_params meson8_params = {
51 .nr_hwirq = 134,
52};
53
54static const struct meson_gpio_irq_params meson8b_params = {
55 .nr_hwirq = 119,
56};
57
58static const struct meson_gpio_irq_params gxbb_params = {
59 .nr_hwirq = 133,
60};
61
62static const struct meson_gpio_irq_params gxl_params = {
63 .nr_hwirq = 110,
64};
65
66static const struct of_device_id meson_irq_gpio_matches[] = {
67 { .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params },
68 { .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params },
69 { .compatible = "amlogic,meson-gxbb-gpio-intc", .data = &gxbb_params },
70 { .compatible = "amlogic,meson-gxl-gpio-intc", .data = &gxl_params },
71 { }
72};
73
74struct meson_gpio_irq_controller {
75 unsigned int nr_hwirq;
76 void __iomem *base;
77 u32 channel_irqs[NUM_CHANNEL];
78 DECLARE_BITMAP(channel_map, NUM_CHANNEL);
79 spinlock_t lock;
80};
81
82static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,
83 unsigned int reg, u32 mask, u32 val)
84{
85 u32 tmp;
86
87 tmp = readl_relaxed(ctl->base + reg);
88 tmp &= ~mask;
89 tmp |= val;
90 writel_relaxed(tmp, ctl->base + reg);
91}
92
93static unsigned int meson_gpio_irq_channel_to_reg(unsigned int channel)
94{
95 return (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL;
96}
97
98static int
99meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
100 unsigned long hwirq,
101 u32 **channel_hwirq)
102{
103 unsigned int reg, idx;
104
105 spin_lock(&ctl->lock);
106
107 /* Find a free channel */
108 idx = find_first_zero_bit(ctl->channel_map, NUM_CHANNEL);
109 if (idx >= NUM_CHANNEL) {
110 spin_unlock(&ctl->lock);
111 pr_err("No channel available\n");
112 return -ENOSPC;
113 }
114
115 /* Mark the channel as used */
116 set_bit(idx, ctl->channel_map);
117
118 /*
119 * Setup the mux of the channel to route the signal of the pad
120 * to the appropriate input of the GIC
121 */
122 reg = meson_gpio_irq_channel_to_reg(idx);
123 meson_gpio_irq_update_bits(ctl, reg,
124 0xff << REG_PIN_SEL_SHIFT(idx),
125 hwirq << REG_PIN_SEL_SHIFT(idx));
126
127 /*
128 * Get the hwirq number assigned to this channel through
129 * a pointer the channel_irq table. The added benifit of this
130 * method is that we can also retrieve the channel index with
131 * it, using the table base.
132 */
133 *channel_hwirq = &(ctl->channel_irqs[idx]);
134
135 spin_unlock(&ctl->lock);
136
137 pr_debug("hwirq %lu assigned to channel %d - irq %u\n",
138 hwirq, idx, **channel_hwirq);
139
140 return 0;
141}
142
143static unsigned int
144meson_gpio_irq_get_channel_idx(struct meson_gpio_irq_controller *ctl,
145 u32 *channel_hwirq)
146{
147 return channel_hwirq - ctl->channel_irqs;
148}
149
150static void
151meson_gpio_irq_release_channel(struct meson_gpio_irq_controller *ctl,
152 u32 *channel_hwirq)
153{
154 unsigned int idx;
155
156 idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
157 clear_bit(idx, ctl->channel_map);
158}
159
160static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
161 unsigned int type,
162 u32 *channel_hwirq)
163{
164 u32 val = 0;
165 unsigned int idx;
166
167 idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
168
169 /*
170 * The controller has a filter block to operate in either LEVEL or
171 * EDGE mode, then signal is sent to the GIC. To enable LEVEL_LOW and
172 * EDGE_FALLING support (which the GIC does not support), the filter
173 * block is also able to invert the input signal it gets before
174 * providing it to the GIC.
175 */
176 type &= IRQ_TYPE_SENSE_MASK;
177
178 if (type == IRQ_TYPE_EDGE_BOTH)
179 return -EINVAL;
180
181 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
182 val |= REG_EDGE_POL_EDGE(idx);
183
184 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))
185 val |= REG_EDGE_POL_LOW(idx);
186
187 spin_lock(&ctl->lock);
188
189 meson_gpio_irq_update_bits(ctl, REG_EDGE_POL,
190 REG_EDGE_POL_MASK(idx), val);
191
192 spin_unlock(&ctl->lock);
193
194 return 0;
195}
196
197static unsigned int meson_gpio_irq_type_output(unsigned int type)
198{
199 unsigned int sense = type & IRQ_TYPE_SENSE_MASK;
200
201 type &= ~IRQ_TYPE_SENSE_MASK;
202
203 /*
204 * The polarity of the signal provided to the GIC should always
205 * be high.
206 */
207 if (sense & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
208 type |= IRQ_TYPE_LEVEL_HIGH;
209 else if (sense & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
210 type |= IRQ_TYPE_EDGE_RISING;
211
212 return type;
213}
214
215static int meson_gpio_irq_set_type(struct irq_data *data, unsigned int type)
216{
217 struct meson_gpio_irq_controller *ctl = data->domain->host_data;
218 u32 *channel_hwirq = irq_data_get_irq_chip_data(data);
219 int ret;
220
221 ret = meson_gpio_irq_type_setup(ctl, type, channel_hwirq);
222 if (ret)
223 return ret;
224
225 return irq_chip_set_type_parent(data,
226 meson_gpio_irq_type_output(type));
227}
228
229static struct irq_chip meson_gpio_irq_chip = {
230 .name = "meson-gpio-irqchip",
231 .irq_mask = irq_chip_mask_parent,
232 .irq_unmask = irq_chip_unmask_parent,
233 .irq_eoi = irq_chip_eoi_parent,
234 .irq_set_type = meson_gpio_irq_set_type,
235 .irq_retrigger = irq_chip_retrigger_hierarchy,
236#ifdef CONFIG_SMP
237 .irq_set_affinity = irq_chip_set_affinity_parent,
238#endif
239 .flags = IRQCHIP_SET_TYPE_MASKED,
240};
241
242static int meson_gpio_irq_domain_translate(struct irq_domain *domain,
243 struct irq_fwspec *fwspec,
244 unsigned long *hwirq,
245 unsigned int *type)
246{
247 if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
248 *hwirq = fwspec->param[0];
249 *type = fwspec->param[1];
250 return 0;
251 }
252
253 return -EINVAL;
254}
255
256static int meson_gpio_irq_allocate_gic_irq(struct irq_domain *domain,
257 unsigned int virq,
258 u32 hwirq,
259 unsigned int type)
260{
261 struct irq_fwspec fwspec;
262
263 fwspec.fwnode = domain->parent->fwnode;
264 fwspec.param_count = 3;
265 fwspec.param[0] = 0; /* SPI */
266 fwspec.param[1] = hwirq;
267 fwspec.param[2] = meson_gpio_irq_type_output(type);
268
269 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
270}
271
272static int meson_gpio_irq_domain_alloc(struct irq_domain *domain,
273 unsigned int virq,
274 unsigned int nr_irqs,
275 void *data)
276{
277 struct irq_fwspec *fwspec = data;
278 struct meson_gpio_irq_controller *ctl = domain->host_data;
279 unsigned long hwirq;
280 u32 *channel_hwirq;
281 unsigned int type;
282 int ret;
283
284 if (WARN_ON(nr_irqs != 1))
285 return -EINVAL;
286
287 ret = meson_gpio_irq_domain_translate(domain, fwspec, &hwirq, &type);
288 if (ret)
289 return ret;
290
291 ret = meson_gpio_irq_request_channel(ctl, hwirq, &channel_hwirq);
292 if (ret)
293 return ret;
294
295 ret = meson_gpio_irq_allocate_gic_irq(domain, virq,
296 *channel_hwirq, type);
297 if (ret < 0) {
298 pr_err("failed to allocate gic irq %u\n", *channel_hwirq);
299 meson_gpio_irq_release_channel(ctl, channel_hwirq);
300 return ret;
301 }
302
303 irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
304 &meson_gpio_irq_chip, channel_hwirq);
305
306 return 0;
307}
308
309static void meson_gpio_irq_domain_free(struct irq_domain *domain,
310 unsigned int virq,
311 unsigned int nr_irqs)
312{
313 struct meson_gpio_irq_controller *ctl = domain->host_data;
314 struct irq_data *irq_data;
315 u32 *channel_hwirq;
316
317 if (WARN_ON(nr_irqs != 1))
318 return;
319
320 irq_domain_free_irqs_parent(domain, virq, 1);
321
322 irq_data = irq_domain_get_irq_data(domain, virq);
323 channel_hwirq = irq_data_get_irq_chip_data(irq_data);
324
325 meson_gpio_irq_release_channel(ctl, channel_hwirq);
326}
327
328static const struct irq_domain_ops meson_gpio_irq_domain_ops = {
329 .alloc = meson_gpio_irq_domain_alloc,
330 .free = meson_gpio_irq_domain_free,
331 .translate = meson_gpio_irq_domain_translate,
332};
333
334static int __init meson_gpio_irq_parse_dt(struct device_node *node,
335 struct meson_gpio_irq_controller *ctl)
336{
337 const struct of_device_id *match;
338 const struct meson_gpio_irq_params *params;
339 int ret;
340
341 match = of_match_node(meson_irq_gpio_matches, node);
342 if (!match)
343 return -ENODEV;
344
345 params = match->data;
346 ctl->nr_hwirq = params->nr_hwirq;
347
348 ret = of_property_read_variable_u32_array(node,
349 "amlogic,channel-interrupts",
350 ctl->channel_irqs,
351 NUM_CHANNEL,
352 NUM_CHANNEL);
353 if (ret < 0) {
354 pr_err("can't get %d channel interrupts\n", NUM_CHANNEL);
355 return ret;
356 }
357
358 return 0;
359}
360
361static int __init meson_gpio_irq_of_init(struct device_node *node,
362 struct device_node *parent)
363{
364 struct irq_domain *domain, *parent_domain;
365 struct meson_gpio_irq_controller *ctl;
366 int ret;
367
368 if (!parent) {
369 pr_err("missing parent interrupt node\n");
370 return -ENODEV;
371 }
372
373 parent_domain = irq_find_host(parent);
374 if (!parent_domain) {
375 pr_err("unable to obtain parent domain\n");
376 return -ENXIO;
377 }
378
379 ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
380 if (!ctl)
381 return -ENOMEM;
382
383 spin_lock_init(&ctl->lock);
384
385 ctl->base = of_iomap(node, 0);
386 if (!ctl->base) {
387 ret = -ENOMEM;
388 goto free_ctl;
389 }
390
391 ret = meson_gpio_irq_parse_dt(node, ctl);
392 if (ret)
393 goto free_channel_irqs;
394
395 domain = irq_domain_create_hierarchy(parent_domain, 0, ctl->nr_hwirq,
396 of_node_to_fwnode(node),
397 &meson_gpio_irq_domain_ops,
398 ctl);
399 if (!domain) {
400 pr_err("failed to add domain\n");
401 ret = -ENODEV;
402 goto free_channel_irqs;
403 }
404
405 pr_info("%d to %d gpio interrupt mux initialized\n",
406 ctl->nr_hwirq, NUM_CHANNEL);
407
408 return 0;
409
410free_channel_irqs:
411 iounmap(ctl->base);
412free_ctl:
413 kfree(ctl);
414
415 return ret;
416}
417
418IRQCHIP_DECLARE(meson_gpio_intc, "amlogic,meson-gpio-intc",
419 meson_gpio_irq_of_init);
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index c90976d7e53c..ef92a4d2038e 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -6,8 +6,12 @@
6 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org) 6 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
7 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 7 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
8 */ 8 */
9
10#define pr_fmt(fmt) "irq-mips-gic: " fmt
11
9#include <linux/bitmap.h> 12#include <linux/bitmap.h>
10#include <linux/clocksource.h> 13#include <linux/clocksource.h>
14#include <linux/cpuhotplug.h>
11#include <linux/init.h> 15#include <linux/init.h>
12#include <linux/interrupt.h> 16#include <linux/interrupt.h>
13#include <linux/irq.h> 17#include <linux/irq.h>
@@ -48,12 +52,16 @@ static DEFINE_SPINLOCK(gic_lock);
48static struct irq_domain *gic_irq_domain; 52static struct irq_domain *gic_irq_domain;
49static struct irq_domain *gic_ipi_domain; 53static struct irq_domain *gic_ipi_domain;
50static int gic_shared_intrs; 54static int gic_shared_intrs;
51static int gic_vpes;
52static unsigned int gic_cpu_pin; 55static unsigned int gic_cpu_pin;
53static unsigned int timer_cpu_pin; 56static unsigned int timer_cpu_pin;
54static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller; 57static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
55DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS); 58static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
56DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS); 59static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
60
61static struct gic_all_vpes_chip_data {
62 u32 map;
63 bool mask;
64} gic_all_vpes_chip_data[GIC_NUM_LOCAL_INTRS];
57 65
58static void gic_clear_pcpu_masks(unsigned int intr) 66static void gic_clear_pcpu_masks(unsigned int intr)
59{ 67{
@@ -194,46 +202,46 @@ static void gic_ack_irq(struct irq_data *d)
194 202
195static int gic_set_type(struct irq_data *d, unsigned int type) 203static int gic_set_type(struct irq_data *d, unsigned int type)
196{ 204{
197 unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq); 205 unsigned int irq, pol, trig, dual;
198 unsigned long flags; 206 unsigned long flags;
199 bool is_edge; 207
208 irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
200 209
201 spin_lock_irqsave(&gic_lock, flags); 210 spin_lock_irqsave(&gic_lock, flags);
202 switch (type & IRQ_TYPE_SENSE_MASK) { 211 switch (type & IRQ_TYPE_SENSE_MASK) {
203 case IRQ_TYPE_EDGE_FALLING: 212 case IRQ_TYPE_EDGE_FALLING:
204 change_gic_pol(irq, GIC_POL_FALLING_EDGE); 213 pol = GIC_POL_FALLING_EDGE;
205 change_gic_trig(irq, GIC_TRIG_EDGE); 214 trig = GIC_TRIG_EDGE;
206 change_gic_dual(irq, GIC_DUAL_SINGLE); 215 dual = GIC_DUAL_SINGLE;
207 is_edge = true;
208 break; 216 break;
209 case IRQ_TYPE_EDGE_RISING: 217 case IRQ_TYPE_EDGE_RISING:
210 change_gic_pol(irq, GIC_POL_RISING_EDGE); 218 pol = GIC_POL_RISING_EDGE;
211 change_gic_trig(irq, GIC_TRIG_EDGE); 219 trig = GIC_TRIG_EDGE;
212 change_gic_dual(irq, GIC_DUAL_SINGLE); 220 dual = GIC_DUAL_SINGLE;
213 is_edge = true;
214 break; 221 break;
215 case IRQ_TYPE_EDGE_BOTH: 222 case IRQ_TYPE_EDGE_BOTH:
216 /* polarity is irrelevant in this case */ 223 pol = 0; /* Doesn't matter */
217 change_gic_trig(irq, GIC_TRIG_EDGE); 224 trig = GIC_TRIG_EDGE;
218 change_gic_dual(irq, GIC_DUAL_DUAL); 225 dual = GIC_DUAL_DUAL;
219 is_edge = true;
220 break; 226 break;
221 case IRQ_TYPE_LEVEL_LOW: 227 case IRQ_TYPE_LEVEL_LOW:
222 change_gic_pol(irq, GIC_POL_ACTIVE_LOW); 228 pol = GIC_POL_ACTIVE_LOW;
223 change_gic_trig(irq, GIC_TRIG_LEVEL); 229 trig = GIC_TRIG_LEVEL;
224 change_gic_dual(irq, GIC_DUAL_SINGLE); 230 dual = GIC_DUAL_SINGLE;
225 is_edge = false;
226 break; 231 break;
227 case IRQ_TYPE_LEVEL_HIGH: 232 case IRQ_TYPE_LEVEL_HIGH:
228 default: 233 default:
229 change_gic_pol(irq, GIC_POL_ACTIVE_HIGH); 234 pol = GIC_POL_ACTIVE_HIGH;
230 change_gic_trig(irq, GIC_TRIG_LEVEL); 235 trig = GIC_TRIG_LEVEL;
231 change_gic_dual(irq, GIC_DUAL_SINGLE); 236 dual = GIC_DUAL_SINGLE;
232 is_edge = false;
233 break; 237 break;
234 } 238 }
235 239
236 if (is_edge) 240 change_gic_pol(irq, pol);
241 change_gic_trig(irq, trig);
242 change_gic_dual(irq, dual);
243
244 if (trig == GIC_TRIG_EDGE)
237 irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller, 245 irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
238 handle_edge_irq, NULL); 246 handle_edge_irq, NULL);
239 else 247 else
@@ -338,13 +346,17 @@ static struct irq_chip gic_local_irq_controller = {
338 346
339static void gic_mask_local_irq_all_vpes(struct irq_data *d) 347static void gic_mask_local_irq_all_vpes(struct irq_data *d)
340{ 348{
341 int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); 349 struct gic_all_vpes_chip_data *cd;
342 int i;
343 unsigned long flags; 350 unsigned long flags;
351 int intr, cpu;
352
353 intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
354 cd = irq_data_get_irq_chip_data(d);
355 cd->mask = false;
344 356
345 spin_lock_irqsave(&gic_lock, flags); 357 spin_lock_irqsave(&gic_lock, flags);
346 for (i = 0; i < gic_vpes; i++) { 358 for_each_online_cpu(cpu) {
347 write_gic_vl_other(mips_cm_vp_id(i)); 359 write_gic_vl_other(mips_cm_vp_id(cpu));
348 write_gic_vo_rmask(BIT(intr)); 360 write_gic_vo_rmask(BIT(intr));
349 } 361 }
350 spin_unlock_irqrestore(&gic_lock, flags); 362 spin_unlock_irqrestore(&gic_lock, flags);
@@ -352,22 +364,40 @@ static void gic_mask_local_irq_all_vpes(struct irq_data *d)
352 364
353static void gic_unmask_local_irq_all_vpes(struct irq_data *d) 365static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
354{ 366{
355 int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); 367 struct gic_all_vpes_chip_data *cd;
356 int i;
357 unsigned long flags; 368 unsigned long flags;
369 int intr, cpu;
370
371 intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
372 cd = irq_data_get_irq_chip_data(d);
373 cd->mask = true;
358 374
359 spin_lock_irqsave(&gic_lock, flags); 375 spin_lock_irqsave(&gic_lock, flags);
360 for (i = 0; i < gic_vpes; i++) { 376 for_each_online_cpu(cpu) {
361 write_gic_vl_other(mips_cm_vp_id(i)); 377 write_gic_vl_other(mips_cm_vp_id(cpu));
362 write_gic_vo_smask(BIT(intr)); 378 write_gic_vo_smask(BIT(intr));
363 } 379 }
364 spin_unlock_irqrestore(&gic_lock, flags); 380 spin_unlock_irqrestore(&gic_lock, flags);
365} 381}
366 382
383static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
384{
385 struct gic_all_vpes_chip_data *cd;
386 unsigned int intr;
387
388 intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
389 cd = irq_data_get_irq_chip_data(d);
390
391 write_gic_vl_map(intr, cd->map);
392 if (cd->mask)
393 write_gic_vl_smask(BIT(intr));
394}
395
367static struct irq_chip gic_all_vpes_local_irq_controller = { 396static struct irq_chip gic_all_vpes_local_irq_controller = {
368 .name = "MIPS GIC Local", 397 .name = "MIPS GIC Local",
369 .irq_mask = gic_mask_local_irq_all_vpes, 398 .irq_mask = gic_mask_local_irq_all_vpes,
370 .irq_unmask = gic_unmask_local_irq_all_vpes, 399 .irq_unmask = gic_unmask_local_irq_all_vpes,
400 .irq_cpu_online = gic_all_vpes_irq_cpu_online,
371}; 401};
372 402
373static void __gic_irq_dispatch(void) 403static void __gic_irq_dispatch(void)
@@ -382,39 +412,6 @@ static void gic_irq_dispatch(struct irq_desc *desc)
382 gic_handle_shared_int(true); 412 gic_handle_shared_int(true);
383} 413}
384 414
385static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
386 irq_hw_number_t hw)
387{
388 int intr = GIC_HWIRQ_TO_LOCAL(hw);
389 int i;
390 unsigned long flags;
391 u32 val;
392
393 if (!gic_local_irq_is_routable(intr))
394 return -EPERM;
395
396 if (intr > GIC_LOCAL_INT_FDC) {
397 pr_err("Invalid local IRQ %d\n", intr);
398 return -EINVAL;
399 }
400
401 if (intr == GIC_LOCAL_INT_TIMER) {
402 /* CONFIG_MIPS_CMP workaround (see __gic_init) */
403 val = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
404 } else {
405 val = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
406 }
407
408 spin_lock_irqsave(&gic_lock, flags);
409 for (i = 0; i < gic_vpes; i++) {
410 write_gic_vl_other(mips_cm_vp_id(i));
411 write_gic_vo_map(intr, val);
412 }
413 spin_unlock_irqrestore(&gic_lock, flags);
414
415 return 0;
416}
417
418static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq, 415static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
419 irq_hw_number_t hw, unsigned int cpu) 416 irq_hw_number_t hw, unsigned int cpu)
420{ 417{
@@ -457,7 +454,11 @@ static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
457static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, 454static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
458 irq_hw_number_t hwirq) 455 irq_hw_number_t hwirq)
459{ 456{
460 int err; 457 struct gic_all_vpes_chip_data *cd;
458 unsigned long flags;
459 unsigned int intr;
460 int err, cpu;
461 u32 map;
461 462
462 if (hwirq >= GIC_SHARED_HWIRQ_BASE) { 463 if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
463 /* verify that shared irqs don't conflict with an IPI irq */ 464 /* verify that shared irqs don't conflict with an IPI irq */
@@ -474,8 +475,14 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
474 return gic_shared_irq_domain_map(d, virq, hwirq, 0); 475 return gic_shared_irq_domain_map(d, virq, hwirq, 0);
475 } 476 }
476 477
477 switch (GIC_HWIRQ_TO_LOCAL(hwirq)) { 478 intr = GIC_HWIRQ_TO_LOCAL(hwirq);
479 map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
480
481 switch (intr) {
478 case GIC_LOCAL_INT_TIMER: 482 case GIC_LOCAL_INT_TIMER:
483 /* CONFIG_MIPS_CMP workaround (see __gic_init) */
484 map = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
485 /* fall-through */
479 case GIC_LOCAL_INT_PERFCTR: 486 case GIC_LOCAL_INT_PERFCTR:
480 case GIC_LOCAL_INT_FDC: 487 case GIC_LOCAL_INT_FDC:
481 /* 488 /*
@@ -483,9 +490,11 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
483 * the rest of the MIPS kernel code does not use the 490 * the rest of the MIPS kernel code does not use the
484 * percpu IRQ API for them. 491 * percpu IRQ API for them.
485 */ 492 */
493 cd = &gic_all_vpes_chip_data[intr];
494 cd->map = map;
486 err = irq_domain_set_hwirq_and_chip(d, virq, hwirq, 495 err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
487 &gic_all_vpes_local_irq_controller, 496 &gic_all_vpes_local_irq_controller,
488 NULL); 497 cd);
489 if (err) 498 if (err)
490 return err; 499 return err;
491 500
@@ -504,7 +513,17 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
504 break; 513 break;
505 } 514 }
506 515
507 return gic_local_irq_domain_map(d, virq, hwirq); 516 if (!gic_local_irq_is_routable(intr))
517 return -EPERM;
518
519 spin_lock_irqsave(&gic_lock, flags);
520 for_each_online_cpu(cpu) {
521 write_gic_vl_other(mips_cm_vp_id(cpu));
522 write_gic_vo_map(intr, map);
523 }
524 spin_unlock_irqrestore(&gic_lock, flags);
525
526 return 0;
508} 527}
509 528
510static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq, 529static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
@@ -636,11 +655,25 @@ static const struct irq_domain_ops gic_ipi_domain_ops = {
636 .match = gic_ipi_domain_match, 655 .match = gic_ipi_domain_match,
637}; 656};
638 657
658static int gic_cpu_startup(unsigned int cpu)
659{
660 /* Enable or disable EIC */
661 change_gic_vl_ctl(GIC_VX_CTL_EIC,
662 cpu_has_veic ? GIC_VX_CTL_EIC : 0);
663
664 /* Clear all local IRQ masks (ie. disable all local interrupts) */
665 write_gic_vl_rmask(~0);
666
667 /* Invoke irq_cpu_online callbacks to enable desired interrupts */
668 irq_cpu_online();
669
670 return 0;
671}
639 672
640static int __init gic_of_init(struct device_node *node, 673static int __init gic_of_init(struct device_node *node,
641 struct device_node *parent) 674 struct device_node *parent)
642{ 675{
643 unsigned int cpu_vec, i, j, gicconfig, cpu, v[2]; 676 unsigned int cpu_vec, i, gicconfig, v[2], num_ipis;
644 unsigned long reserved; 677 unsigned long reserved;
645 phys_addr_t gic_base; 678 phys_addr_t gic_base;
646 struct resource res; 679 struct resource res;
@@ -655,7 +688,7 @@ static int __init gic_of_init(struct device_node *node,
655 688
656 cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM)); 689 cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM));
657 if (cpu_vec == hweight_long(ST0_IM)) { 690 if (cpu_vec == hweight_long(ST0_IM)) {
658 pr_err("No CPU vectors available for GIC\n"); 691 pr_err("No CPU vectors available\n");
659 return -ENODEV; 692 return -ENODEV;
660 } 693 }
661 694
@@ -668,8 +701,10 @@ static int __init gic_of_init(struct device_node *node,
668 gic_base = read_gcr_gic_base() & 701 gic_base = read_gcr_gic_base() &
669 ~CM_GCR_GIC_BASE_GICEN; 702 ~CM_GCR_GIC_BASE_GICEN;
670 gic_len = 0x20000; 703 gic_len = 0x20000;
704 pr_warn("Using inherited base address %pa\n",
705 &gic_base);
671 } else { 706 } else {
672 pr_err("Failed to get GIC memory range\n"); 707 pr_err("Failed to get memory range\n");
673 return -ENODEV; 708 return -ENODEV;
674 } 709 }
675 } else { 710 } else {
@@ -690,17 +725,7 @@ static int __init gic_of_init(struct device_node *node,
690 gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS); 725 gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS);
691 gic_shared_intrs = (gic_shared_intrs + 1) * 8; 726 gic_shared_intrs = (gic_shared_intrs + 1) * 8;
692 727
693 gic_vpes = gicconfig & GIC_CONFIG_PVPS;
694 gic_vpes >>= __ffs(GIC_CONFIG_PVPS);
695 gic_vpes = gic_vpes + 1;
696
697 if (cpu_has_veic) { 728 if (cpu_has_veic) {
698 /* Set EIC mode for all VPEs */
699 for_each_present_cpu(cpu) {
700 write_gic_vl_other(mips_cm_vp_id(cpu));
701 write_gic_vo_ctl(GIC_VX_CTL_EIC);
702 }
703
704 /* Always use vector 1 in EIC mode */ 729 /* Always use vector 1 in EIC mode */
705 gic_cpu_pin = 0; 730 gic_cpu_pin = 0;
706 timer_cpu_pin = gic_cpu_pin; 731 timer_cpu_pin = gic_cpu_pin;
@@ -737,7 +762,7 @@ static int __init gic_of_init(struct device_node *node,
737 gic_shared_intrs, 0, 762 gic_shared_intrs, 0,
738 &gic_irq_domain_ops, NULL); 763 &gic_irq_domain_ops, NULL);
739 if (!gic_irq_domain) { 764 if (!gic_irq_domain) {
740 pr_err("Failed to add GIC IRQ domain"); 765 pr_err("Failed to add IRQ domain");
741 return -ENXIO; 766 return -ENXIO;
742 } 767 }
743 768
@@ -746,7 +771,7 @@ static int __init gic_of_init(struct device_node *node,
746 GIC_NUM_LOCAL_INTRS + gic_shared_intrs, 771 GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
747 node, &gic_ipi_domain_ops, NULL); 772 node, &gic_ipi_domain_ops, NULL);
748 if (!gic_ipi_domain) { 773 if (!gic_ipi_domain) {
749 pr_err("Failed to add GIC IPI domain"); 774 pr_err("Failed to add IPI domain");
750 return -ENXIO; 775 return -ENXIO;
751 } 776 }
752 777
@@ -756,10 +781,12 @@ static int __init gic_of_init(struct device_node *node,
756 !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) { 781 !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
757 bitmap_set(ipi_resrv, v[0], v[1]); 782 bitmap_set(ipi_resrv, v[0], v[1]);
758 } else { 783 } else {
759 /* Make the last 2 * gic_vpes available for IPIs */ 784 /*
760 bitmap_set(ipi_resrv, 785 * Reserve 2 interrupts per possible CPU/VP for use as IPIs,
761 gic_shared_intrs - 2 * gic_vpes, 786 * meeting the requirements of arch/mips SMP.
762 2 * gic_vpes); 787 */
788 num_ipis = 2 * num_possible_cpus();
789 bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
763 } 790 }
764 791
765 bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS); 792 bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
@@ -773,15 +800,8 @@ static int __init gic_of_init(struct device_node *node,
773 write_gic_rmask(i); 800 write_gic_rmask(i);
774 } 801 }
775 802
776 for (i = 0; i < gic_vpes; i++) { 803 return cpuhp_setup_state(CPUHP_AP_IRQ_MIPS_GIC_STARTING,
777 write_gic_vl_other(mips_cm_vp_id(i)); 804 "irqchip/mips/gic:starting",
778 for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) { 805 gic_cpu_startup, NULL);
779 if (!gic_local_irq_is_routable(j))
780 continue;
781 write_gic_vo_rmask(BIT(j));
782 }
783 }
784
785 return 0;
786} 806}
787IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init); 807IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);
diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
index b04a8ac6e744..d360a6eddd6d 100644
--- a/drivers/irqchip/irq-omap-intc.c
+++ b/drivers/irqchip/irq-omap-intc.c
@@ -25,10 +25,6 @@
25 25
26#include <linux/irqchip/irq-omap-intc.h> 26#include <linux/irqchip/irq-omap-intc.h>
27 27
28/* Define these here for now until we drop all board-files */
29#define OMAP24XX_IC_BASE 0x480fe000
30#define OMAP34XX_IC_BASE 0x48200000
31
32/* selected INTC register offsets */ 28/* selected INTC register offsets */
33 29
34#define INTC_REVISION 0x0000 30#define INTC_REVISION 0x0000
@@ -70,8 +66,8 @@ static struct omap_intc_regs intc_context;
70 66
71static struct irq_domain *domain; 67static struct irq_domain *domain;
72static void __iomem *omap_irq_base; 68static void __iomem *omap_irq_base;
73static int omap_nr_pending = 3; 69static int omap_nr_pending;
74static int omap_nr_irqs = 96; 70static int omap_nr_irqs;
75 71
76static void intc_writel(u32 reg, u32 val) 72static void intc_writel(u32 reg, u32 val)
77{ 73{
@@ -364,14 +360,6 @@ omap_intc_handle_irq(struct pt_regs *regs)
364 handle_domain_irq(domain, irqnr, regs); 360 handle_domain_irq(domain, irqnr, regs);
365} 361}
366 362
367void __init omap3_init_irq(void)
368{
369 omap_nr_irqs = 96;
370 omap_nr_pending = 3;
371 omap_init_irq(OMAP34XX_IC_BASE, NULL);
372 set_handle_irq(omap_intc_handle_irq);
373}
374
375static int __init intc_of_init(struct device_node *node, 363static int __init intc_of_init(struct device_node *node,
376 struct device_node *parent) 364 struct device_node *parent)
377{ 365{
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 713177d97c7a..06f29cf5018a 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -389,9 +389,8 @@ MODULE_DEVICE_TABLE(of, intc_irqpin_dt_ids);
389 389
390static int intc_irqpin_probe(struct platform_device *pdev) 390static int intc_irqpin_probe(struct platform_device *pdev)
391{ 391{
392 const struct intc_irqpin_config *config = NULL; 392 const struct intc_irqpin_config *config;
393 struct device *dev = &pdev->dev; 393 struct device *dev = &pdev->dev;
394 const struct of_device_id *of_id;
395 struct intc_irqpin_priv *p; 394 struct intc_irqpin_priv *p;
396 struct intc_irqpin_iomem *i; 395 struct intc_irqpin_iomem *i;
397 struct resource *io[INTC_IRQPIN_REG_NR]; 396 struct resource *io[INTC_IRQPIN_REG_NR];
@@ -422,11 +421,9 @@ static int intc_irqpin_probe(struct platform_device *pdev)
422 p->pdev = pdev; 421 p->pdev = pdev;
423 platform_set_drvdata(pdev, p); 422 platform_set_drvdata(pdev, p);
424 423
425 of_id = of_match_device(intc_irqpin_dt_ids, dev); 424 config = of_device_get_match_data(dev);
426 if (of_id && of_id->data) { 425 if (config)
427 config = of_id->data;
428 p->needs_clk = config->needs_clk; 426 p->needs_clk = config->needs_clk;
429 }
430 427
431 p->clk = devm_clk_get(dev, NULL); 428 p->clk = devm_clk_get(dev, NULL);
432 if (IS_ERR(p->clk)) { 429 if (IS_ERR(p->clk)) {
diff --git a/drivers/irqchip/irq-sni-exiu.c b/drivers/irqchip/irq-sni-exiu.c
new file mode 100644
index 000000000000..1b6e2f7c59af
--- /dev/null
+++ b/drivers/irqchip/irq-sni-exiu.c
@@ -0,0 +1,227 @@
1/*
2 * Driver for Socionext External Interrupt Unit (EXIU)
3 *
4 * Copyright (c) 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org>
5 *
6 * Based on irq-tegra.c:
7 * Copyright (C) 2011 Google, Inc.
8 * Copyright (C) 2010,2013, NVIDIA Corporation
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/interrupt.h>
16#include <linux/io.h>
17#include <linux/irq.h>
18#include <linux/irqchip.h>
19#include <linux/irqdomain.h>
20#include <linux/of.h>
21#include <linux/of_address.h>
22#include <linux/of_irq.h>
23
24#include <dt-bindings/interrupt-controller/arm-gic.h>
25
26#define NUM_IRQS 32
27
28#define EIMASK 0x00
29#define EISRCSEL 0x04
30#define EIREQSTA 0x08
31#define EIRAWREQSTA 0x0C
32#define EIREQCLR 0x10
33#define EILVL 0x14
34#define EIEDG 0x18
35#define EISIR 0x1C
36
37struct exiu_irq_data {
38 void __iomem *base;
39 u32 spi_base;
40};
41
42static void exiu_irq_eoi(struct irq_data *d)
43{
44 struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
45
46 writel(BIT(d->hwirq), data->base + EIREQCLR);
47 irq_chip_eoi_parent(d);
48}
49
50static void exiu_irq_mask(struct irq_data *d)
51{
52 struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
53 u32 val;
54
55 val = readl_relaxed(data->base + EIMASK) | BIT(d->hwirq);
56 writel_relaxed(val, data->base + EIMASK);
57 irq_chip_mask_parent(d);
58}
59
60static void exiu_irq_unmask(struct irq_data *d)
61{
62 struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
63 u32 val;
64
65 val = readl_relaxed(data->base + EIMASK) & ~BIT(d->hwirq);
66 writel_relaxed(val, data->base + EIMASK);
67 irq_chip_unmask_parent(d);
68}
69
70static void exiu_irq_enable(struct irq_data *d)
71{
72 struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
73 u32 val;
74
75 /* clear interrupts that were latched while disabled */
76 writel_relaxed(BIT(d->hwirq), data->base + EIREQCLR);
77
78 val = readl_relaxed(data->base + EIMASK) & ~BIT(d->hwirq);
79 writel_relaxed(val, data->base + EIMASK);
80 irq_chip_enable_parent(d);
81}
82
83static int exiu_irq_set_type(struct irq_data *d, unsigned int type)
84{
85 struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
86 u32 val;
87
88 val = readl_relaxed(data->base + EILVL);
89 if (type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH)
90 val |= BIT(d->hwirq);
91 else
92 val &= ~BIT(d->hwirq);
93 writel_relaxed(val, data->base + EILVL);
94
95 val = readl_relaxed(data->base + EIEDG);
96 if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH)
97 val &= ~BIT(d->hwirq);
98 else
99 val |= BIT(d->hwirq);
100 writel_relaxed(val, data->base + EIEDG);
101
102 writel_relaxed(BIT(d->hwirq), data->base + EIREQCLR);
103
104 return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
105}
106
107static struct irq_chip exiu_irq_chip = {
108 .name = "EXIU",
109 .irq_eoi = exiu_irq_eoi,
110 .irq_enable = exiu_irq_enable,
111 .irq_mask = exiu_irq_mask,
112 .irq_unmask = exiu_irq_unmask,
113 .irq_set_type = exiu_irq_set_type,
114 .irq_set_affinity = irq_chip_set_affinity_parent,
115 .flags = IRQCHIP_SET_TYPE_MASKED |
116 IRQCHIP_SKIP_SET_WAKE |
117 IRQCHIP_EOI_THREADED |
118 IRQCHIP_MASK_ON_SUSPEND,
119};
120
121static int exiu_domain_translate(struct irq_domain *domain,
122 struct irq_fwspec *fwspec,
123 unsigned long *hwirq,
124 unsigned int *type)
125{
126 struct exiu_irq_data *info = domain->host_data;
127
128 if (is_of_node(fwspec->fwnode)) {
129 if (fwspec->param_count != 3)
130 return -EINVAL;
131
132 if (fwspec->param[0] != GIC_SPI)
133 return -EINVAL; /* No PPI should point to this domain */
134
135 *hwirq = fwspec->param[1] - info->spi_base;
136 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
137 return 0;
138 }
139 return -EINVAL;
140}
141
142static int exiu_domain_alloc(struct irq_domain *dom, unsigned int virq,
143 unsigned int nr_irqs, void *data)
144{
145 struct irq_fwspec *fwspec = data;
146 struct irq_fwspec parent_fwspec;
147 struct exiu_irq_data *info = dom->host_data;
148 irq_hw_number_t hwirq;
149
150 if (fwspec->param_count != 3)
151 return -EINVAL; /* Not GIC compliant */
152 if (fwspec->param[0] != GIC_SPI)
153 return -EINVAL; /* No PPI should point to this domain */
154
155 WARN_ON(nr_irqs != 1);
156 hwirq = fwspec->param[1] - info->spi_base;
157 irq_domain_set_hwirq_and_chip(dom, virq, hwirq, &exiu_irq_chip, info);
158
159 parent_fwspec = *fwspec;
160 parent_fwspec.fwnode = dom->parent->fwnode;
161 return irq_domain_alloc_irqs_parent(dom, virq, nr_irqs, &parent_fwspec);
162}
163
164static const struct irq_domain_ops exiu_domain_ops = {
165 .translate = exiu_domain_translate,
166 .alloc = exiu_domain_alloc,
167 .free = irq_domain_free_irqs_common,
168};
169
170static int __init exiu_init(struct device_node *node,
171 struct device_node *parent)
172{
173 struct irq_domain *parent_domain, *domain;
174 struct exiu_irq_data *data;
175 int err;
176
177 if (!parent) {
178 pr_err("%pOF: no parent, giving up\n", node);
179 return -ENODEV;
180 }
181
182 parent_domain = irq_find_host(parent);
183 if (!parent_domain) {
184 pr_err("%pOF: unable to obtain parent domain\n", node);
185 return -ENXIO;
186 }
187
188 data = kzalloc(sizeof(*data), GFP_KERNEL);
189 if (!data)
190 return -ENOMEM;
191
192 if (of_property_read_u32(node, "socionext,spi-base", &data->spi_base)) {
193 pr_err("%pOF: failed to parse 'spi-base' property\n", node);
194 err = -ENODEV;
195 goto out_free;
196 }
197
198 data->base = of_iomap(node, 0);
199 if (IS_ERR(data->base)) {
200 err = PTR_ERR(data->base);
201 goto out_free;
202 }
203
204 /* clear and mask all interrupts */
205 writel_relaxed(0xFFFFFFFF, data->base + EIREQCLR);
206 writel_relaxed(0xFFFFFFFF, data->base + EIMASK);
207
208 domain = irq_domain_add_hierarchy(parent_domain, 0, NUM_IRQS, node,
209 &exiu_domain_ops, data);
210 if (!domain) {
211 pr_err("%pOF: failed to allocate domain\n", node);
212 err = -ENOMEM;
213 goto out_unmap;
214 }
215
216 pr_info("%pOF: %d interrupts forwarded to %pOF\n", node, NUM_IRQS,
217 parent);
218
219 return 0;
220
221out_unmap:
222 iounmap(data->base);
223out_free:
224 kfree(data);
225 return err;
226}
227IRQCHIP_DECLARE(exiu, "socionext,synquacer-exiu", exiu_init);
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 45363ff8d06f..31ab0dee2ce7 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -14,27 +14,99 @@
14#include <linux/of_address.h> 14#include <linux/of_address.h>
15#include <linux/of_irq.h> 15#include <linux/of_irq.h>
16 16
17#define EXTI_IMR 0x0 17#define IRQS_PER_BANK 32
18#define EXTI_EMR 0x4 18
19#define EXTI_RTSR 0x8 19struct stm32_exti_bank {
20#define EXTI_FTSR 0xc 20 u32 imr_ofst;
21#define EXTI_SWIER 0x10 21 u32 emr_ofst;
22#define EXTI_PR 0x14 22 u32 rtsr_ofst;
23 u32 ftsr_ofst;
24 u32 swier_ofst;
25 u32 pr_ofst;
26};
27
28static const struct stm32_exti_bank stm32f4xx_exti_b1 = {
29 .imr_ofst = 0x00,
30 .emr_ofst = 0x04,
31 .rtsr_ofst = 0x08,
32 .ftsr_ofst = 0x0C,
33 .swier_ofst = 0x10,
34 .pr_ofst = 0x14,
35};
36
37static const struct stm32_exti_bank *stm32f4xx_exti_banks[] = {
38 &stm32f4xx_exti_b1,
39};
40
41static const struct stm32_exti_bank stm32h7xx_exti_b1 = {
42 .imr_ofst = 0x80,
43 .emr_ofst = 0x84,
44 .rtsr_ofst = 0x00,
45 .ftsr_ofst = 0x04,
46 .swier_ofst = 0x08,
47 .pr_ofst = 0x88,
48};
49
50static const struct stm32_exti_bank stm32h7xx_exti_b2 = {
51 .imr_ofst = 0x90,
52 .emr_ofst = 0x94,
53 .rtsr_ofst = 0x20,
54 .ftsr_ofst = 0x24,
55 .swier_ofst = 0x28,
56 .pr_ofst = 0x98,
57};
58
59static const struct stm32_exti_bank stm32h7xx_exti_b3 = {
60 .imr_ofst = 0xA0,
61 .emr_ofst = 0xA4,
62 .rtsr_ofst = 0x40,
63 .ftsr_ofst = 0x44,
64 .swier_ofst = 0x48,
65 .pr_ofst = 0xA8,
66};
67
68static const struct stm32_exti_bank *stm32h7xx_exti_banks[] = {
69 &stm32h7xx_exti_b1,
70 &stm32h7xx_exti_b2,
71 &stm32h7xx_exti_b3,
72};
73
74static unsigned long stm32_exti_pending(struct irq_chip_generic *gc)
75{
76 const struct stm32_exti_bank *stm32_bank = gc->private;
77
78 return irq_reg_readl(gc, stm32_bank->pr_ofst);
79}
80
81static void stm32_exti_irq_ack(struct irq_chip_generic *gc, u32 mask)
82{
83 const struct stm32_exti_bank *stm32_bank = gc->private;
84
85 irq_reg_writel(gc, mask, stm32_bank->pr_ofst);
86}
23 87
24static void stm32_irq_handler(struct irq_desc *desc) 88static void stm32_irq_handler(struct irq_desc *desc)
25{ 89{
26 struct irq_domain *domain = irq_desc_get_handler_data(desc); 90 struct irq_domain *domain = irq_desc_get_handler_data(desc);
27 struct irq_chip_generic *gc = domain->gc->gc[0];
28 struct irq_chip *chip = irq_desc_get_chip(desc); 91 struct irq_chip *chip = irq_desc_get_chip(desc);
92 unsigned int virq, nbanks = domain->gc->num_chips;
93 struct irq_chip_generic *gc;
94 const struct stm32_exti_bank *stm32_bank;
29 unsigned long pending; 95 unsigned long pending;
30 int n; 96 int n, i, irq_base = 0;
31 97
32 chained_irq_enter(chip, desc); 98 chained_irq_enter(chip, desc);
33 99
34 while ((pending = irq_reg_readl(gc, EXTI_PR))) { 100 for (i = 0; i < nbanks; i++, irq_base += IRQS_PER_BANK) {
35 for_each_set_bit(n, &pending, BITS_PER_LONG) { 101 gc = irq_get_domain_generic_chip(domain, irq_base);
36 generic_handle_irq(irq_find_mapping(domain, n)); 102 stm32_bank = gc->private;
37 irq_reg_writel(gc, BIT(n), EXTI_PR); 103
104 while ((pending = stm32_exti_pending(gc))) {
105 for_each_set_bit(n, &pending, IRQS_PER_BANK) {
106 virq = irq_find_mapping(domain, irq_base + n);
107 generic_handle_irq(virq);
108 stm32_exti_irq_ack(gc, BIT(n));
109 }
38 } 110 }
39 } 111 }
40 112
@@ -44,13 +116,14 @@ static void stm32_irq_handler(struct irq_desc *desc)
44static int stm32_irq_set_type(struct irq_data *data, unsigned int type) 116static int stm32_irq_set_type(struct irq_data *data, unsigned int type)
45{ 117{
46 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); 118 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
47 int pin = data->hwirq; 119 const struct stm32_exti_bank *stm32_bank = gc->private;
120 int pin = data->hwirq % IRQS_PER_BANK;
48 u32 rtsr, ftsr; 121 u32 rtsr, ftsr;
49 122
50 irq_gc_lock(gc); 123 irq_gc_lock(gc);
51 124
52 rtsr = irq_reg_readl(gc, EXTI_RTSR); 125 rtsr = irq_reg_readl(gc, stm32_bank->rtsr_ofst);
53 ftsr = irq_reg_readl(gc, EXTI_FTSR); 126 ftsr = irq_reg_readl(gc, stm32_bank->ftsr_ofst);
54 127
55 switch (type) { 128 switch (type) {
56 case IRQ_TYPE_EDGE_RISING: 129 case IRQ_TYPE_EDGE_RISING:
@@ -70,8 +143,8 @@ static int stm32_irq_set_type(struct irq_data *data, unsigned int type)
70 return -EINVAL; 143 return -EINVAL;
71 } 144 }
72 145
73 irq_reg_writel(gc, rtsr, EXTI_RTSR); 146 irq_reg_writel(gc, rtsr, stm32_bank->rtsr_ofst);
74 irq_reg_writel(gc, ftsr, EXTI_FTSR); 147 irq_reg_writel(gc, ftsr, stm32_bank->ftsr_ofst);
75 148
76 irq_gc_unlock(gc); 149 irq_gc_unlock(gc);
77 150
@@ -81,17 +154,18 @@ static int stm32_irq_set_type(struct irq_data *data, unsigned int type)
81static int stm32_irq_set_wake(struct irq_data *data, unsigned int on) 154static int stm32_irq_set_wake(struct irq_data *data, unsigned int on)
82{ 155{
83 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); 156 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
84 int pin = data->hwirq; 157 const struct stm32_exti_bank *stm32_bank = gc->private;
85 u32 emr; 158 int pin = data->hwirq % IRQS_PER_BANK;
159 u32 imr;
86 160
87 irq_gc_lock(gc); 161 irq_gc_lock(gc);
88 162
89 emr = irq_reg_readl(gc, EXTI_EMR); 163 imr = irq_reg_readl(gc, stm32_bank->imr_ofst);
90 if (on) 164 if (on)
91 emr |= BIT(pin); 165 imr |= BIT(pin);
92 else 166 else
93 emr &= ~BIT(pin); 167 imr &= ~BIT(pin);
94 irq_reg_writel(gc, emr, EXTI_EMR); 168 irq_reg_writel(gc, imr, stm32_bank->imr_ofst);
95 169
96 irq_gc_unlock(gc); 170 irq_gc_unlock(gc);
97 171
@@ -101,11 +175,12 @@ static int stm32_irq_set_wake(struct irq_data *data, unsigned int on)
101static int stm32_exti_alloc(struct irq_domain *d, unsigned int virq, 175static int stm32_exti_alloc(struct irq_domain *d, unsigned int virq,
102 unsigned int nr_irqs, void *data) 176 unsigned int nr_irqs, void *data)
103{ 177{
104 struct irq_chip_generic *gc = d->gc->gc[0]; 178 struct irq_chip_generic *gc;
105 struct irq_fwspec *fwspec = data; 179 struct irq_fwspec *fwspec = data;
106 irq_hw_number_t hwirq; 180 irq_hw_number_t hwirq;
107 181
108 hwirq = fwspec->param[0]; 182 hwirq = fwspec->param[0];
183 gc = irq_get_domain_generic_chip(d, hwirq);
109 184
110 irq_map_generic_chip(d, virq, hwirq); 185 irq_map_generic_chip(d, virq, hwirq);
111 irq_domain_set_info(d, virq, hwirq, &gc->chip_types->chip, gc, 186 irq_domain_set_info(d, virq, hwirq, &gc->chip_types->chip, gc,
@@ -129,8 +204,9 @@ struct irq_domain_ops irq_exti_domain_ops = {
129 .free = stm32_exti_free, 204 .free = stm32_exti_free,
130}; 205};
131 206
132static int __init stm32_exti_init(struct device_node *node, 207static int
133 struct device_node *parent) 208__init stm32_exti_init(const struct stm32_exti_bank **stm32_exti_banks,
209 int bank_nr, struct device_node *node)
134{ 210{
135 unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; 211 unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
136 int nr_irqs, nr_exti, ret, i; 212 int nr_irqs, nr_exti, ret, i;
@@ -144,23 +220,16 @@ static int __init stm32_exti_init(struct device_node *node,
144 return -ENOMEM; 220 return -ENOMEM;
145 } 221 }
146 222
147 /* Determine number of irqs supported */ 223 domain = irq_domain_add_linear(node, bank_nr * IRQS_PER_BANK,
148 writel_relaxed(~0UL, base + EXTI_RTSR);
149 nr_exti = fls(readl_relaxed(base + EXTI_RTSR));
150 writel_relaxed(0, base + EXTI_RTSR);
151
152 pr_info("%pOF: %d External IRQs detected\n", node, nr_exti);
153
154 domain = irq_domain_add_linear(node, nr_exti,
155 &irq_exti_domain_ops, NULL); 224 &irq_exti_domain_ops, NULL);
156 if (!domain) { 225 if (!domain) {
157 pr_err("%s: Could not register interrupt domain.\n", 226 pr_err("%s: Could not register interrupt domain.\n",
158 node->name); 227 node->name);
159 ret = -ENOMEM; 228 ret = -ENOMEM;
160 goto out_unmap; 229 goto out_unmap;
161 } 230 }
162 231
163 ret = irq_alloc_domain_generic_chips(domain, nr_exti, 1, "exti", 232 ret = irq_alloc_domain_generic_chips(domain, IRQS_PER_BANK, 1, "exti",
164 handle_edge_irq, clr, 0, 0); 233 handle_edge_irq, clr, 0, 0);
165 if (ret) { 234 if (ret) {
166 pr_err("%pOF: Could not allocate generic interrupt chip.\n", 235 pr_err("%pOF: Could not allocate generic interrupt chip.\n",
@@ -168,18 +237,41 @@ static int __init stm32_exti_init(struct device_node *node,
168 goto out_free_domain; 237 goto out_free_domain;
169 } 238 }
170 239
171 gc = domain->gc->gc[0]; 240 for (i = 0; i < bank_nr; i++) {
172 gc->reg_base = base; 241 const struct stm32_exti_bank *stm32_bank = stm32_exti_banks[i];
173 gc->chip_types->type = IRQ_TYPE_EDGE_BOTH; 242 u32 irqs_mask;
174 gc->chip_types->chip.name = gc->chip_types[0].chip.name; 243
175 gc->chip_types->chip.irq_ack = irq_gc_ack_set_bit; 244 gc = irq_get_domain_generic_chip(domain, i * IRQS_PER_BANK);
176 gc->chip_types->chip.irq_mask = irq_gc_mask_clr_bit; 245
177 gc->chip_types->chip.irq_unmask = irq_gc_mask_set_bit; 246 gc->reg_base = base;
178 gc->chip_types->chip.irq_set_type = stm32_irq_set_type; 247 gc->chip_types->type = IRQ_TYPE_EDGE_BOTH;
179 gc->chip_types->chip.irq_set_wake = stm32_irq_set_wake; 248 gc->chip_types->chip.irq_ack = irq_gc_ack_set_bit;
180 gc->chip_types->regs.ack = EXTI_PR; 249 gc->chip_types->chip.irq_mask = irq_gc_mask_clr_bit;
181 gc->chip_types->regs.mask = EXTI_IMR; 250 gc->chip_types->chip.irq_unmask = irq_gc_mask_set_bit;
182 gc->chip_types->handler = handle_edge_irq; 251 gc->chip_types->chip.irq_set_type = stm32_irq_set_type;
252 gc->chip_types->chip.irq_set_wake = stm32_irq_set_wake;
253 gc->chip_types->regs.ack = stm32_bank->pr_ofst;
254 gc->chip_types->regs.mask = stm32_bank->imr_ofst;
255 gc->private = (void *)stm32_bank;
256
257 /* Determine number of irqs supported */
258 writel_relaxed(~0UL, base + stm32_bank->rtsr_ofst);
259 irqs_mask = readl_relaxed(base + stm32_bank->rtsr_ofst);
260 nr_exti = fls(readl_relaxed(base + stm32_bank->rtsr_ofst));
261
262 /*
263 * This IP has no reset, so after hot reboot we should
264 * clear registers to avoid residue
265 */
266 writel_relaxed(0, base + stm32_bank->imr_ofst);
267 writel_relaxed(0, base + stm32_bank->emr_ofst);
268 writel_relaxed(0, base + stm32_bank->rtsr_ofst);
269 writel_relaxed(0, base + stm32_bank->ftsr_ofst);
270 writel_relaxed(~0UL, base + stm32_bank->pr_ofst);
271
272 pr_info("%s: bank%d, External IRQs available:%#x\n",
273 node->full_name, i, irqs_mask);
274 }
183 275
184 nr_irqs = of_irq_count(node); 276 nr_irqs = of_irq_count(node);
185 for (i = 0; i < nr_irqs; i++) { 277 for (i = 0; i < nr_irqs; i++) {
@@ -198,4 +290,20 @@ out_unmap:
198 return ret; 290 return ret;
199} 291}
200 292
201IRQCHIP_DECLARE(stm32_exti, "st,stm32-exti", stm32_exti_init); 293static int __init stm32f4_exti_of_init(struct device_node *np,
294 struct device_node *parent)
295{
296 return stm32_exti_init(stm32f4xx_exti_banks,
297 ARRAY_SIZE(stm32f4xx_exti_banks), np);
298}
299
300IRQCHIP_DECLARE(stm32f4_exti, "st,stm32-exti", stm32f4_exti_of_init);
301
302static int __init stm32h7_exti_of_init(struct device_node *np,
303 struct device_node *parent)
304{
305 return stm32_exti_init(stm32h7xx_exti_banks,
306 ARRAY_SIZE(stm32h7xx_exti_banks), np);
307}
308
309IRQCHIP_DECLARE(stm32h7_exti, "st,stm32h7-exti", stm32h7_exti_of_init);
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 50299ad96659..02b66588cac6 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -289,13 +289,14 @@ static int stm32_gpio_domain_translate(struct irq_domain *d,
289 return 0; 289 return 0;
290} 290}
291 291
292static void stm32_gpio_domain_activate(struct irq_domain *d, 292static int stm32_gpio_domain_activate(struct irq_domain *d,
293 struct irq_data *irq_data) 293 struct irq_data *irq_data, bool early)
294{ 294{
295 struct stm32_gpio_bank *bank = d->host_data; 295 struct stm32_gpio_bank *bank = d->host_data;
296 struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent); 296 struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
297 297
298 regmap_field_write(pctl->irqmux[irq_data->hwirq], bank->bank_nr); 298 regmap_field_write(pctl->irqmux[irq_data->hwirq], bank->bank_nr);
299 return 0;
299} 300}
300 301
301static int stm32_gpio_domain_alloc(struct irq_domain *d, 302static int stm32_gpio_domain_alloc(struct irq_domain *d,
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 2477a5cb5bd5..ec32c4c5eb30 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -99,6 +99,7 @@ enum cpuhp_state {
99 CPUHP_AP_IRQ_HIP04_STARTING, 99 CPUHP_AP_IRQ_HIP04_STARTING,
100 CPUHP_AP_IRQ_ARMADA_XP_STARTING, 100 CPUHP_AP_IRQ_ARMADA_XP_STARTING,
101 CPUHP_AP_IRQ_BCM2836_STARTING, 101 CPUHP_AP_IRQ_BCM2836_STARTING,
102 CPUHP_AP_IRQ_MIPS_GIC_STARTING,
102 CPUHP_AP_ARM_MVEBU_COHERENCY, 103 CPUHP_AP_ARM_MVEBU_COHERENCY,
103 CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, 104 CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
104 CPUHP_AP_PERF_X86_STARTING, 105 CPUHP_AP_PERF_X86_STARTING,
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 4536286cc4d2..b01d06db9101 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -1114,6 +1114,28 @@ static inline u32 irq_reg_readl(struct irq_chip_generic *gc,
1114 return readl(gc->reg_base + reg_offset); 1114 return readl(gc->reg_base + reg_offset);
1115} 1115}
1116 1116
1117struct irq_matrix;
1118struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
1119 unsigned int alloc_start,
1120 unsigned int alloc_end);
1121void irq_matrix_online(struct irq_matrix *m);
1122void irq_matrix_offline(struct irq_matrix *m);
1123void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
1124int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
1125void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
1126int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu);
1127void irq_matrix_reserve(struct irq_matrix *m);
1128void irq_matrix_remove_reserved(struct irq_matrix *m);
1129int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
1130 bool reserved, unsigned int *mapped_cpu);
1131void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
1132 unsigned int bit, bool managed);
1133void irq_matrix_assign(struct irq_matrix *m, unsigned int bit);
1134unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown);
1135unsigned int irq_matrix_allocated(struct irq_matrix *m);
1136unsigned int irq_matrix_reserved(struct irq_matrix *m);
1137void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind);
1138
1117/* Contrary to Linux irqs, for hardware irqs the irq number 0 is valid */ 1139/* Contrary to Linux irqs, for hardware irqs the irq number 0 is valid */
1118#define INVALID_HWIRQ (~0UL) 1140#define INVALID_HWIRQ (~0UL)
1119irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu); 1141irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu);
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 14b74f22d43c..c00c4c33e432 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -68,6 +68,7 @@
68#define GICD_CTLR_ENABLE_SS_G1 (1U << 1) 68#define GICD_CTLR_ENABLE_SS_G1 (1U << 1)
69#define GICD_CTLR_ENABLE_SS_G0 (1U << 0) 69#define GICD_CTLR_ENABLE_SS_G0 (1U << 0)
70 70
71#define GICD_TYPER_RSS (1U << 26)
71#define GICD_TYPER_LPIS (1U << 17) 72#define GICD_TYPER_LPIS (1U << 17)
72#define GICD_TYPER_MBIS (1U << 16) 73#define GICD_TYPER_MBIS (1U << 16)
73 74
@@ -461,6 +462,7 @@
461#define ICC_CTLR_EL1_SEIS_MASK (0x1 << ICC_CTLR_EL1_SEIS_SHIFT) 462#define ICC_CTLR_EL1_SEIS_MASK (0x1 << ICC_CTLR_EL1_SEIS_SHIFT)
462#define ICC_CTLR_EL1_A3V_SHIFT 15 463#define ICC_CTLR_EL1_A3V_SHIFT 15
463#define ICC_CTLR_EL1_A3V_MASK (0x1 << ICC_CTLR_EL1_A3V_SHIFT) 464#define ICC_CTLR_EL1_A3V_MASK (0x1 << ICC_CTLR_EL1_A3V_SHIFT)
465#define ICC_CTLR_EL1_RSS (0x1 << 18)
464#define ICC_PMR_EL1_SHIFT 0 466#define ICC_PMR_EL1_SHIFT 0
465#define ICC_PMR_EL1_MASK (0xff << ICC_PMR_EL1_SHIFT) 467#define ICC_PMR_EL1_MASK (0xff << ICC_PMR_EL1_SHIFT)
466#define ICC_BPR0_EL1_SHIFT 0 468#define ICC_BPR0_EL1_SHIFT 0
@@ -549,6 +551,8 @@
549#define ICC_SGI1R_AFFINITY_2_SHIFT 32 551#define ICC_SGI1R_AFFINITY_2_SHIFT 32
550#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT) 552#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT)
551#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 553#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40
554#define ICC_SGI1R_RS_SHIFT 44
555#define ICC_SGI1R_RS_MASK (0xfULL << ICC_SGI1R_RS_SHIFT)
552#define ICC_SGI1R_AFFINITY_3_SHIFT 48 556#define ICC_SGI1R_AFFINITY_3_SHIFT 48
553#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT) 557#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT)
554 558
diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h
index 58a4d89aa82c..447da8ca2156 100644
--- a/include/linux/irqchip/arm-gic-v4.h
+++ b/include/linux/irqchip/arm-gic-v4.h
@@ -20,6 +20,12 @@
20 20
21struct its_vpe; 21struct its_vpe;
22 22
23/*
24 * Maximum number of ITTs when GITS_TYPER.VMOVP == 0, using the
25 * ITSList mechanism to perform inter-ITS synchronization.
26 */
27#define GICv4_ITS_LIST_MAX 16
28
23/* Embedded in kvm.arch */ 29/* Embedded in kvm.arch */
24struct its_vm { 30struct its_vm {
25 struct fwnode_handle *fwnode; 31 struct fwnode_handle *fwnode;
@@ -30,6 +36,7 @@ struct its_vm {
30 irq_hw_number_t db_lpi_base; 36 irq_hw_number_t db_lpi_base;
31 unsigned long *db_bitmap; 37 unsigned long *db_bitmap;
32 int nr_db_lpis; 38 int nr_db_lpis;
39 u32 vlpi_count[GICv4_ITS_LIST_MAX];
33}; 40};
34 41
35/* Embedded in kvm_vcpu.arch */ 42/* Embedded in kvm_vcpu.arch */
@@ -64,12 +71,14 @@ struct its_vpe {
64 * @vm: Pointer to the GICv4 notion of a VM 71 * @vm: Pointer to the GICv4 notion of a VM
65 * @vpe: Pointer to the GICv4 notion of a virtual CPU (VPE) 72 * @vpe: Pointer to the GICv4 notion of a virtual CPU (VPE)
66 * @vintid: Virtual LPI number 73 * @vintid: Virtual LPI number
74 * @properties: Priority and enable bits (as written in the prop table)
67 * @db_enabled: Is the VPE doorbell to be generated? 75 * @db_enabled: Is the VPE doorbell to be generated?
68 */ 76 */
69struct its_vlpi_map { 77struct its_vlpi_map {
70 struct its_vm *vm; 78 struct its_vm *vm;
71 struct its_vpe *vpe; 79 struct its_vpe *vpe;
72 u32 vintid; 80 u32 vintid;
81 u8 properties;
73 bool db_enabled; 82 bool db_enabled;
74}; 83};
75 84
diff --git a/include/linux/irqchip/irq-omap-intc.h b/include/linux/irqchip/irq-omap-intc.h
index 2e3d1afeb674..f19ccee7749f 100644
--- a/include/linux/irqchip/irq-omap-intc.h
+++ b/include/linux/irqchip/irq-omap-intc.h
@@ -18,8 +18,6 @@
18#ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H 18#ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H
19#define __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H 19#define __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H
20 20
21void omap3_init_irq(void);
22
23int omap_irq_pending(void); 21int omap_irq_pending(void);
24void omap_intc_save_context(void); 22void omap_intc_save_context(void);
25void omap_intc_restore_context(void); 23void omap_intc_restore_context(void);
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index b6084898d330..60e3100b0809 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -94,6 +94,7 @@ struct irq_desc {
94#endif 94#endif
95#ifdef CONFIG_GENERIC_IRQ_DEBUGFS 95#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
96 struct dentry *debugfs_file; 96 struct dentry *debugfs_file;
97 const char *dev_name;
97#endif 98#endif
98#ifdef CONFIG_SPARSE_IRQ 99#ifdef CONFIG_SPARSE_IRQ
99 struct rcu_head rcu; 100 struct rcu_head rcu;
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index b1037dfc47e4..a34355d19546 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -33,6 +33,7 @@
33#include <linux/types.h> 33#include <linux/types.h>
34#include <linux/irqhandler.h> 34#include <linux/irqhandler.h>
35#include <linux/of.h> 35#include <linux/of.h>
36#include <linux/mutex.h>
36#include <linux/radix-tree.h> 37#include <linux/radix-tree.h>
37 38
38struct device_node; 39struct device_node;
@@ -41,6 +42,7 @@ struct of_device_id;
41struct irq_chip; 42struct irq_chip;
42struct irq_data; 43struct irq_data;
43struct cpumask; 44struct cpumask;
45struct seq_file;
44 46
45/* Number of irqs reserved for a legacy isa controller */ 47/* Number of irqs reserved for a legacy isa controller */
46#define NUM_ISA_INTERRUPTS 16 48#define NUM_ISA_INTERRUPTS 16
@@ -105,18 +107,21 @@ struct irq_domain_ops {
105 int (*xlate)(struct irq_domain *d, struct device_node *node, 107 int (*xlate)(struct irq_domain *d, struct device_node *node,
106 const u32 *intspec, unsigned int intsize, 108 const u32 *intspec, unsigned int intsize,
107 unsigned long *out_hwirq, unsigned int *out_type); 109 unsigned long *out_hwirq, unsigned int *out_type);
108
109#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 110#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
110 /* extended V2 interfaces to support hierarchy irq_domains */ 111 /* extended V2 interfaces to support hierarchy irq_domains */
111 int (*alloc)(struct irq_domain *d, unsigned int virq, 112 int (*alloc)(struct irq_domain *d, unsigned int virq,
112 unsigned int nr_irqs, void *arg); 113 unsigned int nr_irqs, void *arg);
113 void (*free)(struct irq_domain *d, unsigned int virq, 114 void (*free)(struct irq_domain *d, unsigned int virq,
114 unsigned int nr_irqs); 115 unsigned int nr_irqs);
115 void (*activate)(struct irq_domain *d, struct irq_data *irq_data); 116 int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool early);
116 void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); 117 void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
117 int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec, 118 int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
118 unsigned long *out_hwirq, unsigned int *out_type); 119 unsigned long *out_hwirq, unsigned int *out_type);
119#endif 120#endif
121#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
122 void (*debug_show)(struct seq_file *m, struct irq_domain *d,
123 struct irq_data *irqd, int ind);
124#endif
120}; 125};
121 126
122extern struct irq_domain_ops irq_generic_chip_ops; 127extern struct irq_domain_ops irq_generic_chip_ops;
@@ -134,8 +139,8 @@ struct irq_domain_chip_generic;
134 * @mapcount: The number of mapped interrupts 139 * @mapcount: The number of mapped interrupts
135 * 140 *
136 * Optional elements 141 * Optional elements
137 * @of_node: Pointer to device tree nodes associated with the irq_domain. Used 142 * @fwnode: Pointer to firmware node associated with the irq_domain. Pretty easy
138 * when decoding device tree interrupt specifiers. 143 * to swap it for the of_node via the irq_domain_get_of_node accessor
139 * @gc: Pointer to a list of generic chips. There is a helper function for 144 * @gc: Pointer to a list of generic chips. There is a helper function for
140 * setting up one or more generic chips for interrupt controllers 145 * setting up one or more generic chips for interrupt controllers
141 * drivers using the generic chip library which uses this pointer. 146 * drivers using the generic chip library which uses this pointer.
@@ -173,6 +178,7 @@ struct irq_domain {
173 unsigned int revmap_direct_max_irq; 178 unsigned int revmap_direct_max_irq;
174 unsigned int revmap_size; 179 unsigned int revmap_size;
175 struct radix_tree_root revmap_tree; 180 struct radix_tree_root revmap_tree;
181 struct mutex revmap_tree_mutex;
176 unsigned int linear_revmap[]; 182 unsigned int linear_revmap[];
177}; 183};
178 184
@@ -438,7 +444,7 @@ extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
438 unsigned int nr_irqs, int node, void *arg, 444 unsigned int nr_irqs, int node, void *arg,
439 bool realloc, const struct cpumask *affinity); 445 bool realloc, const struct cpumask *affinity);
440extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs); 446extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
441extern void irq_domain_activate_irq(struct irq_data *irq_data); 447extern int irq_domain_activate_irq(struct irq_data *irq_data, bool early);
442extern void irq_domain_deactivate_irq(struct irq_data *irq_data); 448extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
443 449
444static inline int irq_domain_alloc_irqs(struct irq_domain *domain, 450static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
@@ -508,8 +514,6 @@ static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
508extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain); 514extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain);
509 515
510#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ 516#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
511static inline void irq_domain_activate_irq(struct irq_data *data) { }
512static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
513static inline int irq_domain_alloc_irqs(struct irq_domain *domain, 517static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
514 unsigned int nr_irqs, int node, void *arg) 518 unsigned int nr_irqs, int node, void *arg)
515{ 519{
@@ -558,8 +562,6 @@ irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
558 562
559#else /* CONFIG_IRQ_DOMAIN */ 563#else /* CONFIG_IRQ_DOMAIN */
560static inline void irq_dispose_mapping(unsigned int virq) { } 564static inline void irq_dispose_mapping(unsigned int virq) { }
561static inline void irq_domain_activate_irq(struct irq_data *data) { }
562static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
563static inline struct irq_domain *irq_find_matching_fwnode( 565static inline struct irq_domain *irq_find_matching_fwnode(
564 struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token) 566 struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token)
565{ 567{
diff --git a/include/linux/msi.h b/include/linux/msi.h
index cdd069cf9ed8..1f1bbb5b4679 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -284,6 +284,11 @@ enum {
284 MSI_FLAG_PCI_MSIX = (1 << 3), 284 MSI_FLAG_PCI_MSIX = (1 << 3),
285 /* Needs early activate, required for PCI */ 285 /* Needs early activate, required for PCI */
286 MSI_FLAG_ACTIVATE_EARLY = (1 << 4), 286 MSI_FLAG_ACTIVATE_EARLY = (1 << 4),
287 /*
288 * Must reactivate when irq is started even when
289 * MSI_FLAG_ACTIVATE_EARLY has been set.
290 */
291 MSI_FLAG_MUST_REACTIVATE = (1 << 5),
287}; 292};
288 293
289int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, 294int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
diff --git a/include/trace/events/irq_matrix.h b/include/trace/events/irq_matrix.h
new file mode 100644
index 000000000000..267d4cbbf360
--- /dev/null
+++ b/include/trace/events/irq_matrix.h
@@ -0,0 +1,201 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM irq_matrix
3
4#if !defined(_TRACE_IRQ_MATRIX_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_IRQ_MATRIX_H
6
7#include <linux/tracepoint.h>
8
9struct irq_matrix;
10struct cpumap;
11
12DECLARE_EVENT_CLASS(irq_matrix_global,
13
14 TP_PROTO(struct irq_matrix *matrix),
15
16 TP_ARGS(matrix),
17
18 TP_STRUCT__entry(
19 __field( unsigned int, online_maps )
20 __field( unsigned int, global_available )
21 __field( unsigned int, global_reserved )
22 __field( unsigned int, total_allocated )
23 ),
24
25 TP_fast_assign(
26 __entry->online_maps = matrix->online_maps;
27 __entry->global_available = matrix->global_available;
28 __entry->global_reserved = matrix->global_reserved;
29 __entry->total_allocated = matrix->total_allocated;
30 ),
31
32 TP_printk("online_maps=%d global_avl=%u, global_rsvd=%u, total_alloc=%u",
33 __entry->online_maps, __entry->global_available,
34 __entry->global_reserved, __entry->total_allocated)
35);
36
37DECLARE_EVENT_CLASS(irq_matrix_global_update,
38
39 TP_PROTO(int bit, struct irq_matrix *matrix),
40
41 TP_ARGS(bit, matrix),
42
43 TP_STRUCT__entry(
44 __field( int, bit )
45 __field( unsigned int, online_maps )
46 __field( unsigned int, global_available )
47 __field( unsigned int, global_reserved )
48 __field( unsigned int, total_allocated )
49 ),
50
51 TP_fast_assign(
52 __entry->bit = bit;
53 __entry->online_maps = matrix->online_maps;
54 __entry->global_available = matrix->global_available;
55 __entry->global_reserved = matrix->global_reserved;
56 __entry->total_allocated = matrix->total_allocated;
57 ),
58
59 TP_printk("bit=%d online_maps=%d global_avl=%u, global_rsvd=%u, total_alloc=%u",
60 __entry->bit, __entry->online_maps,
61 __entry->global_available, __entry->global_reserved,
62 __entry->total_allocated)
63);
64
65DECLARE_EVENT_CLASS(irq_matrix_cpu,
66
67 TP_PROTO(int bit, unsigned int cpu, struct irq_matrix *matrix,
68 struct cpumap *cmap),
69
70 TP_ARGS(bit, cpu, matrix, cmap),
71
72 TP_STRUCT__entry(
73 __field( int, bit )
74 __field( unsigned int, cpu )
75 __field( bool, online )
76 __field( unsigned int, available )
77 __field( unsigned int, allocated )
78 __field( unsigned int, managed )
79 __field( unsigned int, online_maps )
80 __field( unsigned int, global_available )
81 __field( unsigned int, global_reserved )
82 __field( unsigned int, total_allocated )
83 ),
84
85 TP_fast_assign(
86 __entry->bit = bit;
87 __entry->cpu = cpu;
88 __entry->online = cmap->online;
89 __entry->available = cmap->available;
90 __entry->allocated = cmap->allocated;
91 __entry->managed = cmap->managed;
92 __entry->online_maps = matrix->online_maps;
93 __entry->global_available = matrix->global_available;
94 __entry->global_reserved = matrix->global_reserved;
95 __entry->total_allocated = matrix->total_allocated;
96 ),
97
98 TP_printk("bit=%d cpu=%u online=%d avl=%u alloc=%u managed=%u online_maps=%u global_avl=%u, global_rsvd=%u, total_alloc=%u",
99 __entry->bit, __entry->cpu, __entry->online,
100 __entry->available, __entry->allocated,
101 __entry->managed, __entry->online_maps,
102 __entry->global_available, __entry->global_reserved,
103 __entry->total_allocated)
104);
105
106DEFINE_EVENT(irq_matrix_global, irq_matrix_online,
107
108 TP_PROTO(struct irq_matrix *matrix),
109
110 TP_ARGS(matrix)
111);
112
113DEFINE_EVENT(irq_matrix_global, irq_matrix_offline,
114
115 TP_PROTO(struct irq_matrix *matrix),
116
117 TP_ARGS(matrix)
118);
119
120DEFINE_EVENT(irq_matrix_global, irq_matrix_reserve,
121
122 TP_PROTO(struct irq_matrix *matrix),
123
124 TP_ARGS(matrix)
125);
126
127DEFINE_EVENT(irq_matrix_global, irq_matrix_remove_reserved,
128
129 TP_PROTO(struct irq_matrix *matrix),
130
131 TP_ARGS(matrix)
132);
133
134DEFINE_EVENT(irq_matrix_global_update, irq_matrix_assign_system,
135
136 TP_PROTO(int bit, struct irq_matrix *matrix),
137
138 TP_ARGS(bit, matrix)
139);
140
141DEFINE_EVENT(irq_matrix_cpu, irq_matrix_alloc_reserved,
142
143 TP_PROTO(int bit, unsigned int cpu,
144 struct irq_matrix *matrix, struct cpumap *cmap),
145
146 TP_ARGS(bit, cpu, matrix, cmap)
147);
148
149DEFINE_EVENT(irq_matrix_cpu, irq_matrix_reserve_managed,
150
151 TP_PROTO(int bit, unsigned int cpu,
152 struct irq_matrix *matrix, struct cpumap *cmap),
153
154 TP_ARGS(bit, cpu, matrix, cmap)
155);
156
157DEFINE_EVENT(irq_matrix_cpu, irq_matrix_remove_managed,
158
159 TP_PROTO(int bit, unsigned int cpu,
160 struct irq_matrix *matrix, struct cpumap *cmap),
161
162 TP_ARGS(bit, cpu, matrix, cmap)
163);
164
165DEFINE_EVENT(irq_matrix_cpu, irq_matrix_alloc_managed,
166
167 TP_PROTO(int bit, unsigned int cpu,
168 struct irq_matrix *matrix, struct cpumap *cmap),
169
170 TP_ARGS(bit, cpu, matrix, cmap)
171);
172
173DEFINE_EVENT(irq_matrix_cpu, irq_matrix_assign,
174
175 TP_PROTO(int bit, unsigned int cpu,
176 struct irq_matrix *matrix, struct cpumap *cmap),
177
178 TP_ARGS(bit, cpu, matrix, cmap)
179);
180
181DEFINE_EVENT(irq_matrix_cpu, irq_matrix_alloc,
182
183 TP_PROTO(int bit, unsigned int cpu,
184 struct irq_matrix *matrix, struct cpumap *cmap),
185
186 TP_ARGS(bit, cpu, matrix, cmap)
187);
188
189DEFINE_EVENT(irq_matrix_cpu, irq_matrix_free,
190
191 TP_PROTO(int bit, unsigned int cpu,
192 struct irq_matrix *matrix, struct cpumap *cmap),
193
194 TP_ARGS(bit, cpu, matrix, cmap)
195);
196
197
198#endif /* _TRACE_IRQ_H */
199
200/* This part must be outside protection */
201#include <trace/define_trace.h>
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index a117adf7084b..ac1a3e29d3b9 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -97,6 +97,9 @@ config HANDLE_DOMAIN_IRQ
97config IRQ_TIMINGS 97config IRQ_TIMINGS
98 bool 98 bool
99 99
100config GENERIC_IRQ_MATRIX_ALLOCATOR
101 bool
102
100config IRQ_DOMAIN_DEBUG 103config IRQ_DOMAIN_DEBUG
101 bool "Expose hardware/virtual IRQ mapping via debugfs" 104 bool "Expose hardware/virtual IRQ mapping via debugfs"
102 depends on IRQ_DOMAIN && DEBUG_FS 105 depends on IRQ_DOMAIN && DEBUG_FS
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index ed15d142694b..ff6e352e3a6c 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o
14obj-$(CONFIG_GENERIC_IRQ_IPI) += ipi.o 14obj-$(CONFIG_GENERIC_IRQ_IPI) += ipi.o
15obj-$(CONFIG_SMP) += affinity.o 15obj-$(CONFIG_SMP) += affinity.o
16obj-$(CONFIG_GENERIC_IRQ_DEBUGFS) += debugfs.o 16obj-$(CONFIG_GENERIC_IRQ_DEBUGFS) += debugfs.o
17obj-$(CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR) += matrix.o
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index befa671fba64..4e8089b319ae 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -54,7 +54,7 @@ unsigned long probe_irq_on(void)
54 if (desc->irq_data.chip->irq_set_type) 54 if (desc->irq_data.chip->irq_set_type)
55 desc->irq_data.chip->irq_set_type(&desc->irq_data, 55 desc->irq_data.chip->irq_set_type(&desc->irq_data,
56 IRQ_TYPE_PROBE); 56 IRQ_TYPE_PROBE);
57 irq_startup(desc, IRQ_NORESEND, IRQ_START_FORCE); 57 irq_activate_and_startup(desc, IRQ_NORESEND);
58 } 58 }
59 raw_spin_unlock_irq(&desc->lock); 59 raw_spin_unlock_irq(&desc->lock);
60 } 60 }
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 5a2ef92c2782..043bfc35b353 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -207,20 +207,24 @@ __irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
207 * Catch code which fiddles with enable_irq() on a managed 207 * Catch code which fiddles with enable_irq() on a managed
208 * and potentially shutdown IRQ. Chained interrupt 208 * and potentially shutdown IRQ. Chained interrupt
209 * installment or irq auto probing should not happen on 209 * installment or irq auto probing should not happen on
210 * managed irqs either. Emit a warning, break the affinity 210 * managed irqs either.
211 * and start it up as a normal interrupt.
212 */ 211 */
213 if (WARN_ON_ONCE(force)) 212 if (WARN_ON_ONCE(force))
214 return IRQ_STARTUP_NORMAL; 213 return IRQ_STARTUP_ABORT;
215 /* 214 /*
216 * The interrupt was requested, but there is no online CPU 215 * The interrupt was requested, but there is no online CPU
217 * in it's affinity mask. Put it into managed shutdown 216 * in it's affinity mask. Put it into managed shutdown
218 * state and let the cpu hotplug mechanism start it up once 217 * state and let the cpu hotplug mechanism start it up once
219 * a CPU in the mask becomes available. 218 * a CPU in the mask becomes available.
220 */ 219 */
221 irqd_set_managed_shutdown(d);
222 return IRQ_STARTUP_ABORT; 220 return IRQ_STARTUP_ABORT;
223 } 221 }
222 /*
223 * Managed interrupts have reserved resources, so this should not
224 * happen.
225 */
226 if (WARN_ON(irq_domain_activate_irq(d, false)))
227 return IRQ_STARTUP_ABORT;
224 return IRQ_STARTUP_MANAGED; 228 return IRQ_STARTUP_MANAGED;
225} 229}
226#else 230#else
@@ -236,7 +240,9 @@ static int __irq_startup(struct irq_desc *desc)
236 struct irq_data *d = irq_desc_get_irq_data(desc); 240 struct irq_data *d = irq_desc_get_irq_data(desc);
237 int ret = 0; 241 int ret = 0;
238 242
239 irq_domain_activate_irq(d); 243 /* Warn if this interrupt is not activated but try nevertheless */
244 WARN_ON_ONCE(!irqd_is_activated(d));
245
240 if (d->chip->irq_startup) { 246 if (d->chip->irq_startup) {
241 ret = d->chip->irq_startup(d); 247 ret = d->chip->irq_startup(d);
242 irq_state_clr_disabled(desc); 248 irq_state_clr_disabled(desc);
@@ -269,6 +275,7 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
269 ret = __irq_startup(desc); 275 ret = __irq_startup(desc);
270 break; 276 break;
271 case IRQ_STARTUP_ABORT: 277 case IRQ_STARTUP_ABORT:
278 irqd_set_managed_shutdown(d);
272 return 0; 279 return 0;
273 } 280 }
274 } 281 }
@@ -278,6 +285,22 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
278 return ret; 285 return ret;
279} 286}
280 287
288int irq_activate(struct irq_desc *desc)
289{
290 struct irq_data *d = irq_desc_get_irq_data(desc);
291
292 if (!irqd_affinity_is_managed(d))
293 return irq_domain_activate_irq(d, false);
294 return 0;
295}
296
297void irq_activate_and_startup(struct irq_desc *desc, bool resend)
298{
299 if (WARN_ON(irq_activate(desc)))
300 return;
301 irq_startup(desc, resend, IRQ_START_FORCE);
302}
303
281static void __irq_disable(struct irq_desc *desc, bool mask); 304static void __irq_disable(struct irq_desc *desc, bool mask);
282 305
283void irq_shutdown(struct irq_desc *desc) 306void irq_shutdown(struct irq_desc *desc)
@@ -953,7 +976,7 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
953 irq_settings_set_norequest(desc); 976 irq_settings_set_norequest(desc);
954 irq_settings_set_nothread(desc); 977 irq_settings_set_nothread(desc);
955 desc->action = &chained_action; 978 desc->action = &chained_action;
956 irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE); 979 irq_activate_and_startup(desc, IRQ_RESEND);
957 } 980 }
958} 981}
959 982
diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
index c3fdb36dec30..7f608ac39653 100644
--- a/kernel/irq/debugfs.c
+++ b/kernel/irq/debugfs.c
@@ -81,6 +81,8 @@ irq_debug_show_data(struct seq_file *m, struct irq_data *data, int ind)
81 data->domain ? data->domain->name : ""); 81 data->domain ? data->domain->name : "");
82 seq_printf(m, "%*shwirq: 0x%lx\n", ind + 1, "", data->hwirq); 82 seq_printf(m, "%*shwirq: 0x%lx\n", ind + 1, "", data->hwirq);
83 irq_debug_show_chip(m, data, ind + 1); 83 irq_debug_show_chip(m, data, ind + 1);
84 if (data->domain && data->domain->ops && data->domain->ops->debug_show)
85 data->domain->ops->debug_show(m, NULL, data, ind + 1);
84#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 86#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
85 if (!data->parent_data) 87 if (!data->parent_data)
86 return; 88 return;
@@ -149,6 +151,7 @@ static int irq_debug_show(struct seq_file *m, void *p)
149 raw_spin_lock_irq(&desc->lock); 151 raw_spin_lock_irq(&desc->lock);
150 data = irq_desc_get_irq_data(desc); 152 data = irq_desc_get_irq_data(desc);
151 seq_printf(m, "handler: %pf\n", desc->handle_irq); 153 seq_printf(m, "handler: %pf\n", desc->handle_irq);
154 seq_printf(m, "device: %s\n", desc->dev_name);
152 seq_printf(m, "status: 0x%08x\n", desc->status_use_accessors); 155 seq_printf(m, "status: 0x%08x\n", desc->status_use_accessors);
153 irq_debug_show_bits(m, 0, desc->status_use_accessors, irqdesc_states, 156 irq_debug_show_bits(m, 0, desc->status_use_accessors, irqdesc_states,
154 ARRAY_SIZE(irqdesc_states)); 157 ARRAY_SIZE(irqdesc_states));
@@ -226,6 +229,15 @@ static const struct file_operations dfs_irq_ops = {
226 .release = single_release, 229 .release = single_release,
227}; 230};
228 231
/*
 * irq_debugfs_copy_devname - Attach a copy of the device name to the
 * interrupt descriptor for the debugfs "device:" line.
 *
 * The copy is freed in irq_remove_debugfs_entry().
 *
 * NOTE(review): assumes @irq is valid — the irq_to_desc() result is
 * dereferenced unchecked. Also assumes dev_name is not already set; a
 * second call for the same irq would leak the previous kstrdup() copy.
 * Both hold for the current MSI allocation path — confirm before reuse.
 */
void irq_debugfs_copy_devname(int irq, struct device *dev)
{
	struct irq_desc *desc = irq_to_desc(irq);
	const char *name = dev_name(dev);

	if (name)
		desc->dev_name = kstrdup(name, GFP_KERNEL);
}
240
229void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc) 241void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc)
230{ 242{
231 char name [10]; 243 char name [10];
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 44ed5f8c8759..07d08ca701ec 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -75,6 +75,8 @@ extern void __enable_irq(struct irq_desc *desc);
75#define IRQ_START_FORCE true 75#define IRQ_START_FORCE true
76#define IRQ_START_COND false 76#define IRQ_START_COND false
77 77
78extern int irq_activate(struct irq_desc *desc);
79extern void irq_activate_and_startup(struct irq_desc *desc, bool resend);
78extern int irq_startup(struct irq_desc *desc, bool resend, bool force); 80extern int irq_startup(struct irq_desc *desc, bool resend, bool force);
79 81
80extern void irq_shutdown(struct irq_desc *desc); 82extern void irq_shutdown(struct irq_desc *desc);
@@ -437,6 +439,18 @@ static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear)
437} 439}
438#endif /* !CONFIG_GENERIC_PENDING_IRQ */ 440#endif /* !CONFIG_GENERIC_PENDING_IRQ */
439 441
442#if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY)
443static inline int irq_domain_activate_irq(struct irq_data *data, bool early)
444{
445 irqd_set_activated(data);
446 return 0;
447}
448static inline void irq_domain_deactivate_irq(struct irq_data *data)
449{
450 irqd_clr_activated(data);
451}
452#endif
453
440#ifdef CONFIG_GENERIC_IRQ_DEBUGFS 454#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
441#include <linux/debugfs.h> 455#include <linux/debugfs.h>
442 456
@@ -444,7 +458,9 @@ void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc);
444static inline void irq_remove_debugfs_entry(struct irq_desc *desc) 458static inline void irq_remove_debugfs_entry(struct irq_desc *desc)
445{ 459{
446 debugfs_remove(desc->debugfs_file); 460 debugfs_remove(desc->debugfs_file);
461 kfree(desc->dev_name);
447} 462}
463void irq_debugfs_copy_devname(int irq, struct device *dev);
448# ifdef CONFIG_IRQ_DOMAIN 464# ifdef CONFIG_IRQ_DOMAIN
449void irq_domain_debugfs_init(struct dentry *root); 465void irq_domain_debugfs_init(struct dentry *root);
450# else 466# else
@@ -459,4 +475,7 @@ static inline void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *d)
459static inline void irq_remove_debugfs_entry(struct irq_desc *d) 475static inline void irq_remove_debugfs_entry(struct irq_desc *d)
460{ 476{
461} 477}
478static inline void irq_debugfs_copy_devname(int irq, struct device *dev)
479{
480}
462#endif /* CONFIG_GENERIC_IRQ_DEBUGFS */ 481#endif /* CONFIG_GENERIC_IRQ_DEBUGFS */
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 82afb7ed369f..f2edcf85780d 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -27,7 +27,7 @@ static struct lock_class_key irq_desc_lock_class;
27#if defined(CONFIG_SMP) 27#if defined(CONFIG_SMP)
28static int __init irq_affinity_setup(char *str) 28static int __init irq_affinity_setup(char *str)
29{ 29{
30 zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); 30 alloc_bootmem_cpumask_var(&irq_default_affinity);
31 cpulist_parse(str, irq_default_affinity); 31 cpulist_parse(str, irq_default_affinity);
32 /* 32 /*
33 * Set at least the boot cpu. We don't want to end up with 33 * Set at least the boot cpu. We don't want to end up with
@@ -40,10 +40,8 @@ __setup("irqaffinity=", irq_affinity_setup);
40 40
41static void __init init_irq_default_affinity(void) 41static void __init init_irq_default_affinity(void)
42{ 42{
43#ifdef CONFIG_CPUMASK_OFFSTACK 43 if (!cpumask_available(irq_default_affinity))
44 if (!irq_default_affinity)
45 zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); 44 zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
46#endif
47 if (cpumask_empty(irq_default_affinity)) 45 if (cpumask_empty(irq_default_affinity))
48 cpumask_setall(irq_default_affinity); 46 cpumask_setall(irq_default_affinity);
49} 47}
@@ -448,7 +446,7 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
448 } 446 }
449 } 447 }
450 448
451 flags = affinity ? IRQD_AFFINITY_MANAGED : 0; 449 flags = affinity ? IRQD_AFFINITY_MANAGED | IRQD_MANAGED_SHUTDOWN : 0;
452 mask = NULL; 450 mask = NULL;
453 451
454 for (i = 0; i < cnt; i++) { 452 for (i = 0; i < cnt; i++) {
@@ -462,6 +460,7 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
462 goto err; 460 goto err;
463 irq_insert_desc(start + i, desc); 461 irq_insert_desc(start + i, desc);
464 irq_sysfs_add(start + i, desc); 462 irq_sysfs_add(start + i, desc);
463 irq_add_debugfs_entry(start + i, desc);
465 } 464 }
466 bitmap_set(allocated_irqs, start, cnt); 465 bitmap_set(allocated_irqs, start, cnt);
467 return start; 466 return start;
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index ac4644e92b49..4f4f60015e8a 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -21,7 +21,6 @@
21static LIST_HEAD(irq_domain_list); 21static LIST_HEAD(irq_domain_list);
22static DEFINE_MUTEX(irq_domain_mutex); 22static DEFINE_MUTEX(irq_domain_mutex);
23 23
24static DEFINE_MUTEX(revmap_trees_mutex);
25static struct irq_domain *irq_default_domain; 24static struct irq_domain *irq_default_domain;
26 25
27static void irq_domain_check_hierarchy(struct irq_domain *domain); 26static void irq_domain_check_hierarchy(struct irq_domain *domain);
@@ -211,6 +210,7 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
211 210
212 /* Fill structure */ 211 /* Fill structure */
213 INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL); 212 INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
213 mutex_init(&domain->revmap_tree_mutex);
214 domain->ops = ops; 214 domain->ops = ops;
215 domain->host_data = host_data; 215 domain->host_data = host_data;
216 domain->hwirq_max = hwirq_max; 216 domain->hwirq_max = hwirq_max;
@@ -462,9 +462,9 @@ static void irq_domain_clear_mapping(struct irq_domain *domain,
462 if (hwirq < domain->revmap_size) { 462 if (hwirq < domain->revmap_size) {
463 domain->linear_revmap[hwirq] = 0; 463 domain->linear_revmap[hwirq] = 0;
464 } else { 464 } else {
465 mutex_lock(&revmap_trees_mutex); 465 mutex_lock(&domain->revmap_tree_mutex);
466 radix_tree_delete(&domain->revmap_tree, hwirq); 466 radix_tree_delete(&domain->revmap_tree, hwirq);
467 mutex_unlock(&revmap_trees_mutex); 467 mutex_unlock(&domain->revmap_tree_mutex);
468 } 468 }
469} 469}
470 470
@@ -475,9 +475,9 @@ static void irq_domain_set_mapping(struct irq_domain *domain,
475 if (hwirq < domain->revmap_size) { 475 if (hwirq < domain->revmap_size) {
476 domain->linear_revmap[hwirq] = irq_data->irq; 476 domain->linear_revmap[hwirq] = irq_data->irq;
477 } else { 477 } else {
478 mutex_lock(&revmap_trees_mutex); 478 mutex_lock(&domain->revmap_tree_mutex);
479 radix_tree_insert(&domain->revmap_tree, hwirq, irq_data); 479 radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
480 mutex_unlock(&revmap_trees_mutex); 480 mutex_unlock(&domain->revmap_tree_mutex);
481 } 481 }
482} 482}
483 483
@@ -921,8 +921,7 @@ static void virq_debug_show_one(struct seq_file *m, struct irq_desc *desc)
921 chip = irq_data_get_irq_chip(data); 921 chip = irq_data_get_irq_chip(data);
922 seq_printf(m, "%-15s ", (chip && chip->name) ? chip->name : "none"); 922 seq_printf(m, "%-15s ", (chip && chip->name) ? chip->name : "none");
923 923
924 seq_printf(m, data ? "0x%p " : " %p ", 924 seq_printf(m, "0x%p ", irq_data_get_irq_chip_data(data));
925 irq_data_get_irq_chip_data(data));
926 925
927 seq_printf(m, " %c ", (desc->action && desc->action->handler) ? '*' : ' '); 926 seq_printf(m, " %c ", (desc->action && desc->action->handler) ? '*' : ' ');
928 direct = (irq == hwirq) && (irq < domain->revmap_direct_max_irq); 927 direct = (irq == hwirq) && (irq < domain->revmap_direct_max_irq);
@@ -1459,11 +1458,11 @@ static void irq_domain_fix_revmap(struct irq_data *d)
1459 return; /* Not using radix tree. */ 1458 return; /* Not using radix tree. */
1460 1459
1461 /* Fix up the revmap. */ 1460 /* Fix up the revmap. */
1462 mutex_lock(&revmap_trees_mutex); 1461 mutex_lock(&d->domain->revmap_tree_mutex);
1463 slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq); 1462 slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
1464 if (slot) 1463 if (slot)
1465 radix_tree_replace_slot(&d->domain->revmap_tree, slot, d); 1464 radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
1466 mutex_unlock(&revmap_trees_mutex); 1465 mutex_unlock(&d->domain->revmap_tree_mutex);
1467} 1466}
1468 1467
1469/** 1468/**
@@ -1682,28 +1681,36 @@ void irq_domain_free_irqs_parent(struct irq_domain *domain,
1682} 1681}
1683EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent); 1682EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
1684 1683
1685static void __irq_domain_activate_irq(struct irq_data *irq_data) 1684static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
1686{ 1685{
1687 if (irq_data && irq_data->domain) { 1686 if (irq_data && irq_data->domain) {
1688 struct irq_domain *domain = irq_data->domain; 1687 struct irq_domain *domain = irq_data->domain;
1689 1688
1689 if (domain->ops->deactivate)
1690 domain->ops->deactivate(domain, irq_data);
1690 if (irq_data->parent_data) 1691 if (irq_data->parent_data)
1691 __irq_domain_activate_irq(irq_data->parent_data); 1692 __irq_domain_deactivate_irq(irq_data->parent_data);
1692 if (domain->ops->activate)
1693 domain->ops->activate(domain, irq_data);
1694 } 1693 }
1695} 1694}
1696 1695
1697static void __irq_domain_deactivate_irq(struct irq_data *irq_data) 1696static int __irq_domain_activate_irq(struct irq_data *irqd, bool early)
1698{ 1697{
1699 if (irq_data && irq_data->domain) { 1698 int ret = 0;
1700 struct irq_domain *domain = irq_data->domain;
1701 1699
1702 if (domain->ops->deactivate) 1700 if (irqd && irqd->domain) {
1703 domain->ops->deactivate(domain, irq_data); 1701 struct irq_domain *domain = irqd->domain;
1704 if (irq_data->parent_data) 1702
1705 __irq_domain_deactivate_irq(irq_data->parent_data); 1703 if (irqd->parent_data)
1704 ret = __irq_domain_activate_irq(irqd->parent_data,
1705 early);
1706 if (!ret && domain->ops->activate) {
1707 ret = domain->ops->activate(domain, irqd, early);
1708 /* Rollback in case of error */
1709 if (ret && irqd->parent_data)
1710 __irq_domain_deactivate_irq(irqd->parent_data);
1711 }
1706 } 1712 }
1713 return ret;
1707} 1714}
1708 1715
1709/** 1716/**
@@ -1714,12 +1721,15 @@ static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
1714 * This is the second step to call domain_ops->activate to program interrupt 1721 * This is the second step to call domain_ops->activate to program interrupt
1715 * controllers, so the interrupt could actually get delivered. 1722 * controllers, so the interrupt could actually get delivered.
1716 */ 1723 */
1717void irq_domain_activate_irq(struct irq_data *irq_data) 1724int irq_domain_activate_irq(struct irq_data *irq_data, bool early)
1718{ 1725{
1719 if (!irqd_is_activated(irq_data)) { 1726 int ret = 0;
1720 __irq_domain_activate_irq(irq_data); 1727
1728 if (!irqd_is_activated(irq_data))
1729 ret = __irq_domain_activate_irq(irq_data, early);
1730 if (!ret)
1721 irqd_set_activated(irq_data); 1731 irqd_set_activated(irq_data);
1722 } 1732 return ret;
1723} 1733}
1724 1734
1725/** 1735/**
@@ -1810,6 +1820,8 @@ irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind)
1810 d->revmap_size + d->revmap_direct_max_irq); 1820 d->revmap_size + d->revmap_direct_max_irq);
1811 seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount); 1821 seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount);
1812 seq_printf(m, "%*sflags: 0x%08x\n", ind +1 , "", d->flags); 1822 seq_printf(m, "%*sflags: 0x%08x\n", ind +1 , "", d->flags);
1823 if (d->ops && d->ops->debug_show)
1824 d->ops->debug_show(m, d, NULL, ind + 1);
1813#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 1825#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1814 if (!d->parent) 1826 if (!d->parent)
1815 return; 1827 return;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 4bff6a10ae8e..2ff1c0c82fc9 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -398,7 +398,8 @@ int irq_select_affinity_usr(unsigned int irq)
398/** 398/**
399 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt 399 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
400 * @irq: interrupt number to set affinity 400 * @irq: interrupt number to set affinity
401 * @vcpu_info: vCPU specific data 401 * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
402 * specific data for percpu_devid interrupts
402 * 403 *
403 * This function uses the vCPU specific data to set the vCPU 404 * This function uses the vCPU specific data to set the vCPU
404 * affinity for an irq. The vCPU specific data is passed from 405 * affinity for an irq. The vCPU specific data is passed from
@@ -536,7 +537,7 @@ void __enable_irq(struct irq_desc *desc)
536 * time. If it was already started up, then irq_startup() 537 * time. If it was already started up, then irq_startup()
537 * will invoke irq_enable() under the hood. 538 * will invoke irq_enable() under the hood.
538 */ 539 */
539 irq_startup(desc, IRQ_RESEND, IRQ_START_COND); 540 irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
540 break; 541 break;
541 } 542 }
542 default: 543 default:
@@ -1305,7 +1306,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1305 * thread_mask assigned. See the loop above which or's 1306 * thread_mask assigned. See the loop above which or's
1306 * all existing action->thread_mask bits. 1307 * all existing action->thread_mask bits.
1307 */ 1308 */
1308 new->thread_mask = 1 << ffz(thread_mask); 1309 new->thread_mask = 1UL << ffz(thread_mask);
1309 1310
1310 } else if (new->handler == irq_default_primary_handler && 1311 } else if (new->handler == irq_default_primary_handler &&
1311 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) { 1312 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
@@ -1342,6 +1343,21 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1342 goto out_unlock; 1343 goto out_unlock;
1343 } 1344 }
1344 1345
1346 /*
1347 * Activate the interrupt. That activation must happen
1348 * independently of IRQ_NOAUTOEN. request_irq() can fail
1349 * and the callers are supposed to handle
1350 * that. enable_irq() of an interrupt requested with
1351 * IRQ_NOAUTOEN is not supposed to fail. The activation
1352 * keeps it in shutdown mode, it merily associates
1353 * resources if necessary and if that's not possible it
1354 * fails. Interrupts which are in managed shutdown mode
1355 * will simply ignore that activation request.
1356 */
1357 ret = irq_activate(desc);
1358 if (ret)
1359 goto out_unlock;
1360
1345 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ 1361 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1346 IRQS_ONESHOT | IRQS_WAITING); 1362 IRQS_ONESHOT | IRQS_WAITING);
1347 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); 1363 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
@@ -1417,7 +1433,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1417 wake_up_process(new->secondary->thread); 1433 wake_up_process(new->secondary->thread);
1418 1434
1419 register_irq_proc(irq, desc); 1435 register_irq_proc(irq, desc);
1420 irq_add_debugfs_entry(irq, desc);
1421 new->dir = NULL; 1436 new->dir = NULL;
1422 register_handler_proc(irq, new); 1437 register_handler_proc(irq, new);
1423 return 0; 1438 return 0;
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
new file mode 100644
index 000000000000..a3cbbc8191c5
--- /dev/null
+++ b/kernel/irq/matrix.c
@@ -0,0 +1,443 @@
1/*
2 * Copyright (C) 2017 Thomas Gleixner <tglx@linutronix.de>
3 *
4 * SPDX-License-Identifier: GPL-2.0
5 */
6#include <linux/spinlock.h>
7#include <linux/seq_file.h>
8#include <linux/bitmap.h>
9#include <linux/percpu.h>
10#include <linux/cpu.h>
11#include <linux/irq.h>
12
13#define IRQ_MATRIX_SIZE (BITS_TO_LONGS(IRQ_MATRIX_BITS) * sizeof(unsigned long))
14
/* Per CPU slice of the interrupt matrix */
struct cpumap {
	unsigned int		available;	/* Bits still allocatable on this CPU */
	unsigned int		allocated;	/* Bits currently allocated */
	unsigned int		managed;	/* Bits reserved for managed interrupts */
	bool			online;		/* True when counted in the global totals */
	unsigned long		alloc_map[IRQ_MATRIX_SIZE];	/* Allocated bits */
	unsigned long		managed_map[IRQ_MATRIX_SIZE];	/* Managed reservations */
};
23
/* Global matrix state plus the per CPU maps */
struct irq_matrix {
	unsigned int		matrix_bits;		/* Usable bits, <= IRQ_MATRIX_BITS */
	unsigned int		alloc_start;		/* First bit of the allocatable region */
	unsigned int		alloc_end;		/* First invalid bit after the region */
	unsigned int		alloc_size;		/* alloc_end - alloc_start */
	unsigned int		global_available;	/* Sum of 'available' over online maps */
	unsigned int		global_reserved;	/* Book-kept, not yet allocated bits */
	unsigned int		systembits_inalloc;	/* System bits inside the alloc region */
	unsigned int		total_allocated;	/* Sum of 'allocated' over all maps */
	unsigned int		online_maps;		/* Number of online per CPU maps */
	struct cpumap __percpu	*maps;			/* Per CPU maps */
	unsigned long		scratch_map[IRQ_MATRIX_SIZE];	/* Temporary for searches */
	unsigned long		system_map[IRQ_MATRIX_SIZE];	/* System-wide reserved bits */
};
38
39#define CREATE_TRACE_POINTS
40#include <trace/events/irq_matrix.h>
41
42/**
43 * irq_alloc_matrix - Allocate a irq_matrix structure and initialize it
44 * @matrix_bits: Number of matrix bits must be <= IRQ_MATRIX_BITS
45 * @alloc_start: From which bit the allocation search starts
46 * @alloc_end: At which bit the allocation search ends, i.e first
47 * invalid bit
48 */
49__init struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
50 unsigned int alloc_start,
51 unsigned int alloc_end)
52{
53 struct irq_matrix *m;
54
55 if (matrix_bits > IRQ_MATRIX_BITS)
56 return NULL;
57
58 m = kzalloc(sizeof(*m), GFP_KERNEL);
59 if (!m)
60 return NULL;
61
62 m->matrix_bits = matrix_bits;
63 m->alloc_start = alloc_start;
64 m->alloc_end = alloc_end;
65 m->alloc_size = alloc_end - alloc_start;
66 m->maps = alloc_percpu(*m->maps);
67 if (!m->maps) {
68 kfree(m);
69 return NULL;
70 }
71 return m;
72}
73
74/**
75 * irq_matrix_online - Bring the local CPU matrix online
76 * @m: Matrix pointer
77 */
78void irq_matrix_online(struct irq_matrix *m)
79{
80 struct cpumap *cm = this_cpu_ptr(m->maps);
81
82 BUG_ON(cm->online);
83
84 bitmap_zero(cm->alloc_map, m->matrix_bits);
85 cm->available = m->alloc_size - (cm->managed + m->systembits_inalloc);
86 cm->allocated = 0;
87 m->global_available += cm->available;
88 cm->online = true;
89 m->online_maps++;
90 trace_irq_matrix_online(m);
91}
92
93/**
94 * irq_matrix_offline - Bring the local CPU matrix offline
95 * @m: Matrix pointer
96 */
97void irq_matrix_offline(struct irq_matrix *m)
98{
99 struct cpumap *cm = this_cpu_ptr(m->maps);
100
101 /* Update the global available size */
102 m->global_available -= cm->available;
103 cm->online = false;
104 m->online_maps--;
105 trace_irq_matrix_offline(m);
106}
107
108static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
109 unsigned int num, bool managed)
110{
111 unsigned int area, start = m->alloc_start;
112 unsigned int end = m->alloc_end;
113
114 bitmap_or(m->scratch_map, cm->managed_map, m->system_map, end);
115 bitmap_or(m->scratch_map, m->scratch_map, cm->alloc_map, end);
116 area = bitmap_find_next_zero_area(m->scratch_map, end, start, num, 0);
117 if (area >= end)
118 return area;
119 if (managed)
120 bitmap_set(cm->managed_map, area, num);
121 else
122 bitmap_set(cm->alloc_map, area, num);
123 return area;
124}
125
/**
 * irq_matrix_assign_system - Assign system wide entry in the matrix
 * @m:		Matrix pointer
 * @bit:	Which bit to reserve
 * @replace:	Replace an already allocated vector with a system
 *		vector at the same bit position.
 *
 * The BUG_ON()s below are on purpose. If this goes wrong in the
 * early boot process, then the chance to survive is about zero.
 * If this happens when the system is life, it's not much better.
 */
void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit,
			      bool replace)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	/* NOTE(review): '>' permits bit == matrix_bits, i.e. one past the
	 * last valid bit; '>=' looks intended — confirm. Harmless today
	 * because the bitmaps are sized for IRQ_MATRIX_BITS. */
	BUG_ON(bit > m->matrix_bits);
	/* Early boot only: at most the boot CPU may be online, and then
	 * only when replacing one of its already allocated vectors. */
	BUG_ON(m->online_maps > 1 || (m->online_maps && !replace));

	set_bit(bit, m->system_map);
	if (replace) {
		/* The bit must have been allocated as a regular vector */
		BUG_ON(!test_and_clear_bit(bit, cm->alloc_map));
		cm->allocated--;
		m->total_allocated--;
	}
	/* System bits inside the allocatable region shrink the pool that
	 * irq_matrix_online() hands out per CPU. */
	if (bit >= m->alloc_start && bit < m->alloc_end)
		m->systembits_inalloc++;

	trace_irq_matrix_assign_system(bit, m);
}
156
/**
 * irq_matrix_reserve_managed - Reserve a managed interrupt in a CPU map
 * @m:		Matrix pointer
 * @msk:	On which CPUs the bits should be reserved.
 *
 * Can be called for offline CPUs. Note, this will only reserve one bit
 * on all CPUs in @msk, but it's not guaranteed that the bits are at the
 * same offset on all CPUs
 *
 * Returns 0 on success, -ENOSPC if any CPU in @msk has no free bit, in
 * which case all reservations made by this call are rolled back.
 */
int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk)
{
	unsigned int cpu, failed_cpu;

	for_each_cpu(cpu, msk) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
		unsigned int bit;

		bit = matrix_alloc_area(m, cm, 1, true);
		if (bit >= m->alloc_end)
			goto cleanup;
		cm->managed++;
		/* Offline CPUs recompute 'available' on onlining, so only
		 * online maps need the accounting adjusted here. */
		if (cm->online) {
			cm->available--;
			m->global_available--;
		}
		trace_irq_matrix_reserve_managed(bit, cpu, m, cm);
	}
	return 0;
cleanup:
	/* Roll back the reservations made on the CPUs before @failed_cpu.
	 * Relies on for_each_cpu() iterating in the same order. */
	failed_cpu = cpu;
	for_each_cpu(cpu, msk) {
		if (cpu == failed_cpu)
			break;
		irq_matrix_remove_managed(m, cpumask_of(cpu));
	}
	return -ENOSPC;
}
194
/**
 * irq_matrix_remove_managed - Remove managed interrupts in a CPU map
 * @m:		Matrix pointer
 * @msk:	On which CPUs the bits should be removed
 *
 * Can be called for offline CPUs
 *
 * This removes not allocated managed interrupts from the map. It does
 * not matter which one because the managed interrupts free their
 * allocation when they shut down. If not, the accounting is screwed,
 * but all what can be done at this point is warn about it.
 */
void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
{
	unsigned int cpu;

	for_each_cpu(cpu, msk) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
		unsigned int bit, end = m->alloc_end;

		/* No managed reservation left to remove on this CPU */
		if (WARN_ON_ONCE(!cm->managed))
			continue;

		/* Get managed bit which are not allocated */
		bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);

		/* All managed bits are in use: accounting is screwed */
		bit = find_first_bit(m->scratch_map, end);
		if (WARN_ON_ONCE(bit >= end))
			continue;

		clear_bit(bit, cm->managed_map);

		cm->managed--;
		/* The bit becomes a regular free bit; only online maps
		 * contribute to the global pool. */
		if (cm->online) {
			cm->available++;
			m->global_available++;
		}
		trace_irq_matrix_remove_managed(bit, cpu, m, cm);
	}
}
235
236/**
237 * irq_matrix_alloc_managed - Allocate a managed interrupt in a CPU map
238 * @m: Matrix pointer
239 * @cpu: On which CPU the interrupt should be allocated
240 */
241int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
242{
243 struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
244 unsigned int bit, end = m->alloc_end;
245
246 /* Get managed bit which are not allocated */
247 bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
248 bit = find_first_bit(m->scratch_map, end);
249 if (bit >= end)
250 return -ENOSPC;
251 set_bit(bit, cm->alloc_map);
252 cm->allocated++;
253 m->total_allocated++;
254 trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
255 return bit;
256}
257
/**
 * irq_matrix_assign - Assign a preallocated interrupt in the local CPU map
 * @m:		Matrix pointer
 * @bit:	Which bit to mark
 *
 * This should only be used to mark preallocated vectors
 */
void irq_matrix_assign(struct irq_matrix *m, unsigned int bit)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	/* The bit must lie inside the allocatable region */
	if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
		return;
	/* Refuse double assignment of the same bit */
	if (WARN_ON_ONCE(test_and_set_bit(bit, cm->alloc_map)))
		return;
	cm->allocated++;
	m->total_allocated++;
	cm->available--;
	m->global_available--;
	trace_irq_matrix_assign(bit, smp_processor_id(), m, cm);
}
279
/**
 * irq_matrix_reserve - Reserve interrupts
 * @m:		Matrix pointer
 *
 * This is merely a book keeping call. It increments the number of globally
 * reserved interrupt bits w/o actually allocating them. This allows to
 * setup interrupt descriptors w/o assigning low level resources to it.
 * The actual allocation happens when the interrupt gets activated.
 */
void irq_matrix_reserve(struct irq_matrix *m)
{
	/* Warn exactly when this reservation crosses the available limit */
	if (m->global_reserved <= m->global_available &&
	    m->global_reserved + 1 > m->global_available)
		pr_warn("Interrupt reservation exceeds available resources\n");

	m->global_reserved++;
	trace_irq_matrix_reserve(m);
}
298
/**
 * irq_matrix_remove_reserved - Remove interrupt reservation
 * @m:		Matrix pointer
 *
 * This is merely a book keeping call. It decrements the number of globally
 * reserved interrupt bits. This is used to undo irq_matrix_reserve() when the
 * interrupt was never in use and a real vector allocated, which undid the
 * reservation.
 */
void irq_matrix_remove_reserved(struct irq_matrix *m)
{
	m->global_reserved--;
	trace_irq_matrix_remove_reserved(m);
}
313
314/**
315 * irq_matrix_alloc - Allocate a regular interrupt in a CPU map
316 * @m: Matrix pointer
317 * @msk: Which CPUs to search in
318 * @reserved: Allocate previously reserved interrupts
319 * @mapped_cpu: Pointer to store the CPU for which the irq was allocated
320 */
321int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
322 bool reserved, unsigned int *mapped_cpu)
323{
324 unsigned int cpu;
325
326 for_each_cpu(cpu, msk) {
327 struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
328 unsigned int bit;
329
330 if (!cm->online)
331 continue;
332
333 bit = matrix_alloc_area(m, cm, 1, false);
334 if (bit < m->alloc_end) {
335 cm->allocated++;
336 cm->available--;
337 m->total_allocated++;
338 m->global_available--;
339 if (reserved)
340 m->global_reserved--;
341 *mapped_cpu = cpu;
342 trace_irq_matrix_alloc(bit, cpu, m, cm);
343 return bit;
344 }
345 }
346 return -ENOSPC;
347}
348
349/**
350 * irq_matrix_free - Free allocated interrupt in the matrix
351 * @m: Matrix pointer
352 * @cpu: Which CPU map needs be updated
353 * @bit: The bit to remove
354 * @managed: If true, the interrupt is managed and not accounted
355 * as available.
356 */
357void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
358 unsigned int bit, bool managed)
359{
360 struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
361
362 if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
363 return;
364
365 if (cm->online) {
366 clear_bit(bit, cm->alloc_map);
367 cm->allocated--;
368 m->total_allocated--;
369 if (!managed) {
370 cm->available++;
371 m->global_available++;
372 }
373 }
374 trace_irq_matrix_free(bit, cpu, m, cm);
375}
376
377/**
378 * irq_matrix_available - Get the number of globally available irqs
379 * @m: Pointer to the matrix to query
380 * @cpudown: If true, the local CPU is about to go down, adjust
381 * the number of available irqs accordingly
382 */
383unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown)
384{
385 struct cpumap *cm = this_cpu_ptr(m->maps);
386
387 return m->global_available - cpudown ? cm->available : 0;
388}
389
/**
 * irq_matrix_reserved - Get the number of globally reserved irqs
 * @m:		Pointer to the matrix to query
 */
unsigned int irq_matrix_reserved(struct irq_matrix *m)
{
	return m->global_reserved;
}
398
399/**
400 * irq_matrix_allocated - Get the number of allocated irqs on the local cpu
401 * @m: Pointer to the matrix to search
402 *
403 * This returns number of allocated irqs
404 */
405unsigned int irq_matrix_allocated(struct irq_matrix *m)
406{
407 struct cpumap *cm = this_cpu_ptr(m->maps);
408
409 return cm->allocated;
410}
411
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
/**
 * irq_matrix_debug_show - Show detailed allocation information
 * @sf:		Pointer to the seq_file to print to
 * @m:		Pointer to the matrix allocator
 * @ind:	Indentation for the print format
 *
 * Note, this is a lockless snapshot: the counters and bitmaps are read
 * without holding any lock, so concurrently changing values may be
 * printed inconsistently. Acceptable for a debugfs diagnostic view.
 */
void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind)
{
	/* Number of system-wide reserved bits */
	unsigned int nsys = bitmap_weight(m->system_map, m->matrix_bits);
	int cpu;

	seq_printf(sf, "Online bitmaps:   %6u\n", m->online_maps);
	seq_printf(sf, "Global available: %6u\n", m->global_available);
	seq_printf(sf, "Global reserved:  %6u\n", m->global_reserved);
	seq_printf(sf, "Total allocated:  %6u\n", m->total_allocated);
	seq_printf(sf, "System: %u: %*pbl\n", nsys, m->matrix_bits,
		   m->system_map);
	seq_printf(sf, "%*s| CPU | avl | man | act | vectors\n", ind, " ");
	/* Hold the hotplug lock so the online mask is stable while iterating */
	cpus_read_lock();
	for_each_online_cpu(cpu) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);

		seq_printf(sf, "%*s %4d  %4u  %4u  %4u  %*pbl\n", ind, " ",
			   cpu, cm->available, cm->managed, cm->allocated,
			   m->matrix_bits, cm->alloc_map);
	}
	cpus_read_unlock();
}
#endif
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 3fa4bd59f569..edb987b2c58d 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -16,6 +16,8 @@
16#include <linux/msi.h> 16#include <linux/msi.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18 18
19#include "internals.h"
20
19/** 21/**
20 * alloc_msi_entry - Allocate an initialize msi_entry 22 * alloc_msi_entry - Allocate an initialize msi_entry
21 * @dev: Pointer to the device for which this is allocated 23 * @dev: Pointer to the device for which this is allocated
@@ -100,13 +102,14 @@ int msi_domain_set_affinity(struct irq_data *irq_data,
100 return ret; 102 return ret;
101} 103}
102 104
103static void msi_domain_activate(struct irq_domain *domain, 105static int msi_domain_activate(struct irq_domain *domain,
104 struct irq_data *irq_data) 106 struct irq_data *irq_data, bool early)
105{ 107{
106 struct msi_msg msg; 108 struct msi_msg msg;
107 109
108 BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg)); 110 BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg));
109 irq_chip_write_msi_msg(irq_data, &msg); 111 irq_chip_write_msi_msg(irq_data, &msg);
112 return 0;
110} 113}
111 114
112static void msi_domain_deactivate(struct irq_domain *domain, 115static void msi_domain_deactivate(struct irq_domain *domain,
@@ -373,8 +376,10 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
373 return ret; 376 return ret;
374 } 377 }
375 378
376 for (i = 0; i < desc->nvec_used; i++) 379 for (i = 0; i < desc->nvec_used; i++) {
377 irq_set_msi_desc_off(virq, i, desc); 380 irq_set_msi_desc_off(virq, i, desc);
381 irq_debugfs_copy_devname(virq + i, dev);
382 }
378 } 383 }
379 384
380 if (ops->msi_finish) 385 if (ops->msi_finish)
@@ -396,11 +401,28 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
396 struct irq_data *irq_data; 401 struct irq_data *irq_data;
397 402
398 irq_data = irq_domain_get_irq_data(domain, desc->irq); 403 irq_data = irq_domain_get_irq_data(domain, desc->irq);
399 irq_domain_activate_irq(irq_data); 404 ret = irq_domain_activate_irq(irq_data, true);
405 if (ret)
406 goto cleanup;
407 if (info->flags & MSI_FLAG_MUST_REACTIVATE)
408 irqd_clr_activated(irq_data);
400 } 409 }
401 } 410 }
402
403 return 0; 411 return 0;
412
413cleanup:
414 for_each_msi_entry(desc, dev) {
415 struct irq_data *irqd;
416
417 if (desc->irq == virq)
418 break;
419
420 irqd = irq_domain_get_irq_data(domain, desc->irq);
421 if (irqd_is_activated(irqd))
422 irq_domain_deactivate_irq(irqd);
423 }
424 msi_domain_free_irqs(domain, dev);
425 return ret;
404} 426}
405 427
406/** 428/**
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index c010cc0daf79..e8f374971e37 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -155,8 +155,9 @@ static ssize_t write_irq_affinity(int type, struct file *file,
155 */ 155 */
156 err = irq_select_affinity_usr(irq) ? -EINVAL : count; 156 err = irq_select_affinity_usr(irq) ? -EINVAL : count;
157 } else { 157 } else {
158 irq_set_affinity(irq, new_value); 158 err = irq_set_affinity(irq, new_value);
159 err = count; 159 if (!err)
160 err = count;
160 } 161 }
161 162
162free_cpumask: 163free_cpumask:
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index ec8ac337404d..40e9d739c169 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -131,9 +131,9 @@ bool irq_work_needs_cpu(void)
131 131
132static void irq_work_run_list(struct llist_head *list) 132static void irq_work_run_list(struct llist_head *list)
133{ 133{
134 unsigned long flags; 134 struct irq_work *work, *tmp;
135 struct irq_work *work;
136 struct llist_node *llnode; 135 struct llist_node *llnode;
136 unsigned long flags;
137 137
138 BUG_ON(!irqs_disabled()); 138 BUG_ON(!irqs_disabled());
139 139
@@ -141,11 +141,7 @@ static void irq_work_run_list(struct llist_head *list)
141 return; 141 return;
142 142
143 llnode = llist_del_all(list); 143 llnode = llist_del_all(list);
144 while (llnode != NULL) { 144 llist_for_each_entry_safe(work, tmp, llnode, llnode) {
145 work = llist_entry(llnode, struct irq_work, llnode);
146
147 llnode = llist_next(llnode);
148
149 /* 145 /*
150 * Clear the PENDING bit, after this point the @work 146 * Clear the PENDING bit, after this point the @work
151 * can be re-used. 147 * can be re-used.