aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.txt26
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt1
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt44
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt7
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/sigma,smp8642-intc.txt49
-rw-r--r--Documentation/kernel-parameters.txt9
-rw-r--r--MAINTAINERS1
-rw-r--r--arch/arm/mach-mvebu/Kconfig6
-rw-r--r--arch/mips/Kconfig8
-rw-r--r--arch/mips/ath79/irq.c244
-rw-r--r--arch/mips/bmips/irq.c10
-rw-r--r--arch/mips/include/asm/mach-ath79/ath79.h4
-rw-r--r--arch/mips/include/asm/smp-ops.h5
-rw-r--r--arch/mips/kernel/Makefile1
-rw-r--r--arch/mips/kernel/smp-cmp.c4
-rw-r--r--arch/mips/kernel/smp-cps.c4
-rw-r--r--arch/mips/kernel/smp-mt.c2
-rw-r--r--arch/mips/kernel/smp.c136
-rw-r--r--arch/x86/include/asm/ipi.h58
-rw-r--r--arch/x86/kernel/apic/apic_flat_64.c2
-rw-r--r--arch/x86/kernel/apic/ipi.c60
-rw-r--r--drivers/irqchip/Kconfig28
-rw-r--r--drivers/irqchip/Makefile8
-rw-r--r--drivers/irqchip/irq-alpine-msi.c293
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c156
-rw-r--r--drivers/irqchip/irq-ath79-cpu.c97
-rw-r--r--drivers/irqchip/irq-ath79-misc.c189
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.c14
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.h7
-rw-r--r--drivers/irqchip/irq-atmel-aic.c9
-rw-r--r--drivers/irqchip/irq-atmel-aic5.c9
-rw-r--r--drivers/irqchip/irq-bcm2836.c1
-rw-r--r--drivers/irqchip/irq-bcm6345-l1.c364
-rw-r--r--drivers/irqchip/irq-gic-realview.c44
-rw-r--r--drivers/irqchip/irq-gic-v2m.c14
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c10
-rw-r--r--drivers/irqchip/irq-gic-v3.c349
-rw-r--r--drivers/irqchip/irq-gic.c2
-rw-r--r--drivers/irqchip/irq-mips-gic.c354
-rw-r--r--drivers/irqchip/irq-mvebu-odmi.c236
-rw-r--r--drivers/irqchip/irq-mxs.c2
-rw-r--r--drivers/irqchip/irq-sunxi-nmi.c4
-rw-r--r--drivers/irqchip/irq-tango.c232
-rw-r--r--drivers/irqchip/irq-ts4800.c2
-rw-r--r--include/linux/irq.h21
-rw-r--r--include/linux/irqchip/mips-gic.h3
-rw-r--r--include/linux/irqdomain.h45
-rw-r--r--kernel/irq/Kconfig4
-rw-r--r--kernel/irq/Makefile1
-rw-r--r--kernel/irq/chip.c4
-rw-r--r--kernel/irq/handle.c6
-rw-r--r--kernel/irq/internals.h3
-rw-r--r--kernel/irq/ipi.c326
-rw-r--r--kernel/irq/irqdesc.c21
-rw-r--r--kernel/irq/irqdomain.c11
-rw-r--r--kernel/irq/manage.c8
-rw-r--r--kernel/irq/proc.c2
-rw-r--r--kernel/irq/spurious.c4
58 files changed, 2956 insertions, 608 deletions
diff --git a/Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.txt b/Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.txt
new file mode 100644
index 000000000000..f6f1c14bf99b
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/al,alpine-msix.txt
@@ -0,0 +1,26 @@
1Alpine MSIX controller
2
3See arm,gic-v3.txt for SPI and MSI definitions.
4
5Required properties:
6
7- compatible: should be "al,alpine-msix"
8- reg: physical base address and size of the registers
9- interrupt-parent: specifies the parent interrupt controller.
10- interrupt-controller: identifies the node as an interrupt controller
11- msi-controller: identifies the node as a PCI Message Signaled Interrupt
12 controller
13- al,msi-base-spi: SPI base of the MSI frame
14- al,msi-num-spis: number of SPIs assigned to the MSI frame, relative to SPI0
15
16Example:
17
18msix: msix {
19 compatible = "al,alpine-msix";
20 reg = <0x0 0xfbe00000 0x0 0x100000>;
21 interrupt-parent = <&gic>;
22 interrupt-controller;
23 msi-controller;
24 al,msi-base-spi = <160>;
25 al,msi-num-spis = <160>;
26};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
index 5a1cb4bc3dfe..793c20ff8fcc 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
@@ -16,6 +16,7 @@ Main node required properties:
16 "arm,cortex-a15-gic" 16 "arm,cortex-a15-gic"
17 "arm,cortex-a7-gic" 17 "arm,cortex-a7-gic"
18 "arm,cortex-a9-gic" 18 "arm,cortex-a9-gic"
19 "arm,eb11mp-gic"
19 "arm,gic-400" 20 "arm,gic-400"
20 "arm,pl390" 21 "arm,pl390"
21 "arm,tc11mp-gic" 22 "arm,tc11mp-gic"
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt b/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt
new file mode 100644
index 000000000000..8af0a8e613ab
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt
@@ -0,0 +1,44 @@
1
2* Marvell ODMI for MSI support
3
4Some Marvell SoCs have an On-Die Message Interrupt (ODMI) controller
5which can be used by on-board peripherals for MSI interrupts.
6
7Required properties:
8
9- compatible : The value here should contain:
10
11 "marvell,ap806-odmi-controller", "marvell,odmi-controller".
12
13- interrupt-controller : Identifies the node as an interrupt controller.
14
15- msi-controller : Identifies the node as an MSI controller.
16
17- marvell,odmi-frames : Number of ODMI frames available. Each frame
18 provides a number of events.
19
20- reg : List of register definitions, one for each
21 ODMI frame.
22
23- marvell,spi-base : List of GIC base SPI interrupts, one for each
24 ODMI frame. Those SPI interrupts are 0-based,
25 i.e. marvell,spi-base = <128> will use SPI #96.
26 See Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
27 for details about the GIC Device Tree binding.
28
29- interrupt-parent : Reference to the parent interrupt controller.
30
31Example:
32
33 odmi: odmi@300000 {
34 compatible = "marvell,ap806-odmi-controller",
35 "marvell,odmi-controller";
36 interrupt-controller;
37 msi-controller;
38 marvell,odmi-frames = <4>;
39 reg = <0x300000 0x4000>,
40 <0x304000 0x4000>,
41 <0x308000 0x4000>,
42 <0x30C000 0x4000>;
43 marvell,spi-base = <128>, <136>, <144>, <152>;
44 };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt b/Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt
index aae4c384ee1f..173595305e26 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt
@@ -23,6 +23,12 @@ Optional properties:
23- mti,reserved-cpu-vectors : Specifies the list of CPU interrupt vectors 23- mti,reserved-cpu-vectors : Specifies the list of CPU interrupt vectors
24 to which the GIC may not route interrupts. Valid values are 2 - 7. 24 to which the GIC may not route interrupts. Valid values are 2 - 7.
25 This property is ignored if the CPU is started in EIC mode. 25 This property is ignored if the CPU is started in EIC mode.
26- mti,reserved-ipi-vectors : Specifies the range of GIC interrupts that are
27 reserved for IPIs.
28 It accepts 2 values, the 1st is the starting interrupt and the 2nd is the size
29 of the reserved range.
30 If not specified, the driver will allocate the last 2 * number of VPEs in the
31 system.
26 32
27Required properties for timer sub-node: 33Required properties for timer sub-node:
28- compatible : Should be "mti,gic-timer". 34- compatible : Should be "mti,gic-timer".
@@ -44,6 +50,7 @@ Example:
44 #interrupt-cells = <3>; 50 #interrupt-cells = <3>;
45 51
46 mti,reserved-cpu-vectors = <7>; 52 mti,reserved-cpu-vectors = <7>;
53 mti,reserved-ipi-vectors = <40 8>;
47 54
48 timer { 55 timer {
49 compatible = "mti,gic-timer"; 56 compatible = "mti,gic-timer";
diff --git a/Documentation/devicetree/bindings/interrupt-controller/sigma,smp8642-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/sigma,smp8642-intc.txt
new file mode 100644
index 000000000000..1f441fa0ad40
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/sigma,smp8642-intc.txt
@@ -0,0 +1,49 @@
1Sigma Designs SMP86xx/SMP87xx secondary interrupt controller
2
3Required properties:
4- compatible: should be "sigma,smp8642-intc"
5- reg: physical address of MMIO region
6- ranges: address space mapping of child nodes
7- interrupt-parent: phandle of parent interrupt controller
8- interrupt-controller: boolean
9- #address-cells: should be <1>
10- #size-cells: should be <1>
11
12One child node per control block with properties:
13- reg: address of registers for this control block
14- interrupt-controller: boolean
15- #interrupt-cells: should be <2>, interrupt index and flags per interrupts.txt
16- interrupts: interrupt spec of primary interrupt controller
17
18Example:
19
20interrupt-controller@6e000 {
21 compatible = "sigma,smp8642-intc";
22 reg = <0x6e000 0x400>;
23 ranges = <0x0 0x6e000 0x400>;
24 interrupt-parent = <&gic>;
25 interrupt-controller;
26 #address-cells = <1>;
27 #size-cells = <1>;
28
29 irq0: interrupt-controller@0 {
30 reg = <0x000 0x100>;
31 interrupt-controller;
32 #interrupt-cells = <2>;
33 interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
34 };
35
36 irq1: interrupt-controller@100 {
37 reg = <0x100 0x100>;
38 interrupt-controller;
39 #interrupt-cells = <2>;
40 interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
41 };
42
43 irq2: interrupt-controller@300 {
44 reg = <0x300 0x100>;
45 interrupt-controller;
46 #interrupt-cells = <2>;
47 interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
48 };
49};
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 4324f2437e6a..4d9ca7d92a20 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1687,6 +1687,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1687 ip= [IP_PNP] 1687 ip= [IP_PNP]
1688 See Documentation/filesystems/nfs/nfsroot.txt. 1688 See Documentation/filesystems/nfs/nfsroot.txt.
1689 1689
1690 irqaffinity= [SMP] Set the default irq affinity mask
1691 Format:
1692 <cpu number>,...,<cpu number>
1693 or
1694 <cpu number>-<cpu number>
1695 (must be a positive range in ascending order)
1696 or a mixture
1697 <cpu number>,...,<cpu number>-<cpu number>
1698
1690 irqfixup [HW] 1699 irqfixup [HW]
1691 When an interrupt is not handled search all handlers 1700 When an interrupt is not handled search all handlers
1692 for it. Intended to get systems with badly broken 1701 for it. Intended to get systems with badly broken
diff --git a/MAINTAINERS b/MAINTAINERS
index 2061ea77667c..57adf395a61f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2422,6 +2422,7 @@ F: arch/mips/bmips/*
2422F: arch/mips/include/asm/mach-bmips/* 2422F: arch/mips/include/asm/mach-bmips/*
2423F: arch/mips/kernel/*bmips* 2423F: arch/mips/kernel/*bmips*
2424F: arch/mips/boot/dts/brcm/bcm*.dts* 2424F: arch/mips/boot/dts/brcm/bcm*.dts*
2425F: drivers/irqchip/irq-bcm63*
2425F: drivers/irqchip/irq-bcm7* 2426F: drivers/irqchip/irq-bcm7*
2426F: drivers/irqchip/irq-brcmstb* 2427F: drivers/irqchip/irq-brcmstb*
2427F: include/linux/bcm963xx_nvram.h 2428F: include/linux/bcm963xx_nvram.h
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index 64e3d2ce9a07..b003e3afd693 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -3,7 +3,6 @@ menuconfig ARCH_MVEBU
3 depends on ARCH_MULTI_V7 || ARCH_MULTI_V5 3 depends on ARCH_MULTI_V7 || ARCH_MULTI_V5
4 select ARCH_SUPPORTS_BIG_ENDIAN 4 select ARCH_SUPPORTS_BIG_ENDIAN
5 select CLKSRC_MMIO 5 select CLKSRC_MMIO
6 select GENERIC_IRQ_CHIP
7 select PINCTRL 6 select PINCTRL
8 select PLAT_ORION 7 select PLAT_ORION
9 select SOC_BUS 8 select SOC_BUS
@@ -29,6 +28,7 @@ config MACH_ARMADA_370
29 bool "Marvell Armada 370 boards" 28 bool "Marvell Armada 370 boards"
30 depends on ARCH_MULTI_V7 29 depends on ARCH_MULTI_V7
31 select ARMADA_370_CLK 30 select ARMADA_370_CLK
31 select ARMADA_370_XP_IRQ
32 select CPU_PJ4B 32 select CPU_PJ4B
33 select MACH_MVEBU_V7 33 select MACH_MVEBU_V7
34 select PINCTRL_ARMADA_370 34 select PINCTRL_ARMADA_370
@@ -39,6 +39,7 @@ config MACH_ARMADA_370
39config MACH_ARMADA_375 39config MACH_ARMADA_375
40 bool "Marvell Armada 375 boards" 40 bool "Marvell Armada 375 boards"
41 depends on ARCH_MULTI_V7 41 depends on ARCH_MULTI_V7
42 select ARMADA_370_XP_IRQ
42 select ARM_ERRATA_720789 43 select ARM_ERRATA_720789
43 select ARM_ERRATA_753970 44 select ARM_ERRATA_753970
44 select ARM_GIC 45 select ARM_GIC
@@ -58,6 +59,7 @@ config MACH_ARMADA_38X
58 select ARM_ERRATA_720789 59 select ARM_ERRATA_720789
59 select ARM_ERRATA_753970 60 select ARM_ERRATA_753970
60 select ARM_GIC 61 select ARM_GIC
62 select ARMADA_370_XP_IRQ
61 select ARMADA_38X_CLK 63 select ARMADA_38X_CLK
62 select HAVE_ARM_SCU 64 select HAVE_ARM_SCU
63 select HAVE_ARM_TWD if SMP 65 select HAVE_ARM_TWD if SMP
@@ -72,6 +74,7 @@ config MACH_ARMADA_39X
72 bool "Marvell Armada 39x boards" 74 bool "Marvell Armada 39x boards"
73 depends on ARCH_MULTI_V7 75 depends on ARCH_MULTI_V7
74 select ARM_GIC 76 select ARM_GIC
77 select ARMADA_370_XP_IRQ
75 select ARMADA_39X_CLK 78 select ARMADA_39X_CLK
76 select CACHE_L2X0 79 select CACHE_L2X0
77 select HAVE_ARM_SCU 80 select HAVE_ARM_SCU
@@ -86,6 +89,7 @@ config MACH_ARMADA_39X
86config MACH_ARMADA_XP 89config MACH_ARMADA_XP
87 bool "Marvell Armada XP boards" 90 bool "Marvell Armada XP boards"
88 depends on ARCH_MULTI_V7 91 depends on ARCH_MULTI_V7
92 select ARMADA_370_XP_IRQ
89 select ARMADA_XP_CLK 93 select ARMADA_XP_CLK
90 select CPU_PJ4B 94 select CPU_PJ4B
91 select MACH_MVEBU_V7 95 select MACH_MVEBU_V7
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index d3da79dda629..a65eacf59918 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -151,6 +151,7 @@ config BMIPS_GENERIC
151 select CSRC_R4K 151 select CSRC_R4K
152 select SYNC_R4K 152 select SYNC_R4K
153 select COMMON_CLK 153 select COMMON_CLK
154 select BCM6345_L1_IRQ
154 select BCM7038_L1_IRQ 155 select BCM7038_L1_IRQ
155 select BCM7120_L2_IRQ 156 select BCM7120_L2_IRQ
156 select BRCMSTB_L2_IRQ 157 select BRCMSTB_L2_IRQ
@@ -2169,7 +2170,6 @@ config MIPS_MT_SMP
2169 select CPU_MIPSR2_IRQ_VI 2170 select CPU_MIPSR2_IRQ_VI
2170 select CPU_MIPSR2_IRQ_EI 2171 select CPU_MIPSR2_IRQ_EI
2171 select SYNC_R4K 2172 select SYNC_R4K
2172 select MIPS_GIC_IPI if MIPS_GIC
2173 select MIPS_MT 2173 select MIPS_MT
2174 select SMP 2174 select SMP
2175 select SMP_UP 2175 select SMP_UP
@@ -2267,7 +2267,6 @@ config MIPS_VPE_APSP_API_MT
2267config MIPS_CMP 2267config MIPS_CMP
2268 bool "MIPS CMP framework support (DEPRECATED)" 2268 bool "MIPS CMP framework support (DEPRECATED)"
2269 depends on SYS_SUPPORTS_MIPS_CMP && !CPU_MIPSR6 2269 depends on SYS_SUPPORTS_MIPS_CMP && !CPU_MIPSR6
2270 select MIPS_GIC_IPI if MIPS_GIC
2271 select SMP 2270 select SMP
2272 select SYNC_R4K 2271 select SYNC_R4K
2273 select SYS_SUPPORTS_SMP 2272 select SYS_SUPPORTS_SMP
@@ -2287,7 +2286,6 @@ config MIPS_CPS
2287 select MIPS_CM 2286 select MIPS_CM
2288 select MIPS_CPC 2287 select MIPS_CPC
2289 select MIPS_CPS_PM if HOTPLUG_CPU 2288 select MIPS_CPS_PM if HOTPLUG_CPU
2290 select MIPS_GIC_IPI if MIPS_GIC
2291 select SMP 2289 select SMP
2292 select SYNC_R4K if (CEVT_R4K || CSRC_R4K) 2290 select SYNC_R4K if (CEVT_R4K || CSRC_R4K)
2293 select SYS_SUPPORTS_HOTPLUG_CPU 2291 select SYS_SUPPORTS_HOTPLUG_CPU
@@ -2305,10 +2303,6 @@ config MIPS_CPS_PM
2305 select MIPS_CPC 2303 select MIPS_CPC
2306 bool 2304 bool
2307 2305
2308config MIPS_GIC_IPI
2309 depends on MIPS_GIC
2310 bool
2311
2312config MIPS_CM 2306config MIPS_CM
2313 bool 2307 bool
2314 2308
diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c
index 511c06560dc1..2dfff1f19004 100644
--- a/arch/mips/ath79/irq.c
+++ b/arch/mips/ath79/irq.c
@@ -26,90 +26,6 @@
26#include "common.h" 26#include "common.h"
27#include "machtypes.h" 27#include "machtypes.h"
28 28
29static void __init ath79_misc_intc_domain_init(
30 struct device_node *node, int irq);
31
32static void ath79_misc_irq_handler(struct irq_desc *desc)
33{
34 struct irq_domain *domain = irq_desc_get_handler_data(desc);
35 void __iomem *base = domain->host_data;
36 u32 pending;
37
38 pending = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS) &
39 __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
40
41 if (!pending) {
42 spurious_interrupt();
43 return;
44 }
45
46 while (pending) {
47 int bit = __ffs(pending);
48
49 generic_handle_irq(irq_linear_revmap(domain, bit));
50 pending &= ~BIT(bit);
51 }
52}
53
54static void ar71xx_misc_irq_unmask(struct irq_data *d)
55{
56 void __iomem *base = irq_data_get_irq_chip_data(d);
57 unsigned int irq = d->hwirq;
58 u32 t;
59
60 t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
61 __raw_writel(t | (1 << irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
62
63 /* flush write */
64 __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
65}
66
67static void ar71xx_misc_irq_mask(struct irq_data *d)
68{
69 void __iomem *base = irq_data_get_irq_chip_data(d);
70 unsigned int irq = d->hwirq;
71 u32 t;
72
73 t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
74 __raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
75
76 /* flush write */
77 __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
78}
79
80static void ar724x_misc_irq_ack(struct irq_data *d)
81{
82 void __iomem *base = irq_data_get_irq_chip_data(d);
83 unsigned int irq = d->hwirq;
84 u32 t;
85
86 t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
87 __raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_MISC_INT_STATUS);
88
89 /* flush write */
90 __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
91}
92
93static struct irq_chip ath79_misc_irq_chip = {
94 .name = "MISC",
95 .irq_unmask = ar71xx_misc_irq_unmask,
96 .irq_mask = ar71xx_misc_irq_mask,
97};
98
99static void __init ath79_misc_irq_init(void)
100{
101 if (soc_is_ar71xx() || soc_is_ar913x())
102 ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
103 else if (soc_is_ar724x() ||
104 soc_is_ar933x() ||
105 soc_is_ar934x() ||
106 soc_is_qca955x())
107 ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
108 else
109 BUG();
110
111 ath79_misc_intc_domain_init(NULL, ATH79_CPU_IRQ(6));
112}
113 29
114static void ar934x_ip2_irq_dispatch(struct irq_desc *desc) 30static void ar934x_ip2_irq_dispatch(struct irq_desc *desc)
115{ 31{
@@ -212,142 +128,12 @@ static void qca955x_irq_init(void)
212 irq_set_chained_handler(ATH79_CPU_IRQ(3), qca955x_ip3_irq_dispatch); 128 irq_set_chained_handler(ATH79_CPU_IRQ(3), qca955x_ip3_irq_dispatch);
213} 129}
214 130
215/*
216 * The IP2/IP3 lines are tied to a PCI/WMAC/USB device. Drivers for
217 * these devices typically allocate coherent DMA memory, however the
218 * DMA controller may still have some unsynchronized data in the FIFO.
219 * Issue a flush in the handlers to ensure that the driver sees
220 * the update.
221 *
222 * This array map the interrupt lines to the DDR write buffer channels.
223 */
224
225static unsigned irq_wb_chan[8] = {
226 -1, -1, -1, -1, -1, -1, -1, -1,
227};
228
229asmlinkage void plat_irq_dispatch(void)
230{
231 unsigned long pending;
232 int irq;
233
234 pending = read_c0_status() & read_c0_cause() & ST0_IM;
235
236 if (!pending) {
237 spurious_interrupt();
238 return;
239 }
240
241 pending >>= CAUSEB_IP;
242 while (pending) {
243 irq = fls(pending) - 1;
244 if (irq < ARRAY_SIZE(irq_wb_chan) && irq_wb_chan[irq] != -1)
245 ath79_ddr_wb_flush(irq_wb_chan[irq]);
246 do_IRQ(MIPS_CPU_IRQ_BASE + irq);
247 pending &= ~BIT(irq);
248 }
249}
250
251static int misc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
252{
253 irq_set_chip_and_handler(irq, &ath79_misc_irq_chip, handle_level_irq);
254 irq_set_chip_data(irq, d->host_data);
255 return 0;
256}
257
258static const struct irq_domain_ops misc_irq_domain_ops = {
259 .xlate = irq_domain_xlate_onecell,
260 .map = misc_map,
261};
262
263static void __init ath79_misc_intc_domain_init(
264 struct device_node *node, int irq)
265{
266 void __iomem *base = ath79_reset_base;
267 struct irq_domain *domain;
268
269 domain = irq_domain_add_legacy(node, ATH79_MISC_IRQ_COUNT,
270 ATH79_MISC_IRQ_BASE, 0, &misc_irq_domain_ops, base);
271 if (!domain)
272 panic("Failed to add MISC irqdomain");
273
274 /* Disable and clear all interrupts */
275 __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
276 __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
277
278 irq_set_chained_handler_and_data(irq, ath79_misc_irq_handler, domain);
279}
280
281static int __init ath79_misc_intc_of_init(
282 struct device_node *node, struct device_node *parent)
283{
284 int irq;
285
286 irq = irq_of_parse_and_map(node, 0);
287 if (!irq)
288 panic("Failed to get MISC IRQ");
289
290 ath79_misc_intc_domain_init(node, irq);
291 return 0;
292}
293
294static int __init ar7100_misc_intc_of_init(
295 struct device_node *node, struct device_node *parent)
296{
297 ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
298 return ath79_misc_intc_of_init(node, parent);
299}
300
301IRQCHIP_DECLARE(ar7100_misc_intc, "qca,ar7100-misc-intc",
302 ar7100_misc_intc_of_init);
303
304static int __init ar7240_misc_intc_of_init(
305 struct device_node *node, struct device_node *parent)
306{
307 ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
308 return ath79_misc_intc_of_init(node, parent);
309}
310
311IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc",
312 ar7240_misc_intc_of_init);
313
314static int __init ar79_cpu_intc_of_init(
315 struct device_node *node, struct device_node *parent)
316{
317 int err, i, count;
318
319 /* Fill the irq_wb_chan table */
320 count = of_count_phandle_with_args(
321 node, "qca,ddr-wb-channels", "#qca,ddr-wb-channel-cells");
322
323 for (i = 0; i < count; i++) {
324 struct of_phandle_args args;
325 u32 irq = i;
326
327 of_property_read_u32_index(
328 node, "qca,ddr-wb-channel-interrupts", i, &irq);
329 if (irq >= ARRAY_SIZE(irq_wb_chan))
330 continue;
331
332 err = of_parse_phandle_with_args(
333 node, "qca,ddr-wb-channels",
334 "#qca,ddr-wb-channel-cells",
335 i, &args);
336 if (err)
337 return err;
338
339 irq_wb_chan[irq] = args.args[0];
340 pr_info("IRQ: Set flush channel of IRQ%d to %d\n",
341 irq, args.args[0]);
342 }
343
344 return mips_cpu_irq_of_init(node, parent);
345}
346IRQCHIP_DECLARE(ar79_cpu_intc, "qca,ar7100-cpu-intc",
347 ar79_cpu_intc_of_init);
348
349void __init arch_init_irq(void) 131void __init arch_init_irq(void)
350{ 132{
133 unsigned irq_wb_chan2 = -1;
134 unsigned irq_wb_chan3 = -1;
135 bool misc_is_ar71xx;
136
351 if (mips_machtype == ATH79_MACH_GENERIC_OF) { 137 if (mips_machtype == ATH79_MACH_GENERIC_OF) {
352 irqchip_init(); 138 irqchip_init();
353 return; 139 return;
@@ -355,14 +141,26 @@ void __init arch_init_irq(void)
355 141
356 if (soc_is_ar71xx() || soc_is_ar724x() || 142 if (soc_is_ar71xx() || soc_is_ar724x() ||
357 soc_is_ar913x() || soc_is_ar933x()) { 143 soc_is_ar913x() || soc_is_ar933x()) {
358 irq_wb_chan[2] = 3; 144 irq_wb_chan2 = 3;
359 irq_wb_chan[3] = 2; 145 irq_wb_chan3 = 2;
360 } else if (soc_is_ar934x()) { 146 } else if (soc_is_ar934x()) {
361 irq_wb_chan[3] = 2; 147 irq_wb_chan3 = 2;
362 } 148 }
363 149
364 mips_cpu_irq_init(); 150 ath79_cpu_irq_init(irq_wb_chan2, irq_wb_chan3);
365 ath79_misc_irq_init(); 151
152 if (soc_is_ar71xx() || soc_is_ar913x())
153 misc_is_ar71xx = true;
154 else if (soc_is_ar724x() ||
155 soc_is_ar933x() ||
156 soc_is_ar934x() ||
157 soc_is_qca955x())
158 misc_is_ar71xx = false;
159 else
160 BUG();
161 ath79_misc_irq_init(
162 ath79_reset_base + AR71XX_RESET_REG_MISC_INT_STATUS,
163 ATH79_CPU_IRQ(6), ATH79_MISC_IRQ_BASE, misc_is_ar71xx);
366 164
367 if (soc_is_ar934x()) 165 if (soc_is_ar934x())
368 ar934x_ip2_irq_init(); 166 ar934x_ip2_irq_init();
diff --git a/arch/mips/bmips/irq.c b/arch/mips/bmips/irq.c
index e7fc6f9348ba..7efefcf44033 100644
--- a/arch/mips/bmips/irq.c
+++ b/arch/mips/bmips/irq.c
@@ -15,6 +15,12 @@
15#include <asm/irq_cpu.h> 15#include <asm/irq_cpu.h>
16#include <asm/time.h> 16#include <asm/time.h>
17 17
18static const struct of_device_id smp_intc_dt_match[] = {
19 { .compatible = "brcm,bcm7038-l1-intc" },
20 { .compatible = "brcm,bcm6345-l1-intc" },
21 {}
22};
23
18unsigned int get_c0_compare_int(void) 24unsigned int get_c0_compare_int(void)
19{ 25{
20 return CP0_LEGACY_COMPARE_IRQ; 26 return CP0_LEGACY_COMPARE_IRQ;
@@ -24,8 +30,8 @@ void __init arch_init_irq(void)
24{ 30{
25 struct device_node *dn; 31 struct device_node *dn;
26 32
27 /* Only the STB (bcm7038) controller supports SMP IRQ affinity */ 33 /* Only these controllers support SMP IRQ affinity */
28 dn = of_find_compatible_node(NULL, NULL, "brcm,bcm7038-l1-intc"); 34 dn = of_find_matching_node(NULL, smp_intc_dt_match);
29 if (dn) 35 if (dn)
30 of_node_put(dn); 36 of_node_put(dn);
31 else 37 else
diff --git a/arch/mips/include/asm/mach-ath79/ath79.h b/arch/mips/include/asm/mach-ath79/ath79.h
index 2b3487213d1e..441faa92c3cd 100644
--- a/arch/mips/include/asm/mach-ath79/ath79.h
+++ b/arch/mips/include/asm/mach-ath79/ath79.h
@@ -144,4 +144,8 @@ static inline u32 ath79_reset_rr(unsigned reg)
144void ath79_device_reset_set(u32 mask); 144void ath79_device_reset_set(u32 mask);
145void ath79_device_reset_clear(u32 mask); 145void ath79_device_reset_clear(u32 mask);
146 146
147void ath79_cpu_irq_init(unsigned irq_wb_chan2, unsigned irq_wb_chan3);
148void ath79_misc_irq_init(void __iomem *regs, int irq,
149 int irq_base, bool is_ar71xx);
150
147#endif /* __ASM_MACH_ATH79_H */ 151#endif /* __ASM_MACH_ATH79_H */
diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h
index 6ba1fb8b11e2..db7c322f057f 100644
--- a/arch/mips/include/asm/smp-ops.h
+++ b/arch/mips/include/asm/smp-ops.h
@@ -44,8 +44,9 @@ static inline void plat_smp_setup(void)
44 mp_ops->smp_setup(); 44 mp_ops->smp_setup();
45} 45}
46 46
47extern void gic_send_ipi_single(int cpu, unsigned int action); 47extern void mips_smp_send_ipi_single(int cpu, unsigned int action);
48extern void gic_send_ipi_mask(const struct cpumask *mask, unsigned int action); 48extern void mips_smp_send_ipi_mask(const struct cpumask *mask,
49 unsigned int action);
49 50
50#else /* !CONFIG_SMP */ 51#else /* !CONFIG_SMP */
51 52
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 68e2b7db9348..b0988fd62fcc 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -52,7 +52,6 @@ obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
52obj-$(CONFIG_MIPS_CMP) += smp-cmp.o 52obj-$(CONFIG_MIPS_CMP) += smp-cmp.o
53obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o 53obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o
54obj-$(CONFIG_MIPS_CPS_NS16550) += cps-vec-ns16550.o 54obj-$(CONFIG_MIPS_CPS_NS16550) += cps-vec-ns16550.o
55obj-$(CONFIG_MIPS_GIC_IPI) += smp-gic.o
56obj-$(CONFIG_MIPS_SPRAM) += spram.o 55obj-$(CONFIG_MIPS_SPRAM) += spram.o
57 56
58obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o 57obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index d5e0f949dc48..76923349b4fe 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -149,8 +149,8 @@ void __init cmp_prepare_cpus(unsigned int max_cpus)
149} 149}
150 150
151struct plat_smp_ops cmp_smp_ops = { 151struct plat_smp_ops cmp_smp_ops = {
152 .send_ipi_single = gic_send_ipi_single, 152 .send_ipi_single = mips_smp_send_ipi_single,
153 .send_ipi_mask = gic_send_ipi_mask, 153 .send_ipi_mask = mips_smp_send_ipi_mask,
154 .init_secondary = cmp_init_secondary, 154 .init_secondary = cmp_init_secondary,
155 .smp_finish = cmp_smp_finish, 155 .smp_finish = cmp_smp_finish,
156 .boot_secondary = cmp_boot_secondary, 156 .boot_secondary = cmp_boot_secondary,
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 2ad4e4c96d61..253e1409338c 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -472,8 +472,8 @@ static struct plat_smp_ops cps_smp_ops = {
472 .boot_secondary = cps_boot_secondary, 472 .boot_secondary = cps_boot_secondary,
473 .init_secondary = cps_init_secondary, 473 .init_secondary = cps_init_secondary,
474 .smp_finish = cps_smp_finish, 474 .smp_finish = cps_smp_finish,
475 .send_ipi_single = gic_send_ipi_single, 475 .send_ipi_single = mips_smp_send_ipi_single,
476 .send_ipi_mask = gic_send_ipi_mask, 476 .send_ipi_mask = mips_smp_send_ipi_mask,
477#ifdef CONFIG_HOTPLUG_CPU 477#ifdef CONFIG_HOTPLUG_CPU
478 .cpu_disable = cps_cpu_disable, 478 .cpu_disable = cps_cpu_disable,
479 .cpu_die = cps_cpu_die, 479 .cpu_die = cps_cpu_die,
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 86311a164ef1..4f9570a57e8d 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -121,7 +121,7 @@ static void vsmp_send_ipi_single(int cpu, unsigned int action)
121 121
122#ifdef CONFIG_MIPS_GIC 122#ifdef CONFIG_MIPS_GIC
123 if (gic_present) { 123 if (gic_present) {
124 gic_send_ipi_single(cpu, action); 124 mips_smp_send_ipi_single(cpu, action);
125 return; 125 return;
126 } 126 }
127#endif 127#endif
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 2b521e07b860..8b687fee0cb0 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -33,12 +33,16 @@
33#include <linux/cpu.h> 33#include <linux/cpu.h>
34#include <linux/err.h> 34#include <linux/err.h>
35#include <linux/ftrace.h> 35#include <linux/ftrace.h>
36#include <linux/irqdomain.h>
37#include <linux/of.h>
38#include <linux/of_irq.h>
36 39
37#include <linux/atomic.h> 40#include <linux/atomic.h>
38#include <asm/cpu.h> 41#include <asm/cpu.h>
39#include <asm/processor.h> 42#include <asm/processor.h>
40#include <asm/idle.h> 43#include <asm/idle.h>
41#include <asm/r4k-timer.h> 44#include <asm/r4k-timer.h>
45#include <asm/mips-cpc.h>
42#include <asm/mmu_context.h> 46#include <asm/mmu_context.h>
43#include <asm/time.h> 47#include <asm/time.h>
44#include <asm/setup.h> 48#include <asm/setup.h>
@@ -79,6 +83,11 @@ static cpumask_t cpu_core_setup_map;
79 83
80cpumask_t cpu_coherent_mask; 84cpumask_t cpu_coherent_mask;
81 85
86#ifdef CONFIG_GENERIC_IRQ_IPI
87static struct irq_desc *call_desc;
88static struct irq_desc *sched_desc;
89#endif
90
82static inline void set_cpu_sibling_map(int cpu) 91static inline void set_cpu_sibling_map(int cpu)
83{ 92{
84 int i; 93 int i;
@@ -146,6 +155,133 @@ void register_smp_ops(struct plat_smp_ops *ops)
146 mp_ops = ops; 155 mp_ops = ops;
147} 156}
148 157
158#ifdef CONFIG_GENERIC_IRQ_IPI
159void mips_smp_send_ipi_single(int cpu, unsigned int action)
160{
161 mips_smp_send_ipi_mask(cpumask_of(cpu), action);
162}
163
164void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
165{
166 unsigned long flags;
167 unsigned int core;
168 int cpu;
169
170 local_irq_save(flags);
171
172 switch (action) {
173 case SMP_CALL_FUNCTION:
174 __ipi_send_mask(call_desc, mask);
175 break;
176
177 case SMP_RESCHEDULE_YOURSELF:
178 __ipi_send_mask(sched_desc, mask);
179 break;
180
181 default:
182 BUG();
183 }
184
185 if (mips_cpc_present()) {
186 for_each_cpu(cpu, mask) {
187 core = cpu_data[cpu].core;
188
189 if (core == current_cpu_data.core)
190 continue;
191
192 while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
193 mips_cpc_lock_other(core);
194 write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
195 mips_cpc_unlock_other();
196 }
197 }
198 }
199
200 local_irq_restore(flags);
201}
202
203
204static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
205{
206 scheduler_ipi();
207
208 return IRQ_HANDLED;
209}
210
211static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
212{
213 generic_smp_call_function_interrupt();
214
215 return IRQ_HANDLED;
216}
217
218static struct irqaction irq_resched = {
219 .handler = ipi_resched_interrupt,
220 .flags = IRQF_PERCPU,
221 .name = "IPI resched"
222};
223
224static struct irqaction irq_call = {
225 .handler = ipi_call_interrupt,
226 .flags = IRQF_PERCPU,
227 .name = "IPI call"
228};
229
230static __init void smp_ipi_init_one(unsigned int virq,
231 struct irqaction *action)
232{
233 int ret;
234
235 irq_set_handler(virq, handle_percpu_irq);
236 ret = setup_irq(virq, action);
237 BUG_ON(ret);
238}
239
240static int __init mips_smp_ipi_init(void)
241{
242 unsigned int call_virq, sched_virq;
243 struct irq_domain *ipidomain;
244 struct device_node *node;
245
246 node = of_irq_find_parent(of_root);
247 ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
248
249 /*
250 * Some platforms have half DT setup. So if we found irq node but
251 * didn't find an ipidomain, try to search for one that is not in the
252 * DT.
253 */
254 if (node && !ipidomain)
255 ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
256
257 BUG_ON(!ipidomain);
258
259 call_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
260 BUG_ON(!call_virq);
261
262 sched_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
263 BUG_ON(!sched_virq);
264
265 if (irq_domain_is_ipi_per_cpu(ipidomain)) {
266 int cpu;
267
268 for_each_cpu(cpu, cpu_possible_mask) {
269 smp_ipi_init_one(call_virq + cpu, &irq_call);
270 smp_ipi_init_one(sched_virq + cpu, &irq_resched);
271 }
272 } else {
273 smp_ipi_init_one(call_virq, &irq_call);
274 smp_ipi_init_one(sched_virq, &irq_resched);
275 }
276
277 call_desc = irq_to_desc(call_virq);
278 sched_desc = irq_to_desc(sched_virq);
279
280 return 0;
281}
282early_initcall(mips_smp_ipi_init);
283#endif
284
149/* 285/*
150 * First C code run on the secondary CPUs after being started up by 286 * First C code run on the secondary CPUs after being started up by
151 * the master. 287 * the master.
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h
index cfc9a0d2d07c..a4fe16e42b7b 100644
--- a/arch/x86/include/asm/ipi.h
+++ b/arch/x86/include/asm/ipi.h
@@ -57,67 +57,13 @@ static inline void __xapic_wait_icr_idle(void)
57 cpu_relax(); 57 cpu_relax();
58} 58}
59 59
60static inline void 60void __default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest);
61__default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
62{
63 /*
64 * Subtle. In the case of the 'never do double writes' workaround
65 * we have to lock out interrupts to be safe. As we don't care
66 * of the value read we use an atomic rmw access to avoid costly
67 * cli/sti. Otherwise we use an even cheaper single atomic write
68 * to the APIC.
69 */
70 unsigned int cfg;
71
72 /*
73 * Wait for idle.
74 */
75 __xapic_wait_icr_idle();
76
77 /*
78 * No need to touch the target chip field
79 */
80 cfg = __prepare_ICR(shortcut, vector, dest);
81
82 /*
83 * Send the IPI. The write to APIC_ICR fires this off.
84 */
85 native_apic_mem_write(APIC_ICR, cfg);
86}
87 61
88/* 62/*
89 * This is used to send an IPI with no shorthand notation (the destination is 63 * This is used to send an IPI with no shorthand notation (the destination is
90 * specified in bits 56 to 63 of the ICR). 64 * specified in bits 56 to 63 of the ICR).
91 */ 65 */
92static inline void 66void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest);
93 __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
94{
95 unsigned long cfg;
96
97 /*
98 * Wait for idle.
99 */
100 if (unlikely(vector == NMI_VECTOR))
101 safe_apic_wait_icr_idle();
102 else
103 __xapic_wait_icr_idle();
104
105 /*
106 * prepare target chip field
107 */
108 cfg = __prepare_ICR2(mask);
109 native_apic_mem_write(APIC_ICR2, cfg);
110
111 /*
112 * program the ICR
113 */
114 cfg = __prepare_ICR(0, vector, dest);
115
116 /*
117 * Send the IPI. The write to APIC_ICR fires this off.
118 */
119 native_apic_mem_write(APIC_ICR, cfg);
120}
121 67
122extern void default_send_IPI_single(int cpu, int vector); 68extern void default_send_IPI_single(int cpu, int vector);
123extern void default_send_IPI_single_phys(int cpu, int vector); 69extern void default_send_IPI_single_phys(int cpu, int vector);
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 9968f30cca3e..76f89e2b245a 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -53,7 +53,7 @@ void flat_init_apic_ldr(void)
53 apic_write(APIC_LDR, val); 53 apic_write(APIC_LDR, val);
54} 54}
55 55
56static inline void _flat_send_IPI_mask(unsigned long mask, int vector) 56static void _flat_send_IPI_mask(unsigned long mask, int vector)
57{ 57{
58 unsigned long flags; 58 unsigned long flags;
59 59
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
index eb45fc9b6124..28bde88b0085 100644
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -18,6 +18,66 @@
18#include <asm/proto.h> 18#include <asm/proto.h>
19#include <asm/ipi.h> 19#include <asm/ipi.h>
20 20
21void __default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
22{
23 /*
24 * Subtle. In the case of the 'never do double writes' workaround
25 * we have to lock out interrupts to be safe. As we don't care
26 * of the value read we use an atomic rmw access to avoid costly
27 * cli/sti. Otherwise we use an even cheaper single atomic write
28 * to the APIC.
29 */
30 unsigned int cfg;
31
32 /*
33 * Wait for idle.
34 */
35 __xapic_wait_icr_idle();
36
37 /*
38 * No need to touch the target chip field
39 */
40 cfg = __prepare_ICR(shortcut, vector, dest);
41
42 /*
43 * Send the IPI. The write to APIC_ICR fires this off.
44 */
45 native_apic_mem_write(APIC_ICR, cfg);
46}
47
48/*
49 * This is used to send an IPI with no shorthand notation (the destination is
50 * specified in bits 56 to 63 of the ICR).
51 */
52void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
53{
54 unsigned long cfg;
55
56 /*
57 * Wait for idle.
58 */
59 if (unlikely(vector == NMI_VECTOR))
60 safe_apic_wait_icr_idle();
61 else
62 __xapic_wait_icr_idle();
63
64 /*
65 * prepare target chip field
66 */
67 cfg = __prepare_ICR2(mask);
68 native_apic_mem_write(APIC_ICR2, cfg);
69
70 /*
71 * program the ICR
72 */
73 cfg = __prepare_ICR(0, vector, dest);
74
75 /*
76 * Send the IPI. The write to APIC_ICR fires this off.
77 */
78 native_apic_mem_write(APIC_ICR, cfg);
79}
80
21void default_send_IPI_single_phys(int cpu, int vector) 81void default_send_IPI_single_phys(int cpu, int vector)
22{ 82{
23 unsigned long flags; 83 unsigned long flags;
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index fb50911b3940..7e8c441ff2de 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -60,6 +60,17 @@ config ARM_VIC_NR
60 The maximum number of VICs available in the system, for 60 The maximum number of VICs available in the system, for
61 power management. 61 power management.
62 62
63config ARMADA_370_XP_IRQ
64 bool
65 select GENERIC_IRQ_CHIP
66 select PCI_MSI_IRQ_DOMAIN if PCI_MSI
67
68config ALPINE_MSI
69 bool
70 depends on PCI && PCI_MSI
71 select GENERIC_IRQ_CHIP
72 select PCI_MSI_IRQ_DOMAIN
73
63config ATMEL_AIC_IRQ 74config ATMEL_AIC_IRQ
64 bool 75 bool
65 select GENERIC_IRQ_CHIP 76 select GENERIC_IRQ_CHIP
@@ -78,6 +89,11 @@ config I8259
78 bool 89 bool
79 select IRQ_DOMAIN 90 select IRQ_DOMAIN
80 91
92config BCM6345_L1_IRQ
93 bool
94 select GENERIC_IRQ_CHIP
95 select IRQ_DOMAIN
96
81config BCM7038_L1_IRQ 97config BCM7038_L1_IRQ
82 bool 98 bool
83 select GENERIC_IRQ_CHIP 99 select GENERIC_IRQ_CHIP
@@ -151,6 +167,11 @@ config ST_IRQCHIP
151 help 167 help
152 Enables SysCfg Controlled IRQs on STi based platforms. 168 Enables SysCfg Controlled IRQs on STi based platforms.
153 169
170config TANGO_IRQ
171 bool
172 select IRQ_DOMAIN
173 select GENERIC_IRQ_CHIP
174
154config TB10X_IRQC 175config TB10X_IRQC
155 bool 176 bool
156 select IRQ_DOMAIN 177 select IRQ_DOMAIN
@@ -160,6 +181,7 @@ config TS4800_IRQ
160 tristate "TS-4800 IRQ controller" 181 tristate "TS-4800 IRQ controller"
161 select IRQ_DOMAIN 182 select IRQ_DOMAIN
162 depends on HAS_IOMEM 183 depends on HAS_IOMEM
184 depends on SOC_IMX51 || COMPILE_TEST
163 help 185 help
164 Support for the TS-4800 FPGA IRQ controller 186 Support for the TS-4800 FPGA IRQ controller
165 187
@@ -193,6 +215,8 @@ config KEYSTONE_IRQ
193 215
194config MIPS_GIC 216config MIPS_GIC
195 bool 217 bool
218 select GENERIC_IRQ_IPI
219 select IRQ_DOMAIN_HIERARCHY
196 select MIPS_CM 220 select MIPS_CM
197 221
198config INGENIC_IRQ 222config INGENIC_IRQ
@@ -218,3 +242,7 @@ config IRQ_MXS
218 def_bool y if MACH_ASM9260 || ARCH_MXS 242 def_bool y if MACH_ASM9260 || ARCH_MXS
219 select IRQ_DOMAIN 243 select IRQ_DOMAIN
220 select STMP_DEVICE 244 select STMP_DEVICE
245
246config MVEBU_ODMI
247 bool
248 select GENERIC_MSI_IRQ_DOMAIN
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 18caacb60d58..b03cfcbbac6b 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -1,11 +1,13 @@
1obj-$(CONFIG_IRQCHIP) += irqchip.o 1obj-$(CONFIG_IRQCHIP) += irqchip.o
2 2
3obj-$(CONFIG_ALPINE_MSI) += irq-alpine-msi.o
4obj-$(CONFIG_ATH79) += irq-ath79-cpu.o
5obj-$(CONFIG_ATH79) += irq-ath79-misc.o
3obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o 6obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
4obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o 7obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o
5obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o 8obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o
6obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o 9obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o
7obj-$(CONFIG_ARCH_MMP) += irq-mmp.o 10obj-$(CONFIG_ARCH_MMP) += irq-mmp.o
8obj-$(CONFIG_ARCH_MVEBU) += irq-armada-370-xp.o
9obj-$(CONFIG_IRQ_MXS) += irq-mxs.o 11obj-$(CONFIG_IRQ_MXS) += irq-mxs.o
10obj-$(CONFIG_ARCH_TEGRA) += irq-tegra.o 12obj-$(CONFIG_ARCH_TEGRA) += irq-tegra.o
11obj-$(CONFIG_ARCH_S3C24XX) += irq-s3c24xx.o 13obj-$(CONFIG_ARCH_S3C24XX) += irq-s3c24xx.o
@@ -28,6 +30,7 @@ obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-g
28obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o 30obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o
29obj-$(CONFIG_ARM_NVIC) += irq-nvic.o 31obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
30obj-$(CONFIG_ARM_VIC) += irq-vic.o 32obj-$(CONFIG_ARM_VIC) += irq-vic.o
33obj-$(CONFIG_ARMADA_370_XP_IRQ) += irq-armada-370-xp.o
31obj-$(CONFIG_ATMEL_AIC_IRQ) += irq-atmel-aic-common.o irq-atmel-aic.o 34obj-$(CONFIG_ATMEL_AIC_IRQ) += irq-atmel-aic-common.o irq-atmel-aic.o
32obj-$(CONFIG_ATMEL_AIC5_IRQ) += irq-atmel-aic-common.o irq-atmel-aic5.o 35obj-$(CONFIG_ATMEL_AIC5_IRQ) += irq-atmel-aic-common.o irq-atmel-aic5.o
33obj-$(CONFIG_I8259) += irq-i8259.o 36obj-$(CONFIG_I8259) += irq-i8259.o
@@ -40,12 +43,14 @@ obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
40obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o 43obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o
41obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o 44obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o
42obj-$(CONFIG_ST_IRQCHIP) += irq-st.o 45obj-$(CONFIG_ST_IRQCHIP) += irq-st.o
46obj-$(CONFIG_TANGO_IRQ) += irq-tango.o
43obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o 47obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o
44obj-$(CONFIG_TS4800_IRQ) += irq-ts4800.o 48obj-$(CONFIG_TS4800_IRQ) += irq-ts4800.o
45obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o 49obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o
46obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o 50obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o
47obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o 51obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o
48obj-$(CONFIG_SOC_VF610) += irq-vf610-mscm-ir.o 52obj-$(CONFIG_SOC_VF610) += irq-vf610-mscm-ir.o
53obj-$(CONFIG_BCM6345_L1_IRQ) += irq-bcm6345-l1.o
49obj-$(CONFIG_BCM7038_L1_IRQ) += irq-bcm7038-l1.o 54obj-$(CONFIG_BCM7038_L1_IRQ) += irq-bcm7038-l1.o
50obj-$(CONFIG_BCM7120_L2_IRQ) += irq-bcm7120-l2.o 55obj-$(CONFIG_BCM7120_L2_IRQ) += irq-bcm7120-l2.o
51obj-$(CONFIG_BRCMSTB_L2_IRQ) += irq-brcmstb-l2.o 56obj-$(CONFIG_BRCMSTB_L2_IRQ) += irq-brcmstb-l2.o
@@ -59,3 +64,4 @@ obj-$(CONFIG_ARCH_SA1100) += irq-sa11x0.o
59obj-$(CONFIG_INGENIC_IRQ) += irq-ingenic.o 64obj-$(CONFIG_INGENIC_IRQ) += irq-ingenic.o
60obj-$(CONFIG_IMX_GPCV2) += irq-imx-gpcv2.o 65obj-$(CONFIG_IMX_GPCV2) += irq-imx-gpcv2.o
61obj-$(CONFIG_PIC32_EVIC) += irq-pic32-evic.o 66obj-$(CONFIG_PIC32_EVIC) += irq-pic32-evic.o
67obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o
diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
new file mode 100644
index 000000000000..25384255b30f
--- /dev/null
+++ b/drivers/irqchip/irq-alpine-msi.c
@@ -0,0 +1,293 @@
1/*
2 * Annapurna Labs MSIX support services
3 *
4 * Copyright (C) 2016, Amazon.com, Inc. or its affiliates. All Rights Reserved.
5 *
6 * Antoine Tenart <antoine.tenart@free-electrons.com>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/irqchip.h>
16#include <linux/irqchip/arm-gic.h>
17#include <linux/msi.h>
18#include <linux/of.h>
19#include <linux/of_address.h>
20#include <linux/of_irq.h>
21#include <linux/of_pci.h>
22#include <linux/pci.h>
23#include <linux/slab.h>
24
25#include <asm/irq.h>
26#include <asm-generic/msi.h>
27
28/* MSIX message address format: local GIC target */
29#define ALPINE_MSIX_SPI_TARGET_CLUSTER0 BIT(16)
30
31struct alpine_msix_data {
32 spinlock_t msi_map_lock;
33 phys_addr_t addr;
34 u32 spi_first; /* The SGI number that MSIs start */
35 u32 num_spis; /* The number of SGIs for MSIs */
36 unsigned long *msi_map;
37};
38
39static void alpine_msix_mask_msi_irq(struct irq_data *d)
40{
41 pci_msi_mask_irq(d);
42 irq_chip_mask_parent(d);
43}
44
45static void alpine_msix_unmask_msi_irq(struct irq_data *d)
46{
47 pci_msi_unmask_irq(d);
48 irq_chip_unmask_parent(d);
49}
50
51static struct irq_chip alpine_msix_irq_chip = {
52 .name = "MSIx",
53 .irq_mask = alpine_msix_mask_msi_irq,
54 .irq_unmask = alpine_msix_unmask_msi_irq,
55 .irq_eoi = irq_chip_eoi_parent,
56 .irq_set_affinity = irq_chip_set_affinity_parent,
57};
58
59static int alpine_msix_allocate_sgi(struct alpine_msix_data *priv, int num_req)
60{
61 int first;
62
63 spin_lock(&priv->msi_map_lock);
64
65 first = bitmap_find_next_zero_area(priv->msi_map, priv->num_spis, 0,
66 num_req, 0);
67 if (first >= priv->num_spis) {
68 spin_unlock(&priv->msi_map_lock);
69 return -ENOSPC;
70 }
71
72 bitmap_set(priv->msi_map, first, num_req);
73
74 spin_unlock(&priv->msi_map_lock);
75
76 return priv->spi_first + first;
77}
78
79static void alpine_msix_free_sgi(struct alpine_msix_data *priv, unsigned sgi,
80 int num_req)
81{
82 int first = sgi - priv->spi_first;
83
84 spin_lock(&priv->msi_map_lock);
85
86 bitmap_clear(priv->msi_map, first, num_req);
87
88 spin_unlock(&priv->msi_map_lock);
89}
90
91static void alpine_msix_compose_msi_msg(struct irq_data *data,
92 struct msi_msg *msg)
93{
94 struct alpine_msix_data *priv = irq_data_get_irq_chip_data(data);
95 phys_addr_t msg_addr = priv->addr;
96
97 msg_addr |= (data->hwirq << 3);
98
99 msg->address_hi = upper_32_bits(msg_addr);
100 msg->address_lo = lower_32_bits(msg_addr);
101 msg->data = 0;
102}
103
104static struct msi_domain_info alpine_msix_domain_info = {
105 .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
106 MSI_FLAG_PCI_MSIX,
107 .chip = &alpine_msix_irq_chip,
108};
109
110static struct irq_chip middle_irq_chip = {
111 .name = "alpine_msix_middle",
112 .irq_mask = irq_chip_mask_parent,
113 .irq_unmask = irq_chip_unmask_parent,
114 .irq_eoi = irq_chip_eoi_parent,
115 .irq_set_affinity = irq_chip_set_affinity_parent,
116 .irq_compose_msi_msg = alpine_msix_compose_msi_msg,
117};
118
119static int alpine_msix_gic_domain_alloc(struct irq_domain *domain,
120 unsigned int virq, int sgi)
121{
122 struct irq_fwspec fwspec;
123 struct irq_data *d;
124 int ret;
125
126 if (!is_of_node(domain->parent->fwnode))
127 return -EINVAL;
128
129 fwspec.fwnode = domain->parent->fwnode;
130 fwspec.param_count = 3;
131 fwspec.param[0] = 0;
132 fwspec.param[1] = sgi;
133 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
134
135 ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
136 if (ret)
137 return ret;
138
139 d = irq_domain_get_irq_data(domain->parent, virq);
140 d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
141
142 return 0;
143}
144
145static int alpine_msix_middle_domain_alloc(struct irq_domain *domain,
146 unsigned int virq,
147 unsigned int nr_irqs, void *args)
148{
149 struct alpine_msix_data *priv = domain->host_data;
150 int sgi, err, i;
151
152 sgi = alpine_msix_allocate_sgi(priv, nr_irqs);
153 if (sgi < 0)
154 return sgi;
155
156 for (i = 0; i < nr_irqs; i++) {
157 err = alpine_msix_gic_domain_alloc(domain, virq + i, sgi + i);
158 if (err)
159 goto err_sgi;
160
161 irq_domain_set_hwirq_and_chip(domain, virq + i, sgi + i,
162 &middle_irq_chip, priv);
163 }
164
165 return 0;
166
167err_sgi:
168 while (--i >= 0)
169 irq_domain_free_irqs_parent(domain, virq, i);
170 alpine_msix_free_sgi(priv, sgi, nr_irqs);
171 return err;
172}
173
174static void alpine_msix_middle_domain_free(struct irq_domain *domain,
175 unsigned int virq,
176 unsigned int nr_irqs)
177{
178 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
179 struct alpine_msix_data *priv = irq_data_get_irq_chip_data(d);
180
181 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
182 alpine_msix_free_sgi(priv, d->hwirq, nr_irqs);
183}
184
185static const struct irq_domain_ops alpine_msix_middle_domain_ops = {
186 .alloc = alpine_msix_middle_domain_alloc,
187 .free = alpine_msix_middle_domain_free,
188};
189
190static int alpine_msix_init_domains(struct alpine_msix_data *priv,
191 struct device_node *node)
192{
193 struct irq_domain *middle_domain, *msi_domain, *gic_domain;
194 struct device_node *gic_node;
195
196 gic_node = of_irq_find_parent(node);
197 if (!gic_node) {
198 pr_err("Failed to find the GIC node\n");
199 return -ENODEV;
200 }
201
202 gic_domain = irq_find_host(gic_node);
203 if (!gic_domain) {
204 pr_err("Failed to find the GIC domain\n");
205 return -ENXIO;
206 }
207
208 middle_domain = irq_domain_add_tree(NULL,
209 &alpine_msix_middle_domain_ops,
210 priv);
211 if (!middle_domain) {
212 pr_err("Failed to create the MSIX middle domain\n");
213 return -ENOMEM;
214 }
215
216 middle_domain->parent = gic_domain;
217
218 msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
219 &alpine_msix_domain_info,
220 middle_domain);
221 if (!msi_domain) {
222 pr_err("Failed to create MSI domain\n");
223 irq_domain_remove(middle_domain);
224 return -ENOMEM;
225 }
226
227 return 0;
228}
229
230static int alpine_msix_init(struct device_node *node,
231 struct device_node *parent)
232{
233 struct alpine_msix_data *priv;
234 struct resource res;
235 int ret;
236
237 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
238 if (!priv)
239 return -ENOMEM;
240
241 spin_lock_init(&priv->msi_map_lock);
242
243 ret = of_address_to_resource(node, 0, &res);
244 if (ret) {
245 pr_err("Failed to allocate resource\n");
246 goto err_priv;
247 }
248
249 /*
250 * The 20 least significant bits of addr provide direct information
251 * regarding the interrupt destination.
252 *
253 * To select the primary GIC as the target GIC, bits [18:17] must be set
254 * to 0x0. In this case, bit 16 (SPI_TARGET_CLUSTER0) must be set.
255 */
256 priv->addr = res.start & GENMASK_ULL(63,20);
257 priv->addr |= ALPINE_MSIX_SPI_TARGET_CLUSTER0;
258
259 if (of_property_read_u32(node, "al,msi-base-spi", &priv->spi_first)) {
260 pr_err("Unable to parse MSI base\n");
261 ret = -EINVAL;
262 goto err_priv;
263 }
264
265 if (of_property_read_u32(node, "al,msi-num-spis", &priv->num_spis)) {
266 pr_err("Unable to parse MSI numbers\n");
267 ret = -EINVAL;
268 goto err_priv;
269 }
270
271 priv->msi_map = kzalloc(sizeof(*priv->msi_map) * BITS_TO_LONGS(priv->num_spis),
272 GFP_KERNEL);
273 if (!priv->msi_map) {
274 ret = -ENOMEM;
275 goto err_priv;
276 }
277
278 pr_debug("Registering %d msixs, starting at %d\n",
279 priv->num_spis, priv->spi_first);
280
281 ret = alpine_msix_init_domains(priv, node);
282 if (ret)
283 goto err_map;
284
285 return 0;
286
287err_map:
288 kfree(priv->msi_map);
289err_priv:
290 kfree(priv);
291 return ret;
292}
293IRQCHIP_DECLARE(alpine_msix, "al,alpine-msix", alpine_msix_init);
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 3f3a8c3d2175..e7dc6cbda2a1 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -71,6 +71,7 @@ static u32 doorbell_mask_reg;
71static int parent_irq; 71static int parent_irq;
72#ifdef CONFIG_PCI_MSI 72#ifdef CONFIG_PCI_MSI
73static struct irq_domain *armada_370_xp_msi_domain; 73static struct irq_domain *armada_370_xp_msi_domain;
74static struct irq_domain *armada_370_xp_msi_inner_domain;
74static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR); 75static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
75static DEFINE_MUTEX(msi_used_lock); 76static DEFINE_MUTEX(msi_used_lock);
76static phys_addr_t msi_doorbell_addr; 77static phys_addr_t msi_doorbell_addr;
@@ -115,127 +116,102 @@ static void armada_370_xp_irq_unmask(struct irq_data *d)
115 116
116#ifdef CONFIG_PCI_MSI 117#ifdef CONFIG_PCI_MSI
117 118
118static int armada_370_xp_alloc_msi(void) 119static struct irq_chip armada_370_xp_msi_irq_chip = {
119{ 120 .name = "MPIC MSI",
120 int hwirq; 121 .irq_mask = pci_msi_mask_irq,
122 .irq_unmask = pci_msi_unmask_irq,
123};
121 124
122 mutex_lock(&msi_used_lock); 125static struct msi_domain_info armada_370_xp_msi_domain_info = {
123 hwirq = find_first_zero_bit(&msi_used, PCI_MSI_DOORBELL_NR); 126 .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
124 if (hwirq >= PCI_MSI_DOORBELL_NR) 127 MSI_FLAG_MULTI_PCI_MSI),
125 hwirq = -ENOSPC; 128 .chip = &armada_370_xp_msi_irq_chip,
126 else 129};
127 set_bit(hwirq, msi_used);
128 mutex_unlock(&msi_used_lock);
129 130
130 return hwirq; 131static void armada_370_xp_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
132{
133 msg->address_lo = lower_32_bits(msi_doorbell_addr);
134 msg->address_hi = upper_32_bits(msi_doorbell_addr);
135 msg->data = 0xf00 | (data->hwirq + PCI_MSI_DOORBELL_START);
131} 136}
132 137
133static void armada_370_xp_free_msi(int hwirq) 138static int armada_370_xp_msi_set_affinity(struct irq_data *irq_data,
139 const struct cpumask *mask, bool force)
134{ 140{
135 mutex_lock(&msi_used_lock); 141 return -EINVAL;
136 if (!test_bit(hwirq, msi_used))
137 pr_err("trying to free unused MSI#%d\n", hwirq);
138 else
139 clear_bit(hwirq, msi_used);
140 mutex_unlock(&msi_used_lock);
141} 142}
142 143
143static int armada_370_xp_setup_msi_irq(struct msi_controller *chip, 144static struct irq_chip armada_370_xp_msi_bottom_irq_chip = {
144 struct pci_dev *pdev, 145 .name = "MPIC MSI",
145 struct msi_desc *desc) 146 .irq_compose_msi_msg = armada_370_xp_compose_msi_msg,
146{ 147 .irq_set_affinity = armada_370_xp_msi_set_affinity,
147 struct msi_msg msg; 148};
148 int virq, hwirq;
149 149
150 /* We support MSI, but not MSI-X */ 150static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
151 if (desc->msi_attrib.is_msix) 151 unsigned int nr_irqs, void *args)
152 return -EINVAL; 152{
153 int hwirq, i;
153 154
154 hwirq = armada_370_xp_alloc_msi(); 155 mutex_lock(&msi_used_lock);
155 if (hwirq < 0)
156 return hwirq;
157 156
158 virq = irq_create_mapping(armada_370_xp_msi_domain, hwirq); 157 hwirq = bitmap_find_next_zero_area(msi_used, PCI_MSI_DOORBELL_NR,
159 if (!virq) { 158 0, nr_irqs, 0);
160 armada_370_xp_free_msi(hwirq); 159 if (hwirq >= PCI_MSI_DOORBELL_NR) {
161 return -EINVAL; 160 mutex_unlock(&msi_used_lock);
161 return -ENOSPC;
162 } 162 }
163 163
164 irq_set_msi_desc(virq, desc); 164 bitmap_set(msi_used, hwirq, nr_irqs);
165 165 mutex_unlock(&msi_used_lock);
166 msg.address_lo = msi_doorbell_addr;
167 msg.address_hi = 0;
168 msg.data = 0xf00 | (hwirq + 16);
169
170 pci_write_msi_msg(virq, &msg);
171 return 0;
172}
173 166
174static void armada_370_xp_teardown_msi_irq(struct msi_controller *chip, 167 for (i = 0; i < nr_irqs; i++) {
175 unsigned int irq) 168 irq_domain_set_info(domain, virq + i, hwirq + i,
176{ 169 &armada_370_xp_msi_bottom_irq_chip,
177 struct irq_data *d = irq_get_irq_data(irq); 170 domain->host_data, handle_simple_irq,
178 unsigned long hwirq = d->hwirq; 171 NULL, NULL);
172 }
179 173
180 irq_dispose_mapping(irq); 174 return hwirq;
181 armada_370_xp_free_msi(hwirq);
182} 175}
183 176
184static struct irq_chip armada_370_xp_msi_irq_chip = { 177static void armada_370_xp_msi_free(struct irq_domain *domain,
185 .name = "armada_370_xp_msi_irq", 178 unsigned int virq, unsigned int nr_irqs)
186 .irq_enable = pci_msi_unmask_irq,
187 .irq_disable = pci_msi_mask_irq,
188 .irq_mask = pci_msi_mask_irq,
189 .irq_unmask = pci_msi_unmask_irq,
190};
191
192static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq,
193 irq_hw_number_t hw)
194{ 179{
195 irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip, 180 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
196 handle_simple_irq);
197 181
198 return 0; 182 mutex_lock(&msi_used_lock);
183 bitmap_clear(msi_used, d->hwirq, nr_irqs);
184 mutex_unlock(&msi_used_lock);
199} 185}
200 186
201static const struct irq_domain_ops armada_370_xp_msi_irq_ops = { 187static const struct irq_domain_ops armada_370_xp_msi_domain_ops = {
202 .map = armada_370_xp_msi_map, 188 .alloc = armada_370_xp_msi_alloc,
189 .free = armada_370_xp_msi_free,
203}; 190};
204 191
205static int armada_370_xp_msi_init(struct device_node *node, 192static int armada_370_xp_msi_init(struct device_node *node,
206 phys_addr_t main_int_phys_base) 193 phys_addr_t main_int_phys_base)
207{ 194{
208 struct msi_controller *msi_chip;
209 u32 reg; 195 u32 reg;
210 int ret;
211 196
212 msi_doorbell_addr = main_int_phys_base + 197 msi_doorbell_addr = main_int_phys_base +
213 ARMADA_370_XP_SW_TRIG_INT_OFFS; 198 ARMADA_370_XP_SW_TRIG_INT_OFFS;
214 199
215 msi_chip = kzalloc(sizeof(*msi_chip), GFP_KERNEL); 200 armada_370_xp_msi_inner_domain =
216 if (!msi_chip) 201 irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
202 &armada_370_xp_msi_domain_ops, NULL);
203 if (!armada_370_xp_msi_inner_domain)
217 return -ENOMEM; 204 return -ENOMEM;
218 205
219 msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
220 msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
221 msi_chip->of_node = node;
222
223 armada_370_xp_msi_domain = 206 armada_370_xp_msi_domain =
224 irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR, 207 pci_msi_create_irq_domain(of_node_to_fwnode(node),
225 &armada_370_xp_msi_irq_ops, 208 &armada_370_xp_msi_domain_info,
226 NULL); 209 armada_370_xp_msi_inner_domain);
227 if (!armada_370_xp_msi_domain) { 210 if (!armada_370_xp_msi_domain) {
228 kfree(msi_chip); 211 irq_domain_remove(armada_370_xp_msi_inner_domain);
229 return -ENOMEM; 212 return -ENOMEM;
230 } 213 }
231 214
232 ret = of_pci_msi_chip_add(msi_chip);
233 if (ret < 0) {
234 irq_domain_remove(armada_370_xp_msi_domain);
235 kfree(msi_chip);
236 return ret;
237 }
238
239 reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS) 215 reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS)
240 | PCI_MSI_DOORBELL_MASK; 216 | PCI_MSI_DOORBELL_MASK;
241 217
@@ -280,7 +256,7 @@ static int armada_xp_set_affinity(struct irq_data *d,
280#endif 256#endif
281 257
282static struct irq_chip armada_370_xp_irq_chip = { 258static struct irq_chip armada_370_xp_irq_chip = {
283 .name = "armada_370_xp_irq", 259 .name = "MPIC",
284 .irq_mask = armada_370_xp_irq_mask, 260 .irq_mask = armada_370_xp_irq_mask,
285 .irq_mask_ack = armada_370_xp_irq_mask, 261 .irq_mask_ack = armada_370_xp_irq_mask,
286 .irq_unmask = armada_370_xp_irq_unmask, 262 .irq_unmask = armada_370_xp_irq_unmask,
@@ -427,12 +403,12 @@ static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
427 continue; 403 continue;
428 404
429 if (is_chained) { 405 if (is_chained) {
430 irq = irq_find_mapping(armada_370_xp_msi_domain, 406 irq = irq_find_mapping(armada_370_xp_msi_inner_domain,
431 msinr - 16); 407 msinr - PCI_MSI_DOORBELL_START);
432 generic_handle_irq(irq); 408 generic_handle_irq(irq);
433 } else { 409 } else {
434 irq = msinr - 16; 410 irq = msinr - PCI_MSI_DOORBELL_START;
435 handle_domain_irq(armada_370_xp_msi_domain, 411 handle_domain_irq(armada_370_xp_msi_inner_domain,
436 irq, regs); 412 irq, regs);
437 } 413 }
438 } 414 }
@@ -604,8 +580,8 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
604 armada_370_xp_mpic_domain = 580 armada_370_xp_mpic_domain =
605 irq_domain_add_linear(node, nr_irqs, 581 irq_domain_add_linear(node, nr_irqs,
606 &armada_370_xp_mpic_irq_ops, NULL); 582 &armada_370_xp_mpic_irq_ops, NULL);
607
608 BUG_ON(!armada_370_xp_mpic_domain); 583 BUG_ON(!armada_370_xp_mpic_domain);
584 armada_370_xp_mpic_domain->bus_token = DOMAIN_BUS_WIRED;
609 585
610 /* Setup for the boot CPU */ 586 /* Setup for the boot CPU */
611 armada_xp_mpic_perf_init(); 587 armada_xp_mpic_perf_init();
diff --git a/drivers/irqchip/irq-ath79-cpu.c b/drivers/irqchip/irq-ath79-cpu.c
new file mode 100644
index 000000000000..befe93c5a51a
--- /dev/null
+++ b/drivers/irqchip/irq-ath79-cpu.c
@@ -0,0 +1,97 @@
1/*
2 * Atheros AR71xx/AR724x/AR913x specific interrupt handling
3 *
4 * Copyright (C) 2015 Alban Bedel <albeu@free.fr>
5 * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
6 * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
7 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
8 *
9 * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License version 2 as published
13 * by the Free Software Foundation.
14 */
15
16#include <linux/interrupt.h>
17#include <linux/irqchip.h>
18#include <linux/of.h>
19
20#include <asm/irq_cpu.h>
21#include <asm/mach-ath79/ath79.h>
22
23/*
24 * The IP2/IP3 lines are tied to a PCI/WMAC/USB device. Drivers for
25 * these devices typically allocate coherent DMA memory, however the
26 * DMA controller may still have some unsynchronized data in the FIFO.
27 * Issue a flush in the handlers to ensure that the driver sees
28 * the update.
29 *
30 * This array map the interrupt lines to the DDR write buffer channels.
31 */
32
33static unsigned irq_wb_chan[8] = {
34 -1, -1, -1, -1, -1, -1, -1, -1,
35};
36
37asmlinkage void plat_irq_dispatch(void)
38{
39 unsigned long pending;
40 int irq;
41
42 pending = read_c0_status() & read_c0_cause() & ST0_IM;
43
44 if (!pending) {
45 spurious_interrupt();
46 return;
47 }
48
49 pending >>= CAUSEB_IP;
50 while (pending) {
51 irq = fls(pending) - 1;
52 if (irq < ARRAY_SIZE(irq_wb_chan) && irq_wb_chan[irq] != -1)
53 ath79_ddr_wb_flush(irq_wb_chan[irq]);
54 do_IRQ(MIPS_CPU_IRQ_BASE + irq);
55 pending &= ~BIT(irq);
56 }
57}
58
59static int __init ar79_cpu_intc_of_init(
60 struct device_node *node, struct device_node *parent)
61{
62 int err, i, count;
63
64 /* Fill the irq_wb_chan table */
65 count = of_count_phandle_with_args(
66 node, "qca,ddr-wb-channels", "#qca,ddr-wb-channel-cells");
67
68 for (i = 0; i < count; i++) {
69 struct of_phandle_args args;
70 u32 irq = i;
71
72 of_property_read_u32_index(
73 node, "qca,ddr-wb-channel-interrupts", i, &irq);
74 if (irq >= ARRAY_SIZE(irq_wb_chan))
75 continue;
76
77 err = of_parse_phandle_with_args(
78 node, "qca,ddr-wb-channels",
79 "#qca,ddr-wb-channel-cells",
80 i, &args);
81 if (err)
82 return err;
83
84 irq_wb_chan[irq] = args.args[0];
85 }
86
87 return mips_cpu_irq_of_init(node, parent);
88}
89IRQCHIP_DECLARE(ar79_cpu_intc, "qca,ar7100-cpu-intc",
90 ar79_cpu_intc_of_init);
91
92void __init ath79_cpu_irq_init(unsigned irq_wb_chan2, unsigned irq_wb_chan3)
93{
94 irq_wb_chan[2] = irq_wb_chan2;
95 irq_wb_chan[3] = irq_wb_chan3;
96 mips_cpu_irq_init();
97}
diff --git a/drivers/irqchip/irq-ath79-misc.c b/drivers/irqchip/irq-ath79-misc.c
new file mode 100644
index 000000000000..aa7290784636
--- /dev/null
+++ b/drivers/irqchip/irq-ath79-misc.c
@@ -0,0 +1,189 @@
1/*
2 * Atheros AR71xx/AR724x/AR913x MISC interrupt controller
3 *
4 * Copyright (C) 2015 Alban Bedel <albeu@free.fr>
5 * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
6 * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
7 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
8 *
9 * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License version 2 as published
13 * by the Free Software Foundation.
14 */
15
16#include <linux/irqchip.h>
17#include <linux/irqchip/chained_irq.h>
18#include <linux/of_address.h>
19#include <linux/of_irq.h>
20
/* Offsets of the MISC registers within the mapped block. */
#define AR71XX_RESET_REG_MISC_INT_STATUS 0
#define AR71XX_RESET_REG_MISC_INT_ENABLE 4

/* The MISC block exposes a single 32-bit status/enable word. */
#define ATH79_MISC_IRQ_COUNT 32
25
/*
 * Chained handler for the MISC interrupt line.
 *
 * The domain's host_data holds the iomem base of the MISC register
 * block.  Only sources that are both pending (STATUS) and enabled
 * (ENABLE) are dispatched, lowest bit first.
 */
static void ath79_misc_irq_handler(struct irq_desc *desc)
{
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	void __iomem *base = domain->host_data;
	u32 pending;

	chained_irq_enter(chip, desc);

	pending = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS) &
		  __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);

	if (!pending) {
		spurious_interrupt();
		chained_irq_exit(chip, desc);
		return;
	}

	while (pending) {
		int bit = __ffs(pending);

		generic_handle_irq(irq_linear_revmap(domain, bit));
		pending &= ~BIT(bit);
	}

	chained_irq_exit(chip, desc);
}
53
/* Enable one MISC source: read-modify-write its bit into ENABLE. */
static void ar71xx_misc_irq_unmask(struct irq_data *d)
{
	void __iomem *base = irq_data_get_irq_chip_data(d);
	unsigned int irq = d->hwirq;
	u32 t;

	t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
	__raw_writel(t | BIT(irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);

	/* flush write */
	__raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
}
66
/* Disable one MISC source: read-modify-write its bit out of ENABLE. */
static void ar71xx_misc_irq_mask(struct irq_data *d)
{
	void __iomem *base = irq_data_get_irq_chip_data(d);
	unsigned int irq = d->hwirq;
	u32 t;

	t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
	__raw_writel(t & ~BIT(irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);

	/* flush write */
	__raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
}
79
/* AR724x+: acknowledge a source by clearing its bit in STATUS. */
static void ar724x_misc_irq_ack(struct irq_data *d)
{
	void __iomem *base = irq_data_get_irq_chip_data(d);
	unsigned int irq = d->hwirq;
	u32 t;

	t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
	__raw_writel(t & ~BIT(irq), base + AR71XX_RESET_REG_MISC_INT_STATUS);

	/* flush write */
	__raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
}
92
/*
 * Shared irq_chip for all MISC sources.  Either .irq_mask_ack (AR71xx)
 * or .irq_ack (AR724x+) is filled in at probe time by the per-SoC init
 * functions below.
 */
static struct irq_chip ath79_misc_irq_chip = {
	.name = "MISC",
	.irq_unmask = ar71xx_misc_irq_unmask,
	.irq_mask = ar71xx_misc_irq_mask,
};
98
99static int misc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
100{
101 irq_set_chip_and_handler(irq, &ath79_misc_irq_chip, handle_level_irq);
102 irq_set_chip_data(irq, d->host_data);
103 return 0;
104}
105
/* One-cell DT interrupt specifier (hwirq number only). */
static const struct irq_domain_ops misc_irq_domain_ops = {
	.xlate = irq_domain_xlate_onecell,
	.map = misc_map,
};
110
/*
 * Common MISC bring-up: mask and ack every source, then chain the MISC
 * handler onto the parent CPU interrupt line @irq.
 */
static void __init ath79_misc_intc_domain_init(
	struct irq_domain *domain, int irq)
{
	void __iomem *base = domain->host_data;

	/* Disable and clear all interrupts */
	__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
	__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);

	irq_set_chained_handler_and_data(irq, ath79_misc_irq_handler, domain);
}
122
123static int __init ath79_misc_intc_of_init(
124 struct device_node *node, struct device_node *parent)
125{
126 struct irq_domain *domain;
127 void __iomem *base;
128 int irq;
129
130 irq = irq_of_parse_and_map(node, 0);
131 if (!irq) {
132 pr_err("Failed to get MISC IRQ\n");
133 return -EINVAL;
134 }
135
136 base = of_iomap(node, 0);
137 if (!base) {
138 pr_err("Failed to get MISC IRQ registers\n");
139 return -ENOMEM;
140 }
141
142 domain = irq_domain_add_linear(node, ATH79_MISC_IRQ_COUNT,
143 &misc_irq_domain_ops, base);
144 if (!domain) {
145 pr_err("Failed to add MISC irqdomain\n");
146 return -EINVAL;
147 }
148
149 ath79_misc_intc_domain_init(domain, irq);
150 return 0;
151}
152
/* AR71xx flavour: the MISC block can mask and ack in a single write. */
static int __init ar7100_misc_intc_of_init(
	struct device_node *node, struct device_node *parent)
{
	ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
	return ath79_misc_intc_of_init(node, parent);
}

IRQCHIP_DECLARE(ar7100_misc_intc, "qca,ar7100-misc-intc",
		ar7100_misc_intc_of_init);
162
/* AR724x+ flavour: uses a dedicated ack on the STATUS register. */
static int __init ar7240_misc_intc_of_init(
	struct device_node *node, struct device_node *parent)
{
	ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
	return ath79_misc_intc_of_init(node, parent);
}

IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc",
		ar7240_misc_intc_of_init);
172
/*
 * Legacy (non-DT) MISC init.
 * @regs: already-mapped MISC register block
 * @irq: parent CPU interrupt the MISC block is chained onto
 * @irq_base: first Linux IRQ number of the legacy domain
 * @is_ar71xx: selects mask_ack (AR71xx) vs ack (AR724x+) behaviour
 */
void __init ath79_misc_irq_init(void __iomem *regs, int irq,
				int irq_base, bool is_ar71xx)
{
	struct irq_domain *domain;

	if (is_ar71xx)
		ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
	else
		ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;

	domain = irq_domain_add_legacy(NULL, ATH79_MISC_IRQ_COUNT,
			irq_base, 0, &misc_irq_domain_ops, regs);
	if (!domain)
		panic("Failed to create MISC irqdomain");

	ath79_misc_intc_domain_init(domain, irq);
}
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index 37199b9b2cfa..28b26c80f4cf 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -80,16 +80,10 @@ int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val)
80 return 0; 80 return 0;
81} 81}
82 82
83int aic_common_set_priority(int priority, unsigned *val) 83void aic_common_set_priority(int priority, unsigned *val)
84{ 84{
85 if (priority < AT91_AIC_IRQ_MIN_PRIORITY ||
86 priority > AT91_AIC_IRQ_MAX_PRIORITY)
87 return -EINVAL;
88
89 *val &= ~AT91_AIC_PRIOR; 85 *val &= ~AT91_AIC_PRIOR;
90 *val |= priority; 86 *val |= priority;
91
92 return 0;
93} 87}
94 88
95int aic_common_irq_domain_xlate(struct irq_domain *d, 89int aic_common_irq_domain_xlate(struct irq_domain *d,
@@ -193,7 +187,7 @@ void __init aic_common_rtt_irq_fixup(struct device_node *root)
193 } 187 }
194} 188}
195 189
196void __init aic_common_irq_fixup(const struct of_device_id *matches) 190static void __init aic_common_irq_fixup(const struct of_device_id *matches)
197{ 191{
198 struct device_node *root = of_find_node_by_path("/"); 192 struct device_node *root = of_find_node_by_path("/");
199 const struct of_device_id *match; 193 const struct of_device_id *match;
@@ -214,7 +208,8 @@ void __init aic_common_irq_fixup(const struct of_device_id *matches)
214 208
215struct irq_domain *__init aic_common_of_init(struct device_node *node, 209struct irq_domain *__init aic_common_of_init(struct device_node *node,
216 const struct irq_domain_ops *ops, 210 const struct irq_domain_ops *ops,
217 const char *name, int nirqs) 211 const char *name, int nirqs,
212 const struct of_device_id *matches)
218{ 213{
219 struct irq_chip_generic *gc; 214 struct irq_chip_generic *gc;
220 struct irq_domain *domain; 215 struct irq_domain *domain;
@@ -264,6 +259,7 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node,
264 } 259 }
265 260
266 aic_common_ext_irq_of_init(domain); 261 aic_common_ext_irq_of_init(domain);
262 aic_common_irq_fixup(matches);
267 263
268 return domain; 264 return domain;
269 265
diff --git a/drivers/irqchip/irq-atmel-aic-common.h b/drivers/irqchip/irq-atmel-aic-common.h
index 603f0a9d5411..af60376d50de 100644
--- a/drivers/irqchip/irq-atmel-aic-common.h
+++ b/drivers/irqchip/irq-atmel-aic-common.h
@@ -19,7 +19,7 @@
19 19
20int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val); 20int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val);
21 21
22int aic_common_set_priority(int priority, unsigned *val); 22void aic_common_set_priority(int priority, unsigned *val);
23 23
24int aic_common_irq_domain_xlate(struct irq_domain *d, 24int aic_common_irq_domain_xlate(struct irq_domain *d,
25 struct device_node *ctrlr, 25 struct device_node *ctrlr,
@@ -30,12 +30,11 @@ int aic_common_irq_domain_xlate(struct irq_domain *d,
30 30
31struct irq_domain *__init aic_common_of_init(struct device_node *node, 31struct irq_domain *__init aic_common_of_init(struct device_node *node,
32 const struct irq_domain_ops *ops, 32 const struct irq_domain_ops *ops,
33 const char *name, int nirqs); 33 const char *name, int nirqs,
34 const struct of_device_id *matches);
34 35
35void __init aic_common_rtc_irq_fixup(struct device_node *root); 36void __init aic_common_rtc_irq_fixup(struct device_node *root);
36 37
37void __init aic_common_rtt_irq_fixup(struct device_node *root); 38void __init aic_common_rtt_irq_fixup(struct device_node *root);
38 39
39void __init aic_common_irq_fixup(const struct of_device_id *matches);
40
41#endif /* __IRQ_ATMEL_AIC_COMMON_H */ 40#endif /* __IRQ_ATMEL_AIC_COMMON_H */
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
index 8a0c7f288198..112e17c2768b 100644
--- a/drivers/irqchip/irq-atmel-aic.c
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -196,9 +196,8 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
196 196
197 irq_gc_lock(gc); 197 irq_gc_lock(gc);
198 smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq)); 198 smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq));
199 ret = aic_common_set_priority(intspec[2], &smr); 199 aic_common_set_priority(intspec[2], &smr);
200 if (!ret) 200 irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
201 irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
202 irq_gc_unlock(gc); 201 irq_gc_unlock(gc);
203 202
204 return ret; 203 return ret;
@@ -248,12 +247,10 @@ static int __init aic_of_init(struct device_node *node,
248 return -EEXIST; 247 return -EEXIST;
249 248
250 domain = aic_common_of_init(node, &aic_irq_ops, "atmel-aic", 249 domain = aic_common_of_init(node, &aic_irq_ops, "atmel-aic",
251 NR_AIC_IRQS); 250 NR_AIC_IRQS, aic_irq_fixups);
252 if (IS_ERR(domain)) 251 if (IS_ERR(domain))
253 return PTR_ERR(domain); 252 return PTR_ERR(domain);
254 253
255 aic_common_irq_fixup(aic_irq_fixups);
256
257 aic_domain = domain; 254 aic_domain = domain;
258 gc = irq_get_domain_generic_chip(domain, 0); 255 gc = irq_get_domain_generic_chip(domain, 0);
259 256
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index 62bb840c613f..4f0d068e1abe 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -272,9 +272,8 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
272 irq_gc_lock(bgc); 272 irq_gc_lock(bgc);
273 irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR); 273 irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR);
274 smr = irq_reg_readl(bgc, AT91_AIC5_SMR); 274 smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
275 ret = aic_common_set_priority(intspec[2], &smr); 275 aic_common_set_priority(intspec[2], &smr);
276 if (!ret) 276 irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
277 irq_reg_writel(bgc, intspec[2] | smr, AT91_AIC5_SMR);
278 irq_gc_unlock(bgc); 277 irq_gc_unlock(bgc);
279 278
280 return ret; 279 return ret;
@@ -312,12 +311,10 @@ static int __init aic5_of_init(struct device_node *node,
312 return -EEXIST; 311 return -EEXIST;
313 312
314 domain = aic_common_of_init(node, &aic5_irq_ops, "atmel-aic5", 313 domain = aic_common_of_init(node, &aic5_irq_ops, "atmel-aic5",
315 nirqs); 314 nirqs, aic5_irq_fixups);
316 if (IS_ERR(domain)) 315 if (IS_ERR(domain))
317 return PTR_ERR(domain); 316 return PTR_ERR(domain);
318 317
319 aic_common_irq_fixup(aic5_irq_fixups);
320
321 aic5_domain = domain; 318 aic5_domain = domain;
322 nchips = aic5_domain->revmap_size / 32; 319 nchips = aic5_domain->revmap_size / 32;
323 for (i = 0; i < nchips; i++) { 320 for (i = 0; i < nchips; i++) {
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index 963065a0d774..b6e950d4782a 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -229,7 +229,6 @@ int __init bcm2836_smp_boot_secondary(unsigned int cpu,
229 unsigned long secondary_startup_phys = 229 unsigned long secondary_startup_phys =
230 (unsigned long)virt_to_phys((void *)secondary_startup); 230 (unsigned long)virt_to_phys((void *)secondary_startup);
231 231
232 dsb();
233 writel(secondary_startup_phys, 232 writel(secondary_startup_phys,
234 intc.base + LOCAL_MAILBOX3_SET0 + 16 * cpu); 233 intc.base + LOCAL_MAILBOX3_SET0 + 16 * cpu);
235 234
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
new file mode 100644
index 000000000000..b844c89a9506
--- /dev/null
+++ b/drivers/irqchip/irq-bcm6345-l1.c
@@ -0,0 +1,364 @@
1/*
2 * Broadcom BCM6345 style Level 1 interrupt controller driver
3 *
4 * Copyright (C) 2014 Broadcom Corporation
5 * Copyright 2015 Simon Arlott
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This is based on the BCM7038 (which supports SMP) but with a single
12 * enable register instead of separate mask/set/clear registers.
13 *
14 * The BCM3380 has a similar mask/status register layout, but each pair
15 * of words is at separate locations (and SMP is not supported).
16 *
17 * ENABLE/STATUS words are packed next to each other for each CPU:
18 *
19 * BCM6368:
20 * 0x1000_0020: CPU0_W0_ENABLE
21 * 0x1000_0024: CPU0_W1_ENABLE
 * 0x1000_0028: CPU0_W0_STATUS IRQs 32-63
23 * 0x1000_002c: CPU0_W1_STATUS IRQs 0-31
24 * 0x1000_0030: CPU1_W0_ENABLE
25 * 0x1000_0034: CPU1_W1_ENABLE
 * 0x1000_0038: CPU1_W0_STATUS IRQs 32-63
27 * 0x1000_003c: CPU1_W1_STATUS IRQs 0-31
28 *
29 * BCM63168:
30 * 0x1000_0020: CPU0_W0_ENABLE
31 * 0x1000_0024: CPU0_W1_ENABLE
32 * 0x1000_0028: CPU0_W2_ENABLE
33 * 0x1000_002c: CPU0_W3_ENABLE
34 * 0x1000_0030: CPU0_W0_STATUS IRQs 96-127
35 * 0x1000_0034: CPU0_W1_STATUS IRQs 64-95
36 * 0x1000_0038: CPU0_W2_STATUS IRQs 32-63
37 * 0x1000_003c: CPU0_W3_STATUS IRQs 0-31
38 * 0x1000_0040: CPU1_W0_ENABLE
39 * 0x1000_0044: CPU1_W1_ENABLE
40 * 0x1000_0048: CPU1_W2_ENABLE
41 * 0x1000_004c: CPU1_W3_ENABLE
42 * 0x1000_0050: CPU1_W0_STATUS IRQs 96-127
43 * 0x1000_0054: CPU1_W1_STATUS IRQs 64-95
44 * 0x1000_0058: CPU1_W2_STATUS IRQs 32-63
45 * 0x1000_005c: CPU1_W3_STATUS IRQs 0-31
46 *
47 * IRQs are numbered in CPU native endian order
48 * (which is big-endian in these examples)
49 */
50
51#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
52
53#include <linux/bitops.h>
54#include <linux/cpumask.h>
55#include <linux/kconfig.h>
56#include <linux/kernel.h>
57#include <linux/init.h>
58#include <linux/interrupt.h>
59#include <linux/io.h>
60#include <linux/ioport.h>
61#include <linux/irq.h>
62#include <linux/irqdomain.h>
63#include <linux/module.h>
64#include <linux/of.h>
65#include <linux/of_irq.h>
66#include <linux/of_address.h>
67#include <linux/of_platform.h>
68#include <linux/platform_device.h>
69#include <linux/slab.h>
70#include <linux/smp.h>
71#include <linux/types.h>
72#include <linux/irqchip.h>
73#include <linux/irqchip/chained_irq.h>
74
/* Each IRQ word has one ENABLE and one STATUS register (2 * u32). */
#define IRQS_PER_WORD 32
#define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 2)

struct bcm6345_l1_cpu;

/* Controller state shared by all CPUs. */
struct bcm6345_l1_chip {
	raw_spinlock_t lock;		/* guards enable_cache + registers */
	unsigned int n_words;		/* IRQ words per register bank */
	struct irq_domain *domain;
	struct cpumask cpumask;		/* CPUs with an initialized bank */
	struct bcm6345_l1_cpu *cpus[NR_CPUS];
};

/* Per-CPU register bank. */
struct bcm6345_l1_cpu {
	void __iomem *map_base;		/* mapped ENABLE/STATUS block */
	unsigned int parent_irq;	/* chained parent interrupt */
	u32 enable_cache[];		/* shadow of the ENABLE words */
};
93
/*
 * Byte offset of the ENABLE register for @word within a CPU's bank.
 * On big-endian systems the words are laid out in reverse order so
 * hwirq numbering matches the CPU's native endianness (see the layout
 * tables in the header comment).
 */
static inline unsigned int reg_enable(struct bcm6345_l1_chip *intc,
					unsigned int word)
{
#ifdef __BIG_ENDIAN
	return (1 * intc->n_words - word - 1) * sizeof(u32);
#else
	return (0 * intc->n_words + word) * sizeof(u32);
#endif
}
103
/*
 * Byte offset of the STATUS register for @word: the STATUS words
 * directly follow the n_words ENABLE words, mirrored on big-endian.
 */
static inline unsigned int reg_status(struct bcm6345_l1_chip *intc,
					unsigned int word)
{
#ifdef __BIG_ENDIAN
	return (2 * intc->n_words - word - 1) * sizeof(u32);
#else
	return (1 * intc->n_words + word) * sizeof(u32);
#endif
}
113
/* First CPU that has a register bank AND is in the IRQ's affinity mask. */
static inline unsigned int cpu_for_irq(struct bcm6345_l1_chip *intc,
					struct irq_data *d)
{
	return cpumask_first_and(&intc->cpumask, irq_data_get_affinity_mask(d));
}
119
/*
 * Chained handler for one CPU's L1 parent interrupt.  Scans every
 * STATUS word of the current CPU's bank and dispatches each source
 * that is both pending and enabled.
 */
static void bcm6345_l1_irq_handle(struct irq_desc *desc)
{
	struct bcm6345_l1_chip *intc = irq_desc_get_handler_data(desc);
	struct bcm6345_l1_cpu *cpu;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int idx;

#ifdef CONFIG_SMP
	/* Each CPU services its own register bank. */
	cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
#else
	cpu = intc->cpus[0];
#endif

	chained_irq_enter(chip, desc);

	for (idx = 0; idx < intc->n_words; idx++) {
		int base = idx * IRQS_PER_WORD;
		unsigned long pending;
		irq_hw_number_t hwirq;
		unsigned int irq;

		/* Only sources that are pending AND enabled. */
		pending = __raw_readl(cpu->map_base + reg_status(intc, idx));
		pending &= __raw_readl(cpu->map_base + reg_enable(intc, idx));

		for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
			irq = irq_linear_revmap(intc->domain, base + hwirq);
			if (irq)
				do_IRQ(irq);
			else
				spurious_interrupt();
		}
	}

	chained_irq_exit(chip, desc);
}
155
/* Set the enable bit on the IRQ's affine CPU; caller holds intc->lock. */
static inline void __bcm6345_l1_unmask(struct irq_data *d)
{
	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
	u32 word = d->hwirq / IRQS_PER_WORD;
	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
	unsigned int cpu_idx = cpu_for_irq(intc, d);

	intc->cpus[cpu_idx]->enable_cache[word] |= mask;
	__raw_writel(intc->cpus[cpu_idx]->enable_cache[word],
		intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));
}
167
/* Clear the enable bit on the IRQ's affine CPU; caller holds intc->lock. */
static inline void __bcm6345_l1_mask(struct irq_data *d)
{
	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
	u32 word = d->hwirq / IRQS_PER_WORD;
	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
	unsigned int cpu_idx = cpu_for_irq(intc, d);

	intc->cpus[cpu_idx]->enable_cache[word] &= ~mask;
	__raw_writel(intc->cpus[cpu_idx]->enable_cache[word],
		intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));
}
179
/* irq_chip .irq_unmask: locked wrapper around __bcm6345_l1_unmask(). */
static void bcm6345_l1_unmask(struct irq_data *d)
{
	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&intc->lock, flags);
	__bcm6345_l1_unmask(d);
	raw_spin_unlock_irqrestore(&intc->lock, flags);
}
189
/* irq_chip .irq_mask: locked wrapper around __bcm6345_l1_mask(). */
static void bcm6345_l1_mask(struct irq_data *d)
{
	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&intc->lock, flags);
	__bcm6345_l1_mask(d);
	raw_spin_unlock_irqrestore(&intc->lock, flags);
}
199
/*
 * Move an IRQ to another CPU's register bank.  If the IRQ was enabled
 * it is masked on the old CPU and re-enabled on the new one, under
 * intc->lock so no window overlaps a concurrent mask/unmask.  Returns
 * IRQ_SET_MASK_OK_NOCOPY because the affinity mask is copied here.
 */
static int bcm6345_l1_set_affinity(struct irq_data *d,
				const struct cpumask *dest,
				bool force)
{
	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
	u32 word = d->hwirq / IRQS_PER_WORD;
	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
	unsigned int old_cpu = cpu_for_irq(intc, d);
	unsigned int new_cpu;
	struct cpumask valid;
	unsigned long flags;
	bool enabled;

	/* Restrict the request to CPUs that actually have a bank. */
	if (!cpumask_and(&valid, &intc->cpumask, dest))
		return -EINVAL;

	new_cpu = cpumask_any_and(&valid, cpu_online_mask);
	if (new_cpu >= nr_cpu_ids)
		return -EINVAL;

	dest = cpumask_of(new_cpu);

	raw_spin_lock_irqsave(&intc->lock, flags);
	if (old_cpu != new_cpu) {
		enabled = intc->cpus[old_cpu]->enable_cache[word] & mask;
		if (enabled)
			__bcm6345_l1_mask(d);
		cpumask_copy(irq_data_get_affinity_mask(d), dest);
		if (enabled)
			__bcm6345_l1_unmask(d);
	} else {
		cpumask_copy(irq_data_get_affinity_mask(d), dest);
	}
	raw_spin_unlock_irqrestore(&intc->lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;
}
237
/*
 * Map and reset one CPU's register bank (DT reg/interrupt index @idx)
 * and chain the L1 handler onto its parent interrupt.
 *
 * NOTE(review): the ioremap/kzalloc are not undone on the later error
 * paths here; the caller only cleans up when the whole controller init
 * fails — confirm partial-failure leaks are acceptable for init code.
 */
static int __init bcm6345_l1_init_one(struct device_node *dn,
					unsigned int idx,
					struct bcm6345_l1_chip *intc)
{
	struct resource res;
	resource_size_t sz;
	struct bcm6345_l1_cpu *cpu;
	unsigned int i, n_words;

	if (of_address_to_resource(dn, idx, &res))
		return -EINVAL;
	sz = resource_size(&res);
	n_words = sz / REG_BYTES_PER_IRQ_WORD;

	/* Every bank must expose the same number of IRQ words. */
	if (!intc->n_words)
		intc->n_words = n_words;
	else if (intc->n_words != n_words)
		return -EINVAL;

	cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
					GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;

	cpu->map_base = ioremap(res.start, sz);
	if (!cpu->map_base)
		return -ENOMEM;

	/* Start with every source masked. */
	for (i = 0; i < n_words; i++) {
		cpu->enable_cache[i] = 0;
		__raw_writel(0, cpu->map_base + reg_enable(intc, i));
	}

	cpu->parent_irq = irq_of_parse_and_map(dn, idx);
	if (!cpu->parent_irq) {
		/* NOTE(review): parent_irq is always 0 on this path;
		 * printing idx would be more informative — confirm. */
		pr_err("failed to map parent interrupt %d\n", cpu->parent_irq);
		return -EINVAL;
	}
	irq_set_chained_handler_and_data(cpu->parent_irq,
					bcm6345_l1_irq_handle, intc);

	return 0;
}
281
/* irq_chip shared by all L1 sources; no ack needed (level semantics). */
static struct irq_chip bcm6345_l1_irq_chip = {
	.name = "bcm6345-l1",
	.irq_mask = bcm6345_l1_mask,
	.irq_unmask = bcm6345_l1_unmask,
	.irq_set_affinity = bcm6345_l1_set_affinity,
};
288
289static int bcm6345_l1_map(struct irq_domain *d, unsigned int virq,
290 irq_hw_number_t hw_irq)
291{
292 irq_set_chip_and_handler(virq,
293 &bcm6345_l1_irq_chip, handle_percpu_irq);
294 irq_set_chip_data(virq, d->host_data);
295 return 0;
296}
297
/* One-cell DT interrupt specifier (hwirq number only). */
static const struct irq_domain_ops bcm6345_l1_domain_ops = {
	.xlate = irq_domain_xlate_onecell,
	.map = bcm6345_l1_map,
};
302
/*
 * DT probe: allocate the controller, initialize one register bank per
 * possible CPU, then register a linear domain covering
 * IRQS_PER_WORD * n_words sources.  Per-CPU bank failures are tolerated
 * as long as at least one bank came up.
 */
static int __init bcm6345_l1_of_init(struct device_node *dn,
			struct device_node *parent)
{
	struct bcm6345_l1_chip *intc;
	unsigned int idx;
	int ret;

	intc = kzalloc(sizeof(*intc), GFP_KERNEL);
	if (!intc)
		return -ENOMEM;

	for_each_possible_cpu(idx) {
		ret = bcm6345_l1_init_one(dn, idx, intc);
		if (ret)
			pr_err("failed to init intc L1 for cpu %d: %d\n",
				idx, ret);
		else
			cpumask_set_cpu(idx, &intc->cpumask);
	}

	/* No usable bank at all: give up. */
	if (!cpumask_weight(&intc->cpumask)) {
		ret = -ENODEV;
		goto out_free;
	}

	raw_spin_lock_init(&intc->lock);

	intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words,
					&bcm6345_l1_domain_ops,
					intc);
	if (!intc->domain) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	pr_info("registered BCM6345 L1 intc (IRQs: %d)\n",
		IRQS_PER_WORD * intc->n_words);
	for_each_cpu(idx, &intc->cpumask) {
		struct bcm6345_l1_cpu *cpu = intc->cpus[idx];

		pr_info("  CPU%u at MMIO 0x%p (irq = %d)\n", idx,
			cpu->map_base, cpu->parent_irq);
	}

	return 0;

out_unmap:
	/* Undo every bank that was (even partially) set up. */
	for_each_possible_cpu(idx) {
		struct bcm6345_l1_cpu *cpu = intc->cpus[idx];

		if (cpu) {
			if (cpu->map_base)
				iounmap(cpu->map_base);
			kfree(cpu);
		}
	}
out_free:
	kfree(intc);
	return ret;
}

IRQCHIP_DECLARE(bcm6345_l1, "brcm,bcm6345-l1-intc", bcm6345_l1_of_init);
diff --git a/drivers/irqchip/irq-gic-realview.c b/drivers/irqchip/irq-gic-realview.c
index aa46eb280a7f..54c296401525 100644
--- a/drivers/irqchip/irq-gic-realview.c
+++ b/drivers/irqchip/irq-gic-realview.c
@@ -10,7 +10,8 @@
10#include <linux/irqchip/arm-gic.h> 10#include <linux/irqchip/arm-gic.h>
11 11
12#define REALVIEW_SYS_LOCK_OFFSET 0x20 12#define REALVIEW_SYS_LOCK_OFFSET 0x20
13#define REALVIEW_PB11MP_SYS_PLD_CTRL1 0x74 13#define REALVIEW_SYS_PLD_CTRL1 0x74
14#define REALVIEW_EB_REVB_SYS_PLD_CTRL1 0xD8
14#define VERSATILE_LOCK_VAL 0xA05F 15#define VERSATILE_LOCK_VAL 0xA05F
15#define PLD_INTMODE_MASK BIT(22)|BIT(23)|BIT(24) 16#define PLD_INTMODE_MASK BIT(22)|BIT(23)|BIT(24)
16#define PLD_INTMODE_LEGACY 0x0 17#define PLD_INTMODE_LEGACY 0x0
@@ -18,26 +19,57 @@
18#define PLD_INTMODE_NEW_NO_DCC BIT(23) 19#define PLD_INTMODE_NEW_NO_DCC BIT(23)
19#define PLD_INTMODE_FIQ_ENABLE BIT(24) 20#define PLD_INTMODE_FIQ_ENABLE BIT(24)
20 21
22/* For some reason RealView EB Rev B moved this register */
23static const struct of_device_id syscon_pldset_of_match[] = {
24 {
25 .compatible = "arm,realview-eb11mp-revb-syscon",
26 .data = (void *)REALVIEW_EB_REVB_SYS_PLD_CTRL1,
27 },
28 {
29 .compatible = "arm,realview-eb11mp-revc-syscon",
30 .data = (void *)REALVIEW_SYS_PLD_CTRL1,
31 },
32 {
33 .compatible = "arm,realview-eb-syscon",
34 .data = (void *)REALVIEW_SYS_PLD_CTRL1,
35 },
36 {
37 .compatible = "arm,realview-pb11mp-syscon",
38 .data = (void *)REALVIEW_SYS_PLD_CTRL1,
39 },
40 {},
41};
42
21static int __init 43static int __init
22realview_gic_of_init(struct device_node *node, struct device_node *parent) 44realview_gic_of_init(struct device_node *node, struct device_node *parent)
23{ 45{
24 static struct regmap *map; 46 static struct regmap *map;
47 struct device_node *np;
48 const struct of_device_id *gic_id;
49 u32 pld1_ctrl;
50
51 np = of_find_matching_node_and_match(NULL, syscon_pldset_of_match,
52 &gic_id);
53 if (!np)
54 return -ENODEV;
55 pld1_ctrl = (u32)gic_id->data;
25 56
26 /* The PB11MPCore GIC needs to be configured in the syscon */ 57 /* The PB11MPCore GIC needs to be configured in the syscon */
27 map = syscon_regmap_lookup_by_compatible("arm,realview-pb11mp-syscon"); 58 map = syscon_node_to_regmap(np);
28 if (!IS_ERR(map)) { 59 if (!IS_ERR(map)) {
29 /* new irq mode with no DCC */ 60 /* new irq mode with no DCC */
30 regmap_write(map, REALVIEW_SYS_LOCK_OFFSET, 61 regmap_write(map, REALVIEW_SYS_LOCK_OFFSET,
31 VERSATILE_LOCK_VAL); 62 VERSATILE_LOCK_VAL);
32 regmap_update_bits(map, REALVIEW_PB11MP_SYS_PLD_CTRL1, 63 regmap_update_bits(map, pld1_ctrl,
33 PLD_INTMODE_NEW_NO_DCC, 64 PLD_INTMODE_NEW_NO_DCC,
34 PLD_INTMODE_MASK); 65 PLD_INTMODE_MASK);
35 regmap_write(map, REALVIEW_SYS_LOCK_OFFSET, 0x0000); 66 regmap_write(map, REALVIEW_SYS_LOCK_OFFSET, 0x0000);
36 pr_info("TC11MP GIC: set up interrupt controller to NEW mode, no DCC\n"); 67 pr_info("RealView GIC: set up interrupt controller to NEW mode, no DCC\n");
37 } else { 68 } else {
38 pr_err("TC11MP GIC setup: could not find syscon\n"); 69 pr_err("RealView GIC setup: could not find syscon\n");
39 return -ENXIO; 70 return -ENODEV;
40 } 71 }
41 return gic_of_init(node, parent); 72 return gic_of_init(node, parent);
42} 73}
43IRQCHIP_DECLARE(armtc11mp_gic, "arm,tc11mp-gic", realview_gic_of_init); 74IRQCHIP_DECLARE(armtc11mp_gic, "arm,tc11mp-gic", realview_gic_of_init);
75IRQCHIP_DECLARE(armeb11mp_gic, "arm,eb11mp-gic", realview_gic_of_init);
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index c779f83e511d..28f047c61baa 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -92,18 +92,6 @@ static struct msi_domain_info gicv2m_msi_domain_info = {
92 .chip = &gicv2m_msi_irq_chip, 92 .chip = &gicv2m_msi_irq_chip,
93}; 93};
94 94
95static int gicv2m_set_affinity(struct irq_data *irq_data,
96 const struct cpumask *mask, bool force)
97{
98 int ret;
99
100 ret = irq_chip_set_affinity_parent(irq_data, mask, force);
101 if (ret == IRQ_SET_MASK_OK)
102 ret = IRQ_SET_MASK_OK_DONE;
103
104 return ret;
105}
106
107static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) 95static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
108{ 96{
109 struct v2m_data *v2m = irq_data_get_irq_chip_data(data); 97 struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
@@ -122,7 +110,7 @@ static struct irq_chip gicv2m_irq_chip = {
122 .irq_mask = irq_chip_mask_parent, 110 .irq_mask = irq_chip_mask_parent,
123 .irq_unmask = irq_chip_unmask_parent, 111 .irq_unmask = irq_chip_unmask_parent,
124 .irq_eoi = irq_chip_eoi_parent, 112 .irq_eoi = irq_chip_eoi_parent,
125 .irq_set_affinity = gicv2m_set_affinity, 113 .irq_set_affinity = irq_chip_set_affinity_parent,
126 .irq_compose_msi_msg = gicv2m_compose_msi_msg, 114 .irq_compose_msi_msg = gicv2m_compose_msi_msg,
127}; 115};
128 116
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 43dfd15c1dd2..39261798c59f 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -103,7 +103,6 @@ struct its_device {
103 103
104static LIST_HEAD(its_nodes); 104static LIST_HEAD(its_nodes);
105static DEFINE_SPINLOCK(its_lock); 105static DEFINE_SPINLOCK(its_lock);
106static struct device_node *gic_root_node;
107static struct rdists *gic_rdists; 106static struct rdists *gic_rdists;
108 107
109#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) 108#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
@@ -671,7 +670,7 @@ static int its_chunk_to_lpi(int chunk)
671 return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192; 670 return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
672} 671}
673 672
674static int its_lpi_init(u32 id_bits) 673static int __init its_lpi_init(u32 id_bits)
675{ 674{
676 lpi_chunks = its_lpi_to_chunk(1UL << id_bits); 675 lpi_chunks = its_lpi_to_chunk(1UL << id_bits);
677 676
@@ -1430,7 +1429,8 @@ static void its_enable_quirks(struct its_node *its)
1430 gic_enable_quirks(iidr, its_quirks, its); 1429 gic_enable_quirks(iidr, its_quirks, its);
1431} 1430}
1432 1431
1433static int its_probe(struct device_node *node, struct irq_domain *parent) 1432static int __init its_probe(struct device_node *node,
1433 struct irq_domain *parent)
1434{ 1434{
1435 struct resource res; 1435 struct resource res;
1436 struct its_node *its; 1436 struct its_node *its;
@@ -1591,7 +1591,7 @@ static struct of_device_id its_device_id[] = {
1591 {}, 1591 {},
1592}; 1592};
1593 1593
1594int its_init(struct device_node *node, struct rdists *rdists, 1594int __init its_init(struct device_node *node, struct rdists *rdists,
1595 struct irq_domain *parent_domain) 1595 struct irq_domain *parent_domain)
1596{ 1596{
1597 struct device_node *np; 1597 struct device_node *np;
@@ -1607,8 +1607,6 @@ int its_init(struct device_node *node, struct rdists *rdists,
1607 } 1607 }
1608 1608
1609 gic_rdists = rdists; 1609 gic_rdists = rdists;
1610 gic_root_node = node;
1611
1612 its_alloc_lpi_tables(); 1610 its_alloc_lpi_tables();
1613 its_lpi_init(rdists->id_bits); 1611 its_lpi_init(rdists->id_bits);
1614 1612
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index d7be6ddc34f6..5b7d3c2129d8 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -15,10 +15,12 @@
15 * along with this program. If not, see <http://www.gnu.org/licenses/>. 15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 16 */
17 17
18#include <linux/acpi.h>
18#include <linux/cpu.h> 19#include <linux/cpu.h>
19#include <linux/cpu_pm.h> 20#include <linux/cpu_pm.h>
20#include <linux/delay.h> 21#include <linux/delay.h>
21#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/irqdomain.h>
22#include <linux/of.h> 24#include <linux/of.h>
23#include <linux/of_address.h> 25#include <linux/of_address.h>
24#include <linux/of_irq.h> 26#include <linux/of_irq.h>
@@ -38,6 +40,7 @@
38struct redist_region { 40struct redist_region {
39 void __iomem *redist_base; 41 void __iomem *redist_base;
40 phys_addr_t phys_base; 42 phys_addr_t phys_base;
43 bool single_redist;
41}; 44};
42 45
43struct gic_chip_data { 46struct gic_chip_data {
@@ -434,6 +437,9 @@ static int gic_populate_rdist(void)
434 return 0; 437 return 0;
435 } 438 }
436 439
440 if (gic_data.redist_regions[i].single_redist)
441 break;
442
437 if (gic_data.redist_stride) { 443 if (gic_data.redist_stride) {
438 ptr += gic_data.redist_stride; 444 ptr += gic_data.redist_stride;
439 } else { 445 } else {
@@ -634,7 +640,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
634 else 640 else
635 gic_dist_wait_for_rwp(); 641 gic_dist_wait_for_rwp();
636 642
637 return IRQ_SET_MASK_OK; 643 return IRQ_SET_MASK_OK_DONE;
638} 644}
639#else 645#else
640#define gic_set_affinity NULL 646#define gic_set_affinity NULL
@@ -764,6 +770,15 @@ static int gic_irq_domain_translate(struct irq_domain *d,
764 return 0; 770 return 0;
765 } 771 }
766 772
773 if (is_fwnode_irqchip(fwspec->fwnode)) {
774 if(fwspec->param_count != 2)
775 return -EINVAL;
776
777 *hwirq = fwspec->param[0];
778 *type = fwspec->param[1];
779 return 0;
780 }
781
767 return -EINVAL; 782 return -EINVAL;
768} 783}
769 784
@@ -811,17 +826,88 @@ static void gicv3_enable_quirks(void)
811#endif 826#endif
812} 827}
813 828
829static int __init gic_init_bases(void __iomem *dist_base,
830 struct redist_region *rdist_regs,
831 u32 nr_redist_regions,
832 u64 redist_stride,
833 struct fwnode_handle *handle)
834{
835 struct device_node *node;
836 u32 typer;
837 int gic_irqs;
838 int err;
839
840 if (!is_hyp_mode_available())
841 static_key_slow_dec(&supports_deactivate);
842
843 if (static_key_true(&supports_deactivate))
844 pr_info("GIC: Using split EOI/Deactivate mode\n");
845
846 gic_data.dist_base = dist_base;
847 gic_data.redist_regions = rdist_regs;
848 gic_data.nr_redist_regions = nr_redist_regions;
849 gic_data.redist_stride = redist_stride;
850
851 gicv3_enable_quirks();
852
853 /*
854 * Find out how many interrupts are supported.
855 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
856 */
857 typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
858 gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
859 gic_irqs = GICD_TYPER_IRQS(typer);
860 if (gic_irqs > 1020)
861 gic_irqs = 1020;
862 gic_data.irq_nr = gic_irqs;
863
864 gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
865 &gic_data);
866 gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
867
868 if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
869 err = -ENOMEM;
870 goto out_free;
871 }
872
873 set_handle_irq(gic_handle_irq);
874
875 node = to_of_node(handle);
876 if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis() &&
877 node) /* Temp hack to prevent ITS init for ACPI */
878 its_init(node, &gic_data.rdists, gic_data.domain);
879
880 gic_smp_init();
881 gic_dist_init();
882 gic_cpu_init();
883 gic_cpu_pm_init();
884
885 return 0;
886
887out_free:
888 if (gic_data.domain)
889 irq_domain_remove(gic_data.domain);
890 free_percpu(gic_data.rdists.rdist);
891 return err;
892}
893
894static int __init gic_validate_dist_version(void __iomem *dist_base)
895{
896 u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
897
898 if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
899 return -ENODEV;
900
901 return 0;
902}
903
814static int __init gic_of_init(struct device_node *node, struct device_node *parent) 904static int __init gic_of_init(struct device_node *node, struct device_node *parent)
815{ 905{
816 void __iomem *dist_base; 906 void __iomem *dist_base;
817 struct redist_region *rdist_regs; 907 struct redist_region *rdist_regs;
818 u64 redist_stride; 908 u64 redist_stride;
819 u32 nr_redist_regions; 909 u32 nr_redist_regions;
820 u32 typer; 910 int err, i;
821 u32 reg;
822 int gic_irqs;
823 int err;
824 int i;
825 911
826 dist_base = of_iomap(node, 0); 912 dist_base = of_iomap(node, 0);
827 if (!dist_base) { 913 if (!dist_base) {
@@ -830,11 +916,10 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
830 return -ENXIO; 916 return -ENXIO;
831 } 917 }
832 918
833 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; 919 err = gic_validate_dist_version(dist_base);
834 if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) { 920 if (err) {
835 pr_err("%s: no distributor detected, giving up\n", 921 pr_err("%s: no distributor detected, giving up\n",
836 node->full_name); 922 node->full_name);
837 err = -ENODEV;
838 goto out_unmap_dist; 923 goto out_unmap_dist;
839 } 924 }
840 925
@@ -865,63 +950,229 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
865 if (of_property_read_u64(node, "redistributor-stride", &redist_stride)) 950 if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
866 redist_stride = 0; 951 redist_stride = 0;
867 952
868 if (!is_hyp_mode_available()) 953 err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
869 static_key_slow_dec(&supports_deactivate); 954 redist_stride, &node->fwnode);
955 if (!err)
956 return 0;
870 957
871 if (static_key_true(&supports_deactivate)) 958out_unmap_rdist:
872 pr_info("GIC: Using split EOI/Deactivate mode\n"); 959 for (i = 0; i < nr_redist_regions; i++)
960 if (rdist_regs[i].redist_base)
961 iounmap(rdist_regs[i].redist_base);
962 kfree(rdist_regs);
963out_unmap_dist:
964 iounmap(dist_base);
965 return err;
966}
873 967
874 gic_data.dist_base = dist_base; 968IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
875 gic_data.redist_regions = rdist_regs;
876 gic_data.nr_redist_regions = nr_redist_regions;
877 gic_data.redist_stride = redist_stride;
878 969
879 gicv3_enable_quirks(); 970#ifdef CONFIG_ACPI
971static void __iomem *dist_base;
972static struct redist_region *redist_regs __initdata;
973static u32 nr_redist_regions __initdata;
974static bool single_redist;
975
976static void __init
977gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
978{
979 static int count = 0;
980
981 redist_regs[count].phys_base = phys_base;
982 redist_regs[count].redist_base = redist_base;
983 redist_regs[count].single_redist = single_redist;
984 count++;
985}
986
987static int __init
988gic_acpi_parse_madt_redist(struct acpi_subtable_header *header,
989 const unsigned long end)
990{
991 struct acpi_madt_generic_redistributor *redist =
992 (struct acpi_madt_generic_redistributor *)header;
993 void __iomem *redist_base;
994
995 redist_base = ioremap(redist->base_address, redist->length);
996 if (!redist_base) {
997 pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
998 return -ENOMEM;
999 }
1000
1001 gic_acpi_register_redist(redist->base_address, redist_base);
1002 return 0;
1003}
1004
1005static int __init
1006gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header,
1007 const unsigned long end)
1008{
1009 struct acpi_madt_generic_interrupt *gicc =
1010 (struct acpi_madt_generic_interrupt *)header;
1011 u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
1012 u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
1013 void __iomem *redist_base;
1014
1015 redist_base = ioremap(gicc->gicr_base_address, size);
1016 if (!redist_base)
1017 return -ENOMEM;
1018
1019 gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
1020 return 0;
1021}
1022
1023static int __init gic_acpi_collect_gicr_base(void)
1024{
1025 acpi_tbl_entry_handler redist_parser;
1026 enum acpi_madt_type type;
1027
1028 if (single_redist) {
1029 type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
1030 redist_parser = gic_acpi_parse_madt_gicc;
1031 } else {
1032 type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
1033 redist_parser = gic_acpi_parse_madt_redist;
1034 }
1035
1036 /* Collect redistributor base addresses in GICR entries */
1037 if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
1038 return 0;
1039
1040 pr_info("No valid GICR entries exist\n");
1041 return -ENODEV;
1042}
1043
1044static int __init gic_acpi_match_gicr(struct acpi_subtable_header *header,
1045 const unsigned long end)
1046{
1047 /* Subtable presence means that redist exists, that's it */
1048 return 0;
1049}
1050
1051static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header,
1052 const unsigned long end)
1053{
1054 struct acpi_madt_generic_interrupt *gicc =
1055 (struct acpi_madt_generic_interrupt *)header;
880 1056
881 /* 1057 /*
882 * Find out how many interrupts are supported. 1058 * If GICC is enabled and has valid gicr base address, then it means
883 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI) 1059 * GICR base is presented via GICC
884 */ 1060 */
885 typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); 1061 if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
886 gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer); 1062 return 0;
887 gic_irqs = GICD_TYPER_IRQS(typer);
888 if (gic_irqs > 1020)
889 gic_irqs = 1020;
890 gic_data.irq_nr = gic_irqs;
891 1063
892 gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops, 1064 return -ENODEV;
893 &gic_data); 1065}
894 gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
895 1066
896 if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { 1067static int __init gic_acpi_count_gicr_regions(void)
1068{
1069 int count;
1070
1071 /*
1072 * Count how many redistributor regions we have. It is not allowed
1073 * to mix redistributor description, GICR and GICC subtables have to be
1074 * mutually exclusive.
1075 */
1076 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
1077 gic_acpi_match_gicr, 0);
1078 if (count > 0) {
1079 single_redist = false;
1080 return count;
1081 }
1082
1083 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
1084 gic_acpi_match_gicc, 0);
1085 if (count > 0)
1086 single_redist = true;
1087
1088 return count;
1089}
1090
1091static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
1092 struct acpi_probe_entry *ape)
1093{
1094 struct acpi_madt_generic_distributor *dist;
1095 int count;
1096
1097 dist = (struct acpi_madt_generic_distributor *)header;
1098 if (dist->version != ape->driver_data)
1099 return false;
1100
1101 /* We need to do that exercise anyway, the sooner the better */
1102 count = gic_acpi_count_gicr_regions();
1103 if (count <= 0)
1104 return false;
1105
1106 nr_redist_regions = count;
1107 return true;
1108}
1109
1110#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
1111
1112static int __init
1113gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
1114{
1115 struct acpi_madt_generic_distributor *dist;
1116 struct fwnode_handle *domain_handle;
1117 int i, err;
1118
1119 /* Get distributor base address */
1120 dist = (struct acpi_madt_generic_distributor *)header;
1121 dist_base = ioremap(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE);
1122 if (!dist_base) {
1123 pr_err("Unable to map GICD registers\n");
1124 return -ENOMEM;
1125 }
1126
1127 err = gic_validate_dist_version(dist_base);
1128 if (err) {
1129 pr_err("No distributor detected at @%p, giving up", dist_base);
1130 goto out_dist_unmap;
1131 }
1132
1133 redist_regs = kzalloc(sizeof(*redist_regs) * nr_redist_regions,
1134 GFP_KERNEL);
1135 if (!redist_regs) {
897 err = -ENOMEM; 1136 err = -ENOMEM;
898 goto out_free; 1137 goto out_dist_unmap;
899 } 1138 }
900 1139
901 set_handle_irq(gic_handle_irq); 1140 err = gic_acpi_collect_gicr_base();
1141 if (err)
1142 goto out_redist_unmap;
902 1143
903 if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis()) 1144 domain_handle = irq_domain_alloc_fwnode(dist_base);
904 its_init(node, &gic_data.rdists, gic_data.domain); 1145 if (!domain_handle) {
1146 err = -ENOMEM;
1147 goto out_redist_unmap;
1148 }
905 1149
906 gic_smp_init(); 1150 err = gic_init_bases(dist_base, redist_regs, nr_redist_regions, 0,
907 gic_dist_init(); 1151 domain_handle);
908 gic_cpu_init(); 1152 if (err)
909 gic_cpu_pm_init(); 1153 goto out_fwhandle_free;
910 1154
1155 acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
911 return 0; 1156 return 0;
912 1157
913out_free: 1158out_fwhandle_free:
914 if (gic_data.domain) 1159 irq_domain_free_fwnode(domain_handle);
915 irq_domain_remove(gic_data.domain); 1160out_redist_unmap:
916 free_percpu(gic_data.rdists.rdist);
917out_unmap_rdist:
918 for (i = 0; i < nr_redist_regions; i++) 1161 for (i = 0; i < nr_redist_regions; i++)
919 if (rdist_regs[i].redist_base) 1162 if (redist_regs[i].redist_base)
920 iounmap(rdist_regs[i].redist_base); 1163 iounmap(redist_regs[i].redist_base);
921 kfree(rdist_regs); 1164 kfree(redist_regs);
922out_unmap_dist: 1165out_dist_unmap:
923 iounmap(dist_base); 1166 iounmap(dist_base);
924 return err; 1167 return err;
925} 1168}
926 1169IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
927IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init); 1170 acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
1171 gic_acpi_init);
1172IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
1173 acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
1174 gic_acpi_init);
1175IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
1176 acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
1177 gic_acpi_init);
1178#endif
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 8f9ebf714e2b..282344b95ec2 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -319,7 +319,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
319 writel_relaxed(val | bit, reg); 319 writel_relaxed(val | bit, reg);
320 raw_spin_unlock_irqrestore(&irq_controller_lock, flags); 320 raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
321 321
322 return IRQ_SET_MASK_OK; 322 return IRQ_SET_MASK_OK_DONE;
323} 323}
324#endif 324#endif
325 325
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 9e17ef27a183..94a30da0cfac 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -29,16 +29,32 @@ struct gic_pcpu_mask {
29 DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS); 29 DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
30}; 30};
31 31
32struct gic_irq_spec {
33 enum {
34 GIC_DEVICE,
35 GIC_IPI
36 } type;
37
38 union {
39 struct cpumask *ipimask;
40 unsigned int hwirq;
41 };
42};
43
32static unsigned long __gic_base_addr; 44static unsigned long __gic_base_addr;
45
33static void __iomem *gic_base; 46static void __iomem *gic_base;
34static struct gic_pcpu_mask pcpu_masks[NR_CPUS]; 47static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
35static DEFINE_SPINLOCK(gic_lock); 48static DEFINE_SPINLOCK(gic_lock);
36static struct irq_domain *gic_irq_domain; 49static struct irq_domain *gic_irq_domain;
50static struct irq_domain *gic_dev_domain;
51static struct irq_domain *gic_ipi_domain;
37static int gic_shared_intrs; 52static int gic_shared_intrs;
38static int gic_vpes; 53static int gic_vpes;
39static unsigned int gic_cpu_pin; 54static unsigned int gic_cpu_pin;
40static unsigned int timer_cpu_pin; 55static unsigned int timer_cpu_pin;
41static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller; 56static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
57DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
42 58
43static void __gic_irq_dispatch(void); 59static void __gic_irq_dispatch(void);
44 60
@@ -264,9 +280,11 @@ static void gic_bind_eic_interrupt(int irq, int set)
264 GIC_VPE_EIC_SS(irq), set); 280 GIC_VPE_EIC_SS(irq), set);
265} 281}
266 282
267void gic_send_ipi(unsigned int intr) 283static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
268{ 284{
269 gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(intr)); 285 irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));
286
287 gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(hwirq));
270} 288}
271 289
272int gic_get_c0_compare_int(void) 290int gic_get_c0_compare_int(void)
@@ -449,7 +467,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
449 gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp))); 467 gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));
450 468
451 /* Update the pcpu_masks */ 469 /* Update the pcpu_masks */
452 for (i = 0; i < NR_CPUS; i++) 470 for (i = 0; i < gic_vpes; i++)
453 clear_bit(irq, pcpu_masks[i].pcpu_mask); 471 clear_bit(irq, pcpu_masks[i].pcpu_mask);
454 set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask); 472 set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
455 473
@@ -479,6 +497,7 @@ static struct irq_chip gic_edge_irq_controller = {
479#ifdef CONFIG_SMP 497#ifdef CONFIG_SMP
480 .irq_set_affinity = gic_set_affinity, 498 .irq_set_affinity = gic_set_affinity,
481#endif 499#endif
500 .ipi_send_single = gic_send_ipi,
482}; 501};
483 502
484static void gic_handle_local_int(bool chained) 503static void gic_handle_local_int(bool chained)
@@ -572,83 +591,6 @@ static void gic_irq_dispatch(struct irq_desc *desc)
572 gic_handle_shared_int(true); 591 gic_handle_shared_int(true);
573} 592}
574 593
575#ifdef CONFIG_MIPS_GIC_IPI
576static int gic_resched_int_base;
577static int gic_call_int_base;
578
579unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
580{
581 return gic_resched_int_base + cpu;
582}
583
584unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
585{
586 return gic_call_int_base + cpu;
587}
588
589static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
590{
591 scheduler_ipi();
592
593 return IRQ_HANDLED;
594}
595
596static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
597{
598 generic_smp_call_function_interrupt();
599
600 return IRQ_HANDLED;
601}
602
603static struct irqaction irq_resched = {
604 .handler = ipi_resched_interrupt,
605 .flags = IRQF_PERCPU,
606 .name = "IPI resched"
607};
608
609static struct irqaction irq_call = {
610 .handler = ipi_call_interrupt,
611 .flags = IRQF_PERCPU,
612 .name = "IPI call"
613};
614
615static __init void gic_ipi_init_one(unsigned int intr, int cpu,
616 struct irqaction *action)
617{
618 int virq = irq_create_mapping(gic_irq_domain,
619 GIC_SHARED_TO_HWIRQ(intr));
620 int i;
621
622 gic_map_to_vpe(intr, mips_cm_vp_id(cpu));
623 for (i = 0; i < NR_CPUS; i++)
624 clear_bit(intr, pcpu_masks[i].pcpu_mask);
625 set_bit(intr, pcpu_masks[cpu].pcpu_mask);
626
627 irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
628
629 irq_set_handler(virq, handle_percpu_irq);
630 setup_irq(virq, action);
631}
632
633static __init void gic_ipi_init(void)
634{
635 int i;
636
637 /* Use last 2 * NR_CPUS interrupts as IPIs */
638 gic_resched_int_base = gic_shared_intrs - nr_cpu_ids;
639 gic_call_int_base = gic_resched_int_base - nr_cpu_ids;
640
641 for (i = 0; i < nr_cpu_ids; i++) {
642 gic_ipi_init_one(gic_call_int_base + i, i, &irq_call);
643 gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched);
644 }
645}
646#else
647static inline void gic_ipi_init(void)
648{
649}
650#endif
651
652static void __init gic_basic_init(void) 594static void __init gic_basic_init(void)
653{ 595{
654 unsigned int i; 596 unsigned int i;
@@ -753,19 +695,21 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
753} 695}
754 696
755static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq, 697static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
756 irq_hw_number_t hw) 698 irq_hw_number_t hw, unsigned int vpe)
757{ 699{
758 int intr = GIC_HWIRQ_TO_SHARED(hw); 700 int intr = GIC_HWIRQ_TO_SHARED(hw);
759 unsigned long flags; 701 unsigned long flags;
702 int i;
760 703
761 irq_set_chip_and_handler(virq, &gic_level_irq_controller, 704 irq_set_chip_and_handler(virq, &gic_level_irq_controller,
762 handle_level_irq); 705 handle_level_irq);
763 706
764 spin_lock_irqsave(&gic_lock, flags); 707 spin_lock_irqsave(&gic_lock, flags);
765 gic_map_to_pin(intr, gic_cpu_pin); 708 gic_map_to_pin(intr, gic_cpu_pin);
766 /* Map to VPE 0 by default */ 709 gic_map_to_vpe(intr, vpe);
767 gic_map_to_vpe(intr, 0); 710 for (i = 0; i < gic_vpes; i++)
768 set_bit(intr, pcpu_masks[0].pcpu_mask); 711 clear_bit(intr, pcpu_masks[i].pcpu_mask);
712 set_bit(intr, pcpu_masks[vpe].pcpu_mask);
769 spin_unlock_irqrestore(&gic_lock, flags); 713 spin_unlock_irqrestore(&gic_lock, flags);
770 714
771 return 0; 715 return 0;
@@ -776,10 +720,93 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
776{ 720{
777 if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS) 721 if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
778 return gic_local_irq_domain_map(d, virq, hw); 722 return gic_local_irq_domain_map(d, virq, hw);
779 return gic_shared_irq_domain_map(d, virq, hw); 723 return gic_shared_irq_domain_map(d, virq, hw, 0);
780} 724}
781 725
782static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, 726static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
727 unsigned int nr_irqs, void *arg)
728{
729 struct gic_irq_spec *spec = arg;
730 irq_hw_number_t hwirq, base_hwirq;
731 int cpu, ret, i;
732
733 if (spec->type == GIC_DEVICE) {
734 /* verify that it doesn't conflict with an IPI irq */
735 if (test_bit(spec->hwirq, ipi_resrv))
736 return -EBUSY;
737 } else {
738 base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
739 if (base_hwirq == gic_shared_intrs) {
740 return -ENOMEM;
741 }
742
743 /* check that we have enough space */
744 for (i = base_hwirq; i < nr_irqs; i++) {
745 if (!test_bit(i, ipi_resrv))
746 return -EBUSY;
747 }
748 bitmap_clear(ipi_resrv, base_hwirq, nr_irqs);
749
750 /* map the hwirq for each cpu consecutively */
751 i = 0;
752 for_each_cpu(cpu, spec->ipimask) {
753 hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);
754
755 ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
756 &gic_edge_irq_controller,
757 NULL);
758 if (ret)
759 goto error;
760
761 ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
762 if (ret)
763 goto error;
764
765 i++;
766 }
767
768 /*
769 * tell the parent about the base hwirq we allocated so it can
770 * set its own domain data
771 */
772 spec->hwirq = base_hwirq;
773 }
774
775 return 0;
776error:
777 bitmap_set(ipi_resrv, base_hwirq, nr_irqs);
778 return ret;
779}
780
781void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
782 unsigned int nr_irqs)
783{
784 irq_hw_number_t base_hwirq;
785 struct irq_data *data;
786
787 data = irq_get_irq_data(virq);
788 if (!data)
789 return;
790
791 base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
792 bitmap_set(ipi_resrv, base_hwirq, nr_irqs);
793}
794
795int gic_irq_domain_match(struct irq_domain *d, struct device_node *node,
796 enum irq_domain_bus_token bus_token)
797{
798 /* this domain should'nt be accessed directly */
799 return 0;
800}
801
802static const struct irq_domain_ops gic_irq_domain_ops = {
803 .map = gic_irq_domain_map,
804 .alloc = gic_irq_domain_alloc,
805 .free = gic_irq_domain_free,
806 .match = gic_irq_domain_match,
807};
808
809static int gic_dev_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
783 const u32 *intspec, unsigned int intsize, 810 const u32 *intspec, unsigned int intsize,
784 irq_hw_number_t *out_hwirq, 811 irq_hw_number_t *out_hwirq,
785 unsigned int *out_type) 812 unsigned int *out_type)
@@ -798,9 +825,130 @@ static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
798 return 0; 825 return 0;
799} 826}
800 827
801static const struct irq_domain_ops gic_irq_domain_ops = { 828static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
802 .map = gic_irq_domain_map, 829 unsigned int nr_irqs, void *arg)
803 .xlate = gic_irq_domain_xlate, 830{
831 struct irq_fwspec *fwspec = arg;
832 struct gic_irq_spec spec = {
833 .type = GIC_DEVICE,
834 .hwirq = fwspec->param[1],
835 };
836 int i, ret;
837 bool is_shared = fwspec->param[0] == GIC_SHARED;
838
839 if (is_shared) {
840 ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
841 if (ret)
842 return ret;
843 }
844
845 for (i = 0; i < nr_irqs; i++) {
846 irq_hw_number_t hwirq;
847
848 if (is_shared)
849 hwirq = GIC_SHARED_TO_HWIRQ(spec.hwirq + i);
850 else
851 hwirq = GIC_LOCAL_TO_HWIRQ(spec.hwirq + i);
852
853 ret = irq_domain_set_hwirq_and_chip(d, virq + i,
854 hwirq,
855 &gic_level_irq_controller,
856 NULL);
857 if (ret)
858 return ret;
859 }
860
861 return 0;
862}
863
864void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
865 unsigned int nr_irqs)
866{
867 /* no real allocation is done for dev irqs, so no need to free anything */
868 return;
869}
870
871static struct irq_domain_ops gic_dev_domain_ops = {
872 .xlate = gic_dev_domain_xlate,
873 .alloc = gic_dev_domain_alloc,
874 .free = gic_dev_domain_free,
875};
876
877static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
878 const u32 *intspec, unsigned int intsize,
879 irq_hw_number_t *out_hwirq,
880 unsigned int *out_type)
881{
882 /*
883 * There's nothing to translate here. hwirq is dynamically allocated and
884 * the irq type is always edge triggered.
885 * */
886 *out_hwirq = 0;
887 *out_type = IRQ_TYPE_EDGE_RISING;
888
889 return 0;
890}
891
892static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
893 unsigned int nr_irqs, void *arg)
894{
895 struct cpumask *ipimask = arg;
896 struct gic_irq_spec spec = {
897 .type = GIC_IPI,
898 .ipimask = ipimask
899 };
900 int ret, i;
901
902 ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
903 if (ret)
904 return ret;
905
906 /* the parent should have set spec.hwirq to the base_hwirq it allocated */
907 for (i = 0; i < nr_irqs; i++) {
908 ret = irq_domain_set_hwirq_and_chip(d, virq + i,
909 GIC_SHARED_TO_HWIRQ(spec.hwirq + i),
910 &gic_edge_irq_controller,
911 NULL);
912 if (ret)
913 goto error;
914
915 ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
916 if (ret)
917 goto error;
918 }
919
920 return 0;
921error:
922 irq_domain_free_irqs_parent(d, virq, nr_irqs);
923 return ret;
924}
925
926void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
927 unsigned int nr_irqs)
928{
929 irq_domain_free_irqs_parent(d, virq, nr_irqs);
930}
931
932int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
933 enum irq_domain_bus_token bus_token)
934{
935 bool is_ipi;
936
937 switch (bus_token) {
938 case DOMAIN_BUS_IPI:
939 is_ipi = d->bus_token == bus_token;
940 return to_of_node(d->fwnode) == node && is_ipi;
941 break;
942 default:
943 return 0;
944 }
945}
946
947static struct irq_domain_ops gic_ipi_domain_ops = {
948 .xlate = gic_ipi_domain_xlate,
949 .alloc = gic_ipi_domain_alloc,
950 .free = gic_ipi_domain_free,
951 .match = gic_ipi_domain_match,
804}; 952};
805 953
806static void __init __gic_init(unsigned long gic_base_addr, 954static void __init __gic_init(unsigned long gic_base_addr,
@@ -809,6 +957,7 @@ static void __init __gic_init(unsigned long gic_base_addr,
809 struct device_node *node) 957 struct device_node *node)
810{ 958{
811 unsigned int gicconfig; 959 unsigned int gicconfig;
960 unsigned int v[2];
812 961
813 __gic_base_addr = gic_base_addr; 962 __gic_base_addr = gic_base_addr;
814 963
@@ -864,9 +1013,32 @@ static void __init __gic_init(unsigned long gic_base_addr,
864 if (!gic_irq_domain) 1013 if (!gic_irq_domain)
865 panic("Failed to add GIC IRQ domain"); 1014 panic("Failed to add GIC IRQ domain");
866 1015
867 gic_basic_init(); 1016 gic_dev_domain = irq_domain_add_hierarchy(gic_irq_domain, 0,
1017 GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
1018 node, &gic_dev_domain_ops, NULL);
1019 if (!gic_dev_domain)
1020 panic("Failed to add GIC DEV domain");
1021
1022 gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
1023 IRQ_DOMAIN_FLAG_IPI_PER_CPU,
1024 GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
1025 node, &gic_ipi_domain_ops, NULL);
1026 if (!gic_ipi_domain)
1027 panic("Failed to add GIC IPI domain");
868 1028
869 gic_ipi_init(); 1029 gic_ipi_domain->bus_token = DOMAIN_BUS_IPI;
1030
1031 if (node &&
1032 !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
1033 bitmap_set(ipi_resrv, v[0], v[1]);
1034 } else {
1035 /* Make the last 2 * gic_vpes available for IPIs */
1036 bitmap_set(ipi_resrv,
1037 gic_shared_intrs - 2 * gic_vpes,
1038 2 * gic_vpes);
1039 }
1040
1041 gic_basic_init();
870} 1042}
871 1043
872void __init gic_init(unsigned long gic_base_addr, 1044void __init gic_init(unsigned long gic_base_addr,
diff --git a/drivers/irqchip/irq-mvebu-odmi.c b/drivers/irqchip/irq-mvebu-odmi.c
new file mode 100644
index 000000000000..b4d367868dbb
--- /dev/null
+++ b/drivers/irqchip/irq-mvebu-odmi.c
@@ -0,0 +1,236 @@
1/*
2 * Copyright (C) 2016 Marvell
3 *
4 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
5 *
6 * This file is licensed under the terms of the GNU General Public
7 * License version 2. This program is licensed "as is" without any
8 * warranty of any kind, whether express or implied.
9 */
10
11#define pr_fmt(fmt) "GIC-ODMI: " fmt
12
13#include <linux/irq.h>
14#include <linux/irqchip.h>
15#include <linux/irqdomain.h>
16#include <linux/kernel.h>
17#include <linux/msi.h>
18#include <linux/of_address.h>
19#include <linux/slab.h>
20#include <dt-bindings/interrupt-controller/arm-gic.h>
21
22#define GICP_ODMIN_SET 0x40
23#define GICP_ODMI_INT_NUM_SHIFT 12
24#define GICP_ODMIN_GM_EP_R0 0x110
25#define GICP_ODMIN_GM_EP_R1 0x114
26#define GICP_ODMIN_GM_EA_R0 0x108
27#define GICP_ODMIN_GM_EA_R1 0x118
28
29/*
30 * We don't support the group events, so we simply have 8 interrupts
31 * per frame.
32 */
33#define NODMIS_SHIFT 3
34#define NODMIS_PER_FRAME (1 << NODMIS_SHIFT)
35#define NODMIS_MASK (NODMIS_PER_FRAME - 1)
36
37struct odmi_data {
38 struct resource res;
39 void __iomem *base;
40 unsigned int spi_base;
41};
42
43static struct odmi_data *odmis;
44static unsigned long *odmis_bm;
45static unsigned int odmis_count;
46
47/* Protects odmis_bm */
48static DEFINE_SPINLOCK(odmis_bm_lock);
49
50static void odmi_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
51{
52 struct odmi_data *odmi;
53 phys_addr_t addr;
54 unsigned int odmin;
55
56 if (WARN_ON(d->hwirq >= odmis_count * NODMIS_PER_FRAME))
57 return;
58
59 odmi = &odmis[d->hwirq >> NODMIS_SHIFT];
60 odmin = d->hwirq & NODMIS_MASK;
61
62 addr = odmi->res.start + GICP_ODMIN_SET;
63
64 msg->address_hi = upper_32_bits(addr);
65 msg->address_lo = lower_32_bits(addr);
66 msg->data = odmin << GICP_ODMI_INT_NUM_SHIFT;
67}
68
69static struct irq_chip odmi_irq_chip = {
70 .name = "ODMI",
71 .irq_mask = irq_chip_mask_parent,
72 .irq_unmask = irq_chip_unmask_parent,
73 .irq_eoi = irq_chip_eoi_parent,
74 .irq_set_affinity = irq_chip_set_affinity_parent,
75 .irq_compose_msi_msg = odmi_compose_msi_msg,
76};
77
78static int odmi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
79 unsigned int nr_irqs, void *args)
80{
81 struct odmi_data *odmi = NULL;
82 struct irq_fwspec fwspec;
83 struct irq_data *d;
84 unsigned int hwirq, odmin;
85 int ret;
86
87 spin_lock(&odmis_bm_lock);
88 hwirq = find_first_zero_bit(odmis_bm, NODMIS_PER_FRAME * odmis_count);
89 if (hwirq >= NODMIS_PER_FRAME * odmis_count) {
90 spin_unlock(&odmis_bm_lock);
91 return -ENOSPC;
92 }
93
94 __set_bit(hwirq, odmis_bm);
95 spin_unlock(&odmis_bm_lock);
96
97 odmi = &odmis[hwirq >> NODMIS_SHIFT];
98 odmin = hwirq & NODMIS_MASK;
99
100 fwspec.fwnode = domain->parent->fwnode;
101 fwspec.param_count = 3;
102 fwspec.param[0] = GIC_SPI;
103 fwspec.param[1] = odmi->spi_base - 32 + odmin;
104 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
105
106 ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
107 if (ret) {
108 pr_err("Cannot allocate parent IRQ\n");
109 spin_lock(&odmis_bm_lock);
110 __clear_bit(odmin, odmis_bm);
111 spin_unlock(&odmis_bm_lock);
112 return ret;
113 }
114
115 /* Configure the interrupt line to be edge */
116 d = irq_domain_get_irq_data(domain->parent, virq);
117 d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
118
119 irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
120 &odmi_irq_chip, NULL);
121
122 return 0;
123}
124
125static void odmi_irq_domain_free(struct irq_domain *domain,
126 unsigned int virq, unsigned int nr_irqs)
127{
128 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
129
130 if (d->hwirq >= odmis_count * NODMIS_PER_FRAME) {
131 pr_err("Failed to teardown msi. Invalid hwirq %lu\n", d->hwirq);
132 return;
133 }
134
135 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
136
137 /* Actually free the MSI */
138 spin_lock(&odmis_bm_lock);
139 __clear_bit(d->hwirq, odmis_bm);
140 spin_unlock(&odmis_bm_lock);
141}
142
143static const struct irq_domain_ops odmi_domain_ops = {
144 .alloc = odmi_irq_domain_alloc,
145 .free = odmi_irq_domain_free,
146};
147
148static struct irq_chip odmi_msi_irq_chip = {
149 .name = "ODMI",
150};
151
152static struct msi_domain_ops odmi_msi_ops = {
153};
154
155static struct msi_domain_info odmi_msi_domain_info = {
156 .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
157 .ops = &odmi_msi_ops,
158 .chip = &odmi_msi_irq_chip,
159};
160
161static int __init mvebu_odmi_init(struct device_node *node,
162 struct device_node *parent)
163{
164 struct irq_domain *inner_domain, *plat_domain;
165 int ret, i;
166
167 if (of_property_read_u32(node, "marvell,odmi-frames", &odmis_count))
168 return -EINVAL;
169
170 odmis = kcalloc(odmis_count, sizeof(struct odmi_data), GFP_KERNEL);
171 if (!odmis)
172 return -ENOMEM;
173
174 odmis_bm = kcalloc(BITS_TO_LONGS(odmis_count * NODMIS_PER_FRAME),
175 sizeof(long), GFP_KERNEL);
176 if (!odmis_bm) {
177 ret = -ENOMEM;
178 goto err_alloc;
179 }
180
181 for (i = 0; i < odmis_count; i++) {
182 struct odmi_data *odmi = &odmis[i];
183
184 ret = of_address_to_resource(node, i, &odmi->res);
185 if (ret)
186 goto err_unmap;
187
188 odmi->base = of_io_request_and_map(node, i, "odmi");
189 if (IS_ERR(odmi->base)) {
190 ret = PTR_ERR(odmi->base);
191 goto err_unmap;
192 }
193
194 if (of_property_read_u32_index(node, "marvell,spi-base",
195 i, &odmi->spi_base)) {
196 ret = -EINVAL;
197 goto err_unmap;
198 }
199 }
200
201 inner_domain = irq_domain_create_linear(of_node_to_fwnode(node),
202 odmis_count * NODMIS_PER_FRAME,
203 &odmi_domain_ops, NULL);
204 if (!inner_domain) {
205 ret = -ENOMEM;
206 goto err_unmap;
207 }
208
209 inner_domain->parent = irq_find_host(parent);
210
211 plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
212 &odmi_msi_domain_info,
213 inner_domain);
214 if (!plat_domain) {
215 ret = -ENOMEM;
216 goto err_remove_inner;
217 }
218
219 return 0;
220
221err_remove_inner:
222 irq_domain_remove(inner_domain);
223err_unmap:
224 for (i = 0; i < odmis_count; i++) {
225 struct odmi_data *odmi = &odmis[i];
226
227 if (odmi->base && !IS_ERR(odmi->base))
228 iounmap(odmis[i].base);
229 }
230 kfree(odmis_bm);
231err_alloc:
232 kfree(odmis);
233 return ret;
234}
235
236IRQCHIP_DECLARE(mvebu_odmi, "marvell,odmi-controller", mvebu_odmi_init);
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index efe50845939d..17304705f2cf 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -183,7 +183,7 @@ static void __iomem * __init icoll_init_iobase(struct device_node *np)
183 void __iomem *icoll_base; 183 void __iomem *icoll_base;
184 184
185 icoll_base = of_io_request_and_map(np, 0, np->name); 185 icoll_base = of_io_request_and_map(np, 0, np->name);
186 if (!icoll_base) 186 if (IS_ERR(icoll_base))
187 panic("%s: unable to map resource", np->full_name); 187 panic("%s: unable to map resource", np->full_name);
188 return icoll_base; 188 return icoll_base;
189} 189}
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index 0820f67cc9a7..668730c5cb66 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -160,9 +160,9 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
160 160
161 gc = irq_get_domain_generic_chip(domain, 0); 161 gc = irq_get_domain_generic_chip(domain, 0);
162 gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node)); 162 gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
163 if (!gc->reg_base) { 163 if (IS_ERR(gc->reg_base)) {
164 pr_err("unable to map resource\n"); 164 pr_err("unable to map resource\n");
165 ret = -ENOMEM; 165 ret = PTR_ERR(gc->reg_base);
166 goto fail_irqd_remove; 166 goto fail_irqd_remove;
167 } 167 }
168 168
diff --git a/drivers/irqchip/irq-tango.c b/drivers/irqchip/irq-tango.c
new file mode 100644
index 000000000000..bdbb5c0ff7fe
--- /dev/null
+++ b/drivers/irqchip/irq-tango.c
@@ -0,0 +1,232 @@
1/*
2 * Copyright (C) 2014 Mans Rullgard <mans@mansr.com>
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 */
9
10#include <linux/init.h>
11#include <linux/irq.h>
12#include <linux/irqchip.h>
13#include <linux/irqchip/chained_irq.h>
14#include <linux/ioport.h>
15#include <linux/io.h>
16#include <linux/of_address.h>
17#include <linux/of_irq.h>
18#include <linux/slab.h>
19
20#define IRQ0_CTL_BASE 0x0000
21#define IRQ1_CTL_BASE 0x0100
22#define EDGE_CTL_BASE 0x0200
23#define IRQ2_CTL_BASE 0x0300
24
25#define IRQ_CTL_HI 0x18
26#define EDGE_CTL_HI 0x20
27
28#define IRQ_STATUS 0x00
29#define IRQ_RAWSTAT 0x04
30#define IRQ_EN_SET 0x08
31#define IRQ_EN_CLR 0x0c
32#define IRQ_SOFT_SET 0x10
33#define IRQ_SOFT_CLR 0x14
34
35#define EDGE_STATUS 0x00
36#define EDGE_RAWSTAT 0x04
37#define EDGE_CFG_RISE 0x08
38#define EDGE_CFG_FALL 0x0c
39#define EDGE_CFG_RISE_SET 0x10
40#define EDGE_CFG_RISE_CLR 0x14
41#define EDGE_CFG_FALL_SET 0x18
42#define EDGE_CFG_FALL_CLR 0x1c
43
/*
 * Per-bank controller state, reachable from handlers via the irq domain's
 * host_data pointer.
 */
struct tangox_irq_chip {
	void __iomem *base;	/* mapped controller register window */
	unsigned long ctl;	/* this bank's control block offset within base */
};
48
/* Read a 32-bit controller register at byte offset @reg from chip->base. */
static inline u32 intc_readl(struct tangox_irq_chip *chip, int reg)
{
	return readl_relaxed(chip->base + reg);
}
53
/* Write a 32-bit controller register at byte offset @reg from chip->base. */
static inline void intc_writel(struct tangox_irq_chip *chip, int reg, u32 val)
{
	writel_relaxed(val, chip->base + reg);
}
58
/*
 * Invoke the handler of every pending interrupt set in @status.
 * @base is the hwirq number of bit 0 of @status (0 for the low status
 * word, 32 for the high word).
 */
static void tangox_dispatch_irqs(struct irq_domain *dom, unsigned int status,
				 int base)
{
	unsigned int hwirq;
	unsigned int virq;

	while (status) {
		hwirq = __ffs(status);
		/* Unmapped hwirqs are silently skipped. */
		virq = irq_find_mapping(dom, base + hwirq);
		if (virq)
			generic_handle_irq(virq);
		status &= ~BIT(hwirq);
	}
}
73
/*
 * Chained handler for the bank's parent interrupt: read both 32-bit
 * status words and dispatch all pending sources.
 */
static void tangox_irq_handler(struct irq_desc *desc)
{
	struct irq_domain *dom = irq_desc_get_handler_data(desc);
	struct irq_chip *host_chip = irq_desc_get_chip(desc);
	struct tangox_irq_chip *chip = dom->host_data;
	unsigned int status_lo, status_hi;

	chained_irq_enter(host_chip, desc);

	status_lo = intc_readl(chip, chip->ctl + IRQ_STATUS);
	status_hi = intc_readl(chip, chip->ctl + IRQ_CTL_HI + IRQ_STATUS);

	/* hwirqs 0-31 live in the low word, 32-63 in the high word. */
	tangox_dispatch_irqs(dom, status_lo, 0);
	tangox_dispatch_irqs(dom, status_hi, 32);

	chained_irq_exit(host_chip, desc);
}
91
/*
 * Configure the trigger mode for the irqs selected by d->mask through the
 * edge RISE/FALL config registers, then switch to the matching chip type
 * (level vs. edge handler) via irq_setup_alt_chip().
 *
 * NOTE(review): level modes are encoded by clearing (high) or setting
 * (low) both edge-config bits — assumed from this mapping; confirm
 * against the SMP8642 hardware documentation.
 */
static int tangox_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct tangox_irq_chip *chip = gc->domain->host_data;
	struct irq_chip_regs *regs = &gc->chip_types[0].regs;

	switch (flow_type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_RISING:
		intc_writel(chip, regs->type + EDGE_CFG_RISE_SET, d->mask);
		intc_writel(chip, regs->type + EDGE_CFG_FALL_CLR, d->mask);
		break;

	case IRQ_TYPE_EDGE_FALLING:
		intc_writel(chip, regs->type + EDGE_CFG_RISE_CLR, d->mask);
		intc_writel(chip, regs->type + EDGE_CFG_FALL_SET, d->mask);
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		intc_writel(chip, regs->type + EDGE_CFG_RISE_CLR, d->mask);
		intc_writel(chip, regs->type + EDGE_CFG_FALL_CLR, d->mask);
		break;

	case IRQ_TYPE_LEVEL_LOW:
		intc_writel(chip, regs->type + EDGE_CFG_RISE_SET, d->mask);
		intc_writel(chip, regs->type + EDGE_CFG_FALL_SET, d->mask);
		break;

	default:
		pr_err("Invalid trigger mode %x for IRQ %d\n",
		       flow_type, d->irq);
		return -EINVAL;
	}

	return irq_setup_alt_chip(d, flow_type);
}
127
/*
 * Set up one generic chip (covering 32 hwirqs) with two chip types:
 * type 0 handles level irqs, type 1 handles edge irqs.  Both types share
 * the same enable/disable/ack register block.  All sources start masked
 * with any stale edge status acked.
 */
static void __init tangox_irq_init_chip(struct irq_chip_generic *gc,
					unsigned long ctl_offs,
					unsigned long edge_offs)
{
	struct tangox_irq_chip *chip = gc->domain->host_data;
	struct irq_chip_type *ct = gc->chip_types;
	unsigned long ctl_base = chip->ctl + ctl_offs;
	unsigned long edge_base = EDGE_CTL_BASE + edge_offs;
	int i;

	gc->reg_base = chip->base;
	gc->unused = 0;

	for (i = 0; i < 2; i++) {
		ct[i].chip.irq_ack = irq_gc_ack_set_bit;
		ct[i].chip.irq_mask = irq_gc_mask_disable_reg;
		ct[i].chip.irq_mask_ack = irq_gc_mask_disable_reg_and_ack;
		ct[i].chip.irq_unmask = irq_gc_unmask_enable_reg;
		ct[i].chip.irq_set_type = tangox_irq_set_type;
		ct[i].chip.name = gc->domain->name;

		ct[i].regs.enable = ctl_base + IRQ_EN_SET;
		ct[i].regs.disable = ctl_base + IRQ_EN_CLR;
		ct[i].regs.ack = edge_base + EDGE_RAWSTAT;
		ct[i].regs.type = edge_base;
	}

	ct[0].type = IRQ_TYPE_LEVEL_MASK;
	ct[0].handler = handle_level_irq;

	ct[1].type = IRQ_TYPE_EDGE_BOTH;
	ct[1].handler = handle_edge_irq;

	/* Mask all sources and clear any pending edge status. */
	intc_writel(chip, ct->regs.disable, 0xffffffff);
	intc_writel(chip, ct->regs.ack, 0xffffffff);
}
164
/* Initialize both generic chips (hwirqs 0-31 and 32-63) of a domain. */
static void __init tangox_irq_domain_init(struct irq_domain *dom)
{
	struct irq_chip_generic *gc;
	int i;

	for (i = 0; i < 2; i++) {
		gc = irq_get_domain_generic_chip(dom, i * 32);
		tangox_irq_init_chip(gc, i * IRQ_CTL_HI, i * EDGE_CTL_HI);
	}
}
175
176static int __init tangox_irq_init(void __iomem *base, struct resource *baseres,
177 struct device_node *node)
178{
179 struct tangox_irq_chip *chip;
180 struct irq_domain *dom;
181 struct resource res;
182 int irq;
183 int err;
184
185 irq = irq_of_parse_and_map(node, 0);
186 if (!irq)
187 panic("%s: failed to get IRQ", node->name);
188
189 err = of_address_to_resource(node, 0, &res);
190 if (err)
191 panic("%s: failed to get address", node->name);
192
193 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
194 chip->ctl = res.start - baseres->start;
195 chip->base = base;
196
197 dom = irq_domain_add_linear(node, 64, &irq_generic_chip_ops, chip);
198 if (!dom)
199 panic("%s: failed to create irqdomain", node->name);
200
201 err = irq_alloc_domain_generic_chips(dom, 32, 2, node->name,
202 handle_level_irq, 0, 0, 0);
203 if (err)
204 panic("%s: failed to allocate irqchip", node->name);
205
206 tangox_irq_domain_init(dom);
207
208 irq_set_chained_handler(irq, tangox_irq_handler);
209 irq_set_handler_data(irq, dom);
210
211 return 0;
212}
213
214static int __init tangox_of_irq_init(struct device_node *node,
215 struct device_node *parent)
216{
217 struct device_node *c;
218 struct resource res;
219 void __iomem *base;
220
221 base = of_iomap(node, 0);
222 if (!base)
223 panic("%s: of_iomap failed", node->name);
224
225 of_address_to_resource(node, 0, &res);
226
227 for_each_child_of_node(node, c)
228 tangox_irq_init(base, &res, c);
229
230 return 0;
231}
232IRQCHIP_DECLARE(tangox_intc, "sigma,smp8642-intc", tangox_of_irq_init);
diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c
index 4192bdcd2734..2325fb3c482b 100644
--- a/drivers/irqchip/irq-ts4800.c
+++ b/drivers/irqchip/irq-ts4800.c
@@ -59,7 +59,7 @@ static int ts4800_irqdomain_map(struct irq_domain *d, unsigned int irq,
59 return 0; 59 return 0;
60} 60}
61 61
62struct irq_domain_ops ts4800_ic_ops = { 62static const struct irq_domain_ops ts4800_ic_ops = {
63 .map = ts4800_irqdomain_map, 63 .map = ts4800_irqdomain_map,
64 .xlate = irq_domain_xlate_onecell, 64 .xlate = irq_domain_xlate_onecell,
65}; 65};
diff --git a/include/linux/irq.h b/include/linux/irq.h
index cd14cd4a22b4..c4de62348ff2 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -133,8 +133,11 @@ struct irq_domain;
133 * Use accessor functions to deal with it 133 * Use accessor functions to deal with it
134 * @node: node index useful for balancing 134 * @node: node index useful for balancing
135 * @handler_data: per-IRQ data for the irq_chip methods 135 * @handler_data: per-IRQ data for the irq_chip methods
136 * @affinity: IRQ affinity on SMP 136 * @affinity: IRQ affinity on SMP. If this is an IPI
137 * related irq, then this is the mask of the
138 * CPUs to which an IPI can be sent.
137 * @msi_desc: MSI descriptor 139 * @msi_desc: MSI descriptor
140 * @ipi_offset: Offset of first IPI target cpu in @affinity. Optional.
138 */ 141 */
139struct irq_common_data { 142struct irq_common_data {
140 unsigned int __private state_use_accessors; 143 unsigned int __private state_use_accessors;
@@ -144,6 +147,9 @@ struct irq_common_data {
144 void *handler_data; 147 void *handler_data;
145 struct msi_desc *msi_desc; 148 struct msi_desc *msi_desc;
146 cpumask_var_t affinity; 149 cpumask_var_t affinity;
150#ifdef CONFIG_GENERIC_IRQ_IPI
151 unsigned int ipi_offset;
152#endif
147}; 153};
148 154
149/** 155/**
@@ -343,6 +349,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
343 * @irq_get_irqchip_state: return the internal state of an interrupt 349 * @irq_get_irqchip_state: return the internal state of an interrupt
344 * @irq_set_irqchip_state: set the internal state of a interrupt 350 * @irq_set_irqchip_state: set the internal state of a interrupt
345 * @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine 351 * @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine
352 * @ipi_send_single: send a single IPI to destination cpus
353 * @ipi_send_mask: send an IPI to destination cpus in cpumask
346 * @flags: chip specific flags 354 * @flags: chip specific flags
347 */ 355 */
348struct irq_chip { 356struct irq_chip {
@@ -387,6 +395,9 @@ struct irq_chip {
387 395
388 int (*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info); 396 int (*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info);
389 397
398 void (*ipi_send_single)(struct irq_data *data, unsigned int cpu);
399 void (*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest);
400
390 unsigned long flags; 401 unsigned long flags;
391}; 402};
392 403
@@ -936,4 +947,12 @@ static inline u32 irq_reg_readl(struct irq_chip_generic *gc,
936 return readl(gc->reg_base + reg_offset); 947 return readl(gc->reg_base + reg_offset);
937} 948}
938 949
950/* Contrary to Linux irqs, for hardware irqs the irq number 0 is valid */
951#define INVALID_HWIRQ (~0UL)
952irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu);
953int __ipi_send_single(struct irq_desc *desc, unsigned int cpu);
954int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest);
955int ipi_send_single(unsigned int virq, unsigned int cpu);
956int ipi_send_mask(unsigned int virq, const struct cpumask *dest);
957
939#endif /* _LINUX_IRQ_H */ 958#endif /* _LINUX_IRQ_H */
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index ce824db48d64..80f89e4a29ac 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -261,9 +261,6 @@ extern void gic_write_compare(cycle_t cnt);
261extern void gic_write_cpu_compare(cycle_t cnt, int cpu); 261extern void gic_write_cpu_compare(cycle_t cnt, int cpu);
262extern void gic_start_count(void); 262extern void gic_start_count(void);
263extern void gic_stop_count(void); 263extern void gic_stop_count(void);
264extern void gic_send_ipi(unsigned int intr);
265extern unsigned int plat_ipi_call_int_xlate(unsigned int);
266extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
267extern int gic_get_c0_compare_int(void); 264extern int gic_get_c0_compare_int(void);
268extern int gic_get_c0_perfcount_int(void); 265extern int gic_get_c0_perfcount_int(void);
269extern int gic_get_c0_fdc_int(void); 266extern int gic_get_c0_fdc_int(void);
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 04579d9fbce4..ed48594e96d2 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -74,6 +74,7 @@ enum irq_domain_bus_token {
74 DOMAIN_BUS_PCI_MSI, 74 DOMAIN_BUS_PCI_MSI,
75 DOMAIN_BUS_PLATFORM_MSI, 75 DOMAIN_BUS_PLATFORM_MSI,
76 DOMAIN_BUS_NEXUS, 76 DOMAIN_BUS_NEXUS,
77 DOMAIN_BUS_IPI,
77}; 78};
78 79
79/** 80/**
@@ -172,6 +173,12 @@ enum {
172 /* Core calls alloc/free recursive through the domain hierarchy. */ 173 /* Core calls alloc/free recursive through the domain hierarchy. */
173 IRQ_DOMAIN_FLAG_AUTO_RECURSIVE = (1 << 1), 174 IRQ_DOMAIN_FLAG_AUTO_RECURSIVE = (1 << 1),
174 175
176 /* Irq domain is an IPI domain with virq per cpu */
177 IRQ_DOMAIN_FLAG_IPI_PER_CPU = (1 << 2),
178
179 /* Irq domain is an IPI domain with single virq */
180 IRQ_DOMAIN_FLAG_IPI_SINGLE = (1 << 3),
181
175 /* 182 /*
176 * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved 183 * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
177 * for implementation specific purposes and ignored by the 184 * for implementation specific purposes and ignored by the
@@ -206,6 +213,8 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
206extern struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode, 213extern struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
207 enum irq_domain_bus_token bus_token); 214 enum irq_domain_bus_token bus_token);
208extern void irq_set_default_host(struct irq_domain *host); 215extern void irq_set_default_host(struct irq_domain *host);
216extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
217 irq_hw_number_t hwirq, int node);
209 218
210static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node) 219static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
211{ 220{
@@ -335,6 +344,11 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
335 const u32 *intspec, unsigned int intsize, 344 const u32 *intspec, unsigned int intsize,
336 irq_hw_number_t *out_hwirq, unsigned int *out_type); 345 irq_hw_number_t *out_hwirq, unsigned int *out_type);
337 346
347/* IPI functions */
348unsigned int irq_reserve_ipi(struct irq_domain *domain,
349 const struct cpumask *dest);
350void irq_destroy_ipi(unsigned int irq);
351
338/* V2 interfaces to support hierarchy IRQ domains. */ 352/* V2 interfaces to support hierarchy IRQ domains. */
339extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, 353extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
340 unsigned int virq); 354 unsigned int virq);
@@ -400,6 +414,22 @@ static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
400{ 414{
401 return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY; 415 return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY;
402} 416}
417
418static inline bool irq_domain_is_ipi(struct irq_domain *domain)
419{
420 return domain->flags &
421 (IRQ_DOMAIN_FLAG_IPI_PER_CPU | IRQ_DOMAIN_FLAG_IPI_SINGLE);
422}
423
424static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain)
425{
426 return domain->flags & IRQ_DOMAIN_FLAG_IPI_PER_CPU;
427}
428
429static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
430{
431 return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE;
432}
403#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ 433#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
404static inline void irq_domain_activate_irq(struct irq_data *data) { } 434static inline void irq_domain_activate_irq(struct irq_data *data) { }
405static inline void irq_domain_deactivate_irq(struct irq_data *data) { } 435static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
@@ -413,6 +443,21 @@ static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
413{ 443{
414 return false; 444 return false;
415} 445}
446
447static inline bool irq_domain_is_ipi(struct irq_domain *domain)
448{
449 return false;
450}
451
452static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain)
453{
454 return false;
455}
456
457static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
458{
459 return false;
460}
416#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ 461#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
417 462
418#else /* CONFIG_IRQ_DOMAIN */ 463#else /* CONFIG_IRQ_DOMAIN */
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 3b48dab80164..3bbfd6a9c475 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -64,6 +64,10 @@ config IRQ_DOMAIN_HIERARCHY
64 bool 64 bool
65 select IRQ_DOMAIN 65 select IRQ_DOMAIN
66 66
67# Generic IRQ IPI support
68config GENERIC_IRQ_IPI
69 bool
70
67# Generic MSI interrupt support 71# Generic MSI interrupt support
68config GENERIC_MSI_IRQ 72config GENERIC_MSI_IRQ
69 bool 73 bool
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 2fc9cbdf35b6..2ee42e95a3ce 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
8obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o 8obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o
9obj-$(CONFIG_PM_SLEEP) += pm.o 9obj-$(CONFIG_PM_SLEEP) += pm.o
10obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o 10obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o
11obj-$(CONFIG_GENERIC_IRQ_IPI) += ipi.o
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 5797909f4e5b..2f9f2b0e79f2 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -961,6 +961,7 @@ void irq_chip_mask_parent(struct irq_data *data)
961 data = data->parent_data; 961 data = data->parent_data;
962 data->chip->irq_mask(data); 962 data->chip->irq_mask(data);
963} 963}
964EXPORT_SYMBOL_GPL(irq_chip_mask_parent);
964 965
965/** 966/**
966 * irq_chip_unmask_parent - Unmask the parent interrupt 967 * irq_chip_unmask_parent - Unmask the parent interrupt
@@ -971,6 +972,7 @@ void irq_chip_unmask_parent(struct irq_data *data)
971 data = data->parent_data; 972 data = data->parent_data;
972 data->chip->irq_unmask(data); 973 data->chip->irq_unmask(data);
973} 974}
975EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);
974 976
975/** 977/**
976 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt 978 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
@@ -981,6 +983,7 @@ void irq_chip_eoi_parent(struct irq_data *data)
981 data = data->parent_data; 983 data = data->parent_data;
982 data->chip->irq_eoi(data); 984 data->chip->irq_eoi(data);
983} 985}
986EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);
984 987
985/** 988/**
986 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt 989 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
@@ -1016,6 +1019,7 @@ int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
1016 1019
1017 return -ENOSYS; 1020 return -ENOSYS;
1018} 1021}
1022EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);
1019 1023
1020/** 1024/**
1021 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware 1025 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 57bff7857e87..a15b5485b446 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -136,10 +136,9 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
136{ 136{
137 irqreturn_t retval = IRQ_NONE; 137 irqreturn_t retval = IRQ_NONE;
138 unsigned int flags = 0, irq = desc->irq_data.irq; 138 unsigned int flags = 0, irq = desc->irq_data.irq;
139 struct irqaction *action = desc->action; 139 struct irqaction *action;
140 140
141 /* action might have become NULL since we dropped the lock */ 141 for_each_action_of_desc(desc, action) {
142 while (action) {
143 irqreturn_t res; 142 irqreturn_t res;
144 143
145 trace_irq_handler_entry(irq, action); 144 trace_irq_handler_entry(irq, action);
@@ -173,7 +172,6 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
173 } 172 }
174 173
175 retval |= res; 174 retval |= res;
176 action = action->next;
177 } 175 }
178 176
179 add_interrupt_randomness(irq, flags); 177 add_interrupt_randomness(irq, flags);
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 3d182932d2d1..09be2c903c6d 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -131,6 +131,9 @@ static inline void chip_bus_sync_unlock(struct irq_desc *desc)
131#define IRQ_GET_DESC_CHECK_GLOBAL (_IRQ_DESC_CHECK) 131#define IRQ_GET_DESC_CHECK_GLOBAL (_IRQ_DESC_CHECK)
132#define IRQ_GET_DESC_CHECK_PERCPU (_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU) 132#define IRQ_GET_DESC_CHECK_PERCPU (_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU)
133 133
134#define for_each_action_of_desc(desc, act) \
135 for (act = desc->act; act; act = act->next)
136
134struct irq_desc * 137struct irq_desc *
135__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, 138__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
136 unsigned int check); 139 unsigned int check);
diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
new file mode 100644
index 000000000000..c37f34b00a11
--- /dev/null
+++ b/kernel/irq/ipi.c
@@ -0,0 +1,326 @@
1/*
2 * linux/kernel/irq/ipi.c
3 *
4 * Copyright (C) 2015 Imagination Technologies Ltd
5 * Author: Qais Yousef <qais.yousef@imgtec.com>
6 *
7 * This file contains driver APIs to the IPI subsystem.
8 */
9
10#define pr_fmt(fmt) "genirq/ipi: " fmt
11
12#include <linux/irqdomain.h>
13#include <linux/irq.h>
14
/**
 * irq_reserve_ipi() - Setup an IPI to destination cpumask
 * @domain:	IPI domain
 * @dest:	cpumask of cpus which can receive the IPI
 *
 * Allocate a virq that can be used to send IPI to any CPU in dest mask.
 *
 * Return: the linux irq number on success, 0 on failure.
 */
unsigned int irq_reserve_ipi(struct irq_domain *domain,
			     const struct cpumask *dest)
{
	unsigned int nr_irqs, offset;
	struct irq_data *data;
	int virq, i;

	if (!domain ||!irq_domain_is_ipi(domain)) {
		pr_warn("Reservation on a non IPI domain\n");
		return 0;
	}

	if (!cpumask_subset(dest, cpu_possible_mask)) {
		pr_warn("Reservation is not in possible_cpu_mask\n");
		return 0;
	}

	nr_irqs = cpumask_weight(dest);
	if (!nr_irqs) {
		pr_warn("Reservation for empty destination mask\n");
		return 0;
	}

	if (irq_domain_is_ipi_single(domain)) {
		/*
		 * If the underlying implementation uses a single HW irq on
		 * all cpus then we only need a single Linux irq number for
		 * it. We have no restrictions vs. the destination mask. The
		 * underlying implementation can deal with holes nicely.
		 */
		nr_irqs = 1;
		offset = 0;
	} else {
		unsigned int next;

		/*
		 * The IPI requires a separate HW irq on each CPU. We require
		 * that the destination mask is consecutive. If an
		 * implementation needs to support holes, it can reserve
		 * several IPI ranges.
		 */
		offset = cpumask_first(dest);
		/*
		 * Find a hole and if found look for another set bit after the
		 * hole. For now we don't support this scenario.
		 */
		next = cpumask_next_zero(offset, dest);
		if (next < nr_cpu_ids)
			next = cpumask_next(next, dest);
		if (next < nr_cpu_ids) {
			pr_warn("Destination mask has holes\n");
			return 0;
		}
	}

	/* Reserve the Linux irq descriptors first ... */
	virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE);
	if (virq <= 0) {
		pr_warn("Can't reserve IPI, failed to alloc descs\n");
		return 0;
	}

	/* ... then let the domain allocate the matching hardware irqs. */
	virq = __irq_domain_alloc_irqs(domain, virq, nr_irqs, NUMA_NO_NODE,
				       (void *) dest, true);

	if (virq <= 0) {
		pr_warn("Can't reserve IPI, failed to alloc hw irqs\n");
		goto free_descs;
	}

	/*
	 * Record the destination mask and the offset of the first target
	 * cpu on every descriptor so the send path can translate a cpu
	 * number into an irq number.
	 */
	for (i = 0; i < nr_irqs; i++) {
		data = irq_get_irq_data(virq + i);
		cpumask_copy(data->common->affinity, dest);
		data->common->ipi_offset = offset;
	}
	return virq;

free_descs:
	irq_free_descs(virq, nr_irqs);
	return 0;
}
104
/**
 * irq_destroy_ipi() - unreserve an IPI that was previously allocated
 * @irq:	linux irq number to be destroyed
 *
 * Return the IPIs allocated with irq_reserve_ipi() to the system destroying
 * all virqs associated with them.
 */
void irq_destroy_ipi(unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
	struct irq_domain *domain;
	unsigned int nr_irqs;

	/* irq 0 is never a valid IPI reservation. */
	if (!irq || !data || !ipimask)
		return;

	domain = data->domain;
	if (WARN_ON(domain == NULL))
		return;

	if (!irq_domain_is_ipi(domain)) {
		pr_warn("Trying to destroy a non IPI domain!\n");
		return;
	}

	/*
	 * A per-cpu IPI domain consumed one virq per target cpu; a
	 * single-irq domain consumed exactly one.
	 */
	if (irq_domain_is_ipi_per_cpu(domain))
		nr_irqs = cpumask_weight(ipimask);
	else
		nr_irqs = 1;

	irq_domain_free_irqs(irq, nr_irqs);
}
138
139/**
140 * ipi_get_hwirq - Get the hwirq associated with an IPI to a cpu
141 * @irq: linux irq number
142 * @cpu: the target cpu
143 *
144 * When dealing with coprocessors IPI, we need to inform the coprocessor of
145 * the hwirq it needs to use to receive and send IPIs.
146 *
147 * Returns hwirq value on success and INVALID_HWIRQ on failure.
148 */
149irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
150{
151 struct irq_data *data = irq_get_irq_data(irq);
152 struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
153
154 if (!data || !ipimask || cpu > nr_cpu_ids)
155 return INVALID_HWIRQ;
156
157 if (!cpumask_test_cpu(cpu, ipimask))
158 return INVALID_HWIRQ;
159
160 /*
161 * Get the real hardware irq number if the underlying implementation
162 * uses a seperate irq per cpu. If the underlying implementation uses
163 * a single hardware irq for all cpus then the IPI send mechanism
164 * needs to take care of the cpu destinations.
165 */
166 if (irq_domain_is_ipi_per_cpu(data->domain))
167 data = irq_get_irq_data(irq + cpu - data->common->ipi_offset);
168
169 return data ? irqd_to_hwirq(data) : INVALID_HWIRQ;
170}
171EXPORT_SYMBOL_GPL(ipi_get_hwirq);
172
173static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
174 const struct cpumask *dest, unsigned int cpu)
175{
176 struct cpumask *ipimask = irq_data_get_affinity_mask(data);
177
178 if (!chip || !ipimask)
179 return -EINVAL;
180
181 if (!chip->ipi_send_single && !chip->ipi_send_mask)
182 return -EINVAL;
183
184 if (cpu > nr_cpu_ids)
185 return -EINVAL;
186
187 if (dest) {
188 if (!cpumask_subset(dest, ipimask))
189 return -EINVAL;
190 } else {
191 if (!cpumask_test_cpu(cpu, ipimask))
192 return -EINVAL;
193 }
194 return 0;
195}
196
/**
 * __ipi_send_single - send an IPI to a target Linux SMP CPU
 * @desc:	pointer to irq_desc of the IRQ
 * @cpu:	destination CPU, must in the destination mask passed to
 *		irq_reserve_ipi()
 *
 * This function is for architecture or core code to speed up IPI sending. Not
 * usable from driver code.
 *
 * Returns zero on success and negative error number on failure.
 */
int __ipi_send_single(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

#ifdef DEBUG
	/*
	 * Minimise the overhead by omitting the checks for Linux SMP IPIs.
	 * Since the callers should be arch or core code which is generally
	 * trusted, only check for errors when debugging.
	 */
	if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
		return -EINVAL;
#endif
	/* Fall back to the mask callback when no single-cpu one exists. */
	if (!chip->ipi_send_single) {
		chip->ipi_send_mask(data, cpumask_of(cpu));
		return 0;
	}

	/* FIXME: Store this information in irqdata flags */
	if (irq_domain_is_ipi_per_cpu(data->domain) &&
	    cpu != data->common->ipi_offset) {
		/* use the correct data for that cpu */
		unsigned irq = data->irq + cpu - data->common->ipi_offset;

		data = irq_get_irq_data(irq);
	}
	chip->ipi_send_single(data, cpu);
	return 0;
}
238
/**
 * __ipi_send_mask - send an IPI to target Linux SMP CPU(s)
 * @desc:	pointer to irq_desc of the IRQ
 * @dest:	dest CPU(s), must be a subset of the mask passed to
 *		irq_reserve_ipi()
 *
 * This function is for architecture or core code to speed up IPI sending. Not
 * usable from driver code.
 *
 * Returns zero on success and negative error number on failure.
 */
int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	unsigned int cpu;

#ifdef DEBUG
	/*
	 * Minimise the overhead by omitting the checks for Linux SMP IPIs.
	 * Since the callers should be arch or core code which is generally
	 * trusted, only check for errors when debugging.
	 */
	if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0)))
		return -EINVAL;
#endif
	if (chip->ipi_send_mask) {
		chip->ipi_send_mask(data, dest);
		return 0;
	}

	/* No mask callback: send one IPI per destination cpu. */
	if (irq_domain_is_ipi_per_cpu(data->domain)) {
		unsigned int base = data->irq;

		/* Each cpu has its own irq_data in a per-cpu domain. */
		for_each_cpu(cpu, dest) {
			unsigned irq = base + cpu - data->common->ipi_offset;

			data = irq_get_irq_data(irq);
			chip->ipi_send_single(data, cpu);
		}
	} else {
		for_each_cpu(cpu, dest)
			chip->ipi_send_single(data, cpu);
	}
	return 0;
}
285
/**
 * ipi_send_single - Send an IPI to a single CPU
 * @virq:	linux irq number from irq_reserve_ipi()
 * @cpu:	destination CPU, must in the destination mask passed to
 *		irq_reserve_ipi()
 *
 * Driver-facing wrapper around __ipi_send_single() that always runs the
 * sanity checks, not only under DEBUG.
 *
 * Returns zero on success and negative error number on failure.
 */
int ipi_send_single(unsigned int virq, unsigned int cpu)
{
	struct irq_desc *desc = irq_to_desc(virq);
	struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL;
	struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL;

	if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
		return -EINVAL;

	return __ipi_send_single(desc, cpu);
}
EXPORT_SYMBOL_GPL(ipi_send_single);
306
/**
 * ipi_send_mask - Send an IPI to target CPU(s)
 * @virq:	linux irq number from irq_reserve_ipi()
 * @dest:	dest CPU(s), must be a subset of the mask passed to
 *		irq_reserve_ipi()
 *
 * Driver-facing wrapper around __ipi_send_mask() that always runs the
 * sanity checks, not only under DEBUG.
 *
 * Returns zero on success and negative error number on failure.
 */
int ipi_send_mask(unsigned int virq, const struct cpumask *dest)
{
	struct irq_desc *desc = irq_to_desc(virq);
	struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL;
	struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL;

	if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0)))
		return -EINVAL;

	return __ipi_send_mask(desc, dest);
}
EXPORT_SYMBOL_GPL(ipi_send_mask);
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 0409da0bcc33..0ccd028817d7 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -24,10 +24,27 @@
24static struct lock_class_key irq_desc_lock_class; 24static struct lock_class_key irq_desc_lock_class;
25 25
26#if defined(CONFIG_SMP) 26#if defined(CONFIG_SMP)
27static int __init irq_affinity_setup(char *str)
28{
29	zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
30	cpulist_parse(str, irq_default_affinity);
31	/*
32	 * Set at least the boot cpu. We don't want to end up with
33	 * bug reports caused by random command line masks
34	 */
35	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
36	return 1;
37}
38__setup("irqaffinity=", irq_affinity_setup);
39
27static void __init init_irq_default_affinity(void) 40static void __init init_irq_default_affinity(void)
28{ 41{
29 alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); 42#ifdef CONFIG_CPUMASK_OFFSTACK
30 cpumask_setall(irq_default_affinity); 43 if (!irq_default_affinity)
44 zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
45#endif
46 if (cpumask_empty(irq_default_affinity))
47 cpumask_setall(irq_default_affinity);
31} 48}
32#else 49#else
33static void __init init_irq_default_affinity(void) 50static void __init init_irq_default_affinity(void)
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 3e56d2f03e24..3a519a01118b 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -23,8 +23,6 @@ static DEFINE_MUTEX(irq_domain_mutex);
23static DEFINE_MUTEX(revmap_trees_mutex); 23static DEFINE_MUTEX(revmap_trees_mutex);
24static struct irq_domain *irq_default_domain; 24static struct irq_domain *irq_default_domain;
25 25
26static int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
27 irq_hw_number_t hwirq, int node);
28static void irq_domain_check_hierarchy(struct irq_domain *domain); 26static void irq_domain_check_hierarchy(struct irq_domain *domain);
29 27
30struct irqchip_fwid { 28struct irqchip_fwid {
@@ -840,8 +838,8 @@ const struct irq_domain_ops irq_domain_simple_ops = {
840}; 838};
841EXPORT_SYMBOL_GPL(irq_domain_simple_ops); 839EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
842 840
843static int irq_domain_alloc_descs(int virq, unsigned int cnt, 841int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq,
844 irq_hw_number_t hwirq, int node) 842 int node)
845{ 843{
846 unsigned int hint; 844 unsigned int hint;
847 845
@@ -895,6 +893,7 @@ struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
895 893
896 return domain; 894 return domain;
897} 895}
896EXPORT_SYMBOL_GPL(irq_domain_create_hierarchy);
898 897
899static void irq_domain_insert_irq(int virq) 898static void irq_domain_insert_irq(int virq)
900{ 899{
@@ -1045,6 +1044,7 @@ int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
1045 1044
1046 return 0; 1045 return 0;
1047} 1046}
1047EXPORT_SYMBOL_GPL(irq_domain_set_hwirq_and_chip);
1048 1048
1049/** 1049/**
1050 * irq_domain_set_info - Set the complete data for a @virq in @domain 1050 * irq_domain_set_info - Set the complete data for a @virq in @domain
@@ -1078,6 +1078,7 @@ void irq_domain_reset_irq_data(struct irq_data *irq_data)
1078 irq_data->chip = &no_irq_chip; 1078 irq_data->chip = &no_irq_chip;
1079 irq_data->chip_data = NULL; 1079 irq_data->chip_data = NULL;
1080} 1080}
1081EXPORT_SYMBOL_GPL(irq_domain_reset_irq_data);
1081 1082
1082/** 1083/**
1083 * irq_domain_free_irqs_common - Clear irq_data and free the parent 1084 * irq_domain_free_irqs_common - Clear irq_data and free the parent
@@ -1275,6 +1276,7 @@ int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
1275 nr_irqs, arg); 1276 nr_irqs, arg);
1276 return -ENOSYS; 1277 return -ENOSYS;
1277} 1278}
1279EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent);
1278 1280
1279/** 1281/**
1280 * irq_domain_free_irqs_parent - Free interrupts from parent domain 1282 * irq_domain_free_irqs_parent - Free interrupts from parent domain
@@ -1292,6 +1294,7 @@ void irq_domain_free_irqs_parent(struct irq_domain *domain,
1292 irq_domain_free_irqs_recursive(domain->parent, irq_base, 1294 irq_domain_free_irqs_recursive(domain->parent, irq_base,
1293 nr_irqs); 1295 nr_irqs);
1294} 1296}
1297EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
1295 1298
1296/** 1299/**
1297 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate 1300 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 841187239adc..3ddd2297ee95 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -144,13 +144,11 @@ int irq_can_set_affinity(unsigned int irq)
144 */ 144 */
145void irq_set_thread_affinity(struct irq_desc *desc) 145void irq_set_thread_affinity(struct irq_desc *desc)
146{ 146{
147 struct irqaction *action = desc->action; 147 struct irqaction *action;
148 148
149 while (action) { 149 for_each_action_of_desc(desc, action)
150 if (action->thread) 150 if (action->thread)
151 set_bit(IRQTF_AFFINITY, &action->thread_flags); 151 set_bit(IRQTF_AFFINITY, &action->thread_flags);
152 action = action->next;
153 }
154} 152}
155 153
156#ifdef CONFIG_GENERIC_PENDING_IRQ 154#ifdef CONFIG_GENERIC_PENDING_IRQ
@@ -994,7 +992,7 @@ void irq_wake_thread(unsigned int irq, void *dev_id)
994 return; 992 return;
995 993
996 raw_spin_lock_irqsave(&desc->lock, flags); 994 raw_spin_lock_irqsave(&desc->lock, flags);
997 for (action = desc->action; action; action = action->next) { 995 for_each_action_of_desc(desc, action) {
998 if (action->dev_id == dev_id) { 996 if (action->dev_id == dev_id) {
999 if (action->thread) 997 if (action->thread)
1000 __irq_wake_thread(desc, action); 998 __irq_wake_thread(desc, action);
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index a2c02fd5d6d0..4e1b94726818 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -291,7 +291,7 @@ static int name_unique(unsigned int irq, struct irqaction *new_action)
291 int ret = 1; 291 int ret = 1;
292 292
293 raw_spin_lock_irqsave(&desc->lock, flags); 293 raw_spin_lock_irqsave(&desc->lock, flags);
294 for (action = desc->action ; action; action = action->next) { 294 for_each_action_of_desc(desc, action) {
295 if ((action != new_action) && action->name && 295 if ((action != new_action) && action->name &&
296 !strcmp(new_action->name, action->name)) { 296 !strcmp(new_action->name, action->name)) {
297 ret = 0; 297 ret = 0;
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 32144175458d..5707f97a3e6a 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -211,14 +211,12 @@ static void __report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
211 * desc->lock here. See synchronize_irq(). 211 * desc->lock here. See synchronize_irq().
212 */ 212 */
213 raw_spin_lock_irqsave(&desc->lock, flags); 213 raw_spin_lock_irqsave(&desc->lock, flags);
214 action = desc->action; 214 for_each_action_of_desc(desc, action) {
215 while (action) {
216 printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler); 215 printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler);
217 if (action->thread_fn) 216 if (action->thread_fn)
218 printk(KERN_CONT " threaded [<%p>] %pf", 217 printk(KERN_CONT " threaded [<%p>] %pf",
219 action->thread_fn, action->thread_fn); 218 action->thread_fn, action->thread_fn);
220 printk(KERN_CONT "\n"); 219 printk(KERN_CONT "\n");
221 action = action->next;
222 } 220 }
223 raw_spin_unlock_irqrestore(&desc->lock, flags); 221 raw_spin_unlock_irqrestore(&desc->lock, flags);
224} 222}