author	Linus Torvalds <torvalds@linux-foundation.org>	2016-05-17 13:27:29 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-05-17 13:27:29 -0400
commit	ede40902cf80714ece199977b308e2ee437cae0b (patch)
tree	e85e57194e7c9c7575ed0fa27b72495135a7eb23
parent	91e8d0cbc94f81f110e508c3105dd93fb146d6b5 (diff)
parent	0097852c302aca943a8b76f7f85e133af6e1701a (diff)
Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq updates from Thomas Gleixner:
 "This update delivers:

   - Yet another interrupt chip driver (LPC32xx)

   - Core functions to handle partitioned per-cpu interrupts

   - Enhancements to the IPI core

   - Proper handling of irq type configuration

   - A large set of ARM GIC enhancements

   - The usual pile of small fixes, cleanups and enhancements"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (31 commits)
  irqchip/bcm2836: Use a more generic memory barrier call
  irqchip/bcm2836: Fix compiler warning on 64-bit build
  irqchip/bcm2836: Drop smp_set_ops on arm64 builds
  irqchip/gic: Add helper functions for GIC setup and teardown
  irqchip/gic: Store GIC configuration parameters
  irqchip/gic: Pass GIC pointer to save/restore functions
  irqchip/gic: Return an error if GIC initialisation fails
  irqchip/gic: Remove static irq_chip definition for eoimode1
  irqchip/gic: Don't initialise chip if mapping IO space fails
  irqchip/gic: WARN if setting the interrupt type for a PPI fails
  irqchip/gic: Don't unnecessarily write the IRQ configuration
  irqchip: Mask the non-type/sense bits when translating an IRQ
  genirq: Ensure IRQ descriptor is valid when setting-up the IRQ
  irqchip/gic-v3: Configure all interrupts as non-secure Group-1
  irqchip/gic-v2m: Add workaround for Broadcom NS2 GICv2m erratum
  irqchip/irq-alpine-msi: Don't use <asm-generic/msi.h>
  irqchip/mbigen: Checking for IS_ERR() instead of NULL
  irqchip/gic-v3: Remove inexistant register definition
  irqchip/gicv3-its: Don't allow devices whose ID is outside range
  irqchip: Add LPC32xx interrupt controller driver
  ...
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt | 34
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt | 30
-rw-r--r--  arch/arm/Kconfig | 2
-rw-r--r--  arch/arm/mach-lpc32xx/phy3250.c | 1
-rw-r--r--  drivers/irqchip/Kconfig | 9
-rw-r--r--  drivers/irqchip/Makefile | 3
-rw-r--r--  drivers/irqchip/irq-alpine-msi.c | 2
-rw-r--r--  drivers/irqchip/irq-bcm2836.c | 10
-rw-r--r--  drivers/irqchip/irq-crossbar.c | 2
-rw-r--r--  drivers/irqchip/irq-gic-common.c | 20
-rw-r--r--  drivers/irqchip/irq-gic-v2m.c | 19
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 42
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 195
-rw-r--r--  drivers/irqchip/irq-gic.c | 322
-rw-r--r--  drivers/irqchip/irq-lpc32xx.c | 238
-rw-r--r--  drivers/irqchip/irq-ls-scfg-msi.c | 240
-rw-r--r--  drivers/irqchip/irq-mbigen.c | 4
-rw-r--r--  drivers/irqchip/irq-partition-percpu.c | 256
-rw-r--r--  drivers/irqchip/irq-tegra.c | 2
-rw-r--r--  include/linux/irq.h | 4
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h | 2
-rw-r--r--  include/linux/irqchip/irq-partition-percpu.h | 59
-rw-r--r--  include/linux/irqdesc.h | 1
-rw-r--r--  include/linux/irqdomain.h | 20
-rw-r--r--  kernel/irq/ipi.c | 45
-rw-r--r--  kernel/irq/irqdesc.c | 26
-rw-r--r--  kernel/irq/irqdomain.c | 19
-rw-r--r--  kernel/irq/manage.c | 2
28 files changed, 1438 insertions, 171 deletions
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index 007a5b46256a..4c29cdab0ea5 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
@@ -11,6 +11,8 @@ Main node required properties:
 - interrupt-controller : Identifies the node as an interrupt controller
 - #interrupt-cells : Specifies the number of cells needed to encode an
   interrupt source. Must be a single cell with a value of at least 3.
+  If the system requires describing PPI affinity, then the value must
+  be at least 4.
 
   The 1st cell is the interrupt type; 0 for SPI interrupts, 1 for PPI
   interrupts. Other values are reserved for future use.
@@ -24,7 +26,14 @@ Main node required properties:
 		1 = edge triggered
 		4 = level triggered
 
-  Cells 4 and beyond are reserved for future use and must have a value
+  The 4th cell is a phandle to a node describing a set of CPUs this
+  interrupt is affine to. The interrupt must be a PPI, and the node
+  pointed must be a subnode of the "ppi-partitions" subnode. For
+  interrupt types other than PPI or PPIs that are not partitionned,
+  this cell must be zero. See the "ppi-partitions" node description
+  below.
+
+  Cells 5 and beyond are reserved for future use and must have a value
   of 0 if present.
 
 - reg : Specifies base physical address(s) and size of the GIC
@@ -50,6 +59,11 @@ Optional
 
 Sub-nodes:
 
+PPI affinity can be expressed as a single "ppi-partitions" node,
+containing a set of sub-nodes, each with the following property:
+- affinity: Should be a list of phandles to CPU nodes (as described in
+  Documentation/devicetree/bindings/arm/cpus.txt).
+
 GICv3 has one or more Interrupt Translation Services (ITS) that are
 used to route Message Signalled Interrupts (MSI) to the CPUs.
 
@@ -91,7 +105,7 @@ Examples:
 
 	gic: interrupt-controller@2c010000 {
 		compatible = "arm,gic-v3";
-		#interrupt-cells = <3>;
+		#interrupt-cells = <4>;
 		#address-cells = <2>;
 		#size-cells = <2>;
 		ranges;
@@ -119,4 +133,20 @@ Examples:
 			#msi-cells = <1>;
 			reg = <0x0 0x2c400000 0 0x200000>;
 		};
+
+		ppi-partitions {
+			part0: interrupt-partition-0 {
+				affinity = <&cpu0 &cpu2>;
+			};
+
+			part1: interrupt-partition-1 {
+				affinity = <&cpu1 &cpu3>;
+			};
+		};
+	};
+
+
+	device@0 {
+		reg = <0 0 0 4>;
+		interrupts = <1 1 4 &part0>;
 	};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt b/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt
new file mode 100644
index 000000000000..9e389493203f
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt
@@ -0,0 +1,30 @@
+* Freescale Layerscape SCFG PCIe MSI controller
+
+Required properties:
+
+- compatible: should be "fsl,<soc-name>-msi" to identify
+  Layerscape PCIe MSI controller block such as:
+    "fsl,1s1021a-msi"
+    "fsl,1s1043a-msi"
+- msi-controller: indicates that this is a PCIe MSI controller node
+- reg: physical base address of the controller and length of memory mapped.
+- interrupts: an interrupt to the parent interrupt controller.
+
+Optional properties:
+- interrupt-parent: the phandle to the parent interrupt controller.
+
+This interrupt controller hardware is a second level interrupt controller that
+is hooked to a parent interrupt controller: e.g: ARM GIC for ARM-based
+platforms. If interrupt-parent is not provided, the default parent interrupt
+controller will be used.
+Each PCIe node needs to have property msi-parent that points to
+MSI controller node
+
+Examples:
+
+	msi1: msi-controller@1571000 {
+		compatible = "fsl,1s1043a-msi";
+		reg = <0x0 0x1571000 0x0 0x8>,
+		msi-controller;
+		interrupts = <0 116 0x4>;
+	};
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index cdfa6c2b7626..6684af91cb73 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -531,6 +531,8 @@ config ARCH_LPC32XX
 	select COMMON_CLK
 	select CPU_ARM926T
 	select GENERIC_CLOCKEVENTS
+	select MULTI_IRQ_HANDLER
+	select SPARSE_IRQ
 	select USE_OF
 	help
 	  Support for the NXP LPC32XX family of processors
diff --git a/arch/arm/mach-lpc32xx/phy3250.c b/arch/arm/mach-lpc32xx/phy3250.c
index b2f9e226febe..f9209d091c4b 100644
--- a/arch/arm/mach-lpc32xx/phy3250.c
+++ b/arch/arm/mach-lpc32xx/phy3250.c
@@ -206,7 +206,6 @@ static const char *const lpc32xx_dt_compat[] __initconst = {
 DT_MACHINE_START(LPC32XX_DT, "LPC32XX SoC (Flattened Device Tree)")
 	.atag_offset	= 0x100,
 	.map_io		= lpc32xx_map_io,
-	.init_irq	= lpc32xx_init_irq,
 	.init_machine	= lpc3250_machine_init,
 	.dt_compat	= lpc32xx_dt_compat,
 MACHINE_END
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 3e124793e224..81f88ada3a61 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -27,6 +27,7 @@ config ARM_GIC_V3
 	select IRQ_DOMAIN
 	select MULTI_IRQ_HANDLER
 	select IRQ_DOMAIN_HIERARCHY
+	select PARTITION_PERCPU
 
 config ARM_GIC_V3_ITS
 	bool
@@ -244,3 +245,11 @@ config IRQ_MXS
 config MVEBU_ODMI
 	bool
 	select GENERIC_MSI_IRQ_DOMAIN
+
+config LS_SCFG_MSI
+	def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
+	depends on PCI && PCI_MSI
+	select PCI_MSI_IRQ_DOMAIN
+
+config PARTITION_PERCPU
+	bool
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index b03cfcbbac6b..f828244b44c2 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
 obj-$(CONFIG_ARCH_BCM2835)		+= irq-bcm2836.o
 obj-$(CONFIG_ARCH_EXYNOS)		+= exynos-combiner.o
 obj-$(CONFIG_ARCH_HIP04)		+= irq-hip04.o
+obj-$(CONFIG_ARCH_LPC32XX)		+= irq-lpc32xx.o
 obj-$(CONFIG_ARCH_MMP)			+= irq-mmp.o
 obj-$(CONFIG_IRQ_MXS)			+= irq-mxs.o
 obj-$(CONFIG_ARCH_TEGRA)		+= irq-tegra.o
@@ -27,6 +28,7 @@ obj-$(CONFIG_REALVIEW_DT) += irq-gic-realview.o
 obj-$(CONFIG_ARM_GIC_V2M)		+= irq-gic-v2m.o
 obj-$(CONFIG_ARM_GIC_V3)		+= irq-gic-v3.o irq-gic-common.o
 obj-$(CONFIG_ARM_GIC_V3_ITS)		+= irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o
+obj-$(CONFIG_PARTITION_PERCPU)		+= irq-partition-percpu.o
 obj-$(CONFIG_HISILICON_IRQ_MBIGEN)	+= irq-mbigen.o
 obj-$(CONFIG_ARM_NVIC)			+= irq-nvic.o
 obj-$(CONFIG_ARM_VIC)			+= irq-vic.o
@@ -65,3 +67,4 @@ obj-$(CONFIG_INGENIC_IRQ) += irq-ingenic.o
 obj-$(CONFIG_IMX_GPCV2)			+= irq-imx-gpcv2.o
 obj-$(CONFIG_PIC32_EVIC)		+= irq-pic32-evic.o
 obj-$(CONFIG_MVEBU_ODMI)		+= irq-mvebu-odmi.o
+obj-$(CONFIG_LS_SCFG_MSI)		+= irq-ls-scfg-msi.o
diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
index 25384255b30f..63d980995d17 100644
--- a/drivers/irqchip/irq-alpine-msi.c
+++ b/drivers/irqchip/irq-alpine-msi.c
@@ -23,7 +23,7 @@
 #include <linux/slab.h>
 
 #include <asm/irq.h>
-#include <asm-generic/msi.h>
+#include <asm/msi.h>
 
 /* MSIX message address format: local GIC target */
 #define ALPINE_MSIX_SPI_TARGET_CLUSTER0		BIT(16)
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index b6e950d4782a..72ff1d5c5de6 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -195,7 +195,7 @@ static void bcm2836_arm_irqchip_send_ipi(const struct cpumask *mask,
 	 * Ensure that stores to normal memory are visible to the
 	 * other CPUs before issuing the IPI.
 	 */
-	dsb();
+	smp_wmb();
 
 	for_each_cpu(cpu, mask) {
 		writel(1 << ipi, mailbox0_base + 16 * cpu);
@@ -223,6 +223,7 @@ static struct notifier_block bcm2836_arm_irqchip_cpu_notifier = {
 	.priority = 100,
 };
 
+#ifdef CONFIG_ARM
 int __init bcm2836_smp_boot_secondary(unsigned int cpu,
 				      struct task_struct *idle)
 {
@@ -238,7 +239,7 @@ int __init bcm2836_smp_boot_secondary(unsigned int cpu,
 static const struct smp_operations bcm2836_smp_ops __initconst = {
 	.smp_boot_secondary	= bcm2836_smp_boot_secondary,
 };
-
+#endif
 #endif
 
 static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = {
@@ -252,12 +253,15 @@ bcm2836_arm_irqchip_smp_init(void)
 	/* Unmask IPIs to the boot CPU. */
 	bcm2836_arm_irqchip_cpu_notify(&bcm2836_arm_irqchip_cpu_notifier,
 				       CPU_STARTING,
-				       (void *)smp_processor_id());
+				       (void *)(uintptr_t)smp_processor_id());
 	register_cpu_notifier(&bcm2836_arm_irqchip_cpu_notifier);
 
 	set_smp_cross_call(bcm2836_arm_irqchip_send_ipi);
+
+#ifdef CONFIG_ARM
 	smp_set_ops(&bcm2836_smp_ops);
 #endif
+#endif
 }
 
 /*
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index 75573fa431ba..1eef56a89b1f 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -183,7 +183,7 @@ static int crossbar_domain_translate(struct irq_domain *d,
 		return -EINVAL;
 
 	*hwirq = fwspec->param[1];
-	*type = fwspec->param[2];
+	*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
 	return 0;
 }
 
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
index f174ce0ca361..97c0028e8388 100644
--- a/drivers/irqchip/irq-gic-common.c
+++ b/drivers/irqchip/irq-gic-common.c
@@ -50,14 +50,26 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
 	else if (type & IRQ_TYPE_EDGE_BOTH)
 		val |= confmask;
 
+	/* If the current configuration is the same, then we are done */
+	if (val == oldval)
+		return 0;
+
 	/*
 	 * Write back the new configuration, and possibly re-enable
-	 * the interrupt. If we tried to write a new configuration and failed,
-	 * return an error.
+	 * the interrupt. If we fail to write a new configuration for
+	 * an SPI then WARN and return an error. If we fail to write the
+	 * configuration for a PPI this is most likely because the GIC
+	 * does not allow us to set the configuration or we are in a
+	 * non-secure mode, and hence it may not be catastrophic.
 	 */
 	writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
-	if (readl_relaxed(base + GIC_DIST_CONFIG + confoff) != val && val != oldval)
-		ret = -EINVAL;
+	if (readl_relaxed(base + GIC_DIST_CONFIG + confoff) != val) {
+		if (WARN_ON(irq >= 32))
+			ret = -EINVAL;
+		else
+			pr_warn("GIC: PPI%d is secure or misconfigured\n",
+				irq - 16);
+	}
 
 	if (sync_access)
 		sync_access();
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index 28f047c61baa..ad0d2960b664 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -49,6 +49,9 @@
49/* APM X-Gene with GICv2m MSI_IIDR register value */ 49/* APM X-Gene with GICv2m MSI_IIDR register value */
50#define XGENE_GICV2M_MSI_IIDR 0x06000170 50#define XGENE_GICV2M_MSI_IIDR 0x06000170
51 51
52/* Broadcom NS2 GICv2m MSI_IIDR register value */
53#define BCM_NS2_GICV2M_MSI_IIDR 0x0000013f
54
52/* List of flags for specific v2m implementation */ 55/* List of flags for specific v2m implementation */
53#define GICV2M_NEEDS_SPI_OFFSET 0x00000001 56#define GICV2M_NEEDS_SPI_OFFSET 0x00000001
54 57
@@ -62,6 +65,7 @@ struct v2m_data {
62 void __iomem *base; /* GICv2m virt address */ 65 void __iomem *base; /* GICv2m virt address */
63 u32 spi_start; /* The SPI number that MSIs start */ 66 u32 spi_start; /* The SPI number that MSIs start */
64 u32 nr_spis; /* The number of SPIs for MSIs */ 67 u32 nr_spis; /* The number of SPIs for MSIs */
68 u32 spi_offset; /* offset to be subtracted from SPI number */
65 unsigned long *bm; /* MSI vector bitmap */ 69 unsigned long *bm; /* MSI vector bitmap */
66 u32 flags; /* v2m flags for specific implementation */ 70 u32 flags; /* v2m flags for specific implementation */
67}; 71};
@@ -102,7 +106,7 @@ static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
102 msg->data = data->hwirq; 106 msg->data = data->hwirq;
103 107
104 if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET) 108 if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
105 msg->data -= v2m->spi_start; 109 msg->data -= v2m->spi_offset;
106} 110}
107 111
108static struct irq_chip gicv2m_irq_chip = { 112static struct irq_chip gicv2m_irq_chip = {
@@ -340,9 +344,20 @@ static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
340 * different from the standard GICv2m implementation where 344 * different from the standard GICv2m implementation where
341 * the MSI data is the absolute value within the range from 345 * the MSI data is the absolute value within the range from
342 * spi_start to (spi_start + num_spis). 346 * spi_start to (spi_start + num_spis).
347 *
348 * Broadom NS2 GICv2m implementation has an erratum where the MSI data
349 * is 'spi_number - 32'
343 */ 350 */
344 if (readl_relaxed(v2m->base + V2M_MSI_IIDR) == XGENE_GICV2M_MSI_IIDR) 351 switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) {
352 case XGENE_GICV2M_MSI_IIDR:
353 v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
354 v2m->spi_offset = v2m->spi_start;
355 break;
356 case BCM_NS2_GICV2M_MSI_IIDR:
345 v2m->flags |= GICV2M_NEEDS_SPI_OFFSET; 357 v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
358 v2m->spi_offset = 32;
359 break;
360 }
346 361
347 v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis), 362 v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis),
348 GFP_KERNEL); 363 GFP_KERNEL);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 39261798c59f..6bd881be24ea 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -55,6 +55,16 @@ struct its_collection {
55}; 55};
56 56
57/* 57/*
58 * The ITS_BASER structure - contains memory information and cached
59 * value of BASER register configuration.
60 */
61struct its_baser {
62 void *base;
63 u64 val;
64 u32 order;
65};
66
67/*
58 * The ITS structure - contains most of the infrastructure, with the 68 * The ITS structure - contains most of the infrastructure, with the
59 * top-level MSI domain, the command queue, the collections, and the 69 * top-level MSI domain, the command queue, the collections, and the
60 * list of devices writing to it. 70 * list of devices writing to it.
@@ -66,14 +76,12 @@ struct its_node {
66 unsigned long phys_base; 76 unsigned long phys_base;
67 struct its_cmd_block *cmd_base; 77 struct its_cmd_block *cmd_base;
68 struct its_cmd_block *cmd_write; 78 struct its_cmd_block *cmd_write;
69 struct { 79 struct its_baser tables[GITS_BASER_NR_REGS];
70 void *base;
71 u32 order;
72 } tables[GITS_BASER_NR_REGS];
73 struct its_collection *collections; 80 struct its_collection *collections;
74 struct list_head its_device_list; 81 struct list_head its_device_list;
75 u64 flags; 82 u64 flags;
76 u32 ite_size; 83 u32 ite_size;
84 u32 device_ids;
77}; 85};
78 86
79#define ITS_ITT_ALIGN SZ_256 87#define ITS_ITT_ALIGN SZ_256
@@ -838,6 +846,8 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
838 ids = GITS_TYPER_DEVBITS(typer); 846 ids = GITS_TYPER_DEVBITS(typer);
839 } 847 }
840 848
849 its->device_ids = ids;
850
841 for (i = 0; i < GITS_BASER_NR_REGS; i++) { 851 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
842 u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); 852 u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
843 u64 type = GITS_BASER_TYPE(val); 853 u64 type = GITS_BASER_TYPE(val);
@@ -913,6 +923,7 @@ retry_baser:
913 } 923 }
914 924
915 val |= alloc_pages - 1; 925 val |= alloc_pages - 1;
926 its->tables[i].val = val;
916 927
917 writeq_relaxed(val, its->base + GITS_BASER + i * 8); 928 writeq_relaxed(val, its->base + GITS_BASER + i * 8);
918 tmp = readq_relaxed(its->base + GITS_BASER + i * 8); 929 tmp = readq_relaxed(its->base + GITS_BASER + i * 8);
@@ -1138,9 +1149,22 @@ static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
1138 return its_dev; 1149 return its_dev;
1139} 1150}
1140 1151
1152static struct its_baser *its_get_baser(struct its_node *its, u32 type)
1153{
1154 int i;
1155
1156 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
1157 if (GITS_BASER_TYPE(its->tables[i].val) == type)
1158 return &its->tables[i];
1159 }
1160
1161 return NULL;
1162}
1163
1141static struct its_device *its_create_device(struct its_node *its, u32 dev_id, 1164static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1142 int nvecs) 1165 int nvecs)
1143{ 1166{
1167 struct its_baser *baser;
1144 struct its_device *dev; 1168 struct its_device *dev;
1145 unsigned long *lpi_map; 1169 unsigned long *lpi_map;
1146 unsigned long flags; 1170 unsigned long flags;
@@ -1151,6 +1175,16 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1151 int nr_ites; 1175 int nr_ites;
1152 int sz; 1176 int sz;
1153 1177
1178 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
1179
1180 /* Don't allow 'dev_id' that exceeds single, flat table limit */
1181 if (baser) {
1182 if (dev_id >= (PAGE_ORDER_TO_SIZE(baser->order) /
1183 GITS_BASER_ENTRY_SIZE(baser->val)))
1184 return NULL;
1185 } else if (ilog2(dev_id) >= its->device_ids)
1186 return NULL;
1187
1154 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1188 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1155 /* 1189 /*
1156 * At least one bit of EventID is being used, hence a minimum 1190 * At least one bit of EventID is being used, hence a minimum
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 5b7d3c2129d8..1a1ea4f733c1 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -29,6 +29,7 @@
29 29
30#include <linux/irqchip.h> 30#include <linux/irqchip.h>
31#include <linux/irqchip/arm-gic-v3.h> 31#include <linux/irqchip/arm-gic-v3.h>
32#include <linux/irqchip/irq-partition-percpu.h>
32 33
33#include <asm/cputype.h> 34#include <asm/cputype.h>
34#include <asm/exception.h> 35#include <asm/exception.h>
@@ -44,6 +45,7 @@ struct redist_region {
44}; 45};
45 46
46struct gic_chip_data { 47struct gic_chip_data {
48 struct fwnode_handle *fwnode;
47 void __iomem *dist_base; 49 void __iomem *dist_base;
48 struct redist_region *redist_regions; 50 struct redist_region *redist_regions;
49 struct rdists rdists; 51 struct rdists rdists;
@@ -51,6 +53,7 @@ struct gic_chip_data {
51 u64 redist_stride; 53 u64 redist_stride;
52 u32 nr_redist_regions; 54 u32 nr_redist_regions;
53 unsigned int irq_nr; 55 unsigned int irq_nr;
56 struct partition_desc *ppi_descs[16];
54}; 57};
55 58
56static struct gic_chip_data gic_data __read_mostly; 59static struct gic_chip_data gic_data __read_mostly;
@@ -364,6 +367,13 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
364 if (static_key_true(&supports_deactivate)) 367 if (static_key_true(&supports_deactivate))
365 gic_write_dir(irqnr); 368 gic_write_dir(irqnr);
366#ifdef CONFIG_SMP 369#ifdef CONFIG_SMP
370 /*
371 * Unlike GICv2, we don't need an smp_rmb() here.
372 * The control dependency from gic_read_iar to
373 * the ISB in gic_write_eoir is enough to ensure
374 * that any shared data read by handle_IPI will
375 * be read after the ACK.
376 */
367 handle_IPI(irqnr, regs); 377 handle_IPI(irqnr, regs);
368#else 378#else
369 WARN_ONCE(true, "Unexpected SGI received!\n"); 379 WARN_ONCE(true, "Unexpected SGI received!\n");
@@ -383,6 +393,15 @@ static void __init gic_dist_init(void)
383 writel_relaxed(0, base + GICD_CTLR); 393 writel_relaxed(0, base + GICD_CTLR);
384 gic_dist_wait_for_rwp(); 394 gic_dist_wait_for_rwp();
385 395
396 /*
397 * Configure SPIs as non-secure Group-1. This will only matter
398 * if the GIC only has a single security state. This will not
399 * do the right thing if the kernel is running in secure mode,
400 * but that's not the intended use case anyway.
401 */
402 for (i = 32; i < gic_data.irq_nr; i += 32)
403 writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
404
386 gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp); 405 gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);
387 406
388 /* Enable distributor with ARE, Group1 */ 407 /* Enable distributor with ARE, Group1 */
@@ -500,6 +519,9 @@ static void gic_cpu_init(void)
500 519
501 rbase = gic_data_rdist_sgi_base(); 520 rbase = gic_data_rdist_sgi_base();
502 521
522 /* Configure SGIs/PPIs as non-secure Group-1 */
523 writel_relaxed(~0, rbase + GICR_IGROUPR0);
524
503 gic_cpu_config(rbase, gic_redist_wait_for_rwp); 525 gic_cpu_config(rbase, gic_redist_wait_for_rwp);
504 526
505 /* Give LPIs a spin */ 527 /* Give LPIs a spin */
@@ -812,10 +834,62 @@ static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
812 } 834 }
813} 835}
814 836
837static int gic_irq_domain_select(struct irq_domain *d,
838 struct irq_fwspec *fwspec,
839 enum irq_domain_bus_token bus_token)
840{
841 /* Not for us */
842 if (fwspec->fwnode != d->fwnode)
843 return 0;
844
845 /* If this is not DT, then we have a single domain */
846 if (!is_of_node(fwspec->fwnode))
847 return 1;
848
849 /*
850 * If this is a PPI and we have a 4th (non-null) parameter,
851 * then we need to match the partition domain.
852 */
853 if (fwspec->param_count >= 4 &&
854 fwspec->param[0] == 1 && fwspec->param[3] != 0)
855 return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]);
856
857 return d == gic_data.domain;
858}
859
815static const struct irq_domain_ops gic_irq_domain_ops = { 860static const struct irq_domain_ops gic_irq_domain_ops = {
816 .translate = gic_irq_domain_translate, 861 .translate = gic_irq_domain_translate,
817 .alloc = gic_irq_domain_alloc, 862 .alloc = gic_irq_domain_alloc,
818 .free = gic_irq_domain_free, 863 .free = gic_irq_domain_free,
864 .select = gic_irq_domain_select,
865};
866
867static int partition_domain_translate(struct irq_domain *d,
868 struct irq_fwspec *fwspec,
869 unsigned long *hwirq,
870 unsigned int *type)
871{
872 struct device_node *np;
873 int ret;
874
875 np = of_find_node_by_phandle(fwspec->param[3]);
876 if (WARN_ON(!np))
877 return -EINVAL;
878
879 ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]],
880 of_node_to_fwnode(np));
881 if (ret < 0)
882 return ret;
883
884 *hwirq = ret;
885 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
886
887 return 0;
888}
889
890static const struct irq_domain_ops partition_domain_ops = {
891 .translate = partition_domain_translate,
892 .select = gic_irq_domain_select,
819}; 893};
820 894
821static void gicv3_enable_quirks(void) 895static void gicv3_enable_quirks(void)
@@ -843,6 +917,7 @@ static int __init gic_init_bases(void __iomem *dist_base,
843 if (static_key_true(&supports_deactivate)) 917 if (static_key_true(&supports_deactivate))
844 pr_info("GIC: Using split EOI/Deactivate mode\n"); 918 pr_info("GIC: Using split EOI/Deactivate mode\n");
845 919
920 gic_data.fwnode = handle;
846 gic_data.dist_base = dist_base; 921 gic_data.dist_base = dist_base;
847 gic_data.redist_regions = rdist_regs; 922 gic_data.redist_regions = rdist_regs;
848 gic_data.nr_redist_regions = nr_redist_regions; 923 gic_data.nr_redist_regions = nr_redist_regions;
@@ -901,6 +976,119 @@ static int __init gic_validate_dist_version(void __iomem *dist_base)
901 return 0; 976 return 0;
902} 977}
903 978
979static int get_cpu_number(struct device_node *dn)
980{
981 const __be32 *cell;
982 u64 hwid;
983 int i;
984
985 cell = of_get_property(dn, "reg", NULL);
986 if (!cell)
987 return -1;
988
989 hwid = of_read_number(cell, of_n_addr_cells(dn));
990
991 /*
992 * Non affinity bits must be set to 0 in the DT
993 */
994 if (hwid & ~MPIDR_HWID_BITMASK)
995 return -1;
996
997 for (i = 0; i < num_possible_cpus(); i++)
998 if (cpu_logical_map(i) == hwid)
999 return i;
1000
1001 return -1;
1002}
1003
1004/* Create all possible partitions at boot time */
1005static void gic_populate_ppi_partitions(struct device_node *gic_node)
1006{
1007 struct device_node *parts_node, *child_part;
1008 int part_idx = 0, i;
1009 int nr_parts;
1010 struct partition_affinity *parts;
1011
1012 parts_node = of_find_node_by_name(gic_node, "ppi-partitions");
1013 if (!parts_node)
1014 return;
1015
1016 nr_parts = of_get_child_count(parts_node);
1017
1018 if (!nr_parts)
1019 return;
1020
1021 parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
1022 if (WARN_ON(!parts))
1023 return;
1024
1025 for_each_child_of_node(parts_node, child_part) {
1026 struct partition_affinity *part;
1027 int n;
1028
1029 part = &parts[part_idx];
1030
1031 part->partition_id = of_node_to_fwnode(child_part);
1032
1033 pr_info("GIC: PPI partition %s[%d] { ",
1034 child_part->name, part_idx);
1035
1036 n = of_property_count_elems_of_size(child_part, "affinity",
1037 sizeof(u32));
1038 WARN_ON(n <= 0);
1039
1040 for (i = 0; i < n; i++) {
1041 int err, cpu;
1042 u32 cpu_phandle;
1043 struct device_node *cpu_node;
1044
1045 err = of_property_read_u32_index(child_part, "affinity",
1046 i, &cpu_phandle);
1047 if (WARN_ON(err))
1048 continue;
1049
1050 cpu_node = of_find_node_by_phandle(cpu_phandle);
1051 if (WARN_ON(!cpu_node))
1052 continue;
1053
1054 cpu = get_cpu_number(cpu_node);
1055 if (WARN_ON(cpu == -1))
1056 continue;
1057
1058 pr_cont("%s[%d] ", cpu_node->full_name, cpu);
1059
1060 cpumask_set_cpu(cpu, &part->mask);
1061 }
1062
1063 pr_cont("}\n");
1064 part_idx++;
1065 }
1066
1067 for (i = 0; i < 16; i++) {
1068 unsigned int irq;
1069 struct partition_desc *desc;
1070 struct irq_fwspec ppi_fwspec = {
1071 .fwnode = gic_data.fwnode,
1072 .param_count = 3,
1073 .param = {
1074 [0] = 1,
1075 [1] = i,
1076 [2] = IRQ_TYPE_NONE,
1077 },
1078 };
1079
1080 irq = irq_create_fwspec_mapping(&ppi_fwspec);
1081 if (WARN_ON(!irq))
1082 continue;
1083 desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
1084 irq, &partition_domain_ops);
1085 if (WARN_ON(!desc))
1086 continue;
1087
1088 gic_data.ppi_descs[i] = desc;
1089 }
1090}
1091
904static int __init gic_of_init(struct device_node *node, struct device_node *parent) 1092static int __init gic_of_init(struct device_node *node, struct device_node *parent)
905{ 1093{
906 void __iomem *dist_base; 1094 void __iomem *dist_base;
@@ -952,8 +1140,11 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
952 1140
953 err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions, 1141 err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
954 redist_stride, &node->fwnode); 1142 redist_stride, &node->fwnode);
955 if (!err) 1143 if (err)
956 return 0; 1144 goto out_unmap_rdist;
1145
1146 gic_populate_ppi_partitions(node);
1147 return 0;
957 1148
958out_unmap_rdist: 1149out_unmap_rdist:
959 for (i = 0; i < nr_redist_regions; i++) 1150 for (i = 0; i < nr_redist_regions; i++)
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 095bb5b5c3f2..1de20e14a721 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -72,6 +72,9 @@ struct gic_chip_data {
72 struct irq_chip chip; 72 struct irq_chip chip;
73 union gic_base dist_base; 73 union gic_base dist_base;
74 union gic_base cpu_base; 74 union gic_base cpu_base;
75 void __iomem *raw_dist_base;
76 void __iomem *raw_cpu_base;
77 u32 percpu_offset;
75#ifdef CONFIG_CPU_PM 78#ifdef CONFIG_CPU_PM
76 u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)]; 79 u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
77 u32 saved_spi_active[DIV_ROUND_UP(1020, 32)]; 80 u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
@@ -344,6 +347,14 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
344 if (static_key_true(&supports_deactivate)) 347 if (static_key_true(&supports_deactivate))
345 writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE); 348 writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
346#ifdef CONFIG_SMP 349#ifdef CONFIG_SMP
350 /*
351 * Ensure any shared data written by the CPU sending
352 * the IPI is read after we've read the ACK register
353 * on the GIC.
354 *
355 * Pairs with the write barrier in gic_raise_softirq
356 */
357 smp_rmb();
347 handle_IPI(irqnr, regs); 358 handle_IPI(irqnr, regs);
348#endif 359#endif
349 continue; 360 continue;
@@ -391,20 +402,6 @@ static struct irq_chip gic_chip = {
391 IRQCHIP_MASK_ON_SUSPEND, 402 IRQCHIP_MASK_ON_SUSPEND,
392}; 403};
393 404
394static struct irq_chip gic_eoimode1_chip = {
395 .name = "GICv2",
396 .irq_mask = gic_eoimode1_mask_irq,
397 .irq_unmask = gic_unmask_irq,
398 .irq_eoi = gic_eoimode1_eoi_irq,
399 .irq_set_type = gic_set_type,
400 .irq_get_irqchip_state = gic_irq_get_irqchip_state,
401 .irq_set_irqchip_state = gic_irq_set_irqchip_state,
402 .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
403 .flags = IRQCHIP_SET_TYPE_MASKED |
404 IRQCHIP_SKIP_SET_WAKE |
405 IRQCHIP_MASK_ON_SUSPEND,
406};
407
408void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) 405void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
409{ 406{
410 BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR); 407 BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
@@ -473,7 +470,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
473 writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL); 470 writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
474} 471}
475 472
476static void gic_cpu_init(struct gic_chip_data *gic) 473static int gic_cpu_init(struct gic_chip_data *gic)
477{ 474{
478 void __iomem *dist_base = gic_data_dist_base(gic); 475 void __iomem *dist_base = gic_data_dist_base(gic);
479 void __iomem *base = gic_data_cpu_base(gic); 476 void __iomem *base = gic_data_cpu_base(gic);
@@ -489,7 +486,9 @@ static void gic_cpu_init(struct gic_chip_data *gic)
489 /* 486 /*
490 * Get what the GIC says our CPU mask is. 487 * Get what the GIC says our CPU mask is.
491 */ 488 */
492 BUG_ON(cpu >= NR_GIC_CPU_IF); 489 if (WARN_ON(cpu >= NR_GIC_CPU_IF))
490 return -EINVAL;
491
493 gic_check_cpu_features(); 492 gic_check_cpu_features();
494 cpu_mask = gic_get_cpumask(gic); 493 cpu_mask = gic_get_cpumask(gic);
495 gic_cpu_map[cpu] = cpu_mask; 494 gic_cpu_map[cpu] = cpu_mask;
@@ -507,6 +506,8 @@ static void gic_cpu_init(struct gic_chip_data *gic)
507 506
508 writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK); 507 writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
509 gic_cpu_if_up(gic); 508 gic_cpu_if_up(gic);
509
510 return 0;
510} 511}
511 512
512int gic_cpu_if_down(unsigned int gic_nr) 513int gic_cpu_if_down(unsigned int gic_nr)
@@ -532,34 +533,35 @@ int gic_cpu_if_down(unsigned int gic_nr)
532 * this function, no interrupts will be delivered by the GIC, and another 533 * this function, no interrupts will be delivered by the GIC, and another
533 * platform-specific wakeup source must be enabled. 534 * platform-specific wakeup source must be enabled.
534 */ 535 */
535static void gic_dist_save(unsigned int gic_nr) 536static void gic_dist_save(struct gic_chip_data *gic)
536{ 537{
537 unsigned int gic_irqs; 538 unsigned int gic_irqs;
538 void __iomem *dist_base; 539 void __iomem *dist_base;
539 int i; 540 int i;
540 541
541 BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR); 542 if (WARN_ON(!gic))
543 return;
542 544
543 gic_irqs = gic_data[gic_nr].gic_irqs; 545 gic_irqs = gic->gic_irqs;
544 dist_base = gic_data_dist_base(&gic_data[gic_nr]); 546 dist_base = gic_data_dist_base(gic);
545 547
546 if (!dist_base) 548 if (!dist_base)
547 return; 549 return;
548 550
549 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++) 551 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
550 gic_data[gic_nr].saved_spi_conf[i] = 552 gic->saved_spi_conf[i] =
551 readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); 553 readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
552 554
553 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) 555 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
554 gic_data[gic_nr].saved_spi_target[i] = 556 gic->saved_spi_target[i] =
555 readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4); 557 readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
556 558
557 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) 559 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
558 gic_data[gic_nr].saved_spi_enable[i] = 560 gic->saved_spi_enable[i] =
559 readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); 561 readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
560 562
561 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) 563 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
562 gic_data[gic_nr].saved_spi_active[i] = 564 gic->saved_spi_active[i] =
563 readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4); 565 readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
564} 566}
565 567
@@ -570,16 +572,17 @@ static void gic_dist_save(unsigned int gic_nr)
570 * handled normally, but any edge interrupts that occured will not be seen by 572 * handled normally, but any edge interrupts that occured will not be seen by
571 * the GIC and need to be handled by the platform-specific wakeup source. 573 * the GIC and need to be handled by the platform-specific wakeup source.
572 */ 574 */
573static void gic_dist_restore(unsigned int gic_nr) 575static void gic_dist_restore(struct gic_chip_data *gic)
574{ 576{
575 unsigned int gic_irqs; 577 unsigned int gic_irqs;
576 unsigned int i; 578 unsigned int i;
577 void __iomem *dist_base; 579 void __iomem *dist_base;
578 580
579 BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR); 581 if (WARN_ON(!gic))
582 return;
580 583
581 gic_irqs = gic_data[gic_nr].gic_irqs; 584 gic_irqs = gic->gic_irqs;
582 dist_base = gic_data_dist_base(&gic_data[gic_nr]); 585 dist_base = gic_data_dist_base(gic);
583 586
584 if (!dist_base) 587 if (!dist_base)
585 return; 588 return;
@@ -587,7 +590,7 @@ static void gic_dist_restore(unsigned int gic_nr)
587 writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL); 590 writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL);
588 591
589 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++) 592 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
590 writel_relaxed(gic_data[gic_nr].saved_spi_conf[i], 593 writel_relaxed(gic->saved_spi_conf[i],
591 dist_base + GIC_DIST_CONFIG + i * 4); 594 dist_base + GIC_DIST_CONFIG + i * 4);
592 595
593 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) 596 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
@@ -595,85 +598,87 @@ static void gic_dist_restore(unsigned int gic_nr)
595 dist_base + GIC_DIST_PRI + i * 4); 598 dist_base + GIC_DIST_PRI + i * 4);
596 599
597 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) 600 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
598 writel_relaxed(gic_data[gic_nr].saved_spi_target[i], 601 writel_relaxed(gic->saved_spi_target[i],
599 dist_base + GIC_DIST_TARGET + i * 4); 602 dist_base + GIC_DIST_TARGET + i * 4);
600 603
601 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) { 604 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
602 writel_relaxed(GICD_INT_EN_CLR_X32, 605 writel_relaxed(GICD_INT_EN_CLR_X32,
603 dist_base + GIC_DIST_ENABLE_CLEAR + i * 4); 606 dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
604 writel_relaxed(gic_data[gic_nr].saved_spi_enable[i], 607 writel_relaxed(gic->saved_spi_enable[i],
605 dist_base + GIC_DIST_ENABLE_SET + i * 4); 608 dist_base + GIC_DIST_ENABLE_SET + i * 4);
606 } 609 }
607 610
608 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) { 611 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
609 writel_relaxed(GICD_INT_EN_CLR_X32, 612 writel_relaxed(GICD_INT_EN_CLR_X32,
610 dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4); 613 dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
611 writel_relaxed(gic_data[gic_nr].saved_spi_active[i], 614 writel_relaxed(gic->saved_spi_active[i],
612 dist_base + GIC_DIST_ACTIVE_SET + i * 4); 615 dist_base + GIC_DIST_ACTIVE_SET + i * 4);
613 } 616 }
614 617
615 writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL); 618 writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
616} 619}
617 620
618static void gic_cpu_save(unsigned int gic_nr) 621static void gic_cpu_save(struct gic_chip_data *gic)
619{ 622{
620 int i; 623 int i;
621 u32 *ptr; 624 u32 *ptr;
622 void __iomem *dist_base; 625 void __iomem *dist_base;
623 void __iomem *cpu_base; 626 void __iomem *cpu_base;
624 627
625 BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR); 628 if (WARN_ON(!gic))
629 return;
626 630
627 dist_base = gic_data_dist_base(&gic_data[gic_nr]); 631 dist_base = gic_data_dist_base(gic);
628 cpu_base = gic_data_cpu_base(&gic_data[gic_nr]); 632 cpu_base = gic_data_cpu_base(gic);
629 633
630 if (!dist_base || !cpu_base) 634 if (!dist_base || !cpu_base)
631 return; 635 return;
632 636
633 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); 637 ptr = raw_cpu_ptr(gic->saved_ppi_enable);
634 for (i = 0; i < DIV_ROUND_UP(32, 32); i++) 638 for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
635 ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); 639 ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
636 640
637 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active); 641 ptr = raw_cpu_ptr(gic->saved_ppi_active);
638 for (i = 0; i < DIV_ROUND_UP(32, 32); i++) 642 for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
639 ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4); 643 ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
640 644
641 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); 645 ptr = raw_cpu_ptr(gic->saved_ppi_conf);
642 for (i = 0; i < DIV_ROUND_UP(32, 16); i++) 646 for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
643 ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); 647 ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
644 648
645} 649}
646 650
647static void gic_cpu_restore(unsigned int gic_nr) 651static void gic_cpu_restore(struct gic_chip_data *gic)
648{ 652{
649 int i; 653 int i;
650 u32 *ptr; 654 u32 *ptr;
651 void __iomem *dist_base; 655 void __iomem *dist_base;
652 void __iomem *cpu_base; 656 void __iomem *cpu_base;
653 657
654 BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR); 658 if (WARN_ON(!gic))
659 return;
655 660
656 dist_base = gic_data_dist_base(&gic_data[gic_nr]); 661 dist_base = gic_data_dist_base(gic);
657 cpu_base = gic_data_cpu_base(&gic_data[gic_nr]); 662 cpu_base = gic_data_cpu_base(gic);
658 663
659 if (!dist_base || !cpu_base) 664 if (!dist_base || !cpu_base)
660 return; 665 return;
661 666
662 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); 667 ptr = raw_cpu_ptr(gic->saved_ppi_enable);
663 for (i = 0; i < DIV_ROUND_UP(32, 32); i++) { 668 for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
664 writel_relaxed(GICD_INT_EN_CLR_X32, 669 writel_relaxed(GICD_INT_EN_CLR_X32,
665 dist_base + GIC_DIST_ENABLE_CLEAR + i * 4); 670 dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
666 writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4); 671 writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
667 } 672 }
668 673
669 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active); 674 ptr = raw_cpu_ptr(gic->saved_ppi_active);
670 for (i = 0; i < DIV_ROUND_UP(32, 32); i++) { 675 for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
671 writel_relaxed(GICD_INT_EN_CLR_X32, 676 writel_relaxed(GICD_INT_EN_CLR_X32,
672 dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4); 677 dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
673 writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4); 678 writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4);
674 } 679 }
675 680
676 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); 681 ptr = raw_cpu_ptr(gic->saved_ppi_conf);
677 for (i = 0; i < DIV_ROUND_UP(32, 16); i++) 682 for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
678 writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4); 683 writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
679 684
@@ -682,7 +687,7 @@ static void gic_cpu_restore(unsigned int gic_nr)
682 dist_base + GIC_DIST_PRI + i * 4); 687 dist_base + GIC_DIST_PRI + i * 4);
683 688
684 writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK); 689 writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
685 gic_cpu_if_up(&gic_data[gic_nr]); 690 gic_cpu_if_up(gic);
686} 691}
687 692
688static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v) 693static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
@@ -697,18 +702,18 @@ static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
697#endif 702#endif
698 switch (cmd) { 703 switch (cmd) {
699 case CPU_PM_ENTER: 704 case CPU_PM_ENTER:
700 gic_cpu_save(i); 705 gic_cpu_save(&gic_data[i]);
701 break; 706 break;
702 case CPU_PM_ENTER_FAILED: 707 case CPU_PM_ENTER_FAILED:
703 case CPU_PM_EXIT: 708 case CPU_PM_EXIT:
704 gic_cpu_restore(i); 709 gic_cpu_restore(&gic_data[i]);
705 break; 710 break;
706 case CPU_CLUSTER_PM_ENTER: 711 case CPU_CLUSTER_PM_ENTER:
707 gic_dist_save(i); 712 gic_dist_save(&gic_data[i]);
708 break; 713 break;
709 case CPU_CLUSTER_PM_ENTER_FAILED: 714 case CPU_CLUSTER_PM_ENTER_FAILED:
710 case CPU_CLUSTER_PM_EXIT: 715 case CPU_CLUSTER_PM_EXIT:
711 gic_dist_restore(i); 716 gic_dist_restore(&gic_data[i]);
712 break; 717 break;
713 } 718 }
714 } 719 }
@@ -720,26 +725,39 @@ static struct notifier_block gic_notifier_block = {
720 .notifier_call = gic_notifier, 725 .notifier_call = gic_notifier,
721}; 726};
722 727
723static void __init gic_pm_init(struct gic_chip_data *gic) 728static int __init gic_pm_init(struct gic_chip_data *gic)
724{ 729{
725 gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4, 730 gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
726 sizeof(u32)); 731 sizeof(u32));
727 BUG_ON(!gic->saved_ppi_enable); 732 if (WARN_ON(!gic->saved_ppi_enable))
733 return -ENOMEM;
728 734
729 gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4, 735 gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
730 sizeof(u32)); 736 sizeof(u32));
731 BUG_ON(!gic->saved_ppi_active); 737 if (WARN_ON(!gic->saved_ppi_active))
738 goto free_ppi_enable;
732 739
733 gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4, 740 gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
734 sizeof(u32)); 741 sizeof(u32));
735 BUG_ON(!gic->saved_ppi_conf); 742 if (WARN_ON(!gic->saved_ppi_conf))
743 goto free_ppi_active;
736 744
737 if (gic == &gic_data[0]) 745 if (gic == &gic_data[0])
738 cpu_pm_register_notifier(&gic_notifier_block); 746 cpu_pm_register_notifier(&gic_notifier_block);
747
748 return 0;
749
750free_ppi_active:
751 free_percpu(gic->saved_ppi_active);
752free_ppi_enable:
753 free_percpu(gic->saved_ppi_enable);
754
755 return -ENOMEM;
739} 756}
740#else 757#else
741static void __init gic_pm_init(struct gic_chip_data *gic) 758static int __init gic_pm_init(struct gic_chip_data *gic)
742{ 759{
760 return 0;
743} 761}
744#endif 762#endif
745 763
@@ -1012,61 +1030,63 @@ static const struct irq_domain_ops gic_irq_domain_ops = {
1012 .unmap = gic_irq_domain_unmap, 1030 .unmap = gic_irq_domain_unmap,
1013}; 1031};
1014 1032
1015static void __init __gic_init_bases(unsigned int gic_nr, int irq_start, 1033static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start,
1016 void __iomem *dist_base, void __iomem *cpu_base, 1034 struct fwnode_handle *handle)
1017 u32 percpu_offset, struct fwnode_handle *handle)
1018{ 1035{
1019 irq_hw_number_t hwirq_base; 1036 irq_hw_number_t hwirq_base;
1020 struct gic_chip_data *gic; 1037 int gic_irqs, irq_base, i, ret;
1021 int gic_irqs, irq_base, i;
1022
1023 BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
1024 1038
1025 gic = &gic_data[gic_nr]; 1039 if (WARN_ON(!gic || gic->domain))
1040 return -EINVAL;
1026 1041
1027 /* Initialize irq_chip */ 1042 /* Initialize irq_chip */
1028 if (static_key_true(&supports_deactivate) && gic_nr == 0) { 1043 gic->chip = gic_chip;
1029 gic->chip = gic_eoimode1_chip; 1044
1045 if (static_key_true(&supports_deactivate) && gic == &gic_data[0]) {
1046 gic->chip.irq_mask = gic_eoimode1_mask_irq;
1047 gic->chip.irq_eoi = gic_eoimode1_eoi_irq;
1048 gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity;
1049 gic->chip.name = kasprintf(GFP_KERNEL, "GICv2");
1030 } else { 1050 } else {
1031 gic->chip = gic_chip; 1051 gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d",
1032 gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d", gic_nr); 1052 (int)(gic - &gic_data[0]));
1033 } 1053 }
1034 1054
1035#ifdef CONFIG_SMP 1055#ifdef CONFIG_SMP
1036 if (gic_nr == 0) 1056 if (gic == &gic_data[0])
1037 gic->chip.irq_set_affinity = gic_set_affinity; 1057 gic->chip.irq_set_affinity = gic_set_affinity;
1038#endif 1058#endif
1039 1059
1040#ifdef CONFIG_GIC_NON_BANKED 1060 if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
1041 if (percpu_offset) { /* Frankein-GIC without banked registers... */ 1061 /* Frankein-GIC without banked registers... */
1042 unsigned int cpu; 1062 unsigned int cpu;
1043 1063
1044 gic->dist_base.percpu_base = alloc_percpu(void __iomem *); 1064 gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
1045 gic->cpu_base.percpu_base = alloc_percpu(void __iomem *); 1065 gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
1046 if (WARN_ON(!gic->dist_base.percpu_base || 1066 if (WARN_ON(!gic->dist_base.percpu_base ||
1047 !gic->cpu_base.percpu_base)) { 1067 !gic->cpu_base.percpu_base)) {
1048 free_percpu(gic->dist_base.percpu_base); 1068 ret = -ENOMEM;
1049 free_percpu(gic->cpu_base.percpu_base); 1069 goto error;
1050 return;
1051 } 1070 }
1052 1071
1053 for_each_possible_cpu(cpu) { 1072 for_each_possible_cpu(cpu) {
1054 u32 mpidr = cpu_logical_map(cpu); 1073 u32 mpidr = cpu_logical_map(cpu);
1055 u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); 1074 u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
1056 unsigned long offset = percpu_offset * core_id; 1075 unsigned long offset = gic->percpu_offset * core_id;
1057 *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset; 1076 *per_cpu_ptr(gic->dist_base.percpu_base, cpu) =
1058 *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset; 1077 gic->raw_dist_base + offset;
1078 *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) =
1079 gic->raw_cpu_base + offset;
1059 } 1080 }
1060 1081
1061 gic_set_base_accessor(gic, gic_get_percpu_base); 1082 gic_set_base_accessor(gic, gic_get_percpu_base);
1062 } else 1083 } else {
1063#endif 1084 /* Normal, sane GIC... */
1064 { /* Normal, sane GIC... */ 1085 WARN(gic->percpu_offset,
1065 WARN(percpu_offset,
1066 "GIC_NON_BANKED not enabled, ignoring %08x offset!", 1086 "GIC_NON_BANKED not enabled, ignoring %08x offset!",
1067 percpu_offset); 1087 gic->percpu_offset);
1068 gic->dist_base.common_base = dist_base; 1088 gic->dist_base.common_base = gic->raw_dist_base;
1069 gic->cpu_base.common_base = cpu_base; 1089 gic->cpu_base.common_base = gic->raw_cpu_base;
1070 gic_set_base_accessor(gic, gic_get_common_base); 1090 gic_set_base_accessor(gic, gic_get_common_base);
1071 } 1091 }
1072 1092
@@ -1089,7 +1109,7 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
1089 * For primary GICs, skip over SGIs. 1109 * For primary GICs, skip over SGIs.
1090 * For secondary GICs, skip over PPIs, too. 1110 * For secondary GICs, skip over PPIs, too.
1091 */ 1111 */
1092 if (gic_nr == 0 && (irq_start & 31) > 0) { 1112 if (gic == &gic_data[0] && (irq_start & 31) > 0) {
1093 hwirq_base = 16; 1113 hwirq_base = 16;
1094 if (irq_start != -1) 1114 if (irq_start != -1)
1095 irq_start = (irq_start & ~31) + 16; 1115 irq_start = (irq_start & ~31) + 16;
@@ -1111,10 +1131,12 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
1111 hwirq_base, &gic_irq_domain_ops, gic); 1131 hwirq_base, &gic_irq_domain_ops, gic);
1112 } 1132 }
1113 1133
1114 if (WARN_ON(!gic->domain)) 1134 if (WARN_ON(!gic->domain)) {
1115 return; 1135 ret = -ENODEV;
1136 goto error;
1137 }
1116 1138
1117 if (gic_nr == 0) { 1139 if (gic == &gic_data[0]) {
1118 /* 1140 /*
1119 * Initialize the CPU interface map to all CPUs. 1141 * Initialize the CPU interface map to all CPUs.
1120 * It will be refined as each CPU probes its ID. 1142 * It will be refined as each CPU probes its ID.
@@ -1132,19 +1154,57 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
1132 } 1154 }
1133 1155
1134 gic_dist_init(gic); 1156 gic_dist_init(gic);
1135 gic_cpu_init(gic); 1157 ret = gic_cpu_init(gic);
1136 gic_pm_init(gic); 1158 if (ret)
1159 goto error;
1160
1161 ret = gic_pm_init(gic);
1162 if (ret)
1163 goto error;
1164
1165 return 0;
1166
1167error:
1168 if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
1169 free_percpu(gic->dist_base.percpu_base);
1170 free_percpu(gic->cpu_base.percpu_base);
1171 }
1172
1173 kfree(gic->chip.name);
1174
1175 return ret;
1137} 1176}
1138 1177
1139void __init gic_init(unsigned int gic_nr, int irq_start, 1178void __init gic_init(unsigned int gic_nr, int irq_start,
1140 void __iomem *dist_base, void __iomem *cpu_base) 1179 void __iomem *dist_base, void __iomem *cpu_base)
1141{ 1180{
1181 struct gic_chip_data *gic;
1182
1183 if (WARN_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR))
1184 return;
1185
1142 /* 1186 /*
1143 * Non-DT/ACPI systems won't run a hypervisor, so let's not 1187 * Non-DT/ACPI systems won't run a hypervisor, so let's not
1144 * bother with these... 1188 * bother with these...
1145 */ 1189 */
1146 static_key_slow_dec(&supports_deactivate); 1190 static_key_slow_dec(&supports_deactivate);
1147 __gic_init_bases(gic_nr, irq_start, dist_base, cpu_base, 0, NULL); 1191
1192 gic = &gic_data[gic_nr];
1193 gic->raw_dist_base = dist_base;
1194 gic->raw_cpu_base = cpu_base;
1195
1196 __gic_init_bases(gic, irq_start, NULL);
1197}
1198
1199static void gic_teardown(struct gic_chip_data *gic)
1200{
1201 if (WARN_ON(!gic))
1202 return;
1203
1204 if (gic->raw_dist_base)
1205 iounmap(gic->raw_dist_base);
1206 if (gic->raw_cpu_base)
1207 iounmap(gic->raw_cpu_base);
1148} 1208}
1149 1209
1150#ifdef CONFIG_OF 1210#ifdef CONFIG_OF
@@ -1188,35 +1248,61 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
 	return true;
 }
 
+static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
+{
+	if (!gic || !node)
+		return -EINVAL;
+
+	gic->raw_dist_base = of_iomap(node, 0);
+	if (WARN(!gic->raw_dist_base, "unable to map gic dist registers\n"))
+		goto error;
+
+	gic->raw_cpu_base = of_iomap(node, 1);
+	if (WARN(!gic->raw_cpu_base, "unable to map gic cpu registers\n"))
+		goto error;
+
+	if (of_property_read_u32(node, "cpu-offset", &gic->percpu_offset))
+		gic->percpu_offset = 0;
+
+	return 0;
+
+error:
+	gic_teardown(gic);
+
+	return -ENOMEM;
+}
+
 int __init
 gic_of_init(struct device_node *node, struct device_node *parent)
 {
-	void __iomem *cpu_base;
-	void __iomem *dist_base;
-	u32 percpu_offset;
-	int irq;
+	struct gic_chip_data *gic;
+	int irq, ret;
 
 	if (WARN_ON(!node))
 		return -ENODEV;
 
-	dist_base = of_iomap(node, 0);
-	WARN(!dist_base, "unable to map gic dist registers\n");
+	if (WARN_ON(gic_cnt >= CONFIG_ARM_GIC_MAX_NR))
+		return -EINVAL;
+
+	gic = &gic_data[gic_cnt];
 
-	cpu_base = of_iomap(node, 1);
-	WARN(!cpu_base, "unable to map gic cpu registers\n");
+	ret = gic_of_setup(gic, node);
+	if (ret)
+		return ret;
 
 	/*
 	 * Disable split EOI/Deactivate if either HYP is not available
 	 * or the CPU interface is too small.
 	 */
-	if (gic_cnt == 0 && !gic_check_eoimode(node, &cpu_base))
+	if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base))
 		static_key_slow_dec(&supports_deactivate);
 
-	if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
-		percpu_offset = 0;
+	ret = __gic_init_bases(gic, -1, &node->fwnode);
+	if (ret) {
+		gic_teardown(gic);
+		return ret;
+	}
 
-	__gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset,
-			 &node->fwnode);
 	if (!gic_cnt)
 		gic_init_physaddr(node);
 
@@ -1303,9 +1389,9 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
 				   const unsigned long end)
 {
 	struct acpi_madt_generic_distributor *dist;
-	void __iomem *cpu_base, *dist_base;
 	struct fwnode_handle *domain_handle;
-	int count;
+	struct gic_chip_data *gic = &gic_data[0];
+	int count, ret;
 
 	/* Collect CPU base addresses */
 	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
@@ -1315,17 +1401,18 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
 		return -EINVAL;
 	}
 
-	cpu_base = ioremap(cpu_phy_base, ACPI_GIC_CPU_IF_MEM_SIZE);
-	if (!cpu_base) {
+	gic->raw_cpu_base = ioremap(cpu_phy_base, ACPI_GIC_CPU_IF_MEM_SIZE);
+	if (!gic->raw_cpu_base) {
 		pr_err("Unable to map GICC registers\n");
 		return -ENOMEM;
 	}
 
 	dist = (struct acpi_madt_generic_distributor *)header;
-	dist_base = ioremap(dist->base_address, ACPI_GICV2_DIST_MEM_SIZE);
-	if (!dist_base) {
+	gic->raw_dist_base = ioremap(dist->base_address,
+				     ACPI_GICV2_DIST_MEM_SIZE);
+	if (!gic->raw_dist_base) {
 		pr_err("Unable to map GICD registers\n");
-		iounmap(cpu_base);
+		gic_teardown(gic);
 		return -ENOMEM;
 	}
 
@@ -1340,15 +1427,20 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
 	/*
 	 * Initialize GIC instance zero (no multi-GIC support).
 	 */
-	domain_handle = irq_domain_alloc_fwnode(dist_base);
+	domain_handle = irq_domain_alloc_fwnode(gic->raw_dist_base);
 	if (!domain_handle) {
 		pr_err("Unable to allocate domain handle\n");
-		iounmap(cpu_base);
-		iounmap(dist_base);
+		gic_teardown(gic);
 		return -ENOMEM;
 	}
 
-	__gic_init_bases(0, -1, dist_base, cpu_base, 0, domain_handle);
+	ret = __gic_init_bases(gic, -1, domain_handle);
+	if (ret) {
+		pr_err("Failed to initialise GIC\n");
+		irq_domain_free_fwnode(domain_handle);
+		gic_teardown(gic);
+		return ret;
+	}
 
 	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
 
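
The gic.c changes above convert __gic_init_bases() and its callers to propagate errors and to unwind partial setup through gic_teardown(). A minimal sketch of that same map/init/unwind shape, for reference only; every my_* name below is a hypothetical stand-in and not part of the patch:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

struct my_chip_data {                           /* hypothetical */
        void __iomem *base;
};

static struct my_chip_data my_chip;

static int my_chip_init(struct my_chip_data *chip)      /* hypothetical: would program the hardware */
{
        return 0;
}

static int __init my_irqchip_of_init(struct device_node *node,
                                     struct device_node *parent)
{
        struct my_chip_data *chip = &my_chip;
        int ret;

        chip->base = of_iomap(node, 0);
        if (!chip->base)
                return -ENOMEM;

        ret = my_chip_init(chip);
        if (ret)
                goto err_unmap;         /* undo the mapping, as gic_teardown() does */

        return 0;

err_unmap:
        iounmap(chip->base);
        return ret;
}
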
diff --git a/drivers/irqchip/irq-lpc32xx.c b/drivers/irqchip/irq-lpc32xx.c
new file mode 100644
index 000000000000..1034aeb2e98a
--- /dev/null
+++ b/drivers/irqchip/irq-lpc32xx.c
@@ -0,0 +1,238 @@
1/*
2 * Copyright 2015-2016 Vladimir Zapolskiy <vz@mleia.com>
3 *
4 * The code contained herein is licensed under the GNU General Public
5 * License. You may obtain a copy of the GNU General Public License
6 * Version 2 or later at the following locations:
7 *
8 * http://www.opensource.org/licenses/gpl-license.html
9 * http://www.gnu.org/copyleft/gpl.html
10 */
11
12#define pr_fmt(fmt) "%s: " fmt, __func__
13
14#include <linux/io.h>
15#include <linux/irqchip.h>
16#include <linux/irqchip/chained_irq.h>
17#include <linux/of_address.h>
18#include <linux/of_irq.h>
19#include <linux/of_platform.h>
20#include <linux/slab.h>
21#include <asm/exception.h>
22
23#define LPC32XX_INTC_MASK 0x00
24#define LPC32XX_INTC_RAW 0x04
25#define LPC32XX_INTC_STAT 0x08
26#define LPC32XX_INTC_POL 0x0C
27#define LPC32XX_INTC_TYPE 0x10
28#define LPC32XX_INTC_FIQ 0x14
29
30#define NR_LPC32XX_IC_IRQS 32
31
32struct lpc32xx_irq_chip {
33 void __iomem *base;
34 struct irq_domain *domain;
35 struct irq_chip chip;
36};
37
38static struct lpc32xx_irq_chip *lpc32xx_mic_irqc;
39
40static inline u32 lpc32xx_ic_read(struct lpc32xx_irq_chip *ic, u32 reg)
41{
42 return readl_relaxed(ic->base + reg);
43}
44
45static inline void lpc32xx_ic_write(struct lpc32xx_irq_chip *ic,
46 u32 reg, u32 val)
47{
48 writel_relaxed(val, ic->base + reg);
49}
50
51static void lpc32xx_irq_mask(struct irq_data *d)
52{
53 struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
54 u32 val, mask = BIT(d->hwirq);
55
56 val = lpc32xx_ic_read(ic, LPC32XX_INTC_MASK) & ~mask;
57 lpc32xx_ic_write(ic, LPC32XX_INTC_MASK, val);
58}
59
60static void lpc32xx_irq_unmask(struct irq_data *d)
61{
62 struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
63 u32 val, mask = BIT(d->hwirq);
64
65 val = lpc32xx_ic_read(ic, LPC32XX_INTC_MASK) | mask;
66 lpc32xx_ic_write(ic, LPC32XX_INTC_MASK, val);
67}
68
69static void lpc32xx_irq_ack(struct irq_data *d)
70{
71 struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
72 u32 mask = BIT(d->hwirq);
73
74 lpc32xx_ic_write(ic, LPC32XX_INTC_RAW, mask);
75}
76
77static int lpc32xx_irq_set_type(struct irq_data *d, unsigned int type)
78{
79 struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
80 u32 val, mask = BIT(d->hwirq);
81 bool high, edge;
82
83 switch (type) {
84 case IRQ_TYPE_EDGE_RISING:
85 edge = true;
86 high = true;
87 break;
88 case IRQ_TYPE_EDGE_FALLING:
89 edge = true;
90 high = false;
91 break;
92 case IRQ_TYPE_LEVEL_HIGH:
93 edge = false;
94 high = true;
95 break;
96 case IRQ_TYPE_LEVEL_LOW:
97 edge = false;
98 high = false;
99 break;
100 default:
101 pr_info("unsupported irq type %d\n", type);
102 return -EINVAL;
103 }
104
105 irqd_set_trigger_type(d, type);
106
107 val = lpc32xx_ic_read(ic, LPC32XX_INTC_POL);
108 if (high)
109 val |= mask;
110 else
111 val &= ~mask;
112 lpc32xx_ic_write(ic, LPC32XX_INTC_POL, val);
113
114 val = lpc32xx_ic_read(ic, LPC32XX_INTC_TYPE);
115 if (edge) {
116 val |= mask;
117 irq_set_handler_locked(d, handle_edge_irq);
118 } else {
119 val &= ~mask;
120 irq_set_handler_locked(d, handle_level_irq);
121 }
122 lpc32xx_ic_write(ic, LPC32XX_INTC_TYPE, val);
123
124 return 0;
125}
126
127static void __exception_irq_entry lpc32xx_handle_irq(struct pt_regs *regs)
128{
129 struct lpc32xx_irq_chip *ic = lpc32xx_mic_irqc;
130 u32 hwirq = lpc32xx_ic_read(ic, LPC32XX_INTC_STAT), irq;
131
132 while (hwirq) {
133 irq = __ffs(hwirq);
134 hwirq &= ~BIT(irq);
135 handle_domain_irq(lpc32xx_mic_irqc->domain, irq, regs);
136 }
137}
138
139static void lpc32xx_sic_handler(struct irq_desc *desc)
140{
141 struct lpc32xx_irq_chip *ic = irq_desc_get_handler_data(desc);
142 struct irq_chip *chip = irq_desc_get_chip(desc);
143 u32 hwirq = lpc32xx_ic_read(ic, LPC32XX_INTC_STAT), irq;
144
145 chained_irq_enter(chip, desc);
146
147 while (hwirq) {
148 irq = __ffs(hwirq);
149 hwirq &= ~BIT(irq);
150 generic_handle_irq(irq_find_mapping(ic->domain, irq));
151 }
152
153 chained_irq_exit(chip, desc);
154}
155
156static int lpc32xx_irq_domain_map(struct irq_domain *id, unsigned int virq,
157 irq_hw_number_t hw)
158{
159 struct lpc32xx_irq_chip *ic = id->host_data;
160
161 irq_set_chip_data(virq, ic);
162 irq_set_chip_and_handler(virq, &ic->chip, handle_level_irq);
163 irq_set_status_flags(virq, IRQ_LEVEL);
164 irq_set_noprobe(virq);
165
166 return 0;
167}
168
169static void lpc32xx_irq_domain_unmap(struct irq_domain *id, unsigned int virq)
170{
171 irq_set_chip_and_handler(virq, NULL, NULL);
172}
173
174static const struct irq_domain_ops lpc32xx_irq_domain_ops = {
175 .map = lpc32xx_irq_domain_map,
176 .unmap = lpc32xx_irq_domain_unmap,
177 .xlate = irq_domain_xlate_twocell,
178};
179
180static int __init lpc32xx_of_ic_init(struct device_node *node,
181 struct device_node *parent)
182{
183 struct lpc32xx_irq_chip *irqc;
184 bool is_mic = of_device_is_compatible(node, "nxp,lpc3220-mic");
185 const __be32 *reg = of_get_property(node, "reg", NULL);
186 u32 parent_irq, i, addr = reg ? be32_to_cpu(*reg) : 0;
187
188 irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
189 if (!irqc)
190 return -ENOMEM;
191
192 irqc->base = of_iomap(node, 0);
193 if (!irqc->base) {
194 pr_err("%s: unable to map registers\n", node->full_name);
195 kfree(irqc);
196 return -EINVAL;
197 }
198
199 irqc->chip.irq_ack = lpc32xx_irq_ack;
200 irqc->chip.irq_mask = lpc32xx_irq_mask;
201 irqc->chip.irq_unmask = lpc32xx_irq_unmask;
202 irqc->chip.irq_set_type = lpc32xx_irq_set_type;
203 if (is_mic)
204 irqc->chip.name = kasprintf(GFP_KERNEL, "%08x.mic", addr);
205 else
206 irqc->chip.name = kasprintf(GFP_KERNEL, "%08x.sic", addr);
207
208 irqc->domain = irq_domain_add_linear(node, NR_LPC32XX_IC_IRQS,
209 &lpc32xx_irq_domain_ops, irqc);
210 if (!irqc->domain) {
211 pr_err("unable to add irq domain\n");
212 iounmap(irqc->base);
213 kfree(irqc->chip.name);
214 kfree(irqc);
215 return -ENODEV;
216 }
217
218 if (is_mic) {
219 lpc32xx_mic_irqc = irqc;
220 set_handle_irq(lpc32xx_handle_irq);
221 } else {
222 for (i = 0; i < of_irq_count(node); i++) {
223 parent_irq = irq_of_parse_and_map(node, i);
224 if (parent_irq)
225 irq_set_chained_handler_and_data(parent_irq,
226 lpc32xx_sic_handler, irqc);
227 }
228 }
229
230 lpc32xx_ic_write(irqc, LPC32XX_INTC_MASK, 0x00);
231 lpc32xx_ic_write(irqc, LPC32XX_INTC_POL, 0x00);
232 lpc32xx_ic_write(irqc, LPC32XX_INTC_TYPE, 0x00);
233
234 return 0;
235}
236
237IRQCHIP_DECLARE(nxp_lpc32xx_mic, "nxp,lpc3220-mic", lpc32xx_of_ic_init);
238IRQCHIP_DECLARE(nxp_lpc32xx_sic, "nxp,lpc3220-sic", lpc32xx_of_ic_init);
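
lpc32xx_irq_set_type() above programs LPC32XX_INTC_POL and LPC32XX_INTC_TYPE and swaps the flow handler between handle_level_irq and handle_edge_irq. A consumer-side sketch of how that path gets exercised; the demo_* names are hypothetical and the device node is assumed to describe a line of this controller:

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>

static irqreturn_t demo_isr(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int demo_request_falling_edge(struct device_node *np)
{
        unsigned int irq = irq_of_parse_and_map(np, 0);

        if (!irq)
                return -EINVAL;

        /* IRQF_TRIGGER_FALLING ends up in lpc32xx_irq_set_type() */
        return request_irq(irq, demo_isr, IRQF_TRIGGER_FALLING,
                           "lpc32xx-demo", NULL);
}
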
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
new file mode 100644
index 000000000000..02cca74cab94
--- /dev/null
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -0,0 +1,240 @@
1/*
2 * Freescale SCFG MSI(-X) support
3 *
4 * Copyright (C) 2016 Freescale Semiconductor.
5 *
6 * Author: Minghuan Lian <Minghuan.Lian@nxp.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/msi.h>
16#include <linux/interrupt.h>
17#include <linux/irq.h>
18#include <linux/irqchip/chained_irq.h>
19#include <linux/irqdomain.h>
20#include <linux/of_pci.h>
21#include <linux/of_platform.h>
22#include <linux/spinlock.h>
23
24#define MSI_MAX_IRQS 32
25#define MSI_IBS_SHIFT 3
26#define MSIR 4
27
28struct ls_scfg_msi {
29 spinlock_t lock;
30 struct platform_device *pdev;
31 struct irq_domain *parent;
32 struct irq_domain *msi_domain;
33 void __iomem *regs;
34 phys_addr_t msiir_addr;
35 int irq;
36 DECLARE_BITMAP(used, MSI_MAX_IRQS);
37};
38
39static struct irq_chip ls_scfg_msi_irq_chip = {
40 .name = "MSI",
41 .irq_mask = pci_msi_mask_irq,
42 .irq_unmask = pci_msi_unmask_irq,
43};
44
45static struct msi_domain_info ls_scfg_msi_domain_info = {
46 .flags = (MSI_FLAG_USE_DEF_DOM_OPS |
47 MSI_FLAG_USE_DEF_CHIP_OPS |
48 MSI_FLAG_PCI_MSIX),
49 .chip = &ls_scfg_msi_irq_chip,
50};
51
52static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
53{
54 struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data);
55
56 msg->address_hi = upper_32_bits(msi_data->msiir_addr);
57 msg->address_lo = lower_32_bits(msi_data->msiir_addr);
58 msg->data = data->hwirq << MSI_IBS_SHIFT;
59}
60
61static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
62 const struct cpumask *mask, bool force)
63{
64 return -EINVAL;
65}
66
67static struct irq_chip ls_scfg_msi_parent_chip = {
68 .name = "SCFG",
69 .irq_compose_msi_msg = ls_scfg_msi_compose_msg,
70 .irq_set_affinity = ls_scfg_msi_set_affinity,
71};
72
73static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
74 unsigned int virq,
75 unsigned int nr_irqs,
76 void *args)
77{
78 struct ls_scfg_msi *msi_data = domain->host_data;
79 int pos, err = 0;
80
81 WARN_ON(nr_irqs != 1);
82
83 spin_lock(&msi_data->lock);
84 pos = find_first_zero_bit(msi_data->used, MSI_MAX_IRQS);
85 if (pos < MSI_MAX_IRQS)
86 __set_bit(pos, msi_data->used);
87 else
88 err = -ENOSPC;
89 spin_unlock(&msi_data->lock);
90
91 if (err)
92 return err;
93
94 irq_domain_set_info(domain, virq, pos,
95 &ls_scfg_msi_parent_chip, msi_data,
96 handle_simple_irq, NULL, NULL);
97
98 return 0;
99}
100
101static void ls_scfg_msi_domain_irq_free(struct irq_domain *domain,
102 unsigned int virq, unsigned int nr_irqs)
103{
104 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
105 struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(d);
106 int pos;
107
108 pos = d->hwirq;
109 if (pos < 0 || pos >= MSI_MAX_IRQS) {
110 pr_err("failed to teardown msi. Invalid hwirq %d\n", pos);
111 return;
112 }
113
114 spin_lock(&msi_data->lock);
115 __clear_bit(pos, msi_data->used);
116 spin_unlock(&msi_data->lock);
117}
118
119static const struct irq_domain_ops ls_scfg_msi_domain_ops = {
120 .alloc = ls_scfg_msi_domain_irq_alloc,
121 .free = ls_scfg_msi_domain_irq_free,
122};
123
124static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
125{
126 struct ls_scfg_msi *msi_data = irq_desc_get_handler_data(desc);
127 unsigned long val;
128 int pos, virq;
129
130 chained_irq_enter(irq_desc_get_chip(desc), desc);
131
132 val = ioread32be(msi_data->regs + MSIR);
133 for_each_set_bit(pos, &val, MSI_MAX_IRQS) {
134 virq = irq_find_mapping(msi_data->parent, (31 - pos));
135 if (virq)
136 generic_handle_irq(virq);
137 }
138
139 chained_irq_exit(irq_desc_get_chip(desc), desc);
140}
141
142static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
143{
144 /* Initialize MSI domain parent */
145 msi_data->parent = irq_domain_add_linear(NULL,
146 MSI_MAX_IRQS,
147 &ls_scfg_msi_domain_ops,
148 msi_data);
149 if (!msi_data->parent) {
150 dev_err(&msi_data->pdev->dev, "failed to create IRQ domain\n");
151 return -ENOMEM;
152 }
153
154 msi_data->msi_domain = pci_msi_create_irq_domain(
155 of_node_to_fwnode(msi_data->pdev->dev.of_node),
156 &ls_scfg_msi_domain_info,
157 msi_data->parent);
158 if (!msi_data->msi_domain) {
159 dev_err(&msi_data->pdev->dev, "failed to create MSI domain\n");
160 irq_domain_remove(msi_data->parent);
161 return -ENOMEM;
162 }
163
164 return 0;
165}
166
167static int ls_scfg_msi_probe(struct platform_device *pdev)
168{
169 struct ls_scfg_msi *msi_data;
170 struct resource *res;
171 int ret;
172
173 msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
174 if (!msi_data)
175 return -ENOMEM;
176
177 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
178 msi_data->regs = devm_ioremap_resource(&pdev->dev, res);
179 if (IS_ERR(msi_data->regs)) {
180 dev_err(&pdev->dev, "failed to initialize 'regs'\n");
181 return PTR_ERR(msi_data->regs);
182 }
183 msi_data->msiir_addr = res->start;
184
185 msi_data->irq = platform_get_irq(pdev, 0);
186 if (msi_data->irq <= 0) {
187 dev_err(&pdev->dev, "failed to get MSI irq\n");
188 return -ENODEV;
189 }
190
191 msi_data->pdev = pdev;
192 spin_lock_init(&msi_data->lock);
193
194 ret = ls_scfg_msi_domains_init(msi_data);
195 if (ret)
196 return ret;
197
198 irq_set_chained_handler_and_data(msi_data->irq,
199 ls_scfg_msi_irq_handler,
200 msi_data);
201
202 platform_set_drvdata(pdev, msi_data);
203
204 return 0;
205}
206
207static int ls_scfg_msi_remove(struct platform_device *pdev)
208{
209 struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);
210
211 irq_set_chained_handler_and_data(msi_data->irq, NULL, NULL);
212
213 irq_domain_remove(msi_data->msi_domain);
214 irq_domain_remove(msi_data->parent);
215
216 platform_set_drvdata(pdev, NULL);
217
218 return 0;
219}
220
221static const struct of_device_id ls_scfg_msi_id[] = {
222 { .compatible = "fsl,1s1021a-msi", },
223 { .compatible = "fsl,1s1043a-msi", },
224 {},
225};
226
227static struct platform_driver ls_scfg_msi_driver = {
228 .driver = {
229 .name = "ls-scfg-msi",
230 .of_match_table = ls_scfg_msi_id,
231 },
232 .probe = ls_scfg_msi_probe,
233 .remove = ls_scfg_msi_remove,
234};
235
236module_platform_driver(ls_scfg_msi_driver);
237
238MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@nxp.com>");
239MODULE_DESCRIPTION("Freescale Layerscape SCFG MSI controller driver");
240MODULE_LICENSE("GPL v2");
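
ls_scfg_msi_compose_msg() above places the hwirq in the MSI payload shifted by MSI_IBS_SHIFT, and the chained handler maps a set MSIR bit at position pos back to hwirq 31 - pos. A stand-alone check of that arithmetic, assuming (as the handler does) that hwirq N is reported in MSIR bit 31 - N:

#include <assert.h>
#include <stdint.h>

#define MSI_IBS_SHIFT   3

int main(void)
{
        for (uint32_t hwirq = 0; hwirq < 32; hwirq++) {
                uint32_t data = hwirq << MSI_IBS_SHIFT; /* ls_scfg_msi_compose_msg() */
                uint32_t pos = 31 - hwirq;              /* bit assumed to be set in MSIR */

                assert((data >> MSI_IBS_SHIFT) == hwirq);
                assert((31 - pos) == hwirq);            /* lookup in ls_scfg_msi_irq_handler() */
        }
        return 0;
}
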
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index d67baa231c13..03b79b061d24 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -263,8 +263,8 @@ static int mbigen_device_probe(struct platform_device *pdev)
 
 		parent = platform_bus_type.dev_root;
 		child = of_platform_device_create(np, NULL, parent);
-		if (IS_ERR(child))
-			return PTR_ERR(child);
+		if (!child)
+			return -ENOMEM;
 
 		if (of_property_read_u32(child->dev.of_node, "num-pins",
 					 &num_pins) < 0) {
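
The mbigen fix swaps an IS_ERR() check for a NULL check because of_platform_device_create() signals failure with NULL rather than an ERR_PTR() value, so the old test silently treated failures as success. A minimal sketch of the two conventions; demo_create() is hypothetical:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/of_platform.h>

static int demo_create(struct device_node *np, struct device *parent)
{
        struct platform_device *child;

        child = of_platform_device_create(np, NULL, parent);
        if (!child)                     /* NULL-returning API: test for NULL */
                return -ENOMEM;

        /*
         * An ERR_PTR-returning API would instead be checked with:
         *      if (IS_ERR(ptr))
         *              return PTR_ERR(ptr);
         */
        return 0;
}
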
diff --git a/drivers/irqchip/irq-partition-percpu.c b/drivers/irqchip/irq-partition-percpu.c
new file mode 100644
index 000000000000..ccd72c2cbc23
--- /dev/null
+++ b/drivers/irqchip/irq-partition-percpu.c
@@ -0,0 +1,256 @@
1/*
2 * Copyright (C) 2016 ARM Limited, All Rights Reserved.
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/bitops.h>
19#include <linux/interrupt.h>
20#include <linux/irqchip.h>
21#include <linux/irqchip/chained_irq.h>
22#include <linux/irqchip/irq-partition-percpu.h>
23#include <linux/irqdomain.h>
24#include <linux/seq_file.h>
25#include <linux/slab.h>
26
27struct partition_desc {
28 int nr_parts;
29 struct partition_affinity *parts;
30 struct irq_domain *domain;
31 struct irq_desc *chained_desc;
32 unsigned long *bitmap;
33 struct irq_domain_ops ops;
34};
35
36static bool partition_check_cpu(struct partition_desc *part,
37 unsigned int cpu, unsigned int hwirq)
38{
39 return cpumask_test_cpu(cpu, &part->parts[hwirq].mask);
40}
41
42static void partition_irq_mask(struct irq_data *d)
43{
44 struct partition_desc *part = irq_data_get_irq_chip_data(d);
45 struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
46 struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
47
48 if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
49 chip->irq_mask)
50 chip->irq_mask(data);
51}
52
53static void partition_irq_unmask(struct irq_data *d)
54{
55 struct partition_desc *part = irq_data_get_irq_chip_data(d);
56 struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
57 struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
58
59 if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
60 chip->irq_unmask)
61 chip->irq_unmask(data);
62}
63
64static int partition_irq_set_irqchip_state(struct irq_data *d,
65 enum irqchip_irq_state which,
66 bool val)
67{
68 struct partition_desc *part = irq_data_get_irq_chip_data(d);
69 struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
70 struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
71
72 if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
73 chip->irq_set_irqchip_state)
74 return chip->irq_set_irqchip_state(data, which, val);
75
76 return -EINVAL;
77}
78
79static int partition_irq_get_irqchip_state(struct irq_data *d,
80 enum irqchip_irq_state which,
81 bool *val)
82{
83 struct partition_desc *part = irq_data_get_irq_chip_data(d);
84 struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
85 struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
86
87 if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
88 chip->irq_get_irqchip_state)
89 return chip->irq_get_irqchip_state(data, which, val);
90
91 return -EINVAL;
92}
93
94static int partition_irq_set_type(struct irq_data *d, unsigned int type)
95{
96 struct partition_desc *part = irq_data_get_irq_chip_data(d);
97 struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
98 struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
99
100 if (chip->irq_set_type)
101 return chip->irq_set_type(data, type);
102
103 return -EINVAL;
104}
105
106static void partition_irq_print_chip(struct irq_data *d, struct seq_file *p)
107{
108 struct partition_desc *part = irq_data_get_irq_chip_data(d);
109 struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
110 struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
111
112 seq_printf(p, " %5s-%lu", chip->name, data->hwirq);
113}
114
115static struct irq_chip partition_irq_chip = {
116 .irq_mask = partition_irq_mask,
117 .irq_unmask = partition_irq_unmask,
118 .irq_set_type = partition_irq_set_type,
119 .irq_get_irqchip_state = partition_irq_get_irqchip_state,
120 .irq_set_irqchip_state = partition_irq_set_irqchip_state,
121 .irq_print_chip = partition_irq_print_chip,
122};
123
124static void partition_handle_irq(struct irq_desc *desc)
125{
126 struct partition_desc *part = irq_desc_get_handler_data(desc);
127 struct irq_chip *chip = irq_desc_get_chip(desc);
128 int cpu = smp_processor_id();
129 int hwirq;
130
131 chained_irq_enter(chip, desc);
132
133 for_each_set_bit(hwirq, part->bitmap, part->nr_parts) {
134 if (partition_check_cpu(part, cpu, hwirq))
135 break;
136 }
137
138 if (unlikely(hwirq == part->nr_parts)) {
139 handle_bad_irq(desc);
140 } else {
141 unsigned int irq;
142 irq = irq_find_mapping(part->domain, hwirq);
143 generic_handle_irq(irq);
144 }
145
146 chained_irq_exit(chip, desc);
147}
148
149static int partition_domain_alloc(struct irq_domain *domain, unsigned int virq,
150 unsigned int nr_irqs, void *arg)
151{
152 int ret;
153 irq_hw_number_t hwirq;
154 unsigned int type;
155 struct irq_fwspec *fwspec = arg;
156 struct partition_desc *part;
157
158 BUG_ON(nr_irqs != 1);
159 ret = domain->ops->translate(domain, fwspec, &hwirq, &type);
160 if (ret)
161 return ret;
162
163 part = domain->host_data;
164
165 set_bit(hwirq, part->bitmap);
166 irq_set_chained_handler_and_data(irq_desc_get_irq(part->chained_desc),
167 partition_handle_irq, part);
168 irq_set_percpu_devid_partition(virq, &part->parts[hwirq].mask);
169 irq_domain_set_info(domain, virq, hwirq, &partition_irq_chip, part,
170 handle_percpu_devid_irq, NULL, NULL);
171 irq_set_status_flags(virq, IRQ_NOAUTOEN);
172
173 return 0;
174}
175
176static void partition_domain_free(struct irq_domain *domain, unsigned int virq,
177 unsigned int nr_irqs)
178{
179 struct irq_data *d;
180
181 BUG_ON(nr_irqs != 1);
182
183 d = irq_domain_get_irq_data(domain, virq);
184 irq_set_handler(virq, NULL);
185 irq_domain_reset_irq_data(d);
186}
187
188int partition_translate_id(struct partition_desc *desc, void *partition_id)
189{
190 struct partition_affinity *part = NULL;
191 int i;
192
193 for (i = 0; i < desc->nr_parts; i++) {
194 if (desc->parts[i].partition_id == partition_id) {
195 part = &desc->parts[i];
196 break;
197 }
198 }
199
200 if (WARN_ON(!part)) {
201 pr_err("Failed to find partition\n");
202 return -EINVAL;
203 }
204
205 return i;
206}
207
208struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
209 struct partition_affinity *parts,
210 int nr_parts,
211 int chained_irq,
212 const struct irq_domain_ops *ops)
213{
214 struct partition_desc *desc;
215 struct irq_domain *d;
216
217 BUG_ON(!ops->select || !ops->translate);
218
219 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
220 if (!desc)
221 return NULL;
222
223 desc->ops = *ops;
224 desc->ops.free = partition_domain_free;
225 desc->ops.alloc = partition_domain_alloc;
226
227 d = irq_domain_create_linear(fwnode, nr_parts, &desc->ops, desc);
228 if (!d)
229 goto out;
230 desc->domain = d;
231
232 desc->bitmap = kzalloc(sizeof(long) * BITS_TO_LONGS(nr_parts),
233 GFP_KERNEL);
234 if (WARN_ON(!desc->bitmap))
235 goto out;
236
237 desc->chained_desc = irq_to_desc(chained_irq);
238 desc->nr_parts = nr_parts;
239 desc->parts = parts;
240
241 return desc;
242out:
243 if (d)
244 irq_domain_remove(d);
245 kfree(desc);
246
247 return NULL;
248}
249
250struct irq_domain *partition_get_domain(struct partition_desc *dsc)
251{
252 if (dsc)
253 return dsc->domain;
254
255 return NULL;
256}
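
partition_create_desc() above wraps a chained per-cpu interrupt in a small linear domain and requires the caller to supply select() and translate() ops (it BUG()s without them). A purely hypothetical sketch of a caller, not taken from the patch; the demo_* names, the matching policy and the meaning of param[0] are all assumptions:

#include <linux/irq.h>
#include <linux/irqchip/irq-partition-percpu.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>

static struct partition_affinity demo_parts[2]; /* masks filled in at probe time */

static int demo_select(struct irq_domain *d, struct irq_fwspec *fwspec,
                       enum irq_domain_bus_token bus_token)
{
        return d->fwnode == fwspec->fwnode;     /* simplistic matching policy */
}

static int demo_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
                          irq_hw_number_t *hwirq, unsigned int *type)
{
        *hwirq = fwspec->param[0];              /* assumed: partition index */
        *type = IRQ_TYPE_NONE;
        return 0;
}

static const struct irq_domain_ops demo_part_ops = {
        .select         = demo_select,
        .translate      = demo_translate,
};

static void demo_partition_setup(struct fwnode_handle *fwnode, int chained_virq)
{
        struct partition_desc *desc;

        desc = partition_create_desc(fwnode, demo_parts, ARRAY_SIZE(demo_parts),
                                     chained_virq, &demo_part_ops);
        if (!desc)
                pr_err("partition setup failed\n");
}
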
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
index 50be9639e27e..e902f081e16c 100644
--- a/drivers/irqchip/irq-tegra.c
+++ b/drivers/irqchip/irq-tegra.c
@@ -235,7 +235,7 @@ static int tegra_ictlr_domain_translate(struct irq_domain *d,
 		return -EINVAL;
 
 	*hwirq = fwspec->param[1];
-	*type = fwspec->param[2];
+	*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
 	return 0;
 }
 
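
Masking the third specifier cell with IRQ_TYPE_SENSE_MASK keeps only the edge/level trigger bits; in some bindings that cell also carries extra flags in its upper bits. A one-line illustrative helper (hypothetical):

#include <linux/irq.h>
#include <linux/types.h>

static inline unsigned int demo_sense_bits(u32 cell)
{
        return cell & IRQ_TYPE_SENSE_MASK;      /* keep only the edge/level sense bits */
}
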
diff --git a/include/linux/irq.h b/include/linux/irq.h
index c4de62348ff2..4d758a7c604a 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -530,6 +530,10 @@ static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *c
 }
 
 extern int irq_set_percpu_devid(unsigned int irq);
+extern int irq_set_percpu_devid_partition(unsigned int irq,
+					   const struct cpumask *affinity);
+extern int irq_get_percpu_devid_partition(unsigned int irq,
+					   struct cpumask *affinity);
 
 extern void
 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index d5d798b35c1f..9e6fdd33bdb2 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -102,8 +102,6 @@
 #define GICR_SYNCR			0x00C0
 #define GICR_MOVLPIR			0x0100
 #define GICR_MOVALLR			0x0110
-#define GICR_ISACTIVER			GICD_ISACTIVER
-#define GICR_ICACTIVER			GICD_ICACTIVER
 #define GICR_IDREGS			GICD_IDREGS
 #define GICR_PIDR2			GICD_PIDR2
 
diff --git a/include/linux/irqchip/irq-partition-percpu.h b/include/linux/irqchip/irq-partition-percpu.h
new file mode 100644
index 000000000000..87433a5d1285
--- /dev/null
+++ b/include/linux/irqchip/irq-partition-percpu.h
@@ -0,0 +1,59 @@
1/*
2 * Copyright (C) 2016 ARM Limited, All Rights Reserved.
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/fwnode.h>
19#include <linux/cpumask.h>
20#include <linux/irqdomain.h>
21
22struct partition_affinity {
23 cpumask_t mask;
24 void *partition_id;
25};
26
27struct partition_desc;
28
29#ifdef CONFIG_PARTITION_PERCPU
30int partition_translate_id(struct partition_desc *desc, void *partition_id);
31struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
32 struct partition_affinity *parts,
33 int nr_parts,
34 int chained_irq,
35 const struct irq_domain_ops *ops);
36struct irq_domain *partition_get_domain(struct partition_desc *dsc);
37#else
38static inline int partition_translate_id(struct partition_desc *desc,
39 void *partition_id)
40{
41 return -EINVAL;
42}
43
44static inline
45struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
46 struct partition_affinity *parts,
47 int nr_parts,
48 int chained_irq,
49 const struct irq_domain_ops *ops)
50{
51 return NULL;
52}
53
54static inline
55struct irq_domain *partition_get_domain(struct partition_desc *dsc)
56{
57 return NULL;
58}
59#endif
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index dcca77c4b9d2..b51beebf9804 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -66,6 +66,7 @@ struct irq_desc {
 	int			threads_handled_last;
 	raw_spinlock_t		lock;
 	struct cpumask		*percpu_enabled;
+	const struct cpumask	*percpu_affinity;
 #ifdef CONFIG_SMP
 	const struct cpumask	*affinity_hint;
 	struct irq_affinity_notify *affinity_notify;
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 2aed04396210..f1f36e04d885 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -96,6 +96,8 @@ enum irq_domain_bus_token {
 struct irq_domain_ops {
 	int (*match)(struct irq_domain *d, struct device_node *node,
 		     enum irq_domain_bus_token bus_token);
+	int (*select)(struct irq_domain *d, struct irq_fwspec *fwspec,
+		      enum irq_domain_bus_token bus_token);
 	int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
 	void (*unmap)(struct irq_domain *d, unsigned int virq);
 	int (*xlate)(struct irq_domain *d, struct device_node *node,
@@ -211,7 +213,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
 					 irq_hw_number_t first_hwirq,
 					 const struct irq_domain_ops *ops,
 					 void *host_data);
-extern struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
+extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
 						   enum irq_domain_bus_token bus_token);
 extern void irq_set_default_host(struct irq_domain *host);
 extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
@@ -227,6 +229,17 @@ static inline bool is_fwnode_irqchip(struct fwnode_handle *fwnode)
 	return fwnode && fwnode->type == FWNODE_IRQCHIP;
 }
 
+static inline
+struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
+					    enum irq_domain_bus_token bus_token)
+{
+	struct irq_fwspec fwspec = {
+		.fwnode = fwnode,
+	};
+
+	return irq_find_matching_fwspec(&fwspec, bus_token);
+}
+
 static inline struct irq_domain *irq_find_matching_host(struct device_node *node,
 							 enum irq_domain_bus_token bus_token)
 {
@@ -346,9 +359,8 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
 				irq_hw_number_t *out_hwirq, unsigned int *out_type);
 
 /* IPI functions */
-unsigned int irq_reserve_ipi(struct irq_domain *domain,
-			     const struct cpumask *dest);
-void irq_destroy_ipi(unsigned int irq);
+int irq_reserve_ipi(struct irq_domain *domain, const struct cpumask *dest);
+int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest);
 
 /* V2 interfaces to support hierarchy IRQ domains. */
 extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
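
With the change above, irq_find_matching_fwnode() becomes a wrapper that builds an irq_fwspec carrying only the fwnode and calls irq_find_matching_fwspec(); passing a full specifier lets domains that implement select() match on the parameters as well. A caller-side sketch; demo_find_domain() is hypothetical:

#include <linux/irqdomain.h>
#include <linux/types.h>

static struct irq_domain *demo_find_domain(struct fwnode_handle *fwnode,
                                           u32 hwirq, u32 type)
{
        struct irq_fwspec fwspec = {
                .fwnode         = fwnode,
                .param_count    = 2,
                .param          = { hwirq, type },
        };

        return irq_find_matching_fwspec(&fwspec, DOMAIN_BUS_WIRED);
}
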
diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
index 14777af8e097..c42742208e5e 100644
--- a/kernel/irq/ipi.c
+++ b/kernel/irq/ipi.c
@@ -19,9 +19,9 @@
  *
  * Allocate a virq that can be used to send IPI to any CPU in dest mask.
  *
- * On success it'll return linux irq number and 0 on failure
+ * On success it'll return linux irq number and error code on failure
  */
-unsigned int irq_reserve_ipi(struct irq_domain *domain,
+int irq_reserve_ipi(struct irq_domain *domain,
 			     const struct cpumask *dest)
 {
 	unsigned int nr_irqs, offset;
@@ -30,18 +30,18 @@ unsigned int irq_reserve_ipi(struct irq_domain *domain,
 
 	if (!domain ||!irq_domain_is_ipi(domain)) {
 		pr_warn("Reservation on a non IPI domain\n");
-		return 0;
+		return -EINVAL;
 	}
 
 	if (!cpumask_subset(dest, cpu_possible_mask)) {
 		pr_warn("Reservation is not in possible_cpu_mask\n");
-		return 0;
+		return -EINVAL;
 	}
 
 	nr_irqs = cpumask_weight(dest);
 	if (!nr_irqs) {
 		pr_warn("Reservation for empty destination mask\n");
-		return 0;
+		return -EINVAL;
 	}
 
 	if (irq_domain_is_ipi_single(domain)) {
@@ -72,14 +72,14 @@ unsigned int irq_reserve_ipi(struct irq_domain *domain,
 		next = cpumask_next(next, dest);
 		if (next < nr_cpu_ids) {
 			pr_warn("Destination mask has holes\n");
-			return 0;
+			return -EINVAL;
 		}
 	}
 
 	virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE);
 	if (virq <= 0) {
 		pr_warn("Can't reserve IPI, failed to alloc descs\n");
-		return 0;
+		return -ENOMEM;
 	}
 
 	virq = __irq_domain_alloc_irqs(domain, virq, nr_irqs, NUMA_NO_NODE,
@@ -100,17 +100,20 @@ unsigned int irq_reserve_ipi(struct irq_domain *domain,
 
 free_descs:
 	irq_free_descs(virq, nr_irqs);
-	return 0;
+	return -EBUSY;
 }
 
 /**
  * irq_destroy_ipi() - unreserve an IPI that was previously allocated
  * @irq:	linux irq number to be destroyed
+ * @dest:	cpumask of cpus which should have the IPI removed
  *
- * Return the IPIs allocated with irq_reserve_ipi() to the system destroying
- * all virqs associated with them.
+ * The IPIs allocated with irq_reserve_ipi() are retuerned to the system
+ * destroying all virqs associated with them.
+ *
+ * Return 0 on success or error code on failure.
  */
-void irq_destroy_ipi(unsigned int irq)
+int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
 {
 	struct irq_data *data = irq_get_irq_data(irq);
 	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
@@ -118,7 +121,7 @@ void irq_destroy_ipi(unsigned int irq)
 	unsigned int nr_irqs;
 
 	if (!irq || !data || !ipimask)
-		return;
+		return -EINVAL;
 
 	domain = data->domain;
 	if (WARN_ON(domain == NULL))
@@ -126,15 +129,25 @@ void irq_destroy_ipi(unsigned int irq)
 
 	if (!irq_domain_is_ipi(domain)) {
 		pr_warn("Trying to destroy a non IPI domain!\n");
-		return;
+		return -EINVAL;
 	}
 
-	if (irq_domain_is_ipi_per_cpu(domain))
-		nr_irqs = cpumask_weight(ipimask);
-	else
+	if (WARN_ON(!cpumask_subset(dest, ipimask)))
+		/*
+		 * Must be destroying a subset of CPUs to which this IPI
+		 * was set up to target
+		 */
+		return -EINVAL;
+
+	if (irq_domain_is_ipi_per_cpu(domain)) {
+		irq = irq + cpumask_first(dest) - data->common->ipi_offset;
+		nr_irqs = cpumask_weight(dest);
+	} else {
 		nr_irqs = 1;
+	}
 
 	irq_domain_free_irqs(irq, nr_irqs);
+	return 0;
 }
 
 /**
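
After the conversion above, irq_reserve_ipi() returns a virq or a negative errno instead of 0 on failure, and irq_destroy_ipi() takes the cpumask to tear down and reports a status. A caller sketch; demo_ipi_cycle() is hypothetical:

#include <linux/cpumask.h>
#include <linux/irqdomain.h>

static int demo_ipi_cycle(struct irq_domain *ipi_domain)
{
        int virq;

        virq = irq_reserve_ipi(ipi_domain, cpu_possible_mask);
        if (virq < 0)
                return virq;

        /* ... use the IPI via the architecture's send hooks ... */

        return irq_destroy_ipi(virq, cpu_possible_mask);
}
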
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 0ccd028817d7..8731e1c5d1e7 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -595,7 +595,8 @@ void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
 	chip_bus_sync_unlock(desc);
 }
 
-int irq_set_percpu_devid(unsigned int irq)
+int irq_set_percpu_devid_partition(unsigned int irq,
+				   const struct cpumask *affinity)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
@@ -610,10 +611,33 @@ int irq_set_percpu_devid(unsigned int irq)
 	if (!desc->percpu_enabled)
 		return -ENOMEM;
 
+	if (affinity)
+		desc->percpu_affinity = affinity;
+	else
+		desc->percpu_affinity = cpu_possible_mask;
+
 	irq_set_percpu_devid_flags(irq);
 	return 0;
 }
 
+int irq_set_percpu_devid(unsigned int irq)
+{
+	return irq_set_percpu_devid_partition(irq, NULL);
+}
+
+int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (!desc || !desc->percpu_enabled)
+		return -EINVAL;
+
+	if (affinity)
+		cpumask_copy(affinity, desc->percpu_affinity);
+
+	return 0;
+}
+
 void kstat_incr_irq_this_cpu(unsigned int irq)
 {
 	kstat_incr_irqs_this_cpu(irq_to_desc(irq));
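
irq_set_percpu_devid_partition() records which CPUs a per-cpu interrupt targets (the core stores the pointer, so the mask must outlive the irq), and irq_get_percpu_devid_partition() copies it back out. A minimal usage sketch; demo_percpu_partition() is hypothetical:

#include <linux/cpumask.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static int demo_percpu_partition(unsigned int virq,
                                 const struct cpumask *part_mask)
{
        cpumask_var_t seen;
        int ret;

        /* part_mask must stay valid: the core keeps the pointer */
        ret = irq_set_percpu_devid_partition(virq, part_mask);
        if (ret)
                return ret;

        if (!zalloc_cpumask_var(&seen, GFP_KERNEL))
                return -ENOMEM;

        ret = irq_get_percpu_devid_partition(virq, seen);
        if (!ret)
                pr_info("virq %u targets %*pbl\n", virq, cpumask_pr_args(seen));

        free_cpumask_var(seen);
        return ret;
}
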
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 3a519a01118b..503c5b9dd030 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -243,14 +243,15 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
 EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
 
 /**
- * irq_find_matching_fwnode() - Locates a domain for a given fwnode
- * @fwnode: FW descriptor of the interrupt controller
+ * irq_find_matching_fwspec() - Locates a domain for a given fwspec
+ * @fwspec: FW specifier for an interrupt
  * @bus_token: domain-specific data
  */
-struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
+struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
 					    enum irq_domain_bus_token bus_token)
 {
 	struct irq_domain *h, *found = NULL;
+	struct fwnode_handle *fwnode = fwspec->fwnode;
 	int rc;
 
 	/* We might want to match the legacy controller last since
@@ -264,7 +265,9 @@ struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
 	 */
 	mutex_lock(&irq_domain_mutex);
 	list_for_each_entry(h, &irq_domain_list, link) {
-		if (h->ops->match)
+		if (h->ops->select && fwspec->param_count)
+			rc = h->ops->select(h, fwspec, bus_token);
+		else if (h->ops->match)
 			rc = h->ops->match(h, to_of_node(fwnode), bus_token);
 		else
 			rc = ((fwnode != NULL) && (h->fwnode == fwnode) &&
@@ -279,7 +282,7 @@ struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
 	mutex_unlock(&irq_domain_mutex);
 	return found;
 }
-EXPORT_SYMBOL_GPL(irq_find_matching_fwnode);
+EXPORT_SYMBOL_GPL(irq_find_matching_fwspec);
 
 /**
  * irq_set_default_host() - Set a "default" irq domain
@@ -574,11 +577,9 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
 	int virq;
 
 	if (fwspec->fwnode) {
-		domain = irq_find_matching_fwnode(fwspec->fwnode,
-						  DOMAIN_BUS_WIRED);
+		domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_WIRED);
 		if (!domain)
-			domain = irq_find_matching_fwnode(fwspec->fwnode,
-							  DOMAIN_BUS_ANY);
+			domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_ANY);
 	} else {
 		domain = irq_default_domain;
 	}
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index cc1cc641d653..ef0bc02c3a70 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1407,7 +1407,7 @@ int setup_irq(unsigned int irq, struct irqaction *act)
 	int retval;
 	struct irq_desc *desc = irq_to_desc(irq);
 
-	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
 		return -EINVAL;
 	chip_bus_lock(desc);
 	retval = __setup_irq(irq, desc, act);
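
The added !desc guard matters because irq_to_desc() returns NULL for an irq number that was never allocated, and the per-cpu-devid check would otherwise dereference it. A trivial guard in the same spirit; demo_irq_exists() is hypothetical:

#include <linux/irqdesc.h>
#include <linux/types.h>

static bool demo_irq_exists(unsigned int irq)
{
        return irq_to_desc(irq) != NULL;
}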