author		Linus Torvalds <torvalds@linux-foundation.org>	2017-09-04 16:08:27 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-09-04 16:08:27 -0400
commit		93cc1228b4a60584f3dadbd84f19f365bb945acb (patch)
tree		9b50949e72f6df7116727e898a137f0881cb1b43
parent		dd90cccffc20a15d8e4c3ac8813f4b6a6cd4766f (diff)
parent		9fbd7fd28d1a1053325967670915c12b4b246a61 (diff)
Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq updates from Thomas Gleixner:
 "The interrupt subsystem delivers this time:

   - Refactoring of the GIC-V3 driver to prepare for the GIC-V4 support

   - Initial GIC-V4 support

   - Consolidation of the FSL MSI support

   - Utilize the effective affinity interface in various ARM irqchip
     drivers

   - Yet another interrupt chip driver (UniPhier AIDET)

   - Bulk conversion of the irq chip driver to use %pOF

   - The usual small fixes and improvements all over the place"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (77 commits)
  irqchip/ls-scfg-msi: Add MSI affinity support
  irqchip/ls-scfg-msi: Add LS1043a v1.1 MSI support
  irqchip/ls-scfg-msi: Add LS1046a MSI support
  arm64: dts: ls1046a: Add MSI dts node
  arm64: dts: ls1043a: Share all MSIs
  arm: dts: ls1021a: Share all MSIs
  arm64: dts: ls1043a: Fix typo of MSI compatible string
  arm: dts: ls1021a: Fix typo of MSI compatible string
  irqchip/ls-scfg-msi: Fix typo of MSI compatible strings
  irqchip/irq-bcm7120-l2: Use correct I/O accessors for irq_fwd_mask
  irqchip/mmp: Make mmp_intc_conf const
  irqchip/gic: Make irq_chip const
  irqchip/gic-v3: Advertise GICv4 support to KVM
  irqchip/gic-v4: Enable low-level GICv4 operations
  irqchip/gic-v4: Add some basic documentation
  irqchip/gic-v4: Add VLPI configuration interface
  irqchip/gic-v4: Add VPE command interface
  irqchip/gic-v4: Add per-VM VPE domain creation
  irqchip/gic-v3-its: Set implementation defined bit to enable VLPIs
  irqchip/gic-v3-its: Allow doorbell interrupts to be injected/cleared
  ...
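A note on the "%pOF" conversion called out above: it replaces open-coded uses of the OF node's full_name field with the printk format specifier that renders the device tree path from the node pointer itself, a pattern that recurs throughout the irqchip diffs below. A minimal before/after sketch in kernel C (the message text is illustrative, not taken from any one driver):

	/* before: depends on node->full_name, which the OF core is retiring */
	pr_err("%s: unable to map registers\n", node->full_name);

	/* after: vsnprintf() prints the node path via the %pOF specifier */
	pr_err("%pOF: unable to map registers\n", node);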
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt  8
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/socionext,uniphier-aidet.txt  32
-rw-r--r--  Documentation/driver-model/devres.txt  1
-rw-r--r--  MAINTAINERS  1
-rw-r--r--  arch/arm/boot/dts/ls1021a.dtsi  8
-rw-r--r--  arch/arm/include/asm/arch_gicv3.h  34
-rw-r--r--  arch/arm/mach-hisi/Kconfig  1
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi  12
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi  31
-rw-r--r--  arch/arm64/include/asm/arch_gicv3.h  7
-rw-r--r--  arch/metag/Kconfig  1
-rw-r--r--  drivers/irqchip/Kconfig  15
-rw-r--r--  drivers/irqchip/Makefile  3
-rw-r--r--  drivers/irqchip/irq-armada-370-xp.c  5
-rw-r--r--  drivers/irqchip/irq-bcm2835.c  9
-rw-r--r--  drivers/irqchip/irq-bcm2836.c  5
-rw-r--r--  drivers/irqchip/irq-bcm6345-l1.c  3
-rw-r--r--  drivers/irqchip/irq-bcm7038-l1.c  3
-rw-r--r--  drivers/irqchip/irq-bcm7120-l2.c  10
-rw-r--r--  drivers/irqchip/irq-crossbar.c  6
-rw-r--r--  drivers/irqchip/irq-digicolor.c  8
-rw-r--r--  drivers/irqchip/irq-dw-apb-ictl.c  12
-rw-r--r--  drivers/irqchip/irq-gic-v3-its-pci-msi.c  2
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c  1496
-rw-r--r--  drivers/irqchip/irq-gic-v3.c  109
-rw-r--r--  drivers/irqchip/irq-gic-v4.c  225
-rw-r--r--  drivers/irqchip/irq-gic.c  5
-rw-r--r--  drivers/irqchip/irq-hip04.c  3
-rw-r--r--  drivers/irqchip/irq-imx-gpcv2.c  4
-rw-r--r--  drivers/irqchip/irq-lpc32xx.c  2
-rw-r--r--  drivers/irqchip/irq-ls-scfg-msi.c  256
-rw-r--r--  drivers/irqchip/irq-metag-ext.c  4
-rw-r--r--  drivers/irqchip/irq-mips-gic.c  10
-rw-r--r--  drivers/irqchip/irq-mmp.c  4
-rw-r--r--  drivers/irqchip/irq-mtk-sysirq.c  3
-rw-r--r--  drivers/irqchip/irq-mxs.c  4
-rw-r--r--  drivers/irqchip/irq-stm32-exti.c  8
-rw-r--r--  drivers/irqchip/irq-sun4i.c  6
-rw-r--r--  drivers/irqchip/irq-tegra.c  16
-rw-r--r--  drivers/irqchip/irq-uniphier-aidet.c  261
-rw-r--r--  drivers/irqchip/irq-xilinx-intc.c  4
-rw-r--r--  drivers/irqchip/irq-xtensa-mx.c  6
-rw-r--r--  include/linux/irq.h  7
-rw-r--r--  include/linux/irq_sim.h  44
-rw-r--r--  include/linux/irqchip/arm-gic-common.h  2
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h  84
-rw-r--r--  include/linux/irqchip/arm-gic-v4.h  105
-rw-r--r--  include/linux/irqdomain.h  3
-rw-r--r--  kernel/irq/Kconfig  9
-rw-r--r--  kernel/irq/Makefile  1
-rw-r--r--  kernel/irq/chip.c  109
-rw-r--r--  kernel/irq/debugfs.c  50
-rw-r--r--  kernel/irq/internals.h  2
-rw-r--r--  kernel/irq/irq_sim.c  164
-rw-r--r--  kernel/irq/irqdomain.c  230
-rw-r--r--  kernel/irq/manage.c  14
-rw-r--r--  kernel/irq/proc.c  8
57 files changed, 3182 insertions, 293 deletions
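
Many of the driver diffs below apply the effective-affinity interface the same way: after reprogramming the hardware route in the irq_chip's set-affinity callback, record the CPU the interrupt will actually target with irq_data_update_effective_affinity(), and mark single-target interrupts at map time with irqd_set_single_target(). A minimal sketch of the callback shape, assuming a hypothetical foo_route_irq_to_cpu() register poke (only the two irq core calls are the real interface used in these patches):

	static int foo_set_affinity(struct irq_data *d,
				    const struct cpumask *mask, bool force)
	{
		unsigned int cpu = cpumask_first_and(mask, cpu_online_mask);

		if (cpu >= nr_cpu_ids)
			return -EINVAL;

		foo_route_irq_to_cpu(d->hwirq, cpu);	/* hypothetical h/w poke */

		/* Tell the core which CPU the interrupt is really wired to */
		irq_data_update_effective_affinity(d, cpumask_of(cpu));

		return IRQ_SET_MASK_OK;
	}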
diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt b/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt
index 9e389493203f..49ccabbfa6f3 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt
@@ -4,8 +4,10 @@ Required properties:
 
 - compatible: should be "fsl,<soc-name>-msi" to identify
 	Layerscape PCIe MSI controller block such as:
-	"fsl,1s1021a-msi"
-	"fsl,1s1043a-msi"
+	"fsl,ls1021a-msi"
+	"fsl,ls1043a-msi"
+	"fsl,ls1046a-msi"
+	"fsl,ls1043a-v1.1-msi"
 - msi-controller: indicates that this is a PCIe MSI controller node
 - reg: physical base address of the controller and length of memory mapped.
 - interrupts: an interrupt to the parent interrupt controller.
@@ -23,7 +25,7 @@ MSI controller node
 Examples:
 
 	msi1: msi-controller@1571000 {
-		compatible = "fsl,1s1043a-msi";
+		compatible = "fsl,ls1043a-msi";
 		reg = <0x0 0x1571000 0x0 0x8>,
 		msi-controller;
 		interrupts = <0 116 0x4>;
diff --git a/Documentation/devicetree/bindings/interrupt-controller/socionext,uniphier-aidet.txt b/Documentation/devicetree/bindings/interrupt-controller/socionext,uniphier-aidet.txt
new file mode 100644
index 000000000000..48e71d3ac2ad
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/socionext,uniphier-aidet.txt
@@ -0,0 +1,32 @@
+UniPhier AIDET
+
+UniPhier AIDET (ARM Interrupt Detector) is an add-on block for ARM GIC (Generic
+Interrupt Controller). GIC itself can handle only high level and rising edge
+interrupts. The AIDET provides logic inverter to support low level and falling
+edge interrupts.
+
+Required properties:
+- compatible: Should be one of the following:
+    "socionext,uniphier-ld4-aidet"  - for LD4 SoC
+    "socionext,uniphier-pro4-aidet" - for Pro4 SoC
+    "socionext,uniphier-sld8-aidet" - for sLD8 SoC
+    "socionext,uniphier-pro5-aidet" - for Pro5 SoC
+    "socionext,uniphier-pxs2-aidet" - for PXs2/LD6b SoC
+    "socionext,uniphier-ld11-aidet" - for LD11 SoC
+    "socionext,uniphier-ld20-aidet" - for LD20 SoC
+    "socionext,uniphier-pxs3-aidet" - for PXs3 SoC
+- reg: Specifies offset and length of the register set for the device.
+- interrupt-controller: Identifies the node as an interrupt controller
+- #interrupt-cells : Specifies the number of cells needed to encode an interrupt
+  source. The value should be 2. The first cell defines the interrupt number
+  (corresponds to the SPI interrupt number of GIC). The second cell specifies
+  the trigger type as defined in interrupts.txt in this directory.
+
+Example:
+
+	aidet: aidet@5fc20000 {
+		compatible = "socionext,uniphier-pro4-aidet";
+		reg = <0x5fc20000 0x200>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 30e04f7a690d..69f08c0f23a8 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -312,6 +312,7 @@ IRQ
   devm_irq_alloc_descs_from()
   devm_irq_alloc_generic_chip()
   devm_irq_setup_generic_chip()
+  devm_irq_sim_init()
 
 LED
   devm_led_classdev_register()
diff --git a/MAINTAINERS b/MAINTAINERS
index 75994ad6333f..b81e93b71c4b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1993,6 +1993,7 @@ F: arch/arm64/boot/dts/socionext/
 F:	drivers/bus/uniphier-system-bus.c
 F:	drivers/clk/uniphier/
 F:	drivers/i2c/busses/i2c-uniphier*
+F:	drivers/irqchip/irq-uniphier-aidet.c
 F:	drivers/pinctrl/uniphier/
 F:	drivers/reset/reset-uniphier.c
 F:	drivers/tty/serial/8250/8250_uniphier.c
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 7bb9df2c1460..9319e1f0f1d8 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -129,14 +129,14 @@
 		};
 
 		msi1: msi-controller@1570e00 {
-			compatible = "fsl,1s1021a-msi";
+			compatible = "fsl,ls1021a-msi";
 			reg = <0x0 0x1570e00 0x0 0x8>;
 			msi-controller;
 			interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		msi2: msi-controller@1570e08 {
-			compatible = "fsl,1s1021a-msi";
+			compatible = "fsl,ls1021a-msi";
 			reg = <0x0 0x1570e08 0x0 0x8>;
 			msi-controller;
 			interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
@@ -699,7 +699,7 @@
 			bus-range = <0x0 0xff>;
 			ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000   /* downstream I/O */
 				  0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-			msi-parent = <&msi1>;
+			msi-parent = <&msi1>, <&msi2>;
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 7>;
 			interrupt-map = <0000 0 0 1 &gic GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>,
@@ -722,7 +722,7 @@
 			bus-range = <0x0 0xff>;
 			ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000   /* downstream I/O */
 				  0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-			msi-parent = <&msi2>;
+			msi-parent = <&msi1>, <&msi2>;
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 7>;
 			interrupt-map = <0000 0 0 1 &gic GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index 27475904e096..eee269321923 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -276,6 +276,12 @@ static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
 #define gicr_write_pendbaser(v, c)	__gic_writeq_nonatomic(v, c)
 
 /*
+ * GICR_xLPIR - only the lower bits are significant
+ */
+#define gic_read_lpir(c)		readl_relaxed(c)
+#define gic_write_lpir(v, c)		writel_relaxed(lower_32_bits(v), c)
+
+/*
  * GITS_TYPER is an ID register and doesn't need atomicity.
  */
 #define gits_read_typer(c)		__gic_readq_nonatomic(c)
@@ -291,5 +297,33 @@ static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
  */
 #define gits_write_cwriter(v, c)	__gic_writeq_nonatomic(v, c)
 
+/*
+ * GITS_VPROPBASER - hi and lo bits may be accessed independently.
+ */
+#define gits_write_vpropbaser(v, c)	__gic_writeq_nonatomic(v, c)
+
+/*
+ * GITS_VPENDBASER - the Valid bit must be cleared before changing
+ * anything else.
+ */
+static inline void gits_write_vpendbaser(u64 val, void * __iomem addr)
+{
+	u32 tmp;
+
+	tmp = readl_relaxed(addr + 4);
+	if (tmp & (GICR_VPENDBASER_Valid >> 32)) {
+		tmp &= ~(GICR_VPENDBASER_Valid >> 32);
+		writel_relaxed(tmp, addr + 4);
+	}
+
+	/*
+	 * Use the fact that __gic_writeq_nonatomic writes the second
+	 * half of the 64bit quantity after the first.
+	 */
+	__gic_writeq_nonatomic(val, addr);
+}
+
+#define gits_read_vpendbaser(c)		__gic_readq_nonatomic(c)
+
 #endif /* !__ASSEMBLY__ */
 #endif /* !__ASM_ARCH_GICV3_H */
diff --git a/arch/arm/mach-hisi/Kconfig b/arch/arm/mach-hisi/Kconfig
index a3b091a4d344..65a048fa08ec 100644
--- a/arch/arm/mach-hisi/Kconfig
+++ b/arch/arm/mach-hisi/Kconfig
@@ -39,6 +39,7 @@ config ARCH_HIP04
 	select HAVE_ARM_ARCH_TIMER
 	select MCPM if SMP
 	select MCPM_QUAD_CLUSTER if SMP
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 	help
 	  Support for Hisilicon HiP04 SoC family
 
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index 31fd77f82ced..d16b9cc1e825 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -653,21 +653,21 @@
 		};
 
 		msi1: msi-controller1@1571000 {
-			compatible = "fsl,1s1043a-msi";
+			compatible = "fsl,ls1043a-msi";
 			reg = <0x0 0x1571000 0x0 0x8>;
 			msi-controller;
 			interrupts = <0 116 0x4>;
 		};
 
 		msi2: msi-controller2@1572000 {
-			compatible = "fsl,1s1043a-msi";
+			compatible = "fsl,ls1043a-msi";
 			reg = <0x0 0x1572000 0x0 0x8>;
 			msi-controller;
 			interrupts = <0 126 0x4>;
 		};
 
 		msi3: msi-controller3@1573000 {
-			compatible = "fsl,1s1043a-msi";
+			compatible = "fsl,ls1043a-msi";
 			reg = <0x0 0x1573000 0x0 0x8>;
 			msi-controller;
 			interrupts = <0 160 0x4>;
@@ -689,7 +689,7 @@
 			bus-range = <0x0 0xff>;
 			ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000   /* downstream I/O */
 				  0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-			msi-parent = <&msi1>;
+			msi-parent = <&msi1>, <&msi2>, <&msi3>;
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 7>;
 			interrupt-map = <0000 0 0 1 &gic 0 110 0x4>,
@@ -714,7 +714,7 @@
 			bus-range = <0x0 0xff>;
 			ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000   /* downstream I/O */
 				  0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-			msi-parent = <&msi2>;
+			msi-parent = <&msi1>, <&msi2>, <&msi3>;
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 7>;
 			interrupt-map = <0000 0 0 1 &gic 0 120 0x4>,
@@ -739,7 +739,7 @@
 			bus-range = <0x0 0xff>;
 			ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000   /* downstream I/O */
 				  0x82000000 0x0 0x40000000 0x50 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-			msi-parent = <&msi3>;
+			msi-parent = <&msi1>, <&msi2>, <&msi3>;
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 7>;
 			interrupt-map = <0000 0 0 1 &gic 0 154 0x4>,
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
index dc1640be0345..c8ff0baddf1d 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
@@ -630,6 +630,37 @@
 			interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clockgen 4 1>;
 		};
+
+		msi1: msi-controller@1580000 {
+			compatible = "fsl,ls1046a-msi";
+			msi-controller;
+			reg = <0x0 0x1580000 0x0 0x10000>;
+			interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		msi2: msi-controller@1590000 {
+			compatible = "fsl,ls1046a-msi";
+			msi-controller;
+			reg = <0x0 0x1590000 0x0 0x10000>;
+			interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		msi3: msi-controller@15a0000 {
+			compatible = "fsl,ls1046a-msi";
+			msi-controller;
+			reg = <0x0 0x15a0000 0x0 0x10000>;
+			interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
 	};
 
 	reserved-memory {
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 8cef47fa2218..b7e3f74822da 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -116,6 +116,8 @@ static inline void gic_write_bpr1(u32 val)
 
 #define gic_read_typer(c)		readq_relaxed(c)
 #define gic_write_irouter(v, c)		writeq_relaxed(v, c)
+#define gic_read_lpir(c)		readq_relaxed(c)
+#define gic_write_lpir(v, c)		writeq_relaxed(v, c)
 
 #define gic_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))
 
@@ -133,5 +135,10 @@ static inline void gic_write_bpr1(u32 val)
 #define gicr_write_pendbaser(v, c)	writeq_relaxed(v, c)
 #define gicr_read_pendbaser(c)		readq_relaxed(c)
 
+#define gits_write_vpropbaser(v, c)	writeq_relaxed(v, c)
+
+#define gits_write_vpendbaser(v, c)	writeq_relaxed(v, c)
+#define gits_read_vpendbaser(c)		readq_relaxed(c)
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_ARCH_GICV3_H */
diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig
index 5b7a45d99cfb..7d8b322e5101 100644
--- a/arch/metag/Kconfig
+++ b/arch/metag/Kconfig
@@ -26,6 +26,7 @@ config METAG
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_UNDERSCORE_SYMBOL_PREFIX
 	select IRQ_DOMAIN
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 	select MODULES_USE_ELF_RELA
 	select OF
 	select OF_EARLY_FLATTREE
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index f1fd5f44d1d4..9d8a1dd2e2c2 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -7,6 +7,7 @@ config ARM_GIC
 	select IRQ_DOMAIN
 	select IRQ_DOMAIN_HIERARCHY
 	select MULTI_IRQ_HANDLER
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 
 config ARM_GIC_PM
 	bool
@@ -34,6 +35,7 @@ config ARM_GIC_V3
 	select MULTI_IRQ_HANDLER
 	select IRQ_DOMAIN_HIERARCHY
 	select PARTITION_PERCPU
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 
 config ARM_GIC_V3_ITS
 	bool
@@ -64,6 +66,7 @@ config ARMADA_370_XP_IRQ
 	bool
 	select GENERIC_IRQ_CHIP
 	select PCI_MSI if PCI
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 
 config ALPINE_MSI
 	bool
@@ -93,11 +96,13 @@ config BCM6345_L1_IRQ
 	bool
 	select GENERIC_IRQ_CHIP
 	select IRQ_DOMAIN
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 
 config BCM7038_L1_IRQ
 	bool
 	select GENERIC_IRQ_CHIP
 	select IRQ_DOMAIN
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 
 config BCM7120_L2_IRQ
 	bool
@@ -136,6 +141,7 @@ config IRQ_MIPS_CPU
 	select GENERIC_IRQ_IPI if SYS_SUPPORTS_MULTITHREADING
 	select IRQ_DOMAIN
 	select IRQ_DOMAIN_HIERARCHY if GENERIC_IRQ_IPI
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 
 config CLPS711X_IRQCHIP
 	bool
@@ -217,6 +223,7 @@ config VERSATILE_FPGA_IRQ_NR
 config XTENSA_MX
 	bool
 	select IRQ_DOMAIN
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 
 config XILINX_INTC
 	bool
@@ -306,3 +313,11 @@ config QCOM_IRQ_COMBINER
 	help
 	  Say yes here to add support for the IRQ combiner devices embedded
 	  in Qualcomm Technologies chips.
+
+config IRQ_UNIPHIER_AIDET
+	bool "UniPhier AIDET support" if COMPILE_TEST
+	depends on ARCH_UNIPHIER || COMPILE_TEST
+	default ARCH_UNIPHIER
+	select IRQ_DOMAIN_HIERARCHY
+	help
+	  Support for the UniPhier AIDET (ARM Interrupt Detector).
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index e88d856cc09c..845abc107ad5 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -28,7 +28,7 @@ obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o
 obj-$(CONFIG_ARCH_REALVIEW)		+= irq-gic-realview.o
 obj-$(CONFIG_ARM_GIC_V2M)		+= irq-gic-v2m.o
 obj-$(CONFIG_ARM_GIC_V3)		+= irq-gic-v3.o irq-gic-common.o
-obj-$(CONFIG_ARM_GIC_V3_ITS)		+= irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o
+obj-$(CONFIG_ARM_GIC_V3_ITS)		+= irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
 obj-$(CONFIG_PARTITION_PERCPU)		+= irq-partition-percpu.o
 obj-$(CONFIG_HISILICON_IRQ_MBIGEN)	+= irq-mbigen.o
 obj-$(CONFIG_ARM_NVIC)			+= irq-nvic.o
@@ -78,3 +78,4 @@ obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o
 obj-$(CONFIG_ARCH_ASPEED)		+= irq-aspeed-vic.o irq-aspeed-i2c-ic.o
 obj-$(CONFIG_STM32_EXTI)		+= irq-stm32-exti.o
 obj-$(CONFIG_QCOM_IRQ_COMBINER)		+= qcom-irq-combiner.o
+obj-$(CONFIG_IRQ_UNIPHIER_AIDET)	+= irq-uniphier-aidet.o
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index b207b2c3aa55..c9bdc5221b82 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -203,7 +203,7 @@ static struct irq_chip armada_370_xp_msi_irq_chip = {
 
 static struct msi_domain_info armada_370_xp_msi_domain_info = {
 	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
-		   MSI_FLAG_MULTI_PCI_MSI),
+		   MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
 	.chip	= &armada_370_xp_msi_irq_chip,
 };
 
@@ -330,6 +330,8 @@ static int armada_xp_set_affinity(struct irq_data *d,
 	writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
 	raw_spin_unlock(&irq_controller_lock);
 
+	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
 	return IRQ_SET_MASK_OK;
 }
 #endif
@@ -363,6 +365,7 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
 	} else {
 		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
 					 handle_level_irq);
+		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
 	}
 	irq_set_probe(virq);
 
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index 44d7c38dde47..d2da8a1e6b1b 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -147,13 +147,12 @@ static int __init armctrl_of_init(struct device_node *node,
 
 	base = of_iomap(node, 0);
 	if (!base)
-		panic("%s: unable to map IC registers\n",
-		      node->full_name);
+		panic("%pOF: unable to map IC registers\n", node);
 
 	intc.domain = irq_domain_add_linear(node, MAKE_HWIRQ(NR_BANKS, 0),
 					    &armctrl_ops, NULL);
 	if (!intc.domain)
-		panic("%s: unable to create IRQ domain\n", node->full_name);
+		panic("%pOF: unable to create IRQ domain\n", node);
 
 	for (b = 0; b < NR_BANKS; b++) {
 		intc.pending[b] = base + reg_pending[b];
@@ -173,8 +172,8 @@ static int __init armctrl_of_init(struct device_node *node,
 		int parent_irq = irq_of_parse_and_map(node, 0);
 
 		if (!parent_irq) {
-			panic("%s: unable to get parent interrupt.\n",
-			      node->full_name);
+			panic("%pOF: unable to get parent interrupt.\n",
+			      node);
 		}
 		irq_set_chained_handler(parent_irq, bcm2836_chained_handle_irq);
 	} else {
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index e7463e3c0814..dc8c1e3eafe7 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -282,8 +282,7 @@ static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
 {
 	intc.base = of_iomap(node, 0);
 	if (!intc.base) {
-		panic("%s: unable to map local interrupt registers\n",
-		      node->full_name);
+		panic("%pOF: unable to map local interrupt registers\n", node);
 	}
 
 	bcm2835_init_local_timer_frequency();
@@ -292,7 +291,7 @@ static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
 					    &bcm2836_arm_irqchip_intc_ops,
 					    NULL);
 	if (!intc.domain)
-		panic("%s: unable to create IRQ domain\n", node->full_name);
+		panic("%pOF: unable to create IRQ domain\n", node);
 
 	bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTPSIRQ,
 					 &bcm2836_arm_irqchip_timer);
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
index daa4ae89e466..43f8abe40878 100644
--- a/drivers/irqchip/irq-bcm6345-l1.c
+++ b/drivers/irqchip/irq-bcm6345-l1.c
@@ -231,6 +231,8 @@ static int bcm6345_l1_set_affinity(struct irq_data *d,
 	}
 	raw_spin_unlock_irqrestore(&intc->lock, flags);
 
+	irq_data_update_effective_affinity(d, cpumask_of(new_cpu));
+
 	return IRQ_SET_MASK_OK_NOCOPY;
 }
 
@@ -291,6 +293,7 @@ static int bcm6345_l1_map(struct irq_domain *d, unsigned int virq,
 	irq_set_chip_and_handler(virq,
 		&bcm6345_l1_irq_chip, handle_percpu_irq);
 	irq_set_chip_data(virq, d->host_data);
+	irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
 	return 0;
 }
 
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index c2662a1bfdd3..55cfb986225b 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -212,6 +212,8 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
 	__bcm7038_l1_unmask(d, first_cpu);
 
 	raw_spin_unlock_irqrestore(&intc->lock, flags);
+	irq_data_update_effective_affinity(d, cpumask_of(first_cpu));
+
 	return 0;
 }
 
@@ -299,6 +301,7 @@ static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
 {
 	irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq);
 	irq_set_chip_data(virq, d->host_data);
+	irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
 	return 0;
 }
 
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index 64c2692070ef..983640eba418 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -250,12 +250,6 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
 	if (ret < 0)
 		goto out_free_l1_data;
 
-	for (idx = 0; idx < data->n_words; idx++) {
-		__raw_writel(data->irq_fwd_mask[idx],
-			     data->pair_base[idx] +
-			     data->en_offset[idx]);
-	}
-
 	for (irq = 0; irq < data->num_parent_irqs; irq++) {
 		ret = bcm7120_l2_intc_init_one(dn, data, irq, valid_mask);
 		if (ret)
@@ -297,6 +291,10 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
 		gc->reg_base = data->pair_base[idx];
 		ct->regs.mask = data->en_offset[idx];
 
+		/* gc->reg_base is defined and so is gc->writel */
+		irq_reg_writel(gc, data->irq_fwd_mask[idx],
+			       data->en_offset[idx]);
+
 		ct->chip.irq_mask = irq_gc_mask_clr_bit;
 		ct->chip.irq_unmask = irq_gc_mask_set_bit;
 		ct->chip.irq_ack = irq_gc_noop;
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index f96601268f71..99d97d7e3fd7 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -341,13 +341,13 @@ static int __init irqcrossbar_init(struct device_node *node,
 	int err;
 
 	if (!parent) {
-		pr_err("%s: no parent, giving up\n", node->full_name);
+		pr_err("%pOF: no parent, giving up\n", node);
 		return -ENODEV;
 	}
 
 	parent_domain = irq_find_host(parent);
 	if (!parent_domain) {
-		pr_err("%s: unable to obtain parent domain\n", node->full_name);
+		pr_err("%pOF: unable to obtain parent domain\n", node);
 		return -ENXIO;
 	}
 
@@ -360,7 +360,7 @@ static int __init irqcrossbar_init(struct device_node *node,
 					  node, &crossbar_domain_ops,
 					  NULL);
 	if (!domain) {
-		pr_err("%s: failed to allocated domain\n", node->full_name);
+		pr_err("%pOF: failed to allocated domain\n", node);
 		return -ENOMEM;
 	}
 
diff --git a/drivers/irqchip/irq-digicolor.c b/drivers/irqchip/irq-digicolor.c
index 3aae015469a5..fc38d2da11b9 100644
--- a/drivers/irqchip/irq-digicolor.c
+++ b/drivers/irqchip/irq-digicolor.c
@@ -78,7 +78,7 @@ static int __init digicolor_of_init(struct device_node *node,
 
 	reg_base = of_iomap(node, 0);
 	if (!reg_base) {
-		pr_err("%s: unable to map IC registers\n", node->full_name);
+		pr_err("%pOF: unable to map IC registers\n", node);
 		return -ENXIO;
 	}
 
@@ -88,7 +88,7 @@ static int __init digicolor_of_init(struct device_node *node,
 
 	ucregs = syscon_regmap_lookup_by_phandle(node, "syscon");
 	if (IS_ERR(ucregs)) {
-		pr_err("%s: unable to map UC registers\n", node->full_name);
+		pr_err("%pOF: unable to map UC registers\n", node);
 		return PTR_ERR(ucregs);
 	}
 	/* channel 1, regular IRQs */
@@ -97,7 +97,7 @@ static int __init digicolor_of_init(struct device_node *node,
 	digicolor_irq_domain =
 		irq_domain_add_linear(node, 64, &irq_generic_chip_ops, NULL);
 	if (!digicolor_irq_domain) {
-		pr_err("%s: unable to create IRQ domain\n", node->full_name);
+		pr_err("%pOF: unable to create IRQ domain\n", node);
 		return -ENOMEM;
 	}
 
@@ -105,7 +105,7 @@ static int __init digicolor_of_init(struct device_node *node,
105 "digicolor_irq", handle_level_irq, 105 "digicolor_irq", handle_level_irq,
106 clr, 0, 0); 106 clr, 0, 0);
107 if (ret) { 107 if (ret) {
108 pr_err("%s: unable to allocate IRQ gc\n", node->full_name); 108 pr_err("%pOF: unable to allocate IRQ gc\n", node);
109 return ret; 109 return ret;
110 } 110 }
111 111
diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c
index 052f266364c0..0a19618ce2c8 100644
--- a/drivers/irqchip/irq-dw-apb-ictl.c
+++ b/drivers/irqchip/irq-dw-apb-ictl.c
@@ -79,24 +79,24 @@ static int __init dw_apb_ictl_init(struct device_node *np,
 	/* Map the parent interrupt for the chained handler */
 	irq = irq_of_parse_and_map(np, 0);
 	if (irq <= 0) {
-		pr_err("%s: unable to parse irq\n", np->full_name);
+		pr_err("%pOF: unable to parse irq\n", np);
 		return -EINVAL;
 	}
 
 	ret = of_address_to_resource(np, 0, &r);
 	if (ret) {
-		pr_err("%s: unable to get resource\n", np->full_name);
+		pr_err("%pOF: unable to get resource\n", np);
 		return ret;
 	}
 
 	if (!request_mem_region(r.start, resource_size(&r), np->full_name)) {
-		pr_err("%s: unable to request mem region\n", np->full_name);
+		pr_err("%pOF: unable to request mem region\n", np);
 		return -ENOMEM;
 	}
 
 	iobase = ioremap(r.start, resource_size(&r));
 	if (!iobase) {
-		pr_err("%s: unable to map resource\n", np->full_name);
+		pr_err("%pOF: unable to map resource\n", np);
 		ret = -ENOMEM;
 		goto err_release;
 	}
@@ -123,7 +123,7 @@ static int __init dw_apb_ictl_init(struct device_node *np,
 	domain = irq_domain_add_linear(np, nrirqs,
 				       &irq_generic_chip_ops, NULL);
 	if (!domain) {
-		pr_err("%s: unable to add irq domain\n", np->full_name);
+		pr_err("%pOF: unable to add irq domain\n", np);
 		ret = -ENOMEM;
 		goto err_unmap;
 	}
@@ -132,7 +132,7 @@ static int __init dw_apb_ictl_init(struct device_node *np,
 					     handle_level_irq, clr, 0,
 					     IRQ_GC_INIT_MASK_CACHE);
 	if (ret) {
-		pr_err("%s: unable to alloc irq domain gc\n", np->full_name);
+		pr_err("%pOF: unable to alloc irq domain gc\n", np);
 		goto err_unmap;
 	}
 
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index 77931214d954..14a8c0a7e095 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -138,7 +138,7 @@ static int __init its_pci_of_msi_init(void)
 		if (its_pci_msi_init_one(of_node_to_fwnode(np), np->full_name))
 			continue;
 
-		pr_info("PCI/MSI: %s domain created\n", np->full_name);
+		pr_info("PCI/MSI: %pOF domain created\n", np);
 	}
 
 	return 0;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 284738add89b..e8d89343d613 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
+ * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
  * Author: Marc Zyngier <marc.zyngier@arm.com>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -36,6 +36,7 @@
 
 #include <linux/irqchip.h>
 #include <linux/irqchip/arm-gic-v3.h>
+#include <linux/irqchip/arm-gic-v4.h>
 
 #include <asm/cputype.h>
 #include <asm/exception.h>
@@ -48,6 +49,19 @@
 
 #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
 
+static u32 lpi_id_bits;
+
+/*
+ * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
+ * deal with (one configuration byte per interrupt). PENDBASE has to
+ * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
+ */
+#define LPI_NRBITS		lpi_id_bits
+#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
+#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
+
+#define LPI_PROP_DEFAULT_PRIO	0xa0
+
 /*
  * Collection structure - just an ID, and a redistributor address to
  * ping. We use one per CPU as a bag of interrupts assigned to this
@@ -88,6 +102,7 @@ struct its_node {
 	u32			ite_size;
 	u32			device_ids;
 	int			numa_node;
+	bool			is_v4;
 };
 
 #define ITS_ITT_ALIGN		SZ_256
@@ -100,11 +115,17 @@ struct event_lpi_map {
 	u16			*col_map;
 	irq_hw_number_t		lpi_base;
 	int			nr_lpis;
+	struct mutex		vlpi_lock;
+	struct its_vm		*vm;
+	struct its_vlpi_map	*vlpi_maps;
+	int			nr_vlpis;
 };
 
 /*
- * The ITS view of a device - belongs to an ITS, a collection, owns an
- * interrupt translation table, and a list of interrupts.
+ * The ITS view of a device - belongs to an ITS, owns an interrupt
+ * translation table, and a list of interrupts.  If it some of its
+ * LPIs are injected into a guest (GICv4), the event_map.vm field
+ * indicates which one.
  */
 struct its_device {
 	struct list_head	entry;
@@ -115,13 +136,33 @@ struct its_device {
 	u32			device_id;
 };
 
+static struct {
+	raw_spinlock_t		lock;
+	struct its_device	*dev;
+	struct its_vpe		**vpes;
+	int			next_victim;
+} vpe_proxy;
+
 static LIST_HEAD(its_nodes);
 static DEFINE_SPINLOCK(its_lock);
 static struct rdists *gic_rdists;
 static struct irq_domain *its_parent;
 
+/*
+ * We have a maximum number of 16 ITSs in the whole system if we're
+ * using the ITSList mechanism
+ */
+#define ITS_LIST_MAX		16
+
+static unsigned long its_list_map;
+static u16 vmovp_seq_num;
+static DEFINE_RAW_SPINLOCK(vmovp_lock);
+
+static DEFINE_IDA(its_vpeid_ida);
+
 #define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
 #define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
+#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)
 
 static struct its_collection *dev_event_to_col(struct its_device *its_dev,
 					       u32 event)
@@ -145,6 +186,11 @@ struct its_cmd_desc {
 	struct {
 		struct its_device *dev;
 		u32 event_id;
+	} its_clear_cmd;
+
+	struct {
+		struct its_device *dev;
+		u32 event_id;
 	} its_int_cmd;
 
 	struct {
@@ -177,6 +223,38 @@ struct its_cmd_desc {
 	struct {
 		struct its_collection *col;
 	} its_invall_cmd;
+
+	struct {
+		struct its_vpe *vpe;
+	} its_vinvall_cmd;
+
+	struct {
+		struct its_vpe *vpe;
+		struct its_collection *col;
+		bool valid;
+	} its_vmapp_cmd;
+
+	struct {
+		struct its_vpe *vpe;
+		struct its_device *dev;
+		u32 virt_id;
+		u32 event_id;
+		bool db_enabled;
+	} its_vmapti_cmd;
+
+	struct {
+		struct its_vpe *vpe;
+		struct its_device *dev;
+		u32 event_id;
+		bool db_enabled;
+	} its_vmovi_cmd;
+
+	struct {
+		struct its_vpe *vpe;
+		struct its_collection *col;
+		u16 seq_num;
+		u16 its_list;
+	} its_vmovp_cmd;
 	};
 };
 
@@ -193,6 +271,9 @@ struct its_cmd_block {
 typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
 						    struct its_cmd_desc *);
 
+typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_cmd_block *,
+					      struct its_cmd_desc *);
+
 static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
 {
 	u64 mask = GENMASK_ULL(h, l);
@@ -245,6 +326,46 @@ static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
 	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
 }
 
+static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
+{
+	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
+}
+
+static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
+{
+	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
+}
+
+static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
+{
+	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
+}
+
+static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
+{
+	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
+}
+
+static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
+{
+	its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
+}
+
+static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
+{
+	its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
+}
+
+static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
+{
+	its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 50, 16);
+}
+
+static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
+{
+	its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
+}
+
 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
 {
 	/* Let's fixup BE commands */
@@ -358,6 +479,40 @@ static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
 	return col;
 }
 
+static struct its_collection *its_build_int_cmd(struct its_cmd_block *cmd,
+						struct its_cmd_desc *desc)
+{
+	struct its_collection *col;
+
+	col = dev_event_to_col(desc->its_int_cmd.dev,
+			       desc->its_int_cmd.event_id);
+
+	its_encode_cmd(cmd, GITS_CMD_INT);
+	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
+	its_encode_event_id(cmd, desc->its_int_cmd.event_id);
+
+	its_fixup_cmd(cmd);
+
+	return col;
+}
+
+static struct its_collection *its_build_clear_cmd(struct its_cmd_block *cmd,
+						  struct its_cmd_desc *desc)
+{
+	struct its_collection *col;
+
+	col = dev_event_to_col(desc->its_clear_cmd.dev,
+			       desc->its_clear_cmd.event_id);
+
+	its_encode_cmd(cmd, GITS_CMD_CLEAR);
+	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
+	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
+
+	its_fixup_cmd(cmd);
+
+	return col;
+}
+
 static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
 						   struct its_cmd_desc *desc)
 {
@@ -369,6 +524,94 @@ static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
 	return NULL;
 }
 
+static struct its_vpe *its_build_vinvall_cmd(struct its_cmd_block *cmd,
+					     struct its_cmd_desc *desc)
+{
+	its_encode_cmd(cmd, GITS_CMD_VINVALL);
+	its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
+
+	its_fixup_cmd(cmd);
+
+	return desc->its_vinvall_cmd.vpe;
+}
+
+static struct its_vpe *its_build_vmapp_cmd(struct its_cmd_block *cmd,
+					   struct its_cmd_desc *desc)
+{
+	unsigned long vpt_addr;
+
+	vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
+
+	its_encode_cmd(cmd, GITS_CMD_VMAPP);
+	its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
+	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
+	its_encode_target(cmd, desc->its_vmapp_cmd.col->target_address);
+	its_encode_vpt_addr(cmd, vpt_addr);
+	its_encode_vpt_size(cmd, LPI_NRBITS - 1);
+
+	its_fixup_cmd(cmd);
+
+	return desc->its_vmapp_cmd.vpe;
+}
+
+static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd,
+					    struct its_cmd_desc *desc)
+{
+	u32 db;
+
+	if (desc->its_vmapti_cmd.db_enabled)
+		db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
+	else
+		db = 1023;
+
+	its_encode_cmd(cmd, GITS_CMD_VMAPTI);
+	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
+	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
+	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
+	its_encode_db_phys_id(cmd, db);
+	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
+
+	its_fixup_cmd(cmd);
+
+	return desc->its_vmapti_cmd.vpe;
+}
+
+static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd,
+					   struct its_cmd_desc *desc)
+{
+	u32 db;
+
+	if (desc->its_vmovi_cmd.db_enabled)
+		db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
+	else
+		db = 1023;
+
+	its_encode_cmd(cmd, GITS_CMD_VMOVI);
+	its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
+	its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
+	its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
+	its_encode_db_phys_id(cmd, db);
+	its_encode_db_valid(cmd, true);
+
+	its_fixup_cmd(cmd);
+
+	return desc->its_vmovi_cmd.vpe;
+}
+
+static struct its_vpe *its_build_vmovp_cmd(struct its_cmd_block *cmd,
+					   struct its_cmd_desc *desc)
+{
+	its_encode_cmd(cmd, GITS_CMD_VMOVP);
+	its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
+	its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
+	its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
+	its_encode_target(cmd, desc->its_vmovp_cmd.col->target_address);
+
+	its_fixup_cmd(cmd);
+
+	return desc->its_vmovp_cmd.vpe;
+}
+
 static u64 its_cmd_ptr_to_offset(struct its_node *its,
 				 struct its_cmd_block *ptr)
 {
@@ -453,7 +696,13 @@ static void its_wait_for_range_completion(struct its_node *its,
 
 	while (1) {
 		rd_idx = readl_relaxed(its->base + GITS_CREADR);
-		if (rd_idx >= to_idx || rd_idx < from_idx)
+
+		/* Direct case */
+		if (from_idx < to_idx && rd_idx >= to_idx)
+			break;
+
+		/* Wrapped case */
+		if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
 			break;
 
 		count--;
@@ -466,42 +715,84 @@ static void its_wait_for_range_completion(struct its_node *its,
466 } 715 }
467} 716}
468 717
469static void its_send_single_command(struct its_node *its, 718/* Warning, macro hell follows */
470 its_cmd_builder_t builder, 719#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \
471 struct its_cmd_desc *desc) 720void name(struct its_node *its, \
721 buildtype builder, \
722 struct its_cmd_desc *desc) \
723{ \
724 struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
725 synctype *sync_obj; \
726 unsigned long flags; \
727 \
728 raw_spin_lock_irqsave(&its->lock, flags); \
729 \
730 cmd = its_allocate_entry(its); \
731 if (!cmd) { /* We're soooooo screewed... */ \
732 raw_spin_unlock_irqrestore(&its->lock, flags); \
733 return; \
734 } \
735 sync_obj = builder(cmd, desc); \
736 its_flush_cmd(its, cmd); \
737 \
738 if (sync_obj) { \
739 sync_cmd = its_allocate_entry(its); \
740 if (!sync_cmd) \
741 goto post; \
742 \
743 buildfn(sync_cmd, sync_obj); \
744 its_flush_cmd(its, sync_cmd); \
745 } \
746 \
747post: \
748 next_cmd = its_post_commands(its); \
749 raw_spin_unlock_irqrestore(&its->lock, flags); \
750 \
751 its_wait_for_range_completion(its, cmd, next_cmd); \
752}
753
754static void its_build_sync_cmd(struct its_cmd_block *sync_cmd,
755 struct its_collection *sync_col)
756{
757 its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
758 its_encode_target(sync_cmd, sync_col->target_address);
759
760 its_fixup_cmd(sync_cmd);
761}
762
763static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
764 struct its_collection, its_build_sync_cmd)
765
766static void its_build_vsync_cmd(struct its_cmd_block *sync_cmd,
767 struct its_vpe *sync_vpe)
768{
769 its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
770 its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
771
772 its_fixup_cmd(sync_cmd);
773}
774
775static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
776 struct its_vpe, its_build_vsync_cmd)
777
778static void its_send_int(struct its_device *dev, u32 event_id)
472{ 779{
473 struct its_cmd_block *cmd, *sync_cmd, *next_cmd; 780 struct its_cmd_desc desc;
474 struct its_collection *sync_col;
475 unsigned long flags;
476 781
477 raw_spin_lock_irqsave(&its->lock, flags); 782 desc.its_int_cmd.dev = dev;
783 desc.its_int_cmd.event_id = event_id;
478 784
479 cmd = its_allocate_entry(its); 785 its_send_single_command(dev->its, its_build_int_cmd, &desc);
480 if (!cmd) { /* We're soooooo screewed... */ 786}
481 pr_err_ratelimited("ITS can't allocate, dropping command\n");
482 raw_spin_unlock_irqrestore(&its->lock, flags);
483 return;
484 }
485 sync_col = builder(cmd, desc);
486 its_flush_cmd(its, cmd);
487 787
488 if (sync_col) { 788static void its_send_clear(struct its_device *dev, u32 event_id)
489 sync_cmd = its_allocate_entry(its); 789{
490 if (!sync_cmd) { 790 struct its_cmd_desc desc;
491 pr_err_ratelimited("ITS can't SYNC, skipping\n");
492 goto post;
493 }
494 its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
495 its_encode_target(sync_cmd, sync_col->target_address);
496 its_fixup_cmd(sync_cmd);
497 its_flush_cmd(its, sync_cmd);
498 }
499 791
500post: 792 desc.its_clear_cmd.dev = dev;
501 next_cmd = its_post_commands(its); 793 desc.its_clear_cmd.event_id = event_id;
502 raw_spin_unlock_irqrestore(&its->lock, flags);
503 794
504 its_wait_for_range_completion(its, cmd, next_cmd); 795 its_send_single_command(dev->its, its_build_clear_cmd, &desc);
505} 796}
506 797
507static void its_send_inv(struct its_device *dev, u32 event_id) 798static void its_send_inv(struct its_device *dev, u32 event_id)
@@ -577,6 +868,106 @@ static void its_send_invall(struct its_node *its, struct its_collection *col)
 	its_send_single_command(its, its_build_invall_cmd, &desc);
 }
 
+static void its_send_vmapti(struct its_device *dev, u32 id)
+{
+	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
+	struct its_cmd_desc desc;
+
+	desc.its_vmapti_cmd.vpe = map->vpe;
+	desc.its_vmapti_cmd.dev = dev;
+	desc.its_vmapti_cmd.virt_id = map->vintid;
+	desc.its_vmapti_cmd.event_id = id;
+	desc.its_vmapti_cmd.db_enabled = map->db_enabled;
+
+	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
+}
+
+static void its_send_vmovi(struct its_device *dev, u32 id)
+{
+	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
+	struct its_cmd_desc desc;
+
+	desc.its_vmovi_cmd.vpe = map->vpe;
+	desc.its_vmovi_cmd.dev = dev;
+	desc.its_vmovi_cmd.event_id = id;
+	desc.its_vmovi_cmd.db_enabled = map->db_enabled;
+
+	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
+}
+
+static void its_send_vmapp(struct its_vpe *vpe, bool valid)
+{
+	struct its_cmd_desc desc;
+	struct its_node *its;
+
+	desc.its_vmapp_cmd.vpe = vpe;
+	desc.its_vmapp_cmd.valid = valid;
+
+	list_for_each_entry(its, &its_nodes, entry) {
+		if (!its->is_v4)
+			continue;
+
+		desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
+		its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
+	}
+}
+
+static void its_send_vmovp(struct its_vpe *vpe)
+{
+	struct its_cmd_desc desc;
+	struct its_node *its;
+	unsigned long flags;
+	int col_id = vpe->col_idx;
+
+	desc.its_vmovp_cmd.vpe = vpe;
+	desc.its_vmovp_cmd.its_list = (u16)its_list_map;
+
+	if (!its_list_map) {
+		its = list_first_entry(&its_nodes, struct its_node, entry);
+		desc.its_vmovp_cmd.seq_num = 0;
+		desc.its_vmovp_cmd.col = &its->collections[col_id];
+		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
+		return;
+	}
+
+	/*
+	 * Yet another marvel of the architecture. If using the
+	 * its_list "feature", we need to make sure that all ITSs
+	 * receive all VMOVP commands in the same order. The only way
+	 * to guarantee this is to make vmovp a serialization point.
+	 *
+	 * Wall <-- Head.
+	 */
+	raw_spin_lock_irqsave(&vmovp_lock, flags);
+
+	desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
+
+	/* Emit VMOVPs */
+	list_for_each_entry(its, &its_nodes, entry) {
+		if (!its->is_v4)
+			continue;
+
+		desc.its_vmovp_cmd.col = &its->collections[col_id];
+		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
+	}
+
+	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
+}
+
+static void its_send_vinvall(struct its_vpe *vpe)
+{
+	struct its_cmd_desc desc;
+	struct its_node *its;
+
+	desc.its_vinvall_cmd.vpe = vpe;
+
+	list_for_each_entry(its, &its_nodes, entry) {
+		if (!its->is_v4)
+			continue;
+		its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
+	}
+}
+
 /*
  * irqchip functions - assumes MSI, mostly.
  */
@@ -587,17 +978,26 @@ static inline u32 its_get_event_id(struct irq_data *d)
587 return d->hwirq - its_dev->event_map.lpi_base; 978 return d->hwirq - its_dev->event_map.lpi_base;
588} 979}
589 980
590static void lpi_set_config(struct irq_data *d, bool enable) 981static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
591{ 982{
592 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 983 irq_hw_number_t hwirq;
593 irq_hw_number_t hwirq = d->hwirq; 984 struct page *prop_page;
594 u32 id = its_get_event_id(d); 985 u8 *cfg;
595 u8 *cfg = page_address(gic_rdists->prop_page) + hwirq - 8192;
596 986
597 if (enable) 987 if (irqd_is_forwarded_to_vcpu(d)) {
598 *cfg |= LPI_PROP_ENABLED; 988 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
599 else 989 u32 event = its_get_event_id(d);
600 *cfg &= ~LPI_PROP_ENABLED; 990
991 prop_page = its_dev->event_map.vm->vprop_page;
992 hwirq = its_dev->event_map.vlpi_maps[event].vintid;
993 } else {
994 prop_page = gic_rdists->prop_page;
995 hwirq = d->hwirq;
996 }
997
998 cfg = page_address(prop_page) + hwirq - 8192;
999 *cfg &= ~clr;
1000 *cfg |= set | LPI_PROP_GROUP1;
601 1001
602 /* 1002 /*
603 * Make the above write visible to the redistributors. 1003 * Make the above write visible to the redistributors.
@@ -608,17 +1008,53 @@ static void lpi_set_config(struct irq_data *d, bool enable)
608 gic_flush_dcache_to_poc(cfg, sizeof(*cfg)); 1008 gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
609 else 1009 else
610 dsb(ishst); 1010 dsb(ishst);
611 its_send_inv(its_dev, id); 1011}
1012
1013static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
1014{
1015 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1016
1017 lpi_write_config(d, clr, set);
1018 its_send_inv(its_dev, its_get_event_id(d));
1019}
1020
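For orientation: lpi_write_config() above pokes a single per-LPI property byte whose location and layout are architectural (GICv3 defines one configuration byte per LPI, with the table indexed from INTID 8192). A minimal illustrative helper mirroring the address computation above; the function itself is hypothetical:

    /*
     * Per-LPI property byte (GICv3 architecture):
     *   bit 0    - LPI_PROP_ENABLED
     *   bit 1    - LPI_PROP_GROUP1
     *   bits 7:2 - priority (LPI_PROP_DEFAULT_PRIO is 0xa0)
     */
    static u8 *example_lpi_cfg_byte(struct page *prop_page,
                                    irq_hw_number_t hwirq)
    {
            /* LPI INTIDs start at 8192; one config byte per LPI */
            return (u8 *)page_address(prop_page) + (hwirq - 8192);
    }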
1021static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
1022{
1023 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1024 u32 event = its_get_event_id(d);
1025
1026 if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
1027 return;
1028
1029 its_dev->event_map.vlpi_maps[event].db_enabled = enable;
1030
1031 /*
1032 * More fun with the architecture:
1033 *
1034 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1035 * value or to 1023, depending on the enable bit. But that
1036 * would be issuing a mapping for an /existing/ DevID+EventID
1037 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
1038 * to the /same/ vPE, using this opportunity to adjust the
1039 * doorbell. Mouahahahaha. We loves it, Precious.
1040 */
1041 its_send_vmovi(its_dev, event);
612} 1042}
613 1043
614static void its_mask_irq(struct irq_data *d) 1044static void its_mask_irq(struct irq_data *d)
615{ 1045{
616 lpi_set_config(d, false); 1046 if (irqd_is_forwarded_to_vcpu(d))
1047 its_vlpi_set_doorbell(d, false);
1048
1049 lpi_update_config(d, LPI_PROP_ENABLED, 0);
617} 1050}
618 1051
619static void its_unmask_irq(struct irq_data *d) 1052static void its_unmask_irq(struct irq_data *d)
620{ 1053{
621 lpi_set_config(d, true); 1054 if (irqd_is_forwarded_to_vcpu(d))
1055 its_vlpi_set_doorbell(d, true);
1056
1057 lpi_update_config(d, 0, LPI_PROP_ENABLED);
622} 1058}
623 1059
624static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, 1060static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
@@ -630,6 +1066,10 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
630 struct its_collection *target_col; 1066 struct its_collection *target_col;
631 u32 id = its_get_event_id(d); 1067 u32 id = its_get_event_id(d);
632 1068
1069 /* A forwarded interrupt should use irq_set_vcpu_affinity */
1070 if (irqd_is_forwarded_to_vcpu(d))
1071 return -EINVAL;
1072
633 /* lpi cannot be routed to a redistributor that is on a foreign node */ 1073 /* lpi cannot be routed to a redistributor that is on a foreign node */
634 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { 1074 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
635 if (its_dev->its->numa_node >= 0) { 1075 if (its_dev->its->numa_node >= 0) {
@@ -649,6 +1089,7 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
649 target_col = &its_dev->its->collections[cpu]; 1089 target_col = &its_dev->its->collections[cpu];
650 its_send_movi(its_dev, target_col, id); 1090 its_send_movi(its_dev, target_col, id);
651 its_dev->event_map.col_map[id] = cpu; 1091 its_dev->event_map.col_map[id] = cpu;
1092 irq_data_update_effective_affinity(d, cpumask_of(cpu));
652 } 1093 }
653 1094
654 return IRQ_SET_MASK_OK_DONE; 1095 return IRQ_SET_MASK_OK_DONE;
@@ -670,6 +1111,179 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
670 iommu_dma_map_msi_msg(d->irq, msg); 1111 iommu_dma_map_msi_msg(d->irq, msg);
671} 1112}
672 1113
1114static int its_irq_set_irqchip_state(struct irq_data *d,
1115 enum irqchip_irq_state which,
1116 bool state)
1117{
1118 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1119 u32 event = its_get_event_id(d);
1120
1121 if (which != IRQCHIP_STATE_PENDING)
1122 return -EINVAL;
1123
1124 if (state)
1125 its_send_int(its_dev, event);
1126 else
1127 its_send_clear(its_dev, event);
1128
1129 return 0;
1130}
1131
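With irq_set_irqchip_state() now wired to INT/CLEAR, generic code can inject or retire a pending LPI without knowing anything about the ITS. A hedged usage sketch ('irq' is a hypothetical Linux interrupt number backed by this driver):

    int err;

    /* Make the LPI pending at the ITS (ends up in its_send_int()) */
    err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true);

    /* ... and retire it again (its_send_clear()) */
    if (!err)
            err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);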
1132static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1133{
1134 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1135 u32 event = its_get_event_id(d);
1136 int ret = 0;
1137
1138 if (!info->map)
1139 return -EINVAL;
1140
1141 mutex_lock(&its_dev->event_map.vlpi_lock);
1142
1143 if (!its_dev->event_map.vm) {
1144 struct its_vlpi_map *maps;
1145
1146 maps = kzalloc(sizeof(*maps) * its_dev->event_map.nr_lpis,
1147 GFP_KERNEL);
1148 if (!maps) {
1149 ret = -ENOMEM;
1150 goto out;
1151 }
1152
1153 its_dev->event_map.vm = info->map->vm;
1154 its_dev->event_map.vlpi_maps = maps;
1155 } else if (its_dev->event_map.vm != info->map->vm) {
1156 ret = -EINVAL;
1157 goto out;
1158 }
1159
1160 /* Get our private copy of the mapping information */
1161 its_dev->event_map.vlpi_maps[event] = *info->map;
1162
1163 if (irqd_is_forwarded_to_vcpu(d)) {
1164 /* Already mapped, move it around */
1165 its_send_vmovi(its_dev, event);
1166 } else {
1167 /* Drop the physical mapping */
1168 its_send_discard(its_dev, event);
1169
1170 /* and install the virtual one */
1171 its_send_vmapti(its_dev, event);
1172 irqd_set_forwarded_to_vcpu(d);
1173
1174 /* Increment the number of VLPIs */
1175 its_dev->event_map.nr_vlpis++;
1176 }
1177
1178out:
1179 mutex_unlock(&its_dev->event_map.vlpi_lock);
1180 return ret;
1181}
1182
1183static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
1184{
1185 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1186 u32 event = its_get_event_id(d);
1187 int ret = 0;
1188
1189 mutex_lock(&its_dev->event_map.vlpi_lock);
1190
1191 if (!its_dev->event_map.vm ||
1192 !its_dev->event_map.vlpi_maps[event].vm) {
1193 ret = -EINVAL;
1194 goto out;
1195 }
1196
1197 /* Copy our mapping information to the incoming request */
1198 *info->map = its_dev->event_map.vlpi_maps[event];
1199
1200out:
1201 mutex_unlock(&its_dev->event_map.vlpi_lock);
1202 return ret;
1203}
1204
1205static int its_vlpi_unmap(struct irq_data *d)
1206{
1207 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1208 u32 event = its_get_event_id(d);
1209 int ret = 0;
1210
1211 mutex_lock(&its_dev->event_map.vlpi_lock);
1212
1213 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
1214 ret = -EINVAL;
1215 goto out;
1216 }
1217
1218 /* Drop the virtual mapping */
1219 its_send_discard(its_dev, event);
1220
1221 /* and restore the physical one */
1222 irqd_clr_forwarded_to_vcpu(d);
1223 its_send_mapti(its_dev, d->hwirq, event);
1224 lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
1225 LPI_PROP_ENABLED |
1226 LPI_PROP_GROUP1));
1227
1228 /*
1229 * Drop the refcount and make the device available again if
1230 * this was the last VLPI.
1231 */
1232 if (!--its_dev->event_map.nr_vlpis) {
1233 its_dev->event_map.vm = NULL;
1234 kfree(its_dev->event_map.vlpi_maps);
1235 }
1236
1237out:
1238 mutex_unlock(&its_dev->event_map.vlpi_lock);
1239 return ret;
1240}
1241
1242static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
1243{
1244 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1245
1246 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1247 return -EINVAL;
1248
1249 if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
1250 lpi_update_config(d, 0xff, info->config);
1251 else
1252 lpi_write_config(d, 0xff, info->config);
1253 its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
1254
1255 return 0;
1256}
1257
1258static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
1259{
1260 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1261 struct its_cmd_info *info = vcpu_info;
1262
1263 /* Need a v4 ITS */
1264 if (!its_dev->its->is_v4)
1265 return -EINVAL;
1266
1267 /* Unmap request? */
1268 if (!info)
1269 return its_vlpi_unmap(d);
1270
1271 switch (info->cmd_type) {
1272 case MAP_VLPI:
1273 return its_vlpi_map(d, info);
1274
1275 case GET_VLPI:
1276 return its_vlpi_get(d, info);
1277
1278 case PROP_UPDATE_VLPI:
1279 case PROP_UPDATE_AND_INV_VLPI:
1280 return its_vlpi_prop_update(d, info);
1281
1282 default:
1283 return -EINVAL;
1284 }
1285}
1286
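Taken together, this is the interface a hypervisor uses to forward a physical LPI to a guest: fill an its_cmd_info with a MAP_VLPI descriptor and hand it to the standard irq_set_vcpu_affinity() call, which lands in its_irq_set_vcpu_affinity() above. A sketch under those assumptions (the wrapper name and its arguments are invented; the struct fields are the ones used above):

    static int example_forward_lpi(unsigned int irq, struct its_vm *vm,
                                   struct its_vpe *vpe, u32 vintid)
    {
            struct its_vlpi_map map = {
                    .vm         = vm,
                    .vpe        = vpe,      /* target vPE */
                    .vintid     = vintid,   /* INTID the guest will see */
                    .db_enabled = true,     /* doorbell while not running */
            };
            struct its_cmd_info info = {
                    .cmd_type   = MAP_VLPI,
                    .map        = &map,
            };

            /* Routed to its_irq_set_vcpu_affinity() above */
            return irq_set_vcpu_affinity(irq, &info);
    }

Passing a NULL vcpu_info to the same call reverses the operation via its_vlpi_unmap(), restoring the physical mapping and the default LPI properties.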
673static struct irq_chip its_irq_chip = { 1287static struct irq_chip its_irq_chip = {
674 .name = "ITS", 1288 .name = "ITS",
675 .irq_mask = its_mask_irq, 1289 .irq_mask = its_mask_irq,
@@ -677,6 +1291,8 @@ static struct irq_chip its_irq_chip = {
677 .irq_eoi = irq_chip_eoi_parent, 1291 .irq_eoi = irq_chip_eoi_parent,
678 .irq_set_affinity = its_set_affinity, 1292 .irq_set_affinity = its_set_affinity,
679 .irq_compose_msi_msg = its_irq_compose_msi_msg, 1293 .irq_compose_msi_msg = its_irq_compose_msi_msg,
1294 .irq_set_irqchip_state = its_irq_set_irqchip_state,
1295 .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
680}; 1296};
681 1297
682/* 1298/*
@@ -695,7 +1311,6 @@ static struct irq_chip its_irq_chip = {
695 1311
696static unsigned long *lpi_bitmap; 1312static unsigned long *lpi_bitmap;
697static u32 lpi_chunks; 1313static u32 lpi_chunks;
698static u32 lpi_id_bits;
699static DEFINE_SPINLOCK(lpi_lock); 1314static DEFINE_SPINLOCK(lpi_lock);
700 1315
701static int its_lpi_to_chunk(int lpi) 1316static int its_lpi_to_chunk(int lpi)
@@ -766,16 +1381,15 @@ out:
766 return bitmap; 1381 return bitmap;
767} 1382}
768 1383
769static void its_lpi_free(struct event_lpi_map *map) 1384static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids)
770{ 1385{
771 int base = map->lpi_base;
772 int nr_ids = map->nr_lpis;
773 int lpi; 1386 int lpi;
774 1387
775 spin_lock(&lpi_lock); 1388 spin_lock(&lpi_lock);
776 1389
777 for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) { 1390 for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
778 int chunk = its_lpi_to_chunk(lpi); 1391 int chunk = its_lpi_to_chunk(lpi);
1392
779 BUG_ON(chunk > lpi_chunks); 1393 BUG_ON(chunk > lpi_chunks);
780 if (test_bit(chunk, lpi_bitmap)) { 1394 if (test_bit(chunk, lpi_bitmap)) {
781 clear_bit(chunk, lpi_bitmap); 1395 clear_bit(chunk, lpi_bitmap);
@@ -786,28 +1400,40 @@ static void its_lpi_free(struct event_lpi_map *map)
786 1400
787 spin_unlock(&lpi_lock); 1401 spin_unlock(&lpi_lock);
788 1402
789 kfree(map->lpi_map); 1403 kfree(bitmap);
790 kfree(map->col_map);
791} 1404}
792 1405
793/* 1406static struct page *its_allocate_prop_table(gfp_t gfp_flags)
794 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to 1407{
795 * deal with (one configuration byte per interrupt). PENDBASE has to 1408 struct page *prop_page;
796 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
797 */
798#define LPI_NRBITS lpi_id_bits
799#define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
800#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
801 1409
802#define LPI_PROP_DEFAULT_PRIO 0xa0 1410 prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
1411 if (!prop_page)
1412 return NULL;
1413
1414 /* Priority 0xa0, Group-1, disabled */
1415 memset(page_address(prop_page),
1416 LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
1417 LPI_PROPBASE_SZ);
1418
1419 /* Make sure the GIC will observe the written configuration */
1420 gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ);
1421
1422 return prop_page;
1423}
1424
1425static void its_free_prop_table(struct page *prop_page)
1426{
1427 free_pages((unsigned long)page_address(prop_page),
1428 get_order(LPI_PROPBASE_SZ));
1429}
803 1430
804static int __init its_alloc_lpi_tables(void) 1431static int __init its_alloc_lpi_tables(void)
805{ 1432{
806 phys_addr_t paddr; 1433 phys_addr_t paddr;
807 1434
808 lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS); 1435 lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS);
809 gic_rdists->prop_page = alloc_pages(GFP_NOWAIT, 1436 gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
810 get_order(LPI_PROPBASE_SZ));
811 if (!gic_rdists->prop_page) { 1437 if (!gic_rdists->prop_page) {
812 pr_err("Failed to allocate PROPBASE\n"); 1438 pr_err("Failed to allocate PROPBASE\n");
813 return -ENOMEM; 1439 return -ENOMEM;
@@ -816,14 +1442,6 @@ static int __init its_alloc_lpi_tables(void)
816 paddr = page_to_phys(gic_rdists->prop_page); 1442 paddr = page_to_phys(gic_rdists->prop_page);
817 pr_info("GIC: using LPI property table @%pa\n", &paddr); 1443 pr_info("GIC: using LPI property table @%pa\n", &paddr);
818 1444
819 /* Priority 0xa0, Group-1, disabled */
820 memset(page_address(gic_rdists->prop_page),
821 LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
822 LPI_PROPBASE_SZ);
823
824 /* Make sure the GIC will observe the written configuration */
825 gic_flush_dcache_to_poc(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);
826
827 return its_lpi_init(lpi_id_bits); 1445 return its_lpi_init(lpi_id_bits);
828} 1446}
829 1447
@@ -962,10 +1580,13 @@ retry_baser:
962 return 0; 1580 return 0;
963} 1581}
964 1582
965static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser, 1583static bool its_parse_indirect_baser(struct its_node *its,
966 u32 psz, u32 *order) 1584 struct its_baser *baser,
1585 u32 psz, u32 *order)
967{ 1586{
968 u64 esz = GITS_BASER_ENTRY_SIZE(its_read_baser(its, baser)); 1587 u64 tmp = its_read_baser(its, baser);
1588 u64 type = GITS_BASER_TYPE(tmp);
1589 u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
969 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb; 1590 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
970 u32 ids = its->device_ids; 1591 u32 ids = its->device_ids;
971 u32 new_order = *order; 1592 u32 new_order = *order;
@@ -1004,8 +1625,9 @@ static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser
1004 if (new_order >= MAX_ORDER) { 1625 if (new_order >= MAX_ORDER) {
1005 new_order = MAX_ORDER - 1; 1626 new_order = MAX_ORDER - 1;
1006 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz); 1627 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
1007 pr_warn("ITS@%pa: Device Table too large, reduce ids %u->%u\n", 1628 pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n",
1008 &its->phys_base, its->device_ids, ids); 1629 &its->phys_base, its_base_type_string[type],
1630 its->device_ids, ids);
1009 } 1631 }
1010 1632
1011 *order = new_order; 1633 *order = new_order;
@@ -1053,11 +1675,16 @@ static int its_alloc_tables(struct its_node *its)
1053 u32 order = get_order(psz); 1675 u32 order = get_order(psz);
1054 bool indirect = false; 1676 bool indirect = false;
1055 1677
1056 if (type == GITS_BASER_TYPE_NONE) 1678 switch (type) {
1679 case GITS_BASER_TYPE_NONE:
1057 continue; 1680 continue;
1058 1681
1059 if (type == GITS_BASER_TYPE_DEVICE) 1682 case GITS_BASER_TYPE_DEVICE:
1060 indirect = its_parse_baser_device(its, baser, psz, &order); 1683 case GITS_BASER_TYPE_VCPU:
1684 indirect = its_parse_indirect_baser(its, baser,
1685 psz, &order);
1686 break;
1687 }
1061 1688
1062 err = its_setup_baser(its, baser, cache, shr, psz, order, indirect); 1689 err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
1063 if (err < 0) { 1690 if (err < 0) {
@@ -1084,6 +1711,30 @@ static int its_alloc_collections(struct its_node *its)
1084 return 0; 1711 return 0;
1085} 1712}
1086 1713
1714static struct page *its_allocate_pending_table(gfp_t gfp_flags)
1715{
1716 struct page *pend_page;
1717 /*
1718 * The pending pages have to be at least 64kB aligned,
1719 * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
1720 */
1721 pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
1722 get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
1723 if (!pend_page)
1724 return NULL;
1725
1726 /* Make sure the GIC will observe the zero-ed page */
1727 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
1728
1729 return pend_page;
1730}
1731
1732static void its_free_pending_table(struct page *pt)
1733{
1734 free_pages((unsigned long)page_address(pt),
1735 get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
1736}
1737
1087static void its_cpu_init_lpis(void) 1738static void its_cpu_init_lpis(void)
1088{ 1739{
1089 void __iomem *rbase = gic_data_rdist_rd_base(); 1740 void __iomem *rbase = gic_data_rdist_rd_base();
@@ -1094,21 +1745,14 @@ static void its_cpu_init_lpis(void)
1094 pend_page = gic_data_rdist()->pend_page; 1745 pend_page = gic_data_rdist()->pend_page;
1095 if (!pend_page) { 1746 if (!pend_page) {
1096 phys_addr_t paddr; 1747 phys_addr_t paddr;
1097 /* 1748
1098 * The pending pages have to be at least 64kB aligned, 1749 pend_page = its_allocate_pending_table(GFP_NOWAIT);
1099 * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
1100 */
1101 pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO,
1102 get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
1103 if (!pend_page) { 1750 if (!pend_page) {
1104 pr_err("Failed to allocate PENDBASE for CPU%d\n", 1751 pr_err("Failed to allocate PENDBASE for CPU%d\n",
1105 smp_processor_id()); 1752 smp_processor_id());
1106 return; 1753 return;
1107 } 1754 }
1108 1755
1109 /* Make sure the GIC will observe the zero-ed page */
1110 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
1111
1112 paddr = page_to_phys(pend_page); 1756 paddr = page_to_phys(pend_page);
1113 pr_info("CPU%d: using LPI pending table @%pa\n", 1757 pr_info("CPU%d: using LPI pending table @%pa\n",
1114 smp_processor_id(), &paddr); 1758 smp_processor_id(), &paddr);
@@ -1259,26 +1903,19 @@ static struct its_baser *its_get_baser(struct its_node *its, u32 type)
1259 return NULL; 1903 return NULL;
1260} 1904}
1261 1905
1262static bool its_alloc_device_table(struct its_node *its, u32 dev_id) 1906static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
1263{ 1907{
1264 struct its_baser *baser;
1265 struct page *page; 1908 struct page *page;
1266 u32 esz, idx; 1909 u32 esz, idx;
1267 __le64 *table; 1910 __le64 *table;
1268 1911
1269 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
1270
1271 /* Don't allow device id that exceeds ITS hardware limit */
1272 if (!baser)
1273 return (ilog2(dev_id) < its->device_ids);
1274
1275 /* Don't allow device id that exceeds single, flat table limit */ 1912 /* Don't allow device id that exceeds single, flat table limit */
1276 esz = GITS_BASER_ENTRY_SIZE(baser->val); 1913 esz = GITS_BASER_ENTRY_SIZE(baser->val);
1277 if (!(baser->val & GITS_BASER_INDIRECT)) 1914 if (!(baser->val & GITS_BASER_INDIRECT))
1278 return (dev_id < (PAGE_ORDER_TO_SIZE(baser->order) / esz)); 1915 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
1279 1916
1280 /* Compute 1st level table index & check if that exceeds table limit */ 1917 /* Compute 1st level table index & check if that exceeds table limit */
1281 idx = dev_id >> ilog2(baser->psz / esz); 1918 idx = id >> ilog2(baser->psz / esz);
1282 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE)) 1919 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
1283 return false; 1920 return false;
1284 1921
@@ -1307,11 +1944,52 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
1307 return true; 1944 return true;
1308} 1945}
1309 1946
1947static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
1948{
1949 struct its_baser *baser;
1950
1951 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
1952
1953 /* Don't allow device id that exceeds ITS hardware limit */
1954 if (!baser)
1955 return (ilog2(dev_id) < its->device_ids);
1956
1957 return its_alloc_table_entry(baser, dev_id);
1958}
1959
1960static bool its_alloc_vpe_table(u32 vpe_id)
1961{
1962 struct its_node *its;
1963
1964 /*
1965 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
1966 * could try and only do it on ITSs corresponding to devices
1967 * that have interrupts targeted at this VPE, but the
1968 * complexity becomes crazy (and you have tons of memory
1969 * anyway, right?).
1970 */
1971 list_for_each_entry(its, &its_nodes, entry) {
1972 struct its_baser *baser;
1973
1974 if (!its->is_v4)
1975 continue;
1976
1977 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
1978 if (!baser)
1979 return false;
1980
1981 if (!its_alloc_table_entry(baser, vpe_id))
1982 return false;
1983 }
1984
1985 return true;
1986}
1987
1310static struct its_device *its_create_device(struct its_node *its, u32 dev_id, 1988static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1311 int nvecs) 1989 int nvecs, bool alloc_lpis)
1312{ 1990{
1313 struct its_device *dev; 1991 struct its_device *dev;
1314 unsigned long *lpi_map; 1992 unsigned long *lpi_map = NULL;
1315 unsigned long flags; 1993 unsigned long flags;
1316 u16 *col_map = NULL; 1994 u16 *col_map = NULL;
1317 void *itt; 1995 void *itt;
@@ -1333,11 +2011,18 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1333 sz = nr_ites * its->ite_size; 2011 sz = nr_ites * its->ite_size;
1334 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; 2012 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
1335 itt = kzalloc(sz, GFP_KERNEL); 2013 itt = kzalloc(sz, GFP_KERNEL);
1336 lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); 2014 if (alloc_lpis) {
1337 if (lpi_map) 2015 lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
1338 col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL); 2016 if (lpi_map)
2017 col_map = kzalloc(sizeof(*col_map) * nr_lpis,
2018 GFP_KERNEL);
2019 } else {
2020 col_map = kzalloc(sizeof(*col_map) * nr_ites, GFP_KERNEL);
2021 nr_lpis = 0;
2022 lpi_base = 0;
2023 }
1339 2024
1340 if (!dev || !itt || !lpi_map || !col_map) { 2025 if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
1341 kfree(dev); 2026 kfree(dev);
1342 kfree(itt); 2027 kfree(itt);
1343 kfree(lpi_map); 2028 kfree(lpi_map);
@@ -1354,6 +2039,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1354 dev->event_map.col_map = col_map; 2039 dev->event_map.col_map = col_map;
1355 dev->event_map.lpi_base = lpi_base; 2040 dev->event_map.lpi_base = lpi_base;
1356 dev->event_map.nr_lpis = nr_lpis; 2041 dev->event_map.nr_lpis = nr_lpis;
2042 mutex_init(&dev->event_map.vlpi_lock);
1357 dev->device_id = dev_id; 2043 dev->device_id = dev_id;
1358 INIT_LIST_HEAD(&dev->entry); 2044 INIT_LIST_HEAD(&dev->entry);
1359 2045
@@ -1412,6 +2098,16 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
1412 msi_info = msi_get_domain_info(domain); 2098 msi_info = msi_get_domain_info(domain);
1413 its = msi_info->data; 2099 its = msi_info->data;
1414 2100
2101 if (!gic_rdists->has_direct_lpi &&
2102 vpe_proxy.dev &&
2103 vpe_proxy.dev->its == its &&
2104 dev_id == vpe_proxy.dev->device_id) {
2105 /* Bad luck. Get yourself a better implementation */
2106 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
2107 dev_id);
2108 return -EINVAL;
2109 }
2110
1415 its_dev = its_find_device(its, dev_id); 2111 its_dev = its_find_device(its, dev_id);
1416 if (its_dev) { 2112 if (its_dev) {
1417 /* 2113 /*
@@ -1423,7 +2119,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
1423 goto out; 2119 goto out;
1424 } 2120 }
1425 2121
1426 its_dev = its_create_device(its, dev_id, nvec); 2122 its_dev = its_create_device(its, dev_id, nvec, true);
1427 if (!its_dev) 2123 if (!its_dev)
1428 return -ENOMEM; 2124 return -ENOMEM;
1429 2125
@@ -1481,6 +2177,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1481 2177
1482 irq_domain_set_hwirq_and_chip(domain, virq + i, 2178 irq_domain_set_hwirq_and_chip(domain, virq + i,
1483 hwirq, &its_irq_chip, its_dev); 2179 hwirq, &its_irq_chip, its_dev);
2180 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
1484 pr_debug("ID:%d pID:%d vID:%d\n", 2181 pr_debug("ID:%d pID:%d vID:%d\n",
1485 (int)(hwirq - its_dev->event_map.lpi_base), 2182 (int)(hwirq - its_dev->event_map.lpi_base),
1486 (int) hwirq, virq + i); 2183 (int) hwirq, virq + i);
@@ -1495,13 +2192,16 @@ static void its_irq_domain_activate(struct irq_domain *domain,
1495 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 2192 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1496 u32 event = its_get_event_id(d); 2193 u32 event = its_get_event_id(d);
1497 const struct cpumask *cpu_mask = cpu_online_mask; 2194 const struct cpumask *cpu_mask = cpu_online_mask;
2195 int cpu;
1498 2196
1499 /* get the cpu_mask of local node */ 2197 /* get the cpu_mask of local node */
1500 if (its_dev->its->numa_node >= 0) 2198 if (its_dev->its->numa_node >= 0)
1501 cpu_mask = cpumask_of_node(its_dev->its->numa_node); 2199 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
1502 2200
1503 /* Bind the LPI to the first possible CPU */ 2201 /* Bind the LPI to the first possible CPU */
1504 its_dev->event_map.col_map[event] = cpumask_first(cpu_mask); 2202 cpu = cpumask_first(cpu_mask);
2203 its_dev->event_map.col_map[event] = cpu;
2204 irq_data_update_effective_affinity(d, cpumask_of(cpu));
1505 2205
1506 /* Map the GIC IRQ and event to the device */ 2206 /* Map the GIC IRQ and event to the device */
1507 its_send_mapti(its_dev, d->hwirq, event); 2207 its_send_mapti(its_dev, d->hwirq, event);
@@ -1539,7 +2239,10 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
1539 /* If all interrupts have been freed, start mopping the floor */ 2239 /* If all interrupts have been freed, start mopping the floor */
1540 if (bitmap_empty(its_dev->event_map.lpi_map, 2240 if (bitmap_empty(its_dev->event_map.lpi_map,
1541 its_dev->event_map.nr_lpis)) { 2241 its_dev->event_map.nr_lpis)) {
1542 its_lpi_free(&its_dev->event_map); 2242 its_lpi_free_chunks(its_dev->event_map.lpi_map,
2243 its_dev->event_map.lpi_base,
2244 its_dev->event_map.nr_lpis);
2245 kfree(its_dev->event_map.col_map);
1543 2246
1544 /* Unmap device/itt */ 2247 /* Unmap device/itt */
1545 its_send_mapd(its_dev, 0); 2248 its_send_mapd(its_dev, 0);
@@ -1556,6 +2259,451 @@ static const struct irq_domain_ops its_domain_ops = {
1556 .deactivate = its_irq_domain_deactivate, 2259 .deactivate = its_irq_domain_deactivate,
1557}; 2260};
1558 2261
2262/*
2263 * This is insane.
2264 *
2265 * If a GICv4 doesn't implement Direct LPIs (which is extremely
2266 * likely), the only way to perform an invalidate is to use a fake
2267 * device to issue an INV command, implying that the LPI has first
2268 * been mapped to some event on that device. Since this is not exactly
2269 * cheap, we try to keep that mapping around as long as possible, and
2270 * only issue an UNMAP if we're short on available slots.
2271 *
2272 * Broken by design(tm).
2273 */
2274static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
2275{
2276 /* Already unmapped? */
2277 if (vpe->vpe_proxy_event == -1)
2278 return;
2279
2280 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
2281 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
2282
2283 /*
2284 * We don't track empty slots at all, so let's move the
2285 * next_victim pointer if we can quickly reuse that slot
2286 * instead of nuking an existing entry. Not clear that this is
2287 * always a win though, and this might just generate a ripple
2288 * effect... Let's just hope VPEs don't migrate too often.
2289 */
2290 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2291 vpe_proxy.next_victim = vpe->vpe_proxy_event;
2292
2293 vpe->vpe_proxy_event = -1;
2294}
2295
2296static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
2297{
2298 if (!gic_rdists->has_direct_lpi) {
2299 unsigned long flags;
2300
2301 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2302 its_vpe_db_proxy_unmap_locked(vpe);
2303 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2304 }
2305}
2306
2307static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
2308{
2309 /* Already mapped? */
2310 if (vpe->vpe_proxy_event != -1)
2311 return;
2312
2313 /* This slot was already allocated. Kick the other VPE out. */
2314 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2315 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
2316
2317 /* Map the new VPE instead */
2318 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
2319 vpe->vpe_proxy_event = vpe_proxy.next_victim;
2320 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
2321
2322 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
2323 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
2324}
2325
2326static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
2327{
2328 unsigned long flags;
2329 struct its_collection *target_col;
2330
2331 if (gic_rdists->has_direct_lpi) {
2332 void __iomem *rdbase;
2333
2334 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
2335 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2336 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2337 cpu_relax();
2338
2339 return;
2340 }
2341
2342 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2343
2344 its_vpe_db_proxy_map_locked(vpe);
2345
2346 target_col = &vpe_proxy.dev->its->collections[to];
2347 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
2348 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
2349
2350 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2351}
2352
2353static int its_vpe_set_affinity(struct irq_data *d,
2354 const struct cpumask *mask_val,
2355 bool force)
2356{
2357 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2358 int cpu = cpumask_first(mask_val);
2359
2360 /*
2361 * Changing affinity is mega expensive, so let's be as lazy as
2362 * we can and only do it if we really have to. Also, if mapped
2363 * into the proxy device, we need to move the doorbell
2364 * interrupt to its new location.
2365 */
2366 if (vpe->col_idx != cpu) {
2367 int from = vpe->col_idx;
2368
2369 vpe->col_idx = cpu;
2370 its_send_vmovp(vpe);
2371 its_vpe_db_proxy_move(vpe, from, cpu);
2372 }
2373
2374 return IRQ_SET_MASK_OK_DONE;
2375}
2376
2377static void its_vpe_schedule(struct its_vpe *vpe)
2378{
2379 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2380 u64 val;
2381
2382 /* Schedule the VPE */
2383 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
2384 GENMASK_ULL(51, 12);
2385 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2386 val |= GICR_VPROPBASER_RaWb;
2387 val |= GICR_VPROPBASER_InnerShareable;
2388 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2389
2390 val = virt_to_phys(page_address(vpe->vpt_page)) &
2391 GENMASK_ULL(51, 16);
2392 val |= GICR_VPENDBASER_RaWaWb;
2393 val |= GICR_VPENDBASER_NonShareable;
2394 /*
2395 * There is no good way of finding out if the pending table is
2396 * empty as we can race against the doorbell interrupt very
2397 * easily. So in the end, vpe->pending_last is only an
2398 * indication that the vcpu has something pending, not one
2399 * that the pending table is empty. A good implementation
2400 * would be able to read its coarse map pretty quickly anyway,
2401 * making this a tolerable issue.
2402 */
2403 val |= GICR_VPENDBASER_PendingLast;
2404 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
2405 val |= GICR_VPENDBASER_Valid;
2406 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2407}
2408
2409static void its_vpe_deschedule(struct its_vpe *vpe)
2410{
2411 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2412 u32 count = 1000000; /* 1s! */
2413 bool clean;
2414 u64 val;
2415
2416 /* We're being scheduled out */
2417 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2418 val &= ~GICR_VPENDBASER_Valid;
2419 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2420
2421 do {
2422 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2423 clean = !(val & GICR_VPENDBASER_Dirty);
2424 if (!clean) {
2425 count--;
2426 cpu_relax();
2427 udelay(1);
2428 }
2429 } while (!clean && count);
2430
2431 if (unlikely(!clean && !count)) {
2432 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
2433 vpe->idai = false;
2434 vpe->pending_last = true;
2435 } else {
2436 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
2437 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
2438 }
2439}
2440
2441static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
2442{
2443 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2444 struct its_cmd_info *info = vcpu_info;
2445
2446 switch (info->cmd_type) {
2447 case SCHEDULE_VPE:
2448 its_vpe_schedule(vpe);
2449 return 0;
2450
2451 case DESCHEDULE_VPE:
2452 its_vpe_deschedule(vpe);
2453 return 0;
2454
2455 case INVALL_VPE:
2456 its_send_vinvall(vpe);
2457 return 0;
2458
2459 default:
2460 return -EINVAL;
2461 }
2462}
2463
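On the vPE side the same entry point doubles as the scheduling interface: the hypervisor schedules a vPE onto the current CPU before entering the guest and deschedules it on exit, all through the doorbell's Linux interrupt. A hedged sketch of a caller (the wrapper name is invented; the new irq-gic-v4.c glue further down provides the real equivalent):

    /* 'vpe_irq' is the vPE doorbell's Linux interrupt number */
    static int example_schedule_vpe(unsigned int vpe_irq, bool on)
    {
            struct its_cmd_info info = {
                    .cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE,
            };

            /* Routed to its_vpe_set_vcpu_affinity() above */
            return irq_set_vcpu_affinity(vpe_irq, &info);
    }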
2464static void its_vpe_send_cmd(struct its_vpe *vpe,
2465 void (*cmd)(struct its_device *, u32))
2466{
2467 unsigned long flags;
2468
2469 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2470
2471 its_vpe_db_proxy_map_locked(vpe);
2472 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
2473
2474 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2475}
2476
2477static void its_vpe_send_inv(struct irq_data *d)
2478{
2479 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2480
2481 if (gic_rdists->has_direct_lpi) {
2482 void __iomem *rdbase;
2483
2484 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
2485 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR);
2486 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2487 cpu_relax();
2488 } else {
2489 its_vpe_send_cmd(vpe, its_send_inv);
2490 }
2491}
2492
2493static void its_vpe_mask_irq(struct irq_data *d)
2494{
2495 /*
2496 * We need to mask the LPI, which is described by the parent
2497 * irq_data. Instead of calling into the parent (which won't
2498 * exactly do the right thing), let's simply use the
2499 * parent_data pointer. Yes, I'm naughty.
2500 */
2501 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
2502 its_vpe_send_inv(d);
2503}
2504
2505static void its_vpe_unmask_irq(struct irq_data *d)
2506{
2507 /* Same hack as above... */
2508 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
2509 its_vpe_send_inv(d);
2510}
2511
2512static int its_vpe_set_irqchip_state(struct irq_data *d,
2513 enum irqchip_irq_state which,
2514 bool state)
2515{
2516 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2517
2518 if (which != IRQCHIP_STATE_PENDING)
2519 return -EINVAL;
2520
2521 if (gic_rdists->has_direct_lpi) {
2522 void __iomem *rdbase;
2523
2524 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
2525 if (state) {
2526 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
2527 } else {
2528 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2529 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2530 cpu_relax();
2531 }
2532 } else {
2533 if (state)
2534 its_vpe_send_cmd(vpe, its_send_int);
2535 else
2536 its_vpe_send_cmd(vpe, its_send_clear);
2537 }
2538
2539 return 0;
2540}
2541
2542static struct irq_chip its_vpe_irq_chip = {
2543 .name = "GICv4-vpe",
2544 .irq_mask = its_vpe_mask_irq,
2545 .irq_unmask = its_vpe_unmask_irq,
2546 .irq_eoi = irq_chip_eoi_parent,
2547 .irq_set_affinity = its_vpe_set_affinity,
2548 .irq_set_irqchip_state = its_vpe_set_irqchip_state,
2549 .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
2550};
2551
2552static int its_vpe_id_alloc(void)
2553{
2554 return ida_simple_get(&its_vpeid_ida, 0, 1 << 16, GFP_KERNEL);
2555}
2556
2557static void its_vpe_id_free(u16 id)
2558{
2559 ida_simple_remove(&its_vpeid_ida, id);
2560}
2561
2562static int its_vpe_init(struct its_vpe *vpe)
2563{
2564 struct page *vpt_page;
2565 int vpe_id;
2566
2567 /* Allocate vpe_id */
2568 vpe_id = its_vpe_id_alloc();
2569 if (vpe_id < 0)
2570 return vpe_id;
2571
2572 /* Allocate VPT */
2573 vpt_page = its_allocate_pending_table(GFP_KERNEL);
2574 if (!vpt_page) {
2575 its_vpe_id_free(vpe_id);
2576 return -ENOMEM;
2577 }
2578
2579 if (!its_alloc_vpe_table(vpe_id)) {
2580 its_vpe_id_free(vpe_id);
2581 its_free_pending_table(vpt_page);
2582 return -ENOMEM;
2583 }
2584
2585 vpe->vpe_id = vpe_id;
2586 vpe->vpt_page = vpt_page;
2587 vpe->vpe_proxy_event = -1;
2588
2589 return 0;
2590}
2591
2592static void its_vpe_teardown(struct its_vpe *vpe)
2593{
2594 its_vpe_db_proxy_unmap(vpe);
2595 its_vpe_id_free(vpe->vpe_id);
2596 its_free_pending_table(vpe->vpt_page);
2597}
2598
2599static void its_vpe_irq_domain_free(struct irq_domain *domain,
2600 unsigned int virq,
2601 unsigned int nr_irqs)
2602{
2603 struct its_vm *vm = domain->host_data;
2604 int i;
2605
2606 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2607
2608 for (i = 0; i < nr_irqs; i++) {
2609 struct irq_data *data = irq_domain_get_irq_data(domain,
2610 virq + i);
2611 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
2612
2613 BUG_ON(vm != vpe->its_vm);
2614
2615 clear_bit(data->hwirq, vm->db_bitmap);
2616 its_vpe_teardown(vpe);
2617 irq_domain_reset_irq_data(data);
2618 }
2619
2620 if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
2621 its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
2622 its_free_prop_table(vm->vprop_page);
2623 }
2624}
2625
2626static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2627 unsigned int nr_irqs, void *args)
2628{
2629 struct its_vm *vm = args;
2630 unsigned long *bitmap;
2631 struct page *vprop_page;
2632 int base, nr_ids, i, err = 0;
2633
2634 BUG_ON(!vm);
2635
2636 bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids);
2637 if (!bitmap)
2638 return -ENOMEM;
2639
2640 if (nr_ids < nr_irqs) {
2641 its_lpi_free_chunks(bitmap, base, nr_ids);
2642 return -ENOMEM;
2643 }
2644
2645 vprop_page = its_allocate_prop_table(GFP_KERNEL);
2646 if (!vprop_page) {
2647 its_lpi_free_chunks(bitmap, base, nr_ids);
2648 return -ENOMEM;
2649 }
2650
2651 vm->db_bitmap = bitmap;
2652 vm->db_lpi_base = base;
2653 vm->nr_db_lpis = nr_ids;
2654 vm->vprop_page = vprop_page;
2655
2656 for (i = 0; i < nr_irqs; i++) {
2657 vm->vpes[i]->vpe_db_lpi = base + i;
2658 err = its_vpe_init(vm->vpes[i]);
2659 if (err)
2660 break;
2661 err = its_irq_gic_domain_alloc(domain, virq + i,
2662 vm->vpes[i]->vpe_db_lpi);
2663 if (err)
2664 break;
2665 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
2666 &its_vpe_irq_chip, vm->vpes[i]);
2667 set_bit(i, bitmap);
2668 }
2669
2670 if (err) {
2671 if (i > 0)
2672 its_vpe_irq_domain_free(domain, virq, i);
2673
2674 its_lpi_free_chunks(bitmap, base, nr_ids);
2675 its_free_prop_table(vprop_page);
2676 }
2677
2678 return err;
2679}
2680
2681static void its_vpe_irq_domain_activate(struct irq_domain *domain,
2682 struct irq_data *d)
2683{
2684 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2685
2686 /* Map the VPE to the first possible CPU */
2687 vpe->col_idx = cpumask_first(cpu_online_mask);
2688 its_send_vmapp(vpe, true);
2689 its_send_vinvall(vpe);
2690}
2691
2692static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
2693 struct irq_data *d)
2694{
2695 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2696
2697 its_send_vmapp(vpe, false);
2698}
2699
2700static const struct irq_domain_ops its_vpe_domain_ops = {
2701 .alloc = its_vpe_irq_domain_alloc,
2702 .free = its_vpe_irq_domain_free,
2703 .activate = its_vpe_irq_domain_activate,
2704 .deactivate = its_vpe_irq_domain_deactivate,
2705};
2706
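A VM obtains its doorbells by allocating interrupts from this domain, one per vPE; the alloc path above then allocates LPI chunks, the vPROP table, and per-vPE state. A minimal consumer sketch, assuming the per-VM domain pointer (vm->domain) and the vpes[] array used above have already been set up by the GICv4 glue:

    /* Hypothetical consumer: one doorbell interrupt per vPE */
    static int example_alloc_doorbells(struct its_vm *vm, unsigned int nr_vpes)
    {
            /* vm->vpes[0..nr_vpes-1] must already point at the vPE structs */
            return __irq_domain_alloc_irqs(vm->domain, -1, nr_vpes,
                                           NUMA_NO_NODE, vm, false, NULL);
    }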
1559static int its_force_quiescent(void __iomem *base) 2707static int its_force_quiescent(void __iomem *base)
1560{ 2708{
1561 u32 count = 1000000; /* 1s */ 2709 u32 count = 1000000; /* 1s */
@@ -1571,7 +2719,7 @@ static int its_force_quiescent(void __iomem *base)
1571 return 0; 2719 return 0;
1572 2720
1573 /* Disable the generation of all interrupts to this ITS */ 2721 /* Disable the generation of all interrupts to this ITS */
1574 val &= ~GITS_CTLR_ENABLE; 2722 val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
1575 writel_relaxed(val, base + GITS_CTLR); 2723 writel_relaxed(val, base + GITS_CTLR);
1576 2724
1577 /* Poll GITS_CTLR and wait until ITS becomes quiescent */ 2725 /* Poll GITS_CTLR and wait until ITS becomes quiescent */
@@ -1672,13 +2820,92 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
1672 return 0; 2820 return 0;
1673} 2821}
1674 2822
2823static int its_init_vpe_domain(void)
2824{
2825 struct its_node *its;
2826 u32 devid;
2827 int entries;
2828
2829 if (gic_rdists->has_direct_lpi) {
2830 pr_info("ITS: Using DirectLPI for VPE invalidation\n");
2831 return 0;
2832 }
2833
2834 /* Any ITS will do, even if not v4 */
2835 its = list_first_entry(&its_nodes, struct its_node, entry);
2836
2837 entries = roundup_pow_of_two(nr_cpu_ids);
2838 vpe_proxy.vpes = kzalloc(sizeof(*vpe_proxy.vpes) * entries,
2839 GFP_KERNEL);
2840 if (!vpe_proxy.vpes) {
2841 pr_err("ITS: Can't allocate GICv4 proxy device array\n");
2842 return -ENOMEM;
2843 }
2844
2845 /* Use the last possible DevID */
2846 devid = GENMASK(its->device_ids - 1, 0);
2847 vpe_proxy.dev = its_create_device(its, devid, entries, false);
2848 if (!vpe_proxy.dev) {
2849 kfree(vpe_proxy.vpes);
2850 pr_err("ITS: Can't allocate GICv4 proxy device\n");
2851 return -ENOMEM;
2852 }
2853
2854 BUG_ON(entries != vpe_proxy.dev->nr_ites);
2855
2856 raw_spin_lock_init(&vpe_proxy.lock);
2857 vpe_proxy.next_victim = 0;
2858 pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
2859 devid, vpe_proxy.dev->nr_ites);
2860
2861 return 0;
2862}
2863
2864static int __init its_compute_its_list_map(struct resource *res,
2865 void __iomem *its_base)
2866{
2867 int its_number;
2868 u32 ctlr;
2869
2870 /*
2871 * This is assumed to run early enough that we're
2872 * guaranteed to be single-threaded, hence no
2873 * locking. Should that change, this will need
2874 * proper locking.
2875 */
2876 its_number = find_first_zero_bit(&its_list_map, ITS_LIST_MAX);
2877 if (its_number >= ITS_LIST_MAX) {
2878 pr_err("ITS@%pa: No ITSList entry available!\n",
2879 &res->start);
2880 return -EINVAL;
2881 }
2882
2883 ctlr = readl_relaxed(its_base + GITS_CTLR);
2884 ctlr &= ~GITS_CTLR_ITS_NUMBER;
2885 ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
2886 writel_relaxed(ctlr, its_base + GITS_CTLR);
2887 ctlr = readl_relaxed(its_base + GITS_CTLR);
2888 if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
2889 its_number = ctlr & GITS_CTLR_ITS_NUMBER;
2890 its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
2891 }
2892
2893 if (test_and_set_bit(its_number, &its_list_map)) {
2894 pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
2895 &res->start, its_number);
2896 return -EINVAL;
2897 }
2898
2899 return its_number;
2900}
2901
1675static int __init its_probe_one(struct resource *res, 2902static int __init its_probe_one(struct resource *res,
1676 struct fwnode_handle *handle, int numa_node) 2903 struct fwnode_handle *handle, int numa_node)
1677{ 2904{
1678 struct its_node *its; 2905 struct its_node *its;
1679 void __iomem *its_base; 2906 void __iomem *its_base;
1680 u32 val; 2907 u32 val, ctlr;
1681 u64 baser, tmp; 2908 u64 baser, tmp, typer;
1682 int err; 2909 int err;
1683 2910
1684 its_base = ioremap(res->start, resource_size(res)); 2911 its_base = ioremap(res->start, resource_size(res));
@@ -1711,9 +2938,24 @@ static int __init its_probe_one(struct resource *res,
1711 raw_spin_lock_init(&its->lock); 2938 raw_spin_lock_init(&its->lock);
1712 INIT_LIST_HEAD(&its->entry); 2939 INIT_LIST_HEAD(&its->entry);
1713 INIT_LIST_HEAD(&its->its_device_list); 2940 INIT_LIST_HEAD(&its->its_device_list);
2941 typer = gic_read_typer(its_base + GITS_TYPER);
1714 its->base = its_base; 2942 its->base = its_base;
1715 its->phys_base = res->start; 2943 its->phys_base = res->start;
1716 its->ite_size = ((gic_read_typer(its_base + GITS_TYPER) >> 4) & 0xf) + 1; 2944 its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
2945 its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
2946 if (its->is_v4) {
2947 if (!(typer & GITS_TYPER_VMOVP)) {
2948 err = its_compute_its_list_map(res, its_base);
2949 if (err < 0)
2950 goto out_free_its;
2951
2952 pr_info("ITS@%pa: Using ITS number %d\n",
2953 &res->start, err);
2954 } else {
2955 pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
2956 }
2957 }
2958
1717 its->numa_node = numa_node; 2959 its->numa_node = numa_node;
1718 2960
1719 its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2961 its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
@@ -1760,7 +3002,11 @@ static int __init its_probe_one(struct resource *res,
1760 } 3002 }
1761 3003
1762 gits_write_cwriter(0, its->base + GITS_CWRITER); 3004 gits_write_cwriter(0, its->base + GITS_CWRITER);
1763 writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR); 3005 ctlr = readl_relaxed(its->base + GITS_CTLR);
3006 ctlr |= GITS_CTLR_ENABLE;
3007 if (its->is_v4)
3008 ctlr |= GITS_CTLR_ImDe;
3009 writel_relaxed(ctlr, its->base + GITS_CTLR);
1764 3010
1765 err = its_init_domain(handle, its); 3011 err = its_init_domain(handle, its);
1766 if (err) 3012 if (err)
@@ -1816,13 +3062,13 @@ static int __init its_of_probe(struct device_node *node)
1816 for (np = of_find_matching_node(node, its_device_id); np; 3062 for (np = of_find_matching_node(node, its_device_id); np;
1817 np = of_find_matching_node(np, its_device_id)) { 3063 np = of_find_matching_node(np, its_device_id)) {
1818 if (!of_property_read_bool(np, "msi-controller")) { 3064 if (!of_property_read_bool(np, "msi-controller")) {
1819 pr_warn("%s: no msi-controller property, ITS ignored\n", 3065 pr_warn("%pOF: no msi-controller property, ITS ignored\n",
1820 np->full_name); 3066 np);
1821 continue; 3067 continue;
1822 } 3068 }
1823 3069
1824 if (of_address_to_resource(np, 0, &res)) { 3070 if (of_address_to_resource(np, 0, &res)) {
1825 pr_warn("%s: no regs?\n", np->full_name); 3071 pr_warn("%pOF: no regs?\n", np);
1826 continue; 3072 continue;
1827 } 3073 }
1828 3074
@@ -1984,6 +3230,9 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
1984 struct irq_domain *parent_domain) 3230 struct irq_domain *parent_domain)
1985{ 3231{
1986 struct device_node *of_node; 3232 struct device_node *of_node;
3233 struct its_node *its;
3234 bool has_v4 = false;
3235 int err;
1987 3236
1988 its_parent = parent_domain; 3237 its_parent = parent_domain;
1989 of_node = to_of_node(handle); 3238 of_node = to_of_node(handle);
@@ -1998,5 +3247,20 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
1998 } 3247 }
1999 3248
2000 gic_rdists = rdists; 3249 gic_rdists = rdists;
2001 return its_alloc_lpi_tables(); 3250 err = its_alloc_lpi_tables();
3251 if (err)
3252 return err;
3253
3254 list_for_each_entry(its, &its_nodes, entry)
3255 has_v4 |= its->is_v4;
3256
3257 if (has_v4 && rdists->has_vlpis) {
3258 if (its_init_vpe_domain() ||
3259 its_init_v4(parent_domain, &its_vpe_domain_ops)) {
3260 rdists->has_vlpis = false;
3261 pr_err("ITS: Disabling GICv4 support\n");
3262 }
3263 }
3264
3265 return 0;
2002} 3266}
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 984c3ecfd22c..519149ec9053 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved. 2 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
3 * Author: Marc Zyngier <marc.zyngier@arm.com> 3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
@@ -423,24 +423,14 @@ static void __init gic_dist_init(void)
423 gic_write_irouter(affinity, base + GICD_IROUTER + i * 8); 423 gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
424} 424}
425 425
426static int gic_populate_rdist(void) 426static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
427{ 427{
428 unsigned long mpidr = cpu_logical_map(smp_processor_id()); 428 int ret = -ENODEV;
429 u64 typer;
430 u32 aff;
431 int i; 429 int i;
432 430
433 /*
434 * Convert affinity to a 32bit value that can be matched to
435 * GICR_TYPER bits [63:32].
436 */
437 aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
438 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
439 MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
440 MPIDR_AFFINITY_LEVEL(mpidr, 0));
441
442 for (i = 0; i < gic_data.nr_redist_regions; i++) { 431 for (i = 0; i < gic_data.nr_redist_regions; i++) {
443 void __iomem *ptr = gic_data.redist_regions[i].redist_base; 432 void __iomem *ptr = gic_data.redist_regions[i].redist_base;
433 u64 typer;
444 u32 reg; 434 u32 reg;
445 435
446 reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK; 436 reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
@@ -452,15 +442,9 @@ static int gic_populate_rdist(void)
452 442
453 do { 443 do {
454 typer = gic_read_typer(ptr + GICR_TYPER); 444 typer = gic_read_typer(ptr + GICR_TYPER);
455 if ((typer >> 32) == aff) { 445 ret = fn(gic_data.redist_regions + i, ptr);
456 u64 offset = ptr - gic_data.redist_regions[i].redist_base; 446 if (!ret)
457 gic_data_rdist_rd_base() = ptr;
458 gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
459 pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
460 smp_processor_id(), mpidr, i,
461 &gic_data_rdist()->phys_base);
462 return 0; 447 return 0;
463 }
464 448
465 if (gic_data.redist_regions[i].single_redist) 449 if (gic_data.redist_regions[i].single_redist)
466 break; 450 break;
@@ -475,12 +459,71 @@ static int gic_populate_rdist(void)
475 } while (!(typer & GICR_TYPER_LAST)); 459 } while (!(typer & GICR_TYPER_LAST));
476 } 460 }
477 461
462 return ret ? -ENODEV : 0;
463}
464
465static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
466{
467 unsigned long mpidr = cpu_logical_map(smp_processor_id());
468 u64 typer;
469 u32 aff;
470
471 /*
472 * Convert affinity to a 32bit value that can be matched to
473 * GICR_TYPER bits [63:32].
474 */
475 aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
476 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
477 MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
478 MPIDR_AFFINITY_LEVEL(mpidr, 0));
479
480 typer = gic_read_typer(ptr + GICR_TYPER);
481 if ((typer >> 32) == aff) {
482 u64 offset = ptr - region->redist_base;
483 gic_data_rdist_rd_base() = ptr;
484 gic_data_rdist()->phys_base = region->phys_base + offset;
485
486 pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
487 smp_processor_id(), mpidr,
488 (int)(region - gic_data.redist_regions),
489 &gic_data_rdist()->phys_base);
490 return 0;
491 }
492
493 /* Try next one */
494 return 1;
495}
496
497static int gic_populate_rdist(void)
498{
499 if (gic_iterate_rdists(__gic_populate_rdist) == 0)
500 return 0;
501
478 /* We couldn't even deal with ourselves... */ 502 /* We couldn't even deal with ourselves... */
479 WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n", 503 WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
480 smp_processor_id(), mpidr); 504 smp_processor_id(),
505 (unsigned long)cpu_logical_map(smp_processor_id()));
481 return -ENODEV; 506 return -ENODEV;
482} 507}
483 508
509static int __gic_update_vlpi_properties(struct redist_region *region,
510 void __iomem *ptr)
511{
512 u64 typer = gic_read_typer(ptr + GICR_TYPER);
513 gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
514 gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS);
515
516 return 1;
517}
518
519static void gic_update_vlpi_properties(void)
520{
521 gic_iterate_rdists(__gic_update_vlpi_properties);
522 pr_info("%sVLPI support, %sdirect LPI support\n",
523 !gic_data.rdists.has_vlpis ? "no " : "",
524 !gic_data.rdists.has_direct_lpi ? "no " : "");
525}
526
484static void gic_cpu_sys_reg_init(void) 527static void gic_cpu_sys_reg_init(void)
485{ 528{
486 /* 529 /*
@@ -677,6 +720,8 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
677 else 720 else
678 gic_dist_wait_for_rwp(); 721 gic_dist_wait_for_rwp();
679 722
723 irq_data_update_effective_affinity(d, cpumask_of(cpu));
724
680 return IRQ_SET_MASK_OK_DONE; 725 return IRQ_SET_MASK_OK_DONE;
681} 726}
682#else 727#else
@@ -775,6 +820,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
775 irq_domain_set_info(d, irq, hw, chip, d->host_data, 820 irq_domain_set_info(d, irq, hw, chip, d->host_data,
776 handle_fasteoi_irq, NULL, NULL); 821 handle_fasteoi_irq, NULL, NULL);
777 irq_set_probe(irq); 822 irq_set_probe(irq);
823 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
778 } 824 }
779 /* LPIs */ 825 /* LPIs */
780 if (hw >= 8192 && hw < GIC_ID_NR) { 826 if (hw >= 8192 && hw < GIC_ID_NR) {
@@ -953,6 +999,8 @@ static int __init gic_init_bases(void __iomem *dist_base,
953 gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops, 999 gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
954 &gic_data); 1000 &gic_data);
955 gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); 1001 gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
1002 gic_data.rdists.has_vlpis = true;
1003 gic_data.rdists.has_direct_lpi = true;
956 1004
957 if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { 1005 if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
958 err = -ENOMEM; 1006 err = -ENOMEM;
@@ -961,6 +1009,8 @@ static int __init gic_init_bases(void __iomem *dist_base,
961 1009
962 set_handle_irq(gic_handle_irq); 1010 set_handle_irq(gic_handle_irq);
963 1011
1012 gic_update_vlpi_properties();
1013
964 if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis()) 1014 if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
965 its_init(handle, &gic_data.rdists, gic_data.domain); 1015 its_init(handle, &gic_data.rdists, gic_data.domain);
966 1016
@@ -1067,7 +1117,7 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
1067 if (WARN_ON(cpu == -1)) 1117 if (WARN_ON(cpu == -1))
1068 continue; 1118 continue;
1069 1119
1070 pr_cont("%s[%d] ", cpu_node->full_name, cpu); 1120 pr_cont("%pOF[%d] ", cpu_node, cpu);
1071 1121
1072 cpumask_set_cpu(cpu, &part->mask); 1122 cpumask_set_cpu(cpu, &part->mask);
1073 } 1123 }
@@ -1122,6 +1172,7 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
1122 if (!ret) 1172 if (!ret)
1123 gic_v3_kvm_info.vcpu = r; 1173 gic_v3_kvm_info.vcpu = r;
1124 1174
1175 gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
1125 gic_set_kvm_info(&gic_v3_kvm_info); 1176 gic_set_kvm_info(&gic_v3_kvm_info);
1126} 1177}
1127 1178
@@ -1135,15 +1186,13 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
1135 1186
1136 dist_base = of_iomap(node, 0); 1187 dist_base = of_iomap(node, 0);
1137 if (!dist_base) { 1188 if (!dist_base) {
1138 pr_err("%s: unable to map gic dist registers\n", 1189 pr_err("%pOF: unable to map gic dist registers\n", node);
1139 node->full_name);
1140 return -ENXIO; 1190 return -ENXIO;
1141 } 1191 }
1142 1192
1143 err = gic_validate_dist_version(dist_base); 1193 err = gic_validate_dist_version(dist_base);
1144 if (err) { 1194 if (err) {
1145 pr_err("%s: no distributor detected, giving up\n", 1195 pr_err("%pOF: no distributor detected, giving up\n", node);
1146 node->full_name);
1147 goto out_unmap_dist; 1196 goto out_unmap_dist;
1148 } 1197 }
1149 1198
@@ -1163,8 +1212,7 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
1163 ret = of_address_to_resource(node, 1 + i, &res); 1212 ret = of_address_to_resource(node, 1 + i, &res);
1164 rdist_regs[i].redist_base = of_iomap(node, 1 + i); 1213 rdist_regs[i].redist_base = of_iomap(node, 1 + i);
1165 if (ret || !rdist_regs[i].redist_base) { 1214 if (ret || !rdist_regs[i].redist_base) {
1166 pr_err("%s: couldn't map region %d\n", 1215 pr_err("%pOF: couldn't map region %d\n", node, i);
1167 node->full_name, i);
1168 err = -ENODEV; 1216 err = -ENODEV;
1169 goto out_unmap_rdist; 1217 goto out_unmap_rdist;
1170 } 1218 }
@@ -1418,6 +1466,7 @@ static void __init gic_acpi_setup_kvm_info(void)
1418 vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1; 1466 vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
1419 } 1467 }
1420 1468
1469 gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
1421 gic_set_kvm_info(&gic_v3_kvm_info); 1470 gic_set_kvm_info(&gic_v3_kvm_info);
1422} 1471}
1423 1472
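A note on the discovery logic in this file: gic_init_bases() starts with has_vlpis and has_direct_lpi forced to true, and gic_update_vlpi_properties() then ANDs in each redistributor's GICR_TYPER bits, so GICv4 is only advertised (including to KVM through has_v4) if every redistributor supports it. A standalone sketch of that reduction, where nr_rdists() and rdist_typer() are hypothetical stand-ins for the driver's gic_iterate_rdists():

#include <stdbool.h>
#include <stdint.h>

#define GICR_TYPER_VLPIS        (UINT64_C(1) << 1)
#define GICR_TYPER_DirectLPIS   (UINT64_C(1) << 3)

/* Hypothetical accessors standing in for the driver's rdist iterator. */
extern unsigned int nr_rdists(void);
extern uint64_t rdist_typer(unsigned int i);

static void probe_vlpi_support(bool *has_vlpis, bool *has_direct_lpi)
{
        unsigned int i;

        /* Start optimistic; one non-v4 redistributor clears the flags. */
        *has_vlpis = true;
        *has_direct_lpi = true;

        for (i = 0; i < nr_rdists(); i++) {
                uint64_t typer = rdist_typer(i);

                *has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
                *has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS);
        }
}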
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
new file mode 100644
index 000000000000..2370e6d9e603
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -0,0 +1,225 @@
1/*
2 * Copyright (C) 2016,2017 ARM Limited, All Rights Reserved.
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/interrupt.h>
19#include <linux/irq.h>
20#include <linux/irqdomain.h>
21#include <linux/msi.h>
22#include <linux/sched.h>
23
24#include <linux/irqchip/arm-gic-v4.h>
25
26/*
27 * WARNING: The blurb below assumes that you understand the
28 * intricacies of GICv3, GICv4, and how a guest's view of a GICv3 gets
29 * translated into GICv4 commands. So it effectively targets at most
30 * two individuals. You know who you are.
31 *
32 * The core GICv4 code is designed to *avoid* exposing too much of the
33 * core GIC code (that would in turn leak into the hypervisor code),
34 * and instead provide a hypervisor agnostic interface to the HW (of
35 * course, the astute reader will quickly realize that hypervisor
36 * agnostic actually means KVM-specific - what were you thinking?).
37 *
38 * In order to achieve a modicum of isolation, we try to hide most of
39 * the GICv4 "stuff" behind normal irqchip operations:
40 *
41 * - Any guest-visible VLPI is backed by a Linux interrupt (and a
42 * physical LPI which gets unmapped when the guest maps the
43 * VLPI). This allows the same DevID/EventID pair to be either
44 * mapped to the LPI (host) or the VLPI (guest). Note that this is
45 * exclusive, and you cannot have both.
46 *
47 * - Enabling/disabling a VLPI is done by issuing mask/unmask calls.
48 *
49 * - Guest INT/CLEAR commands are implemented through
50 * irq_set_irqchip_state().
51 *
52 * - The *bizarre* stuff (mapping/unmapping an interrupt to a VLPI, or
53 * issuing an INV after changing a priority) gets shoved into the
54 * irq_set_vcpu_affinity() method. While this is quite horrible
55 * (let's face it, this is the irqchip version of an ioctl), it
56 * confines the crap to a single location. And map/unmap really is
57 * about setting the affinity of a VLPI to a vcpu, so only INV is
58 * majorly out of place. So there.
59 *
60 * A number of commands are simply not provided by this interface, as
61 * they do not make direct sense. For example, MAPD is purely local to
62 * the virtual ITS (because it references a virtual device, and the
63 * physical ITS is still very much in charge of the physical
64 * device). Same goes for things like MAPC (the physical ITS deals
65 * with the actual vPE affinity, and not the braindead concept of
66 * collection). SYNC is not provided either, as each and every command
67 * is followed by a VSYNC. This could be relaxed in the future, should
68 * this be seen as a bottleneck (yes, this means *never*).
69 *
70 * But handling VLPIs is only one side of the job of the GICv4
71 * code. The other (darker) side is to take care of the doorbell
72 * interrupts which are delivered when a VLPI targeting a non-running
73 * vcpu is being made pending.
74 *
75 * The choice made here is that each vcpu (VPE in old northern GICv4
76 * dialect) gets a single doorbell LPI, no matter how many interrupts
77 * are targeting it. This has a nice property, which is that the
78 * interrupt becomes a handle for the VPE, and that the hypervisor
79 * code can manipulate it through the normal interrupt API:
80 *
81 * - VMs (or rather the VM abstraction that matters to the GIC)
82 * contain an irq domain where each interrupt maps to a VPE. In
83 * turn, this domain sits on top of the normal LPI allocator, and a
84 * specially crafted irq_chip implementation.
85 *
86 * - mask/unmask do what is expected on the doorbell interrupt.
87 *
88 * - irq_set_affinity is used to move a VPE from one redistributor to
89 * another.
90 *
91 * - irq_set_vcpu_affinity once again gets hijacked for the purpose of
92 * creating a new sub-API, namely scheduling/descheduling a VPE
93 * (which involves programming GICR_V{PROP,PEND}BASER) and
94 * performing INVALL operations.
95 */
96
97static struct irq_domain *gic_domain;
98static const struct irq_domain_ops *vpe_domain_ops;
99
100int its_alloc_vcpu_irqs(struct its_vm *vm)
101{
102 int vpe_base_irq, i;
103
104 vm->fwnode = irq_domain_alloc_named_id_fwnode("GICv4-vpe",
105 task_pid_nr(current));
106 if (!vm->fwnode)
107 goto err;
108
109 vm->domain = irq_domain_create_hierarchy(gic_domain, 0, vm->nr_vpes,
110 vm->fwnode, vpe_domain_ops,
111 vm);
112 if (!vm->domain)
113 goto err;
114
115 for (i = 0; i < vm->nr_vpes; i++) {
116 vm->vpes[i]->its_vm = vm;
117 vm->vpes[i]->idai = true;
118 }
119
120 vpe_base_irq = __irq_domain_alloc_irqs(vm->domain, -1, vm->nr_vpes,
121 NUMA_NO_NODE, vm,
122 false, NULL);
123 if (vpe_base_irq <= 0)
124 goto err;
125
126 for (i = 0; i < vm->nr_vpes; i++)
127 vm->vpes[i]->irq = vpe_base_irq + i;
128
129 return 0;
130
131err:
132 if (vm->domain)
133 irq_domain_remove(vm->domain);
134 if (vm->fwnode)
135 irq_domain_free_fwnode(vm->fwnode);
136
137 return -ENOMEM;
138}
139
140void its_free_vcpu_irqs(struct its_vm *vm)
141{
142 irq_domain_free_irqs(vm->vpes[0]->irq, vm->nr_vpes);
143 irq_domain_remove(vm->domain);
144 irq_domain_free_fwnode(vm->fwnode);
145}
146
147static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
148{
149 return irq_set_vcpu_affinity(vpe->irq, info);
150}
151
152int its_schedule_vpe(struct its_vpe *vpe, bool on)
153{
154 struct its_cmd_info info;
155
156 WARN_ON(preemptible());
157
158 info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE;
159
160 return its_send_vpe_cmd(vpe, &info);
161}
162
163int its_invall_vpe(struct its_vpe *vpe)
164{
165 struct its_cmd_info info = {
166 .cmd_type = INVALL_VPE,
167 };
168
169 return its_send_vpe_cmd(vpe, &info);
170}
171
172int its_map_vlpi(int irq, struct its_vlpi_map *map)
173{
174 struct its_cmd_info info = {
175 .cmd_type = MAP_VLPI,
176 .map = map,
177 };
178
179 /*
180 * The host will never see that interrupt firing again, so it
181 * is vital that we don't do any lazy masking.
182 */
183 irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
184
185 return irq_set_vcpu_affinity(irq, &info);
186}
187
188int its_get_vlpi(int irq, struct its_vlpi_map *map)
189{
190 struct its_cmd_info info = {
191 .cmd_type = GET_VLPI,
192 .map = map,
193 };
194
195 return irq_set_vcpu_affinity(irq, &info);
196}
197
198int its_unmap_vlpi(int irq)
199{
200 irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
201 return irq_set_vcpu_affinity(irq, NULL);
202}
203
204int its_prop_update_vlpi(int irq, u8 config, bool inv)
205{
206 struct its_cmd_info info = {
207 .cmd_type = inv ? PROP_UPDATE_AND_INV_VLPI : PROP_UPDATE_VLPI,
208 .config = config,
209 };
210
211 return irq_set_vcpu_affinity(irq, &info);
212}
213
214int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops)
215{
216 if (domain) {
217 pr_info("ITS: Enabling GICv4 support\n");
218 gic_domain = domain;
219 vpe_domain_ops = ops;
220 return 0;
221 }
222
223 pr_err("ITS: No GICv4 VPE domain allocated\n");
224 return -ENODEV;
225}
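To make the design blurb at the top of this file concrete, here is a minimal sketch of how a hypervisor-side caller (KVM, in practice) might drive the interface; demo_gicv4_wire_up() is a hypothetical helper, the caller is assumed to have populated vm->vpes and vm->nr_vpes, and only the its_* entry points come from this patch:

#include <linux/irqchip/arm-gic-v4.h>
#include <linux/types.h>

static int demo_gicv4_wire_up(struct its_vm *vm, int host_irq, u32 vintid)
{
        struct its_vlpi_map map;
        int ret;

        /* Allocates one doorbell LPI per VPE, in one go. */
        ret = its_alloc_vcpu_irqs(vm);
        if (ret)
                return ret;

        /* Retarget host_irq's DevID/EventID pair to the guest as a VLPI. */
        map.vm = vm;
        map.vpe = vm->vpes[0];
        map.vintid = vintid;
        map.db_enabled = true;
        ret = its_map_vlpi(host_irq, &map);
        if (ret)
                goto out_free;

        /* Make VPE 0 resident; the caller must not be preemptible here. */
        ret = its_schedule_vpe(vm->vpes[0], true);
        if (ret)
                goto out_unmap;

        return 0;

out_unmap:
        its_unmap_vlpi(host_irq);
out_free:
        its_free_vcpu_irqs(vm);
        return ret;
}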
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index d3e7c43718b8..651d726e8b12 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -344,6 +344,8 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
344 writel_relaxed(val | bit, reg); 344 writel_relaxed(val | bit, reg);
345 gic_unlock_irqrestore(flags); 345 gic_unlock_irqrestore(flags);
346 346
347 irq_data_update_effective_affinity(d, cpumask_of(cpu));
348
347 return IRQ_SET_MASK_OK_DONE; 349 return IRQ_SET_MASK_OK_DONE;
348} 350}
349#endif 351#endif
@@ -413,7 +415,7 @@ static void gic_handle_cascade_irq(struct irq_desc *desc)
413 chained_irq_exit(chip, desc); 415 chained_irq_exit(chip, desc);
414} 416}
415 417
416static struct irq_chip gic_chip = { 418static const struct irq_chip gic_chip = {
417 .irq_mask = gic_mask_irq, 419 .irq_mask = gic_mask_irq,
418 .irq_unmask = gic_unmask_irq, 420 .irq_unmask = gic_unmask_irq,
419 .irq_eoi = gic_eoi_irq, 421 .irq_eoi = gic_eoi_irq,
@@ -969,6 +971,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
969 irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data, 971 irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
970 handle_fasteoi_irq, NULL, NULL); 972 handle_fasteoi_irq, NULL, NULL);
971 irq_set_probe(irq); 973 irq_set_probe(irq);
974 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
972 } 975 }
973 return 0; 976 return 0;
974} 977}
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index c1b4ee955dbe..5b4fd2f4e5f8 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -165,6 +165,8 @@ static int hip04_irq_set_affinity(struct irq_data *d,
165 writel_relaxed(val | bit, reg); 165 writel_relaxed(val | bit, reg);
166 raw_spin_unlock(&irq_controller_lock); 166 raw_spin_unlock(&irq_controller_lock);
167 167
168 irq_data_update_effective_affinity(d, cpumask_of(cpu));
169
168 return IRQ_SET_MASK_OK; 170 return IRQ_SET_MASK_OK;
169} 171}
170#endif 172#endif
@@ -312,6 +314,7 @@ static int hip04_irq_domain_map(struct irq_domain *d, unsigned int irq,
312 irq_set_chip_and_handler(irq, &hip04_irq_chip, 314 irq_set_chip_and_handler(irq, &hip04_irq_chip,
313 handle_fasteoi_irq); 315 handle_fasteoi_irq);
314 irq_set_probe(irq); 316 irq_set_probe(irq);
317 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
315 } 318 }
316 irq_set_chip_data(irq, d->host_data); 319 irq_set_chip_data(irq, d->host_data);
317 return 0; 320 return 0;
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index bb36f572e322..675eda5ff2b8 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -214,13 +214,13 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
214 int i; 214 int i;
215 215
216 if (!parent) { 216 if (!parent) {
217 pr_err("%s: no parent, giving up\n", node->full_name); 217 pr_err("%pOF: no parent, giving up\n", node);
218 return -ENODEV; 218 return -ENODEV;
219 } 219 }
220 220
221 parent_domain = irq_find_host(parent); 221 parent_domain = irq_find_host(parent);
222 if (!parent_domain) { 222 if (!parent_domain) {
223 pr_err("%s: unable to get parent domain\n", node->full_name); 223 pr_err("%pOF: unable to get parent domain\n", node);
224 return -ENXIO; 224 return -ENXIO;
225 } 225 }
226 226
diff --git a/drivers/irqchip/irq-lpc32xx.c b/drivers/irqchip/irq-lpc32xx.c
index 1034aeb2e98a..a48357d369b5 100644
--- a/drivers/irqchip/irq-lpc32xx.c
+++ b/drivers/irqchip/irq-lpc32xx.c
@@ -191,7 +191,7 @@ static int __init lpc32xx_of_ic_init(struct device_node *node,
191 191
192 irqc->base = of_iomap(node, 0); 192 irqc->base = of_iomap(node, 0);
193 if (!irqc->base) { 193 if (!irqc->base) {
194 pr_err("%s: unable to map registers\n", node->full_name); 194 pr_err("%pOF: unable to map registers\n", node);
195 kfree(irqc); 195 kfree(irqc);
196 return -EINVAL; 196 return -EINVAL;
197 } 197 }
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
index 02cca74cab94..119f4ef0d421 100644
--- a/drivers/irqchip/irq-ls-scfg-msi.c
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -17,13 +17,32 @@
17#include <linux/irq.h> 17#include <linux/irq.h>
18#include <linux/irqchip/chained_irq.h> 18#include <linux/irqchip/chained_irq.h>
19#include <linux/irqdomain.h> 19#include <linux/irqdomain.h>
20#include <linux/of_irq.h>
20#include <linux/of_pci.h> 21#include <linux/of_pci.h>
21#include <linux/of_platform.h> 22#include <linux/of_platform.h>
22#include <linux/spinlock.h> 23#include <linux/spinlock.h>
23 24
24#define MSI_MAX_IRQS 32 25#define MSI_IRQS_PER_MSIR 32
25#define MSI_IBS_SHIFT 3 26#define MSI_MSIR_OFFSET 4
26#define MSIR 4 27
28#define MSI_LS1043V1_1_IRQS_PER_MSIR 8
29#define MSI_LS1043V1_1_MSIR_OFFSET 0x10
30
31struct ls_scfg_msi_cfg {
32 u32 ibs_shift; /* Shift of interrupt bit select */
33 u32 msir_irqs; /* The irq number per MSIR */
34 u32 msir_base; /* The base address of MSIR */
35};
36
37struct ls_scfg_msir {
38 struct ls_scfg_msi *msi_data;
39 unsigned int index;
40 unsigned int gic_irq;
41 unsigned int bit_start;
42 unsigned int bit_end;
43 unsigned int srs; /* Shared interrupt register select */
44 void __iomem *reg;
45};
27 46
28struct ls_scfg_msi { 47struct ls_scfg_msi {
29 spinlock_t lock; 48 spinlock_t lock;
@@ -32,8 +51,11 @@ struct ls_scfg_msi {
32 struct irq_domain *msi_domain; 51 struct irq_domain *msi_domain;
33 void __iomem *regs; 52 void __iomem *regs;
34 phys_addr_t msiir_addr; 53 phys_addr_t msiir_addr;
35 int irq; 54 struct ls_scfg_msi_cfg *cfg;
36 DECLARE_BITMAP(used, MSI_MAX_IRQS); 55 u32 msir_num;
56 struct ls_scfg_msir *msir;
57 u32 irqs_num;
58 unsigned long *used;
37}; 59};
38 60
39static struct irq_chip ls_scfg_msi_irq_chip = { 61static struct irq_chip ls_scfg_msi_irq_chip = {
@@ -49,19 +71,56 @@ static struct msi_domain_info ls_scfg_msi_domain_info = {
49 .chip = &ls_scfg_msi_irq_chip, 71 .chip = &ls_scfg_msi_irq_chip,
50}; 72};
51 73
74static int msi_affinity_flag = 1;
75
76static int __init early_parse_ls_scfg_msi(char *p)
77{
78 if (p && strncmp(p, "no-affinity", 11) == 0)
79 msi_affinity_flag = 0;
80 else
81 msi_affinity_flag = 1;
82
83 return 0;
84}
85early_param("lsmsi", early_parse_ls_scfg_msi);
86
52static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) 87static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
53{ 88{
54 struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data); 89 struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data);
55 90
56 msg->address_hi = upper_32_bits(msi_data->msiir_addr); 91 msg->address_hi = upper_32_bits(msi_data->msiir_addr);
57 msg->address_lo = lower_32_bits(msi_data->msiir_addr); 92 msg->address_lo = lower_32_bits(msi_data->msiir_addr);
58 msg->data = data->hwirq << MSI_IBS_SHIFT; 93 msg->data = data->hwirq;
94
95 if (msi_affinity_flag)
96 msg->data |= cpumask_first(data->common->affinity);
59} 97}
60 98
61static int ls_scfg_msi_set_affinity(struct irq_data *irq_data, 99static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
62 const struct cpumask *mask, bool force) 100 const struct cpumask *mask, bool force)
63{ 101{
64 return -EINVAL; 102 struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(irq_data);
103 u32 cpu;
104
105 if (!msi_affinity_flag)
106 return -EINVAL;
107
108 if (!force)
109 cpu = cpumask_any_and(mask, cpu_online_mask);
110 else
111 cpu = cpumask_first(mask);
112
113 if (cpu >= msi_data->msir_num)
114 return -EINVAL;
115
116 if (msi_data->msir[cpu].gic_irq <= 0) {
117 pr_warn("cannot bind the irq to cpu%d\n", cpu);
118 return -EINVAL;
119 }
120
121 cpumask_copy(irq_data->common->affinity, mask);
122
123 return IRQ_SET_MASK_OK;
65} 124}
66 125
67static struct irq_chip ls_scfg_msi_parent_chip = { 126static struct irq_chip ls_scfg_msi_parent_chip = {
@@ -81,8 +140,8 @@ static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
81 WARN_ON(nr_irqs != 1); 140 WARN_ON(nr_irqs != 1);
82 141
83 spin_lock(&msi_data->lock); 142 spin_lock(&msi_data->lock);
84 pos = find_first_zero_bit(msi_data->used, MSI_MAX_IRQS); 143 pos = find_first_zero_bit(msi_data->used, msi_data->irqs_num);
85 if (pos < MSI_MAX_IRQS) 144 if (pos < msi_data->irqs_num)
86 __set_bit(pos, msi_data->used); 145 __set_bit(pos, msi_data->used);
87 else 146 else
88 err = -ENOSPC; 147 err = -ENOSPC;
@@ -106,7 +165,7 @@ static void ls_scfg_msi_domain_irq_free(struct irq_domain *domain,
106 int pos; 165 int pos;
107 166
108 pos = d->hwirq; 167 pos = d->hwirq;
109 if (pos < 0 || pos >= MSI_MAX_IRQS) { 168 if (pos < 0 || pos >= msi_data->irqs_num) {
110 pr_err("failed to teardown msi. Invalid hwirq %d\n", pos); 169 pr_err("failed to teardown msi. Invalid hwirq %d\n", pos);
111 return; 170 return;
112 } 171 }
@@ -123,15 +182,22 @@ static const struct irq_domain_ops ls_scfg_msi_domain_ops = {
123 182
124static void ls_scfg_msi_irq_handler(struct irq_desc *desc) 183static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
125{ 184{
126 struct ls_scfg_msi *msi_data = irq_desc_get_handler_data(desc); 185 struct ls_scfg_msir *msir = irq_desc_get_handler_data(desc);
186 struct ls_scfg_msi *msi_data = msir->msi_data;
127 unsigned long val; 187 unsigned long val;
128 int pos, virq; 188 int pos, size, virq, hwirq;
129 189
130 chained_irq_enter(irq_desc_get_chip(desc), desc); 190 chained_irq_enter(irq_desc_get_chip(desc), desc);
131 191
132 val = ioread32be(msi_data->regs + MSIR); 192 val = ioread32be(msir->reg);
133 for_each_set_bit(pos, &val, MSI_MAX_IRQS) { 193
134 virq = irq_find_mapping(msi_data->parent, (31 - pos)); 194 pos = msir->bit_start;
195 size = msir->bit_end + 1;
196
197 for_each_set_bit_from(pos, &val, size) {
198 hwirq = ((msir->bit_end - pos) << msi_data->cfg->ibs_shift) |
199 msir->srs;
200 virq = irq_find_mapping(msi_data->parent, hwirq);
135 if (virq) 201 if (virq)
136 generic_handle_irq(virq); 202 generic_handle_irq(virq);
137 } 203 }
@@ -143,7 +209,7 @@ static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
143{ 209{
144 /* Initialize MSI domain parent */ 210 /* Initialize MSI domain parent */
145 msi_data->parent = irq_domain_add_linear(NULL, 211 msi_data->parent = irq_domain_add_linear(NULL,
146 MSI_MAX_IRQS, 212 msi_data->irqs_num,
147 &ls_scfg_msi_domain_ops, 213 &ls_scfg_msi_domain_ops,
148 msi_data); 214 msi_data);
149 if (!msi_data->parent) { 215 if (!msi_data->parent) {
@@ -164,16 +230,117 @@ static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
164 return 0; 230 return 0;
165} 231}
166 232
233static int ls_scfg_msi_setup_hwirq(struct ls_scfg_msi *msi_data, int index)
234{
235 struct ls_scfg_msir *msir;
236 int virq, i, hwirq;
237
238 virq = platform_get_irq(msi_data->pdev, index);
239 if (virq <= 0)
240 return -ENODEV;
241
242 msir = &msi_data->msir[index];
243 msir->index = index;
244 msir->msi_data = msi_data;
245 msir->gic_irq = virq;
246 msir->reg = msi_data->regs + msi_data->cfg->msir_base + 4 * index;
247
248 if (msi_data->cfg->msir_irqs == MSI_LS1043V1_1_IRQS_PER_MSIR) {
249 msir->bit_start = 32 - ((msir->index + 1) *
250 MSI_LS1043V1_1_IRQS_PER_MSIR);
251 msir->bit_end = msir->bit_start +
252 MSI_LS1043V1_1_IRQS_PER_MSIR - 1;
253 } else {
254 msir->bit_start = 0;
255 msir->bit_end = msi_data->cfg->msir_irqs - 1;
256 }
257
258 irq_set_chained_handler_and_data(msir->gic_irq,
259 ls_scfg_msi_irq_handler,
260 msir);
261
262 if (msi_affinity_flag) {
263 /* Associate MSIR interrupt to the cpu */
264 irq_set_affinity(msir->gic_irq, get_cpu_mask(index));
265 msir->srs = 0; /* This value is determined by the CPU */
266 } else
267 msir->srs = index;
268
269 /* Release the hwirqs corresponding to this MSIR */
270 if (!msi_affinity_flag || msir->index == 0) {
271 for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
272 hwirq = i << msi_data->cfg->ibs_shift | msir->index;
273 bitmap_clear(msi_data->used, hwirq, 1);
274 }
275 }
276
277 return 0;
278}
279
280static int ls_scfg_msi_teardown_hwirq(struct ls_scfg_msir *msir)
281{
282 struct ls_scfg_msi *msi_data = msir->msi_data;
283 int i, hwirq;
284
285 if (msir->gic_irq > 0)
286 irq_set_chained_handler_and_data(msir->gic_irq, NULL, NULL);
287
288 for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
289 hwirq = i << msi_data->cfg->ibs_shift | msir->index;
290 bitmap_set(msi_data->used, hwirq, 1);
291 }
292
293 return 0;
294}
295
296static struct ls_scfg_msi_cfg ls1021_msi_cfg = {
297 .ibs_shift = 3,
298 .msir_irqs = MSI_IRQS_PER_MSIR,
299 .msir_base = MSI_MSIR_OFFSET,
300};
301
302static struct ls_scfg_msi_cfg ls1046_msi_cfg = {
303 .ibs_shift = 2,
304 .msir_irqs = MSI_IRQS_PER_MSIR,
305 .msir_base = MSI_MSIR_OFFSET,
306};
307
308static struct ls_scfg_msi_cfg ls1043_v1_1_msi_cfg = {
309 .ibs_shift = 2,
310 .msir_irqs = MSI_LS1043V1_1_IRQS_PER_MSIR,
311 .msir_base = MSI_LS1043V1_1_MSIR_OFFSET,
312};
313
314static const struct of_device_id ls_scfg_msi_id[] = {
315 /* The following two misspelled compatibles are obsolete */
316 { .compatible = "fsl,1s1021a-msi", .data = &ls1021_msi_cfg},
317 { .compatible = "fsl,1s1043a-msi", .data = &ls1021_msi_cfg},
318
319 { .compatible = "fsl,ls1021a-msi", .data = &ls1021_msi_cfg },
320 { .compatible = "fsl,ls1043a-msi", .data = &ls1021_msi_cfg },
321 { .compatible = "fsl,ls1043a-v1.1-msi", .data = &ls1043_v1_1_msi_cfg },
322 { .compatible = "fsl,ls1046a-msi", .data = &ls1046_msi_cfg },
323 {},
324};
325MODULE_DEVICE_TABLE(of, ls_scfg_msi_id);
326
167static int ls_scfg_msi_probe(struct platform_device *pdev) 327static int ls_scfg_msi_probe(struct platform_device *pdev)
168{ 328{
329 const struct of_device_id *match;
169 struct ls_scfg_msi *msi_data; 330 struct ls_scfg_msi *msi_data;
170 struct resource *res; 331 struct resource *res;
171 int ret; 332 int i, ret;
333
334 match = of_match_device(ls_scfg_msi_id, &pdev->dev);
335 if (!match)
336 return -ENODEV;
172 337
173 msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL); 338 msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
174 if (!msi_data) 339 if (!msi_data)
175 return -ENOMEM; 340 return -ENOMEM;
176 341
342 msi_data->cfg = (struct ls_scfg_msi_cfg *) match->data;
343
177 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 344 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
178 msi_data->regs = devm_ioremap_resource(&pdev->dev, res); 345 msi_data->regs = devm_ioremap_resource(&pdev->dev, res);
179 if (IS_ERR(msi_data->regs)) { 346 if (IS_ERR(msi_data->regs)) {
@@ -182,23 +349,48 @@ static int ls_scfg_msi_probe(struct platform_device *pdev)
182 } 349 }
183 msi_data->msiir_addr = res->start; 350 msi_data->msiir_addr = res->start;
184 351
185 msi_data->irq = platform_get_irq(pdev, 0);
186 if (msi_data->irq <= 0) {
187 dev_err(&pdev->dev, "failed to get MSI irq\n");
188 return -ENODEV;
189 }
190
191 msi_data->pdev = pdev; 352 msi_data->pdev = pdev;
192 spin_lock_init(&msi_data->lock); 353 spin_lock_init(&msi_data->lock);
193 354
355 msi_data->irqs_num = MSI_IRQS_PER_MSIR *
356 (1 << msi_data->cfg->ibs_shift);
357 msi_data->used = devm_kcalloc(&pdev->dev,
358 BITS_TO_LONGS(msi_data->irqs_num),
359 sizeof(*msi_data->used),
360 GFP_KERNEL);
361 if (!msi_data->used)
362 return -ENOMEM;
363 /*
 364 * Reserve all the hwirqs.
 365 * The available hwirqs will be released in ls_scfg_msi_setup_hwirq().
366 */
367 bitmap_set(msi_data->used, 0, msi_data->irqs_num);
368
369 msi_data->msir_num = of_irq_count(pdev->dev.of_node);
370
371 if (msi_affinity_flag) {
372 u32 cpu_num;
373
374 cpu_num = num_possible_cpus();
375 if (msi_data->msir_num >= cpu_num)
376 msi_data->msir_num = cpu_num;
377 else
378 msi_affinity_flag = 0;
379 }
380
381 msi_data->msir = devm_kcalloc(&pdev->dev, msi_data->msir_num,
382 sizeof(*msi_data->msir),
383 GFP_KERNEL);
384 if (!msi_data->msir)
385 return -ENOMEM;
386
387 for (i = 0; i < msi_data->msir_num; i++)
388 ls_scfg_msi_setup_hwirq(msi_data, i);
389
194 ret = ls_scfg_msi_domains_init(msi_data); 390 ret = ls_scfg_msi_domains_init(msi_data);
195 if (ret) 391 if (ret)
196 return ret; 392 return ret;
197 393
198 irq_set_chained_handler_and_data(msi_data->irq,
199 ls_scfg_msi_irq_handler,
200 msi_data);
201
202 platform_set_drvdata(pdev, msi_data); 394 platform_set_drvdata(pdev, msi_data);
203 395
204 return 0; 396 return 0;
@@ -207,8 +399,10 @@ static int ls_scfg_msi_probe(struct platform_device *pdev)
207static int ls_scfg_msi_remove(struct platform_device *pdev) 399static int ls_scfg_msi_remove(struct platform_device *pdev)
208{ 400{
209 struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev); 401 struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);
402 int i;
210 403
211 irq_set_chained_handler_and_data(msi_data->irq, NULL, NULL); 404 for (i = 0; i < msi_data->msir_num; i++)
405 ls_scfg_msi_teardown_hwirq(&msi_data->msir[i]);
212 406
213 irq_domain_remove(msi_data->msi_domain); 407 irq_domain_remove(msi_data->msi_domain);
214 irq_domain_remove(msi_data->parent); 408 irq_domain_remove(msi_data->parent);
@@ -218,12 +412,6 @@ static int ls_scfg_msi_remove(struct platform_device *pdev)
218 return 0; 412 return 0;
219} 413}
220 414
221static const struct of_device_id ls_scfg_msi_id[] = {
222 { .compatible = "fsl,1s1021a-msi", },
223 { .compatible = "fsl,1s1043a-msi", },
224 {},
225};
226
227static struct platform_driver ls_scfg_msi_driver = { 415static struct platform_driver ls_scfg_msi_driver = {
228 .driver = { 416 .driver = {
229 .name = "ls-scfg-msi", 417 .name = "ls-scfg-msi",
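The rework above widens the hwirq space to hwirq = ibs << ibs_shift | srs, and the same value ends up in the MSI data (ORed with the servicing CPU when affinity support is active; it can be disabled at boot with lsmsi=no-affinity). A standalone sketch of the decode performed by ls_scfg_msi_irq_handler(), using the LS1046A parameters (ibs_shift = 2) in non-affinity mode, where srs equals the MSIR index; the concrete numbers are purely illustrative:

#include <stdint.h>
#include <stdio.h>

#define IBS_SHIFT       2       /* ls1046_msi_cfg.ibs_shift */
#define MSIR_IRQS       32      /* MSI_IRQS_PER_MSIR */

static uint32_t ls_scfg_hwirq(uint32_t ibs, uint32_t srs)
{
        return (ibs << IBS_SHIFT) | srs;
}

int main(void)
{
        uint32_t srs = 1;                       /* MSIR1, non-affinity mode */
        uint32_t bit_end = MSIR_IRQS - 1;       /* 31 for full-width MSIRs */
        uint32_t pos = 29;                      /* bit found set in the MSIR */

        /* Mirrors the handler: bit position -> hwirq. */
        printf("hwirq = %u\n", ls_scfg_hwirq(bit_end - pos, srs));
        /* prints: hwirq = 9, i.e. (2 << 2) | 1 */
        return 0;
}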
diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c
index 0cdd923d1535..be7216bfb8dd 100644
--- a/drivers/irqchip/irq-metag-ext.c
+++ b/drivers/irqchip/irq-metag-ext.c
@@ -518,6 +518,8 @@ static int meta_intc_set_affinity(struct irq_data *data,
518 518
519 metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr); 519 metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
520 520
521 irq_data_update_effective_affinity(data, cpumask_of(cpu));
522
521 return 0; 523 return 0;
522} 524}
523#else 525#else
@@ -578,6 +580,8 @@ static int meta_intc_map(struct irq_domain *d, unsigned int irq,
578 else 580 else
579 irq_set_chip_and_handler(irq, &meta_intc_edge_chip, 581 irq_set_chip_and_handler(irq, &meta_intc_edge_chip,
580 handle_edge_irq); 582 handle_edge_irq);
583
584 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
581 return 0; 585 return 0;
582} 586}
583 587
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 48ee1bad473f..b3a60da088db 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -445,24 +445,27 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
445 unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq); 445 unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
446 cpumask_t tmp = CPU_MASK_NONE; 446 cpumask_t tmp = CPU_MASK_NONE;
447 unsigned long flags; 447 unsigned long flags;
448 int i; 448 int i, cpu;
449 449
450 cpumask_and(&tmp, cpumask, cpu_online_mask); 450 cpumask_and(&tmp, cpumask, cpu_online_mask);
451 if (cpumask_empty(&tmp)) 451 if (cpumask_empty(&tmp))
452 return -EINVAL; 452 return -EINVAL;
453 453
454 cpu = cpumask_first(&tmp);
455
454 /* Assumption : cpumask refers to a single CPU */ 456 /* Assumption : cpumask refers to a single CPU */
455 spin_lock_irqsave(&gic_lock, flags); 457 spin_lock_irqsave(&gic_lock, flags);
456 458
457 /* Re-route this IRQ */ 459 /* Re-route this IRQ */
458 gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp))); 460 gic_map_to_vpe(irq, mips_cm_vp_id(cpu));
459 461
460 /* Update the pcpu_masks */ 462 /* Update the pcpu_masks */
461 for (i = 0; i < min(gic_vpes, NR_CPUS); i++) 463 for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
462 clear_bit(irq, pcpu_masks[i].pcpu_mask); 464 clear_bit(irq, pcpu_masks[i].pcpu_mask);
463 set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask); 465 set_bit(irq, pcpu_masks[cpu].pcpu_mask);
464 466
465 cpumask_copy(irq_data_get_affinity_mask(d), cpumask); 467 cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
468 irq_data_update_effective_affinity(d, cpumask_of(cpu));
466 spin_unlock_irqrestore(&gic_lock, flags); 469 spin_unlock_irqrestore(&gic_lock, flags);
467 470
468 return IRQ_SET_MASK_OK_NOCOPY; 471 return IRQ_SET_MASK_OK_NOCOPY;
@@ -716,6 +719,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
716 if (err) 719 if (err)
717 return err; 720 return err;
718 721
722 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
719 return gic_shared_irq_domain_map(d, virq, hwirq, 0); 723 return gic_shared_irq_domain_map(d, virq, hwirq, 0);
720 } 724 }
721 725
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 013fc9659a84..25f32e1d7764 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -181,13 +181,13 @@ const struct irq_domain_ops mmp_irq_domain_ops = {
181 .xlate = mmp_irq_domain_xlate, 181 .xlate = mmp_irq_domain_xlate,
182}; 182};
183 183
184static struct mmp_intc_conf mmp_conf = { 184static const struct mmp_intc_conf mmp_conf = {
185 .conf_enable = 0x51, 185 .conf_enable = 0x51,
186 .conf_disable = 0x0, 186 .conf_disable = 0x0,
187 .conf_mask = 0x7f, 187 .conf_mask = 0x7f,
188}; 188};
189 189
190static struct mmp_intc_conf mmp2_conf = { 190static const struct mmp_intc_conf mmp2_conf = {
191 .conf_enable = 0x20, 191 .conf_enable = 0x20,
192 .conf_disable = 0x0, 192 .conf_disable = 0x0,
193 .conf_mask = 0x7f, 193 .conf_mask = 0x7f,
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c
index eeac512ec5a8..90aaf190157f 100644
--- a/drivers/irqchip/irq-mtk-sysirq.c
+++ b/drivers/irqchip/irq-mtk-sysirq.c
@@ -178,8 +178,7 @@ static int __init mtk_sysirq_of_init(struct device_node *node,
178 chip_data->intpol_words[i] = size / 4; 178 chip_data->intpol_words[i] = size / 4;
179 chip_data->intpol_bases[i] = of_iomap(node, i); 179 chip_data->intpol_bases[i] = of_iomap(node, i);
180 if (ret || !chip_data->intpol_bases[i]) { 180 if (ret || !chip_data->intpol_bases[i]) {
181 pr_err("%s: couldn't map region %d\n", 181 pr_err("%pOF: couldn't map region %d\n", node, i);
182 node->full_name, i);
183 ret = -ENODEV; 182 ret = -ENODEV;
184 goto out_free_intpol; 183 goto out_free_intpol;
185 } 184 }
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index 05fa9f7af53c..e8b31f52e071 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -179,7 +179,7 @@ static void __init icoll_add_domain(struct device_node *np,
179 &icoll_irq_domain_ops, NULL); 179 &icoll_irq_domain_ops, NULL);
180 180
181 if (!icoll_domain) 181 if (!icoll_domain)
182 panic("%s: unable to create irq domain", np->full_name); 182 panic("%pOF: unable to create irq domain", np);
183} 183}
184 184
185static void __iomem * __init icoll_init_iobase(struct device_node *np) 185static void __iomem * __init icoll_init_iobase(struct device_node *np)
@@ -188,7 +188,7 @@ static void __iomem * __init icoll_init_iobase(struct device_node *np)
188 188
189 icoll_base = of_io_request_and_map(np, 0, np->name); 189 icoll_base = of_io_request_and_map(np, 0, np->name);
190 if (IS_ERR(icoll_base)) 190 if (IS_ERR(icoll_base))
191 panic("%s: unable to map resource", np->full_name); 191 panic("%pOF: unable to map resource", np);
192 return icoll_base; 192 return icoll_base;
193} 193}
194 194
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 491568c95aa5..45363ff8d06f 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -140,7 +140,7 @@ static int __init stm32_exti_init(struct device_node *node,
140 140
141 base = of_iomap(node, 0); 141 base = of_iomap(node, 0);
142 if (!base) { 142 if (!base) {
143 pr_err("%s: Unable to map registers\n", node->full_name); 143 pr_err("%pOF: Unable to map registers\n", node);
144 return -ENOMEM; 144 return -ENOMEM;
145 } 145 }
146 146
@@ -149,7 +149,7 @@ static int __init stm32_exti_init(struct device_node *node,
149 nr_exti = fls(readl_relaxed(base + EXTI_RTSR)); 149 nr_exti = fls(readl_relaxed(base + EXTI_RTSR));
150 writel_relaxed(0, base + EXTI_RTSR); 150 writel_relaxed(0, base + EXTI_RTSR);
151 151
152 pr_info("%s: %d External IRQs detected\n", node->full_name, nr_exti); 152 pr_info("%pOF: %d External IRQs detected\n", node, nr_exti);
153 153
154 domain = irq_domain_add_linear(node, nr_exti, 154 domain = irq_domain_add_linear(node, nr_exti,
155 &irq_exti_domain_ops, NULL); 155 &irq_exti_domain_ops, NULL);
@@ -163,8 +163,8 @@ static int __init stm32_exti_init(struct device_node *node,
163 ret = irq_alloc_domain_generic_chips(domain, nr_exti, 1, "exti", 163 ret = irq_alloc_domain_generic_chips(domain, nr_exti, 1, "exti",
164 handle_edge_irq, clr, 0, 0); 164 handle_edge_irq, clr, 0, 0);
165 if (ret) { 165 if (ret) {
166 pr_err("%s: Could not allocate generic interrupt chip.\n", 166 pr_err("%pOF: Could not allocate generic interrupt chip.\n",
167 node->full_name); 167 node);
168 goto out_free_domain; 168 goto out_free_domain;
169 } 169 }
170 170
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
index 376b28074e0d..e3e5b9132b75 100644
--- a/drivers/irqchip/irq-sun4i.c
+++ b/drivers/irqchip/irq-sun4i.c
@@ -97,8 +97,8 @@ static int __init sun4i_of_init(struct device_node *node,
97{ 97{
98 sun4i_irq_base = of_iomap(node, 0); 98 sun4i_irq_base = of_iomap(node, 0);
99 if (!sun4i_irq_base) 99 if (!sun4i_irq_base)
100 panic("%s: unable to map IC registers\n", 100 panic("%pOF: unable to map IC registers\n",
101 node->full_name); 101 node);
102 102
103 /* Disable all interrupts */ 103 /* Disable all interrupts */
104 writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(0)); 104 writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(0));
@@ -124,7 +124,7 @@ static int __init sun4i_of_init(struct device_node *node,
124 sun4i_irq_domain = irq_domain_add_linear(node, 3 * 32, 124 sun4i_irq_domain = irq_domain_add_linear(node, 3 * 32,
125 &sun4i_irq_ops, NULL); 125 &sun4i_irq_ops, NULL);
126 if (!sun4i_irq_domain) 126 if (!sun4i_irq_domain)
127 panic("%s: unable to create IRQ domain\n", node->full_name); 127 panic("%pOF: unable to create IRQ domain\n", node);
128 128
129 set_handle_irq(sun4i_handle_irq); 129 set_handle_irq(sun4i_handle_irq);
130 130
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
index 3973a14bb15b..0abc0cd1c32e 100644
--- a/drivers/irqchip/irq-tegra.c
+++ b/drivers/irqchip/irq-tegra.c
@@ -291,13 +291,13 @@ static int __init tegra_ictlr_init(struct device_node *node,
291 int err; 291 int err;
292 292
293 if (!parent) { 293 if (!parent) {
294 pr_err("%s: no parent, giving up\n", node->full_name); 294 pr_err("%pOF: no parent, giving up\n", node);
295 return -ENODEV; 295 return -ENODEV;
296 } 296 }
297 297
298 parent_domain = irq_find_host(parent); 298 parent_domain = irq_find_host(parent);
299 if (!parent_domain) { 299 if (!parent_domain) {
300 pr_err("%s: unable to obtain parent domain\n", node->full_name); 300 pr_err("%pOF: unable to obtain parent domain\n", node);
301 return -ENXIO; 301 return -ENXIO;
302 } 302 }
303 303
@@ -329,29 +329,29 @@ static int __init tegra_ictlr_init(struct device_node *node,
329 } 329 }
330 330
331 if (!num_ictlrs) { 331 if (!num_ictlrs) {
332 pr_err("%s: no valid regions, giving up\n", node->full_name); 332 pr_err("%pOF: no valid regions, giving up\n", node);
333 err = -ENOMEM; 333 err = -ENOMEM;
334 goto out_free; 334 goto out_free;
335 } 335 }
336 336
337 WARN(num_ictlrs != soc->num_ictlrs, 337 WARN(num_ictlrs != soc->num_ictlrs,
338 "%s: Found %u interrupt controllers in DT; expected %u.\n", 338 "%pOF: Found %u interrupt controllers in DT; expected %u.\n",
339 node->full_name, num_ictlrs, soc->num_ictlrs); 339 node, num_ictlrs, soc->num_ictlrs);
340 340
341 341
342 domain = irq_domain_add_hierarchy(parent_domain, 0, num_ictlrs * 32, 342 domain = irq_domain_add_hierarchy(parent_domain, 0, num_ictlrs * 32,
343 node, &tegra_ictlr_domain_ops, 343 node, &tegra_ictlr_domain_ops,
344 lic); 344 lic);
345 if (!domain) { 345 if (!domain) {
 346 pr_err("%s: failed to allocate domain\n", 346 pr_err("%pOF: failed to allocate domain\n", node);
347 err = -ENOMEM; 347 err = -ENOMEM;
348 goto out_unmap; 348 goto out_unmap;
349 } 349 }
350 350
351 tegra_ictlr_syscore_init(); 351 tegra_ictlr_syscore_init();
352 352
353 pr_info("%s: %d interrupts forwarded to %s\n", 353 pr_info("%pOF: %d interrupts forwarded to %pOF\n",
354 node->full_name, num_ictlrs * 32, parent->full_name); 354 node, num_ictlrs * 32, parent);
355 355
356 return 0; 356 return 0;
357 357
diff --git a/drivers/irqchip/irq-uniphier-aidet.c b/drivers/irqchip/irq-uniphier-aidet.c
new file mode 100644
index 000000000000..7ba7f253470e
--- /dev/null
+++ b/drivers/irqchip/irq-uniphier-aidet.c
@@ -0,0 +1,261 @@
1/*
2 * Driver for UniPhier AIDET (ARM Interrupt Detector)
3 *
4 * Copyright (C) 2017 Socionext Inc.
5 * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/bitops.h>
18#include <linux/init.h>
19#include <linux/irq.h>
20#include <linux/irqdomain.h>
21#include <linux/kernel.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/of_irq.h>
25#include <linux/platform_device.h>
26#include <linux/spinlock.h>
27
28#define UNIPHIER_AIDET_NR_IRQS 256
29
30#define UNIPHIER_AIDET_DETCONF 0x04 /* inverter register base */
31
32struct uniphier_aidet_priv {
33 struct irq_domain *domain;
34 void __iomem *reg_base;
35 spinlock_t lock;
36 u32 saved_vals[UNIPHIER_AIDET_NR_IRQS / 32];
37};
38
39static void uniphier_aidet_reg_update(struct uniphier_aidet_priv *priv,
40 unsigned int reg, u32 mask, u32 val)
41{
42 unsigned long flags;
43 u32 tmp;
44
45 spin_lock_irqsave(&priv->lock, flags);
46 tmp = readl_relaxed(priv->reg_base + reg);
47 tmp &= ~mask;
48 tmp |= mask & val;
49 writel_relaxed(tmp, priv->reg_base + reg);
50 spin_unlock_irqrestore(&priv->lock, flags);
51}
52
53static void uniphier_aidet_detconf_update(struct uniphier_aidet_priv *priv,
54 unsigned long index, unsigned int val)
55{
56 unsigned int reg;
57 u32 mask;
58
59 reg = UNIPHIER_AIDET_DETCONF + index / 32 * 4;
60 mask = BIT(index % 32);
61
62 uniphier_aidet_reg_update(priv, reg, mask, val ? mask : 0);
63}
64
65static int uniphier_aidet_irq_set_type(struct irq_data *data, unsigned int type)
66{
67 struct uniphier_aidet_priv *priv = data->chip_data;
68 unsigned int val;
69
70 /* enable inverter for active low triggers */
71 switch (type) {
72 case IRQ_TYPE_EDGE_RISING:
73 case IRQ_TYPE_LEVEL_HIGH:
74 val = 0;
75 break;
76 case IRQ_TYPE_EDGE_FALLING:
77 val = 1;
78 type = IRQ_TYPE_EDGE_RISING;
79 break;
80 case IRQ_TYPE_LEVEL_LOW:
81 val = 1;
82 type = IRQ_TYPE_LEVEL_HIGH;
83 break;
84 default:
85 return -EINVAL;
86 }
87
88 uniphier_aidet_detconf_update(priv, data->hwirq, val);
89
90 return irq_chip_set_type_parent(data, type);
91}
92
93static struct irq_chip uniphier_aidet_irq_chip = {
94 .name = "AIDET",
95 .irq_mask = irq_chip_mask_parent,
96 .irq_unmask = irq_chip_unmask_parent,
97 .irq_eoi = irq_chip_eoi_parent,
98 .irq_set_affinity = irq_chip_set_affinity_parent,
99 .irq_set_type = uniphier_aidet_irq_set_type,
100};
101
102static int uniphier_aidet_domain_translate(struct irq_domain *domain,
103 struct irq_fwspec *fwspec,
104 unsigned long *out_hwirq,
105 unsigned int *out_type)
106{
107 if (WARN_ON(fwspec->param_count < 2))
108 return -EINVAL;
109
110 *out_hwirq = fwspec->param[0];
111 *out_type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
112
113 return 0;
114}
115
116static int uniphier_aidet_domain_alloc(struct irq_domain *domain,
117 unsigned int virq, unsigned int nr_irqs,
118 void *arg)
119{
120 struct irq_fwspec parent_fwspec;
121 irq_hw_number_t hwirq;
122 unsigned int type;
123 int ret;
124
125 if (nr_irqs != 1)
126 return -EINVAL;
127
128 ret = uniphier_aidet_domain_translate(domain, arg, &hwirq, &type);
129 if (ret)
130 return ret;
131
132 switch (type) {
133 case IRQ_TYPE_EDGE_RISING:
134 case IRQ_TYPE_LEVEL_HIGH:
135 break;
136 case IRQ_TYPE_EDGE_FALLING:
137 type = IRQ_TYPE_EDGE_RISING;
138 break;
139 case IRQ_TYPE_LEVEL_LOW:
140 type = IRQ_TYPE_LEVEL_HIGH;
141 break;
142 default:
143 return -EINVAL;
144 }
145
146 if (hwirq >= UNIPHIER_AIDET_NR_IRQS)
147 return -ENXIO;
148
149 ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
150 &uniphier_aidet_irq_chip,
151 domain->host_data);
152 if (ret)
153 return ret;
154
155 /* parent is GIC */
156 parent_fwspec.fwnode = domain->parent->fwnode;
157 parent_fwspec.param_count = 3;
158 parent_fwspec.param[0] = 0; /* SPI */
159 parent_fwspec.param[1] = hwirq;
160 parent_fwspec.param[2] = type;
161
162 return irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
163}
164
165static const struct irq_domain_ops uniphier_aidet_domain_ops = {
166 .alloc = uniphier_aidet_domain_alloc,
167 .free = irq_domain_free_irqs_common,
168 .translate = uniphier_aidet_domain_translate,
169};
170
171static int uniphier_aidet_probe(struct platform_device *pdev)
172{
173 struct device *dev = &pdev->dev;
174 struct device_node *parent_np;
175 struct irq_domain *parent_domain;
176 struct uniphier_aidet_priv *priv;
177 struct resource *res;
178
179 parent_np = of_irq_find_parent(dev->of_node);
180 if (!parent_np)
181 return -ENXIO;
182
183 parent_domain = irq_find_host(parent_np);
184 of_node_put(parent_np);
185 if (!parent_domain)
186 return -EPROBE_DEFER;
187
188 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
189 if (!priv)
190 return -ENOMEM;
191
192 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
193 priv->reg_base = devm_ioremap_resource(dev, res);
194 if (IS_ERR(priv->reg_base))
195 return PTR_ERR(priv->reg_base);
196
197 spin_lock_init(&priv->lock);
198
199 priv->domain = irq_domain_create_hierarchy(
200 parent_domain, 0,
201 UNIPHIER_AIDET_NR_IRQS,
202 of_node_to_fwnode(dev->of_node),
203 &uniphier_aidet_domain_ops, priv);
204 if (!priv->domain)
205 return -ENOMEM;
206
207 platform_set_drvdata(pdev, priv);
208
209 return 0;
210}
211
212static int __maybe_unused uniphier_aidet_suspend(struct device *dev)
213{
214 struct uniphier_aidet_priv *priv = dev_get_drvdata(dev);
215 int i;
216
217 for (i = 0; i < ARRAY_SIZE(priv->saved_vals); i++)
218 priv->saved_vals[i] = readl_relaxed(
219 priv->reg_base + UNIPHIER_AIDET_DETCONF + i * 4);
220
221 return 0;
222}
223
224static int __maybe_unused uniphier_aidet_resume(struct device *dev)
225{
226 struct uniphier_aidet_priv *priv = dev_get_drvdata(dev);
227 int i;
228
229 for (i = 0; i < ARRAY_SIZE(priv->saved_vals); i++)
230 writel_relaxed(priv->saved_vals[i],
231 priv->reg_base + UNIPHIER_AIDET_DETCONF + i * 4);
232
233 return 0;
234}
235
236static const struct dev_pm_ops uniphier_aidet_pm_ops = {
237 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(uniphier_aidet_suspend,
238 uniphier_aidet_resume)
239};
240
241static const struct of_device_id uniphier_aidet_match[] = {
242 { .compatible = "socionext,uniphier-ld4-aidet" },
243 { .compatible = "socionext,uniphier-pro4-aidet" },
244 { .compatible = "socionext,uniphier-sld8-aidet" },
245 { .compatible = "socionext,uniphier-pro5-aidet" },
246 { .compatible = "socionext,uniphier-pxs2-aidet" },
247 { .compatible = "socionext,uniphier-ld11-aidet" },
248 { .compatible = "socionext,uniphier-ld20-aidet" },
249 { .compatible = "socionext,uniphier-pxs3-aidet" },
250 { /* sentinel */ }
251};
252
253static struct platform_driver uniphier_aidet_driver = {
254 .probe = uniphier_aidet_probe,
255 .driver = {
256 .name = "uniphier-aidet",
257 .of_match_table = uniphier_aidet_match,
258 .pm = &uniphier_aidet_pm_ops,
259 },
260};
261builtin_platform_driver(uniphier_aidet_driver);
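The AIDET itself only inverts line polarity, so that active-low and falling-edge inputs can be handed to the parent GIC as active-high and rising ones. A standalone sketch (plain arithmetic, not driver code) of the DETCONF word/bit selection performed by uniphier_aidet_detconf_update(); aidet_detconf_pos() is a hypothetical helper:

#include <stdint.h>

#define UNIPHIER_AIDET_DETCONF  0x04    /* inverter register base */

/* For input line 'index', compute the DETCONF register offset and the
 * bit mask whose assertion inverts that line. */
static void aidet_detconf_pos(unsigned long index,
                              unsigned int *reg, uint32_t *mask)
{
        *reg = UNIPHIER_AIDET_DETCONF + (unsigned int)(index / 32) * 4;
        *mask = UINT32_C(1) << (index % 32);
}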
diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
index 3db7ab1c9741..e3043ded8973 100644
--- a/drivers/irqchip/irq-xilinx-intc.c
+++ b/drivers/irqchip/irq-xilinx-intc.c
@@ -186,8 +186,8 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
186 if (irqc->intr_mask >> nr_irq) 186 if (irqc->intr_mask >> nr_irq)
187 pr_warn("irq-xilinx: mismatch in kind-of-intr param\n"); 187 pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
188 188
189 pr_info("irq-xilinx: %s: num_irq=%d, edge=0x%x\n", 189 pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",
190 intc->full_name, nr_irq, irqc->intr_mask); 190 intc, nr_irq, irqc->intr_mask);
191 191
192 192
193 /* 193 /*
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index 72a391e01011..a15a9510c904 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -32,6 +32,7 @@ static int xtensa_mx_irq_map(struct irq_domain *d, unsigned int irq,
32 irq_set_status_flags(irq, IRQ_LEVEL); 32 irq_set_status_flags(irq, IRQ_LEVEL);
33 return 0; 33 return 0;
34 } 34 }
35 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
35 return xtensa_irq_map(d, irq, hw); 36 return xtensa_irq_map(d, irq, hw);
36} 37}
37 38
@@ -121,9 +122,12 @@ static int xtensa_mx_irq_retrigger(struct irq_data *d)
121static int xtensa_mx_irq_set_affinity(struct irq_data *d, 122static int xtensa_mx_irq_set_affinity(struct irq_data *d,
122 const struct cpumask *dest, bool force) 123 const struct cpumask *dest, bool force)
123{ 124{
124 unsigned mask = 1u << cpumask_any_and(dest, cpu_online_mask); 125 int cpu = cpumask_any_and(dest, cpu_online_mask);
126 unsigned mask = 1u << cpu;
125 127
126 set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE)); 128 set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE));
129 irq_data_update_effective_affinity(d, cpumask_of(cpu));
130
127 return 0; 131 return 0;
128 132
129} 133}
diff --git a/include/linux/irq.h b/include/linux/irq.h
index d2d543794093..b99a784635ff 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -568,6 +568,8 @@ extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
568extern int irq_chip_pm_get(struct irq_data *data); 568extern int irq_chip_pm_get(struct irq_data *data);
569extern int irq_chip_pm_put(struct irq_data *data); 569extern int irq_chip_pm_put(struct irq_data *data);
570#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 570#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
571extern void handle_fasteoi_ack_irq(struct irq_desc *desc);
572extern void handle_fasteoi_mask_irq(struct irq_desc *desc);
571extern void irq_chip_enable_parent(struct irq_data *data); 573extern void irq_chip_enable_parent(struct irq_data *data);
572extern void irq_chip_disable_parent(struct irq_data *data); 574extern void irq_chip_disable_parent(struct irq_data *data);
573extern void irq_chip_ack_parent(struct irq_data *data); 575extern void irq_chip_ack_parent(struct irq_data *data);
@@ -781,7 +783,10 @@ static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
781static inline 783static inline
782struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) 784struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
783{ 785{
784 return d->common->effective_affinity; 786 if (!cpumask_empty(d->common->effective_affinity))
787 return d->common->effective_affinity;
788
789 return d->common->affinity;
785} 790}
786static inline void irq_data_update_effective_affinity(struct irq_data *d, 791static inline void irq_data_update_effective_affinity(struct irq_data *d,
787 const struct cpumask *m) 792 const struct cpumask *m)
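With the fallback above, irq_data_get_effective_affinity_mask() is always usable: an empty effective mask (from a chip that never calls irq_data_update_effective_affinity()) degrades to the requested affinity rather than to an empty cpumask. A hedged consumer sketch; irq_service_cpu() is a hypothetical helper, not part of this patch:

#include <linux/cpumask.h>
#include <linux/irq.h>

/* Pick the CPU that will actually service this interrupt, falling back
 * to the requested affinity on chips without effective-affinity
 * reporting. */
static unsigned int irq_service_cpu(struct irq_data *d)
{
        return cpumask_first(irq_data_get_effective_affinity_mask(d));
}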
diff --git a/include/linux/irq_sim.h b/include/linux/irq_sim.h
new file mode 100644
index 000000000000..0380d899b955
--- /dev/null
+++ b/include/linux/irq_sim.h
@@ -0,0 +1,44 @@
1#ifndef _LINUX_IRQ_SIM_H
2#define _LINUX_IRQ_SIM_H
3/*
4 * Copyright (C) 2017 Bartosz Golaszewski <brgl@bgdev.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#include <linux/irq_work.h>
13#include <linux/device.h>
14
15/*
16 * Provides a framework for allocating simulated interrupts which can be
17 * requested like normal irqs and enqueued from process context.
18 */
19
20struct irq_sim_work_ctx {
21 struct irq_work work;
22 int irq;
23};
24
25struct irq_sim_irq_ctx {
26 int irqnum;
27 bool enabled;
28};
29
30struct irq_sim {
31 struct irq_sim_work_ctx work_ctx;
32 int irq_base;
33 unsigned int irq_count;
34 struct irq_sim_irq_ctx *irqs;
35};
36
37int irq_sim_init(struct irq_sim *sim, unsigned int num_irqs);
38int devm_irq_sim_init(struct device *dev, struct irq_sim *sim,
39 unsigned int num_irqs);
40void irq_sim_fini(struct irq_sim *sim);
41void irq_sim_fire(struct irq_sim *sim, unsigned int offset);
42int irq_sim_irqnum(struct irq_sim *sim, unsigned int offset);
43
44#endif /* _LINUX_IRQ_SIM_H */
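A minimal consumer sketch of the new simulator API, assuming irq_sim_init() follows the usual 0-or-negative-errno convention; the demo_* names are hypothetical:

#include <linux/interrupt.h>
#include <linux/irq_sim.h>

static struct irq_sim demo_sim;

static irqreturn_t demo_handler(int irq, void *data)
{
        /* Runs like any other interrupt handler. */
        return IRQ_HANDLED;
}

static int demo_irq_sim_setup(void)
{
        int ret, irq;

        ret = irq_sim_init(&demo_sim, 4);       /* four simulated lines */
        if (ret < 0)
                return ret;

        irq = irq_sim_irqnum(&demo_sim, 0);     /* Linux irq for offset 0 */
        ret = request_irq(irq, demo_handler, 0, "irq_sim-demo", NULL);
        if (ret) {
                irq_sim_fini(&demo_sim);
                return ret;
        }

        irq_sim_fire(&demo_sim, 0);             /* enqueue from process context */
        return 0;
}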
diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h
index c647b0547bcd..0a83b4379f34 100644
--- a/include/linux/irqchip/arm-gic-common.h
+++ b/include/linux/irqchip/arm-gic-common.h
@@ -27,6 +27,8 @@ struct gic_kvm_info {
27 unsigned int maint_irq; 27 unsigned int maint_irq;
28 /* Virtual control interface */ 28 /* Virtual control interface */
29 struct resource vctrl; 29 struct resource vctrl;
30 /* vlpi support */
31 bool has_v4;
30}; 32};
31 33
32const struct gic_kvm_info *gic_get_kvm_info(void); 34const struct gic_kvm_info *gic_get_kvm_info(void);
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 6a1f87ff94e2..1ea576c8126f 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -204,6 +204,7 @@
204 204
205#define GICR_TYPER_PLPIS (1U << 0) 205#define GICR_TYPER_PLPIS (1U << 0)
206#define GICR_TYPER_VLPIS (1U << 1) 206#define GICR_TYPER_VLPIS (1U << 1)
207#define GICR_TYPER_DirectLPIS (1U << 3)
207#define GICR_TYPER_LAST (1U << 4) 208#define GICR_TYPER_LAST (1U << 4)
208 209
209#define GIC_V3_REDIST_SIZE 0x20000 210#define GIC_V3_REDIST_SIZE 0x20000
@@ -212,6 +213,69 @@
212#define LPI_PROP_ENABLED (1 << 0) 213#define LPI_PROP_ENABLED (1 << 0)
213 214
214/* 215/*
216 * Re-Distributor registers, offsets from VLPI_base
217 */
218#define GICR_VPROPBASER 0x0070
219
220#define GICR_VPROPBASER_IDBITS_MASK 0x1f
221
222#define GICR_VPROPBASER_SHAREABILITY_SHIFT (10)
223#define GICR_VPROPBASER_INNER_CACHEABILITY_SHIFT (7)
224#define GICR_VPROPBASER_OUTER_CACHEABILITY_SHIFT (56)
225
226#define GICR_VPROPBASER_SHAREABILITY_MASK \
227 GIC_BASER_SHAREABILITY(GICR_VPROPBASER, SHAREABILITY_MASK)
228#define GICR_VPROPBASER_INNER_CACHEABILITY_MASK \
229 GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, MASK)
230#define GICR_VPROPBASER_OUTER_CACHEABILITY_MASK \
231 GIC_BASER_CACHEABILITY(GICR_VPROPBASER, OUTER, MASK)
232#define GICR_VPROPBASER_CACHEABILITY_MASK \
233 GICR_VPROPBASER_INNER_CACHEABILITY_MASK
234
235#define GICR_VPROPBASER_InnerShareable \
236 GIC_BASER_SHAREABILITY(GICR_VPROPBASER, InnerShareable)
237
238#define GICR_VPROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nCnB)
239#define GICR_VPROPBASER_nC GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nC)
240#define GICR_VPROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt)
 241#define GICR_VPROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWb)
242#define GICR_VPROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWt)
243#define GICR_VPROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWb)
244#define GICR_VPROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt)
245#define GICR_VPROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWb)
246
247#define GICR_VPENDBASER 0x0078
248
249#define GICR_VPENDBASER_SHAREABILITY_SHIFT (10)
250#define GICR_VPENDBASER_INNER_CACHEABILITY_SHIFT (7)
251#define GICR_VPENDBASER_OUTER_CACHEABILITY_SHIFT (56)
252#define GICR_VPENDBASER_SHAREABILITY_MASK \
253 GIC_BASER_SHAREABILITY(GICR_VPENDBASER, SHAREABILITY_MASK)
254#define GICR_VPENDBASER_INNER_CACHEABILITY_MASK \
255 GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, MASK)
256#define GICR_VPENDBASER_OUTER_CACHEABILITY_MASK \
257 GIC_BASER_CACHEABILITY(GICR_VPENDBASER, OUTER, MASK)
258#define GICR_VPENDBASER_CACHEABILITY_MASK \
259 GICR_VPENDBASER_INNER_CACHEABILITY_MASK
260
261#define GICR_VPENDBASER_NonShareable \
262 GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable)
263
264#define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB)
265#define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC)
266#define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt)
 267#define GICR_VPENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWb)
268#define GICR_VPENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWt)
269#define GICR_VPENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWb)
270#define GICR_VPENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWt)
271#define GICR_VPENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWb)
272
273#define GICR_VPENDBASER_Dirty (1ULL << 60)
274#define GICR_VPENDBASER_PendingLast (1ULL << 61)
275#define GICR_VPENDBASER_IDAI (1ULL << 62)
276#define GICR_VPENDBASER_Valid (1ULL << 63)
277
278/*
215 * ITS registers, offsets from ITS_base 279 * ITS registers, offsets from ITS_base
216 */ 280 */
217#define GITS_CTLR 0x0000 281#define GITS_CTLR 0x0000
@@ -234,15 +298,21 @@
234#define GITS_TRANSLATER 0x10040 298#define GITS_TRANSLATER 0x10040
235 299
236#define GITS_CTLR_ENABLE (1U << 0) 300#define GITS_CTLR_ENABLE (1U << 0)
301#define GITS_CTLR_ImDe (1U << 1)
302#define GITS_CTLR_ITS_NUMBER_SHIFT 4
303#define GITS_CTLR_ITS_NUMBER (0xFU << GITS_CTLR_ITS_NUMBER_SHIFT)
237#define GITS_CTLR_QUIESCENT (1U << 31) 304#define GITS_CTLR_QUIESCENT (1U << 31)
238 305
239#define GITS_TYPER_PLPIS (1UL << 0) 306#define GITS_TYPER_PLPIS (1UL << 0)
307#define GITS_TYPER_VLPIS (1UL << 1)
240#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 308#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4
309#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
241#define GITS_TYPER_IDBITS_SHIFT 8 310#define GITS_TYPER_IDBITS_SHIFT 8
242#define GITS_TYPER_DEVBITS_SHIFT 13 311#define GITS_TYPER_DEVBITS_SHIFT 13
243#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) 312#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
244#define GITS_TYPER_PTA (1UL << 19) 313#define GITS_TYPER_PTA (1UL << 19)
245#define GITS_TYPER_HWCOLLCNT_SHIFT 24 314#define GITS_TYPER_HWCOLLCNT_SHIFT 24
315#define GITS_TYPER_VMOVP (1ULL << 37)
246 316
247#define GITS_IIDR_REV_SHIFT 12 317#define GITS_IIDR_REV_SHIFT 12
248#define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT) 318#define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT)
@@ -342,6 +412,18 @@
 #define GITS_CMD_SYNC			0x05
 
 /*
+ * GICv4 ITS specific commands
+ */
+#define GITS_CMD_GICv4(x)		((x) | 0x20)
+#define GITS_CMD_VINVALL		GITS_CMD_GICv4(GITS_CMD_INVALL)
+#define GITS_CMD_VMAPP			GITS_CMD_GICv4(GITS_CMD_MAPC)
+#define GITS_CMD_VMAPTI			GITS_CMD_GICv4(GITS_CMD_MAPTI)
+#define GITS_CMD_VMOVI			GITS_CMD_GICv4(GITS_CMD_MOVI)
+#define GITS_CMD_VSYNC			GITS_CMD_GICv4(GITS_CMD_SYNC)
+/* VMOVP is the odd one, as it doesn't have a physical counterpart */
+#define GITS_CMD_VMOVP			GITS_CMD_GICv4(2)
+
+/*
  * ITS error numbers
  */
 #define E_ITS_MOVI_UNMAPPED_INTERRUPT		0x010107
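
The encoding rule here is simple: each virtual command reuses its physical counterpart's opcode with bit 5 (0x20) set. A compile-time sanity check illustrating the arithmetic, using GITS_CMD_SYNC (0x05, visible in the context above); this helper is purely illustrative and not in the patch.

/* Illustrative sketch: verify the GICv4 command encodings at build time. */
static inline void demo_gits_cmd_encoding_checks(void)
{
	BUILD_BUG_ON(GITS_CMD_VSYNC != 0x25);	/* GITS_CMD_SYNC (0x05) | 0x20 */
	BUILD_BUG_ON(GITS_CMD_VMOVP != 0x22);	/* raw opcode 2, bit 5 set */
}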
@@ -487,6 +569,8 @@ struct rdists {
 	struct page		*prop_page;
 	int			id_bits;
 	u64			flags;
+	bool			has_vlpis;
+	bool			has_direct_lpi;
 };
 
 struct irq_domain;
diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h
new file mode 100644
index 000000000000..58a4d89aa82c
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-v4.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2016,2017 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LINUX_IRQCHIP_ARM_GIC_V4_H
+#define __LINUX_IRQCHIP_ARM_GIC_V4_H
+
+struct its_vpe;
+
+/* Embedded in kvm.arch */
+struct its_vm {
+	struct fwnode_handle	*fwnode;
+	struct irq_domain	*domain;
+	struct page		*vprop_page;
+	struct its_vpe		**vpes;
+	int			nr_vpes;
+	irq_hw_number_t		db_lpi_base;
+	unsigned long		*db_bitmap;
+	int			nr_db_lpis;
+};
+
+/* Embedded in kvm_vcpu.arch */
+struct its_vpe {
+	struct page		*vpt_page;
+	struct its_vm		*its_vm;
+	/* Doorbell interrupt */
+	int			irq;
+	irq_hw_number_t		vpe_db_lpi;
+	/* VPE proxy mapping */
+	int			vpe_proxy_event;
+	/*
+	 * This collection ID is used to indirect the target
+	 * redistributor for this VPE. The ID itself isn't involved in
+	 * programming of the ITS.
+	 */
+	u16			col_idx;
+	/* Unique (system-wide) VPE identifier */
+	u16			vpe_id;
+	/* Implementation Defined Area Invalid */
+	bool			idai;
+	/* Pending VLPIs on schedule out? */
+	bool			pending_last;
+};
+
+/*
+ * struct its_vlpi_map: structure describing the mapping of a
+ * VLPI. Only to be interpreted in the context of a physical interrupt
+ * it complements. To be used as the vcpu_info passed to
+ * irq_set_vcpu_affinity().
+ *
+ * @vm:		Pointer to the GICv4 notion of a VM
+ * @vpe:	Pointer to the GICv4 notion of a virtual CPU (VPE)
+ * @vintid:	Virtual LPI number
+ * @db_enabled:	Is the VPE doorbell to be generated?
+ */
+struct its_vlpi_map {
+	struct its_vm		*vm;
+	struct its_vpe		*vpe;
+	u32			vintid;
+	bool			db_enabled;
+};
+
+enum its_vcpu_info_cmd_type {
+	MAP_VLPI,
+	GET_VLPI,
+	PROP_UPDATE_VLPI,
+	PROP_UPDATE_AND_INV_VLPI,
+	SCHEDULE_VPE,
+	DESCHEDULE_VPE,
+	INVALL_VPE,
+};
+
+struct its_cmd_info {
+	enum its_vcpu_info_cmd_type	cmd_type;
+	union {
+		struct its_vlpi_map	*map;
+		u8			config;
+	};
+};
+
+int its_alloc_vcpu_irqs(struct its_vm *vm);
+void its_free_vcpu_irqs(struct its_vm *vm);
+int its_schedule_vpe(struct its_vpe *vpe, bool on);
+int its_invall_vpe(struct its_vpe *vpe);
+int its_map_vlpi(int irq, struct its_vlpi_map *map);
+int its_get_vlpi(int irq, struct its_vlpi_map *map);
+int its_unmap_vlpi(int irq);
+int its_prop_update_vlpi(int irq, u8 config, bool inv);
+
+int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops);
+
+#endif
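
The function block at the end of this header is the whole GICv4 driver-facing API. A condensed, hypothetical caller showing the intended sequencing; the 'vm' object and 'host_irq' come from the (elided) hypervisor side, the demo_* name is invented, and error handling is minimal.

/* Hypothetical caller sketch: wire one VM's VPEs, then redirect a VLPI. */
static int demo_gicv4_wire_up(struct its_vm *vm, int host_irq)
{
	struct its_vlpi_map map = {
		.vm		= vm,
		.vpe		= vm->vpes[0],
		.vintid		= 8192,		/* first LPI INTID per the GIC architecture */
		.db_enabled	= true,
	};
	int err;

	/* Allocate the per-VPE doorbell interrupts for the whole VM. */
	err = its_alloc_vcpu_irqs(vm);
	if (err)
		return err;

	/* Forward the physical LPI 'host_irq' to the guest as vintid 8192. */
	err = its_map_vlpi(host_irq, &map);
	if (err)
		goto out_free;

	/* Make VPE 0 resident so its VLPIs can be delivered directly. */
	return its_schedule_vpe(vm->vpes[0], true);

out_free:
	its_free_vcpu_irqs(vm);
	return err;
}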
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index cac77a5c5555..2318f29054af 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -460,6 +460,9 @@ extern void irq_domain_free_irqs_common(struct irq_domain *domain,
 extern void irq_domain_free_irqs_top(struct irq_domain *domain,
 				     unsigned int virq, unsigned int nr_irqs);
 
+extern int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg);
+extern int irq_domain_pop_irq(struct irq_domain *domain, int virq);
+
 extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
 					unsigned int irq_base,
 					unsigned int nr_irqs, void *arg);
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 27c4e774071c..a117adf7084b 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -63,11 +63,20 @@ config GENERIC_IRQ_CHIP
 config IRQ_DOMAIN
 	bool
 
+# Support for simulated interrupts
+config IRQ_SIM
+	bool
+	select IRQ_WORK
+
 # Support for hierarchical irq domains
 config IRQ_DOMAIN_HIERARCHY
 	bool
 	select IRQ_DOMAIN
 
+# Support for hierarchical fasteoi+edge and fasteoi+level handlers
+config IRQ_FASTEOI_HIERARCHY_HANDLERS
+	bool
+
 # Generic IRQ IPI support
 config GENERIC_IRQ_IPI
 	bool
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index e4aef7351f2b..1970cafe8f2a 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_IRQ_TIMINGS) += timings.o
 obj-$(CONFIG_GENERIC_IRQ_CHIP) += generic-chip.o
 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
 obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o
+obj-$(CONFIG_IRQ_SIM) += irq_sim.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
 obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 3675c6004f2a..f51b7b6d2451 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -1098,6 +1098,112 @@ void irq_cpu_offline(void)
 }
 
 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+
+#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
+/**
+ * handle_fasteoi_ack_irq - irq handler for edge hierarchy
+ * stacked on transparent controllers
+ *
+ * @desc:	the interrupt description structure for this irq
+ *
+ * Like handle_fasteoi_irq(), but for use with hierarchy where
+ * the irq_chip also needs to have its ->irq_ack() function
+ * called.
+ */
+void handle_fasteoi_ack_irq(struct irq_desc *desc)
+{
+	struct irq_chip *chip = desc->irq_data.chip;
+
+	raw_spin_lock(&desc->lock);
+
+	if (!irq_may_run(desc))
+		goto out;
+
+	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+
+	/*
+	 * If it's disabled or no action is available,
+	 * then mask it and get out of here:
+	 */
+	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
+		desc->istate |= IRQS_PENDING;
+		mask_irq(desc);
+		goto out;
+	}
+
+	kstat_incr_irqs_this_cpu(desc);
+	if (desc->istate & IRQS_ONESHOT)
+		mask_irq(desc);
+
+	/* Start handling the irq */
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
+
+	preflow_handler(desc);
+	handle_irq_event(desc);
+
+	cond_unmask_eoi_irq(desc, chip);
+
+	raw_spin_unlock(&desc->lock);
+	return;
+out:
+	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
+		chip->irq_eoi(&desc->irq_data);
+	raw_spin_unlock(&desc->lock);
+}
+EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);
+
+/**
+ * handle_fasteoi_mask_irq - irq handler for level hierarchy
+ * stacked on transparent controllers
+ *
+ * @desc:	the interrupt description structure for this irq
+ *
+ * Like handle_fasteoi_irq(), but for use with hierarchy where
+ * the irq_chip also needs to have its ->irq_mask_ack() function
+ * called.
+ */
+void handle_fasteoi_mask_irq(struct irq_desc *desc)
+{
+	struct irq_chip *chip = desc->irq_data.chip;
+
+	raw_spin_lock(&desc->lock);
+	mask_ack_irq(desc);
+
+	if (!irq_may_run(desc))
+		goto out;
+
+	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+
+	/*
+	 * If it's disabled or no action is available,
+	 * then mask it and get out of here:
+	 */
+	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
+		desc->istate |= IRQS_PENDING;
+		mask_irq(desc);
+		goto out;
+	}
+
+	kstat_incr_irqs_this_cpu(desc);
+	if (desc->istate & IRQS_ONESHOT)
+		mask_irq(desc);
+
+	preflow_handler(desc);
+	handle_irq_event(desc);
+
+	cond_unmask_eoi_irq(desc, chip);
+
+	raw_spin_unlock(&desc->lock);
+	return;
+out:
+	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
+		chip->irq_eoi(&desc->irq_data);
+	raw_spin_unlock(&desc->lock);
+}
+EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);
+
+#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */
+
 /**
  * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
  * NULL)
@@ -1111,6 +1217,7 @@ void irq_chip_enable_parent(struct irq_data *data)
 	else
 		data->chip->irq_unmask(data);
 }
+EXPORT_SYMBOL_GPL(irq_chip_enable_parent);
 
 /**
  * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
@@ -1125,6 +1232,7 @@ void irq_chip_disable_parent(struct irq_data *data)
 	else
 		data->chip->irq_mask(data);
 }
+EXPORT_SYMBOL_GPL(irq_chip_disable_parent);
 
 /**
  * irq_chip_ack_parent - Acknowledge the parent interrupt
@@ -1187,6 +1295,7 @@ int irq_chip_set_affinity_parent(struct irq_data *data,
 
 	return -ENOSYS;
 }
+EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);
 
 /**
  * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
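
To use the two flow handlers added above, a stacked irqchip selects IRQ_FASTEOI_HIERARCHY_HANDLERS and installs the handler when it allocates interrupts. A hypothetical ->alloc() fragment follows; all demo_* names are invented, and the sketch assumes nr_irqs == 1.

/* Hypothetical chip stacked on a fasteoi parent (e.g. a GIC): it supplies
 * .irq_ack itself and chains .irq_eoi via irq_chip_eoi_parent(). */
static struct irq_chip demo_edge_chip;

static int demo_domain_alloc(struct irq_domain *d, unsigned int virq,
			     unsigned int nr_irqs, void *arg)
{
	irq_hw_number_t hwirq = 0;	/* would be decoded from 'arg' */
	int ret;

	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
	if (ret)
		return ret;

	/* Edge variant: select the new ack-aware fasteoi flow. */
	irq_domain_set_info(d, virq, hwirq, &demo_edge_chip, NULL,
			    handle_fasteoi_ack_irq, NULL, NULL);
	return 0;
}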
diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
index 4d384edc0c64..c3fdb36dec30 100644
--- a/kernel/irq/debugfs.c
+++ b/kernel/irq/debugfs.c
@@ -5,6 +5,7 @@
  */
 #include <linux/irqdomain.h>
 #include <linux/irq.h>
+#include <linux/uaccess.h>
 
 #include "internals.h"
 
@@ -171,8 +172,55 @@ static int irq_debug_open(struct inode *inode, struct file *file)
 	return single_open(file, irq_debug_show, inode->i_private);
 }
 
+static ssize_t irq_debug_write(struct file *file, const char __user *user_buf,
+			       size_t count, loff_t *ppos)
+{
+	struct irq_desc *desc = file_inode(file)->i_private;
+	char buf[8] = { 0, };
+	size_t size;
+
+	size = min(sizeof(buf) - 1, count);
+	if (copy_from_user(buf, user_buf, size))
+		return -EFAULT;
+
+	if (!strncmp(buf, "trigger", size)) {
+		unsigned long flags;
+		int err;
+
+		/* Try the HW interface first */
+		err = irq_set_irqchip_state(irq_desc_get_irq(desc),
+					    IRQCHIP_STATE_PENDING, true);
+		if (!err)
+			return count;
+
+		/*
+		 * Otherwise, try to inject via the resend interface,
+		 * which may or may not succeed.
+		 */
+		chip_bus_lock(desc);
+		raw_spin_lock_irqsave(&desc->lock, flags);
+
+		if (irq_settings_is_level(desc)) {
+			/* Can't do level, sorry */
+			err = -EINVAL;
+		} else {
+			desc->istate |= IRQS_PENDING;
+			check_irq_resend(desc);
+			err = 0;
+		}
+
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
+		chip_bus_sync_unlock(desc);
+
+		return err ? err : count;
+	}
+
+	return count;
+}
+
 static const struct file_operations dfs_irq_ops = {
 	.open		= irq_debug_open,
+	.write		= irq_debug_write,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
 	.release	= single_release,
@@ -186,7 +234,7 @@ void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc)
 		return;
 
 	sprintf(name, "%d", irq);
-	desc->debugfs_file = debugfs_create_file(name, 0444, irq_dir, desc,
+	desc->debugfs_file = debugfs_create_file(name, 0644, irq_dir, desc,
 						 &dfs_irq_ops);
 }
 
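
With the write hook and the 0644 file mode in place, an interrupt can be injected from userspace. A minimal userspace sketch; the path assumes debugfs is mounted at /sys/kernel/debug, that CONFIG_GENERIC_IRQ_DEBUGFS=y, and that the per-IRQ files live under irq/irqs/ as created elsewhere in this file. IRQ number 23 is arbitrary.

/* Hypothetical userspace sketch: retrigger IRQ 23 through the new hook. */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/irq/irqs/23", O_WRONLY);

	if (fd < 0)
		return 1;
	(void)write(fd, "trigger", 7);	/* matched by irq_debug_write() above */
	close(fd);
	return 0;
}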
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index a2c48058354c..a4aa39009f0d 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -151,7 +151,7 @@ static inline void chip_bus_sync_unlock(struct irq_desc *desc)
 #define IRQ_GET_DESC_CHECK_PERCPU	(_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU)
 
 #define for_each_action_of_desc(desc, act)	\
-	for (act = desc->act; act; act = act->next)
+	for (act = desc->action; act; act = act->next)
 
 struct irq_desc *
 __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
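
The one-character fix above matters because this macro is what iterates a descriptor's chain of shared handlers, and struct irq_desc has an 'action' member, not 'act'. A usage sketch (hypothetical caller; desc is assumed valid and locked):

/* Sketch: dump every action registered on one descriptor. */
static void demo_dump_actions(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		pr_info("irq %u: handler %s\n",
			irq_desc_get_irq(desc), action->name ?: "(unnamed)");
}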
diff --git a/kernel/irq/irq_sim.c b/kernel/irq/irq_sim.c
new file mode 100644
index 000000000000..24caabf1a0f7
--- /dev/null
+++ b/kernel/irq/irq_sim.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2017 Bartosz Golaszewski <brgl@bgdev.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/irq_sim.h>
+#include <linux/irq.h>
+
+struct irq_sim_devres {
+	struct irq_sim		*sim;
+};
+
+static void irq_sim_irqmask(struct irq_data *data)
+{
+	struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
+
+	irq_ctx->enabled = false;
+}
+
+static void irq_sim_irqunmask(struct irq_data *data)
+{
+	struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
+
+	irq_ctx->enabled = true;
+}
+
+static struct irq_chip irq_sim_irqchip = {
+	.name		= "irq_sim",
+	.irq_mask	= irq_sim_irqmask,
+	.irq_unmask	= irq_sim_irqunmask,
+};
+
+static void irq_sim_handle_irq(struct irq_work *work)
+{
+	struct irq_sim_work_ctx *work_ctx;
+
+	work_ctx = container_of(work, struct irq_sim_work_ctx, work);
+	handle_simple_irq(irq_to_desc(work_ctx->irq));
+}
+
+/**
+ * irq_sim_init - Initialize the interrupt simulator: allocate a range of
+ *                dummy interrupts.
+ *
+ * @sim:        The interrupt simulator object to initialize.
+ * @num_irqs:   Number of interrupts to allocate
+ *
+ * Returns 0 on success and a negative error number on failure.
+ */
+int irq_sim_init(struct irq_sim *sim, unsigned int num_irqs)
+{
+	int i;
+
+	sim->irqs = kmalloc_array(num_irqs, sizeof(*sim->irqs), GFP_KERNEL);
+	if (!sim->irqs)
+		return -ENOMEM;
+
+	sim->irq_base = irq_alloc_descs(-1, 0, num_irqs, 0);
+	if (sim->irq_base < 0) {
+		kfree(sim->irqs);
+		return sim->irq_base;
+	}
+
+	for (i = 0; i < num_irqs; i++) {
+		sim->irqs[i].irqnum = sim->irq_base + i;
+		sim->irqs[i].enabled = false;
+		irq_set_chip(sim->irq_base + i, &irq_sim_irqchip);
+		irq_set_chip_data(sim->irq_base + i, &sim->irqs[i]);
+		irq_set_handler(sim->irq_base + i, &handle_simple_irq);
+		irq_modify_status(sim->irq_base + i,
+				  IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
+	}
+
+	init_irq_work(&sim->work_ctx.work, irq_sim_handle_irq);
+	sim->irq_count = num_irqs;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_sim_init);
+
+/**
+ * irq_sim_fini - Deinitialize the interrupt simulator: free the interrupt
+ *                descriptors and allocated memory.
+ *
+ * @sim:        The interrupt simulator to tear down.
+ */
+void irq_sim_fini(struct irq_sim *sim)
+{
+	irq_work_sync(&sim->work_ctx.work);
+	irq_free_descs(sim->irq_base, sim->irq_count);
+	kfree(sim->irqs);
+}
+EXPORT_SYMBOL_GPL(irq_sim_fini);
+
+static void devm_irq_sim_release(struct device *dev, void *res)
+{
+	struct irq_sim_devres *this = res;
+
+	irq_sim_fini(this->sim);
+}
+
+/**
+ * devm_irq_sim_init - Initialize the interrupt simulator for a managed
+ *                     device.
+ *
+ * @dev:        Device to initialize the simulator object for.
+ * @sim:        The interrupt simulator object to initialize.
+ * @num_irqs:   Number of interrupts to allocate
+ *
+ * Returns 0 on success and a negative error number on failure.
+ */
+int devm_irq_sim_init(struct device *dev, struct irq_sim *sim,
+		      unsigned int num_irqs)
+{
+	struct irq_sim_devres *dr;
+	int rv;
+
+	dr = devres_alloc(devm_irq_sim_release, sizeof(*dr), GFP_KERNEL);
+	if (!dr)
+		return -ENOMEM;
+
+	rv = irq_sim_init(sim, num_irqs);
+	if (rv) {
+		devres_free(dr);
+		return rv;
+	}
+
+	dr->sim = sim;
+	devres_add(dev, dr);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(devm_irq_sim_init);
+
+/**
+ * irq_sim_fire - Enqueue an interrupt.
+ *
+ * @sim:        The interrupt simulator object.
+ * @offset:     Offset of the simulated interrupt which should be fired.
+ */
+void irq_sim_fire(struct irq_sim *sim, unsigned int offset)
+{
+	if (sim->irqs[offset].enabled) {
+		sim->work_ctx.irq = irq_sim_irqnum(sim, offset);
+		irq_work_queue(&sim->work_ctx.work);
+	}
+}
+EXPORT_SYMBOL_GPL(irq_sim_fire);
+
+/**
+ * irq_sim_irqnum - Get the allocated number of a dummy interrupt.
+ *
+ * @sim:        The interrupt simulator object.
+ * @offset:     Offset of the simulated interrupt for which to retrieve
+ *              the number.
+ */
+int irq_sim_irqnum(struct irq_sim *sim, unsigned int offset)
+{
+	return sim->irqs[offset].irqnum;
+}
+EXPORT_SYMBOL_GPL(irq_sim_irqnum);
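
A hypothetical consumer of the simulator, condensed from the API above. The demo_* names are invented and nothing below is part of the patch; because the simulated lines clear IRQ_NOAUTOEN, request_irq() enables them immediately.

/* Hypothetical sketch: back a fake device with four simulated interrupts. */
#include <linux/irq_sim.h>
#include <linux/interrupt.h>

static struct irq_sim demo_sim;

static irqreturn_t demo_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_probe(void)
{
	int irq, ret;

	ret = irq_sim_init(&demo_sim, 4);	/* four dummy interrupts */
	if (ret)
		return ret;

	irq = irq_sim_irqnum(&demo_sim, 0);	/* map offset 0 to a virq */
	ret = request_irq(irq, demo_handler, 0, "demo", NULL);
	if (ret) {
		irq_sim_fini(&demo_sim);
		return ret;
	}

	/* Later, from any context: demo_handler() then runs via irq_work. */
	irq_sim_fire(&demo_sim, 0);
	return 0;
}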
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index f1f251479aa6..d62351714f3e 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -455,6 +455,31 @@ void irq_set_default_host(struct irq_domain *domain)
 }
 EXPORT_SYMBOL_GPL(irq_set_default_host);
 
+static void irq_domain_clear_mapping(struct irq_domain *domain,
+				     irq_hw_number_t hwirq)
+{
+	if (hwirq < domain->revmap_size) {
+		domain->linear_revmap[hwirq] = 0;
+	} else {
+		mutex_lock(&revmap_trees_mutex);
+		radix_tree_delete(&domain->revmap_tree, hwirq);
+		mutex_unlock(&revmap_trees_mutex);
+	}
+}
+
+static void irq_domain_set_mapping(struct irq_domain *domain,
+				   irq_hw_number_t hwirq,
+				   struct irq_data *irq_data)
+{
+	if (hwirq < domain->revmap_size) {
+		domain->linear_revmap[hwirq] = irq_data->irq;
+	} else {
+		mutex_lock(&revmap_trees_mutex);
+		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
+		mutex_unlock(&revmap_trees_mutex);
+	}
+}
+
 void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
 {
 	struct irq_data *irq_data = irq_get_irq_data(irq);
@@ -483,13 +508,7 @@ void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
 	domain->mapcount--;
 
 	/* Clear reverse map for this hwirq */
-	if (hwirq < domain->revmap_size) {
-		domain->linear_revmap[hwirq] = 0;
-	} else {
-		mutex_lock(&revmap_trees_mutex);
-		radix_tree_delete(&domain->revmap_tree, hwirq);
-		mutex_unlock(&revmap_trees_mutex);
-	}
+	irq_domain_clear_mapping(domain, hwirq);
 }
 
 int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
@@ -533,13 +552,7 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
 	}
 
 	domain->mapcount++;
-	if (hwirq < domain->revmap_size) {
-		domain->linear_revmap[hwirq] = virq;
-	} else {
-		mutex_lock(&revmap_trees_mutex);
-		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
-		mutex_unlock(&revmap_trees_mutex);
-	}
+	irq_domain_set_mapping(domain, hwirq, irq_data);
 	mutex_unlock(&irq_domain_mutex);
 
 	irq_clear_status_flags(virq, IRQ_NOREQUEST);
@@ -1138,16 +1151,9 @@ static void irq_domain_insert_irq(int virq)
 
 	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
 		struct irq_domain *domain = data->domain;
-		irq_hw_number_t hwirq = data->hwirq;
 
 		domain->mapcount++;
-		if (hwirq < domain->revmap_size) {
-			domain->linear_revmap[hwirq] = virq;
-		} else {
-			mutex_lock(&revmap_trees_mutex);
-			radix_tree_insert(&domain->revmap_tree, hwirq, data);
-			mutex_unlock(&revmap_trees_mutex);
-		}
+		irq_domain_set_mapping(domain, data->hwirq, data);
 
 		/* If not already assigned, give the domain the chip's name */
 		if (!domain->name && data->chip)
@@ -1171,13 +1177,7 @@ static void irq_domain_remove_irq(int virq)
 		irq_hw_number_t hwirq = data->hwirq;
 
 		domain->mapcount--;
-		if (hwirq < domain->revmap_size) {
-			domain->linear_revmap[hwirq] = 0;
-		} else {
-			mutex_lock(&revmap_trees_mutex);
-			radix_tree_delete(&domain->revmap_tree, hwirq);
-			mutex_unlock(&revmap_trees_mutex);
-		}
+		irq_domain_clear_mapping(domain, hwirq);
 	}
 }
 
@@ -1362,7 +1362,8 @@ static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
 					      unsigned int irq_base,
 					      unsigned int nr_irqs)
 {
-	domain->ops->free(domain, irq_base, nr_irqs);
+	if (domain->ops->free)
+		domain->ops->free(domain, irq_base, nr_irqs);
 }
 
 int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
@@ -1448,6 +1449,175 @@ out_free_desc:
 	return ret;
 }
 
+/* The irq_data was moved, fix the revmap to refer to the new location */
+static void irq_domain_fix_revmap(struct irq_data *d)
+{
+	void **slot;
+
+	if (d->hwirq < d->domain->revmap_size)
+		return; /* Not using radix tree. */
+
+	/* Fix up the revmap. */
+	mutex_lock(&revmap_trees_mutex);
+	slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
+	if (slot)
+		radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
+	mutex_unlock(&revmap_trees_mutex);
+}
+
+/**
+ * irq_domain_push_irq() - Push a domain into the top of a hierarchy.
+ * @domain:	Domain to push.
+ * @virq:	Irq to push the domain into.
+ * @arg:	Passed to the irq_domain_ops alloc() function.
+ *
+ * For an already existing irqdomain hierarchy, as might be obtained
+ * via a call to pci_enable_msix(), add an additional domain to the
+ * head of the processing chain.  Must be called before request_irq()
+ * has been called.
+ */
+int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
+{
+	struct irq_data *child_irq_data;
+	struct irq_data *root_irq_data = irq_get_irq_data(virq);
+	struct irq_desc *desc;
+	int rv = 0;
+
+	/*
+	 * Check that no action has been set, which indicates the virq
+	 * is in a state where this function doesn't have to deal with
+	 * races between interrupt handling and maintaining the
+	 * hierarchy.  This will catch gross misuse.  Attempting to
+	 * make the check race free would require holding locks across
+	 * calls to struct irq_domain_ops->alloc(), which could lead
+	 * to deadlock, so we just do a simple check before starting.
+	 */
+	desc = irq_to_desc(virq);
+	if (!desc)
+		return -EINVAL;
+	if (WARN_ON(desc->action))
+		return -EBUSY;
+
+	if (domain == NULL)
+		return -EINVAL;
+
+	if (WARN_ON(!irq_domain_is_hierarchy(domain)))
+		return -EINVAL;
+
+	if (!root_irq_data)
+		return -EINVAL;
+
+	if (domain->parent != root_irq_data->domain)
+		return -EINVAL;
+
+	child_irq_data = kzalloc_node(sizeof(*child_irq_data), GFP_KERNEL,
+				      irq_data_get_node(root_irq_data));
+	if (!child_irq_data)
+		return -ENOMEM;
+
+	mutex_lock(&irq_domain_mutex);
+
+	/* Copy the original irq_data. */
+	*child_irq_data = *root_irq_data;
+
+	/*
+	 * Overwrite the root_irq_data, which is embedded in struct
+	 * irq_desc, with values for this domain.
+	 */
+	root_irq_data->parent_data = child_irq_data;
+	root_irq_data->domain = domain;
+	root_irq_data->mask = 0;
+	root_irq_data->hwirq = 0;
+	root_irq_data->chip = NULL;
+	root_irq_data->chip_data = NULL;
+
+	/* May (probably does) set hwirq, chip, etc. */
+	rv = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
+	if (rv) {
+		/* Restore the original irq_data. */
+		*root_irq_data = *child_irq_data;
+		goto error;
+	}
+
+	irq_domain_fix_revmap(child_irq_data);
+	irq_domain_set_mapping(domain, root_irq_data->hwirq, root_irq_data);
+
+error:
+	mutex_unlock(&irq_domain_mutex);
+
+	return rv;
+}
+EXPORT_SYMBOL_GPL(irq_domain_push_irq);
+
+/**
+ * irq_domain_pop_irq() - Remove a domain from the top of a hierarchy.
+ * @domain:	Domain to remove.
+ * @virq:	Irq to remove the domain from.
+ *
+ * Undo the effects of a call to irq_domain_push_irq().  Must be
+ * called either before request_irq() or after free_irq().
+ */
+int irq_domain_pop_irq(struct irq_domain *domain, int virq)
+{
+	struct irq_data *root_irq_data = irq_get_irq_data(virq);
+	struct irq_data *child_irq_data;
+	struct irq_data *tmp_irq_data;
+	struct irq_desc *desc;
+
+	/*
+	 * Check that no action is set, which indicates the virq is in
+	 * a state where this function doesn't have to deal with races
+	 * between interrupt handling and maintaining the hierarchy.
+	 * This will catch gross misuse.  Attempting to make the check
+	 * race free would require holding locks across calls to
+	 * struct irq_domain_ops->free(), which could lead to
+	 * deadlock, so we just do a simple check before starting.
+	 */
+	desc = irq_to_desc(virq);
+	if (!desc)
+		return -EINVAL;
+	if (WARN_ON(desc->action))
+		return -EBUSY;
+
+	if (domain == NULL)
+		return -EINVAL;
+
+	if (!root_irq_data)
+		return -EINVAL;
+
+	tmp_irq_data = irq_domain_get_irq_data(domain, virq);
+
+	/* We can only "pop" if this domain is at the top of the list */
+	if (WARN_ON(root_irq_data != tmp_irq_data))
+		return -EINVAL;
+
+	if (WARN_ON(root_irq_data->domain != domain))
+		return -EINVAL;
+
+	child_irq_data = root_irq_data->parent_data;
+	if (WARN_ON(!child_irq_data))
+		return -EINVAL;
+
+	mutex_lock(&irq_domain_mutex);
+
+	root_irq_data->parent_data = NULL;
+
+	irq_domain_clear_mapping(domain, root_irq_data->hwirq);
+	irq_domain_free_irqs_hierarchy(domain, virq, 1);
+
+	/* Restore the original irq_data. */
+	*root_irq_data = *child_irq_data;
+
+	irq_domain_fix_revmap(root_irq_data);
+
+	mutex_unlock(&irq_domain_mutex);
+
+	kfree(child_irq_data);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_pop_irq);
+
 /**
  * irq_domain_free_irqs - Free IRQ number and associated data structures
  * @virq:	base IRQ number
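
Per the kernel-doc above, push must happen before request_irq() and pop only after free_irq(). A hypothetical sketch of the pairing; 'extra_domain', its alloc argument, and the caller are all invented for illustration.

/* Hypothetical sketch: interpose 'extra_domain' above an already-allocated
 * (e.g. MSI-X) virq, then tear it back down again. */
static int demo_push_pop(struct irq_domain *extra_domain, int virq, void *arg)
{
	int err;

	err = irq_domain_push_irq(extra_domain, virq, arg);
	if (err)
		return err;	/* must happen before request_irq() */

	/* ... request_irq(), use the interrupt, free_irq() ... */

	return irq_domain_pop_irq(extra_domain, virq);
}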
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 1d1a5b945ab4..573dc52b0806 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -400,8 +400,18 @@ int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
 		return -EINVAL;
 
 	data = irq_desc_get_irq_data(desc);
-	chip = irq_data_get_irq_chip(data);
-	if (chip && chip->irq_set_vcpu_affinity)
+	do {
+		chip = irq_data_get_irq_chip(data);
+		if (chip && chip->irq_set_vcpu_affinity)
+			break;
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+		data = data->parent_data;
+#else
+		data = NULL;
+#endif
+	} while (data);
+
+	if (data)
 		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
 	irq_put_desc_unlock(desc, flags);
 
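
Walking up to the first parent chip that implements ->irq_set_vcpu_affinity() is what lets a caller reach, say, a GICv4 ITS sitting below a PCI/MSI domain. A simplified sketch of how a GICv4-style helper can now funnel a command through this interface; it is modelled on the its_cmd_info layout from arm-gic-v4.h earlier in this series, not copied from the actual helpers, and is illustrative only.

/* Sketch: push a MAP_VLPI command down to whichever ancestor chip
 * implements ->irq_set_vcpu_affinity(). 'irq' is a PCI/MSI virq. */
static int demo_map_vlpi(int irq, struct its_vlpi_map *map)
{
	struct its_cmd_info info = {
		.cmd_type	= MAP_VLPI,
		.map		= map,
	};

	return irq_set_vcpu_affinity(irq, &info);
}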
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 7f9642a1e267..6376b4a598d3 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -61,12 +61,12 @@ static int show_irq_affinity(int type, struct seq_file *m)
 	case EFFECTIVE:
 	case EFFECTIVE_LIST:
 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
-		mask = desc->irq_common_data.effective_affinity;
+		mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
 		break;
-#else
-		return -EINVAL;
 #endif
-	};
+	default:
+		return -EINVAL;
+	}
 
 	switch (type) {
 	case AFFINITY_LIST:
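
For the EFFECTIVE and EFFECTIVE_LIST output above to show anything useful, an irqchip has to record what it actually programmed. A hypothetical ->irq_set_affinity() showing that pairing; it assumes CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK, the demo_* name is invented, and the hardware programming step is elided.

/* Sketch: a chip that can only target one CPU records the effective mask,
 * which show_irq_affinity() above then reports via /proc. */
static int demo_set_affinity(struct irq_data *d, const struct cpumask *mask,
			     bool force)
{
	unsigned int cpu = cpumask_first_and(mask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* ... program the hardware route to 'cpu' (elided) ... */

	irq_data_update_effective_affinity(d, cpumask_of(cpu));
	return IRQ_SET_MASK_OK;
}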