90 files changed, 2575 insertions, 888 deletions
diff --git a/Documentation/IRQ-domain.txt b/Documentation/IRQ-domain.txt index 3a8e15cba816..8d990bde8693 100644 --- a/Documentation/IRQ-domain.txt +++ b/Documentation/IRQ-domain.txt | |||
@@ -32,9 +32,9 @@ top of the irq_alloc_desc*() API. An irq_domain to manage mapping is | |||
32 | preferred over interrupt controller drivers open coding their own | 32 | preferred over interrupt controller drivers open coding their own |
33 | reverse mapping scheme. | 33 | reverse mapping scheme. |
34 | 34 | ||
35 | irq_domain also implements translation from Device Tree interrupt | 35 | irq_domain also implements translation from an abstract irq_fwspec |
36 | specifiers to hwirq numbers, and can be easily extended to support | 36 | structure to hwirq numbers (Device Tree and ACPI GSI so far), and can |
37 | other IRQ topology data sources. | 37 | be easily extended to support other IRQ topology data sources. |
38 | 38 | ||
39 | === irq_domain usage === | 39 | === irq_domain usage === |
40 | An interrupt controller driver creates and registers an irq_domain by | 40 | An interrupt controller driver creates and registers an irq_domain by |
@@ -184,7 +184,7 @@ There are four major interfaces to use hierarchy irq_domain: | |||
184 | related resources associated with these interrupts. | 184 | related resources associated with these interrupts. |
185 | 3) irq_domain_activate_irq(): activate interrupt controller hardware to | 185 | 3) irq_domain_activate_irq(): activate interrupt controller hardware to |
186 | deliver the interrupt. | 186 | deliver the interrupt. |
187 | 3) irq_domain_deactivate_irq(): deactivate interrupt controller hardware | 187 | 4) irq_domain_deactivate_irq(): deactivate interrupt controller hardware |
188 | to stop delivering the interrupt. | 188 | to stop delivering the interrupt. |
189 | 189 | ||
190 | Following changes are needed to support hierarchy irq_domain. | 190 | Following changes are needed to support hierarchy irq_domain. |
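
The first hunk above now describes translation from an abstract irq_fwspec rather than raw Device Tree interrupt specifiers. As a rough illustration of what an irq_fwspec-based .translate callback looks like for a three-cell, GIC-style binding (modelled on the platform conversions later in this series; the function name is purely illustrative, not part of the patch):

#include <linux/irqdomain.h>
#include <linux/of.h>

static int example_domain_translate(struct irq_domain *d,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	/* This sketch only handles DT-originated specifiers. */
	if (!is_of_node(fwspec->fwnode))
		return -EINVAL;

	/* Three cells: SPI/PPI selector, interrupt number, trigger type. */
	if (fwspec->param_count != 3)
		return -EINVAL;

	/* No PPI should point at this (example) domain. */
	if (fwspec->param[0] != 0)
		return -EINVAL;

	*hwirq = fwspec->param[1];
	*type = fwspec->param[2];
	return 0;
}
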
diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt index 7d9d3c2286b2..369a4f48eb0d 100644 --- a/Documentation/arm64/booting.txt +++ b/Documentation/arm64/booting.txt | |||
@@ -173,13 +173,22 @@ Before jumping into the kernel, the following conditions must be met: | |||
173 | the kernel image will be entered must be initialised by software at a | 173 | the kernel image will be entered must be initialised by software at a |
174 | higher exception level to prevent execution in an UNKNOWN state. | 174 | higher exception level to prevent execution in an UNKNOWN state. |
175 | 175 | ||
176 | For systems with a GICv3 interrupt controller: | 176 | For systems with a GICv3 interrupt controller to be used in v3 mode: |
177 | - If EL3 is present: | 177 | - If EL3 is present: |
178 | ICC_SRE_EL3.Enable (bit 3) must be initialised to 0b1. | 178 | ICC_SRE_EL3.Enable (bit 3) must be initialised to 0b1. |
179 | ICC_SRE_EL3.SRE (bit 0) must be initialised to 0b1. | 179 | ICC_SRE_EL3.SRE (bit 0) must be initialised to 0b1. |
180 | - If the kernel is entered at EL1: | 180 | - If the kernel is entered at EL1: |
181 | ICC_SRE_EL2.Enable (bit 3) must be initialised to 0b1. | 181 | ICC_SRE_EL2.Enable (bit 3) must be initialised to 0b1. |
182 | ICC_SRE_EL2.SRE (bit 0) must be initialised to 0b1. | 182 | ICC_SRE_EL2.SRE (bit 0) must be initialised to 0b1. |
183 | - The DT or ACPI tables must describe a GICv3 interrupt controller. | ||
184 | |||
185 | For systems with a GICv3 interrupt controller to be used in | ||
186 | compatibility (v2) mode: | ||
187 | - If EL3 is present: | ||
188 | ICC_SRE_EL3.SRE (bit 0) must be initialised to 0b0. | ||
189 | - If the kernel is entered at EL1: | ||
190 | ICC_SRE_EL2.SRE (bit 0) must be initialised to 0b0. | ||
191 | - The DT or ACPI tables must describe a GICv2 interrupt controller. | ||
183 | 192 | ||
184 | The requirements described above for CPU mode, caches, MMUs, architected | 193 | The requirements described above for CPU mode, caches, MMUs, architected |
185 | timers, coherency and system registers apply to all CPUs. All CPUs must | 194 | timers, coherency and system registers apply to all CPUs. All CPUs must |
diff --git a/Documentation/devicetree/bindings/arm/gic.txt b/Documentation/devicetree/bindings/arm/gic.txt index 2da059a4790c..cc56021eb60b 100644 --- a/Documentation/devicetree/bindings/arm/gic.txt +++ b/Documentation/devicetree/bindings/arm/gic.txt | |||
@@ -11,13 +11,14 @@ have PPIs or SGIs. | |||
11 | Main node required properties: | 11 | Main node required properties: |
12 | 12 | ||
13 | - compatible : should be one of: | 13 | - compatible : should be one of: |
14 | "arm,gic-400" | 14 | "arm,arm1176jzf-devchip-gic" |
15 | "arm,arm11mp-gic" | ||
15 | "arm,cortex-a15-gic" | 16 | "arm,cortex-a15-gic" |
16 | "arm,cortex-a9-gic" | ||
17 | "arm,cortex-a7-gic" | 17 | "arm,cortex-a7-gic" |
18 | "arm,arm11mp-gic" | 18 | "arm,cortex-a9-gic" |
19 | "arm,gic-400" | ||
20 | "arm,pl390" | ||
19 | "brcm,brahma-b15-gic" | 21 | "brcm,brahma-b15-gic" |
20 | "arm,arm1176jzf-devchip-gic" | ||
21 | "qcom,msm-8660-qgic" | 22 | "qcom,msm-8660-qgic" |
22 | "qcom,msm-qgic2" | 23 | "qcom,msm-qgic2" |
23 | - interrupt-controller : Identifies the node as an interrupt controller | 24 | - interrupt-controller : Identifies the node as an interrupt controller |
@@ -58,6 +59,21 @@ Optional | |||
58 | regions, used when the GIC doesn't have banked registers. The offset is | 59 | regions, used when the GIC doesn't have banked registers. The offset is |
59 | cpu-offset * cpu-nr. | 60 | cpu-offset * cpu-nr. |
60 | 61 | ||
62 | - clocks : List of phandle and clock-specifier pairs, one for each entry | ||
63 | in clock-names. | ||
64 | - clock-names : List of names for the GIC clock input(s). Valid clock names | ||
65 | depend on the GIC variant: | ||
66 | "ic_clk" (for "arm,arm11mp-gic") | ||
67 | "PERIPHCLKEN" (for "arm,cortex-a15-gic") | ||
68 | "PERIPHCLK", "PERIPHCLKEN" (for "arm,cortex-a9-gic") | ||
69 | "clk" (for "arm,gic-400") | ||
70 | "gclk" (for "arm,pl390") | ||
71 | |||
72 | - power-domains : A phandle and PM domain specifier as defined by bindings of | ||
73 | the power controller specified by phandle, used when the GIC | ||
74 | is part of a Power or Clock Domain. | ||
75 | |||
76 | |||
61 | Example: | 77 | Example: |
62 | 78 | ||
63 | intc: interrupt-controller@fff11000 { | 79 | intc: interrupt-controller@fff11000 { |
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt index 63633bdea7e4..ae5054c27c99 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt | |||
@@ -10,6 +10,7 @@ Required properties: | |||
10 | - "renesas,irqc-r8a7792" (R-Car V2H) | 10 | - "renesas,irqc-r8a7792" (R-Car V2H) |
11 | - "renesas,irqc-r8a7793" (R-Car M2-N) | 11 | - "renesas,irqc-r8a7793" (R-Car M2-N) |
12 | - "renesas,irqc-r8a7794" (R-Car E2) | 12 | - "renesas,irqc-r8a7794" (R-Car E2) |
13 | - "renesas,intc-ex-r8a7795" (R-Car H3) | ||
13 | - #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in | 14 | - #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in |
14 | interrupts.txt in this directory | 15 | interrupts.txt in this directory |
15 | - clocks: Must contain a reference to the functional clock. | 16 | - clocks: Must contain a reference to the functional clock. |
diff --git a/Documentation/devicetree/bindings/pci/pci-msi.txt b/Documentation/devicetree/bindings/pci/pci-msi.txt new file mode 100644 index 000000000000..9b3cc817d181 --- /dev/null +++ b/Documentation/devicetree/bindings/pci/pci-msi.txt | |||
@@ -0,0 +1,220 @@ | |||
1 | This document describes the generic device tree binding for describing the | ||
2 | relationship between PCI devices and MSI controllers. | ||
3 | |||
4 | Each PCI device under a root complex is uniquely identified by its Requester ID | ||
5 | (AKA RID). A Requester ID is a triplet of a Bus number, Device number, and | ||
6 | Function number. | ||
7 | |||
8 | For the purpose of this document, when treated as a numeric value, a RID is | ||
9 | formatted such that: | ||
10 | |||
11 | * Bits [15:8] are the Bus number. | ||
12 | * Bits [7:3] are the Device number. | ||
13 | * Bits [2:0] are the Function number. | ||
14 | * Any other bits required for padding must be zero. | ||
15 | |||
16 | MSIs may be distinguished in part through the use of sideband data accompanying | ||
17 | writes. In the case of PCI devices, this sideband data may be derived from the | ||
18 | Requester ID. A mechanism is required to associate a device with both the MSI | ||
19 | controllers it can address, and the sideband data that will be associated with | ||
20 | its writes to those controllers. | ||
21 | |||
22 | For generic MSI bindings, see | ||
23 | Documentation/devicetree/bindings/interrupt-controller/msi.txt. | ||
24 | |||
25 | |||
26 | PCI root complex | ||
27 | ================ | ||
28 | |||
29 | Optional properties | ||
30 | ------------------- | ||
31 | |||
32 | - msi-map: Maps a Requester ID to an MSI controller and associated | ||
33 | msi-specifier data. The property is an arbitrary number of tuples of | ||
34 | (rid-base,msi-controller,msi-base,length), where: | ||
35 | |||
36 | * rid-base is a single cell describing the first RID matched by the entry. | ||
37 | |||
38 | * msi-controller is a single phandle to an MSI controller | ||
39 | |||
40 | * msi-base is an msi-specifier describing the msi-specifier produced for the | ||
41 | first RID matched by the entry. | ||
42 | |||
43 | * length is a single cell describing how many consecutive RIDs are matched | ||
44 | following the rid-base. | ||
45 | |||
46 | Any RID r in the interval [rid-base, rid-base + length) is associated with | ||
47 | the listed msi-controller, with the msi-specifier (r - rid-base + msi-base). | ||
48 | |||
49 | - msi-map-mask: A mask to be applied to each Requester ID prior to being mapped | ||
50 | to an msi-specifier per the msi-map property. | ||
51 | |||
52 | - msi-parent: Describes the MSI parent of the root complex itself. Where | ||
53 | the root complex and MSI controller do not pass sideband data with MSI | ||
54 | writes, this property may be used to describe the MSI controller(s) | ||
55 | used by PCI devices under the root complex, if defined as such in the | ||
56 | binding for the root complex. | ||
57 | |||
58 | |||
59 | Example (1) | ||
60 | =========== | ||
61 | |||
62 | / { | ||
63 | #address-cells = <1>; | ||
64 | #size-cells = <1>; | ||
65 | |||
66 | msi: msi-controller@a { | ||
67 | reg = <0xa 0x1>; | ||
68 | compatible = "vendor,some-controller"; | ||
69 | msi-controller; | ||
70 | #msi-cells = <1>; | ||
71 | }; | ||
72 | |||
73 | pci: pci@f { | ||
74 | reg = <0xf 0x1>; | ||
75 | compatible = "vendor,pcie-root-complex"; | ||
76 | device_type = "pci"; | ||
77 | |||
78 | /* | ||
79 | * The sideband data provided to the MSI controller is | ||
80 | * the RID, identity-mapped. | ||
81 | */ | ||
82 | msi-map = <0x0 &msi 0x0 0x10000>; | ||
83 | }; | ||
84 | }; | ||
85 | |||
86 | |||
87 | Example (2) | ||
88 | =========== | ||
89 | |||
90 | / { | ||
91 | #address-cells = <1>; | ||
92 | #size-cells = <1>; | ||
93 | |||
94 | msi: msi-controller@a { | ||
95 | reg = <0xa 0x1>; | ||
96 | compatible = "vendor,some-controller"; | ||
97 | msi-controller; | ||
98 | #msi-cells = <1>; | ||
99 | }; | ||
100 | |||
101 | pci: pci@f { | ||
102 | reg = <0xf 0x1>; | ||
103 | compatible = "vendor,pcie-root-complex"; | ||
104 | device_type = "pci"; | ||
105 | |||
106 | /* | ||
107 | * The sideband data provided to the MSI controller is | ||
108 | * the RID, masked to only the device and function bits. | ||
109 | */ | ||
110 | msi-map = <0x0 &msi 0x0 0x100>; | ||
111 | msi-map-mask = <0xff>; | ||
112 | }; | ||
113 | }; | ||
114 | |||
115 | |||
116 | Example (3) | ||
117 | =========== | ||
118 | |||
119 | / { | ||
120 | #address-cells = <1>; | ||
121 | #size-cells = <1>; | ||
122 | |||
123 | msi: msi-controller@a { | ||
124 | reg = <0xa 0x1>; | ||
125 | compatible = "vendor,some-controller"; | ||
126 | msi-controller; | ||
127 | #msi-cells = <1>; | ||
128 | }; | ||
129 | |||
130 | pci: pci@f { | ||
131 | reg = <0xf 0x1>; | ||
132 | compatible = "vendor,pcie-root-complex"; | ||
133 | device_type = "pci"; | ||
134 | |||
135 | /* | ||
136 | * The sideband data provided to the MSI controller is | ||
137 | * the RID, but the high bit of the bus number is | ||
138 | * ignored. | ||
139 | */ | ||
140 | msi-map = <0x0000 &msi 0x0000 0x8000>, | ||
141 | <0x8000 &msi 0x0000 0x8000>; | ||
142 | }; | ||
143 | }; | ||
144 | |||
145 | |||
146 | Example (4) | ||
147 | =========== | ||
148 | |||
149 | / { | ||
150 | #address-cells = <1>; | ||
151 | #size-cells = <1>; | ||
152 | |||
153 | msi: msi-controller@a { | ||
154 | reg = <0xa 0x1>; | ||
155 | compatible = "vendor,some-controller"; | ||
156 | msi-controller; | ||
157 | #msi-cells = <1>; | ||
158 | }; | ||
159 | |||
160 | pci: pci@f { | ||
161 | reg = <0xf 0x1>; | ||
162 | compatible = "vendor,pcie-root-complex"; | ||
163 | device_type = "pci"; | ||
164 | |||
165 | /* | ||
166 | * The sideband data provided to the MSI controller is | ||
167 | * the RID, but the high bit of the bus number is | ||
168 | * negated. | ||
169 | */ | ||
170 | msi-map = <0x0000 &msi 0x8000 0x8000>, | ||
171 | <0x8000 &msi 0x0000 0x8000>; | ||
172 | }; | ||
173 | }; | ||
174 | |||
175 | |||
176 | Example (5) | ||
177 | =========== | ||
178 | |||
179 | / { | ||
180 | #address-cells = <1>; | ||
181 | #size-cells = <1>; | ||
182 | |||
183 | msi_a: msi-controller@a { | ||
184 | reg = <0xa 0x1>; | ||
185 | compatible = "vendor,some-controller"; | ||
186 | msi-controller; | ||
187 | #msi-cells = <1>; | ||
188 | }; | ||
189 | |||
190 | msi_b: msi-controller@b { | ||
191 | reg = <0xb 0x1>; | ||
192 | compatible = "vendor,some-controller"; | ||
193 | msi-controller; | ||
194 | #msi-cells = <1>; | ||
195 | }; | ||
196 | |||
197 | msi_c: msi-controller@c { | ||
198 | reg = <0xc 0x1>; | ||
199 | compatible = "vendor,some-controller"; | ||
200 | msi-controller; | ||
201 | #msi-cells = <1>; | ||
202 | }; | ||
203 | |||
204 | pci: pci@f { | ||
205 | reg = <0xf 0x1>; | ||
206 | compatible = "vendor,pcie-root-complex"; | ||
207 | device_type = "pci"; | ||
208 | |||
209 | /* | ||
210 | * The sideband data provided to MSI controller a is the | ||
211 | * RID, but the high bit of the bus number is negated. | ||
212 | * The sideband data provided to MSI controller b is the | ||
213 | * RID, identity-mapped. | ||
214 | * MSI controller c is not addressable. | ||
215 | */ | ||
216 | msi-map = <0x0000 &msi_a 0x8000 0x08000>, | ||
217 | <0x8000 &msi_a 0x0000 0x08000>, | ||
218 | <0x0000 &msi_b 0x0000 0x10000>; | ||
219 | }; | ||
220 | }; | ||
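
The RID layout and the msi-map/msi-map-mask lookup described in this binding reduce to simple arithmetic. The following is an illustrative, stand-alone sketch of that arithmetic only; the structure and function names are invented for the example and are not kernel API:

#include <stdbool.h>
#include <stdint.h>

/* RID layout from the binding: bus in [15:8], device in [7:3], function in [2:0]. */
static inline uint16_t pci_rid(uint8_t bus, uint8_t dev, uint8_t fn)
{
	return (uint16_t)((bus << 8) | ((dev & 0x1f) << 3) | (fn & 0x7));
}

struct msi_map_entry {
	uint32_t rid_base;	/* first RID matched by the entry */
	uint32_t msi_base;	/* msi-specifier produced for rid_base */
	uint32_t length;	/* number of consecutive RIDs matched */
};

/*
 * Apply msi-map-mask, then walk the msi-map entries; the msi-specifier for
 * a matching entry is (masked_rid - rid_base + msi_base).
 */
static bool map_rid_to_msi_spec(const struct msi_map_entry *map, int n,
				uint32_t mask, uint32_t rid, uint32_t *spec)
{
	uint32_t masked = rid & mask;
	int i;

	for (i = 0; i < n; i++) {
		if (masked >= map[i].rid_base &&
		    masked < map[i].rid_base + map[i].length) {
			*spec = masked - map[i].rid_base + map[i].msi_base;
			return true;
		}
	}
	return false;
}
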
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 823f90ea65c4..f1ed1109f488 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -820,6 +820,7 @@ config ARCH_VIRT | |||
820 | bool "Dummy Virtual Machine" if ARCH_MULTI_V7 | 820 | bool "Dummy Virtual Machine" if ARCH_MULTI_V7 |
821 | select ARM_AMBA | 821 | select ARM_AMBA |
822 | select ARM_GIC | 822 | select ARM_GIC |
823 | select ARM_GIC_V3 | ||
823 | select ARM_PSCI | 824 | select ARM_PSCI |
824 | select HAVE_ARM_ARCH_TIMER | 825 | select HAVE_ARM_ARCH_TIMER |
825 | 826 | ||
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h new file mode 100644 index 000000000000..6607d976e07d --- /dev/null +++ b/arch/arm/include/asm/arch_gicv3.h | |||
@@ -0,0 +1,188 @@ | |||
1 | /* | ||
2 | * arch/arm/include/asm/arch_gicv3.h | ||
3 | * | ||
4 | * Copyright (C) 2015 ARM Ltd. | ||
5 | * | ||
6 | * This program is free software: you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | #ifndef __ASM_ARCH_GICV3_H | ||
19 | #define __ASM_ARCH_GICV3_H | ||
20 | |||
21 | #ifndef __ASSEMBLY__ | ||
22 | |||
23 | #include <linux/io.h> | ||
24 | |||
25 | #define __ACCESS_CP15(CRn, Op1, CRm, Op2) p15, Op1, %0, CRn, CRm, Op2 | ||
26 | #define __ACCESS_CP15_64(Op1, CRm) p15, Op1, %Q0, %R0, CRm | ||
27 | |||
28 | #define ICC_EOIR1 __ACCESS_CP15(c12, 0, c12, 1) | ||
29 | #define ICC_DIR __ACCESS_CP15(c12, 0, c11, 1) | ||
30 | #define ICC_IAR1 __ACCESS_CP15(c12, 0, c12, 0) | ||
31 | #define ICC_SGI1R __ACCESS_CP15_64(0, c12) | ||
32 | #define ICC_PMR __ACCESS_CP15(c4, 0, c6, 0) | ||
33 | #define ICC_CTLR __ACCESS_CP15(c12, 0, c12, 4) | ||
34 | #define ICC_SRE __ACCESS_CP15(c12, 0, c12, 5) | ||
35 | #define ICC_IGRPEN1 __ACCESS_CP15(c12, 0, c12, 7) | ||
36 | |||
37 | #define ICC_HSRE __ACCESS_CP15(c12, 4, c9, 5) | ||
38 | |||
39 | #define ICH_VSEIR __ACCESS_CP15(c12, 4, c9, 4) | ||
40 | #define ICH_HCR __ACCESS_CP15(c12, 4, c11, 0) | ||
41 | #define ICH_VTR __ACCESS_CP15(c12, 4, c11, 1) | ||
42 | #define ICH_MISR __ACCESS_CP15(c12, 4, c11, 2) | ||
43 | #define ICH_EISR __ACCESS_CP15(c12, 4, c11, 3) | ||
44 | #define ICH_ELSR __ACCESS_CP15(c12, 4, c11, 5) | ||
45 | #define ICH_VMCR __ACCESS_CP15(c12, 4, c11, 7) | ||
46 | |||
47 | #define __LR0(x) __ACCESS_CP15(c12, 4, c12, x) | ||
48 | #define __LR8(x) __ACCESS_CP15(c12, 4, c13, x) | ||
49 | |||
50 | #define ICH_LR0 __LR0(0) | ||
51 | #define ICH_LR1 __LR0(1) | ||
52 | #define ICH_LR2 __LR0(2) | ||
53 | #define ICH_LR3 __LR0(3) | ||
54 | #define ICH_LR4 __LR0(4) | ||
55 | #define ICH_LR5 __LR0(5) | ||
56 | #define ICH_LR6 __LR0(6) | ||
57 | #define ICH_LR7 __LR0(7) | ||
58 | #define ICH_LR8 __LR8(0) | ||
59 | #define ICH_LR9 __LR8(1) | ||
60 | #define ICH_LR10 __LR8(2) | ||
61 | #define ICH_LR11 __LR8(3) | ||
62 | #define ICH_LR12 __LR8(4) | ||
63 | #define ICH_LR13 __LR8(5) | ||
64 | #define ICH_LR14 __LR8(6) | ||
65 | #define ICH_LR15 __LR8(7) | ||
66 | |||
67 | /* LR top half */ | ||
68 | #define __LRC0(x) __ACCESS_CP15(c12, 4, c14, x) | ||
69 | #define __LRC8(x) __ACCESS_CP15(c12, 4, c15, x) | ||
70 | |||
71 | #define ICH_LRC0 __LRC0(0) | ||
72 | #define ICH_LRC1 __LRC0(1) | ||
73 | #define ICH_LRC2 __LRC0(2) | ||
74 | #define ICH_LRC3 __LRC0(3) | ||
75 | #define ICH_LRC4 __LRC0(4) | ||
76 | #define ICH_LRC5 __LRC0(5) | ||
77 | #define ICH_LRC6 __LRC0(6) | ||
78 | #define ICH_LRC7 __LRC0(7) | ||
79 | #define ICH_LRC8 __LRC8(0) | ||
80 | #define ICH_LRC9 __LRC8(1) | ||
81 | #define ICH_LRC10 __LRC8(2) | ||
82 | #define ICH_LRC11 __LRC8(3) | ||
83 | #define ICH_LRC12 __LRC8(4) | ||
84 | #define ICH_LRC13 __LRC8(5) | ||
85 | #define ICH_LRC14 __LRC8(6) | ||
86 | #define ICH_LRC15 __LRC8(7) | ||
87 | |||
88 | #define __AP0Rx(x) __ACCESS_CP15(c12, 4, c8, x) | ||
89 | #define ICH_AP0R0 __AP0Rx(0) | ||
90 | #define ICH_AP0R1 __AP0Rx(1) | ||
91 | #define ICH_AP0R2 __AP0Rx(2) | ||
92 | #define ICH_AP0R3 __AP0Rx(3) | ||
93 | |||
94 | #define __AP1Rx(x) __ACCESS_CP15(c12, 4, c9, x) | ||
95 | #define ICH_AP1R0 __AP1Rx(0) | ||
96 | #define ICH_AP1R1 __AP1Rx(1) | ||
97 | #define ICH_AP1R2 __AP1Rx(2) | ||
98 | #define ICH_AP1R3 __AP1Rx(3) | ||
99 | |||
100 | /* Low-level accessors */ | ||
101 | |||
102 | static inline void gic_write_eoir(u32 irq) | ||
103 | { | ||
104 | asm volatile("mcr " __stringify(ICC_EOIR1) : : "r" (irq)); | ||
105 | isb(); | ||
106 | } | ||
107 | |||
108 | static inline void gic_write_dir(u32 val) | ||
109 | { | ||
110 | asm volatile("mcr " __stringify(ICC_DIR) : : "r" (val)); | ||
111 | isb(); | ||
112 | } | ||
113 | |||
114 | static inline u32 gic_read_iar(void) | ||
115 | { | ||
116 | u32 irqstat; | ||
117 | |||
118 | asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat)); | ||
119 | return irqstat; | ||
120 | } | ||
121 | |||
122 | static inline void gic_write_pmr(u32 val) | ||
123 | { | ||
124 | asm volatile("mcr " __stringify(ICC_PMR) : : "r" (val)); | ||
125 | } | ||
126 | |||
127 | static inline void gic_write_ctlr(u32 val) | ||
128 | { | ||
129 | asm volatile("mcr " __stringify(ICC_CTLR) : : "r" (val)); | ||
130 | isb(); | ||
131 | } | ||
132 | |||
133 | static inline void gic_write_grpen1(u32 val) | ||
134 | { | ||
135 | asm volatile("mcr " __stringify(ICC_IGRPEN1) : : "r" (val)); | ||
136 | isb(); | ||
137 | } | ||
138 | |||
139 | static inline void gic_write_sgi1r(u64 val) | ||
140 | { | ||
141 | asm volatile("mcrr " __stringify(ICC_SGI1R) : : "r" (val)); | ||
142 | } | ||
143 | |||
144 | static inline u32 gic_read_sre(void) | ||
145 | { | ||
146 | u32 val; | ||
147 | |||
148 | asm volatile("mrc " __stringify(ICC_SRE) : "=r" (val)); | ||
149 | return val; | ||
150 | } | ||
151 | |||
152 | static inline void gic_write_sre(u32 val) | ||
153 | { | ||
154 | asm volatile("mcr " __stringify(ICC_SRE) : : "r" (val)); | ||
155 | isb(); | ||
156 | } | ||
157 | |||
158 | /* | ||
159 | * Even in 32bit systems that use LPAE, there is no guarantee that the I/O | ||
160 | * interface provides true 64bit atomic accesses, so using strd/ldrd doesn't | ||
161 | * make much sense. | ||
162 | * Moreover, 64bit I/O emulation is extremely difficult to implement on | ||
163 | * AArch32, since the syndrome register doesn't provide any information for | ||
164 | * them. | ||
165 | * Consequently, the following IO helpers use 32bit accesses. | ||
166 | * | ||
167 | * There are only two registers that need 64bit accesses in this driver: | ||
168 | * - GICD_IROUTERn, contain the affinity values associated to each interrupt. | ||
169 | * The upper-word (aff3) will always be 0, so there is no need for a lock. | ||
170 | * - GICR_TYPER is an ID register and doesn't need atomicity. | ||
171 | */ | ||
172 | static inline void gic_write_irouter(u64 val, volatile void __iomem *addr) | ||
173 | { | ||
174 | writel_relaxed((u32)val, addr); | ||
175 | writel_relaxed((u32)(val >> 32), addr + 4); | ||
176 | } | ||
177 | |||
178 | static inline u64 gic_read_typer(const volatile void __iomem *addr) | ||
179 | { | ||
180 | u64 val; | ||
181 | |||
182 | val = readl_relaxed(addr); | ||
183 | val |= (u64)readl_relaxed(addr + 4) << 32; | ||
184 | return val; | ||
185 | } | ||
186 | |||
187 | #endif /* !__ASSEMBLY__ */ | ||
188 | #endif /* !__ASM_ARCH_GICV3_H */ | ||
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c index e00eb39453a4..5a7e47ceec91 100644 --- a/arch/arm/mach-exynos/suspend.c +++ b/arch/arm/mach-exynos/suspend.c | |||
@@ -177,54 +177,57 @@ static struct irq_chip exynos_pmu_chip = { | |||
177 | #endif | 177 | #endif |
178 | }; | 178 | }; |
179 | 179 | ||
180 | static int exynos_pmu_domain_xlate(struct irq_domain *domain, | 180 | static int exynos_pmu_domain_translate(struct irq_domain *d, |
181 | struct device_node *controller, | 181 | struct irq_fwspec *fwspec, |
182 | const u32 *intspec, | 182 | unsigned long *hwirq, |
183 | unsigned int intsize, | 183 | unsigned int *type) |
184 | unsigned long *out_hwirq, | ||
185 | unsigned int *out_type) | ||
186 | { | 184 | { |
187 | if (domain->of_node != controller) | 185 | if (is_of_node(fwspec->fwnode)) { |
188 | return -EINVAL; /* Shouldn't happen, really... */ | 186 | if (fwspec->param_count != 3) |
189 | if (intsize != 3) | 187 | return -EINVAL; |
190 | return -EINVAL; /* Not GIC compliant */ | ||
191 | if (intspec[0] != 0) | ||
192 | return -EINVAL; /* No PPI should point to this domain */ | ||
193 | 188 | ||
194 | *out_hwirq = intspec[1]; | 189 | /* No PPI should point to this domain */ |
195 | *out_type = intspec[2]; | 190 | if (fwspec->param[0] != 0) |
196 | return 0; | 191 | return -EINVAL; |
192 | |||
193 | *hwirq = fwspec->param[1]; | ||
194 | *type = fwspec->param[2]; | ||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | return -EINVAL; | ||
197 | } | 199 | } |
198 | 200 | ||
199 | static int exynos_pmu_domain_alloc(struct irq_domain *domain, | 201 | static int exynos_pmu_domain_alloc(struct irq_domain *domain, |
200 | unsigned int virq, | 202 | unsigned int virq, |
201 | unsigned int nr_irqs, void *data) | 203 | unsigned int nr_irqs, void *data) |
202 | { | 204 | { |
203 | struct of_phandle_args *args = data; | 205 | struct irq_fwspec *fwspec = data; |
204 | struct of_phandle_args parent_args; | 206 | struct irq_fwspec parent_fwspec; |
205 | irq_hw_number_t hwirq; | 207 | irq_hw_number_t hwirq; |
206 | int i; | 208 | int i; |
207 | 209 | ||
208 | if (args->args_count != 3) | 210 | if (fwspec->param_count != 3) |
209 | return -EINVAL; /* Not GIC compliant */ | 211 | return -EINVAL; /* Not GIC compliant */ |
210 | if (args->args[0] != 0) | 212 | if (fwspec->param[0] != 0) |
211 | return -EINVAL; /* No PPI should point to this domain */ | 213 | return -EINVAL; /* No PPI should point to this domain */ |
212 | 214 | ||
213 | hwirq = args->args[1]; | 215 | hwirq = fwspec->param[1]; |
214 | 216 | ||
215 | for (i = 0; i < nr_irqs; i++) | 217 | for (i = 0; i < nr_irqs; i++) |
216 | irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, | 218 | irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, |
217 | &exynos_pmu_chip, NULL); | 219 | &exynos_pmu_chip, NULL); |
218 | 220 | ||
219 | parent_args = *args; | 221 | parent_fwspec = *fwspec; |
220 | parent_args.np = domain->parent->of_node; | 222 | parent_fwspec.fwnode = domain->parent->fwnode; |
221 | return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_args); | 223 | return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, |
224 | &parent_fwspec); | ||
222 | } | 225 | } |
223 | 226 | ||
224 | static const struct irq_domain_ops exynos_pmu_domain_ops = { | 227 | static const struct irq_domain_ops exynos_pmu_domain_ops = { |
225 | .xlate = exynos_pmu_domain_xlate, | 228 | .translate = exynos_pmu_domain_translate, |
226 | .alloc = exynos_pmu_domain_alloc, | 229 | .alloc = exynos_pmu_domain_alloc, |
227 | .free = irq_domain_free_irqs_common, | 230 | .free = irq_domain_free_irqs_common, |
228 | }; | 231 | }; |
229 | 232 | ||
230 | static int __init exynos_pmu_irq_init(struct device_node *node, | 233 | static int __init exynos_pmu_irq_init(struct device_node *node, |
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c index 8c4467fad837..10bf7159b27d 100644 --- a/arch/arm/mach-imx/gpc.c +++ b/arch/arm/mach-imx/gpc.c | |||
@@ -181,40 +181,42 @@ static struct irq_chip imx_gpc_chip = { | |||
181 | #endif | 181 | #endif |
182 | }; | 182 | }; |
183 | 183 | ||
184 | static int imx_gpc_domain_xlate(struct irq_domain *domain, | 184 | static int imx_gpc_domain_translate(struct irq_domain *d, |
185 | struct device_node *controller, | 185 | struct irq_fwspec *fwspec, |
186 | const u32 *intspec, | 186 | unsigned long *hwirq, |
187 | unsigned int intsize, | 187 | unsigned int *type) |
188 | unsigned long *out_hwirq, | ||
189 | unsigned int *out_type) | ||
190 | { | 188 | { |
191 | if (domain->of_node != controller) | 189 | if (is_of_node(fwspec->fwnode)) { |
192 | return -EINVAL; /* Shouldn't happen, really... */ | 190 | if (fwspec->param_count != 3) |
193 | if (intsize != 3) | 191 | return -EINVAL; |
194 | return -EINVAL; /* Not GIC compliant */ | ||
195 | if (intspec[0] != 0) | ||
196 | return -EINVAL; /* No PPI should point to this domain */ | ||
197 | 192 | ||
198 | *out_hwirq = intspec[1]; | 193 | /* No PPI should point to this domain */ |
199 | *out_type = intspec[2]; | 194 | if (fwspec->param[0] != 0) |
200 | return 0; | 195 | return -EINVAL; |
196 | |||
197 | *hwirq = fwspec->param[1]; | ||
198 | *type = fwspec->param[2]; | ||
199 | return 0; | ||
200 | } | ||
201 | |||
202 | return -EINVAL; | ||
201 | } | 203 | } |
202 | 204 | ||
203 | static int imx_gpc_domain_alloc(struct irq_domain *domain, | 205 | static int imx_gpc_domain_alloc(struct irq_domain *domain, |
204 | unsigned int irq, | 206 | unsigned int irq, |
205 | unsigned int nr_irqs, void *data) | 207 | unsigned int nr_irqs, void *data) |
206 | { | 208 | { |
207 | struct of_phandle_args *args = data; | 209 | struct irq_fwspec *fwspec = data; |
208 | struct of_phandle_args parent_args; | 210 | struct irq_fwspec parent_fwspec; |
209 | irq_hw_number_t hwirq; | 211 | irq_hw_number_t hwirq; |
210 | int i; | 212 | int i; |
211 | 213 | ||
212 | if (args->args_count != 3) | 214 | if (fwspec->param_count != 3) |
213 | return -EINVAL; /* Not GIC compliant */ | 215 | return -EINVAL; /* Not GIC compliant */ |
214 | if (args->args[0] != 0) | 216 | if (fwspec->param[0] != 0) |
215 | return -EINVAL; /* No PPI should point to this domain */ | 217 | return -EINVAL; /* No PPI should point to this domain */ |
216 | 218 | ||
217 | hwirq = args->args[1]; | 219 | hwirq = fwspec->param[1]; |
218 | if (hwirq >= GPC_MAX_IRQS) | 220 | if (hwirq >= GPC_MAX_IRQS) |
219 | return -EINVAL; /* Can't deal with this */ | 221 | return -EINVAL; /* Can't deal with this */ |
220 | 222 | ||
@@ -222,15 +224,16 @@ static int imx_gpc_domain_alloc(struct irq_domain *domain, | |||
222 | irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i, | 224 | irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i, |
223 | &imx_gpc_chip, NULL); | 225 | &imx_gpc_chip, NULL); |
224 | 226 | ||
225 | parent_args = *args; | 227 | parent_fwspec = *fwspec; |
226 | parent_args.np = domain->parent->of_node; | 228 | parent_fwspec.fwnode = domain->parent->fwnode; |
227 | return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs, &parent_args); | 229 | return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs, |
230 | &parent_fwspec); | ||
228 | } | 231 | } |
229 | 232 | ||
230 | static const struct irq_domain_ops imx_gpc_domain_ops = { | 233 | static const struct irq_domain_ops imx_gpc_domain_ops = { |
231 | .xlate = imx_gpc_domain_xlate, | 234 | .translate = imx_gpc_domain_translate, |
232 | .alloc = imx_gpc_domain_alloc, | 235 | .alloc = imx_gpc_domain_alloc, |
233 | .free = irq_domain_free_irqs_common, | 236 | .free = irq_domain_free_irqs_common, |
234 | }; | 237 | }; |
235 | 238 | ||
236 | static int __init imx_gpc_init(struct device_node *node, | 239 | static int __init imx_gpc_init(struct device_node *node, |
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c index e1d2e991d17a..db7e0bab3587 100644 --- a/arch/arm/mach-omap2/omap-wakeupgen.c +++ b/arch/arm/mach-omap2/omap-wakeupgen.c | |||
@@ -399,40 +399,42 @@ static struct irq_chip wakeupgen_chip = { | |||
399 | #endif | 399 | #endif |
400 | }; | 400 | }; |
401 | 401 | ||
402 | static int wakeupgen_domain_xlate(struct irq_domain *domain, | 402 | static int wakeupgen_domain_translate(struct irq_domain *d, |
403 | struct device_node *controller, | 403 | struct irq_fwspec *fwspec, |
404 | const u32 *intspec, | 404 | unsigned long *hwirq, |
405 | unsigned int intsize, | 405 | unsigned int *type) |
406 | unsigned long *out_hwirq, | ||
407 | unsigned int *out_type) | ||
408 | { | 406 | { |
409 | if (domain->of_node != controller) | 407 | if (is_of_node(fwspec->fwnode)) { |
410 | return -EINVAL; /* Shouldn't happen, really... */ | 408 | if (fwspec->param_count != 3) |
411 | if (intsize != 3) | 409 | return -EINVAL; |
412 | return -EINVAL; /* Not GIC compliant */ | ||
413 | if (intspec[0] != 0) | ||
414 | return -EINVAL; /* No PPI should point to this domain */ | ||
415 | 410 | ||
416 | *out_hwirq = intspec[1]; | 411 | /* No PPI should point to this domain */ |
417 | *out_type = intspec[2]; | 412 | if (fwspec->param[0] != 0) |
418 | return 0; | 413 | return -EINVAL; |
414 | |||
415 | *hwirq = fwspec->param[1]; | ||
416 | *type = fwspec->param[2]; | ||
417 | return 0; | ||
418 | } | ||
419 | |||
420 | return -EINVAL; | ||
419 | } | 421 | } |
420 | 422 | ||
421 | static int wakeupgen_domain_alloc(struct irq_domain *domain, | 423 | static int wakeupgen_domain_alloc(struct irq_domain *domain, |
422 | unsigned int virq, | 424 | unsigned int virq, |
423 | unsigned int nr_irqs, void *data) | 425 | unsigned int nr_irqs, void *data) |
424 | { | 426 | { |
425 | struct of_phandle_args *args = data; | 427 | struct irq_fwspec *fwspec = data; |
426 | struct of_phandle_args parent_args; | 428 | struct irq_fwspec parent_fwspec; |
427 | irq_hw_number_t hwirq; | 429 | irq_hw_number_t hwirq; |
428 | int i; | 430 | int i; |
429 | 431 | ||
430 | if (args->args_count != 3) | 432 | if (fwspec->param_count != 3) |
431 | return -EINVAL; /* Not GIC compliant */ | 433 | return -EINVAL; /* Not GIC compliant */ |
432 | if (args->args[0] != 0) | 434 | if (fwspec->param[0] != 0) |
433 | return -EINVAL; /* No PPI should point to this domain */ | 435 | return -EINVAL; /* No PPI should point to this domain */ |
434 | 436 | ||
435 | hwirq = args->args[1]; | 437 | hwirq = fwspec->param[1]; |
436 | if (hwirq >= MAX_IRQS) | 438 | if (hwirq >= MAX_IRQS) |
437 | return -EINVAL; /* Can't deal with this */ | 439 | return -EINVAL; /* Can't deal with this */ |
438 | 440 | ||
@@ -440,15 +442,16 @@ static int wakeupgen_domain_alloc(struct irq_domain *domain, | |||
440 | irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, | 442 | irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, |
441 | &wakeupgen_chip, NULL); | 443 | &wakeupgen_chip, NULL); |
442 | 444 | ||
443 | parent_args = *args; | 445 | parent_fwspec = *fwspec; |
444 | parent_args.np = domain->parent->of_node; | 446 | parent_fwspec.fwnode = domain->parent->fwnode; |
445 | return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_args); | 447 | return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, |
448 | &parent_fwspec); | ||
446 | } | 449 | } |
447 | 450 | ||
448 | static const struct irq_domain_ops wakeupgen_domain_ops = { | 451 | static const struct irq_domain_ops wakeupgen_domain_ops = { |
449 | .xlate = wakeupgen_domain_xlate, | 452 | .translate = wakeupgen_domain_translate, |
450 | .alloc = wakeupgen_domain_alloc, | 453 | .alloc = wakeupgen_domain_alloc, |
451 | .free = irq_domain_free_irqs_common, | 454 | .free = irq_domain_free_irqs_common, |
452 | }; | 455 | }; |
453 | 456 | ||
454 | /* | 457 | /* |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 07d1811aa03f..440d906429de 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
@@ -348,6 +348,33 @@ config ARM64_ERRATUM_843419 | |||
348 | 348 | ||
349 | If unsure, say Y. | 349 | If unsure, say Y. |
350 | 350 | ||
351 | config CAVIUM_ERRATUM_22375 | ||
352 | bool "Cavium erratum 22375, 24313" | ||
353 | default y | ||
354 | help | ||
355 | Enable workaround for erratum 22375, 24313. | ||
356 | |||
357 | This implements two gicv3-its errata workarounds for ThunderX. Both | ||
358 | have a small impact, affecting only ITS table allocation. | ||
359 | |||
360 | erratum 22375: only alloc 8MB table size | ||
361 | erratum 24313: ignore memory access type | ||
362 | |||
363 | The fixes are in ITS initialization and basically ignore memory access | ||
364 | type and table size provided by the TYPER and BASER registers. | ||
365 | |||
366 | If unsure, say Y. | ||
367 | |||
368 | config CAVIUM_ERRATUM_23154 | ||
369 | bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed" | ||
370 | default y | ||
371 | help | ||
372 | The gicv3 of ThunderX requires a modified version for | ||
373 | reading the IAR status to ensure data synchronization | ||
374 | (access to icc_iar1_el1 is not sync'ed before and after). | ||
375 | |||
376 | If unsure, say Y. | ||
377 | |||
351 | endmenu | 378 | endmenu |
352 | 379 | ||
353 | 380 | ||
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h new file mode 100644 index 000000000000..030cdcb46c6b --- /dev/null +++ b/arch/arm64/include/asm/arch_gicv3.h | |||
@@ -0,0 +1,170 @@ | |||
1 | /* | ||
2 | * arch/arm64/include/asm/arch_gicv3.h | ||
3 | * | ||
4 | * Copyright (C) 2015 ARM Ltd. | ||
5 | * | ||
6 | * This program is free software: you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | #ifndef __ASM_ARCH_GICV3_H | ||
19 | #define __ASM_ARCH_GICV3_H | ||
20 | |||
21 | #include <asm/sysreg.h> | ||
22 | |||
23 | #define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1) | ||
24 | #define ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1) | ||
25 | #define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0) | ||
26 | #define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5) | ||
27 | #define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0) | ||
28 | #define ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4) | ||
29 | #define ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5) | ||
30 | #define ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7) | ||
31 | |||
32 | #define ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5) | ||
33 | |||
34 | /* | ||
35 | * System register definitions | ||
36 | */ | ||
37 | #define ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4) | ||
38 | #define ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0) | ||
39 | #define ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1) | ||
40 | #define ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2) | ||
41 | #define ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3) | ||
42 | #define ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5) | ||
43 | #define ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7) | ||
44 | |||
45 | #define __LR0_EL2(x) sys_reg(3, 4, 12, 12, x) | ||
46 | #define __LR8_EL2(x) sys_reg(3, 4, 12, 13, x) | ||
47 | |||
48 | #define ICH_LR0_EL2 __LR0_EL2(0) | ||
49 | #define ICH_LR1_EL2 __LR0_EL2(1) | ||
50 | #define ICH_LR2_EL2 __LR0_EL2(2) | ||
51 | #define ICH_LR3_EL2 __LR0_EL2(3) | ||
52 | #define ICH_LR4_EL2 __LR0_EL2(4) | ||
53 | #define ICH_LR5_EL2 __LR0_EL2(5) | ||
54 | #define ICH_LR6_EL2 __LR0_EL2(6) | ||
55 | #define ICH_LR7_EL2 __LR0_EL2(7) | ||
56 | #define ICH_LR8_EL2 __LR8_EL2(0) | ||
57 | #define ICH_LR9_EL2 __LR8_EL2(1) | ||
58 | #define ICH_LR10_EL2 __LR8_EL2(2) | ||
59 | #define ICH_LR11_EL2 __LR8_EL2(3) | ||
60 | #define ICH_LR12_EL2 __LR8_EL2(4) | ||
61 | #define ICH_LR13_EL2 __LR8_EL2(5) | ||
62 | #define ICH_LR14_EL2 __LR8_EL2(6) | ||
63 | #define ICH_LR15_EL2 __LR8_EL2(7) | ||
64 | |||
65 | #define __AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x) | ||
66 | #define ICH_AP0R0_EL2 __AP0Rx_EL2(0) | ||
67 | #define ICH_AP0R1_EL2 __AP0Rx_EL2(1) | ||
68 | #define ICH_AP0R2_EL2 __AP0Rx_EL2(2) | ||
69 | #define ICH_AP0R3_EL2 __AP0Rx_EL2(3) | ||
70 | |||
71 | #define __AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x) | ||
72 | #define ICH_AP1R0_EL2 __AP1Rx_EL2(0) | ||
73 | #define ICH_AP1R1_EL2 __AP1Rx_EL2(1) | ||
74 | #define ICH_AP1R2_EL2 __AP1Rx_EL2(2) | ||
75 | #define ICH_AP1R3_EL2 __AP1Rx_EL2(3) | ||
76 | |||
77 | #ifndef __ASSEMBLY__ | ||
78 | |||
79 | #include <linux/stringify.h> | ||
80 | |||
81 | /* | ||
82 | * Low-level accessors | ||
83 | * | ||
84 | * These system registers are 32 bits, but we make sure that the compiler | ||
85 | * sets the GP register's most significant bits to 0 with an explicit cast. | ||
86 | */ | ||
87 | |||
88 | static inline void gic_write_eoir(u32 irq) | ||
89 | { | ||
90 | asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" ((u64)irq)); | ||
91 | isb(); | ||
92 | } | ||
93 | |||
94 | static inline void gic_write_dir(u32 irq) | ||
95 | { | ||
96 | asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" ((u64)irq)); | ||
97 | isb(); | ||
98 | } | ||
99 | |||
100 | static inline u64 gic_read_iar_common(void) | ||
101 | { | ||
102 | u64 irqstat; | ||
103 | |||
104 | asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat)); | ||
105 | return irqstat; | ||
106 | } | ||
107 | |||
108 | /* | ||
109 | * Cavium ThunderX erratum 23154 | ||
110 | * | ||
111 | * The gicv3 of ThunderX requires a modified version for reading the | ||
112 | * IAR status to ensure data synchronization (access to icc_iar1_el1 | ||
113 | * is not sync'ed before and after). | ||
114 | */ | ||
115 | static inline u64 gic_read_iar_cavium_thunderx(void) | ||
116 | { | ||
117 | u64 irqstat; | ||
118 | |||
119 | asm volatile( | ||
120 | "nop;nop;nop;nop\n\t" | ||
121 | "nop;nop;nop;nop\n\t" | ||
122 | "mrs_s %0, " __stringify(ICC_IAR1_EL1) "\n\t" | ||
123 | "nop;nop;nop;nop" | ||
124 | : "=r" (irqstat)); | ||
125 | mb(); | ||
126 | |||
127 | return irqstat; | ||
128 | } | ||
129 | |||
130 | static inline void gic_write_pmr(u32 val) | ||
131 | { | ||
132 | asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" ((u64)val)); | ||
133 | } | ||
134 | |||
135 | static inline void gic_write_ctlr(u32 val) | ||
136 | { | ||
137 | asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" ((u64)val)); | ||
138 | isb(); | ||
139 | } | ||
140 | |||
141 | static inline void gic_write_grpen1(u32 val) | ||
142 | { | ||
143 | asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" ((u64)val)); | ||
144 | isb(); | ||
145 | } | ||
146 | |||
147 | static inline void gic_write_sgi1r(u64 val) | ||
148 | { | ||
149 | asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val)); | ||
150 | } | ||
151 | |||
152 | static inline u32 gic_read_sre(void) | ||
153 | { | ||
154 | u64 val; | ||
155 | |||
156 | asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val)); | ||
157 | return val; | ||
158 | } | ||
159 | |||
160 | static inline void gic_write_sre(u32 val) | ||
161 | { | ||
162 | asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" ((u64)val)); | ||
163 | isb(); | ||
164 | } | ||
165 | |||
166 | #define gic_read_typer(c) readq_relaxed(c) | ||
167 | #define gic_write_irouter(v, c) writeq_relaxed(v, c) | ||
168 | |||
169 | #endif /* __ASSEMBLY__ */ | ||
170 | #endif /* __ASM_ARCH_GICV3_H */ | ||
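
The two IAR accessors above (gic_read_iar_common() and gic_read_iar_cavium_thunderx()) are intended to be selected according to the ARM64_WORKAROUND_CAVIUM_23154 capability added elsewhere in this series. A hedged sketch of how a caller might pick between them, assuming the generic cpus_have_cap() helper; the actual driver plumbing may differ:

#include <asm/arch_gicv3.h>
#include <asm/cpufeature.h>

static u64 gic_read_iar(void)
{
	/* Pass-1 ThunderX needs the nop-padded, fully synchronized reader. */
	if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_23154))
		return gic_read_iar_cavium_thunderx();

	return gic_read_iar_common();
}
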
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 171570702bb8..dbc78d2b8cc6 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h | |||
@@ -27,8 +27,9 @@ | |||
27 | #define ARM64_HAS_SYSREG_GIC_CPUIF 3 | 27 | #define ARM64_HAS_SYSREG_GIC_CPUIF 3 |
28 | #define ARM64_HAS_PAN 4 | 28 | #define ARM64_HAS_PAN 4 |
29 | #define ARM64_HAS_LSE_ATOMICS 5 | 29 | #define ARM64_HAS_LSE_ATOMICS 5 |
30 | #define ARM64_WORKAROUND_CAVIUM_23154 6 | ||
30 | 31 | ||
31 | #define ARM64_NCAPS 6 | 32 | #define ARM64_NCAPS 7 |
32 | 33 | ||
33 | #ifndef __ASSEMBLY__ | 34 | #ifndef __ASSEMBLY__ |
34 | 35 | ||
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index ee6403df9fe4..100a3d1b17c8 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h | |||
@@ -62,15 +62,18 @@ | |||
62 | (0xf << MIDR_ARCHITECTURE_SHIFT) | \ | 62 | (0xf << MIDR_ARCHITECTURE_SHIFT) | \ |
63 | ((partnum) << MIDR_PARTNUM_SHIFT)) | 63 | ((partnum) << MIDR_PARTNUM_SHIFT)) |
64 | 64 | ||
65 | #define ARM_CPU_IMP_ARM 0x41 | 65 | #define ARM_CPU_IMP_ARM 0x41 |
66 | #define ARM_CPU_IMP_APM 0x50 | 66 | #define ARM_CPU_IMP_APM 0x50 |
67 | #define ARM_CPU_IMP_CAVIUM 0x43 | ||
67 | 68 | ||
68 | #define ARM_CPU_PART_AEM_V8 0xD0F | 69 | #define ARM_CPU_PART_AEM_V8 0xD0F |
69 | #define ARM_CPU_PART_FOUNDATION 0xD00 | 70 | #define ARM_CPU_PART_FOUNDATION 0xD00 |
70 | #define ARM_CPU_PART_CORTEX_A57 0xD07 | 71 | #define ARM_CPU_PART_CORTEX_A57 0xD07 |
71 | #define ARM_CPU_PART_CORTEX_A53 0xD03 | 72 | #define ARM_CPU_PART_CORTEX_A53 0xD03 |
72 | 73 | ||
73 | #define APM_CPU_PART_POTENZA 0x000 | 74 | #define APM_CPU_PART_POTENZA 0x000 |
75 | |||
76 | #define CAVIUM_CPU_PART_THUNDERX 0x0A1 | ||
74 | 77 | ||
75 | #define ID_AA64MMFR0_BIGENDEL0_SHIFT 16 | 78 | #define ID_AA64MMFR0_BIGENDEL0_SHIFT 16 |
76 | #define ID_AA64MMFR0_BIGENDEL0_MASK (0xf << ID_AA64MMFR0_BIGENDEL0_SHIFT) | 79 | #define ID_AA64MMFR0_BIGENDEL0_MASK (0xf << ID_AA64MMFR0_BIGENDEL0_SHIFT) |
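
For reference, MIDR_CPU_PART() (partially visible in the context above) composes a model value from the implementer ID (bits [31:24]), a fixed architecture field of 0xf (bits [19:16]) and the part number (bits [15:4]). A small stand-alone worked example of the new ThunderX value; the macro name here is local to the example:

#include <stdint.h>
#include <stdio.h>

/* Mirrors MIDR_CPU_PART(): implementer[31:24], arch = 0xf[19:16], part[15:4]. */
#define EXAMPLE_MIDR_CPU_PART(imp, part) \
	(((uint32_t)(imp) << 24) | (0xfu << 16) | ((uint32_t)(part) << 4))

int main(void)
{
	/* Cavium (0x43) + ThunderX part number (0x0A1). */
	uint32_t midr = EXAMPLE_MIDR_CPU_PART(0x43, 0x0A1);

	printf("MIDR_THUNDERX model = 0x%08x\n", midr); /* prints 0x430f0a10 */
	return 0;
}
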
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 6ffd91438560..574450c257a4 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c | |||
@@ -23,6 +23,7 @@ | |||
23 | 23 | ||
24 | #define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) | 24 | #define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) |
25 | #define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) | 25 | #define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) |
26 | #define MIDR_THUNDERX MIDR_CPU_PART(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) | ||
26 | 27 | ||
27 | #define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \ | 28 | #define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \ |
28 | MIDR_ARCHITECTURE_MASK) | 29 | MIDR_ARCHITECTURE_MASK) |
@@ -82,6 +83,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = { | |||
82 | MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04), | 83 | MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04), |
83 | }, | 84 | }, |
84 | #endif | 85 | #endif |
86 | #ifdef CONFIG_CAVIUM_ERRATUM_23154 | ||
87 | { | ||
88 | /* Cavium ThunderX, pass 1.x */ | ||
89 | .desc = "Cavium erratum 23154", | ||
90 | .capability = ARM64_WORKAROUND_CAVIUM_23154, | ||
91 | MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01), | ||
92 | }, | ||
93 | #endif | ||
85 | { | 94 | { |
86 | } | 95 | } |
87 | }; | 96 | }; |
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 3c9aed32f70b..305f30dc9e63 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c | |||
@@ -23,6 +23,8 @@ | |||
23 | #include <asm/cpufeature.h> | 23 | #include <asm/cpufeature.h> |
24 | #include <asm/processor.h> | 24 | #include <asm/processor.h> |
25 | 25 | ||
26 | #include <linux/irqchip/arm-gic-v3.h> | ||
27 | |||
26 | static bool | 28 | static bool |
27 | feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry) | 29 | feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry) |
28 | { | 30 | { |
@@ -45,11 +47,26 @@ __ID_FEAT_CHK(id_aa64pfr0); | |||
45 | __ID_FEAT_CHK(id_aa64mmfr1); | 47 | __ID_FEAT_CHK(id_aa64mmfr1); |
46 | __ID_FEAT_CHK(id_aa64isar0); | 48 | __ID_FEAT_CHK(id_aa64isar0); |
47 | 49 | ||
50 | static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry) | ||
51 | { | ||
52 | bool has_sre; | ||
53 | |||
54 | if (!has_id_aa64pfr0_feature(entry)) | ||
55 | return false; | ||
56 | |||
57 | has_sre = gic_enable_sre(); | ||
58 | if (!has_sre) | ||
59 | pr_warn_once("%s present but disabled by higher exception level\n", | ||
60 | entry->desc); | ||
61 | |||
62 | return has_sre; | ||
63 | } | ||
64 | |||
48 | static const struct arm64_cpu_capabilities arm64_features[] = { | 65 | static const struct arm64_cpu_capabilities arm64_features[] = { |
49 | { | 66 | { |
50 | .desc = "GIC system register CPU interface", | 67 | .desc = "GIC system register CPU interface", |
51 | .capability = ARM64_HAS_SYSREG_GIC_CPUIF, | 68 | .capability = ARM64_HAS_SYSREG_GIC_CPUIF, |
52 | .matches = has_id_aa64pfr0_feature, | 69 | .matches = has_useable_gicv3_cpuif, |
53 | .field_pos = 24, | 70 | .field_pos = 24, |
54 | .min_field_value = 1, | 71 | .min_field_value = 1, |
55 | }, | 72 | }, |
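
has_useable_gicv3_cpuif() calls gic_enable_sre(), whose body is outside this hunk. A minimal sketch of what that helper has to do, built on the gic_read_sre()/gic_write_sre() accessors added earlier in this series (ICC_SRE_EL1_SRE is assumed to be the bit-0 SRE define from the GICv3 header):

static inline bool gic_enable_sre(void)
{
	u32 val = gic_read_sre();

	if (val & ICC_SRE_EL1_SRE)
		return true;

	/* Try to enable system-register access, then check that it stuck. */
	gic_write_sre(val | ICC_SRE_EL1_SRE);
	val = gic_read_sre();

	return !!(val & ICC_SRE_EL1_SRE);
}
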
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 90d09eddd5b2..351a4de1b1e2 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S | |||
@@ -498,6 +498,8 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1 | |||
498 | orr x0, x0, #ICC_SRE_EL2_ENABLE // Set ICC_SRE_EL2.Enable==1 | 498 | orr x0, x0, #ICC_SRE_EL2_ENABLE // Set ICC_SRE_EL2.Enable==1 |
499 | msr_s ICC_SRE_EL2, x0 | 499 | msr_s ICC_SRE_EL2, x0 |
500 | isb // Make sure SRE is now set | 500 | isb // Make sure SRE is now set |
501 | mrs_s x0, ICC_SRE_EL2 // Read SRE back, | ||
502 | tbz x0, #0, 3f // and check that it sticks | ||
501 | msr_s ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults | 503 | msr_s ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults |
502 | 504 | ||
503 | 3: | 505 | 3: |
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 5c7e920e4861..ff5292c6277c 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig | |||
@@ -16,6 +16,9 @@ menuconfig VIRTUALIZATION | |||
16 | 16 | ||
17 | if VIRTUALIZATION | 17 | if VIRTUALIZATION |
18 | 18 | ||
19 | config KVM_ARM_VGIC_V3 | ||
20 | bool | ||
21 | |||
19 | config KVM | 22 | config KVM |
20 | bool "Kernel-based Virtual Machine (KVM) support" | 23 | bool "Kernel-based Virtual Machine (KVM) support" |
21 | depends on OF | 24 | depends on OF |
@@ -31,6 +34,7 @@ config KVM | |||
31 | select KVM_VFIO | 34 | select KVM_VFIO |
32 | select HAVE_KVM_EVENTFD | 35 | select HAVE_KVM_EVENTFD |
33 | select HAVE_KVM_IRQFD | 36 | select HAVE_KVM_IRQFD |
37 | select KVM_ARM_VGIC_V3 | ||
34 | ---help--- | 38 | ---help--- |
35 | Support hosting virtualized guest machines. | 39 | Support hosting virtualized guest machines. |
36 | 40 | ||
diff --git a/arch/c6x/platforms/megamod-pic.c b/arch/c6x/platforms/megamod-pic.c index ddcb45d7dfa7..43afc03e4125 100644 --- a/arch/c6x/platforms/megamod-pic.c +++ b/arch/c6x/platforms/megamod-pic.c | |||
@@ -178,7 +178,7 @@ static void __init set_megamod_mux(struct megamod_pic *pic, int src, int output) | |||
178 | static void __init parse_priority_map(struct megamod_pic *pic, | 178 | static void __init parse_priority_map(struct megamod_pic *pic, |
179 | int *mapping, int size) | 179 | int *mapping, int size) |
180 | { | 180 | { |
181 | struct device_node *np = pic->irqhost->of_node; | 181 | struct device_node *np = irq_domain_get_of_node(pic->irqhost); |
182 | const __be32 *map; | 182 | const __be32 *map; |
183 | int i, maplen; | 183 | int i, maplen; |
184 | u32 val; | 184 | u32 val; |
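
This and the following call-site conversions rely on the irq_domain_get_of_node() accessor in place of reading the domain's of_node field directly. Its definition is not part of this diff; one plausible minimal form, assuming the domain identifies its firmware node through a generic fwnode_handle (in practice the accessor lives in <linux/irqdomain.h>):

static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d)
{
	/* to_of_node() yields NULL for non-DT (e.g. ACPI) firmware nodes. */
	return to_of_node(d->fwnode);
}
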
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index 0352bc8d56b3..4f9eb0576884 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c | |||
@@ -1094,7 +1094,7 @@ static int octeon_irq_gpio_xlat(struct irq_domain *d, | |||
1094 | unsigned int pin; | 1094 | unsigned int pin; |
1095 | unsigned int trigger; | 1095 | unsigned int trigger; |
1096 | 1096 | ||
1097 | if (d->of_node != node) | 1097 | if (irq_domain_get_of_node(d) != node) |
1098 | return -EINVAL; | 1098 | return -EINVAL; |
1099 | 1099 | ||
1100 | if (intsize < 2) | 1100 | if (intsize < 2) |
@@ -2163,7 +2163,7 @@ static int octeon_irq_cib_map(struct irq_domain *d, | |||
2163 | 2163 | ||
2164 | if (hw >= host_data->max_bits) { | 2164 | if (hw >= host_data->max_bits) { |
2165 | pr_err("ERROR: %s mapping %u is too big!\n", | 2165 | pr_err("ERROR: %s mapping %u is too big!\n", |
2166 | d->of_node->name, (unsigned)hw); | 2166 | irq_domain_get_of_node(d)->name, (unsigned)hw); |
2167 | return -EINVAL; | 2167 | return -EINVAL; |
2168 | } | 2168 | } |
2169 | 2169 | ||
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c index e0e68a1c0d3c..aed7714495c1 100644 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ b/arch/powerpc/platforms/cell/axon_msi.c | |||
@@ -327,7 +327,7 @@ static void axon_msi_shutdown(struct platform_device *device) | |||
327 | u32 tmp; | 327 | u32 tmp; |
328 | 328 | ||
329 | pr_devel("axon_msi: disabling %s\n", | 329 | pr_devel("axon_msi: disabling %s\n", |
330 | msic->irq_domain->of_node->full_name); | 330 | irq_domain_get_of_node(msic->irq_domain)->full_name); |
331 | tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG); | 331 | tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG); |
332 | tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; | 332 | tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; |
333 | msic_dcr_write(msic, MSIC_CTRL_REG, tmp); | 333 | msic_dcr_write(msic, MSIC_CTRL_REG, tmp); |
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c index 9d27de62dc62..54ee5743cb72 100644 --- a/arch/powerpc/platforms/cell/spider-pic.c +++ b/arch/powerpc/platforms/cell/spider-pic.c | |||
@@ -231,20 +231,23 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic) | |||
231 | const u32 *imap, *tmp; | 231 | const u32 *imap, *tmp; |
232 | int imaplen, intsize, unit; | 232 | int imaplen, intsize, unit; |
233 | struct device_node *iic; | 233 | struct device_node *iic; |
234 | struct device_node *of_node; | ||
235 | |||
236 | of_node = irq_domain_get_of_node(pic->host); | ||
234 | 237 | ||
235 | /* First, we check whether we have a real "interrupts" in the device | 238 | /* First, we check whether we have a real "interrupts" in the device |
236 | * tree in case the device-tree is ever fixed | 239 | * tree in case the device-tree is ever fixed |
237 | */ | 240 | */ |
238 | virq = irq_of_parse_and_map(pic->host->of_node, 0); | 241 | virq = irq_of_parse_and_map(of_node, 0); |
239 | if (virq) | 242 | if (virq) |
240 | return virq; | 243 | return virq; |
241 | 244 | ||
242 | /* Now do the horrible hacks */ | 245 | /* Now do the horrible hacks */ |
243 | tmp = of_get_property(pic->host->of_node, "#interrupt-cells", NULL); | 246 | tmp = of_get_property(of_node, "#interrupt-cells", NULL); |
244 | if (tmp == NULL) | 247 | if (tmp == NULL) |
245 | return NO_IRQ; | 248 | return NO_IRQ; |
246 | intsize = *tmp; | 249 | intsize = *tmp; |
247 | imap = of_get_property(pic->host->of_node, "interrupt-map", &imaplen); | 250 | imap = of_get_property(of_node, "interrupt-map", &imaplen); |
248 | if (imap == NULL || imaplen < (intsize + 1)) | 251 | if (imap == NULL || imaplen < (intsize + 1)) |
249 | return NO_IRQ; | 252 | return NO_IRQ; |
250 | iic = of_find_node_by_phandle(imap[intsize]); | 253 | iic = of_find_node_by_phandle(imap[intsize]); |
diff --git a/arch/powerpc/platforms/pasemi/msi.c b/arch/powerpc/platforms/pasemi/msi.c index b304a9fe55cc..d9af76342d99 100644 --- a/arch/powerpc/platforms/pasemi/msi.c +++ b/arch/powerpc/platforms/pasemi/msi.c | |||
@@ -144,9 +144,11 @@ int mpic_pasemi_msi_init(struct mpic *mpic) | |||
144 | { | 144 | { |
145 | int rc; | 145 | int rc; |
146 | struct pci_controller *phb; | 146 | struct pci_controller *phb; |
147 | struct device_node *of_node; | ||
147 | 148 | ||
148 | if (!mpic->irqhost->of_node || | 149 | of_node = irq_domain_get_of_node(mpic->irqhost); |
149 | !of_device_is_compatible(mpic->irqhost->of_node, | 150 | if (!of_node || |
151 | !of_device_is_compatible(of_node, | ||
150 | "pasemi,pwrficient-openpic")) | 152 | "pasemi,pwrficient-openpic")) |
151 | return -ENODEV; | 153 | return -ENODEV; |
152 | 154 | ||
diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c index 2c91ee7800b9..6ccfb6c1c707 100644 --- a/arch/powerpc/platforms/powernv/opal-irqchip.c +++ b/arch/powerpc/platforms/powernv/opal-irqchip.c | |||
@@ -137,7 +137,7 @@ static void opal_handle_irq_work(struct irq_work *work) | |||
137 | static int opal_event_match(struct irq_domain *h, struct device_node *node, | 137 | static int opal_event_match(struct irq_domain *h, struct device_node *node, |
138 | enum irq_domain_bus_token bus_token) | 138 | enum irq_domain_bus_token bus_token) |
139 | { | 139 | { |
140 | return h->of_node == node; | 140 | return irq_domain_get_of_node(h) == node; |
141 | } | 141 | } |
142 | 142 | ||
143 | static int opal_event_xlate(struct irq_domain *h, struct device_node *np, | 143 | static int opal_event_xlate(struct irq_domain *h, struct device_node *np, |
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c index eca0b00794fa..bffcc7a486a1 100644 --- a/arch/powerpc/sysdev/ehv_pic.c +++ b/arch/powerpc/sysdev/ehv_pic.c | |||
@@ -181,7 +181,8 @@ static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node, | |||
181 | enum irq_domain_bus_token bus_token) | 181 | enum irq_domain_bus_token bus_token) |
182 | { | 182 | { |
183 | /* Exact match, unless ehv_pic node is NULL */ | 183 | /* Exact match, unless ehv_pic node is NULL */ |
184 | return h->of_node == NULL || h->of_node == node; | 184 | struct device_node *of_node = irq_domain_get_of_node(h); |
185 | return of_node == NULL || of_node == node; | ||
185 | } | 186 | } |
186 | 187 | ||
187 | static int ehv_pic_host_map(struct irq_domain *h, unsigned int virq, | 188 | static int ehv_pic_host_map(struct irq_domain *h, unsigned int virq, |
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index 48a576aa47b9..3a2be3676f43 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c | |||
@@ -110,7 +110,7 @@ static int fsl_msi_init_allocator(struct fsl_msi *msi_data) | |||
110 | int rc, hwirq; | 110 | int rc, hwirq; |
111 | 111 | ||
112 | rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS_MAX, | 112 | rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS_MAX, |
113 | msi_data->irqhost->of_node); | 113 | irq_domain_get_of_node(msi_data->irqhost)); |
114 | if (rc) | 114 | if (rc) |
115 | return rc; | 115 | return rc; |
116 | 116 | ||
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c index e1a9c2c2d5d3..6f99ed3967fd 100644 --- a/arch/powerpc/sysdev/i8259.c +++ b/arch/powerpc/sysdev/i8259.c | |||
@@ -165,7 +165,8 @@ static struct resource pic_edgectrl_iores = { | |||
165 | static int i8259_host_match(struct irq_domain *h, struct device_node *node, | 165 | static int i8259_host_match(struct irq_domain *h, struct device_node *node, |
166 | enum irq_domain_bus_token bus_token) | 166 | enum irq_domain_bus_token bus_token) |
167 | { | 167 | { |
168 | return h->of_node == NULL || h->of_node == node; | 168 | struct device_node *of_node = irq_domain_get_of_node(h); |
169 | return of_node == NULL || of_node == node; | ||
169 | } | 170 | } |
170 | 171 | ||
171 | static int i8259_host_map(struct irq_domain *h, unsigned int virq, | 172 | static int i8259_host_map(struct irq_domain *h, unsigned int virq, |
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c index b1297ab1599b..f76ee39cb337 100644 --- a/arch/powerpc/sysdev/ipic.c +++ b/arch/powerpc/sysdev/ipic.c | |||
@@ -675,7 +675,8 @@ static int ipic_host_match(struct irq_domain *h, struct device_node *node, | |||
675 | enum irq_domain_bus_token bus_token) | 675 | enum irq_domain_bus_token bus_token) |
676 | { | 676 | { |
677 | /* Exact match, unless ipic node is NULL */ | 677 | /* Exact match, unless ipic node is NULL */ |
678 | return h->of_node == NULL || h->of_node == node; | 678 | struct device_node *of_node = irq_domain_get_of_node(h); |
679 | return of_node == NULL || of_node == node; | ||
679 | } | 680 | } |
680 | 681 | ||
681 | static int ipic_host_map(struct irq_domain *h, unsigned int virq, | 682 | static int ipic_host_map(struct irq_domain *h, unsigned int virq, |
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 537e5db85a06..cecd1156c185 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c | |||
@@ -1011,7 +1011,8 @@ static int mpic_host_match(struct irq_domain *h, struct device_node *node, | |||
1011 | enum irq_domain_bus_token bus_token) | 1011 | enum irq_domain_bus_token bus_token) |
1012 | { | 1012 | { |
1013 | /* Exact match, unless mpic node is NULL */ | 1013 | /* Exact match, unless mpic node is NULL */ |
1014 | return h->of_node == NULL || h->of_node == node; | 1014 | struct device_node *of_node = irq_domain_get_of_node(h); |
1015 | return of_node == NULL || of_node == node; | ||
1015 | } | 1016 | } |
1016 | 1017 | ||
1017 | static int mpic_host_map(struct irq_domain *h, unsigned int virq, | 1018 | static int mpic_host_map(struct irq_domain *h, unsigned int virq, |
diff --git a/arch/powerpc/sysdev/mpic_msi.c b/arch/powerpc/sysdev/mpic_msi.c index 7dc39f35a4cc..1d48a5385905 100644 --- a/arch/powerpc/sysdev/mpic_msi.c +++ b/arch/powerpc/sysdev/mpic_msi.c | |||
@@ -84,7 +84,7 @@ int mpic_msi_init_allocator(struct mpic *mpic) | |||
84 | int rc; | 84 | int rc; |
85 | 85 | ||
86 | rc = msi_bitmap_alloc(&mpic->msi_bitmap, mpic->num_sources, | 86 | rc = msi_bitmap_alloc(&mpic->msi_bitmap, mpic->num_sources, |
87 | mpic->irqhost->of_node); | 87 | irq_domain_get_of_node(mpic->irqhost)); |
88 | if (rc) | 88 | if (rc) |
89 | return rc; | 89 | return rc; |
90 | 90 | ||
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c index fbcc1f855a7f..ef36f16f9f6f 100644 --- a/arch/powerpc/sysdev/qe_lib/qe_ic.c +++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c | |||
@@ -248,7 +248,8 @@ static int qe_ic_host_match(struct irq_domain *h, struct device_node *node, | |||
248 | enum irq_domain_bus_token bus_token) | 248 | enum irq_domain_bus_token bus_token) |
249 | { | 249 | { |
250 | /* Exact match, unless qe_ic node is NULL */ | 250 | /* Exact match, unless qe_ic node is NULL */ |
251 | return h->of_node == NULL || h->of_node == node; | 251 | struct device_node *of_node = irq_domain_get_of_node(h); |
252 | return of_node == NULL || of_node == node; | ||
252 | } | 253 | } |
253 | 254 | ||
254 | static int qe_ic_host_map(struct irq_domain *h, unsigned int virq, | 255 | static int qe_ic_host_map(struct irq_domain *h, unsigned int virq, |
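The PowerPC hunks above are all the same mechanical conversion: direct h->of_node dereferences become calls to the irq_domain_get_of_node() accessor, so the callbacks keep working once a domain may be backed by a non-DT fwnode. A minimal sketch of the resulting host-match pattern (the function name is illustrative, not taken from any of the files above):

#include <linux/irqdomain.h>
#include <linux/of.h>

static int example_host_match(struct irq_domain *h, struct device_node *node,
			      enum irq_domain_bus_token bus_token)
{
	/* Resolve the DT node via the accessor instead of touching h->of_node */
	struct device_node *of_node = irq_domain_get_of_node(h);

	/* Exact match, unless the controller node is NULL */
	return of_node == NULL || of_node == node;
}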
diff --git a/drivers/acpi/gsi.c b/drivers/acpi/gsi.c index 38208f2d0e69..fa4585a6914e 100644 --- a/drivers/acpi/gsi.c +++ b/drivers/acpi/gsi.c | |||
@@ -11,9 +11,12 @@ | |||
11 | #include <linux/acpi.h> | 11 | #include <linux/acpi.h> |
12 | #include <linux/irq.h> | 12 | #include <linux/irq.h> |
13 | #include <linux/irqdomain.h> | 13 | #include <linux/irqdomain.h> |
14 | #include <linux/of.h> | ||
14 | 15 | ||
15 | enum acpi_irq_model_id acpi_irq_model; | 16 | enum acpi_irq_model_id acpi_irq_model; |
16 | 17 | ||
18 | static struct fwnode_handle *acpi_gsi_domain_id; | ||
19 | |||
17 | static unsigned int acpi_gsi_get_irq_type(int trigger, int polarity) | 20 | static unsigned int acpi_gsi_get_irq_type(int trigger, int polarity) |
18 | { | 21 | { |
19 | switch (polarity) { | 22 | switch (polarity) { |
@@ -45,12 +48,10 @@ static unsigned int acpi_gsi_get_irq_type(int trigger, int polarity) | |||
45 | */ | 48 | */ |
46 | int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) | 49 | int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) |
47 | { | 50 | { |
48 | /* | 51 | struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id, |
49 | * Only default domain is supported at present, always find | 52 | DOMAIN_BUS_ANY); |
50 | * the mapping corresponding to default domain by passing NULL | 53 | |
51 | * as irq_domain parameter | 54 | *irq = irq_find_mapping(d, gsi); |
52 | */ | ||
53 | *irq = irq_find_mapping(NULL, gsi); | ||
54 | /* | 55 | /* |
55 | * *irq == 0 means no mapping, that should | 56 | * *irq == 0 means no mapping, that should |
56 | * be reported as a failure | 57 | * be reported as a failure |
@@ -72,23 +73,19 @@ EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); | |||
72 | int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, | 73 | int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, |
73 | int polarity) | 74 | int polarity) |
74 | { | 75 | { |
75 | unsigned int irq; | 76 | struct irq_fwspec fwspec; |
76 | unsigned int irq_type = acpi_gsi_get_irq_type(trigger, polarity); | ||
77 | 77 | ||
78 | /* | 78 | if (WARN_ON(!acpi_gsi_domain_id)) { |
79 | * There is no way at present to look-up the IRQ domain on ACPI, | 79 | pr_warn("GSI: No registered irqchip, giving up\n"); |
80 | * hence always create mapping referring to the default domain | ||
81 | * by passing NULL as irq_domain parameter | ||
82 | */ | ||
83 | irq = irq_create_mapping(NULL, gsi); | ||
84 | if (!irq) | ||
85 | return -EINVAL; | 80 | return -EINVAL; |
81 | } | ||
86 | 82 | ||
87 | /* Set irq type if specified and different than the current one */ | 83 | fwspec.fwnode = acpi_gsi_domain_id; |
88 | if (irq_type != IRQ_TYPE_NONE && | 84 | fwspec.param[0] = gsi; |
89 | irq_type != irq_get_trigger_type(irq)) | 85 | fwspec.param[1] = acpi_gsi_get_irq_type(trigger, polarity); |
90 | irq_set_irq_type(irq, irq_type); | 86 | fwspec.param_count = 2; |
91 | return irq; | 87 | |
88 | return irq_create_fwspec_mapping(&fwspec); | ||
92 | } | 89 | } |
93 | EXPORT_SYMBOL_GPL(acpi_register_gsi); | 90 | EXPORT_SYMBOL_GPL(acpi_register_gsi); |
94 | 91 | ||
@@ -98,8 +95,23 @@ EXPORT_SYMBOL_GPL(acpi_register_gsi); | |||
98 | */ | 95 | */ |
99 | void acpi_unregister_gsi(u32 gsi) | 96 | void acpi_unregister_gsi(u32 gsi) |
100 | { | 97 | { |
101 | int irq = irq_find_mapping(NULL, gsi); | 98 | struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id, |
99 | DOMAIN_BUS_ANY); | ||
100 | int irq = irq_find_mapping(d, gsi); | ||
102 | 101 | ||
103 | irq_dispose_mapping(irq); | 102 | irq_dispose_mapping(irq); |
104 | } | 103 | } |
105 | EXPORT_SYMBOL_GPL(acpi_unregister_gsi); | 104 | EXPORT_SYMBOL_GPL(acpi_unregister_gsi); |
105 | |||
106 | /** | ||
107 | * acpi_set_irq_model - Setup the GSI irqdomain information | ||
108 | * @model: the value assigned to acpi_irq_model | ||
109 | * @fwnode: the irq_domain identifier for mapping and looking up | ||
110 | * GSI interrupts | ||
111 | */ | ||
112 | void __init acpi_set_irq_model(enum acpi_irq_model_id model, | ||
113 | struct fwnode_handle *fwnode) | ||
114 | { | ||
115 | acpi_irq_model = model; | ||
116 | acpi_gsi_domain_id = fwnode; | ||
117 | } | ||
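With acpi_set_irq_model() available, the GSI layer no longer assumes a single default domain: the root interrupt controller registers the fwnode its domain was created with, and acpi_register_gsi() then builds a two-cell irq_fwspec against that identifier. A rough sketch of the registration side, assuming the fwnode helpers added elsewhere in this series (example_domain_ops and the init function are hypothetical):

#include <linux/acpi.h>
#include <linux/irqdomain.h>

static int __init example_gic_acpi_init(void)
{
	struct fwnode_handle *fwnode;
	struct irq_domain *d;

	/* Synthesize an identifier for a domain that has no DT node */
	fwnode = irq_domain_alloc_fwnode(NULL);
	if (!fwnode)
		return -ENOMEM;

	d = irq_domain_create_tree(fwnode, &example_domain_ops, NULL);
	if (!d) {
		irq_domain_free_fwnode(fwnode);
		return -ENOMEM;
	}

	/* Let acpi_register_gsi()/acpi_gsi_to_irq() find this domain */
	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, fwnode);
	return 0;
}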
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c index 134483daac25..5df4575b5ba7 100644 --- a/drivers/base/platform-msi.c +++ b/drivers/base/platform-msi.c | |||
@@ -152,7 +152,7 @@ static int platform_msi_alloc_descs(struct device *dev, int nvec, | |||
152 | 152 | ||
153 | /** | 153 | /** |
154 | * platform_msi_create_irq_domain - Create a platform MSI interrupt domain | 154 | * platform_msi_create_irq_domain - Create a platform MSI interrupt domain |
155 | * @np: Optional device-tree node of the interrupt controller | 155 | * @fwnode: Optional fwnode of the interrupt controller |
156 | * @info: MSI domain info | 156 | * @info: MSI domain info |
157 | * @parent: Parent irq domain | 157 | * @parent: Parent irq domain |
158 | * | 158 | * |
@@ -162,7 +162,7 @@ static int platform_msi_alloc_descs(struct device *dev, int nvec, | |||
162 | * Returns: | 162 | * Returns: |
163 | * A domain pointer or NULL in case of failure. | 163 | * A domain pointer or NULL in case of failure. |
164 | */ | 164 | */ |
165 | struct irq_domain *platform_msi_create_irq_domain(struct device_node *np, | 165 | struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode, |
166 | struct msi_domain_info *info, | 166 | struct msi_domain_info *info, |
167 | struct irq_domain *parent) | 167 | struct irq_domain *parent) |
168 | { | 168 | { |
@@ -173,7 +173,7 @@ struct irq_domain *platform_msi_create_irq_domain(struct device_node *np, | |||
173 | if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) | 173 | if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) |
174 | platform_msi_update_chip_ops(info); | 174 | platform_msi_update_chip_ops(info); |
175 | 175 | ||
176 | domain = msi_create_irq_domain(np, info, parent); | 176 | domain = msi_create_irq_domain(fwnode, info, parent); |
177 | if (domain) | 177 | if (domain) |
178 | domain->bus_token = DOMAIN_BUS_PLATFORM_MSI; | 178 | domain->bus_token = DOMAIN_BUS_PLATFORM_MSI; |
179 | 179 | ||
diff --git a/drivers/gpio/gpio-sodaville.c b/drivers/gpio/gpio-sodaville.c index 65bc9f47a68e..34b02b42ab9e 100644 --- a/drivers/gpio/gpio-sodaville.c +++ b/drivers/gpio/gpio-sodaville.c | |||
@@ -102,7 +102,7 @@ static int sdv_xlate(struct irq_domain *h, struct device_node *node, | |||
102 | { | 102 | { |
103 | u32 line, type; | 103 | u32 line, type; |
104 | 104 | ||
105 | if (node != h->of_node) | 105 | if (node != irq_domain_get_of_node(h)) |
106 | return -EINVAL; | 106 | return -EINVAL; |
107 | 107 | ||
108 | if (intsize < 2) | 108 | if (intsize < 2) |
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 27b52c8729cd..4d7294e5d982 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig | |||
@@ -123,6 +123,7 @@ config RENESAS_INTC_IRQPIN | |||
123 | 123 | ||
124 | config RENESAS_IRQC | 124 | config RENESAS_IRQC |
125 | bool | 125 | bool |
126 | select GENERIC_IRQ_CHIP | ||
126 | select IRQ_DOMAIN | 127 | select IRQ_DOMAIN |
127 | 128 | ||
128 | config ST_IRQCHIP | 129 | config ST_IRQCHIP |
@@ -187,3 +188,8 @@ config IMX_GPCV2 | |||
187 | select IRQ_DOMAIN | 188 | select IRQ_DOMAIN |
188 | help | 189 | help |
189 | Enables the wakeup IRQs for IMX platforms with GPCv2 block | 190 | Enables the wakeup IRQs for IMX platforms with GPCv2 block |
191 | |||
192 | config IRQ_MXS | ||
193 | def_bool y if MACH_ASM9260 || ARCH_MXS | ||
194 | select IRQ_DOMAIN | ||
195 | select STMP_DEVICE | ||
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index bb3048f00e64..177f78f6e6d6 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile | |||
@@ -6,7 +6,7 @@ obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o | |||
6 | obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o | 6 | obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o |
7 | obj-$(CONFIG_ARCH_MMP) += irq-mmp.o | 7 | obj-$(CONFIG_ARCH_MMP) += irq-mmp.o |
8 | obj-$(CONFIG_ARCH_MVEBU) += irq-armada-370-xp.o | 8 | obj-$(CONFIG_ARCH_MVEBU) += irq-armada-370-xp.o |
9 | obj-$(CONFIG_ARCH_MXS) += irq-mxs.o | 9 | obj-$(CONFIG_IRQ_MXS) += irq-mxs.o |
10 | obj-$(CONFIG_ARCH_TEGRA) += irq-tegra.o | 10 | obj-$(CONFIG_ARCH_TEGRA) += irq-tegra.o |
11 | obj-$(CONFIG_ARCH_S3C24XX) += irq-s3c24xx.o | 11 | obj-$(CONFIG_ARCH_S3C24XX) += irq-s3c24xx.o |
12 | obj-$(CONFIG_DW_APB_ICTL) += irq-dw-apb-ictl.o | 12 | obj-$(CONFIG_DW_APB_ICTL) += irq-dw-apb-ictl.o |
diff --git a/drivers/irqchip/alphascale_asm9260-icoll.h b/drivers/irqchip/alphascale_asm9260-icoll.h new file mode 100644 index 000000000000..5cec108ee204 --- /dev/null +++ b/drivers/irqchip/alphascale_asm9260-icoll.h | |||
@@ -0,0 +1,109 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2014 Oleksij Rempel <linux@rempel-privat.de> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #ifndef _ALPHASCALE_ASM9260_ICOLL_H | ||
11 | #define _ALPHASCALE_ASM9260_ICOLL_H | ||
12 | |||
13 | #define ASM9260_NUM_IRQS 64 | ||
14 | /* | ||
15 | * this device provides 4 offsets for each register: | ||
16 | * 0x0 - plain read write mode | ||
17 | * 0x4 - set mode, OR logic. | ||
18 | * 0x8 - clr mode, AND-NOT logic (write 1 to clear). | ||
19 | * 0xc - toggle mode, XOR logic. | ||
20 | */ | ||
21 | |||
22 | #define ASM9260_HW_ICOLL_VECTOR 0x0000 | ||
23 | /* | ||
24 | * bits 31:2 | ||
25 | * This register presents the vector address for the interrupt currently | ||
26 | * active on the CPU IRQ input. Writing to this register notifies the | ||
27 | * interrupt collector that the interrupt service routine for the current | ||
28 | * interrupt has been entered. | ||
29 | * The exception trap should have an LDPC instruction from this address: | ||
30 | * LDPC ASM9260_HW_ICOLL_VECTOR_ADDR; IRQ exception at 0xffff0018 | ||
31 | */ | ||
32 | |||
33 | /* | ||
34 | * The Interrupt Collector Level Acknowledge Register is used by software to | ||
35 | * indicate the completion of an interrupt on a specific level. | ||
36 | * This register is written at the very end of an interrupt service routine. If | ||
37 | * nesting is used then the CPU irq must be turned on before writing to this | ||
38 | * register to avoid a race condition in the CPU interrupt hardware. | ||
39 | */ | ||
40 | #define ASM9260_HW_ICOLL_LEVELACK 0x0010 | ||
41 | #define ASM9260_BM_LEVELn(nr) BIT(nr) | ||
42 | |||
43 | #define ASM9260_HW_ICOLL_CTRL 0x0020 | ||
44 | /* | ||
45 | * ASM9260_BM_CTRL_SFTRST and ASM9260_BM_CTRL_CLKGATE are not available on | ||
46 | * asm9260. | ||
47 | */ | ||
48 | #define ASM9260_BM_CTRL_SFTRST BIT(31) | ||
49 | #define ASM9260_BM_CTRL_CLKGATE BIT(30) | ||
50 | /* disable interrupt level nesting */ | ||
51 | #define ASM9260_BM_CTRL_NO_NESTING BIT(19) | ||
52 | /* | ||
53 | * Set this bit to one to enable the RISC32-style read side effect associated with | ||
54 | * the vector address register. In this mode, interrupt in-service is signaled | ||
55 | * by the read of the ASM9260_HW_ICOLL_VECTOR register to acquire the interrupt | ||
56 | * vector address. Set this bit to zero for normal operation, in which the ISR | ||
57 | * signals in-service explicitly by means of a write to the | ||
58 | * ASM9260_HW_ICOLL_VECTOR register. | ||
59 | * 0 - Must Write to Vector register to go in-service. | ||
60 | * 1 - Go in-service as a read side effect | ||
61 | */ | ||
62 | #define ASM9260_BM_CTRL_ARM_RSE_MODE BIT(18) | ||
63 | #define ASM9260_BM_CTRL_IRQ_ENABLE BIT(16) | ||
64 | |||
65 | #define ASM9260_HW_ICOLL_STAT_OFFSET 0x0030 | ||
66 | /* | ||
67 | * bits 5:0 | ||
68 | * Vector number of current interrupt. Multiply by 4 and add to vector base | ||
69 | * address to obtain the value in ASM9260_HW_ICOLL_VECTOR. | ||
70 | */ | ||
71 | |||
72 | /* | ||
73 | * RAW0 and RAW1 provides a read-only view of the raw interrupt request lines | ||
74 | * coming from various parts of the chip. Its purpose is to improve diagnostic | ||
75 | * observability. | ||
76 | */ | ||
77 | #define ASM9260_HW_ICOLL_RAW0 0x0040 | ||
78 | #define ASM9260_HW_ICOLL_RAW1 0x0050 | ||
79 | |||
80 | #define ASM9260_HW_ICOLL_INTERRUPT0 0x0060 | ||
81 | #define ASM9260_HW_ICOLL_INTERRUPTn(n) (0x0060 + ((n) >> 2) * 0x10) | ||
82 | /* | ||
83 | * WARNING: Modifying the priority of an enabled interrupt may result in | ||
84 | * undefined behavior. | ||
85 | */ | ||
86 | #define ASM9260_BM_INT_PRIORITY_MASK 0x3 | ||
87 | #define ASM9260_BM_INT_ENABLE BIT(2) | ||
88 | #define ASM9260_BM_INT_SOFTIRQ BIT(3) | ||
89 | |||
90 | #define ASM9260_BM_ICOLL_INTERRUPTn_SHIFT(n) (((n) & 0x3) << 3) | ||
91 | #define ASM9260_BM_ICOLL_INTERRUPTn_ENABLE(n) (1 << (2 + \ | ||
92 | ASM9260_BM_ICOLL_INTERRUPTn_SHIFT(n))) | ||
93 | |||
94 | #define ASM9260_HW_ICOLL_VBASE 0x0160 | ||
95 | /* | ||
96 | * bits 31:2 | ||
97 | * This bitfield holds the upper 30 bits of the base address of the vector | ||
98 | * table. | ||
99 | */ | ||
100 | |||
101 | #define ASM9260_HW_ICOLL_CLEAR0 0x01d0 | ||
102 | #define ASM9260_HW_ICOLL_CLEAR1 0x01e0 | ||
103 | #define ASM9260_HW_ICOLL_CLEARn(n) (((n >> 5) * 0x10) \ | ||
104 | + SET_REG) | ||
105 | #define ASM9260_BM_CLEAR_BIT(n) BIT(n & 0x1f) | ||
106 | |||
107 | /* Scratchpad */ | ||
108 | #define ASM9260_HW_ICOLL_UNDEF_VECTOR 0x01f0 | ||
109 | #endif | ||
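The plain/set/clear/toggle aliases described at the top of this header allow single bits to be flipped without a read-modify-write cycle. A sketch of unmasking line n through the +0x4 set-mode alias of its INTERRUPTn register (icoll_base and the literal +0x4 offset are assumptions drawn from the comment above, not macros provided by this header):

#include <linux/io.h>
#include "alphascale_asm9260-icoll.h"

static void __iomem *icoll_base;	/* hypothetical, ioremap()ed during probe */

static void example_icoll_unmask(unsigned int n)
{
	/* Write-1-to-set the ENABLE bit in the INTERRUPTn register bank */
	writel(ASM9260_BM_ICOLL_INTERRUPTn_ENABLE(n),
	       icoll_base + ASM9260_HW_ICOLL_INTERRUPTn(n) + 0x4);
}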
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c index cd7d3bc78e34..ead15be2d20a 100644 --- a/drivers/irqchip/exynos-combiner.c +++ b/drivers/irqchip/exynos-combiner.c | |||
@@ -144,7 +144,7 @@ static int combiner_irq_domain_xlate(struct irq_domain *d, | |||
144 | unsigned long *out_hwirq, | 144 | unsigned long *out_hwirq, |
145 | unsigned int *out_type) | 145 | unsigned int *out_type) |
146 | { | 146 | { |
147 | if (d->of_node != controller) | 147 | if (irq_domain_get_of_node(d) != controller) |
148 | return -EINVAL; | 148 | return -EINVAL; |
149 | 149 | ||
150 | if (intsize < 2) | 150 | if (intsize < 2) |
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c index 63cd031b2c28..b12a5d58546f 100644 --- a/drivers/irqchip/irq-atmel-aic-common.c +++ b/drivers/irqchip/irq-atmel-aic-common.c | |||
@@ -114,7 +114,7 @@ int aic_common_irq_domain_xlate(struct irq_domain *d, | |||
114 | 114 | ||
115 | static void __init aic_common_ext_irq_of_init(struct irq_domain *domain) | 115 | static void __init aic_common_ext_irq_of_init(struct irq_domain *domain) |
116 | { | 116 | { |
117 | struct device_node *node = domain->of_node; | 117 | struct device_node *node = irq_domain_get_of_node(domain); |
118 | struct irq_chip_generic *gc; | 118 | struct irq_chip_generic *gc; |
119 | struct aic_chip_data *aic; | 119 | struct aic_chip_data *aic; |
120 | struct property *prop; | 120 | struct property *prop; |
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c index f6d680485bee..62bb840c613f 100644 --- a/drivers/irqchip/irq-atmel-aic5.c +++ b/drivers/irqchip/irq-atmel-aic5.c | |||
@@ -70,16 +70,15 @@ static struct irq_domain *aic5_domain; | |||
70 | static asmlinkage void __exception_irq_entry | 70 | static asmlinkage void __exception_irq_entry |
71 | aic5_handle(struct pt_regs *regs) | 71 | aic5_handle(struct pt_regs *regs) |
72 | { | 72 | { |
73 | struct irq_domain_chip_generic *dgc = aic5_domain->gc; | 73 | struct irq_chip_generic *bgc = irq_get_domain_generic_chip(aic5_domain, 0); |
74 | struct irq_chip_generic *gc = dgc->gc[0]; | ||
75 | u32 irqnr; | 74 | u32 irqnr; |
76 | u32 irqstat; | 75 | u32 irqstat; |
77 | 76 | ||
78 | irqnr = irq_reg_readl(gc, AT91_AIC5_IVR); | 77 | irqnr = irq_reg_readl(bgc, AT91_AIC5_IVR); |
79 | irqstat = irq_reg_readl(gc, AT91_AIC5_ISR); | 78 | irqstat = irq_reg_readl(bgc, AT91_AIC5_ISR); |
80 | 79 | ||
81 | if (!irqstat) | 80 | if (!irqstat) |
82 | irq_reg_writel(gc, 0, AT91_AIC5_EOICR); | 81 | irq_reg_writel(bgc, 0, AT91_AIC5_EOICR); |
83 | else | 82 | else |
84 | handle_domain_irq(aic5_domain, irqnr, regs); | 83 | handle_domain_irq(aic5_domain, irqnr, regs); |
85 | } | 84 | } |
@@ -87,8 +86,7 @@ aic5_handle(struct pt_regs *regs) | |||
87 | static void aic5_mask(struct irq_data *d) | 86 | static void aic5_mask(struct irq_data *d) |
88 | { | 87 | { |
89 | struct irq_domain *domain = d->domain; | 88 | struct irq_domain *domain = d->domain; |
90 | struct irq_domain_chip_generic *dgc = domain->gc; | 89 | struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); |
91 | struct irq_chip_generic *bgc = dgc->gc[0]; | ||
92 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | 90 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
93 | 91 | ||
94 | /* | 92 | /* |
@@ -105,8 +103,7 @@ static void aic5_mask(struct irq_data *d) | |||
105 | static void aic5_unmask(struct irq_data *d) | 103 | static void aic5_unmask(struct irq_data *d) |
106 | { | 104 | { |
107 | struct irq_domain *domain = d->domain; | 105 | struct irq_domain *domain = d->domain; |
108 | struct irq_domain_chip_generic *dgc = domain->gc; | 106 | struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); |
109 | struct irq_chip_generic *bgc = dgc->gc[0]; | ||
110 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | 107 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
111 | 108 | ||
112 | /* | 109 | /* |
@@ -123,14 +120,13 @@ static void aic5_unmask(struct irq_data *d) | |||
123 | static int aic5_retrigger(struct irq_data *d) | 120 | static int aic5_retrigger(struct irq_data *d) |
124 | { | 121 | { |
125 | struct irq_domain *domain = d->domain; | 122 | struct irq_domain *domain = d->domain; |
126 | struct irq_domain_chip_generic *dgc = domain->gc; | 123 | struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); |
127 | struct irq_chip_generic *gc = dgc->gc[0]; | ||
128 | 124 | ||
129 | /* Enable interrupt on AIC5 */ | 125 | /* Enable interrupt on AIC5 */ |
130 | irq_gc_lock(gc); | 126 | irq_gc_lock(bgc); |
131 | irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); | 127 | irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR); |
132 | irq_reg_writel(gc, 1, AT91_AIC5_ISCR); | 128 | irq_reg_writel(bgc, 1, AT91_AIC5_ISCR); |
133 | irq_gc_unlock(gc); | 129 | irq_gc_unlock(bgc); |
134 | 130 | ||
135 | return 0; | 131 | return 0; |
136 | } | 132 | } |
@@ -138,18 +134,17 @@ static int aic5_retrigger(struct irq_data *d) | |||
138 | static int aic5_set_type(struct irq_data *d, unsigned type) | 134 | static int aic5_set_type(struct irq_data *d, unsigned type) |
139 | { | 135 | { |
140 | struct irq_domain *domain = d->domain; | 136 | struct irq_domain *domain = d->domain; |
141 | struct irq_domain_chip_generic *dgc = domain->gc; | 137 | struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); |
142 | struct irq_chip_generic *gc = dgc->gc[0]; | ||
143 | unsigned int smr; | 138 | unsigned int smr; |
144 | int ret; | 139 | int ret; |
145 | 140 | ||
146 | irq_gc_lock(gc); | 141 | irq_gc_lock(bgc); |
147 | irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); | 142 | irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR); |
148 | smr = irq_reg_readl(gc, AT91_AIC5_SMR); | 143 | smr = irq_reg_readl(bgc, AT91_AIC5_SMR); |
149 | ret = aic_common_set_type(d, type, &smr); | 144 | ret = aic_common_set_type(d, type, &smr); |
150 | if (!ret) | 145 | if (!ret) |
151 | irq_reg_writel(gc, smr, AT91_AIC5_SMR); | 146 | irq_reg_writel(bgc, smr, AT91_AIC5_SMR); |
152 | irq_gc_unlock(gc); | 147 | irq_gc_unlock(bgc); |
153 | 148 | ||
154 | return ret; | 149 | return ret; |
155 | } | 150 | } |
@@ -159,7 +154,7 @@ static void aic5_suspend(struct irq_data *d) | |||
159 | { | 154 | { |
160 | struct irq_domain *domain = d->domain; | 155 | struct irq_domain *domain = d->domain; |
161 | struct irq_domain_chip_generic *dgc = domain->gc; | 156 | struct irq_domain_chip_generic *dgc = domain->gc; |
162 | struct irq_chip_generic *bgc = dgc->gc[0]; | 157 | struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); |
163 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | 158 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
164 | int i; | 159 | int i; |
165 | u32 mask; | 160 | u32 mask; |
@@ -183,7 +178,7 @@ static void aic5_resume(struct irq_data *d) | |||
183 | { | 178 | { |
184 | struct irq_domain *domain = d->domain; | 179 | struct irq_domain *domain = d->domain; |
185 | struct irq_domain_chip_generic *dgc = domain->gc; | 180 | struct irq_domain_chip_generic *dgc = domain->gc; |
186 | struct irq_chip_generic *bgc = dgc->gc[0]; | 181 | struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); |
187 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | 182 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
188 | int i; | 183 | int i; |
189 | u32 mask; | 184 | u32 mask; |
@@ -207,7 +202,7 @@ static void aic5_pm_shutdown(struct irq_data *d) | |||
207 | { | 202 | { |
208 | struct irq_domain *domain = d->domain; | 203 | struct irq_domain *domain = d->domain; |
209 | struct irq_domain_chip_generic *dgc = domain->gc; | 204 | struct irq_domain_chip_generic *dgc = domain->gc; |
210 | struct irq_chip_generic *bgc = dgc->gc[0]; | 205 | struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); |
211 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | 206 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
212 | int i; | 207 | int i; |
213 | 208 | ||
@@ -262,12 +257,11 @@ static int aic5_irq_domain_xlate(struct irq_domain *d, | |||
262 | irq_hw_number_t *out_hwirq, | 257 | irq_hw_number_t *out_hwirq, |
263 | unsigned int *out_type) | 258 | unsigned int *out_type) |
264 | { | 259 | { |
265 | struct irq_domain_chip_generic *dgc = d->gc; | 260 | struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0); |
266 | struct irq_chip_generic *gc; | ||
267 | unsigned smr; | 261 | unsigned smr; |
268 | int ret; | 262 | int ret; |
269 | 263 | ||
270 | if (!dgc) | 264 | if (!bgc) |
271 | return -EINVAL; | 265 | return -EINVAL; |
272 | 266 | ||
273 | ret = aic_common_irq_domain_xlate(d, ctrlr, intspec, intsize, | 267 | ret = aic_common_irq_domain_xlate(d, ctrlr, intspec, intsize, |
@@ -275,15 +269,13 @@ static int aic5_irq_domain_xlate(struct irq_domain *d, | |||
275 | if (ret) | 269 | if (ret) |
276 | return ret; | 270 | return ret; |
277 | 271 | ||
278 | gc = dgc->gc[0]; | 272 | irq_gc_lock(bgc); |
279 | 273 | irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR); | |
280 | irq_gc_lock(gc); | 274 | smr = irq_reg_readl(bgc, AT91_AIC5_SMR); |
281 | irq_reg_writel(gc, *out_hwirq, AT91_AIC5_SSR); | ||
282 | smr = irq_reg_readl(gc, AT91_AIC5_SMR); | ||
283 | ret = aic_common_set_priority(intspec[2], &smr); | 275 | ret = aic_common_set_priority(intspec[2], &smr); |
284 | if (!ret) | 276 | if (!ret) |
285 | irq_reg_writel(gc, intspec[2] | smr, AT91_AIC5_SMR); | 277 | irq_reg_writel(bgc, intspec[2] | smr, AT91_AIC5_SMR); |
286 | irq_gc_unlock(gc); | 278 | irq_gc_unlock(bgc); |
287 | 279 | ||
288 | return ret; | 280 | return ret; |
289 | } | 281 | } |
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c index a7f5626930f5..75573fa431ba 100644 --- a/drivers/irqchip/irq-crossbar.c +++ b/drivers/irqchip/irq-crossbar.c | |||
@@ -78,10 +78,13 @@ static struct irq_chip crossbar_chip = { | |||
78 | static int allocate_gic_irq(struct irq_domain *domain, unsigned virq, | 78 | static int allocate_gic_irq(struct irq_domain *domain, unsigned virq, |
79 | irq_hw_number_t hwirq) | 79 | irq_hw_number_t hwirq) |
80 | { | 80 | { |
81 | struct of_phandle_args args; | 81 | struct irq_fwspec fwspec; |
82 | int i; | 82 | int i; |
83 | int err; | 83 | int err; |
84 | 84 | ||
85 | if (!irq_domain_get_of_node(domain->parent)) | ||
86 | return -EINVAL; | ||
87 | |||
85 | raw_spin_lock(&cb->lock); | 88 | raw_spin_lock(&cb->lock); |
86 | for (i = cb->int_max - 1; i >= 0; i--) { | 89 | for (i = cb->int_max - 1; i >= 0; i--) { |
87 | if (cb->irq_map[i] == IRQ_FREE) { | 90 | if (cb->irq_map[i] == IRQ_FREE) { |
@@ -94,13 +97,13 @@ static int allocate_gic_irq(struct irq_domain *domain, unsigned virq, | |||
94 | if (i < 0) | 97 | if (i < 0) |
95 | return -ENODEV; | 98 | return -ENODEV; |
96 | 99 | ||
97 | args.np = domain->parent->of_node; | 100 | fwspec.fwnode = domain->parent->fwnode; |
98 | args.args_count = 3; | 101 | fwspec.param_count = 3; |
99 | args.args[0] = 0; /* SPI */ | 102 | fwspec.param[0] = 0; /* SPI */ |
100 | args.args[1] = i; | 103 | fwspec.param[1] = i; |
101 | args.args[2] = IRQ_TYPE_LEVEL_HIGH; | 104 | fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH; |
102 | 105 | ||
103 | err = irq_domain_alloc_irqs_parent(domain, virq, 1, &args); | 106 | err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); |
104 | if (err) | 107 | if (err) |
105 | cb->irq_map[i] = IRQ_FREE; | 108 | cb->irq_map[i] = IRQ_FREE; |
106 | else | 109 | else |
@@ -112,16 +115,16 @@ static int allocate_gic_irq(struct irq_domain *domain, unsigned virq, | |||
112 | static int crossbar_domain_alloc(struct irq_domain *d, unsigned int virq, | 115 | static int crossbar_domain_alloc(struct irq_domain *d, unsigned int virq, |
113 | unsigned int nr_irqs, void *data) | 116 | unsigned int nr_irqs, void *data) |
114 | { | 117 | { |
115 | struct of_phandle_args *args = data; | 118 | struct irq_fwspec *fwspec = data; |
116 | irq_hw_number_t hwirq; | 119 | irq_hw_number_t hwirq; |
117 | int i; | 120 | int i; |
118 | 121 | ||
119 | if (args->args_count != 3) | 122 | if (fwspec->param_count != 3) |
120 | return -EINVAL; /* Not GIC compliant */ | 123 | return -EINVAL; /* Not GIC compliant */ |
121 | if (args->args[0] != 0) | 124 | if (fwspec->param[0] != 0) |
122 | return -EINVAL; /* No PPI should point to this domain */ | 125 | return -EINVAL; /* No PPI should point to this domain */ |
123 | 126 | ||
124 | hwirq = args->args[1]; | 127 | hwirq = fwspec->param[1]; |
125 | if ((hwirq + nr_irqs) > cb->max_crossbar_sources) | 128 | if ((hwirq + nr_irqs) > cb->max_crossbar_sources) |
126 | return -EINVAL; /* Can't deal with this */ | 129 | return -EINVAL; /* Can't deal with this */ |
127 | 130 | ||
@@ -166,28 +169,31 @@ static void crossbar_domain_free(struct irq_domain *domain, unsigned int virq, | |||
166 | raw_spin_unlock(&cb->lock); | 169 | raw_spin_unlock(&cb->lock); |
167 | } | 170 | } |
168 | 171 | ||
169 | static int crossbar_domain_xlate(struct irq_domain *d, | 172 | static int crossbar_domain_translate(struct irq_domain *d, |
170 | struct device_node *controller, | 173 | struct irq_fwspec *fwspec, |
171 | const u32 *intspec, unsigned int intsize, | 174 | unsigned long *hwirq, |
172 | unsigned long *out_hwirq, | 175 | unsigned int *type) |
173 | unsigned int *out_type) | ||
174 | { | 176 | { |
175 | if (d->of_node != controller) | 177 | if (is_of_node(fwspec->fwnode)) { |
176 | return -EINVAL; /* Shouldn't happen, really... */ | 178 | if (fwspec->param_count != 3) |
177 | if (intsize != 3) | 179 | return -EINVAL; |
178 | return -EINVAL; /* Not GIC compliant */ | ||
179 | if (intspec[0] != 0) | ||
180 | return -EINVAL; /* No PPI should point to this domain */ | ||
181 | 180 | ||
182 | *out_hwirq = intspec[1]; | 181 | /* No PPI should point to this domain */ |
183 | *out_type = intspec[2]; | 182 | if (fwspec->param[0] != 0) |
184 | return 0; | 183 | return -EINVAL; |
184 | |||
185 | *hwirq = fwspec->param[1]; | ||
186 | *type = fwspec->param[2]; | ||
187 | return 0; | ||
188 | } | ||
189 | |||
190 | return -EINVAL; | ||
185 | } | 191 | } |
186 | 192 | ||
187 | static const struct irq_domain_ops crossbar_domain_ops = { | 193 | static const struct irq_domain_ops crossbar_domain_ops = { |
188 | .alloc = crossbar_domain_alloc, | 194 | .alloc = crossbar_domain_alloc, |
189 | .free = crossbar_domain_free, | 195 | .free = crossbar_domain_free, |
190 | .xlate = crossbar_domain_xlate, | 196 | .translate = crossbar_domain_translate, |
191 | }; | 197 | }; |
192 | 198 | ||
193 | static int __init crossbar_of_init(struct device_node *node) | 199 | static int __init crossbar_of_init(struct device_node *node) |
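The switch from .xlate to .translate means the crossbar no longer compares device_node pointers itself; it receives an irq_fwspec and only checks that the specifier is DT-based and GIC-shaped. For a DT entry such as interrupts = <0 74 IRQ_TYPE_LEVEL_HIGH>, the core hands the callback roughly the following (crossbar_np is a hypothetical pointer to the crossbar's node):

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static void example_show_fwspec(struct device_node *crossbar_np)
{
	struct irq_fwspec fwspec = {
		.fwnode		= of_node_to_fwnode(crossbar_np),
		.param_count	= 3,
		.param		= { 0, 74, IRQ_TYPE_LEVEL_HIGH },
	};

	/*
	 * crossbar_domain_translate() maps this to hwirq = 74 and
	 * type = IRQ_TYPE_LEVEL_HIGH; a non-zero param[0] (a PPI) or a
	 * non-DT fwnode is rejected with -EINVAL.
	 */
}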
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c index 9448e391cb71..44a077f3a4a2 100644 --- a/drivers/irqchip/irq-gic-common.c +++ b/drivers/irqchip/irq-gic-common.c | |||
@@ -21,6 +21,17 @@ | |||
21 | 21 | ||
22 | #include "irq-gic-common.h" | 22 | #include "irq-gic-common.h" |
23 | 23 | ||
24 | void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks, | ||
25 | void *data) | ||
26 | { | ||
27 | for (; quirks->desc; quirks++) { | ||
28 | if (quirks->iidr != (quirks->mask & iidr)) | ||
29 | continue; | ||
30 | quirks->init(data); | ||
31 | pr_info("GIC: enabling workaround for %s\n", quirks->desc); | ||
32 | } | ||
33 | } | ||
34 | |||
24 | int gic_configure_irq(unsigned int irq, unsigned int type, | 35 | int gic_configure_irq(unsigned int irq, unsigned int type, |
25 | void __iomem *base, void (*sync_access)(void)) | 36 | void __iomem *base, void (*sync_access)(void)) |
26 | { | 37 | { |
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h index 35a9884778bd..fff697db8e22 100644 --- a/drivers/irqchip/irq-gic-common.h +++ b/drivers/irqchip/irq-gic-common.h | |||
@@ -20,10 +20,19 @@ | |||
20 | #include <linux/of.h> | 20 | #include <linux/of.h> |
21 | #include <linux/irqdomain.h> | 21 | #include <linux/irqdomain.h> |
22 | 22 | ||
23 | struct gic_quirk { | ||
24 | const char *desc; | ||
25 | void (*init)(void *data); | ||
26 | u32 iidr; | ||
27 | u32 mask; | ||
28 | }; | ||
29 | |||
23 | int gic_configure_irq(unsigned int irq, unsigned int type, | 30 | int gic_configure_irq(unsigned int irq, unsigned int type, |
24 | void __iomem *base, void (*sync_access)(void)); | 31 | void __iomem *base, void (*sync_access)(void)); |
25 | void gic_dist_config(void __iomem *base, int gic_irqs, | 32 | void gic_dist_config(void __iomem *base, int gic_irqs, |
26 | void (*sync_access)(void)); | 33 | void (*sync_access)(void)); |
27 | void gic_cpu_config(void __iomem *base, void (*sync_access)(void)); | 34 | void gic_cpu_config(void __iomem *base, void (*sync_access)(void)); |
35 | void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks, | ||
36 | void *data); | ||
28 | 37 | ||
29 | #endif /* _IRQ_GIC_COMMON_H */ | 38 | #endif /* _IRQ_GIC_COMMON_H */ |
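gic_enable_quirks() runs the init hook of every table entry whose masked IIDR matches, so the mask marks don't-care fields: with iidr = 0xa100034c and mask = 0xffff0fff (the values used by the ITS further down), the revision nibble is ignored and an IIDR of 0xa100134c still matches, since (0xa100134c & 0xffff0fff) == 0xa100034c. A minimal sketch of a caller-side table (names other than the helpers above are illustrative):

#include "irq-gic-common.h"

static void example_quirk_init(void *data)
{
	/* typically sets a flag in the driver's private data */
}

static const struct gic_quirk example_quirks[] = {
	{
		.desc	= "example: IIDR 0xa100034c, any revision",
		.iidr	= 0xa100034c,
		.mask	= 0xffff0fff,	/* revision field is don't-care */
		.init	= example_quirk_init,
	},
	{ }	/* table ends at the first entry with a NULL desc */
};

/* gic_enable_quirks(iidr, example_quirks, priv) then calls example_quirk_init(priv). */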
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c index 12985daa66ab..87f8d104acab 100644 --- a/drivers/irqchip/irq-gic-v2m.c +++ b/drivers/irqchip/irq-gic-v2m.c | |||
@@ -37,19 +37,31 @@ | |||
37 | #define V2M_MSI_SETSPI_NS 0x040 | 37 | #define V2M_MSI_SETSPI_NS 0x040 |
38 | #define V2M_MIN_SPI 32 | 38 | #define V2M_MIN_SPI 32 |
39 | #define V2M_MAX_SPI 1019 | 39 | #define V2M_MAX_SPI 1019 |
40 | #define V2M_MSI_IIDR 0xFCC | ||
40 | 41 | ||
41 | #define V2M_MSI_TYPER_BASE_SPI(x) \ | 42 | #define V2M_MSI_TYPER_BASE_SPI(x) \ |
42 | (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK) | 43 | (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK) |
43 | 44 | ||
44 | #define V2M_MSI_TYPER_NUM_SPI(x) ((x) & V2M_MSI_TYPER_NUM_MASK) | 45 | #define V2M_MSI_TYPER_NUM_SPI(x) ((x) & V2M_MSI_TYPER_NUM_MASK) |
45 | 46 | ||
47 | /* APM X-Gene with GICv2m MSI_IIDR register value */ | ||
48 | #define XGENE_GICV2M_MSI_IIDR 0x06000170 | ||
49 | |||
50 | /* List of flags for specific v2m implementation */ | ||
51 | #define GICV2M_NEEDS_SPI_OFFSET 0x00000001 | ||
52 | |||
53 | static LIST_HEAD(v2m_nodes); | ||
54 | static DEFINE_SPINLOCK(v2m_lock); | ||
55 | |||
46 | struct v2m_data { | 56 | struct v2m_data { |
47 | spinlock_t msi_cnt_lock; | 57 | struct list_head entry; |
58 | struct device_node *node; | ||
48 | struct resource res; /* GICv2m resource */ | 59 | struct resource res; /* GICv2m resource */ |
49 | void __iomem *base; /* GICv2m virt address */ | 60 | void __iomem *base; /* GICv2m virt address */ |
50 | u32 spi_start; /* The SPI number that MSIs start */ | 61 | u32 spi_start; /* The SPI number that MSIs start */ |
51 | u32 nr_spis; /* The number of SPIs for MSIs */ | 62 | u32 nr_spis; /* The number of SPIs for MSIs */ |
52 | unsigned long *bm; /* MSI vector bitmap */ | 63 | unsigned long *bm; /* MSI vector bitmap */ |
64 | u32 flags; /* v2m flags for specific implementation */ | ||
53 | }; | 65 | }; |
54 | 66 | ||
55 | static void gicv2m_mask_msi_irq(struct irq_data *d) | 67 | static void gicv2m_mask_msi_irq(struct irq_data *d) |
@@ -98,6 +110,9 @@ static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) | |||
98 | msg->address_hi = upper_32_bits(addr); | 110 | msg->address_hi = upper_32_bits(addr); |
99 | msg->address_lo = lower_32_bits(addr); | 111 | msg->address_lo = lower_32_bits(addr); |
100 | msg->data = data->hwirq; | 112 | msg->data = data->hwirq; |
113 | |||
114 | if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET) | ||
115 | msg->data -= v2m->spi_start; | ||
101 | } | 116 | } |
102 | 117 | ||
103 | static struct irq_chip gicv2m_irq_chip = { | 118 | static struct irq_chip gicv2m_irq_chip = { |
@@ -113,17 +128,21 @@ static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain, | |||
113 | unsigned int virq, | 128 | unsigned int virq, |
114 | irq_hw_number_t hwirq) | 129 | irq_hw_number_t hwirq) |
115 | { | 130 | { |
116 | struct of_phandle_args args; | 131 | struct irq_fwspec fwspec; |
117 | struct irq_data *d; | 132 | struct irq_data *d; |
118 | int err; | 133 | int err; |
119 | 134 | ||
120 | args.np = domain->parent->of_node; | 135 | if (is_of_node(domain->parent->fwnode)) { |
121 | args.args_count = 3; | 136 | fwspec.fwnode = domain->parent->fwnode; |
122 | args.args[0] = 0; | 137 | fwspec.param_count = 3; |
123 | args.args[1] = hwirq - 32; | 138 | fwspec.param[0] = 0; |
124 | args.args[2] = IRQ_TYPE_EDGE_RISING; | 139 | fwspec.param[1] = hwirq - 32; |
140 | fwspec.param[2] = IRQ_TYPE_EDGE_RISING; | ||
141 | } else { | ||
142 | return -EINVAL; | ||
143 | } | ||
125 | 144 | ||
126 | err = irq_domain_alloc_irqs_parent(domain, virq, 1, &args); | 145 | err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); |
127 | if (err) | 146 | if (err) |
128 | return err; | 147 | return err; |
129 | 148 | ||
@@ -143,27 +162,30 @@ static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq) | |||
143 | return; | 162 | return; |
144 | } | 163 | } |
145 | 164 | ||
146 | spin_lock(&v2m->msi_cnt_lock); | 165 | spin_lock(&v2m_lock); |
147 | __clear_bit(pos, v2m->bm); | 166 | __clear_bit(pos, v2m->bm); |
148 | spin_unlock(&v2m->msi_cnt_lock); | 167 | spin_unlock(&v2m_lock); |
149 | } | 168 | } |
150 | 169 | ||
151 | static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | 170 | static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, |
152 | unsigned int nr_irqs, void *args) | 171 | unsigned int nr_irqs, void *args) |
153 | { | 172 | { |
154 | struct v2m_data *v2m = domain->host_data; | 173 | struct v2m_data *v2m = NULL, *tmp; |
155 | int hwirq, offset, err = 0; | 174 | int hwirq, offset, err = 0; |
156 | 175 | ||
157 | spin_lock(&v2m->msi_cnt_lock); | 176 | spin_lock(&v2m_lock); |
158 | offset = find_first_zero_bit(v2m->bm, v2m->nr_spis); | 177 | list_for_each_entry(tmp, &v2m_nodes, entry) { |
159 | if (offset < v2m->nr_spis) | 178 | offset = find_first_zero_bit(tmp->bm, tmp->nr_spis); |
160 | __set_bit(offset, v2m->bm); | 179 | if (offset < tmp->nr_spis) { |
161 | else | 180 | __set_bit(offset, tmp->bm); |
162 | err = -ENOSPC; | 181 | v2m = tmp; |
163 | spin_unlock(&v2m->msi_cnt_lock); | 182 | break; |
183 | } | ||
184 | } | ||
185 | spin_unlock(&v2m_lock); | ||
164 | 186 | ||
165 | if (err) | 187 | if (!v2m) |
166 | return err; | 188 | return -ENOSPC; |
167 | 189 | ||
168 | hwirq = v2m->spi_start + offset; | 190 | hwirq = v2m->spi_start + offset; |
169 | 191 | ||
@@ -224,12 +246,61 @@ static struct msi_domain_info gicv2m_pmsi_domain_info = { | |||
224 | .chip = &gicv2m_pmsi_irq_chip, | 246 | .chip = &gicv2m_pmsi_irq_chip, |
225 | }; | 247 | }; |
226 | 248 | ||
249 | static void gicv2m_teardown(void) | ||
250 | { | ||
251 | struct v2m_data *v2m, *tmp; | ||
252 | |||
253 | list_for_each_entry_safe(v2m, tmp, &v2m_nodes, entry) { | ||
254 | list_del(&v2m->entry); | ||
255 | kfree(v2m->bm); | ||
256 | iounmap(v2m->base); | ||
257 | of_node_put(v2m->node); | ||
258 | kfree(v2m); | ||
259 | } | ||
260 | } | ||
261 | |||
262 | static int gicv2m_allocate_domains(struct irq_domain *parent) | ||
263 | { | ||
264 | struct irq_domain *inner_domain, *pci_domain, *plat_domain; | ||
265 | struct v2m_data *v2m; | ||
266 | |||
267 | v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry); | ||
268 | if (!v2m) | ||
269 | return 0; | ||
270 | |||
271 | inner_domain = irq_domain_create_tree(of_node_to_fwnode(v2m->node), | ||
272 | &gicv2m_domain_ops, v2m); | ||
273 | if (!inner_domain) { | ||
274 | pr_err("Failed to create GICv2m domain\n"); | ||
275 | return -ENOMEM; | ||
276 | } | ||
277 | |||
278 | inner_domain->bus_token = DOMAIN_BUS_NEXUS; | ||
279 | inner_domain->parent = parent; | ||
280 | pci_domain = pci_msi_create_irq_domain(of_node_to_fwnode(v2m->node), | ||
281 | &gicv2m_msi_domain_info, | ||
282 | inner_domain); | ||
283 | plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(v2m->node), | ||
284 | &gicv2m_pmsi_domain_info, | ||
285 | inner_domain); | ||
286 | if (!pci_domain || !plat_domain) { | ||
287 | pr_err("Failed to create MSI domains\n"); | ||
288 | if (plat_domain) | ||
289 | irq_domain_remove(plat_domain); | ||
290 | if (pci_domain) | ||
291 | irq_domain_remove(pci_domain); | ||
292 | irq_domain_remove(inner_domain); | ||
293 | return -ENOMEM; | ||
294 | } | ||
295 | |||
296 | return 0; | ||
297 | } | ||
298 | |||
227 | static int __init gicv2m_init_one(struct device_node *node, | 299 | static int __init gicv2m_init_one(struct device_node *node, |
228 | struct irq_domain *parent) | 300 | struct irq_domain *parent) |
229 | { | 301 | { |
230 | int ret; | 302 | int ret; |
231 | struct v2m_data *v2m; | 303 | struct v2m_data *v2m; |
232 | struct irq_domain *inner_domain, *pci_domain, *plat_domain; | ||
233 | 304 | ||
234 | v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL); | 305 | v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL); |
235 | if (!v2m) { | 306 | if (!v2m) { |
@@ -237,6 +308,9 @@ static int __init gicv2m_init_one(struct device_node *node, | |||
237 | return -ENOMEM; | 308 | return -ENOMEM; |
238 | } | 309 | } |
239 | 310 | ||
311 | INIT_LIST_HEAD(&v2m->entry); | ||
312 | v2m->node = node; | ||
313 | |||
240 | ret = of_address_to_resource(node, 0, &v2m->res); | 314 | ret = of_address_to_resource(node, 0, &v2m->res); |
241 | if (ret) { | 315 | if (ret) { |
242 | pr_err("Failed to allocate v2m resource.\n"); | 316 | pr_err("Failed to allocate v2m resource.\n"); |
@@ -266,6 +340,17 @@ static int __init gicv2m_init_one(struct device_node *node, | |||
266 | goto err_iounmap; | 340 | goto err_iounmap; |
267 | } | 341 | } |
268 | 342 | ||
343 | /* | ||
344 | * APM X-Gene GICv2m implementation has an erratum where | ||
345 | * the MSI data needs to be the offset from the spi_start | ||
346 | * in order to trigger the correct MSI interrupt. This is | ||
347 | * different from the standard GICv2m implementation where | ||
348 | * the MSI data is the absolute value within the range from | ||
349 | * spi_start to (spi_start + num_spis). | ||
350 | */ | ||
351 | if (readl_relaxed(v2m->base + V2M_MSI_IIDR) == XGENE_GICV2M_MSI_IIDR) | ||
352 | v2m->flags |= GICV2M_NEEDS_SPI_OFFSET; | ||
353 | |||
269 | v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis), | 354 | v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis), |
270 | GFP_KERNEL); | 355 | GFP_KERNEL); |
271 | if (!v2m->bm) { | 356 | if (!v2m->bm) { |
@@ -273,43 +358,13 @@ static int __init gicv2m_init_one(struct device_node *node, | |||
273 | goto err_iounmap; | 358 | goto err_iounmap; |
274 | } | 359 | } |
275 | 360 | ||
276 | inner_domain = irq_domain_add_tree(node, &gicv2m_domain_ops, v2m); | 361 | list_add_tail(&v2m->entry, &v2m_nodes); |
277 | if (!inner_domain) { | ||
278 | pr_err("Failed to create GICv2m domain\n"); | ||
279 | ret = -ENOMEM; | ||
280 | goto err_free_bm; | ||
281 | } | ||
282 | |||
283 | inner_domain->bus_token = DOMAIN_BUS_NEXUS; | ||
284 | inner_domain->parent = parent; | ||
285 | pci_domain = pci_msi_create_irq_domain(node, &gicv2m_msi_domain_info, | ||
286 | inner_domain); | ||
287 | plat_domain = platform_msi_create_irq_domain(node, | ||
288 | &gicv2m_pmsi_domain_info, | ||
289 | inner_domain); | ||
290 | if (!pci_domain || !plat_domain) { | ||
291 | pr_err("Failed to create MSI domains\n"); | ||
292 | ret = -ENOMEM; | ||
293 | goto err_free_domains; | ||
294 | } | ||
295 | |||
296 | spin_lock_init(&v2m->msi_cnt_lock); | ||
297 | |||
298 | pr_info("Node %s: range[%#lx:%#lx], SPI[%d:%d]\n", node->name, | 362 | pr_info("Node %s: range[%#lx:%#lx], SPI[%d:%d]\n", node->name, |
299 | (unsigned long)v2m->res.start, (unsigned long)v2m->res.end, | 363 | (unsigned long)v2m->res.start, (unsigned long)v2m->res.end, |
300 | v2m->spi_start, (v2m->spi_start + v2m->nr_spis)); | 364 | v2m->spi_start, (v2m->spi_start + v2m->nr_spis)); |
301 | 365 | ||
302 | return 0; | 366 | return 0; |
303 | 367 | ||
304 | err_free_domains: | ||
305 | if (plat_domain) | ||
306 | irq_domain_remove(plat_domain); | ||
307 | if (pci_domain) | ||
308 | irq_domain_remove(pci_domain); | ||
309 | if (inner_domain) | ||
310 | irq_domain_remove(inner_domain); | ||
311 | err_free_bm: | ||
312 | kfree(v2m->bm); | ||
313 | err_iounmap: | 368 | err_iounmap: |
314 | iounmap(v2m->base); | 369 | iounmap(v2m->base); |
315 | err_free_v2m: | 370 | err_free_v2m: |
@@ -339,5 +394,9 @@ int __init gicv2m_of_init(struct device_node *node, struct irq_domain *parent) | |||
339 | } | 394 | } |
340 | } | 395 | } |
341 | 396 | ||
397 | if (!ret) | ||
398 | ret = gicv2m_allocate_domains(parent); | ||
399 | if (ret) | ||
400 | gicv2m_teardown(); | ||
342 | return ret; | 401 | return ret; |
343 | } | 402 | } |
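The GICV2M_NEEDS_SPI_OFFSET flag only changes the MSI data payload. With illustrative numbers, spi_start = 64 and an allocated hwirq of 70: a standard GICv2m frame is programmed with the absolute SPI number 70, whereas the X-Gene implementation expects the offset 70 - 64 = 6. Inside gicv2m_compose_msi_msg() this amounts to:

	u32 data = d->hwirq;			/* absolute SPI, e.g. 70 */

	if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
		data -= v2m->spi_start;		/* X-Gene wants 70 - 64 = 6 */

	msg->data = data;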
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c index a7c8c9ffbafd..aee60ed025dc 100644 --- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c | |||
@@ -42,7 +42,6 @@ static struct irq_chip its_msi_irq_chip = { | |||
42 | 42 | ||
43 | struct its_pci_alias { | 43 | struct its_pci_alias { |
44 | struct pci_dev *pdev; | 44 | struct pci_dev *pdev; |
45 | u32 dev_id; | ||
46 | u32 count; | 45 | u32 count; |
47 | }; | 46 | }; |
48 | 47 | ||
@@ -60,7 +59,6 @@ static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data) | |||
60 | { | 59 | { |
61 | struct its_pci_alias *dev_alias = data; | 60 | struct its_pci_alias *dev_alias = data; |
62 | 61 | ||
63 | dev_alias->dev_id = alias; | ||
64 | if (pdev != dev_alias->pdev) | 62 | if (pdev != dev_alias->pdev) |
65 | dev_alias->count += its_pci_msi_vec_count(pdev); | 63 | dev_alias->count += its_pci_msi_vec_count(pdev); |
66 | 64 | ||
@@ -86,7 +84,7 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev, | |||
86 | pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias); | 84 | pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias); |
87 | 85 | ||
88 | /* ITS specific DeviceID, as the core ITS ignores dev. */ | 86 | /* ITS specific DeviceID, as the core ITS ignores dev. */ |
89 | info->scratchpad[0].ul = dev_alias.dev_id; | 87 | info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain, pdev); |
90 | 88 | ||
91 | return msi_info->ops->msi_prepare(domain->parent, | 89 | return msi_info->ops->msi_prepare(domain->parent, |
92 | dev, dev_alias.count, info); | 90 | dev, dev_alias.count, info); |
@@ -125,7 +123,8 @@ static int __init its_pci_msi_init(void) | |||
125 | continue; | 123 | continue; |
126 | } | 124 | } |
127 | 125 | ||
128 | if (!pci_msi_create_irq_domain(np, &its_pci_msi_domain_info, | 126 | if (!pci_msi_create_irq_domain(of_node_to_fwnode(np), |
127 | &its_pci_msi_domain_info, | ||
129 | parent)) { | 128 | parent)) { |
130 | pr_err("%s: unable to create PCI domain\n", | 129 | pr_err("%s: unable to create PCI domain\n", |
131 | np->full_name); | 130 | np->full_name); |
diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c index a86550562779..470b4aa7d62c 100644 --- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c | |||
@@ -29,13 +29,25 @@ static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev, | |||
29 | { | 29 | { |
30 | struct msi_domain_info *msi_info; | 30 | struct msi_domain_info *msi_info; |
31 | u32 dev_id; | 31 | u32 dev_id; |
32 | int ret; | 32 | int ret, index = 0; |
33 | 33 | ||
34 | msi_info = msi_get_domain_info(domain->parent); | 34 | msi_info = msi_get_domain_info(domain->parent); |
35 | 35 | ||
36 | /* Suck the DeviceID out of the msi-parent property */ | 36 | /* Suck the DeviceID out of the msi-parent property */ |
37 | ret = of_property_read_u32_index(dev->of_node, "msi-parent", | 37 | do { |
38 | 1, &dev_id); | 38 | struct of_phandle_args args; |
39 | |||
40 | ret = of_parse_phandle_with_args(dev->of_node, | ||
41 | "msi-parent", "#msi-cells", | ||
42 | index, &args); | ||
43 | if (args.np == irq_domain_get_of_node(domain)) { | ||
44 | if (WARN_ON(args.args_count != 1)) | ||
45 | return -EINVAL; | ||
46 | dev_id = args.args[0]; | ||
47 | break; | ||
48 | } | ||
49 | } while (!ret); | ||
50 | |||
39 | if (ret) | 51 | if (ret) |
40 | return ret; | 52 | return ret; |
41 | 53 | ||
@@ -78,7 +90,8 @@ static int __init its_pmsi_init(void) | |||
78 | continue; | 90 | continue; |
79 | } | 91 | } |
80 | 92 | ||
81 | if (!platform_msi_create_irq_domain(np, &its_pmsi_domain_info, | 93 | if (!platform_msi_create_irq_domain(of_node_to_fwnode(np), |
94 | &its_pmsi_domain_info, | ||
82 | parent)) { | 95 | parent)) { |
83 | pr_err("%s: unable to create platform domain\n", | 96 | pr_err("%s: unable to create platform domain\n", |
84 | np->full_name); | 97 | np->full_name); |
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 25ceae9f7348..e23d1d18f9d6 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
@@ -37,7 +37,10 @@ | |||
37 | #include <asm/cputype.h> | 37 | #include <asm/cputype.h> |
38 | #include <asm/exception.h> | 38 | #include <asm/exception.h> |
39 | 39 | ||
40 | #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1 << 0) | 40 | #include "irq-gic-common.h" |
41 | |||
42 | #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) | ||
43 | #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) | ||
41 | 44 | ||
42 | #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) | 45 | #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) |
43 | 46 | ||
@@ -817,7 +820,22 @@ static int its_alloc_tables(const char *node_name, struct its_node *its) | |||
817 | int i; | 820 | int i; |
818 | int psz = SZ_64K; | 821 | int psz = SZ_64K; |
819 | u64 shr = GITS_BASER_InnerShareable; | 822 | u64 shr = GITS_BASER_InnerShareable; |
820 | u64 cache = GITS_BASER_WaWb; | 823 | u64 cache; |
824 | u64 typer; | ||
825 | u32 ids; | ||
826 | |||
827 | if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) { | ||
828 | /* | ||
829 | * erratum 22375: only alloc 8MB table size | ||
830 | * erratum 24313: ignore memory access type | ||
831 | */ | ||
832 | cache = 0; | ||
833 | ids = 0x14; /* 20 bits, 8MB */ | ||
834 | } else { | ||
835 | cache = GITS_BASER_WaWb; | ||
836 | typer = readq_relaxed(its->base + GITS_TYPER); | ||
837 | ids = GITS_TYPER_DEVBITS(typer); | ||
838 | } | ||
821 | 839 | ||
822 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { | 840 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { |
823 | u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); | 841 | u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); |
@@ -825,6 +843,7 @@ static int its_alloc_tables(const char *node_name, struct its_node *its) | |||
825 | u64 entry_size = GITS_BASER_ENTRY_SIZE(val); | 843 | u64 entry_size = GITS_BASER_ENTRY_SIZE(val); |
826 | int order = get_order(psz); | 844 | int order = get_order(psz); |
827 | int alloc_size; | 845 | int alloc_size; |
846 | int alloc_pages; | ||
828 | u64 tmp; | 847 | u64 tmp; |
829 | void *base; | 848 | void *base; |
830 | 849 | ||
@@ -840,9 +859,6 @@ static int its_alloc_tables(const char *node_name, struct its_node *its) | |||
840 | * For other tables, only allocate a single page. | 859 | * For other tables, only allocate a single page. |
841 | */ | 860 | */ |
842 | if (type == GITS_BASER_TYPE_DEVICE) { | 861 | if (type == GITS_BASER_TYPE_DEVICE) { |
843 | u64 typer = readq_relaxed(its->base + GITS_TYPER); | ||
844 | u32 ids = GITS_TYPER_DEVBITS(typer); | ||
845 | |||
846 | /* | 862 | /* |
847 | * 'order' was initialized earlier to the default page | 863 | * 'order' was initialized earlier to the default page |
848 | * granule of the ITS. We can't have an allocation | 864 | * granule of the ITS. We can't have an allocation |
@@ -859,6 +875,14 @@ static int its_alloc_tables(const char *node_name, struct its_node *its) | |||
859 | } | 875 | } |
860 | 876 | ||
861 | alloc_size = (1 << order) * PAGE_SIZE; | 877 | alloc_size = (1 << order) * PAGE_SIZE; |
878 | alloc_pages = (alloc_size / psz); | ||
879 | if (alloc_pages > GITS_BASER_PAGES_MAX) { | ||
880 | alloc_pages = GITS_BASER_PAGES_MAX; | ||
881 | order = get_order(GITS_BASER_PAGES_MAX * psz); | ||
882 | pr_warn("%s: Device Table too large, reduce its page order to %u (%u pages)\n", | ||
883 | node_name, order, alloc_pages); | ||
884 | } | ||
885 | |||
862 | base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); | 886 | base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); |
863 | if (!base) { | 887 | if (!base) { |
864 | err = -ENOMEM; | 888 | err = -ENOMEM; |
@@ -887,7 +911,7 @@ retry_baser: | |||
887 | break; | 911 | break; |
888 | } | 912 | } |
889 | 913 | ||
890 | val |= (alloc_size / psz) - 1; | 914 | val |= alloc_pages - 1; |
891 | 915 | ||
892 | writeq_relaxed(val, its->base + GITS_BASER + i * 8); | 916 | writeq_relaxed(val, its->base + GITS_BASER + i * 8); |
893 | tmp = readq_relaxed(its->base + GITS_BASER + i * 8); | 917 | tmp = readq_relaxed(its->base + GITS_BASER + i * 8); |
@@ -1241,15 +1265,19 @@ static int its_irq_gic_domain_alloc(struct irq_domain *domain, | |||
1241 | unsigned int virq, | 1265 | unsigned int virq, |
1242 | irq_hw_number_t hwirq) | 1266 | irq_hw_number_t hwirq) |
1243 | { | 1267 | { |
1244 | struct of_phandle_args args; | 1268 | struct irq_fwspec fwspec; |
1245 | 1269 | ||
1246 | args.np = domain->parent->of_node; | 1270 | if (irq_domain_get_of_node(domain->parent)) { |
1247 | args.args_count = 3; | 1271 | fwspec.fwnode = domain->parent->fwnode; |
1248 | args.args[0] = GIC_IRQ_TYPE_LPI; | 1272 | fwspec.param_count = 3; |
1249 | args.args[1] = hwirq; | 1273 | fwspec.param[0] = GIC_IRQ_TYPE_LPI; |
1250 | args.args[2] = IRQ_TYPE_EDGE_RISING; | 1274 | fwspec.param[1] = hwirq; |
1275 | fwspec.param[2] = IRQ_TYPE_EDGE_RISING; | ||
1276 | } else { | ||
1277 | return -EINVAL; | ||
1278 | } | ||
1251 | 1279 | ||
1252 | return irq_domain_alloc_irqs_parent(domain, virq, 1, &args); | 1280 | return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); |
1253 | } | 1281 | } |
1254 | 1282 | ||
1255 | static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | 1283 | static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, |
@@ -1370,6 +1398,33 @@ static int its_force_quiescent(void __iomem *base) | |||
1370 | } | 1398 | } |
1371 | } | 1399 | } |
1372 | 1400 | ||
1401 | static void __maybe_unused its_enable_quirk_cavium_22375(void *data) | ||
1402 | { | ||
1403 | struct its_node *its = data; | ||
1404 | |||
1405 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; | ||
1406 | } | ||
1407 | |||
1408 | static const struct gic_quirk its_quirks[] = { | ||
1409 | #ifdef CONFIG_CAVIUM_ERRATUM_22375 | ||
1410 | { | ||
1411 | .desc = "ITS: Cavium errata 22375, 24313", | ||
1412 | .iidr = 0xa100034c, /* ThunderX pass 1.x */ | ||
1413 | .mask = 0xffff0fff, | ||
1414 | .init = its_enable_quirk_cavium_22375, | ||
1415 | }, | ||
1416 | #endif | ||
1417 | { | ||
1418 | } | ||
1419 | }; | ||
1420 | |||
1421 | static void its_enable_quirks(struct its_node *its) | ||
1422 | { | ||
1423 | u32 iidr = readl_relaxed(its->base + GITS_IIDR); | ||
1424 | |||
1425 | gic_enable_quirks(iidr, its_quirks, its); | ||
1426 | } | ||
1427 | |||
1373 | static int its_probe(struct device_node *node, struct irq_domain *parent) | 1428 | static int its_probe(struct device_node *node, struct irq_domain *parent) |
1374 | { | 1429 | { |
1375 | struct resource res; | 1430 | struct resource res; |
@@ -1428,6 +1483,8 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) | |||
1428 | } | 1483 | } |
1429 | its->cmd_write = its->cmd_base; | 1484 | its->cmd_write = its->cmd_base; |
1430 | 1485 | ||
1486 | its_enable_quirks(its); | ||
1487 | |||
1431 | err = its_alloc_tables(node->full_name, its); | 1488 | err = its_alloc_tables(node->full_name, its); |
1432 | if (err) | 1489 | if (err) |
1433 | goto out_free_cmd; | 1490 | goto out_free_cmd; |
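The quirk table introduced above keys workarounds on GITS_IIDR with a mask, so only ThunderX pass 1.x parts get ITS_FLAGS_WORKAROUND_CAVIUM_22375 set before the tables are sized. The matching helper lives in irq-gic-common.c; a rough, illustrative equivalent of such a walk (details may differ from the real gic_enable_quirks()):

	struct gic_quirk {
		const char	*desc;
		u32		iidr;
		u32		mask;
		void		(*init)(void *data);
	};

	/* Walk a table terminated by an all-zero entry (desc == NULL). */
	static void example_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
					  void *data)
	{
		for (; quirks->desc; quirks++) {
			if (quirks->iidr != (iidr & quirks->mask))
				continue;
			quirks->init(data);
			pr_info("enabling workaround for %s\n", quirks->desc);
		}
	}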
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 36ecfc870e5a..d7be6ddc34f6 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c | |||
@@ -108,57 +108,17 @@ static void gic_redist_wait_for_rwp(void) | |||
108 | gic_do_wait_for_rwp(gic_data_rdist_rd_base()); | 108 | gic_do_wait_for_rwp(gic_data_rdist_rd_base()); |
109 | } | 109 | } |
110 | 110 | ||
111 | /* Low level accessors */ | 111 | #ifdef CONFIG_ARM64 |
112 | static u64 __maybe_unused gic_read_iar(void) | 112 | static DEFINE_STATIC_KEY_FALSE(is_cavium_thunderx); |
113 | { | ||
114 | u64 irqstat; | ||
115 | |||
116 | asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat)); | ||
117 | return irqstat; | ||
118 | } | ||
119 | |||
120 | static void __maybe_unused gic_write_pmr(u64 val) | ||
121 | { | ||
122 | asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val)); | ||
123 | } | ||
124 | |||
125 | static void __maybe_unused gic_write_ctlr(u64 val) | ||
126 | { | ||
127 | asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val)); | ||
128 | isb(); | ||
129 | } | ||
130 | |||
131 | static void __maybe_unused gic_write_grpen1(u64 val) | ||
132 | { | ||
133 | asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val)); | ||
134 | isb(); | ||
135 | } | ||
136 | 113 | ||
137 | static void __maybe_unused gic_write_sgi1r(u64 val) | 114 | static u64 __maybe_unused gic_read_iar(void) |
138 | { | ||
139 | asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val)); | ||
140 | } | ||
141 | |||
142 | static void gic_enable_sre(void) | ||
143 | { | 115 | { |
144 | u64 val; | 116 | if (static_branch_unlikely(&is_cavium_thunderx)) |
145 | 117 | return gic_read_iar_cavium_thunderx(); | |
146 | asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val)); | 118 | else |
147 | val |= ICC_SRE_EL1_SRE; | 119 | return gic_read_iar_common(); |
148 | asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val)); | ||
149 | isb(); | ||
150 | |||
151 | /* | ||
152 | * Need to check that the SRE bit has actually been set. If | ||
153 | * not, it means that SRE is disabled at EL2. We're going to | ||
154 | * die painfully, and there is nothing we can do about it. | ||
155 | * | ||
156 | * Kindly inform the luser. | ||
157 | */ | ||
158 | asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val)); | ||
159 | if (!(val & ICC_SRE_EL1_SRE)) | ||
160 | pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n"); | ||
161 | } | 120 | } |
121 | #endif | ||
162 | 122 | ||
163 | static void gic_enable_redist(bool enable) | 123 | static void gic_enable_redist(bool enable) |
164 | { | 124 | { |
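The hand-rolled mrs_s/msr_s accessors move into the arch headers, and gic_read_iar() becomes a static-key dispatch so the Cavium ThunderX erratum 23154 path (which needs extra synchronisation before reading ICC_IAR1_EL1) adds no overhead on unaffected CPUs once the branch is patched. A condensed sketch of the pattern, assuming the two helpers come from <asm/arch_gicv3.h> as in the hunk above:

	#include <linux/jump_label.h>

	static DEFINE_STATIC_KEY_FALSE(needs_iar_workaround);

	static u64 read_iar(void)
	{
		/* Becomes a straight-line NOP/branch after runtime patching. */
		if (static_branch_unlikely(&needs_iar_workaround))
			return gic_read_iar_cavium_thunderx();
		return gic_read_iar_common();
	}

	/* Flipped once from the quirk-detection path: */
	static void enable_iar_workaround(void)
	{
		static_branch_enable(&needs_iar_workaround);
	}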
@@ -359,11 +319,11 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) | |||
359 | return 0; | 319 | return 0; |
360 | } | 320 | } |
361 | 321 | ||
362 | static u64 gic_mpidr_to_affinity(u64 mpidr) | 322 | static u64 gic_mpidr_to_affinity(unsigned long mpidr) |
363 | { | 323 | { |
364 | u64 aff; | 324 | u64 aff; |
365 | 325 | ||
366 | aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 | | 326 | aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 | |
367 | MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | | 327 | MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | |
368 | MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | | 328 | MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | |
369 | MPIDR_AFFINITY_LEVEL(mpidr, 0)); | 329 | MPIDR_AFFINITY_LEVEL(mpidr, 0)); |
@@ -373,7 +333,7 @@ static u64 gic_mpidr_to_affinity(u64 mpidr) | |||
373 | 333 | ||
374 | static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) | 334 | static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) |
375 | { | 335 | { |
376 | u64 irqnr; | 336 | u32 irqnr; |
377 | 337 | ||
378 | do { | 338 | do { |
379 | irqnr = gic_read_iar(); | 339 | irqnr = gic_read_iar(); |
@@ -432,12 +392,12 @@ static void __init gic_dist_init(void) | |||
432 | */ | 392 | */ |
433 | affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id())); | 393 | affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id())); |
434 | for (i = 32; i < gic_data.irq_nr; i++) | 394 | for (i = 32; i < gic_data.irq_nr; i++) |
435 | writeq_relaxed(affinity, base + GICD_IROUTER + i * 8); | 395 | gic_write_irouter(affinity, base + GICD_IROUTER + i * 8); |
436 | } | 396 | } |
437 | 397 | ||
438 | static int gic_populate_rdist(void) | 398 | static int gic_populate_rdist(void) |
439 | { | 399 | { |
440 | u64 mpidr = cpu_logical_map(smp_processor_id()); | 400 | unsigned long mpidr = cpu_logical_map(smp_processor_id()); |
441 | u64 typer; | 401 | u64 typer; |
442 | u32 aff; | 402 | u32 aff; |
443 | int i; | 403 | int i; |
@@ -463,15 +423,14 @@ static int gic_populate_rdist(void) | |||
463 | } | 423 | } |
464 | 424 | ||
465 | do { | 425 | do { |
466 | typer = readq_relaxed(ptr + GICR_TYPER); | 426 | typer = gic_read_typer(ptr + GICR_TYPER); |
467 | if ((typer >> 32) == aff) { | 427 | if ((typer >> 32) == aff) { |
468 | u64 offset = ptr - gic_data.redist_regions[i].redist_base; | 428 | u64 offset = ptr - gic_data.redist_regions[i].redist_base; |
469 | gic_data_rdist_rd_base() = ptr; | 429 | gic_data_rdist_rd_base() = ptr; |
470 | gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset; | 430 | gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset; |
471 | pr_info("CPU%d: found redistributor %llx region %d:%pa\n", | 431 | pr_info("CPU%d: found redistributor %lx region %d:%pa\n", |
472 | smp_processor_id(), | 432 | smp_processor_id(), mpidr, i, |
473 | (unsigned long long)mpidr, | 433 | &gic_data_rdist()->phys_base); |
474 | i, &gic_data_rdist()->phys_base); | ||
475 | return 0; | 434 | return 0; |
476 | } | 435 | } |
477 | 436 | ||
@@ -486,15 +445,22 @@ static int gic_populate_rdist(void) | |||
486 | } | 445 | } |
487 | 446 | ||
488 | /* We couldn't even deal with ourselves... */ | 447 | /* We couldn't even deal with ourselves... */ |
489 | WARN(true, "CPU%d: mpidr %llx has no re-distributor!\n", | 448 | WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n", |
490 | smp_processor_id(), (unsigned long long)mpidr); | 449 | smp_processor_id(), mpidr); |
491 | return -ENODEV; | 450 | return -ENODEV; |
492 | } | 451 | } |
493 | 452 | ||
494 | static void gic_cpu_sys_reg_init(void) | 453 | static void gic_cpu_sys_reg_init(void) |
495 | { | 454 | { |
496 | /* Enable system registers */ | 455 | /* |
497 | gic_enable_sre(); | 456 | * Need to check that the SRE bit has actually been set. If |
457 | * not, it means that SRE is disabled at EL2. We're going to | ||
458 | * die painfully, and there is nothing we can do about it. | ||
459 | * | ||
460 | * Kindly inform the luser. | ||
461 | */ | ||
462 | if (!gic_enable_sre()) | ||
463 | pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n"); | ||
498 | 464 | ||
499 | /* Set priority mask register */ | 465 | /* Set priority mask register */ |
500 | gic_write_pmr(DEFAULT_PMR_VALUE); | 466 | gic_write_pmr(DEFAULT_PMR_VALUE); |
@@ -557,10 +523,10 @@ static struct notifier_block gic_cpu_notifier = { | |||
557 | }; | 523 | }; |
558 | 524 | ||
559 | static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, | 525 | static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, |
560 | u64 cluster_id) | 526 | unsigned long cluster_id) |
561 | { | 527 | { |
562 | int cpu = *base_cpu; | 528 | int cpu = *base_cpu; |
563 | u64 mpidr = cpu_logical_map(cpu); | 529 | unsigned long mpidr = cpu_logical_map(cpu); |
564 | u16 tlist = 0; | 530 | u16 tlist = 0; |
565 | 531 | ||
566 | while (cpu < nr_cpu_ids) { | 532 | while (cpu < nr_cpu_ids) { |
@@ -621,7 +587,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) | |||
621 | smp_wmb(); | 587 | smp_wmb(); |
622 | 588 | ||
623 | for_each_cpu(cpu, mask) { | 589 | for_each_cpu(cpu, mask) { |
624 | u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL; | 590 | unsigned long cluster_id = cpu_logical_map(cpu) & ~0xffUL; |
625 | u16 tlist; | 591 | u16 tlist; |
626 | 592 | ||
627 | tlist = gic_compute_target_list(&cpu, mask, cluster_id); | 593 | tlist = gic_compute_target_list(&cpu, mask, cluster_id); |
@@ -657,7 +623,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | |||
657 | reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8); | 623 | reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8); |
658 | val = gic_mpidr_to_affinity(cpu_logical_map(cpu)); | 624 | val = gic_mpidr_to_affinity(cpu_logical_map(cpu)); |
659 | 625 | ||
660 | writeq_relaxed(val, reg); | 626 | gic_write_irouter(val, reg); |
661 | 627 | ||
662 | /* | 628 | /* |
663 | * If the interrupt was enabled, enable it again. Otherwise, | 629 | * If the interrupt was enabled, enable it again. Otherwise, |
@@ -771,32 +737,34 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, | |||
771 | return 0; | 737 | return 0; |
772 | } | 738 | } |
773 | 739 | ||
774 | static int gic_irq_domain_xlate(struct irq_domain *d, | 740 | static int gic_irq_domain_translate(struct irq_domain *d, |
775 | struct device_node *controller, | 741 | struct irq_fwspec *fwspec, |
776 | const u32 *intspec, unsigned int intsize, | 742 | unsigned long *hwirq, |
777 | unsigned long *out_hwirq, unsigned int *out_type) | 743 | unsigned int *type) |
778 | { | 744 | { |
779 | if (d->of_node != controller) | 745 | if (is_of_node(fwspec->fwnode)) { |
780 | return -EINVAL; | 746 | if (fwspec->param_count < 3) |
781 | if (intsize < 3) | 747 | return -EINVAL; |
782 | return -EINVAL; | ||
783 | 748 | ||
784 | switch(intspec[0]) { | 749 | switch (fwspec->param[0]) { |
785 | case 0: /* SPI */ | 750 | case 0: /* SPI */ |
786 | *out_hwirq = intspec[1] + 32; | 751 | *hwirq = fwspec->param[1] + 32; |
787 | break; | 752 | break; |
788 | case 1: /* PPI */ | 753 | case 1: /* PPI */ |
789 | *out_hwirq = intspec[1] + 16; | 754 | *hwirq = fwspec->param[1] + 16; |
790 | break; | 755 | break; |
791 | case GIC_IRQ_TYPE_LPI: /* LPI */ | 756 | case GIC_IRQ_TYPE_LPI: /* LPI */ |
792 | *out_hwirq = intspec[1]; | 757 | *hwirq = fwspec->param[1]; |
793 | break; | 758 | break; |
794 | default: | 759 | default: |
795 | return -EINVAL; | 760 | return -EINVAL; |
761 | } | ||
762 | |||
763 | *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; | ||
764 | return 0; | ||
796 | } | 765 | } |
797 | 766 | ||
798 | *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; | 767 | return -EINVAL; |
799 | return 0; | ||
800 | } | 768 | } |
801 | 769 | ||
802 | static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | 770 | static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, |
@@ -805,10 +773,9 @@ static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | |||
805 | int i, ret; | 773 | int i, ret; |
806 | irq_hw_number_t hwirq; | 774 | irq_hw_number_t hwirq; |
807 | unsigned int type = IRQ_TYPE_NONE; | 775 | unsigned int type = IRQ_TYPE_NONE; |
808 | struct of_phandle_args *irq_data = arg; | 776 | struct irq_fwspec *fwspec = arg; |
809 | 777 | ||
810 | ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args, | 778 | ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type); |
811 | irq_data->args_count, &hwirq, &type); | ||
812 | if (ret) | 779 | if (ret) |
813 | return ret; | 780 | return ret; |
814 | 781 | ||
@@ -831,11 +798,19 @@ static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq, | |||
831 | } | 798 | } |
832 | 799 | ||
833 | static const struct irq_domain_ops gic_irq_domain_ops = { | 800 | static const struct irq_domain_ops gic_irq_domain_ops = { |
834 | .xlate = gic_irq_domain_xlate, | 801 | .translate = gic_irq_domain_translate, |
835 | .alloc = gic_irq_domain_alloc, | 802 | .alloc = gic_irq_domain_alloc, |
836 | .free = gic_irq_domain_free, | 803 | .free = gic_irq_domain_free, |
837 | }; | 804 | }; |
838 | 805 | ||
806 | static void gicv3_enable_quirks(void) | ||
807 | { | ||
808 | #ifdef CONFIG_ARM64 | ||
809 | if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_23154)) | ||
810 | static_branch_enable(&is_cavium_thunderx); | ||
811 | #endif | ||
812 | } | ||
813 | |||
839 | static int __init gic_of_init(struct device_node *node, struct device_node *parent) | 814 | static int __init gic_of_init(struct device_node *node, struct device_node *parent) |
840 | { | 815 | { |
841 | void __iomem *dist_base; | 816 | void __iomem *dist_base; |
@@ -901,6 +876,8 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare | |||
901 | gic_data.nr_redist_regions = nr_redist_regions; | 876 | gic_data.nr_redist_regions = nr_redist_regions; |
902 | gic_data.redist_stride = redist_stride; | 877 | gic_data.redist_stride = redist_stride; |
903 | 878 | ||
879 | gicv3_enable_quirks(); | ||
880 | |||
904 | /* | 881 | /* |
905 | * Find out how many interrupts are supported. | 882 | * Find out how many interrupts are supported. |
906 | * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI) | 883 | * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI) |
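With .xlate replaced by .translate throughout this file, stacked domains no longer hand the GIC an of_phandle_args; they describe the interrupt with an irq_fwspec whose param[] cells keep the familiar three-cell DT layout (type, number, trigger). A hedged example of what a child domain sitting on top of this one would now pass down (function and variable names here are illustrative, not from the patch):

	static int example_alloc_one_spi(struct irq_domain *child,
					 unsigned int virq, u32 spi)
	{
		struct irq_fwspec fwspec;

		fwspec.fwnode      = child->parent->fwnode;	/* the GIC's fwnode */
		fwspec.param_count = 3;
		fwspec.param[0]    = 0;				/* 0 = SPI          */
		fwspec.param[1]    = spi;			/* SPI number       */
		fwspec.param[2]    = IRQ_TYPE_LEVEL_HIGH;	/* trigger type     */

		return irq_domain_alloc_irqs_parent(child, virq, 1, &fwspec);
	}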
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 982c09c2d791..1d0e76855106 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
@@ -51,6 +51,19 @@ | |||
51 | 51 | ||
52 | #include "irq-gic-common.h" | 52 | #include "irq-gic-common.h" |
53 | 53 | ||
54 | #ifdef CONFIG_ARM64 | ||
55 | #include <asm/cpufeature.h> | ||
56 | |||
57 | static void gic_check_cpu_features(void) | ||
58 | { | ||
59 | WARN_TAINT_ONCE(cpus_have_cap(ARM64_HAS_SYSREG_GIC_CPUIF), | ||
60 | TAINT_CPU_OUT_OF_SPEC, | ||
61 | "GICv3 system registers enabled, broken firmware!\n"); | ||
62 | } | ||
63 | #else | ||
64 | #define gic_check_cpu_features() do { } while(0) | ||
65 | #endif | ||
66 | |||
54 | union gic_base { | 67 | union gic_base { |
55 | void __iomem *common_base; | 68 | void __iomem *common_base; |
56 | void __percpu * __iomem *percpu_base; | 69 | void __percpu * __iomem *percpu_base; |
@@ -903,28 +916,39 @@ static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq) | |||
903 | { | 916 | { |
904 | } | 917 | } |
905 | 918 | ||
906 | static int gic_irq_domain_xlate(struct irq_domain *d, | 919 | static int gic_irq_domain_translate(struct irq_domain *d, |
907 | struct device_node *controller, | 920 | struct irq_fwspec *fwspec, |
908 | const u32 *intspec, unsigned int intsize, | 921 | unsigned long *hwirq, |
909 | unsigned long *out_hwirq, unsigned int *out_type) | 922 | unsigned int *type) |
910 | { | 923 | { |
911 | unsigned long ret = 0; | 924 | if (is_of_node(fwspec->fwnode)) { |
925 | if (fwspec->param_count < 3) | ||
926 | return -EINVAL; | ||
912 | 927 | ||
913 | if (d->of_node != controller) | 928 | /* Get the interrupt number and add 16 to skip over SGIs */ |
914 | return -EINVAL; | 929 | *hwirq = fwspec->param[1] + 16; |
915 | if (intsize < 3) | ||
916 | return -EINVAL; | ||
917 | 930 | ||
918 | /* Get the interrupt number and add 16 to skip over SGIs */ | 931 | /* |
919 | *out_hwirq = intspec[1] + 16; | 932 | * For SPIs, we need to add 16 more to get the GIC irq |
933 | * ID number | ||
934 | */ | ||
935 | if (!fwspec->param[0]) | ||
936 | *hwirq += 16; | ||
920 | 937 | ||
921 | /* For SPIs, we need to add 16 more to get the GIC irq ID number */ | 938 | *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; |
922 | if (!intspec[0]) | 939 | return 0; |
923 | *out_hwirq += 16; | 940 | } |
924 | 941 | ||
925 | *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; | 942 | if (fwspec->fwnode->type == FWNODE_IRQCHIP) { |
943 | if (fwspec->param_count != 2) | ||
944 | return -EINVAL; | ||
926 | 945 | ||
927 | return ret; | 946 | *hwirq = fwspec->param[0]; |
947 | *type = fwspec->param[1]; | ||
948 | return 0; | ||
949 | } | ||
950 | |||
951 | return -EINVAL; | ||
928 | } | 952 | } |
929 | 953 | ||
930 | #ifdef CONFIG_SMP | 954 | #ifdef CONFIG_SMP |
@@ -952,10 +976,9 @@ static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | |||
952 | int i, ret; | 976 | int i, ret; |
953 | irq_hw_number_t hwirq; | 977 | irq_hw_number_t hwirq; |
954 | unsigned int type = IRQ_TYPE_NONE; | 978 | unsigned int type = IRQ_TYPE_NONE; |
955 | struct of_phandle_args *irq_data = arg; | 979 | struct irq_fwspec *fwspec = arg; |
956 | 980 | ||
957 | ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args, | 981 | ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type); |
958 | irq_data->args_count, &hwirq, &type); | ||
959 | if (ret) | 982 | if (ret) |
960 | return ret; | 983 | return ret; |
961 | 984 | ||
@@ -966,7 +989,7 @@ static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | |||
966 | } | 989 | } |
967 | 990 | ||
968 | static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = { | 991 | static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = { |
969 | .xlate = gic_irq_domain_xlate, | 992 | .translate = gic_irq_domain_translate, |
970 | .alloc = gic_irq_domain_alloc, | 993 | .alloc = gic_irq_domain_alloc, |
971 | .free = irq_domain_free_irqs_top, | 994 | .free = irq_domain_free_irqs_top, |
972 | }; | 995 | }; |
@@ -974,12 +997,11 @@ static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = { | |||
974 | static const struct irq_domain_ops gic_irq_domain_ops = { | 997 | static const struct irq_domain_ops gic_irq_domain_ops = { |
975 | .map = gic_irq_domain_map, | 998 | .map = gic_irq_domain_map, |
976 | .unmap = gic_irq_domain_unmap, | 999 | .unmap = gic_irq_domain_unmap, |
977 | .xlate = gic_irq_domain_xlate, | ||
978 | }; | 1000 | }; |
979 | 1001 | ||
980 | static void __init __gic_init_bases(unsigned int gic_nr, int irq_start, | 1002 | static void __init __gic_init_bases(unsigned int gic_nr, int irq_start, |
981 | void __iomem *dist_base, void __iomem *cpu_base, | 1003 | void __iomem *dist_base, void __iomem *cpu_base, |
982 | u32 percpu_offset, struct device_node *node) | 1004 | u32 percpu_offset, struct fwnode_handle *handle) |
983 | { | 1005 | { |
984 | irq_hw_number_t hwirq_base; | 1006 | irq_hw_number_t hwirq_base; |
985 | struct gic_chip_data *gic; | 1007 | struct gic_chip_data *gic; |
@@ -987,6 +1009,8 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start, | |||
987 | 1009 | ||
988 | BUG_ON(gic_nr >= MAX_GIC_NR); | 1010 | BUG_ON(gic_nr >= MAX_GIC_NR); |
989 | 1011 | ||
1012 | gic_check_cpu_features(); | ||
1013 | |||
990 | gic = &gic_data[gic_nr]; | 1014 | gic = &gic_data[gic_nr]; |
991 | #ifdef CONFIG_GIC_NON_BANKED | 1015 | #ifdef CONFIG_GIC_NON_BANKED |
992 | if (percpu_offset) { /* Frankein-GIC without banked registers... */ | 1016 | if (percpu_offset) { /* Frankein-GIC without banked registers... */ |
@@ -1031,11 +1055,11 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start, | |||
1031 | gic_irqs = 1020; | 1055 | gic_irqs = 1020; |
1032 | gic->gic_irqs = gic_irqs; | 1056 | gic->gic_irqs = gic_irqs; |
1033 | 1057 | ||
1034 | if (node) { /* DT case */ | 1058 | if (handle) { /* DT/ACPI */ |
1035 | gic->domain = irq_domain_add_linear(node, gic_irqs, | 1059 | gic->domain = irq_domain_create_linear(handle, gic_irqs, |
1036 | &gic_irq_domain_hierarchy_ops, | 1060 | &gic_irq_domain_hierarchy_ops, |
1037 | gic); | 1061 | gic); |
1038 | } else { /* Non-DT case */ | 1062 | } else { /* Legacy support */ |
1039 | /* | 1063 | /* |
1040 | * For primary GICs, skip over SGIs. | 1064 | * For primary GICs, skip over SGIs. |
1041 | * For secondary GICs, skip over PPIs, too. | 1065 | * For secondary GICs, skip over PPIs, too. |
@@ -1058,7 +1082,7 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start, | |||
1058 | irq_base = irq_start; | 1082 | irq_base = irq_start; |
1059 | } | 1083 | } |
1060 | 1084 | ||
1061 | gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base, | 1085 | gic->domain = irq_domain_add_legacy(NULL, gic_irqs, irq_base, |
1062 | hwirq_base, &gic_irq_domain_ops, gic); | 1086 | hwirq_base, &gic_irq_domain_ops, gic); |
1063 | } | 1087 | } |
1064 | 1088 | ||
@@ -1087,17 +1111,15 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start, | |||
1087 | gic_pm_init(gic); | 1111 | gic_pm_init(gic); |
1088 | } | 1112 | } |
1089 | 1113 | ||
1090 | void __init gic_init_bases(unsigned int gic_nr, int irq_start, | 1114 | void __init gic_init(unsigned int gic_nr, int irq_start, |
1091 | void __iomem *dist_base, void __iomem *cpu_base, | 1115 | void __iomem *dist_base, void __iomem *cpu_base) |
1092 | u32 percpu_offset, struct device_node *node) | ||
1093 | { | 1116 | { |
1094 | /* | 1117 | /* |
1095 | * Non-DT/ACPI systems won't run a hypervisor, so let's not | 1118 | * Non-DT/ACPI systems won't run a hypervisor, so let's not |
1096 | * bother with these... | 1119 | * bother with these... |
1097 | */ | 1120 | */ |
1098 | static_key_slow_dec(&supports_deactivate); | 1121 | static_key_slow_dec(&supports_deactivate); |
1099 | __gic_init_bases(gic_nr, irq_start, dist_base, cpu_base, | 1122 | __gic_init_bases(gic_nr, irq_start, dist_base, cpu_base, 0, NULL); |
1100 | percpu_offset, node); | ||
1101 | } | 1123 | } |
1102 | 1124 | ||
1103 | #ifdef CONFIG_OF | 1125 | #ifdef CONFIG_OF |
@@ -1168,7 +1190,8 @@ gic_of_init(struct device_node *node, struct device_node *parent) | |||
1168 | if (of_property_read_u32(node, "cpu-offset", &percpu_offset)) | 1190 | if (of_property_read_u32(node, "cpu-offset", &percpu_offset)) |
1169 | percpu_offset = 0; | 1191 | percpu_offset = 0; |
1170 | 1192 | ||
1171 | __gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node); | 1193 | __gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, |
1194 | &node->fwnode); | ||
1172 | if (!gic_cnt) | 1195 | if (!gic_cnt) |
1173 | gic_init_physaddr(node); | 1196 | gic_init_physaddr(node); |
1174 | 1197 | ||
@@ -1191,6 +1214,7 @@ IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init); | |||
1191 | IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init); | 1214 | IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init); |
1192 | IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init); | 1215 | IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init); |
1193 | IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init); | 1216 | IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init); |
1217 | IRQCHIP_DECLARE(pl390, "arm,pl390", gic_of_init); | ||
1194 | 1218 | ||
1195 | #endif | 1219 | #endif |
1196 | 1220 | ||
@@ -1242,6 +1266,7 @@ int __init | |||
1242 | gic_v2_acpi_init(struct acpi_table_header *table) | 1266 | gic_v2_acpi_init(struct acpi_table_header *table) |
1243 | { | 1267 | { |
1244 | void __iomem *cpu_base, *dist_base; | 1268 | void __iomem *cpu_base, *dist_base; |
1269 | struct fwnode_handle *domain_handle; | ||
1245 | int count; | 1270 | int count; |
1246 | 1271 | ||
1247 | /* Collect CPU base addresses */ | 1272 | /* Collect CPU base addresses */ |
@@ -1292,14 +1317,19 @@ gic_v2_acpi_init(struct acpi_table_header *table) | |||
1292 | static_key_slow_dec(&supports_deactivate); | 1317 | static_key_slow_dec(&supports_deactivate); |
1293 | 1318 | ||
1294 | /* | 1319 | /* |
1295 | * Initialize zero GIC instance (no multi-GIC support). Also, set GIC | 1320 | * Initialize GIC instance zero (no multi-GIC support). |
1296 | * as default IRQ domain to allow for GSI registration and GSI to IRQ | ||
1297 | * number translation (see acpi_register_gsi() and acpi_gsi_to_irq()). | ||
1298 | */ | 1321 | */ |
1299 | __gic_init_bases(0, -1, dist_base, cpu_base, 0, NULL); | 1322 | domain_handle = irq_domain_alloc_fwnode(dist_base); |
1300 | irq_set_default_host(gic_data[0].domain); | 1323 | if (!domain_handle) { |
1324 | pr_err("Unable to allocate domain handle\n"); | ||
1325 | iounmap(cpu_base); | ||
1326 | iounmap(dist_base); | ||
1327 | return -ENOMEM; | ||
1328 | } | ||
1329 | |||
1330 | __gic_init_bases(0, -1, dist_base, cpu_base, 0, domain_handle); | ||
1301 | 1331 | ||
1302 | acpi_irq_model = ACPI_IRQ_MODEL_GIC; | 1332 | acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle); |
1303 | return 0; | 1333 | return 0; |
1304 | } | 1334 | } |
1305 | #endif | 1335 | #endif |
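The ACPI path above no longer leans on irq_set_default_host(); instead the GIC domain is keyed by a synthetic fwnode handle, and that same handle is registered with the ACPI core so later GSI lookups resolve to this domain. A condensed sketch of the plumbing (error handling trimmed, names as in the driver where they exist):

	static int example_acpi_register_gic(void __iomem *dist_base,
					     unsigned int nr_irqs,
					     const struct irq_domain_ops *ops,
					     void *host_data)
	{
		struct fwnode_handle *handle = irq_domain_alloc_fwnode(dist_base);
		struct irq_domain *d;

		if (!handle)
			return -ENOMEM;

		d = irq_domain_create_linear(handle, nr_irqs, ops, host_data);
		if (!d) {
			irq_domain_free_fwnode(handle);
			return -ENOMEM;
		}

		acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, handle);
		return 0;
	}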
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c index 8f3ca8f3a62b..9688d2e2a636 100644 --- a/drivers/irqchip/irq-hip04.c +++ b/drivers/irqchip/irq-hip04.c | |||
@@ -325,7 +325,7 @@ static int hip04_irq_domain_xlate(struct irq_domain *d, | |||
325 | { | 325 | { |
326 | unsigned long ret = 0; | 326 | unsigned long ret = 0; |
327 | 327 | ||
328 | if (d->of_node != controller) | 328 | if (irq_domain_get_of_node(d) != controller) |
329 | return -EINVAL; | 329 | return -EINVAL; |
330 | if (intsize < 3) | 330 | if (intsize < 3) |
331 | return -EINVAL; | 331 | return -EINVAL; |
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c index e484fd255321..6b304eb39bd2 100644 --- a/drivers/irqchip/irq-i8259.c +++ b/drivers/irqchip/irq-i8259.c | |||
@@ -377,8 +377,8 @@ int __init i8259_of_init(struct device_node *node, struct device_node *parent) | |||
377 | } | 377 | } |
378 | 378 | ||
379 | domain = __init_i8259_irqs(node); | 379 | domain = __init_i8259_irqs(node); |
380 | irq_set_handler_data(parent_irq, domain); | 380 | irq_set_chained_handler_and_data(parent_irq, i8259_irq_dispatch, |
381 | irq_set_chained_handler(parent_irq, i8259_irq_dispatch); | 381 | domain); |
382 | return 0; | 382 | return 0; |
383 | } | 383 | } |
384 | IRQCHIP_DECLARE(i8259, "intel,i8259", i8259_of_init); | 384 | IRQCHIP_DECLARE(i8259, "intel,i8259", i8259_of_init); |
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c index e48d3305456f..15af9a9753e5 100644 --- a/drivers/irqchip/irq-imx-gpcv2.c +++ b/drivers/irqchip/irq-imx-gpcv2.c | |||
@@ -150,49 +150,42 @@ static struct irq_chip gpcv2_irqchip_data_chip = { | |||
150 | #endif | 150 | #endif |
151 | }; | 151 | }; |
152 | 152 | ||
153 | static int imx_gpcv2_domain_xlate(struct irq_domain *domain, | 153 | static int imx_gpcv2_domain_translate(struct irq_domain *d, |
154 | struct device_node *controller, | 154 | struct irq_fwspec *fwspec, |
155 | const u32 *intspec, | 155 | unsigned long *hwirq, |
156 | unsigned int intsize, | 156 | unsigned int *type) |
157 | unsigned long *out_hwirq, | ||
158 | unsigned int *out_type) | ||
159 | { | 157 | { |
160 | /* Shouldn't happen, really... */ | 158 | if (is_of_node(fwspec->fwnode)) { |
161 | if (domain->of_node != controller) | 159 | if (fwspec->param_count != 3) |
162 | return -EINVAL; | 160 | return -EINVAL; |
163 | 161 | ||
164 | /* Not GIC compliant */ | 162 | /* No PPI should point to this domain */ |
165 | if (intsize != 3) | 163 | if (fwspec->param[0] != 0) |
166 | return -EINVAL; | 164 | return -EINVAL; |
167 | 165 | ||
168 | /* No PPI should point to this domain */ | 166 | *hwirq = fwspec->param[1]; |
169 | if (intspec[0] != 0) | 167 | *type = fwspec->param[2]; |
170 | return -EINVAL; | 168 | return 0; |
169 | } | ||
171 | 170 | ||
172 | *out_hwirq = intspec[1]; | 171 | return -EINVAL; |
173 | *out_type = intspec[2]; | ||
174 | return 0; | ||
175 | } | 172 | } |
176 | 173 | ||
177 | static int imx_gpcv2_domain_alloc(struct irq_domain *domain, | 174 | static int imx_gpcv2_domain_alloc(struct irq_domain *domain, |
178 | unsigned int irq, unsigned int nr_irqs, | 175 | unsigned int irq, unsigned int nr_irqs, |
179 | void *data) | 176 | void *data) |
180 | { | 177 | { |
181 | struct of_phandle_args *args = data; | 178 | struct irq_fwspec *fwspec = data; |
182 | struct of_phandle_args parent_args; | 179 | struct irq_fwspec parent_fwspec; |
183 | irq_hw_number_t hwirq; | 180 | irq_hw_number_t hwirq; |
181 | unsigned int type; | ||
182 | int err; | ||
184 | int i; | 183 | int i; |
185 | 184 | ||
186 | /* Not GIC compliant */ | 185 | err = imx_gpcv2_domain_translate(domain, fwspec, &hwirq, &type); |
187 | if (args->args_count != 3) | 186 | if (err) |
188 | return -EINVAL; | 187 | return err; |
189 | |||
190 | /* No PPI should point to this domain */ | ||
191 | if (args->args[0] != 0) | ||
192 | return -EINVAL; | ||
193 | 188 | ||
194 | /* Can't deal with this */ | ||
195 | hwirq = args->args[1]; | ||
196 | if (hwirq >= GPC_MAX_IRQS) | 189 | if (hwirq >= GPC_MAX_IRQS) |
197 | return -EINVAL; | 190 | return -EINVAL; |
198 | 191 | ||
@@ -201,15 +194,16 @@ static int imx_gpcv2_domain_alloc(struct irq_domain *domain, | |||
201 | &gpcv2_irqchip_data_chip, domain->host_data); | 194 | &gpcv2_irqchip_data_chip, domain->host_data); |
202 | } | 195 | } |
203 | 196 | ||
204 | parent_args = *args; | 197 | parent_fwspec = *fwspec; |
205 | parent_args.np = domain->parent->of_node; | 198 | parent_fwspec.fwnode = domain->parent->fwnode; |
206 | return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs, &parent_args); | 199 | return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs, |
200 | &parent_fwspec); | ||
207 | } | 201 | } |
208 | 202 | ||
209 | static struct irq_domain_ops gpcv2_irqchip_data_domain_ops = { | 203 | static struct irq_domain_ops gpcv2_irqchip_data_domain_ops = { |
210 | .xlate = imx_gpcv2_domain_xlate, | 204 | .translate = imx_gpcv2_domain_translate, |
211 | .alloc = imx_gpcv2_domain_alloc, | 205 | .alloc = imx_gpcv2_domain_alloc, |
212 | .free = irq_domain_free_irqs_common, | 206 | .free = irq_domain_free_irqs_common, |
213 | }; | 207 | }; |
214 | 208 | ||
215 | static int __init imx_gpcv2_irqchip_init(struct device_node *node, | 209 | static int __init imx_gpcv2_irqchip_init(struct device_node *node, |
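Like the GIC itself, this driver now funnels validation and decoding through its own .translate callback and forwards the untouched cells to the parent under the parent's fwnode; the mtk-sysirq conversion below follows the same shape. A minimal sketch of that shared pattern (illustrative, with the per-hwirq chip setup elided):

	static int example_stacked_alloc(struct irq_domain *domain, unsigned int virq,
					 unsigned int nr_irqs, void *arg)
	{
		struct irq_fwspec *fwspec = arg;
		struct irq_fwspec parent_fwspec = *fwspec;	/* same cells...  */
		unsigned long hwirq;
		unsigned int type;
		int err;

		err = domain->ops->translate(domain, fwspec, &hwirq, &type);
		if (err)
			return err;

		/* ... set hwirq/chip for each of the nr_irqs descriptors ... */

		parent_fwspec.fwnode = domain->parent->fwnode;	/* ...new owner */
		return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
						    &parent_fwspec);
	}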
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c index c8753da4c156..63ac73b1d9c8 100644 --- a/drivers/irqchip/irq-mtk-sysirq.c +++ b/drivers/irqchip/irq-mtk-sysirq.c | |||
@@ -67,22 +67,25 @@ static struct irq_chip mtk_sysirq_chip = { | |||
67 | .irq_set_affinity = irq_chip_set_affinity_parent, | 67 | .irq_set_affinity = irq_chip_set_affinity_parent, |
68 | }; | 68 | }; |
69 | 69 | ||
70 | static int mtk_sysirq_domain_xlate(struct irq_domain *d, | 70 | static int mtk_sysirq_domain_translate(struct irq_domain *d, |
71 | struct device_node *controller, | 71 | struct irq_fwspec *fwspec, |
72 | const u32 *intspec, unsigned int intsize, | 72 | unsigned long *hwirq, |
73 | unsigned long *out_hwirq, | 73 | unsigned int *type) |
74 | unsigned int *out_type) | ||
75 | { | 74 | { |
76 | if (intsize != 3) | 75 | if (is_of_node(fwspec->fwnode)) { |
77 | return -EINVAL; | 76 | if (fwspec->param_count != 3) |
77 | return -EINVAL; | ||
78 | 78 | ||
79 | /* sysirq doesn't support PPI */ | 79 | /* No PPI should point to this domain */ |
80 | if (intspec[0]) | 80 | if (fwspec->param[0] != 0) |
81 | return -EINVAL; | 81 | return -EINVAL; |
82 | 82 | ||
83 | *out_hwirq = intspec[1]; | 83 | *hwirq = fwspec->param[1]; |
84 | *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; | 84 | *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; |
85 | return 0; | 85 | return 0; |
86 | } | ||
87 | |||
88 | return -EINVAL; | ||
86 | } | 89 | } |
87 | 90 | ||
88 | static int mtk_sysirq_domain_alloc(struct irq_domain *domain, unsigned int virq, | 91 | static int mtk_sysirq_domain_alloc(struct irq_domain *domain, unsigned int virq, |
@@ -90,30 +93,30 @@ static int mtk_sysirq_domain_alloc(struct irq_domain *domain, unsigned int virq, | |||
90 | { | 93 | { |
91 | int i; | 94 | int i; |
92 | irq_hw_number_t hwirq; | 95 | irq_hw_number_t hwirq; |
93 | struct of_phandle_args *irq_data = arg; | 96 | struct irq_fwspec *fwspec = arg; |
94 | struct of_phandle_args gic_data = *irq_data; | 97 | struct irq_fwspec gic_fwspec = *fwspec; |
95 | 98 | ||
96 | if (irq_data->args_count != 3) | 99 | if (fwspec->param_count != 3) |
97 | return -EINVAL; | 100 | return -EINVAL; |
98 | 101 | ||
99 | /* sysirq doesn't support PPI */ | 102 | /* sysirq doesn't support PPI */ |
100 | if (irq_data->args[0]) | 103 | if (fwspec->param[0]) |
101 | return -EINVAL; | 104 | return -EINVAL; |
102 | 105 | ||
103 | hwirq = irq_data->args[1]; | 106 | hwirq = fwspec->param[1]; |
104 | for (i = 0; i < nr_irqs; i++) | 107 | for (i = 0; i < nr_irqs; i++) |
105 | irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, | 108 | irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, |
106 | &mtk_sysirq_chip, | 109 | &mtk_sysirq_chip, |
107 | domain->host_data); | 110 | domain->host_data); |
108 | 111 | ||
109 | gic_data.np = domain->parent->of_node; | 112 | gic_fwspec.fwnode = domain->parent->fwnode; |
110 | return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_data); | 113 | return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_fwspec); |
111 | } | 114 | } |
112 | 115 | ||
113 | static const struct irq_domain_ops sysirq_domain_ops = { | 116 | static const struct irq_domain_ops sysirq_domain_ops = { |
114 | .xlate = mtk_sysirq_domain_xlate, | 117 | .translate = mtk_sysirq_domain_translate, |
115 | .alloc = mtk_sysirq_domain_alloc, | 118 | .alloc = mtk_sysirq_domain_alloc, |
116 | .free = irq_domain_free_irqs_common, | 119 | .free = irq_domain_free_irqs_common, |
117 | }; | 120 | }; |
118 | 121 | ||
119 | static int __init mtk_sysirq_of_init(struct device_node *node, | 122 | static int __init mtk_sysirq_of_init(struct device_node *node, |
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c index 604df63e2edf..c22e2d40cb30 100644 --- a/drivers/irqchip/irq-mxs.c +++ b/drivers/irqchip/irq-mxs.c | |||
@@ -1,5 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved. | 2 | * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved. |
3 | * Copyright (C) 2014 Oleksij Rempel <linux@rempel-privat.de> | ||
4 | * Add Alphascale ASM9260 support. | ||
3 | * | 5 | * |
4 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
@@ -28,20 +30,64 @@ | |||
28 | #include <linux/stmp_device.h> | 30 | #include <linux/stmp_device.h> |
29 | #include <asm/exception.h> | 31 | #include <asm/exception.h> |
30 | 32 | ||
33 | #include "alphascale_asm9260-icoll.h" | ||
34 | |||
35 | /* | ||
36 | * this device provides 4 offsets for each register: | ||
37 | * 0x0 - plain read write mode | ||
38 | * 0x4 - set mode, OR logic. | ||
39 | * 0x8 - clr mode, AND-NOT logic. | ||
40 | * 0xc - toggle mode. | ||
41 | */ | ||
42 | #define SET_REG 4 | ||
43 | #define CLR_REG 8 | ||
44 | |||
31 | #define HW_ICOLL_VECTOR 0x0000 | 45 | #define HW_ICOLL_VECTOR 0x0000 |
32 | #define HW_ICOLL_LEVELACK 0x0010 | 46 | #define HW_ICOLL_LEVELACK 0x0010 |
33 | #define HW_ICOLL_CTRL 0x0020 | 47 | #define HW_ICOLL_CTRL 0x0020 |
34 | #define HW_ICOLL_STAT_OFFSET 0x0070 | 48 | #define HW_ICOLL_STAT_OFFSET 0x0070 |
35 | #define HW_ICOLL_INTERRUPTn_SET(n) (0x0124 + (n) * 0x10) | 49 | #define HW_ICOLL_INTERRUPT0 0x0120 |
36 | #define HW_ICOLL_INTERRUPTn_CLR(n) (0x0128 + (n) * 0x10) | 50 | #define HW_ICOLL_INTERRUPTn(n) ((n) * 0x10) |
37 | #define BM_ICOLL_INTERRUPTn_ENABLE 0x00000004 | 51 | #define BM_ICOLL_INTR_ENABLE BIT(2) |
38 | #define BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0 0x1 | 52 | #define BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0 0x1 |
39 | 53 | ||
40 | #define ICOLL_NUM_IRQS 128 | 54 | #define ICOLL_NUM_IRQS 128 |
41 | 55 | ||
42 | static void __iomem *icoll_base; | 56 | enum icoll_type { |
57 | ICOLL, | ||
58 | ASM9260_ICOLL, | ||
59 | }; | ||
60 | |||
61 | struct icoll_priv { | ||
62 | void __iomem *vector; | ||
63 | void __iomem *levelack; | ||
64 | void __iomem *ctrl; | ||
65 | void __iomem *stat; | ||
66 | void __iomem *intr; | ||
67 | void __iomem *clear; | ||
68 | enum icoll_type type; | ||
69 | }; | ||
70 | |||
71 | static struct icoll_priv icoll_priv; | ||
43 | static struct irq_domain *icoll_domain; | 72 | static struct irq_domain *icoll_domain; |
44 | 73 | ||
74 | /* calculate the bit offset depending on the number of interrupts per register */ | ||
75 | static u32 icoll_intr_bitshift(struct irq_data *d, u32 bit) | ||
76 | { | ||
77 | /* | ||
78 | * mask lower part of hwirq to convert it | ||
79 | * into 0, 1, 2 or 3 and then multiply it by 8 (or shift by 3) | ||
80 | */ | ||
81 | return bit << ((d->hwirq & 3) << 3); | ||
82 | } | ||
83 | |||
84 | /* calculate the memory offset depending on the number of interrupts per register */ | ||
85 | static void __iomem *icoll_intr_reg(struct irq_data *d) | ||
86 | { | ||
87 | /* offset = hwirq / intr_per_reg * 0x10 */ | ||
88 | return icoll_priv.intr + ((d->hwirq >> 2) * 0x10); | ||
89 | } | ||
90 | |||
45 | static void icoll_ack_irq(struct irq_data *d) | 91 | static void icoll_ack_irq(struct irq_data *d) |
46 | { | 92 | { |
47 | /* | 93 | /* |
@@ -50,19 +96,35 @@ static void icoll_ack_irq(struct irq_data *d) | |||
50 | * BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0 unconditionally. | 96 | * BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0 unconditionally. |
51 | */ | 97 | */ |
52 | __raw_writel(BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0, | 98 | __raw_writel(BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0, |
53 | icoll_base + HW_ICOLL_LEVELACK); | 99 | icoll_priv.levelack); |
54 | } | 100 | } |
55 | 101 | ||
56 | static void icoll_mask_irq(struct irq_data *d) | 102 | static void icoll_mask_irq(struct irq_data *d) |
57 | { | 103 | { |
58 | __raw_writel(BM_ICOLL_INTERRUPTn_ENABLE, | 104 | __raw_writel(BM_ICOLL_INTR_ENABLE, |
59 | icoll_base + HW_ICOLL_INTERRUPTn_CLR(d->hwirq)); | 105 | icoll_priv.intr + CLR_REG + HW_ICOLL_INTERRUPTn(d->hwirq)); |
60 | } | 106 | } |
61 | 107 | ||
62 | static void icoll_unmask_irq(struct irq_data *d) | 108 | static void icoll_unmask_irq(struct irq_data *d) |
63 | { | 109 | { |
64 | __raw_writel(BM_ICOLL_INTERRUPTn_ENABLE, | 110 | __raw_writel(BM_ICOLL_INTR_ENABLE, |
65 | icoll_base + HW_ICOLL_INTERRUPTn_SET(d->hwirq)); | 111 | icoll_priv.intr + SET_REG + HW_ICOLL_INTERRUPTn(d->hwirq)); |
112 | } | ||
113 | |||
114 | static void asm9260_mask_irq(struct irq_data *d) | ||
115 | { | ||
116 | __raw_writel(icoll_intr_bitshift(d, BM_ICOLL_INTR_ENABLE), | ||
117 | icoll_intr_reg(d) + CLR_REG); | ||
118 | } | ||
119 | |||
120 | static void asm9260_unmask_irq(struct irq_data *d) | ||
121 | { | ||
122 | __raw_writel(ASM9260_BM_CLEAR_BIT(d->hwirq), | ||
123 | icoll_priv.clear + | ||
124 | ASM9260_HW_ICOLL_CLEARn(d->hwirq)); | ||
125 | |||
126 | __raw_writel(icoll_intr_bitshift(d, BM_ICOLL_INTR_ENABLE), | ||
127 | icoll_intr_reg(d) + SET_REG); | ||
66 | } | 128 | } |
67 | 129 | ||
68 | static struct irq_chip mxs_icoll_chip = { | 130 | static struct irq_chip mxs_icoll_chip = { |
@@ -71,19 +133,32 @@ static struct irq_chip mxs_icoll_chip = { | |||
71 | .irq_unmask = icoll_unmask_irq, | 133 | .irq_unmask = icoll_unmask_irq, |
72 | }; | 134 | }; |
73 | 135 | ||
136 | static struct irq_chip asm9260_icoll_chip = { | ||
137 | .irq_ack = icoll_ack_irq, | ||
138 | .irq_mask = asm9260_mask_irq, | ||
139 | .irq_unmask = asm9260_unmask_irq, | ||
140 | }; | ||
141 | |||
74 | asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs) | 142 | asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs) |
75 | { | 143 | { |
76 | u32 irqnr; | 144 | u32 irqnr; |
77 | 145 | ||
78 | irqnr = __raw_readl(icoll_base + HW_ICOLL_STAT_OFFSET); | 146 | irqnr = __raw_readl(icoll_priv.stat); |
79 | __raw_writel(irqnr, icoll_base + HW_ICOLL_VECTOR); | 147 | __raw_writel(irqnr, icoll_priv.vector); |
80 | handle_domain_irq(icoll_domain, irqnr, regs); | 148 | handle_domain_irq(icoll_domain, irqnr, regs); |
81 | } | 149 | } |
82 | 150 | ||
83 | static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq, | 151 | static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq, |
84 | irq_hw_number_t hw) | 152 | irq_hw_number_t hw) |
85 | { | 153 | { |
86 | irq_set_chip_and_handler(virq, &mxs_icoll_chip, handle_level_irq); | 154 | struct irq_chip *chip; |
155 | |||
156 | if (icoll_priv.type == ICOLL) | ||
157 | chip = &mxs_icoll_chip; | ||
158 | else | ||
159 | chip = &asm9260_icoll_chip; | ||
160 | |||
161 | irq_set_chip_and_handler(virq, chip, handle_level_irq); | ||
87 | 162 | ||
88 | return 0; | 163 | return 0; |
89 | } | 164 | } |
@@ -93,20 +168,80 @@ static const struct irq_domain_ops icoll_irq_domain_ops = { | |||
93 | .xlate = irq_domain_xlate_onecell, | 168 | .xlate = irq_domain_xlate_onecell, |
94 | }; | 169 | }; |
95 | 170 | ||
171 | static void __init icoll_add_domain(struct device_node *np, | ||
172 | int num) | ||
173 | { | ||
174 | icoll_domain = irq_domain_add_linear(np, num, | ||
175 | &icoll_irq_domain_ops, NULL); | ||
176 | |||
177 | if (!icoll_domain) | ||
178 | panic("%s: unable to create irq domain", np->full_name); | ||
179 | } | ||
180 | |||
181 | static void __iomem * __init icoll_init_iobase(struct device_node *np) | ||
182 | { | ||
183 | void __iomem *icoll_base; | ||
184 | |||
185 | icoll_base = of_io_request_and_map(np, 0, np->name); | ||
186 | if (!icoll_base) | ||
187 | panic("%s: unable to map resource", np->full_name); | ||
188 | return icoll_base; | ||
189 | } | ||
190 | |||
96 | static int __init icoll_of_init(struct device_node *np, | 191 | static int __init icoll_of_init(struct device_node *np, |
97 | struct device_node *interrupt_parent) | 192 | struct device_node *interrupt_parent) |
98 | { | 193 | { |
99 | icoll_base = of_iomap(np, 0); | 194 | void __iomem *icoll_base; |
100 | WARN_ON(!icoll_base); | 195 | |
196 | icoll_priv.type = ICOLL; | ||
197 | |||
198 | icoll_base = icoll_init_iobase(np); | ||
199 | icoll_priv.vector = icoll_base + HW_ICOLL_VECTOR; | ||
200 | icoll_priv.levelack = icoll_base + HW_ICOLL_LEVELACK; | ||
201 | icoll_priv.ctrl = icoll_base + HW_ICOLL_CTRL; | ||
202 | icoll_priv.stat = icoll_base + HW_ICOLL_STAT_OFFSET; | ||
203 | icoll_priv.intr = icoll_base + HW_ICOLL_INTERRUPT0; | ||
204 | icoll_priv.clear = NULL; | ||
101 | 205 | ||
102 | /* | 206 | /* |
103 | * Interrupt Collector reset, which initializes the priority | 207 | * Interrupt Collector reset, which initializes the priority |
104 | * for each irq to level 0. | 208 | * for each irq to level 0. |
105 | */ | 209 | */ |
106 | stmp_reset_block(icoll_base + HW_ICOLL_CTRL); | 210 | stmp_reset_block(icoll_priv.ctrl); |
107 | 211 | ||
108 | icoll_domain = irq_domain_add_linear(np, ICOLL_NUM_IRQS, | 212 | icoll_add_domain(np, ICOLL_NUM_IRQS); |
109 | &icoll_irq_domain_ops, NULL); | 213 | |
110 | return icoll_domain ? 0 : -ENODEV; | 214 | return 0; |
111 | } | 215 | } |
112 | IRQCHIP_DECLARE(mxs, "fsl,icoll", icoll_of_init); | 216 | IRQCHIP_DECLARE(mxs, "fsl,icoll", icoll_of_init); |
217 | |||
218 | static int __init asm9260_of_init(struct device_node *np, | ||
219 | struct device_node *interrupt_parent) | ||
220 | { | ||
221 | void __iomem *icoll_base; | ||
222 | int i; | ||
223 | |||
224 | icoll_priv.type = ASM9260_ICOLL; | ||
225 | |||
226 | icoll_base = icoll_init_iobase(np); | ||
227 | icoll_priv.vector = icoll_base + ASM9260_HW_ICOLL_VECTOR; | ||
228 | icoll_priv.levelack = icoll_base + ASM9260_HW_ICOLL_LEVELACK; | ||
229 | icoll_priv.ctrl = icoll_base + ASM9260_HW_ICOLL_CTRL; | ||
230 | icoll_priv.stat = icoll_base + ASM9260_HW_ICOLL_STAT_OFFSET; | ||
231 | icoll_priv.intr = icoll_base + ASM9260_HW_ICOLL_INTERRUPT0; | ||
232 | icoll_priv.clear = icoll_base + ASM9260_HW_ICOLL_CLEAR0; | ||
233 | |||
234 | writel_relaxed(ASM9260_BM_CTRL_IRQ_ENABLE, | ||
235 | icoll_priv.ctrl); | ||
236 | /* | ||
237 | * The ASM9260 doesn't provide a reset bit, so we need to set level 0 | ||
238 | * manually. | ||
239 | */ | ||
240 | for (i = 0; i < 16 * 0x10; i += 0x10) | ||
241 | writel(0, icoll_priv.intr + i); | ||
242 | |||
243 | icoll_add_domain(np, ASM9260_NUM_IRQS); | ||
244 | |||
245 | return 0; | ||
246 | } | ||
247 | IRQCHIP_DECLARE(asm9260, "alphascale,asm9260-icoll", asm9260_of_init); | ||
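The ASM9260 variant packs four interrupt slots of eight bits each into every 0x10-wide INTERRUPTn register, which is what the bit-shift and register-offset helpers above compute, and every register carries the usual 0x4/0x8 set/clear aliases. A small standalone sketch of that addressing math (constants as assumed above):

	#define SET_REG	0x4	/* write-1-to-set alias   */
	#define CLR_REG	0x8	/* write-1-to-clear alias */

	/* One INTERRUPTn register per four hwirqs, 0x10 bytes apart. */
	static void __iomem *asm9260_slot_reg(void __iomem *intr_base,
					      irq_hw_number_t hwirq)
	{
		return intr_base + (hwirq / 4) * 0x10;
	}

	/* Eight control bits per slot within a register. */
	static u32 asm9260_slot_bit(irq_hw_number_t hwirq, u32 bit)
	{
		return bit << ((hwirq % 4) * 8);
	}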
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c index a878b8d03868..b1777104fd9f 100644 --- a/drivers/irqchip/irq-nvic.c +++ b/drivers/irqchip/irq-nvic.c | |||
@@ -48,16 +48,26 @@ nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs) | |||
48 | handle_IRQ(irq, regs); | 48 | handle_IRQ(irq, regs); |
49 | } | 49 | } |
50 | 50 | ||
51 | static int nvic_irq_domain_translate(struct irq_domain *d, | ||
52 | struct irq_fwspec *fwspec, | ||
53 | unsigned long *hwirq, unsigned int *type) | ||
54 | { | ||
55 | if (WARN_ON(fwspec->param_count < 1)) | ||
56 | return -EINVAL; | ||
57 | *hwirq = fwspec->param[0]; | ||
58 | *type = IRQ_TYPE_NONE; | ||
59 | return 0; | ||
60 | } | ||
61 | |||
51 | static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | 62 | static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, |
52 | unsigned int nr_irqs, void *arg) | 63 | unsigned int nr_irqs, void *arg) |
53 | { | 64 | { |
54 | int i, ret; | 65 | int i, ret; |
55 | irq_hw_number_t hwirq; | 66 | irq_hw_number_t hwirq; |
56 | unsigned int type = IRQ_TYPE_NONE; | 67 | unsigned int type = IRQ_TYPE_NONE; |
57 | struct of_phandle_args *irq_data = arg; | 68 | struct irq_fwspec *fwspec = arg; |
58 | 69 | ||
59 | ret = irq_domain_xlate_onecell(domain, irq_data->np, irq_data->args, | 70 | ret = nvic_irq_domain_translate(domain, fwspec, &hwirq, &type); |
60 | irq_data->args_count, &hwirq, &type); | ||
61 | if (ret) | 71 | if (ret) |
62 | return ret; | 72 | return ret; |
63 | 73 | ||
@@ -68,7 +78,7 @@ static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | |||
68 | } | 78 | } |
69 | 79 | ||
70 | static const struct irq_domain_ops nvic_irq_domain_ops = { | 80 | static const struct irq_domain_ops nvic_irq_domain_ops = { |
71 | .xlate = irq_domain_xlate_onecell, | 81 | .translate = nvic_irq_domain_translate, |
72 | .alloc = nvic_irq_domain_alloc, | 82 | .alloc = nvic_irq_domain_alloc, |
73 | .free = irq_domain_free_irqs_top, | 83 | .free = irq_domain_free_irqs_top, |
74 | }; | 84 | }; |
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c index 9525335723f6..c325806561be 100644 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c | |||
@@ -361,14 +361,16 @@ static const struct irq_domain_ops intc_irqpin_irq_domain_ops = { | |||
361 | .xlate = irq_domain_xlate_twocell, | 361 | .xlate = irq_domain_xlate_twocell, |
362 | }; | 362 | }; |
363 | 363 | ||
364 | static const struct intc_irqpin_irlm_config intc_irqpin_irlm_r8a7779 = { | 364 | static const struct intc_irqpin_irlm_config intc_irqpin_irlm_r8a777x = { |
365 | .irlm_bit = 23, /* ICR0.IRLM0 */ | 365 | .irlm_bit = 23, /* ICR0.IRLM0 */ |
366 | }; | 366 | }; |
367 | 367 | ||
368 | static const struct of_device_id intc_irqpin_dt_ids[] = { | 368 | static const struct of_device_id intc_irqpin_dt_ids[] = { |
369 | { .compatible = "renesas,intc-irqpin", }, | 369 | { .compatible = "renesas,intc-irqpin", }, |
370 | { .compatible = "renesas,intc-irqpin-r8a7778", | ||
371 | .data = &intc_irqpin_irlm_r8a777x }, | ||
370 | { .compatible = "renesas,intc-irqpin-r8a7779", | 372 | { .compatible = "renesas,intc-irqpin-r8a7779", |
371 | .data = &intc_irqpin_irlm_r8a7779 }, | 373 | .data = &intc_irqpin_irlm_r8a777x }, |
372 | {}, | 374 | {}, |
373 | }; | 375 | }; |
374 | MODULE_DEVICE_TABLE(of, intc_irqpin_dt_ids); | 376 | MODULE_DEVICE_TABLE(of, intc_irqpin_dt_ids); |
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c index 35bf97ba4a3d..52304b139aa4 100644 --- a/drivers/irqchip/irq-renesas-irqc.c +++ b/drivers/irqchip/irq-renesas-irqc.c | |||
@@ -62,33 +62,20 @@ struct irqc_priv { | |||
62 | struct irqc_irq irq[IRQC_IRQ_MAX]; | 62 | struct irqc_irq irq[IRQC_IRQ_MAX]; |
63 | unsigned int number_of_irqs; | 63 | unsigned int number_of_irqs; |
64 | struct platform_device *pdev; | 64 | struct platform_device *pdev; |
65 | struct irq_chip irq_chip; | 65 | struct irq_chip_generic *gc; |
66 | struct irq_domain *irq_domain; | 66 | struct irq_domain *irq_domain; |
67 | struct clk *clk; | 67 | struct clk *clk; |
68 | }; | 68 | }; |
69 | 69 | ||
70 | static void irqc_dbg(struct irqc_irq *i, char *str) | 70 | static struct irqc_priv *irq_data_to_priv(struct irq_data *data) |
71 | { | ||
72 | dev_dbg(&i->p->pdev->dev, "%s (%d:%d)\n", | ||
73 | str, i->requested_irq, i->hw_irq); | ||
74 | } | ||
75 | |||
76 | static void irqc_irq_enable(struct irq_data *d) | ||
77 | { | 71 | { |
78 | struct irqc_priv *p = irq_data_get_irq_chip_data(d); | 72 | return data->domain->host_data; |
79 | int hw_irq = irqd_to_hwirq(d); | ||
80 | |||
81 | irqc_dbg(&p->irq[hw_irq], "enable"); | ||
82 | iowrite32(BIT(hw_irq), p->cpu_int_base + IRQC_EN_SET); | ||
83 | } | 73 | } |
84 | 74 | ||
85 | static void irqc_irq_disable(struct irq_data *d) | 75 | static void irqc_dbg(struct irqc_irq *i, char *str) |
86 | { | 76 | { |
87 | struct irqc_priv *p = irq_data_get_irq_chip_data(d); | 77 | dev_dbg(&i->p->pdev->dev, "%s (%d:%d)\n", |
88 | int hw_irq = irqd_to_hwirq(d); | 78 | str, i->requested_irq, i->hw_irq); |
89 | |||
90 | irqc_dbg(&p->irq[hw_irq], "disable"); | ||
91 | iowrite32(BIT(hw_irq), p->cpu_int_base + IRQC_EN_STS); | ||
92 | } | 79 | } |
93 | 80 | ||
94 | static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = { | 81 | static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = { |
@@ -101,7 +88,7 @@ static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = { | |||
101 | 88 | ||
102 | static int irqc_irq_set_type(struct irq_data *d, unsigned int type) | 89 | static int irqc_irq_set_type(struct irq_data *d, unsigned int type) |
103 | { | 90 | { |
104 | struct irqc_priv *p = irq_data_get_irq_chip_data(d); | 91 | struct irqc_priv *p = irq_data_to_priv(d); |
105 | int hw_irq = irqd_to_hwirq(d); | 92 | int hw_irq = irqd_to_hwirq(d); |
106 | unsigned char value = irqc_sense[type & IRQ_TYPE_SENSE_MASK]; | 93 | unsigned char value = irqc_sense[type & IRQ_TYPE_SENSE_MASK]; |
107 | u32 tmp; | 94 | u32 tmp; |
@@ -120,7 +107,7 @@ static int irqc_irq_set_type(struct irq_data *d, unsigned int type) | |||
120 | 107 | ||
121 | static int irqc_irq_set_wake(struct irq_data *d, unsigned int on) | 108 | static int irqc_irq_set_wake(struct irq_data *d, unsigned int on) |
122 | { | 109 | { |
123 | struct irqc_priv *p = irq_data_get_irq_chip_data(d); | 110 | struct irqc_priv *p = irq_data_to_priv(d); |
124 | int hw_irq = irqd_to_hwirq(d); | 111 | int hw_irq = irqd_to_hwirq(d); |
125 | 112 | ||
126 | irq_set_irq_wake(p->irq[hw_irq].requested_irq, on); | 113 | irq_set_irq_wake(p->irq[hw_irq].requested_irq, on); |
@@ -153,35 +140,11 @@ static irqreturn_t irqc_irq_handler(int irq, void *dev_id) | |||
153 | return IRQ_NONE; | 140 | return IRQ_NONE; |
154 | } | 141 | } |
155 | 142 | ||
156 | /* | ||
157 | * This lock class tells lockdep that IRQC irqs are in a different | ||
158 | * category than their parents, so it won't report false recursion. | ||
159 | */ | ||
160 | static struct lock_class_key irqc_irq_lock_class; | ||
161 | |||
162 | static int irqc_irq_domain_map(struct irq_domain *h, unsigned int virq, | ||
163 | irq_hw_number_t hw) | ||
164 | { | ||
165 | struct irqc_priv *p = h->host_data; | ||
166 | |||
167 | irqc_dbg(&p->irq[hw], "map"); | ||
168 | irq_set_chip_data(virq, h->host_data); | ||
169 | irq_set_lockdep_class(virq, &irqc_irq_lock_class); | ||
170 | irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq); | ||
171 | return 0; | ||
172 | } | ||
173 | |||
174 | static const struct irq_domain_ops irqc_irq_domain_ops = { | ||
175 | .map = irqc_irq_domain_map, | ||
176 | .xlate = irq_domain_xlate_twocell, | ||
177 | }; | ||
178 | |||
179 | static int irqc_probe(struct platform_device *pdev) | 143 | static int irqc_probe(struct platform_device *pdev) |
180 | { | 144 | { |
181 | struct irqc_priv *p; | 145 | struct irqc_priv *p; |
182 | struct resource *io; | 146 | struct resource *io; |
183 | struct resource *irq; | 147 | struct resource *irq; |
184 | struct irq_chip *irq_chip; | ||
185 | const char *name = dev_name(&pdev->dev); | 148 | const char *name = dev_name(&pdev->dev); |
186 | int ret; | 149 | int ret; |
187 | int k; | 150 | int k; |
@@ -241,40 +204,51 @@ static int irqc_probe(struct platform_device *pdev) | |||
241 | 204 | ||
242 | p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0); /* SYS-SPI */ | 205 | p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0); /* SYS-SPI */ |
243 | 206 | ||
244 | irq_chip = &p->irq_chip; | ||
245 | irq_chip->name = name; | ||
246 | irq_chip->irq_mask = irqc_irq_disable; | ||
247 | irq_chip->irq_unmask = irqc_irq_enable; | ||
248 | irq_chip->irq_set_type = irqc_irq_set_type; | ||
249 | irq_chip->irq_set_wake = irqc_irq_set_wake; | ||
250 | irq_chip->flags = IRQCHIP_MASK_ON_SUSPEND; | ||
251 | |||
252 | p->irq_domain = irq_domain_add_linear(pdev->dev.of_node, | 207 | p->irq_domain = irq_domain_add_linear(pdev->dev.of_node, |
253 | p->number_of_irqs, | 208 | p->number_of_irqs, |
254 | &irqc_irq_domain_ops, p); | 209 | &irq_generic_chip_ops, p); |
255 | if (!p->irq_domain) { | 210 | if (!p->irq_domain) { |
256 | ret = -ENXIO; | 211 | ret = -ENXIO; |
257 | dev_err(&pdev->dev, "cannot initialize irq domain\n"); | 212 | dev_err(&pdev->dev, "cannot initialize irq domain\n"); |
258 | goto err2; | 213 | goto err2; |
259 | } | 214 | } |
260 | 215 | ||
216 | ret = irq_alloc_domain_generic_chips(p->irq_domain, p->number_of_irqs, | ||
217 | 1, name, handle_level_irq, | ||
218 | 0, 0, IRQ_GC_INIT_NESTED_LOCK); | ||
219 | if (ret) { | ||
220 | dev_err(&pdev->dev, "cannot allocate generic chip\n"); | ||
221 | goto err3; | ||
222 | } | ||
223 | |||
224 | p->gc = irq_get_domain_generic_chip(p->irq_domain, 0); | ||
225 | p->gc->reg_base = p->cpu_int_base; | ||
226 | p->gc->chip_types[0].regs.enable = IRQC_EN_SET; | ||
227 | p->gc->chip_types[0].regs.disable = IRQC_EN_STS; | ||
228 | p->gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg; | ||
229 | p->gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg; | ||
230 | p->gc->chip_types[0].chip.irq_set_type = irqc_irq_set_type; | ||
231 | p->gc->chip_types[0].chip.irq_set_wake = irqc_irq_set_wake; | ||
232 | p->gc->chip_types[0].chip.flags = IRQCHIP_MASK_ON_SUSPEND; | ||
233 | |||
261 | /* request interrupts one by one */ | 234 | /* request interrupts one by one */ |
262 | for (k = 0; k < p->number_of_irqs; k++) { | 235 | for (k = 0; k < p->number_of_irqs; k++) { |
263 | if (request_irq(p->irq[k].requested_irq, irqc_irq_handler, | 236 | if (request_irq(p->irq[k].requested_irq, irqc_irq_handler, |
264 | 0, name, &p->irq[k])) { | 237 | 0, name, &p->irq[k])) { |
265 | dev_err(&pdev->dev, "failed to request IRQ\n"); | 238 | dev_err(&pdev->dev, "failed to request IRQ\n"); |
266 | ret = -ENOENT; | 239 | ret = -ENOENT; |
267 | goto err3; | 240 | goto err4; |
268 | } | 241 | } |
269 | } | 242 | } |
270 | 243 | ||
271 | dev_info(&pdev->dev, "driving %d irqs\n", p->number_of_irqs); | 244 | dev_info(&pdev->dev, "driving %d irqs\n", p->number_of_irqs); |
272 | 245 | ||
273 | return 0; | 246 | return 0; |
274 | err3: | 247 | err4: |
275 | while (--k >= 0) | 248 | while (--k >= 0) |
276 | free_irq(p->irq[k].requested_irq, &p->irq[k]); | 249 | free_irq(p->irq[k].requested_irq, &p->irq[k]); |
277 | 250 | ||
251 | err3: | ||
278 | irq_domain_remove(p->irq_domain); | 252 | irq_domain_remove(p->irq_domain); |
279 | err2: | 253 | err2: |
280 | iounmap(p->iomem); | 254 | iounmap(p->iomem); |
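The hand-rolled irq_chip in this driver gives way to a generic irq chip whose mask/unmask callbacks write BIT(hwirq) to the EN_STS/EN_SET registers, exactly what the removed irqc_irq_disable()/irqc_irq_enable() did by hand. A rough illustration of the equivalence (not the kernel's irq_gc_* implementation, which also maintains the mask cache under the chip lock):

	/* What irq_gc_mask_disable_reg() ends up doing for this hardware: */
	static void example_mask(void __iomem *cpu_int_base, irq_hw_number_t hw)
	{
		iowrite32(BIT(hw), cpu_int_base + IRQC_EN_STS);	/* disable */
	}

	/* And irq_gc_unmask_enable_reg(): */
	static void example_unmask(void __iomem *cpu_int_base, irq_hw_number_t hw)
	{
		iowrite32(BIT(hw), cpu_int_base + IRQC_EN_SET);	/* enable */
	}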
diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c index 7154b011ddd2..c71914e8f596 100644 --- a/drivers/irqchip/irq-s3c24xx.c +++ b/drivers/irqchip/irq-s3c24xx.c | |||
@@ -311,7 +311,7 @@ static void s3c_irq_demux(struct irq_desc *desc) | |||
311 | * and one big domain for the dt case where the subintc | 311 | * and one big domain for the dt case where the subintc |
312 | * starts at hwirq number 32. | 312 | * starts at hwirq number 32. |
313 | */ | 313 | */ |
314 | offset = (intc->domain->of_node) ? 32 : 0; | 314 | offset = irq_domain_get_of_node(intc->domain) ? 32 : 0; |
315 | 315 | ||
316 | chained_irq_enter(chip, desc); | 316 | chained_irq_enter(chip, desc); |
317 | 317 | ||
@@ -342,7 +342,7 @@ static inline int s3c24xx_handle_intc(struct s3c_irq_intc *intc, | |||
342 | return false; | 342 | return false; |
343 | 343 | ||
344 | /* non-dt machines use individual domains */ | 344 | /* non-dt machines use individual domains */ |
345 | if (!intc->domain->of_node) | 345 | if (!irq_domain_get_of_node(intc->domain)) |
346 | intc_offset = 0; | 346 | intc_offset = 0; |
347 | 347 | ||
348 | /* We have a problem that the INTOFFSET register does not always | 348 | /* We have a problem that the INTOFFSET register does not always |
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c index c143dd58410c..4ef178078e5b 100644 --- a/drivers/irqchip/irq-sunxi-nmi.c +++ b/drivers/irqchip/irq-sunxi-nmi.c | |||
@@ -8,6 +8,9 @@ | |||
8 | * warranty of any kind, whether express or implied. | 8 | * warranty of any kind, whether express or implied. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define DRV_NAME "sunxi-nmi" | ||
12 | #define pr_fmt(fmt) DRV_NAME ": " fmt | ||
13 | |||
11 | #include <linux/bitops.h> | 14 | #include <linux/bitops.h> |
12 | #include <linux/device.h> | 15 | #include <linux/device.h> |
13 | #include <linux/io.h> | 16 | #include <linux/io.h> |
@@ -96,8 +99,8 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type) | |||
96 | break; | 99 | break; |
97 | default: | 100 | default: |
98 | irq_gc_unlock(gc); | 101 | irq_gc_unlock(gc); |
99 | pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n", | 102 | pr_err("Cannot assign multiple trigger modes to IRQ %d.\n", |
100 | __func__, data->irq); | 103 | data->irq); |
101 | return -EBADR; | 104 | return -EBADR; |
102 | } | 105 | } |
103 | 106 | ||
@@ -130,30 +133,29 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node, | |||
130 | 133 | ||
131 | domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL); | 134 | domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL); |
132 | if (!domain) { | 135 | if (!domain) { |
133 | pr_err("%s: Could not register interrupt domain.\n", node->name); | 136 | pr_err("Could not register interrupt domain.\n"); |
134 | return -ENOMEM; | 137 | return -ENOMEM; |
135 | } | 138 | } |
136 | 139 | ||
137 | ret = irq_alloc_domain_generic_chips(domain, 1, 2, node->name, | 140 | ret = irq_alloc_domain_generic_chips(domain, 1, 2, DRV_NAME, |
138 | handle_fasteoi_irq, clr, 0, | 141 | handle_fasteoi_irq, clr, 0, |
139 | IRQ_GC_INIT_MASK_CACHE); | 142 | IRQ_GC_INIT_MASK_CACHE); |
140 | if (ret) { | 143 | if (ret) { |
141 | pr_err("%s: Could not allocate generic interrupt chip.\n", | 144 | pr_err("Could not allocate generic interrupt chip.\n"); |
142 | node->name); | 145 | goto fail_irqd_remove; |
143 | goto fail_irqd_remove; | ||
144 | } | 146 | } |
145 | 147 | ||
146 | irq = irq_of_parse_and_map(node, 0); | 148 | irq = irq_of_parse_and_map(node, 0); |
147 | if (irq <= 0) { | 149 | if (irq <= 0) { |
148 | pr_err("%s: unable to parse irq\n", node->name); | 150 | pr_err("unable to parse irq\n"); |
149 | ret = -EINVAL; | 151 | ret = -EINVAL; |
150 | goto fail_irqd_remove; | 152 | goto fail_irqd_remove; |
151 | } | 153 | } |
152 | 154 | ||
153 | gc = irq_get_domain_generic_chip(domain, 0); | 155 | gc = irq_get_domain_generic_chip(domain, 0); |
154 | gc->reg_base = of_iomap(node, 0); | 156 | gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node)); |
155 | if (!gc->reg_base) { | 157 | if (!gc->reg_base) { |
156 | pr_err("%s: unable to map resource\n", node->name); | 158 | pr_err("unable to map resource\n"); |
157 | ret = -ENOMEM; | 159 | ret = -ENOMEM; |
158 | goto fail_irqd_remove; | 160 | goto fail_irqd_remove; |
159 | } | 161 | } |
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c index fd88e687791a..121ec301372e 100644 --- a/drivers/irqchip/irq-tegra.c +++ b/drivers/irqchip/irq-tegra.c | |||
@@ -221,41 +221,43 @@ static struct irq_chip tegra_ictlr_chip = { | |||
221 | #endif | 221 | #endif |
222 | }; | 222 | }; |
223 | 223 | ||
224 | static int tegra_ictlr_domain_xlate(struct irq_domain *domain, | 224 | static int tegra_ictlr_domain_translate(struct irq_domain *d, |
225 | struct device_node *controller, | 225 | struct irq_fwspec *fwspec, |
226 | const u32 *intspec, | 226 | unsigned long *hwirq, |
227 | unsigned int intsize, | 227 | unsigned int *type) |
228 | unsigned long *out_hwirq, | ||
229 | unsigned int *out_type) | ||
230 | { | 228 | { |
231 | if (domain->of_node != controller) | 229 | if (is_of_node(fwspec->fwnode)) { |
232 | return -EINVAL; /* Shouldn't happen, really... */ | 230 | if (fwspec->param_count != 3) |
233 | if (intsize != 3) | 231 | return -EINVAL; |
234 | return -EINVAL; /* Not GIC compliant */ | ||
235 | if (intspec[0] != GIC_SPI) | ||
236 | return -EINVAL; /* No PPI should point to this domain */ | ||
237 | 232 | ||
238 | *out_hwirq = intspec[1]; | 233 | /* No PPI should point to this domain */ |
239 | *out_type = intspec[2]; | 234 | if (fwspec->param[0] != 0) |
240 | return 0; | 235 | return -EINVAL; |
236 | |||
237 | *hwirq = fwspec->param[1]; | ||
238 | *type = fwspec->param[2]; | ||
239 | return 0; | ||
240 | } | ||
241 | |||
242 | return -EINVAL; | ||
241 | } | 243 | } |
242 | 244 | ||
243 | static int tegra_ictlr_domain_alloc(struct irq_domain *domain, | 245 | static int tegra_ictlr_domain_alloc(struct irq_domain *domain, |
244 | unsigned int virq, | 246 | unsigned int virq, |
245 | unsigned int nr_irqs, void *data) | 247 | unsigned int nr_irqs, void *data) |
246 | { | 248 | { |
247 | struct of_phandle_args *args = data; | 249 | struct irq_fwspec *fwspec = data; |
248 | struct of_phandle_args parent_args; | 250 | struct irq_fwspec parent_fwspec; |
249 | struct tegra_ictlr_info *info = domain->host_data; | 251 | struct tegra_ictlr_info *info = domain->host_data; |
250 | irq_hw_number_t hwirq; | 252 | irq_hw_number_t hwirq; |
251 | unsigned int i; | 253 | unsigned int i; |
252 | 254 | ||
253 | if (args->args_count != 3) | 255 | if (fwspec->param_count != 3) |
254 | return -EINVAL; /* Not GIC compliant */ | 256 | return -EINVAL; /* Not GIC compliant */ |
255 | if (args->args[0] != GIC_SPI) | 257 | if (fwspec->param[0] != GIC_SPI) |
256 | return -EINVAL; /* No PPI should point to this domain */ | 258 | return -EINVAL; /* No PPI should point to this domain */ |
257 | 259 | ||
258 | hwirq = args->args[1]; | 260 | hwirq = fwspec->param[1]; |
259 | if (hwirq >= (num_ictlrs * 32)) | 261 | if (hwirq >= (num_ictlrs * 32)) |
260 | return -EINVAL; | 262 | return -EINVAL; |
261 | 263 | ||
@@ -267,9 +269,10 @@ static int tegra_ictlr_domain_alloc(struct irq_domain *domain, | |||
267 | info->base[ictlr]); | 269 | info->base[ictlr]); |
268 | } | 270 | } |
269 | 271 | ||
270 | parent_args = *args; | 272 | parent_fwspec = *fwspec; |
271 | parent_args.np = domain->parent->of_node; | 273 | parent_fwspec.fwnode = domain->parent->fwnode; |
272 | return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_args); | 274 | return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, |
275 | &parent_fwspec); | ||
273 | } | 276 | } |
274 | 277 | ||
275 | static void tegra_ictlr_domain_free(struct irq_domain *domain, | 278 | static void tegra_ictlr_domain_free(struct irq_domain *domain, |
@@ -285,9 +288,9 @@ static void tegra_ictlr_domain_free(struct irq_domain *domain, | |||
285 | } | 288 | } |
286 | 289 | ||
287 | static const struct irq_domain_ops tegra_ictlr_domain_ops = { | 290 | static const struct irq_domain_ops tegra_ictlr_domain_ops = { |
288 | .xlate = tegra_ictlr_domain_xlate, | 291 | .translate = tegra_ictlr_domain_translate, |
289 | .alloc = tegra_ictlr_domain_alloc, | 292 | .alloc = tegra_ictlr_domain_alloc, |
290 | .free = tegra_ictlr_domain_free, | 293 | .free = tegra_ictlr_domain_free, |
291 | }; | 294 | }; |
292 | 295 | ||
293 | static int __init tegra_ictlr_init(struct device_node *node, | 296 | static int __init tegra_ictlr_init(struct device_node *node, |
diff --git a/drivers/irqchip/irq-vf610-mscm-ir.c b/drivers/irqchip/irq-vf610-mscm-ir.c index 2c2255886401..56b5e3cb9de2 100644 --- a/drivers/irqchip/irq-vf610-mscm-ir.c +++ b/drivers/irqchip/irq-vf610-mscm-ir.c | |||
@@ -130,35 +130,51 @@ static int vf610_mscm_ir_domain_alloc(struct irq_domain *domain, unsigned int vi | |||
130 | { | 130 | { |
131 | int i; | 131 | int i; |
132 | irq_hw_number_t hwirq; | 132 | irq_hw_number_t hwirq; |
133 | struct of_phandle_args *irq_data = arg; | 133 | struct irq_fwspec *fwspec = arg; |
134 | struct of_phandle_args gic_data; | 134 | struct irq_fwspec parent_fwspec; |
135 | 135 | ||
136 | if (irq_data->args_count != 2) | 136 | if (!irq_domain_get_of_node(domain->parent)) |
137 | return -EINVAL; | 137 | return -EINVAL; |
138 | 138 | ||
139 | hwirq = irq_data->args[0]; | 139 | if (fwspec->param_count != 2) |
140 | return -EINVAL; | ||
141 | |||
142 | hwirq = fwspec->param[0]; | ||
140 | for (i = 0; i < nr_irqs; i++) | 143 | for (i = 0; i < nr_irqs; i++) |
141 | irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, | 144 | irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, |
142 | &vf610_mscm_ir_irq_chip, | 145 | &vf610_mscm_ir_irq_chip, |
143 | domain->host_data); | 146 | domain->host_data); |
144 | 147 | ||
145 | gic_data.np = domain->parent->of_node; | 148 | parent_fwspec.fwnode = domain->parent->fwnode; |
146 | 149 | ||
147 | if (mscm_ir_data->is_nvic) { | 150 | if (mscm_ir_data->is_nvic) { |
148 | gic_data.args_count = 1; | 151 | parent_fwspec.param_count = 1; |
149 | gic_data.args[0] = irq_data->args[0]; | 152 | parent_fwspec.param[0] = fwspec->param[0]; |
150 | } else { | 153 | } else { |
151 | gic_data.args_count = 3; | 154 | parent_fwspec.param_count = 3; |
152 | gic_data.args[0] = GIC_SPI; | 155 | parent_fwspec.param[0] = GIC_SPI; |
153 | gic_data.args[1] = irq_data->args[0]; | 156 | parent_fwspec.param[1] = fwspec->param[0]; |
154 | gic_data.args[2] = irq_data->args[1]; | 157 | parent_fwspec.param[2] = fwspec->param[1]; |
155 | } | 158 | } |
156 | 159 | ||
157 | return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_data); | 160 | return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, |
161 | &parent_fwspec); | ||
162 | } | ||
163 | |||
164 | static int vf610_mscm_ir_domain_translate(struct irq_domain *d, | ||
165 | struct irq_fwspec *fwspec, | ||
166 | unsigned long *hwirq, | ||
167 | unsigned int *type) | ||
168 | { | ||
169 | if (WARN_ON(fwspec->param_count < 2)) | ||
170 | return -EINVAL; | ||
171 | *hwirq = fwspec->param[0]; | ||
172 | *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; | ||
173 | return 0; | ||
158 | } | 174 | } |
159 | 175 | ||
160 | static const struct irq_domain_ops mscm_irq_domain_ops = { | 176 | static const struct irq_domain_ops mscm_irq_domain_ops = { |
161 | .xlate = irq_domain_xlate_twocell, | 177 | .translate = vf610_mscm_ir_domain_translate, |
162 | .alloc = vf610_mscm_ir_domain_alloc, | 178 | .alloc = vf610_mscm_ir_domain_alloc, |
163 | .free = irq_domain_free_irqs_common, | 179 | .free = irq_domain_free_irqs_common, |
164 | }; | 180 | }; |
@@ -205,7 +221,8 @@ static int __init vf610_mscm_ir_of_init(struct device_node *node, | |||
205 | goto out_unmap; | 221 | goto out_unmap; |
206 | } | 222 | } |
207 | 223 | ||
208 | if (of_device_is_compatible(domain->parent->of_node, "arm,armv7m-nvic")) | 224 | if (of_device_is_compatible(irq_domain_get_of_node(domain->parent), |
225 | "arm,armv7m-nvic")) | ||
209 | mscm_ir_data->is_nvic = true; | 226 | mscm_ir_data->is_nvic = true; |
210 | 227 | ||
211 | cpu_pm_register_notifier(&mscm_ir_notifier_block); | 228 | cpu_pm_register_notifier(&mscm_ir_notifier_block); |
diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 55317fa9c9dc..0baf626da56a 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c | |||
@@ -579,22 +579,187 @@ err: | |||
579 | } | 579 | } |
580 | } | 580 | } |
581 | 581 | ||
582 | static u32 __of_msi_map_rid(struct device *dev, struct device_node **np, | ||
583 | u32 rid_in) | ||
584 | { | ||
585 | struct device *parent_dev; | ||
586 | struct device_node *msi_controller_node; | ||
587 | struct device_node *msi_np = *np; | ||
588 | u32 map_mask, masked_rid, rid_base, msi_base, rid_len, phandle; | ||
589 | int msi_map_len; | ||
590 | bool matched; | ||
591 | u32 rid_out = rid_in; | ||
592 | const __be32 *msi_map = NULL; | ||
593 | |||
594 | /* | ||
595 | * Walk up the device parent links looking for one with a | ||
596 | * "msi-map" property. | ||
597 | */ | ||
598 | for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent) { | ||
599 | if (!parent_dev->of_node) | ||
600 | continue; | ||
601 | |||
602 | msi_map = of_get_property(parent_dev->of_node, | ||
603 | "msi-map", &msi_map_len); | ||
604 | if (!msi_map) | ||
605 | continue; | ||
606 | |||
607 | if (msi_map_len % (4 * sizeof(__be32))) { | ||
608 | dev_err(parent_dev, "Error: Bad msi-map length: %d\n", | ||
609 | msi_map_len); | ||
610 | return rid_out; | ||
611 | } | ||
612 | /* We have a good parent_dev and msi_map, let's use them. */ | ||
613 | break; | ||
614 | } | ||
615 | if (!msi_map) | ||
616 | return rid_out; | ||
617 | |||
618 | /* The default is to select all bits. */ | ||
619 | map_mask = 0xffffffff; | ||
620 | |||
621 | /* | ||
622 | * Can be overridden by "msi-map-mask" property. If | ||
623 | * of_property_read_u32() fails, the default is used. | ||
624 | */ | ||
625 | of_property_read_u32(parent_dev->of_node, "msi-map-mask", &map_mask); | ||
626 | |||
627 | masked_rid = map_mask & rid_in; | ||
628 | matched = false; | ||
629 | while (!matched && msi_map_len >= 4 * sizeof(__be32)) { | ||
630 | rid_base = be32_to_cpup(msi_map + 0); | ||
631 | phandle = be32_to_cpup(msi_map + 1); | ||
632 | msi_base = be32_to_cpup(msi_map + 2); | ||
633 | rid_len = be32_to_cpup(msi_map + 3); | ||
634 | |||
635 | msi_controller_node = of_find_node_by_phandle(phandle); | ||
636 | |||
637 | matched = (masked_rid >= rid_base && | ||
638 | masked_rid < rid_base + rid_len); | ||
639 | if (msi_np) | ||
640 | matched &= msi_np == msi_controller_node; | ||
641 | |||
642 | if (matched && !msi_np) { | ||
643 | *np = msi_np = msi_controller_node; | ||
644 | break; | ||
645 | } | ||
646 | |||
647 | of_node_put(msi_controller_node); | ||
648 | msi_map_len -= 4 * sizeof(__be32); | ||
649 | msi_map += 4; | ||
650 | } | ||
651 | if (!matched) | ||
652 | return rid_out; | ||
653 | |||
654 | rid_out = masked_rid + msi_base; | ||
655 | dev_dbg(dev, | ||
656 | "msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n", | ||
657 | dev_name(parent_dev), map_mask, rid_base, msi_base, | ||
658 | rid_len, rid_in, rid_out); | ||
659 | |||
660 | return rid_out; | ||
661 | } | ||
662 | |||
582 | /** | 663 | /** |
583 | * of_msi_configure - Set the msi_domain field of a device | 664 | * of_msi_map_rid - Map a MSI requester ID for a device. |
584 | * @dev: device structure to associate with an MSI irq domain | 665 | * @dev: device for which the mapping is to be done. |
585 | * @np: device node for that device | 666 | * @msi_np: device node of the expected msi controller. |
667 | * @rid_in: unmapped MSI requester ID for the device. | ||
668 | * | ||
669 | * Walk up the device hierarchy looking for devices with a "msi-map" | ||
670 | * property. If found, apply the mapping to @rid_in. | ||
671 | * | ||
672 | * Returns the mapped MSI requester ID. | ||
586 | */ | 673 | */ |
587 | void of_msi_configure(struct device *dev, struct device_node *np) | 674 | u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in) |
675 | { | ||
676 | return __of_msi_map_rid(dev, &msi_np, rid_in); | ||
677 | } | ||
678 | |||
679 | static struct irq_domain *__of_get_msi_domain(struct device_node *np, | ||
680 | enum irq_domain_bus_token token) | ||
681 | { | ||
682 | struct irq_domain *d; | ||
683 | |||
684 | d = irq_find_matching_host(np, token); | ||
685 | if (!d) | ||
686 | d = irq_find_host(np); | ||
687 | |||
688 | return d; | ||
689 | } | ||
690 | |||
691 | /** | ||
692 | * of_msi_map_get_device_domain - Use msi-map to find the relevant MSI domain | ||
693 | * @dev: device for which the mapping is to be done. | ||
694 | * @rid: Requester ID for the device. | ||
695 | * | ||
696 | * Walk up the device hierarchy looking for devices with a "msi-map" | ||
697 | * property. | ||
698 | * | ||
699 | * Returns: the MSI domain for this device (or NULL on failure) | ||
700 | */ | ||
701 | struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 rid) | ||
702 | { | ||
703 | struct device_node *np = NULL; | ||
704 | |||
705 | __of_msi_map_rid(dev, &np, rid); | ||
706 | return __of_get_msi_domain(np, DOMAIN_BUS_PCI_MSI); | ||
707 | } | ||
708 | |||
709 | /** | ||
710 | * of_msi_get_domain - Use msi-parent to find the relevant MSI domain | ||
711 | * @dev: device for which the domain is requested | ||
712 | * @np: device node for @dev | ||
713 | * @token: bus type for this domain | ||
714 | * | ||
715 | * Parse the msi-parent property (both the simple and the complex | ||
716 | * versions), and return the corresponding MSI domain. | ||
717 | * | ||
718 | * Returns: the MSI domain for this device (or NULL on failure). | ||
719 | */ | ||
720 | struct irq_domain *of_msi_get_domain(struct device *dev, | ||
721 | struct device_node *np, | ||
722 | enum irq_domain_bus_token token) | ||
588 | { | 723 | { |
589 | struct device_node *msi_np; | 724 | struct device_node *msi_np; |
590 | struct irq_domain *d; | 725 | struct irq_domain *d; |
591 | 726 | ||
727 | /* Check for a single msi-parent property */ | ||
592 | msi_np = of_parse_phandle(np, "msi-parent", 0); | 728 | msi_np = of_parse_phandle(np, "msi-parent", 0); |
593 | if (!msi_np) | 729 | if (msi_np && !of_property_read_bool(msi_np, "#msi-cells")) { |
594 | return; | 730 | d = __of_get_msi_domain(msi_np, token); |
731 | if (!d) | ||
732 | of_node_put(msi_np); | ||
733 | return d; | ||
734 | } | ||
595 | 735 | ||
596 | d = irq_find_matching_host(msi_np, DOMAIN_BUS_PLATFORM_MSI); | 736 | if (token == DOMAIN_BUS_PLATFORM_MSI) { |
597 | if (!d) | 737 | /* Check for the complex msi-parent version */ |
598 | d = irq_find_host(msi_np); | 738 | struct of_phandle_args args; |
599 | dev_set_msi_domain(dev, d); | 739 | int index = 0; |
740 | |||
741 | while (!of_parse_phandle_with_args(np, "msi-parent", | ||
742 | "#msi-cells", | ||
743 | index, &args)) { | ||
744 | d = __of_get_msi_domain(args.np, token); | ||
745 | if (d) | ||
746 | return d; | ||
747 | |||
748 | of_node_put(args.np); | ||
749 | index++; | ||
750 | } | ||
751 | } | ||
752 | |||
753 | return NULL; | ||
754 | } | ||
755 | |||
756 | /** | ||
757 | * of_msi_configure - Set the msi_domain field of a device | ||
758 | * @dev: device structure to associate with an MSI irq domain | ||
759 | * @np: device node for that device | ||
760 | */ | ||
761 | void of_msi_configure(struct device *dev, struct device_node *np) | ||
762 | { | ||
763 | dev_set_msi_domain(dev, | ||
764 | of_msi_get_domain(dev, np, DOMAIN_BUS_PLATFORM_MSI)); | ||
600 | } | 765 | } |
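The msi-map walk added above is easier to follow with a worked value. Below is a minimal usage sketch for of_msi_map_rid(); the device-tree fragment quoted in the comment, the example_map_rid() name and the bus/devfn-based requester ID are illustrative assumptions, not part of the patch.

    /*
     * Sketch only: map a PCI requester ID through any "msi-map" found on
     * the device's parent bus, as __of_msi_map_rid() does above.  The DT
     * fragment this assumes (all values made up) would look like:
     *
     *	pcie@40000000 {
     *		msi-map = <0x0000 &its 0x10000 0x100>;
     *		msi-map-mask = <0xffff>;
     *	};
     */
    #include <linux/of_irq.h>
    #include <linux/pci.h>

    static u32 example_map_rid(struct pci_dev *pdev, struct device_node *msi_np)
    {
    	u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);

    	/*
    	 * With the fragment above, RID 0x0008 is masked to 0x0008,
    	 * falls inside [0x0000, 0x0100) and comes back as 0x10008.
    	 */
    	return of_msi_map_rid(&pdev->dev, msi_np, rid);
    }

pci_msi_domain_get_msi_rid() in drivers/pci/msi.c below performs the same composition, starting from the device's DMA alias rather than the raw bus/devfn.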
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c index e491681daf22..a6456b578269 100644 --- a/drivers/pci/host/pci-xgene-msi.c +++ b/drivers/pci/host/pci-xgene-msi.c | |||
@@ -256,7 +256,7 @@ static int xgene_allocate_domains(struct xgene_msi *msi) | |||
256 | if (!msi->inner_domain) | 256 | if (!msi->inner_domain) |
257 | return -ENOMEM; | 257 | return -ENOMEM; |
258 | 258 | ||
259 | msi->msi_domain = pci_msi_create_irq_domain(msi->node, | 259 | msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(msi->node), |
260 | &xgene_msi_domain_info, | 260 | &xgene_msi_domain_info, |
261 | msi->inner_domain); | 261 | msi->inner_domain); |
262 | 262 | ||
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 4a7da3c3e035..45a51486d080 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/io.h> | 20 | #include <linux/io.h> |
21 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
22 | #include <linux/irqdomain.h> | 22 | #include <linux/irqdomain.h> |
23 | #include <linux/of_irq.h> | ||
23 | 24 | ||
24 | #include "pci.h" | 25 | #include "pci.h" |
25 | 26 | ||
@@ -1250,8 +1251,8 @@ static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info) | |||
1250 | } | 1251 | } |
1251 | 1252 | ||
1252 | /** | 1253 | /** |
1253 | * pci_msi_create_irq_domain - Creat a MSI interrupt domain | 1254 | * pci_msi_create_irq_domain - Create a MSI interrupt domain |
1254 | * @node: Optional device-tree node of the interrupt controller | 1255 | * @fwnode: Optional fwnode of the interrupt controller |
1255 | * @info: MSI domain info | 1256 | * @info: MSI domain info |
1256 | * @parent: Parent irq domain | 1257 | * @parent: Parent irq domain |
1257 | * | 1258 | * |
@@ -1260,7 +1261,7 @@ static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info) | |||
1260 | * Returns: | 1261 | * Returns: |
1261 | * A domain pointer or NULL in case of failure. | 1262 | * A domain pointer or NULL in case of failure. |
1262 | */ | 1263 | */ |
1263 | struct irq_domain *pci_msi_create_irq_domain(struct device_node *node, | 1264 | struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, |
1264 | struct msi_domain_info *info, | 1265 | struct msi_domain_info *info, |
1265 | struct irq_domain *parent) | 1266 | struct irq_domain *parent) |
1266 | { | 1267 | { |
@@ -1271,7 +1272,7 @@ struct irq_domain *pci_msi_create_irq_domain(struct device_node *node, | |||
1271 | if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) | 1272 | if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) |
1272 | pci_msi_domain_update_chip_ops(info); | 1273 | pci_msi_domain_update_chip_ops(info); |
1273 | 1274 | ||
1274 | domain = msi_create_irq_domain(node, info, parent); | 1275 | domain = msi_create_irq_domain(fwnode, info, parent); |
1275 | if (!domain) | 1276 | if (!domain) |
1276 | return NULL; | 1277 | return NULL; |
1277 | 1278 | ||
@@ -1307,14 +1308,14 @@ void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev) | |||
1307 | 1308 | ||
1308 | /** | 1309 | /** |
1309 | * pci_msi_create_default_irq_domain - Create a default MSI interrupt domain | 1310 | * pci_msi_create_default_irq_domain - Create a default MSI interrupt domain |
1310 | * @node: Optional device-tree node of the interrupt controller | 1311 | * @fwnode: Optional fwnode of the interrupt controller |
1311 | * @info: MSI domain info | 1312 | * @info: MSI domain info |
1312 | * @parent: Parent irq domain | 1313 | * @parent: Parent irq domain |
1313 | * | 1314 | * |
1314 | * Returns: A domain pointer or NULL in case of failure. If successful | 1315 | * Returns: A domain pointer or NULL in case of failure. If successful |
1315 | * the default PCI/MSI irqdomain pointer is updated. | 1316 | * the default PCI/MSI irqdomain pointer is updated. |
1316 | */ | 1317 | */ |
1317 | struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node, | 1318 | struct irq_domain *pci_msi_create_default_irq_domain(struct fwnode_handle *fwnode, |
1318 | struct msi_domain_info *info, struct irq_domain *parent) | 1319 | struct msi_domain_info *info, struct irq_domain *parent) |
1319 | { | 1320 | { |
1320 | struct irq_domain *domain; | 1321 | struct irq_domain *domain; |
@@ -1324,11 +1325,59 @@ struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node, | |||
1324 | pr_err("PCI: default irq domain for PCI MSI has already been created.\n"); | 1325 | pr_err("PCI: default irq domain for PCI MSI has already been created.\n"); |
1325 | domain = NULL; | 1326 | domain = NULL; |
1326 | } else { | 1327 | } else { |
1327 | domain = pci_msi_create_irq_domain(node, info, parent); | 1328 | domain = pci_msi_create_irq_domain(fwnode, info, parent); |
1328 | pci_msi_default_domain = domain; | 1329 | pci_msi_default_domain = domain; |
1329 | } | 1330 | } |
1330 | mutex_unlock(&pci_msi_domain_lock); | 1331 | mutex_unlock(&pci_msi_domain_lock); |
1331 | 1332 | ||
1332 | return domain; | 1333 | return domain; |
1333 | } | 1334 | } |
1335 | |||
1336 | static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data) | ||
1337 | { | ||
1338 | u32 *pa = data; | ||
1339 | |||
1340 | *pa = alias; | ||
1341 | return 0; | ||
1342 | } | ||
1343 | /** | ||
1344 | * pci_msi_domain_get_msi_rid - Get the MSI requester id (RID) | ||
1345 | * @domain: The interrupt domain | ||
1346 | * @pdev: The PCI device. | ||
1347 | * | ||
1348 | * The RID for a device is formed from the alias, with a | ||
1349 | * firmware-supplied mapping applied. | ||
1350 | * | ||
1351 | * Returns: The RID. | ||
1352 | */ | ||
1353 | u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev) | ||
1354 | { | ||
1355 | struct device_node *of_node; | ||
1356 | u32 rid = 0; | ||
1357 | |||
1358 | pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid); | ||
1359 | |||
1360 | of_node = irq_domain_get_of_node(domain); | ||
1361 | if (of_node) | ||
1362 | rid = of_msi_map_rid(&pdev->dev, of_node, rid); | ||
1363 | |||
1364 | return rid; | ||
1365 | } | ||
1366 | |||
1367 | /** | ||
1368 | * pci_msi_get_device_domain - Get the MSI domain for a given PCI device | ||
1369 | * @pdev: The PCI device | ||
1370 | * | ||
1371 | * Use the firmware data to find a device-specific MSI domain | ||
1372 | * (i.e. not one that is set as a default). | ||
1373 | * | ||
1374 | * Returns: The corresponding MSI domain or NULL if none has been found. | ||
1375 | */ | ||
1376 | struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev) | ||
1377 | { | ||
1378 | u32 rid = 0; | ||
1379 | |||
1380 | pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid); | ||
1381 | return of_msi_map_get_device_domain(&pdev->dev, rid); | ||
1382 | } | ||
1334 | #endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ | 1383 | #endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ |
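For context on how the new RID helpers get consumed, here is a hedged sketch of an MSI controller driver behind a PCI/MSI domain deriving the identifier it would program into hardware; example_msi_prepare() and struct example_msi_chip_data are hypothetical names, and only pci_msi_domain_get_msi_rid() comes from the patch above.

    #include <linux/errno.h>
    #include <linux/irqdomain.h>
    #include <linux/msi.h>
    #include <linux/pci.h>

    /* Hypothetical per-controller data; only 'devid' matters here. */
    struct example_msi_chip_data {
    	u32 devid;
    };

    static int example_msi_prepare(struct irq_domain *domain, struct device *dev,
    			       struct example_msi_chip_data *data)
    {
    	struct pci_dev *pdev;

    	if (!dev_is_pci(dev))
    		return -EINVAL;

    	pdev = to_pci_dev(dev);
    	/* DMA alias of the device, remapped through any msi-map */
    	data->devid = pci_msi_domain_get_msi_rid(domain, pdev);
    	return 0;
    }

pci_msi_get_device_domain() is the probe-time counterpart: pci_set_msi_domain() in drivers/pci/probe.c below uses it to pick a per-device domain before falling back to the host bridge default.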
diff --git a/drivers/pci/of.c b/drivers/pci/of.c index 2e99a500cb83..e112da11630e 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/pci.h> | 14 | #include <linux/pci.h> |
15 | #include <linux/of.h> | 15 | #include <linux/of.h> |
16 | #include <linux/of_irq.h> | ||
16 | #include <linux/of_pci.h> | 17 | #include <linux/of_pci.h> |
17 | #include "pci.h" | 18 | #include "pci.h" |
18 | 19 | ||
@@ -64,27 +65,25 @@ struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus) | |||
64 | struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus) | 65 | struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus) |
65 | { | 66 | { |
66 | #ifdef CONFIG_IRQ_DOMAIN | 67 | #ifdef CONFIG_IRQ_DOMAIN |
67 | struct device_node *np; | ||
68 | struct irq_domain *d; | 68 | struct irq_domain *d; |
69 | 69 | ||
70 | if (!bus->dev.of_node) | 70 | if (!bus->dev.of_node) |
71 | return NULL; | 71 | return NULL; |
72 | 72 | ||
73 | /* Start looking for a phandle to an MSI controller. */ | 73 | /* Start looking for a phandle to an MSI controller. */ |
74 | np = of_parse_phandle(bus->dev.of_node, "msi-parent", 0); | 74 | d = of_msi_get_domain(&bus->dev, bus->dev.of_node, DOMAIN_BUS_PCI_MSI); |
75 | if (d) | ||
76 | return d; | ||
75 | 77 | ||
76 | /* | 78 | /* |
77 | * If we don't have an msi-parent property, look for a domain | 79 | * If we don't have an msi-parent property, look for a domain |
78 | * directly attached to the host bridge. | 80 | * directly attached to the host bridge. |
79 | */ | 81 | */ |
80 | if (!np) | 82 | d = irq_find_matching_host(bus->dev.of_node, DOMAIN_BUS_PCI_MSI); |
81 | np = bus->dev.of_node; | ||
82 | |||
83 | d = irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI); | ||
84 | if (d) | 83 | if (d) |
85 | return d; | 84 | return d; |
86 | 85 | ||
87 | return irq_find_host(np); | 86 | return irq_find_host(bus->dev.of_node); |
88 | #else | 87 | #else |
89 | return NULL; | 88 | return NULL; |
90 | #endif | 89 | #endif |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 8361d27e5eca..f14a970b61fa 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -1622,15 +1622,48 @@ static void pci_init_capabilities(struct pci_dev *dev) | |||
1622 | pci_enable_acs(dev); | 1622 | pci_enable_acs(dev); |
1623 | } | 1623 | } |
1624 | 1624 | ||
1625 | /* | ||
1626 | * This is the equivalent of pci_host_bridge_msi_domain that acts on | ||
1627 | * devices. Firmware interfaces that can select the MSI domain on a | ||
1628 | * per-device basis should be called from here. | ||
1629 | */ | ||
1630 | static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev) | ||
1631 | { | ||
1632 | struct irq_domain *d; | ||
1633 | |||
1634 | /* | ||
1635 | * If a domain has been set through the pcibios_add_device | ||
1636 | * callback, then this is the one (platform code knows best). | ||
1637 | */ | ||
1638 | d = dev_get_msi_domain(&dev->dev); | ||
1639 | if (d) | ||
1640 | return d; | ||
1641 | |||
1642 | /* | ||
1643 | * Let's see if we have a firmware interface able to provide | ||
1644 | * the domain. | ||
1645 | */ | ||
1646 | d = pci_msi_get_device_domain(dev); | ||
1647 | if (d) | ||
1648 | return d; | ||
1649 | |||
1650 | return NULL; | ||
1651 | } | ||
1652 | |||
1625 | static void pci_set_msi_domain(struct pci_dev *dev) | 1653 | static void pci_set_msi_domain(struct pci_dev *dev) |
1626 | { | 1654 | { |
1655 | struct irq_domain *d; | ||
1656 | |||
1627 | /* | 1657 | /* |
1628 | * If no domain has been set through the pcibios_add_device | 1658 | * If the platform or firmware interfaces cannot supply a |
1629 | * callback, inherit the default from the bus device. | 1659 | * device-specific MSI domain, then inherit the default domain |
1660 | * from the host bridge itself. | ||
1630 | */ | 1661 | */ |
1631 | if (!dev_get_msi_domain(&dev->dev)) | 1662 | d = pci_dev_msi_domain(dev); |
1632 | dev_set_msi_domain(&dev->dev, | 1663 | if (!d) |
1633 | dev_get_msi_domain(&dev->bus->dev)); | 1664 | d = dev_get_msi_domain(&dev->bus->dev); |
1665 | |||
1666 | dev_set_msi_domain(&dev->dev, d); | ||
1634 | } | 1667 | } |
1635 | 1668 | ||
1636 | void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) | 1669 | void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) |
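As the comment in pci_dev_msi_domain() notes, a domain installed from the pcibios_add_device() callback still wins. A minimal sketch of such a platform override follows; platform_lookup_msi_domain() is a purely hypothetical helper, declared here only so the sketch is self-contained.

    #include <linux/irqdomain.h>
    #include <linux/msi.h>
    #include <linux/pci.h>

    /* Hypothetical platform-specific lookup, declared for the sketch only. */
    struct irq_domain *platform_lookup_msi_domain(struct pci_dev *dev);

    int pcibios_add_device(struct pci_dev *dev)
    {
    	struct irq_domain *d = platform_lookup_msi_domain(dev);

    	if (d)
    		dev_set_msi_domain(&dev->dev, d);
    	return 0;
    }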
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c index 4a3cf9ba152f..fb36810ae89a 100644 --- a/drivers/spmi/spmi-pmic-arb.c +++ b/drivers/spmi/spmi-pmic-arb.c | |||
@@ -657,7 +657,7 @@ static int qpnpint_irq_domain_dt_translate(struct irq_domain *d, | |||
657 | "intspec[0] 0x%1x intspec[1] 0x%02x intspec[2] 0x%02x\n", | 657 | "intspec[0] 0x%1x intspec[1] 0x%02x intspec[2] 0x%02x\n", |
658 | intspec[0], intspec[1], intspec[2]); | 658 | intspec[0], intspec[1], intspec[2]); |
659 | 659 | ||
660 | if (d->of_node != controller) | 660 | if (irq_domain_get_of_node(d) != controller) |
661 | return -EINVAL; | 661 | return -EINVAL; |
662 | if (intsize != 4) | 662 | if (intsize != 4) |
663 | return -EINVAL; | 663 | return -EINVAL; |
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 4e14dac282bb..6a3538ef7275 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h | |||
@@ -282,7 +282,7 @@ struct vgic_v2_cpu_if { | |||
282 | }; | 282 | }; |
283 | 283 | ||
284 | struct vgic_v3_cpu_if { | 284 | struct vgic_v3_cpu_if { |
285 | #ifdef CONFIG_ARM_GIC_V3 | 285 | #ifdef CONFIG_KVM_ARM_VGIC_V3 |
286 | u32 vgic_hcr; | 286 | u32 vgic_hcr; |
287 | u32 vgic_vmcr; | 287 | u32 vgic_vmcr; |
288 | u32 vgic_sre; /* Restored only, change ignored */ | 288 | u32 vgic_sre; /* Restored only, change ignored */ |
@@ -364,7 +364,7 @@ void kvm_vgic_set_phys_irq_active(struct irq_phys_map *map, bool active); | |||
364 | int vgic_v2_probe(struct device_node *vgic_node, | 364 | int vgic_v2_probe(struct device_node *vgic_node, |
365 | const struct vgic_ops **ops, | 365 | const struct vgic_ops **ops, |
366 | const struct vgic_params **params); | 366 | const struct vgic_params **params); |
367 | #ifdef CONFIG_ARM_GIC_V3 | 367 | #ifdef CONFIG_KVM_ARM_VGIC_V3 |
368 | int vgic_v3_probe(struct device_node *vgic_node, | 368 | int vgic_v3_probe(struct device_node *vgic_node, |
369 | const struct vgic_ops **ops, | 369 | const struct vgic_ops **ops, |
370 | const struct vgic_params **params); | 370 | const struct vgic_params **params); |
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 43856d19cf4d..d863e12bbead 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
@@ -201,6 +201,9 @@ int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity | |||
201 | int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); | 201 | int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); |
202 | int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); | 202 | int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); |
203 | 203 | ||
204 | void acpi_set_irq_model(enum acpi_irq_model_id model, | ||
205 | struct fwnode_handle *fwnode); | ||
206 | |||
204 | #ifdef CONFIG_X86_IO_APIC | 207 | #ifdef CONFIG_X86_IO_APIC |
205 | extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity); | 208 | extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity); |
206 | #else | 209 | #else |
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index 0408545bce42..37ec668546ab 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h | |||
@@ -17,6 +17,7 @@ enum fwnode_type { | |||
17 | FWNODE_OF, | 17 | FWNODE_OF, |
18 | FWNODE_ACPI, | 18 | FWNODE_ACPI, |
19 | FWNODE_PDATA, | 19 | FWNODE_PDATA, |
20 | FWNODE_IRQCHIP, | ||
20 | }; | 21 | }; |
21 | 22 | ||
22 | struct fwnode_handle { | 23 | struct fwnode_handle { |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index be7e75c945e9..ad16809c8596 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -102,6 +102,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *); | |||
102 | * @flags: flags (see IRQF_* above) | 102 | * @flags: flags (see IRQF_* above) |
103 | * @thread_fn: interrupt handler function for threaded interrupts | 103 | * @thread_fn: interrupt handler function for threaded interrupts |
104 | * @thread: thread pointer for threaded interrupts | 104 | * @thread: thread pointer for threaded interrupts |
105 | * @secondary: pointer to secondary irqaction (force threading) | ||
105 | * @thread_flags: flags related to @thread | 106 | * @thread_flags: flags related to @thread |
106 | * @thread_mask: bitmask for keeping track of @thread activity | 107 | * @thread_mask: bitmask for keeping track of @thread activity |
107 | * @dir: pointer to the proc/irq/NN/name entry | 108 | * @dir: pointer to the proc/irq/NN/name entry |
@@ -113,6 +114,7 @@ struct irqaction { | |||
113 | struct irqaction *next; | 114 | struct irqaction *next; |
114 | irq_handler_t thread_fn; | 115 | irq_handler_t thread_fn; |
115 | struct task_struct *thread; | 116 | struct task_struct *thread; |
117 | struct irqaction *secondary; | ||
116 | unsigned int irq; | 118 | unsigned int irq; |
117 | unsigned int flags; | 119 | unsigned int flags; |
118 | unsigned long thread_flags; | 120 | unsigned long thread_flags; |
diff --git a/include/linux/irq.h b/include/linux/irq.h index 11bf09288ddb..3c1c96786248 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -67,11 +67,12 @@ enum irqchip_irq_state; | |||
67 | * request/setup_irq() | 67 | * request/setup_irq() |
68 | * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) | 68 | * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) |
69 | * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context | 69 | * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context |
70 | * IRQ_NESTED_TRHEAD - Interrupt nests into another thread | 70 | * IRQ_NESTED_THREAD - Interrupt nests into another thread |
71 | * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable | 71 | * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable |
72 | * IRQ_IS_POLLED - Always polled by another interrupt. Exclude | 72 | * IRQ_IS_POLLED - Always polled by another interrupt. Exclude |
73 | * it from the spurious interrupt detection | 73 | * it from the spurious interrupt detection |
74 | * mechanism and from core side polling. | 74 | * mechanism and from core side polling. |
75 | * IRQ_DISABLE_UNLAZY - Disable lazy irq disable | ||
75 | */ | 76 | */ |
76 | enum { | 77 | enum { |
77 | IRQ_TYPE_NONE = 0x00000000, | 78 | IRQ_TYPE_NONE = 0x00000000, |
@@ -97,13 +98,14 @@ enum { | |||
97 | IRQ_NOTHREAD = (1 << 16), | 98 | IRQ_NOTHREAD = (1 << 16), |
98 | IRQ_PER_CPU_DEVID = (1 << 17), | 99 | IRQ_PER_CPU_DEVID = (1 << 17), |
99 | IRQ_IS_POLLED = (1 << 18), | 100 | IRQ_IS_POLLED = (1 << 18), |
101 | IRQ_DISABLE_UNLAZY = (1 << 19), | ||
100 | }; | 102 | }; |
101 | 103 | ||
102 | #define IRQF_MODIFY_MASK \ | 104 | #define IRQF_MODIFY_MASK \ |
103 | (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ | 105 | (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ |
104 | IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ | 106 | IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ |
105 | IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ | 107 | IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ |
106 | IRQ_IS_POLLED) | 108 | IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY) |
107 | 109 | ||
108 | #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) | 110 | #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) |
109 | 111 | ||
@@ -297,21 +299,6 @@ static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d) | |||
297 | __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU; | 299 | __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU; |
298 | } | 300 | } |
299 | 301 | ||
300 | /* | ||
301 | * Functions for chained handlers which can be enabled/disabled by the | ||
302 | * standard disable_irq/enable_irq calls. Must be called with | ||
303 | * irq_desc->lock held. | ||
304 | */ | ||
305 | static inline void irqd_set_chained_irq_inprogress(struct irq_data *d) | ||
306 | { | ||
307 | __irqd_to_state(d) |= IRQD_IRQ_INPROGRESS; | ||
308 | } | ||
309 | |||
310 | static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d) | ||
311 | { | ||
312 | __irqd_to_state(d) &= ~IRQD_IRQ_INPROGRESS; | ||
313 | } | ||
314 | |||
315 | static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) | 302 | static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) |
316 | { | 303 | { |
317 | return d->hwirq; | 304 | return d->hwirq; |
@@ -452,6 +439,8 @@ extern int irq_set_affinity_locked(struct irq_data *data, | |||
452 | const struct cpumask *cpumask, bool force); | 439 | const struct cpumask *cpumask, bool force); |
453 | extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info); | 440 | extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info); |
454 | 441 | ||
442 | extern void irq_migrate_all_off_this_cpu(void); | ||
443 | |||
455 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) | 444 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) |
456 | void irq_move_irq(struct irq_data *data); | 445 | void irq_move_irq(struct irq_data *data); |
457 | void irq_move_masked_irq(struct irq_data *data); | 446 | void irq_move_masked_irq(struct irq_data *data); |
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 9eeeb9589acf..c9ae0c6ec050 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h | |||
@@ -18,8 +18,6 @@ | |||
18 | #ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H | 18 | #ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H |
19 | #define __LINUX_IRQCHIP_ARM_GIC_V3_H | 19 | #define __LINUX_IRQCHIP_ARM_GIC_V3_H |
20 | 20 | ||
21 | #include <asm/sysreg.h> | ||
22 | |||
23 | /* | 21 | /* |
24 | * Distributor registers. We assume we're running non-secure, with ARE | 22 | * Distributor registers. We assume we're running non-secure, with ARE |
25 | * being set. Secure-only and non-ARE registers are not described. | 23 | * being set. Secure-only and non-ARE registers are not described. |
@@ -231,6 +229,7 @@ | |||
231 | #define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT) | 229 | #define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT) |
232 | #define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT) | 230 | #define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT) |
233 | #define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT) | 231 | #define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT) |
232 | #define GITS_BASER_PAGES_MAX 256 | ||
234 | 233 | ||
235 | #define GITS_BASER_TYPE_NONE 0 | 234 | #define GITS_BASER_TYPE_NONE 0 |
236 | #define GITS_BASER_TYPE_DEVICE 1 | 235 | #define GITS_BASER_TYPE_DEVICE 1 |
@@ -266,16 +265,16 @@ | |||
266 | /* | 265 | /* |
267 | * Hypervisor interface registers (SRE only) | 266 | * Hypervisor interface registers (SRE only) |
268 | */ | 267 | */ |
269 | #define ICH_LR_VIRTUAL_ID_MASK ((1UL << 32) - 1) | 268 | #define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1) |
270 | 269 | ||
271 | #define ICH_LR_EOI (1UL << 41) | 270 | #define ICH_LR_EOI (1ULL << 41) |
272 | #define ICH_LR_GROUP (1UL << 60) | 271 | #define ICH_LR_GROUP (1ULL << 60) |
273 | #define ICH_LR_HW (1UL << 61) | 272 | #define ICH_LR_HW (1ULL << 61) |
274 | #define ICH_LR_STATE (3UL << 62) | 273 | #define ICH_LR_STATE (3ULL << 62) |
275 | #define ICH_LR_PENDING_BIT (1UL << 62) | 274 | #define ICH_LR_PENDING_BIT (1ULL << 62) |
276 | #define ICH_LR_ACTIVE_BIT (1UL << 63) | 275 | #define ICH_LR_ACTIVE_BIT (1ULL << 63) |
277 | #define ICH_LR_PHYS_ID_SHIFT 32 | 276 | #define ICH_LR_PHYS_ID_SHIFT 32 |
278 | #define ICH_LR_PHYS_ID_MASK (0x3ffUL << ICH_LR_PHYS_ID_SHIFT) | 277 | #define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT) |
279 | 278 | ||
280 | #define ICH_MISR_EOI (1 << 0) | 279 | #define ICH_MISR_EOI (1 << 0) |
281 | #define ICH_MISR_U (1 << 1) | 280 | #define ICH_MISR_U (1 << 1) |
@@ -292,19 +291,8 @@ | |||
292 | #define ICH_VMCR_PMR_SHIFT 24 | 291 | #define ICH_VMCR_PMR_SHIFT 24 |
293 | #define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) | 292 | #define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) |
294 | 293 | ||
295 | #define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1) | ||
296 | #define ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1) | ||
297 | #define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0) | ||
298 | #define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5) | ||
299 | #define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0) | ||
300 | #define ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4) | ||
301 | #define ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5) | ||
302 | #define ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7) | ||
303 | |||
304 | #define ICC_IAR1_EL1_SPURIOUS 0x3ff | 294 | #define ICC_IAR1_EL1_SPURIOUS 0x3ff |
305 | 295 | ||
306 | #define ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5) | ||
307 | |||
308 | #define ICC_SRE_EL2_SRE (1 << 0) | 296 | #define ICC_SRE_EL2_SRE (1 << 0) |
309 | #define ICC_SRE_EL2_ENABLE (1 << 3) | 297 | #define ICC_SRE_EL2_ENABLE (1 << 3) |
310 | 298 | ||
@@ -320,54 +308,10 @@ | |||
320 | #define ICC_SGI1R_AFFINITY_3_SHIFT 48 | 308 | #define ICC_SGI1R_AFFINITY_3_SHIFT 48 |
321 | #define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) | 309 | #define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) |
322 | 310 | ||
323 | /* | 311 | #include <asm/arch_gicv3.h> |
324 | * System register definitions | ||
325 | */ | ||
326 | #define ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4) | ||
327 | #define ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0) | ||
328 | #define ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1) | ||
329 | #define ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2) | ||
330 | #define ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3) | ||
331 | #define ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5) | ||
332 | #define ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7) | ||
333 | |||
334 | #define __LR0_EL2(x) sys_reg(3, 4, 12, 12, x) | ||
335 | #define __LR8_EL2(x) sys_reg(3, 4, 12, 13, x) | ||
336 | |||
337 | #define ICH_LR0_EL2 __LR0_EL2(0) | ||
338 | #define ICH_LR1_EL2 __LR0_EL2(1) | ||
339 | #define ICH_LR2_EL2 __LR0_EL2(2) | ||
340 | #define ICH_LR3_EL2 __LR0_EL2(3) | ||
341 | #define ICH_LR4_EL2 __LR0_EL2(4) | ||
342 | #define ICH_LR5_EL2 __LR0_EL2(5) | ||
343 | #define ICH_LR6_EL2 __LR0_EL2(6) | ||
344 | #define ICH_LR7_EL2 __LR0_EL2(7) | ||
345 | #define ICH_LR8_EL2 __LR8_EL2(0) | ||
346 | #define ICH_LR9_EL2 __LR8_EL2(1) | ||
347 | #define ICH_LR10_EL2 __LR8_EL2(2) | ||
348 | #define ICH_LR11_EL2 __LR8_EL2(3) | ||
349 | #define ICH_LR12_EL2 __LR8_EL2(4) | ||
350 | #define ICH_LR13_EL2 __LR8_EL2(5) | ||
351 | #define ICH_LR14_EL2 __LR8_EL2(6) | ||
352 | #define ICH_LR15_EL2 __LR8_EL2(7) | ||
353 | |||
354 | #define __AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x) | ||
355 | #define ICH_AP0R0_EL2 __AP0Rx_EL2(0) | ||
356 | #define ICH_AP0R1_EL2 __AP0Rx_EL2(1) | ||
357 | #define ICH_AP0R2_EL2 __AP0Rx_EL2(2) | ||
358 | #define ICH_AP0R3_EL2 __AP0Rx_EL2(3) | ||
359 | |||
360 | #define __AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x) | ||
361 | #define ICH_AP1R0_EL2 __AP1Rx_EL2(0) | ||
362 | #define ICH_AP1R1_EL2 __AP1Rx_EL2(1) | ||
363 | #define ICH_AP1R2_EL2 __AP1Rx_EL2(2) | ||
364 | #define ICH_AP1R3_EL2 __AP1Rx_EL2(3) | ||
365 | 312 | ||
366 | #ifndef __ASSEMBLY__ | 313 | #ifndef __ASSEMBLY__ |
367 | 314 | ||
368 | #include <linux/stringify.h> | ||
369 | #include <asm/msi.h> | ||
370 | |||
371 | /* | 315 | /* |
372 | * We need a value to serve as a irq-type for LPIs. Choose one that will | 316 | * We need a value to serve as a irq-type for LPIs. Choose one that will |
373 | * hopefully pique the interest of the reviewer. | 317 | * hopefully pique the interest of the reviewer. |
@@ -385,23 +329,26 @@ struct rdists { | |||
385 | u64 flags; | 329 | u64 flags; |
386 | }; | 330 | }; |
387 | 331 | ||
388 | static inline void gic_write_eoir(u64 irq) | ||
389 | { | ||
390 | asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq)); | ||
391 | isb(); | ||
392 | } | ||
393 | |||
394 | static inline void gic_write_dir(u64 irq) | ||
395 | { | ||
396 | asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" (irq)); | ||
397 | isb(); | ||
398 | } | ||
399 | |||
400 | struct irq_domain; | 332 | struct irq_domain; |
401 | int its_cpu_init(void); | 333 | int its_cpu_init(void); |
402 | int its_init(struct device_node *node, struct rdists *rdists, | 334 | int its_init(struct device_node *node, struct rdists *rdists, |
403 | struct irq_domain *domain); | 335 | struct irq_domain *domain); |
404 | 336 | ||
337 | static inline bool gic_enable_sre(void) | ||
338 | { | ||
339 | u32 val; | ||
340 | |||
341 | val = gic_read_sre(); | ||
342 | if (val & ICC_SRE_EL1_SRE) | ||
343 | return true; | ||
344 | |||
345 | val |= ICC_SRE_EL1_SRE; | ||
346 | gic_write_sre(val); | ||
347 | val = gic_read_sre(); | ||
348 | |||
349 | return !!(val & ICC_SRE_EL1_SRE); | ||
350 | } | ||
351 | |||
405 | #endif | 352 | #endif |
406 | 353 | ||
407 | #endif | 354 | #endif |
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index b8901dfd9e95..bae69e5d693c 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h | |||
@@ -100,16 +100,11 @@ | |||
100 | 100 | ||
101 | struct device_node; | 101 | struct device_node; |
102 | 102 | ||
103 | void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *, | ||
104 | u32 offset, struct device_node *); | ||
105 | void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); | 103 | void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); |
106 | int gic_cpu_if_down(unsigned int gic_nr); | 104 | int gic_cpu_if_down(unsigned int gic_nr); |
107 | 105 | ||
108 | static inline void gic_init(unsigned int nr, int start, | 106 | void gic_init(unsigned int nr, int start, |
109 | void __iomem *dist , void __iomem *cpu) | 107 | void __iomem *dist , void __iomem *cpu); |
110 | { | ||
111 | gic_init_bases(nr, start, dist, cpu, 0, NULL); | ||
112 | } | ||
113 | 108 | ||
114 | int gicv2m_of_init(struct device_node *node, struct irq_domain *parent); | 109 | int gicv2m_of_init(struct device_node *node, struct irq_domain *parent); |
115 | 110 | ||
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index f644fdb06dd6..d5e5c5bef28c 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h | |||
@@ -5,9 +5,10 @@ | |||
5 | * helpful for interrupt controllers to implement mapping between hardware | 5 | * helpful for interrupt controllers to implement mapping between hardware |
6 | * irq numbers and the Linux irq number space. | 6 | * irq numbers and the Linux irq number space. |
7 | * | 7 | * |
8 | * irq_domains also have a hook for translating device tree interrupt | 8 | * irq_domains also have hooks for translating device tree or other |
9 | * representation into a hardware irq number that can be mapped back to a | 9 | * firmware interrupt representations into a hardware irq number that |
10 | * Linux irq number without any extra platform support code. | 10 | * can be mapped back to a Linux irq number without any extra platform |
11 | * support code. | ||
11 | * | 12 | * |
12 | * Interrupt controller "domain" data structure. This could be defined as a | 13 | * Interrupt controller "domain" data structure. This could be defined as a |
13 | * irq domain controller. That is, it handles the mapping between hardware | 14 | * irq domain controller. That is, it handles the mapping between hardware |
@@ -17,16 +18,12 @@ | |||
17 | * model). It's the domain callbacks that are responsible for setting the | 18 | * model). It's the domain callbacks that are responsible for setting the |
18 | * irq_chip on a given irq_desc after it's been mapped. | 19 | * irq_chip on a given irq_desc after it's been mapped. |
19 | * | 20 | * |
20 | * The host code and data structures are agnostic to whether or not | 21 | * The host code and data structures use a fwnode_handle pointer to |
21 | * we use an open firmware device-tree. We do have references to struct | 22 | * identify the domain. In some cases, and in order to preserve source |
22 | * device_node in two places: in irq_find_host() to find the host matching | 23 | * code compatibility, this fwnode pointer is "upgraded" to a DT |
23 | * a given interrupt controller node, and of course as an argument to its | 24 | * device_node. For those firmware infrastructures that do not provide |
24 | * counterpart domain->ops->match() callback. However, those are treated as | 25 | * a unique identifier for an interrupt controller, the irq_domain |
25 | * generic pointers by the core and the fact that it's actually a device-node | 26 | * code offers a fwnode allocator. |
26 | * pointer is purely a convention between callers and implementation. This | ||
27 | * code could thus be used on other architectures by replacing those two | ||
28 | * by some sort of arch-specific void * "token" used to identify interrupt | ||
29 | * controllers. | ||
30 | */ | 27 | */ |
31 | 28 | ||
32 | #ifndef _LINUX_IRQDOMAIN_H | 29 | #ifndef _LINUX_IRQDOMAIN_H |
@@ -34,6 +31,7 @@ | |||
34 | 31 | ||
35 | #include <linux/types.h> | 32 | #include <linux/types.h> |
36 | #include <linux/irqhandler.h> | 33 | #include <linux/irqhandler.h> |
34 | #include <linux/of.h> | ||
37 | #include <linux/radix-tree.h> | 35 | #include <linux/radix-tree.h> |
38 | 36 | ||
39 | struct device_node; | 37 | struct device_node; |
@@ -45,6 +43,24 @@ struct irq_data; | |||
45 | /* Number of irqs reserved for a legacy isa controller */ | 43 | /* Number of irqs reserved for a legacy isa controller */ |
46 | #define NUM_ISA_INTERRUPTS 16 | 44 | #define NUM_ISA_INTERRUPTS 16 |
47 | 45 | ||
46 | #define IRQ_DOMAIN_IRQ_SPEC_PARAMS 16 | ||
47 | |||
48 | /** | ||
49 | * struct irq_fwspec - generic IRQ specifier structure | ||
50 | * | ||
51 | * @fwnode: Pointer to a firmware-specific descriptor | ||
52 | * @param_count: Number of device-specific parameters | ||
53 | * @param: Device-specific parameters | ||
54 | * | ||
55 | * This structure, directly modeled after of_phandle_args, is used to | ||
56 | * pass a device-specific description of an interrupt. | ||
57 | */ | ||
58 | struct irq_fwspec { | ||
59 | struct fwnode_handle *fwnode; | ||
60 | int param_count; | ||
61 | u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS]; | ||
62 | }; | ||
63 | |||
48 | /* | 64 | /* |
49 | * Should several domains have the same device node, but serve | 65 | * Should several domains have the same device node, but serve |
50 | * different purposes (for example one domain is for PCI/MSI, and the | 66 | * different purposes (for example one domain is for PCI/MSI, and the |
@@ -91,6 +107,8 @@ struct irq_domain_ops { | |||
91 | unsigned int nr_irqs); | 107 | unsigned int nr_irqs); |
92 | void (*activate)(struct irq_domain *d, struct irq_data *irq_data); | 108 | void (*activate)(struct irq_domain *d, struct irq_data *irq_data); |
93 | void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); | 109 | void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); |
110 | int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec, | ||
111 | unsigned long *out_hwirq, unsigned int *out_type); | ||
94 | #endif | 112 | #endif |
95 | }; | 113 | }; |
96 | 114 | ||
@@ -130,7 +148,7 @@ struct irq_domain { | |||
130 | unsigned int flags; | 148 | unsigned int flags; |
131 | 149 | ||
132 | /* Optional data */ | 150 | /* Optional data */ |
133 | struct device_node *of_node; | 151 | struct fwnode_handle *fwnode; |
134 | enum irq_domain_bus_token bus_token; | 152 | enum irq_domain_bus_token bus_token; |
135 | struct irq_domain_chip_generic *gc; | 153 | struct irq_domain_chip_generic *gc; |
136 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | 154 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY |
@@ -163,11 +181,13 @@ enum { | |||
163 | 181 | ||
164 | static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d) | 182 | static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d) |
165 | { | 183 | { |
166 | return d->of_node; | 184 | return to_of_node(d->fwnode); |
167 | } | 185 | } |
168 | 186 | ||
169 | #ifdef CONFIG_IRQ_DOMAIN | 187 | #ifdef CONFIG_IRQ_DOMAIN |
170 | struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, | 188 | struct fwnode_handle *irq_domain_alloc_fwnode(void *data); |
189 | void irq_domain_free_fwnode(struct fwnode_handle *fwnode); | ||
190 | struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size, | ||
171 | irq_hw_number_t hwirq_max, int direct_max, | 191 | irq_hw_number_t hwirq_max, int direct_max, |
172 | const struct irq_domain_ops *ops, | 192 | const struct irq_domain_ops *ops, |
173 | void *host_data); | 193 | void *host_data); |
@@ -182,10 +202,21 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, | |||
182 | irq_hw_number_t first_hwirq, | 202 | irq_hw_number_t first_hwirq, |
183 | const struct irq_domain_ops *ops, | 203 | const struct irq_domain_ops *ops, |
184 | void *host_data); | 204 | void *host_data); |
185 | extern struct irq_domain *irq_find_matching_host(struct device_node *node, | 205 | extern struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode, |
186 | enum irq_domain_bus_token bus_token); | 206 | enum irq_domain_bus_token bus_token); |
187 | extern void irq_set_default_host(struct irq_domain *host); | 207 | extern void irq_set_default_host(struct irq_domain *host); |
188 | 208 | ||
209 | static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node) | ||
210 | { | ||
211 | return node ? &node->fwnode : NULL; | ||
212 | } | ||
213 | |||
214 | static inline struct irq_domain *irq_find_matching_host(struct device_node *node, | ||
215 | enum irq_domain_bus_token bus_token) | ||
216 | { | ||
217 | return irq_find_matching_fwnode(of_node_to_fwnode(node), bus_token); | ||
218 | } | ||
219 | |||
189 | static inline struct irq_domain *irq_find_host(struct device_node *node) | 220 | static inline struct irq_domain *irq_find_host(struct device_node *node) |
190 | { | 221 | { |
191 | return irq_find_matching_host(node, DOMAIN_BUS_ANY); | 222 | return irq_find_matching_host(node, DOMAIN_BUS_ANY); |
@@ -203,14 +234,14 @@ static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_no | |||
203 | const struct irq_domain_ops *ops, | 234 | const struct irq_domain_ops *ops, |
204 | void *host_data) | 235 | void *host_data) |
205 | { | 236 | { |
206 | return __irq_domain_add(of_node, size, size, 0, ops, host_data); | 237 | return __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data); |
207 | } | 238 | } |
208 | static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node, | 239 | static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node, |
209 | unsigned int max_irq, | 240 | unsigned int max_irq, |
210 | const struct irq_domain_ops *ops, | 241 | const struct irq_domain_ops *ops, |
211 | void *host_data) | 242 | void *host_data) |
212 | { | 243 | { |
213 | return __irq_domain_add(of_node, 0, max_irq, max_irq, ops, host_data); | 244 | return __irq_domain_add(of_node_to_fwnode(of_node), 0, max_irq, max_irq, ops, host_data); |
214 | } | 245 | } |
215 | static inline struct irq_domain *irq_domain_add_legacy_isa( | 246 | static inline struct irq_domain *irq_domain_add_legacy_isa( |
216 | struct device_node *of_node, | 247 | struct device_node *of_node, |
@@ -224,7 +255,22 @@ static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node | |||
224 | const struct irq_domain_ops *ops, | 255 | const struct irq_domain_ops *ops, |
225 | void *host_data) | 256 | void *host_data) |
226 | { | 257 | { |
227 | return __irq_domain_add(of_node, 0, ~0, 0, ops, host_data); | 258 | return __irq_domain_add(of_node_to_fwnode(of_node), 0, ~0, 0, ops, host_data); |
259 | } | ||
260 | |||
261 | static inline struct irq_domain *irq_domain_create_linear(struct fwnode_handle *fwnode, | ||
262 | unsigned int size, | ||
263 | const struct irq_domain_ops *ops, | ||
264 | void *host_data) | ||
265 | { | ||
266 | return __irq_domain_add(fwnode, size, size, 0, ops, host_data); | ||
267 | } | ||
268 | |||
269 | static inline struct irq_domain *irq_domain_create_tree(struct fwnode_handle *fwnode, | ||
270 | const struct irq_domain_ops *ops, | ||
271 | void *host_data) | ||
272 | { | ||
273 | return __irq_domain_add(fwnode, 0, ~0, 0, ops, host_data); | ||
228 | } | 274 | } |
229 | 275 | ||
230 | extern void irq_domain_remove(struct irq_domain *host); | 276 | extern void irq_domain_remove(struct irq_domain *host); |
@@ -239,6 +285,7 @@ extern void irq_domain_disassociate(struct irq_domain *domain, | |||
239 | 285 | ||
240 | extern unsigned int irq_create_mapping(struct irq_domain *host, | 286 | extern unsigned int irq_create_mapping(struct irq_domain *host, |
241 | irq_hw_number_t hwirq); | 287 | irq_hw_number_t hwirq); |
288 | extern unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec); | ||
242 | extern void irq_dispose_mapping(unsigned int virq); | 289 | extern void irq_dispose_mapping(unsigned int virq); |
243 | 290 | ||
244 | /** | 291 | /** |
@@ -290,10 +337,23 @@ extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, | |||
290 | void *chip_data, irq_flow_handler_t handler, | 337 | void *chip_data, irq_flow_handler_t handler, |
291 | void *handler_data, const char *handler_name); | 338 | void *handler_data, const char *handler_name); |
292 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | 339 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY |
293 | extern struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, | 340 | extern struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent, |
294 | unsigned int flags, unsigned int size, | 341 | unsigned int flags, unsigned int size, |
295 | struct device_node *node, | 342 | struct fwnode_handle *fwnode, |
296 | const struct irq_domain_ops *ops, void *host_data); | 343 | const struct irq_domain_ops *ops, void *host_data); |
344 | |||
345 | static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, | ||
346 | unsigned int flags, | ||
347 | unsigned int size, | ||
348 | struct device_node *node, | ||
349 | const struct irq_domain_ops *ops, | ||
350 | void *host_data) | ||
351 | { | ||
352 | return irq_domain_create_hierarchy(parent, flags, size, | ||
353 | of_node_to_fwnode(node), | ||
354 | ops, host_data); | ||
355 | } | ||
356 | |||
297 | extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, | 357 | extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, |
298 | unsigned int nr_irqs, int node, void *arg, | 358 | unsigned int nr_irqs, int node, void *arg, |
299 | bool realloc); | 359 | bool realloc); |
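
To make the conversion above concrete, here is a minimal editorial sketch (not part of the patch) of a driver using the new fwnode-based creators: a DT-probed controller wraps its device_node with of_node_to_fwnode(), while a controller with no firmware node allocates a synthetic identifier with irq_domain_alloc_fwnode(). The driver name, ops table and domain size are invented for illustration; only the helpers come from the declarations above.

#include <linux/irqdomain.h>
#include <linux/of.h>

static const struct irq_domain_ops demo_domain_ops;	/* hypothetical ops table */

/* DT-probed controller: wrap the device_node into a fwnode_handle. */
static struct irq_domain *demo_create_dt_domain(struct device_node *np)
{
	return irq_domain_create_linear(of_node_to_fwnode(np), 32,
					&demo_domain_ops, NULL);
}

/* Controller without a firmware node: allocate a synthetic fwnode. */
static struct irq_domain *demo_create_raw_domain(void *chip_data)
{
	struct fwnode_handle *fwnode = irq_domain_alloc_fwnode(chip_data);

	if (!fwnode)
		return NULL;
	return irq_domain_create_linear(fwnode, 32, &demo_domain_ops, chip_data);
}

Note that the existing of_node-based irq_domain_add_*() helpers and irq_find_matching_host() survive above as inline wrappers, so current callers keep compiling unchanged.
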
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h index e374e369fb2f..eb1bdcf95f2e 100644 --- a/include/linux/irqreturn.h +++ b/include/linux/irqreturn.h | |||
@@ -3,7 +3,7 @@ | |||
3 | 3 | ||
4 | /** | 4 | /** |
5 | * enum irqreturn | 5 | * enum irqreturn |
6 | * @IRQ_NONE interrupt was not from this device | 6 | * @IRQ_NONE interrupt was not from this device or was not handled |
7 | * @IRQ_HANDLED interrupt was handled by this device | 7 | * @IRQ_HANDLED interrupt was handled by this device |
8 | * @IRQ_WAKE_THREAD handler requests to wake the handler thread | 8 | * @IRQ_WAKE_THREAD handler requests to wake the handler thread |
9 | */ | 9 | */ |
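
The clarified @IRQ_NONE wording covers both the shared-line case and the "raised but nothing pending" case. As an editorial sketch of the intended usage (demo_dev, demo_pending() and demo_ack() are hypothetical device accessors):

#include <linux/interrupt.h>

struct demo_dev;				/* hypothetical device type */
bool demo_pending(struct demo_dev *dev);	/* hypothetical accessors */
void demo_ack(struct demo_dev *dev);

static irqreturn_t demo_handler(int irq, void *dev_id)
{
	struct demo_dev *dev = dev_id;

	if (!demo_pending(dev))
		return IRQ_NONE;	/* not ours, or nothing was handled */

	demo_ack(dev);
	return IRQ_WAKE_THREAD;		/* defer the real work to the irq thread */
}
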
diff --git a/include/linux/msi.h b/include/linux/msi.h index ad939d0ba816..0b4460374020 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h | |||
@@ -174,6 +174,7 @@ struct msi_controller { | |||
174 | struct irq_domain; | 174 | struct irq_domain; |
175 | struct irq_chip; | 175 | struct irq_chip; |
176 | struct device_node; | 176 | struct device_node; |
177 | struct fwnode_handle; | ||
177 | struct msi_domain_info; | 178 | struct msi_domain_info; |
178 | 179 | ||
179 | /** | 180 | /** |
@@ -262,7 +263,7 @@ enum { | |||
262 | int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, | 263 | int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, |
263 | bool force); | 264 | bool force); |
264 | 265 | ||
265 | struct irq_domain *msi_create_irq_domain(struct device_node *of_node, | 266 | struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode, |
266 | struct msi_domain_info *info, | 267 | struct msi_domain_info *info, |
267 | struct irq_domain *parent); | 268 | struct irq_domain *parent); |
268 | int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, | 269 | int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, |
@@ -270,7 +271,7 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, | |||
270 | void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); | 271 | void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); |
271 | struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); | 272 | struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); |
272 | 273 | ||
273 | struct irq_domain *platform_msi_create_irq_domain(struct device_node *np, | 274 | struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode, |
274 | struct msi_domain_info *info, | 275 | struct msi_domain_info *info, |
275 | struct irq_domain *parent); | 276 | struct irq_domain *parent); |
276 | int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, | 277 | int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, |
@@ -280,19 +281,26 @@ void platform_msi_domain_free_irqs(struct device *dev); | |||
280 | 281 | ||
281 | #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN | 282 | #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN |
282 | void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg); | 283 | void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg); |
283 | struct irq_domain *pci_msi_create_irq_domain(struct device_node *node, | 284 | struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, |
284 | struct msi_domain_info *info, | 285 | struct msi_domain_info *info, |
285 | struct irq_domain *parent); | 286 | struct irq_domain *parent); |
286 | int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev, | 287 | int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev, |
287 | int nvec, int type); | 288 | int nvec, int type); |
288 | void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev); | 289 | void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev); |
289 | struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node, | 290 | struct irq_domain *pci_msi_create_default_irq_domain(struct fwnode_handle *fwnode, |
290 | struct msi_domain_info *info, struct irq_domain *parent); | 291 | struct msi_domain_info *info, struct irq_domain *parent); |
291 | 292 | ||
292 | irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev, | 293 | irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev, |
293 | struct msi_desc *desc); | 294 | struct msi_desc *desc); |
294 | int pci_msi_domain_check_cap(struct irq_domain *domain, | 295 | int pci_msi_domain_check_cap(struct irq_domain *domain, |
295 | struct msi_domain_info *info, struct device *dev); | 296 | struct msi_domain_info *info, struct device *dev); |
297 | u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev); | ||
298 | struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev); | ||
299 | #else | ||
300 | static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev) | ||
301 | { | ||
302 | return NULL; | ||
303 | } | ||
296 | #endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ | 304 | #endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ |
297 | 305 | ||
298 | #endif /* LINUX_MSI_H */ | 306 | #endif /* LINUX_MSI_H */ |
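
A short editorial sketch of the updated creation path: an MSI controller driver now passes a fwnode_handle (from DT, ACPI, or irq_domain_alloc_fwnode()) instead of a device_node, and consumers can probe for a per-device domain through pci_msi_get_device_domain(), which degrades to the NULL stub above when CONFIG_PCI_MSI_IRQ_DOMAIN is off. demo_msi_info and the parent domain are assumed to be set up elsewhere.

#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/pci.h>

static struct msi_domain_info demo_msi_info;	/* assumed populated elsewhere */

static struct irq_domain *demo_create_pci_msi_domain(struct fwnode_handle *fwnode,
						     struct irq_domain *parent)
{
	/* Keyed by a fwnode_handle rather than a device_node now. */
	return pci_msi_create_irq_domain(fwnode, &demo_msi_info, parent);
}

static bool demo_dev_has_msi_domain(struct pci_dev *pdev)
{
	return pci_msi_get_device_domain(pdev) != NULL;
}
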
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h index 4bcbd586a672..65d969246a4d 100644 --- a/include/linux/of_irq.h +++ b/include/linux/of_irq.h | |||
@@ -46,6 +46,11 @@ extern int of_irq_get(struct device_node *dev, int index); | |||
46 | extern int of_irq_get_byname(struct device_node *dev, const char *name); | 46 | extern int of_irq_get_byname(struct device_node *dev, const char *name); |
47 | extern int of_irq_to_resource_table(struct device_node *dev, | 47 | extern int of_irq_to_resource_table(struct device_node *dev, |
48 | struct resource *res, int nr_irqs); | 48 | struct resource *res, int nr_irqs); |
49 | extern struct irq_domain *of_msi_get_domain(struct device *dev, | ||
50 | struct device_node *np, | ||
51 | enum irq_domain_bus_token token); | ||
52 | extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev, | ||
53 | u32 rid); | ||
49 | #else | 54 | #else |
50 | static inline int of_irq_count(struct device_node *dev) | 55 | static inline int of_irq_count(struct device_node *dev) |
51 | { | 56 | { |
@@ -64,6 +69,17 @@ static inline int of_irq_to_resource_table(struct device_node *dev, | |||
64 | { | 69 | { |
65 | return 0; | 70 | return 0; |
66 | } | 71 | } |
72 | static inline struct irq_domain *of_msi_get_domain(struct device *dev, | ||
73 | struct device_node *np, | ||
74 | enum irq_domain_bus_token token) | ||
75 | { | ||
76 | return NULL; | ||
77 | } | ||
78 | static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev, | ||
79 | u32 rid) | ||
80 | { | ||
81 | return NULL; | ||
82 | } | ||
67 | #endif | 83 | #endif |
68 | 84 | ||
69 | #if defined(CONFIG_OF) | 85 | #if defined(CONFIG_OF) |
@@ -75,6 +91,7 @@ static inline int of_irq_to_resource_table(struct device_node *dev, | |||
75 | extern unsigned int irq_of_parse_and_map(struct device_node *node, int index); | 91 | extern unsigned int irq_of_parse_and_map(struct device_node *node, int index); |
76 | extern struct device_node *of_irq_find_parent(struct device_node *child); | 92 | extern struct device_node *of_irq_find_parent(struct device_node *child); |
77 | extern void of_msi_configure(struct device *dev, struct device_node *np); | 93 | extern void of_msi_configure(struct device *dev, struct device_node *np); |
94 | u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in); | ||
78 | 95 | ||
79 | #else /* !CONFIG_OF */ | 96 | #else /* !CONFIG_OF */ |
80 | static inline unsigned int irq_of_parse_and_map(struct device_node *dev, | 97 | static inline unsigned int irq_of_parse_and_map(struct device_node *dev, |
@@ -87,6 +104,12 @@ static inline void *of_irq_find_parent(struct device_node *child) | |||
87 | { | 104 | { |
88 | return NULL; | 105 | return NULL; |
89 | } | 106 | } |
107 | |||
108 | static inline u32 of_msi_map_rid(struct device *dev, | ||
109 | struct device_node *msi_np, u32 rid_in) | ||
110 | { | ||
111 | return rid_in; | ||
112 | } | ||
90 | #endif /* !CONFIG_OF */ | 113 | #endif /* !CONFIG_OF */ |
91 | 114 | ||
92 | #endif /* __OF_IRQ_H */ | 115 | #endif /* __OF_IRQ_H */ |
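
A hedged sketch of how the new OF MSI helpers fit together: map a PCI requester ID through the firmware description, then look up the MSI domain to use for that device. msi_np is assumed to be the MSI controller node the host bridge points at, and PCI_DEVID() is the usual bus/devfn packing macro; with !CONFIG_OF the stubs above make both calls fall back gracefully.

#include <linux/of_irq.h>
#include <linux/pci.h>

static struct irq_domain *demo_find_msi_domain(struct pci_dev *pdev,
					       struct device_node *msi_np)
{
	u32 rid = of_msi_map_rid(&pdev->dev, msi_np,
				 PCI_DEVID(pdev->bus->number, pdev->devfn));

	return of_msi_map_get_device_domain(&pdev->dev, rid);
}
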
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index 9a76e3beda54..3b48dab80164 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig | |||
@@ -30,6 +30,10 @@ config GENERIC_IRQ_LEGACY_ALLOC_HWIRQ | |||
30 | config GENERIC_PENDING_IRQ | 30 | config GENERIC_PENDING_IRQ |
31 | bool | 31 | bool |
32 | 32 | ||
33 | # Support for generic irq migrating off cpu before the cpu is offline. | ||
34 | config GENERIC_IRQ_MIGRATION | ||
35 | bool | ||
36 | |||
33 | # Alpha specific irq affinity mechanism | 37 | # Alpha specific irq affinity mechanism |
34 | config AUTO_IRQ_AFFINITY | 38 | config AUTO_IRQ_AFFINITY |
35 | bool | 39 | bool |
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile index d12123526e2b..2fc9cbdf35b6 100644 --- a/kernel/irq/Makefile +++ b/kernel/irq/Makefile | |||
@@ -5,5 +5,6 @@ obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o | |||
5 | obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o | 5 | obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o |
6 | obj-$(CONFIG_PROC_FS) += proc.o | 6 | obj-$(CONFIG_PROC_FS) += proc.o |
7 | obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o | 7 | obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o |
8 | obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o | ||
8 | obj-$(CONFIG_PM_SLEEP) += pm.o | 9 | obj-$(CONFIG_PM_SLEEP) += pm.o |
9 | obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o | 10 | obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o |
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index e28169dd1c36..15206453b12a 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -21,6 +21,20 @@ | |||
21 | 21 | ||
22 | #include "internals.h" | 22 | #include "internals.h" |
23 | 23 | ||
24 | static irqreturn_t bad_chained_irq(int irq, void *dev_id) | ||
25 | { | ||
26 | WARN_ONCE(1, "Chained irq %d should not call an action\n", irq); | ||
27 | return IRQ_NONE; | ||
28 | } | ||
29 | |||
30 | /* | ||
31 | * Chained handlers should never call action on their IRQ. This default | ||
32 | * action will emit warning if such thing happens. | ||
33 | */ | ||
34 | struct irqaction chained_action = { | ||
35 | .handler = bad_chained_irq, | ||
36 | }; | ||
37 | |||
24 | /** | 38 | /** |
25 | * irq_set_chip - set the irq chip for an irq | 39 | * irq_set_chip - set the irq chip for an irq |
26 | * @irq: irq number | 40 | * @irq: irq number |
@@ -227,6 +241,13 @@ void irq_enable(struct irq_desc *desc) | |||
227 | * disabled. If an interrupt happens, then the interrupt flow | 241 | * disabled. If an interrupt happens, then the interrupt flow |
228 | * handler masks the line at the hardware level and marks it | 242 | * handler masks the line at the hardware level and marks it |
229 | * pending. | 243 | * pending. |
244 | * | ||
245 | * If the interrupt chip does not implement the irq_disable callback, | ||
246 | * a driver can disable the lazy approach for a particular irq line by | ||
247 | * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can | ||
248 | * be used for devices which cannot disable the interrupt at the | ||
249 | * device level under certain circumstances and have to use | ||
250 | * disable_irq[_nosync] instead. | ||
230 | */ | 251 | */ |
231 | void irq_disable(struct irq_desc *desc) | 252 | void irq_disable(struct irq_desc *desc) |
232 | { | 253 | { |
@@ -234,6 +255,8 @@ void irq_disable(struct irq_desc *desc) | |||
234 | if (desc->irq_data.chip->irq_disable) { | 255 | if (desc->irq_data.chip->irq_disable) { |
235 | desc->irq_data.chip->irq_disable(&desc->irq_data); | 256 | desc->irq_data.chip->irq_disable(&desc->irq_data); |
236 | irq_state_set_masked(desc); | 257 | irq_state_set_masked(desc); |
258 | } else if (irq_settings_disable_unlazy(desc)) { | ||
259 | mask_irq(desc); | ||
237 | } | 260 | } |
238 | } | 261 | } |
239 | 262 | ||
@@ -669,7 +692,7 @@ void handle_percpu_irq(struct irq_desc *desc) | |||
669 | if (chip->irq_ack) | 692 | if (chip->irq_ack) |
670 | chip->irq_ack(&desc->irq_data); | 693 | chip->irq_ack(&desc->irq_data); |
671 | 694 | ||
672 | handle_irq_event_percpu(desc, desc->action); | 695 | handle_irq_event_percpu(desc); |
673 | 696 | ||
674 | if (chip->irq_eoi) | 697 | if (chip->irq_eoi) |
675 | chip->irq_eoi(&desc->irq_data); | 698 | chip->irq_eoi(&desc->irq_data); |
@@ -746,6 +769,8 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle, | |||
746 | if (desc->irq_data.chip != &no_irq_chip) | 769 | if (desc->irq_data.chip != &no_irq_chip) |
747 | mask_ack_irq(desc); | 770 | mask_ack_irq(desc); |
748 | irq_state_set_disabled(desc); | 771 | irq_state_set_disabled(desc); |
772 | if (is_chained) | ||
773 | desc->action = NULL; | ||
749 | desc->depth = 1; | 774 | desc->depth = 1; |
750 | } | 775 | } |
751 | desc->handle_irq = handle; | 776 | desc->handle_irq = handle; |
@@ -755,6 +780,7 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle, | |||
755 | irq_settings_set_noprobe(desc); | 780 | irq_settings_set_noprobe(desc); |
756 | irq_settings_set_norequest(desc); | 781 | irq_settings_set_norequest(desc); |
757 | irq_settings_set_nothread(desc); | 782 | irq_settings_set_nothread(desc); |
783 | desc->action = &chained_action; | ||
758 | irq_startup(desc, true); | 784 | irq_startup(desc, true); |
759 | } | 785 | } |
760 | } | 786 | } |
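
The comment added to irq_disable() names the opt-out explicitly; as an editorial sketch, a driver whose device cannot gate its own interrupt would set the flag before requesting the line so that disable_irq() masks it at the chip immediately (demo_handler and the "demo" name are placeholders):

#include <linux/interrupt.h>
#include <linux/irq.h>

static irqreturn_t demo_handler(int irq, void *dev_id);	/* hypothetical */

static int demo_setup_irq(unsigned int irq, void *dev)
{
	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
	return request_irq(irq, demo_handler, 0, "demo", dev);
}
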
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c new file mode 100644 index 000000000000..80f4f4e56fed --- /dev/null +++ b/kernel/irq/cpuhotplug.c | |||
@@ -0,0 +1,82 @@ | |||
1 | /* | ||
2 | * Generic cpu hotunplug interrupt migration code copied from the | ||
3 | * arch/arm implementation | ||
4 | * | ||
5 | * Copyright (C) Russell King | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/ratelimit.h> | ||
13 | #include <linux/irq.h> | ||
14 | |||
15 | #include "internals.h" | ||
16 | |||
17 | static bool migrate_one_irq(struct irq_desc *desc) | ||
18 | { | ||
19 | struct irq_data *d = irq_desc_get_irq_data(desc); | ||
20 | const struct cpumask *affinity = d->common->affinity; | ||
21 | struct irq_chip *c; | ||
22 | bool ret = false; | ||
23 | |||
24 | /* | ||
25 | * If this is a per-CPU interrupt, or the affinity does not | ||
26 | * include this CPU, then we have nothing to do. | ||
27 | */ | ||
28 | if (irqd_is_per_cpu(d) || | ||
29 | !cpumask_test_cpu(smp_processor_id(), affinity)) | ||
30 | return false; | ||
31 | |||
32 | if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { | ||
33 | affinity = cpu_online_mask; | ||
34 | ret = true; | ||
35 | } | ||
36 | |||
37 | c = irq_data_get_irq_chip(d); | ||
38 | if (!c->irq_set_affinity) { | ||
39 | pr_warn_ratelimited("IRQ%u: unable to set affinity\n", d->irq); | ||
40 | } else { | ||
41 | int r = irq_do_set_affinity(d, affinity, false); | ||
42 | if (r) | ||
43 | pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n", | ||
44 | d->irq, r); | ||
45 | } | ||
46 | |||
47 | return ret; | ||
48 | } | ||
49 | |||
50 | /** | ||
51 | * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu | ||
52 | * | ||
53 | * The current CPU has been marked offline. Migrate IRQs off this CPU. | ||
54 | * If the affinity settings do not allow other CPUs, force them onto any | ||
55 | * available CPU. | ||
56 | * | ||
57 | * Note: we must iterate over all IRQs, whether they have an attached | ||
58 | * action structure or not, as we need to get chained interrupts too. | ||
59 | */ | ||
60 | void irq_migrate_all_off_this_cpu(void) | ||
61 | { | ||
62 | unsigned int irq; | ||
63 | struct irq_desc *desc; | ||
64 | unsigned long flags; | ||
65 | |||
66 | local_irq_save(flags); | ||
67 | |||
68 | for_each_active_irq(irq) { | ||
69 | bool affinity_broken; | ||
70 | |||
71 | desc = irq_to_desc(irq); | ||
72 | raw_spin_lock(&desc->lock); | ||
73 | affinity_broken = migrate_one_irq(desc); | ||
74 | raw_spin_unlock(&desc->lock); | ||
75 | |||
76 | if (affinity_broken) | ||
77 | pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n", | ||
78 | irq, smp_processor_id()); | ||
79 | } | ||
80 | |||
81 | local_irq_restore(flags); | ||
82 | } | ||
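
A sketch of the intended call site, modelled on the arch/arm code this file was copied from: the architecture's CPU-offline path marks the outgoing CPU offline and then migrates its interrupts. The hook name is an assumption for illustration, and the declaration of irq_migrate_all_off_this_cpu() is assumed to be exposed via <linux/irq.h> under CONFIG_GENERIC_IRQ_MIGRATION.

#include <linux/cpumask.h>
#include <linux/irq.h>
#include <linux/smp.h>

static int demo_arch_cpu_disable(void)
{
	/* Take this CPU out of the online mask first... */
	set_cpu_online(smp_processor_id(), false);
	/* ...then hand its interrupts to the remaining CPUs. */
	irq_migrate_all_off_this_cpu();
	return 0;
}
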
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index e25a83b67cce..a302cf9a2126 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c | |||
@@ -132,11 +132,11 @@ void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action) | |||
132 | wake_up_process(action->thread); | 132 | wake_up_process(action->thread); |
133 | } | 133 | } |
134 | 134 | ||
135 | irqreturn_t | 135 | irqreturn_t handle_irq_event_percpu(struct irq_desc *desc) |
136 | handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action) | ||
137 | { | 136 | { |
138 | irqreturn_t retval = IRQ_NONE; | 137 | irqreturn_t retval = IRQ_NONE; |
139 | unsigned int flags = 0, irq = desc->irq_data.irq; | 138 | unsigned int flags = 0, irq = desc->irq_data.irq; |
139 | struct irqaction *action = desc->action; | ||
140 | 140 | ||
141 | do { | 141 | do { |
142 | irqreturn_t res; | 142 | irqreturn_t res; |
@@ -184,14 +184,13 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action) | |||
184 | 184 | ||
185 | irqreturn_t handle_irq_event(struct irq_desc *desc) | 185 | irqreturn_t handle_irq_event(struct irq_desc *desc) |
186 | { | 186 | { |
187 | struct irqaction *action = desc->action; | ||
188 | irqreturn_t ret; | 187 | irqreturn_t ret; |
189 | 188 | ||
190 | desc->istate &= ~IRQS_PENDING; | 189 | desc->istate &= ~IRQS_PENDING; |
191 | irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); | 190 | irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
192 | raw_spin_unlock(&desc->lock); | 191 | raw_spin_unlock(&desc->lock); |
193 | 192 | ||
194 | ret = handle_irq_event_percpu(desc, action); | 193 | ret = handle_irq_event_percpu(desc); |
195 | 194 | ||
196 | raw_spin_lock(&desc->lock); | 195 | raw_spin_lock(&desc->lock); |
197 | irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); | 196 | irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
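
For callers the change is mechanical: the action list now comes from the descriptor itself. A small flow-handler sketch in the style of handle_percpu_irq() in the chip.c hunk above (editorial; handle_irq_event_percpu() is declared in kernel/irq/internals.h, so this only applies to core flow handlers):

static void demo_flow_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	/* No second argument any more; the function walks desc->action. */
	handle_irq_event_percpu(desc);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
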
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 5ef0c2dbe930..05c2188271b8 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h | |||
@@ -18,6 +18,8 @@ | |||
18 | 18 | ||
19 | extern bool noirqdebug; | 19 | extern bool noirqdebug; |
20 | 20 | ||
21 | extern struct irqaction chained_action; | ||
22 | |||
21 | /* | 23 | /* |
22 | * Bits used by threaded handlers: | 24 | * Bits used by threaded handlers: |
23 | * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run | 25 | * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run |
@@ -81,7 +83,7 @@ extern void irq_mark_irq(unsigned int irq); | |||
81 | 83 | ||
82 | extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); | 84 | extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); |
83 | 85 | ||
84 | irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action); | 86 | irqreturn_t handle_irq_event_percpu(struct irq_desc *desc); |
85 | irqreturn_t handle_irq_event(struct irq_desc *desc); | 87 | irqreturn_t handle_irq_event(struct irq_desc *desc); |
86 | 88 | ||
87 | /* Resending of interrupts :*/ | 89 | /* Resending of interrupts :*/ |
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index dc9d27c0c158..22aa9612ef7c 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c | |||
@@ -27,6 +27,57 @@ static int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, | |||
27 | irq_hw_number_t hwirq, int node); | 27 | irq_hw_number_t hwirq, int node); |
28 | static void irq_domain_check_hierarchy(struct irq_domain *domain); | 28 | static void irq_domain_check_hierarchy(struct irq_domain *domain); |
29 | 29 | ||
30 | struct irqchip_fwid { | ||
31 | struct fwnode_handle fwnode; | ||
32 | char *name; | ||
33 | void *data; | ||
34 | }; | ||
35 | |||
36 | /** | ||
37 | * irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for | ||
38 | * identifying an irq domain | ||
39 | * @data: optional user-provided data | ||
40 | * | ||
41 | * Allocate a struct irqchip_fwid, and return a pointer to the embedded | ||
42 | * fwnode_handle (or NULL on failure). | ||
43 | */ | ||
44 | struct fwnode_handle *irq_domain_alloc_fwnode(void *data) | ||
45 | { | ||
46 | struct irqchip_fwid *fwid; | ||
47 | char *name; | ||
48 | |||
49 | fwid = kzalloc(sizeof(*fwid), GFP_KERNEL); | ||
50 | name = kasprintf(GFP_KERNEL, "irqchip@%p", data); | ||
51 | |||
52 | if (!fwid || !name) { | ||
53 | kfree(fwid); | ||
54 | kfree(name); | ||
55 | return NULL; | ||
56 | } | ||
57 | |||
58 | fwid->name = name; | ||
59 | fwid->data = data; | ||
60 | fwid->fwnode.type = FWNODE_IRQCHIP; | ||
61 | return &fwid->fwnode; | ||
62 | } | ||
63 | |||
64 | /** | ||
65 | * irq_domain_free_fwnode - Free a non-OF-backed fwnode_handle | ||
66 | * | ||
67 | * Free a fwnode_handle allocated with irq_domain_alloc_fwnode. | ||
68 | */ | ||
69 | void irq_domain_free_fwnode(struct fwnode_handle *fwnode) | ||
70 | { | ||
71 | struct irqchip_fwid *fwid; | ||
72 | |||
73 | if (WARN_ON(fwnode->type != FWNODE_IRQCHIP)) | ||
74 | return; | ||
75 | |||
76 | fwid = container_of(fwnode, struct irqchip_fwid, fwnode); | ||
77 | kfree(fwid->name); | ||
78 | kfree(fwid); | ||
79 | } | ||
80 | |||
30 | /** | 81 | /** |
31 | * __irq_domain_add() - Allocate a new irq_domain data structure | 82 | * __irq_domain_add() - Allocate a new irq_domain data structure |
32 | * @of_node: optional device-tree node of the interrupt controller | 83 | * @of_node: optional device-tree node of the interrupt controller |
@@ -40,23 +91,28 @@ static void irq_domain_check_hierarchy(struct irq_domain *domain); | |||
40 | * Allocates and initializes an irq_domain structure. | 91 | * Allocates and initializes an irq_domain structure. |
41 | * Returns pointer to IRQ domain, or NULL on failure. | 92 | * Returns pointer to IRQ domain, or NULL on failure. |
42 | */ | 93 | */ |
43 | struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, | 94 | struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size, |
44 | irq_hw_number_t hwirq_max, int direct_max, | 95 | irq_hw_number_t hwirq_max, int direct_max, |
45 | const struct irq_domain_ops *ops, | 96 | const struct irq_domain_ops *ops, |
46 | void *host_data) | 97 | void *host_data) |
47 | { | 98 | { |
48 | struct irq_domain *domain; | 99 | struct irq_domain *domain; |
100 | struct device_node *of_node; | ||
101 | |||
102 | of_node = to_of_node(fwnode); | ||
49 | 103 | ||
50 | domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size), | 104 | domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size), |
51 | GFP_KERNEL, of_node_to_nid(of_node)); | 105 | GFP_KERNEL, of_node_to_nid(of_node)); |
52 | if (WARN_ON(!domain)) | 106 | if (WARN_ON(!domain)) |
53 | return NULL; | 107 | return NULL; |
54 | 108 | ||
109 | of_node_get(of_node); | ||
110 | |||
55 | /* Fill structure */ | 111 | /* Fill structure */ |
56 | INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL); | 112 | INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL); |
57 | domain->ops = ops; | 113 | domain->ops = ops; |
58 | domain->host_data = host_data; | 114 | domain->host_data = host_data; |
59 | domain->of_node = of_node_get(of_node); | 115 | domain->fwnode = fwnode; |
60 | domain->hwirq_max = hwirq_max; | 116 | domain->hwirq_max = hwirq_max; |
61 | domain->revmap_size = size; | 117 | domain->revmap_size = size; |
62 | domain->revmap_direct_max_irq = direct_max; | 118 | domain->revmap_direct_max_irq = direct_max; |
@@ -102,7 +158,7 @@ void irq_domain_remove(struct irq_domain *domain) | |||
102 | 158 | ||
103 | pr_debug("Removed domain %s\n", domain->name); | 159 | pr_debug("Removed domain %s\n", domain->name); |
104 | 160 | ||
105 | of_node_put(domain->of_node); | 161 | of_node_put(irq_domain_get_of_node(domain)); |
106 | kfree(domain); | 162 | kfree(domain); |
107 | } | 163 | } |
108 | EXPORT_SYMBOL_GPL(irq_domain_remove); | 164 | EXPORT_SYMBOL_GPL(irq_domain_remove); |
@@ -133,7 +189,7 @@ struct irq_domain *irq_domain_add_simple(struct device_node *of_node, | |||
133 | { | 189 | { |
134 | struct irq_domain *domain; | 190 | struct irq_domain *domain; |
135 | 191 | ||
136 | domain = __irq_domain_add(of_node, size, size, 0, ops, host_data); | 192 | domain = __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data); |
137 | if (!domain) | 193 | if (!domain) |
138 | return NULL; | 194 | return NULL; |
139 | 195 | ||
@@ -177,7 +233,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, | |||
177 | { | 233 | { |
178 | struct irq_domain *domain; | 234 | struct irq_domain *domain; |
179 | 235 | ||
180 | domain = __irq_domain_add(of_node, first_hwirq + size, | 236 | domain = __irq_domain_add(of_node_to_fwnode(of_node), first_hwirq + size, |
181 | first_hwirq + size, 0, ops, host_data); | 237 | first_hwirq + size, 0, ops, host_data); |
182 | if (domain) | 238 | if (domain) |
183 | irq_domain_associate_many(domain, first_irq, first_hwirq, size); | 239 | irq_domain_associate_many(domain, first_irq, first_hwirq, size); |
@@ -187,12 +243,12 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, | |||
187 | EXPORT_SYMBOL_GPL(irq_domain_add_legacy); | 243 | EXPORT_SYMBOL_GPL(irq_domain_add_legacy); |
188 | 244 | ||
189 | /** | 245 | /** |
190 | * irq_find_matching_host() - Locates a domain for a given device node | 246 | * irq_find_matching_fwnode() - Locates a domain for a given fwnode |
191 | * @node: device-tree node of the interrupt controller | 247 | * @fwnode: FW descriptor of the interrupt controller |
192 | * @bus_token: domain-specific data | 248 | * @bus_token: domain-specific data |
193 | */ | 249 | */ |
194 | struct irq_domain *irq_find_matching_host(struct device_node *node, | 250 | struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode, |
195 | enum irq_domain_bus_token bus_token) | 251 | enum irq_domain_bus_token bus_token) |
196 | { | 252 | { |
197 | struct irq_domain *h, *found = NULL; | 253 | struct irq_domain *h, *found = NULL; |
198 | int rc; | 254 | int rc; |
@@ -209,9 +265,9 @@ struct irq_domain *irq_find_matching_host(struct device_node *node, | |||
209 | mutex_lock(&irq_domain_mutex); | 265 | mutex_lock(&irq_domain_mutex); |
210 | list_for_each_entry(h, &irq_domain_list, link) { | 266 | list_for_each_entry(h, &irq_domain_list, link) { |
211 | if (h->ops->match) | 267 | if (h->ops->match) |
212 | rc = h->ops->match(h, node, bus_token); | 268 | rc = h->ops->match(h, to_of_node(fwnode), bus_token); |
213 | else | 269 | else |
214 | rc = ((h->of_node != NULL) && (h->of_node == node) && | 270 | rc = ((fwnode != NULL) && (h->fwnode == fwnode) && |
215 | ((bus_token == DOMAIN_BUS_ANY) || | 271 | ((bus_token == DOMAIN_BUS_ANY) || |
216 | (h->bus_token == bus_token))); | 272 | (h->bus_token == bus_token))); |
217 | 273 | ||
@@ -223,7 +279,7 @@ struct irq_domain *irq_find_matching_host(struct device_node *node, | |||
223 | mutex_unlock(&irq_domain_mutex); | 279 | mutex_unlock(&irq_domain_mutex); |
224 | return found; | 280 | return found; |
225 | } | 281 | } |
226 | EXPORT_SYMBOL_GPL(irq_find_matching_host); | 282 | EXPORT_SYMBOL_GPL(irq_find_matching_fwnode); |
227 | 283 | ||
228 | /** | 284 | /** |
229 | * irq_set_default_host() - Set a "default" irq domain | 285 | * irq_set_default_host() - Set a "default" irq domain |
@@ -336,10 +392,12 @@ EXPORT_SYMBOL_GPL(irq_domain_associate); | |||
336 | void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base, | 392 | void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base, |
337 | irq_hw_number_t hwirq_base, int count) | 393 | irq_hw_number_t hwirq_base, int count) |
338 | { | 394 | { |
395 | struct device_node *of_node; | ||
339 | int i; | 396 | int i; |
340 | 397 | ||
398 | of_node = irq_domain_get_of_node(domain); | ||
341 | pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__, | 399 | pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__, |
342 | of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count); | 400 | of_node_full_name(of_node), irq_base, (int)hwirq_base, count); |
343 | 401 | ||
344 | for (i = 0; i < count; i++) { | 402 | for (i = 0; i < count; i++) { |
345 | irq_domain_associate(domain, irq_base + i, hwirq_base + i); | 403 | irq_domain_associate(domain, irq_base + i, hwirq_base + i); |
@@ -359,12 +417,14 @@ EXPORT_SYMBOL_GPL(irq_domain_associate_many); | |||
359 | */ | 417 | */ |
360 | unsigned int irq_create_direct_mapping(struct irq_domain *domain) | 418 | unsigned int irq_create_direct_mapping(struct irq_domain *domain) |
361 | { | 419 | { |
420 | struct device_node *of_node; | ||
362 | unsigned int virq; | 421 | unsigned int virq; |
363 | 422 | ||
364 | if (domain == NULL) | 423 | if (domain == NULL) |
365 | domain = irq_default_domain; | 424 | domain = irq_default_domain; |
366 | 425 | ||
367 | virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node)); | 426 | of_node = irq_domain_get_of_node(domain); |
427 | virq = irq_alloc_desc_from(1, of_node_to_nid(of_node)); | ||
368 | if (!virq) { | 428 | if (!virq) { |
369 | pr_debug("create_direct virq allocation failed\n"); | 429 | pr_debug("create_direct virq allocation failed\n"); |
370 | return 0; | 430 | return 0; |
@@ -399,6 +459,7 @@ EXPORT_SYMBOL_GPL(irq_create_direct_mapping); | |||
399 | unsigned int irq_create_mapping(struct irq_domain *domain, | 459 | unsigned int irq_create_mapping(struct irq_domain *domain, |
400 | irq_hw_number_t hwirq) | 460 | irq_hw_number_t hwirq) |
401 | { | 461 | { |
462 | struct device_node *of_node; | ||
402 | int virq; | 463 | int virq; |
403 | 464 | ||
404 | pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq); | 465 | pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq); |
@@ -412,6 +473,8 @@ unsigned int irq_create_mapping(struct irq_domain *domain, | |||
412 | } | 473 | } |
413 | pr_debug("-> using domain @%p\n", domain); | 474 | pr_debug("-> using domain @%p\n", domain); |
414 | 475 | ||
476 | of_node = irq_domain_get_of_node(domain); | ||
477 | |||
415 | /* Check if mapping already exists */ | 478 | /* Check if mapping already exists */ |
416 | virq = irq_find_mapping(domain, hwirq); | 479 | virq = irq_find_mapping(domain, hwirq); |
417 | if (virq) { | 480 | if (virq) { |
@@ -420,8 +483,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain, | |||
420 | } | 483 | } |
421 | 484 | ||
422 | /* Allocate a virtual interrupt number */ | 485 | /* Allocate a virtual interrupt number */ |
423 | virq = irq_domain_alloc_descs(-1, 1, hwirq, | 486 | virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node)); |
424 | of_node_to_nid(domain->of_node)); | ||
425 | if (virq <= 0) { | 487 | if (virq <= 0) { |
426 | pr_debug("-> virq allocation failed\n"); | 488 | pr_debug("-> virq allocation failed\n"); |
427 | return 0; | 489 | return 0; |
@@ -433,7 +495,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain, | |||
433 | } | 495 | } |
434 | 496 | ||
435 | pr_debug("irq %lu on domain %s mapped to virtual irq %u\n", | 497 | pr_debug("irq %lu on domain %s mapped to virtual irq %u\n", |
436 | hwirq, of_node_full_name(domain->of_node), virq); | 498 | hwirq, of_node_full_name(of_node), virq); |
437 | 499 | ||
438 | return virq; | 500 | return virq; |
439 | } | 501 | } |
@@ -460,10 +522,12 @@ EXPORT_SYMBOL_GPL(irq_create_mapping); | |||
460 | int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base, | 522 | int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base, |
461 | irq_hw_number_t hwirq_base, int count) | 523 | irq_hw_number_t hwirq_base, int count) |
462 | { | 524 | { |
525 | struct device_node *of_node; | ||
463 | int ret; | 526 | int ret; |
464 | 527 | ||
528 | of_node = irq_domain_get_of_node(domain); | ||
465 | ret = irq_alloc_descs(irq_base, irq_base, count, | 529 | ret = irq_alloc_descs(irq_base, irq_base, count, |
466 | of_node_to_nid(domain->of_node)); | 530 | of_node_to_nid(of_node)); |
467 | if (unlikely(ret < 0)) | 531 | if (unlikely(ret < 0)) |
468 | return ret; | 532 | return ret; |
469 | 533 | ||
@@ -472,28 +536,56 @@ int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base, | |||
472 | } | 536 | } |
473 | EXPORT_SYMBOL_GPL(irq_create_strict_mappings); | 537 | EXPORT_SYMBOL_GPL(irq_create_strict_mappings); |
474 | 538 | ||
475 | unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data) | 539 | static int irq_domain_translate(struct irq_domain *d, |
540 | struct irq_fwspec *fwspec, | ||
541 | irq_hw_number_t *hwirq, unsigned int *type) | ||
542 | { | ||
543 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | ||
544 | if (d->ops->translate) | ||
545 | return d->ops->translate(d, fwspec, hwirq, type); | ||
546 | #endif | ||
547 | if (d->ops->xlate) | ||
548 | return d->ops->xlate(d, to_of_node(fwspec->fwnode), | ||
549 | fwspec->param, fwspec->param_count, | ||
550 | hwirq, type); | ||
551 | |||
552 | /* If domain has no translation, then we assume interrupt line */ | ||
553 | *hwirq = fwspec->param[0]; | ||
554 | return 0; | ||
555 | } | ||
556 | |||
557 | static void of_phandle_args_to_fwspec(struct of_phandle_args *irq_data, | ||
558 | struct irq_fwspec *fwspec) | ||
559 | { | ||
560 | int i; | ||
561 | |||
562 | fwspec->fwnode = irq_data->np ? &irq_data->np->fwnode : NULL; | ||
563 | fwspec->param_count = irq_data->args_count; | ||
564 | |||
565 | for (i = 0; i < irq_data->args_count; i++) | ||
566 | fwspec->param[i] = irq_data->args[i]; | ||
567 | } | ||
568 | |||
569 | unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec) | ||
476 | { | 570 | { |
477 | struct irq_domain *domain; | 571 | struct irq_domain *domain; |
478 | irq_hw_number_t hwirq; | 572 | irq_hw_number_t hwirq; |
479 | unsigned int type = IRQ_TYPE_NONE; | 573 | unsigned int type = IRQ_TYPE_NONE; |
480 | int virq; | 574 | int virq; |
481 | 575 | ||
482 | domain = irq_data->np ? irq_find_host(irq_data->np) : irq_default_domain; | 576 | if (fwspec->fwnode) |
577 | domain = irq_find_matching_fwnode(fwspec->fwnode, DOMAIN_BUS_ANY); | ||
578 | else | ||
579 | domain = irq_default_domain; | ||
580 | |||
483 | if (!domain) { | 581 | if (!domain) { |
484 | pr_warn("no irq domain found for %s !\n", | 582 | pr_warn("no irq domain found for %s !\n", |
485 | of_node_full_name(irq_data->np)); | 583 | of_node_full_name(to_of_node(fwspec->fwnode))); |
486 | return 0; | 584 | return 0; |
487 | } | 585 | } |
488 | 586 | ||
489 | /* If domain has no translation, then we assume interrupt line */ | 587 | if (irq_domain_translate(domain, fwspec, &hwirq, &type)) |
490 | if (domain->ops->xlate == NULL) | 588 | return 0; |
491 | hwirq = irq_data->args[0]; | ||
492 | else { | ||
493 | if (domain->ops->xlate(domain, irq_data->np, irq_data->args, | ||
494 | irq_data->args_count, &hwirq, &type)) | ||
495 | return 0; | ||
496 | } | ||
497 | 589 | ||
498 | if (irq_domain_is_hierarchy(domain)) { | 590 | if (irq_domain_is_hierarchy(domain)) { |
499 | /* | 591 | /* |
@@ -504,7 +596,7 @@ unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data) | |||
504 | if (virq) | 596 | if (virq) |
505 | return virq; | 597 | return virq; |
506 | 598 | ||
507 | virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, irq_data); | 599 | virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec); |
508 | if (virq <= 0) | 600 | if (virq <= 0) |
509 | return 0; | 601 | return 0; |
510 | } else { | 602 | } else { |
@@ -520,6 +612,15 @@ unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data) | |||
520 | irq_set_irq_type(virq, type); | 612 | irq_set_irq_type(virq, type); |
521 | return virq; | 613 | return virq; |
522 | } | 614 | } |
615 | EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping); | ||
616 | |||
617 | unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data) | ||
618 | { | ||
619 | struct irq_fwspec fwspec; | ||
620 | |||
621 | of_phandle_args_to_fwspec(irq_data, &fwspec); | ||
622 | return irq_create_fwspec_mapping(&fwspec); | ||
623 | } | ||
523 | EXPORT_SYMBOL_GPL(irq_create_of_mapping); | 624 | EXPORT_SYMBOL_GPL(irq_create_of_mapping); |
524 | 625 | ||
525 | /** | 626 | /** |
@@ -590,14 +691,16 @@ static int virq_debug_show(struct seq_file *m, void *private) | |||
590 | "name", "mapped", "linear-max", "direct-max", "devtree-node"); | 691 | "name", "mapped", "linear-max", "direct-max", "devtree-node"); |
591 | mutex_lock(&irq_domain_mutex); | 692 | mutex_lock(&irq_domain_mutex); |
592 | list_for_each_entry(domain, &irq_domain_list, link) { | 693 | list_for_each_entry(domain, &irq_domain_list, link) { |
694 | struct device_node *of_node; | ||
593 | int count = 0; | 695 | int count = 0; |
696 | of_node = irq_domain_get_of_node(domain); | ||
594 | radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0) | 697 | radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0) |
595 | count++; | 698 | count++; |
596 | seq_printf(m, "%c%-16s %6u %10u %10u %s\n", | 699 | seq_printf(m, "%c%-16s %6u %10u %10u %s\n", |
597 | domain == irq_default_domain ? '*' : ' ', domain->name, | 700 | domain == irq_default_domain ? '*' : ' ', domain->name, |
598 | domain->revmap_size + count, domain->revmap_size, | 701 | domain->revmap_size + count, domain->revmap_size, |
599 | domain->revmap_direct_max_irq, | 702 | domain->revmap_direct_max_irq, |
600 | domain->of_node ? of_node_full_name(domain->of_node) : ""); | 703 | of_node ? of_node_full_name(of_node) : ""); |
601 | } | 704 | } |
602 | mutex_unlock(&irq_domain_mutex); | 705 | mutex_unlock(&irq_domain_mutex); |
603 | 706 | ||
@@ -751,11 +854,11 @@ static int irq_domain_alloc_descs(int virq, unsigned int cnt, | |||
751 | 854 | ||
752 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | 855 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY |
753 | /** | 856 | /** |
754 | * irq_domain_add_hierarchy - Add a irqdomain into the hierarchy | 857 | * irq_domain_create_hierarchy - Add an irqdomain into the hierarchy |
755 | * @parent: Parent irq domain to associate with the new domain | 858 | * @parent: Parent irq domain to associate with the new domain |
756 | * @flags: Irq domain flags associated to the domain | 859 | * @flags: Irq domain flags associated to the domain |
757 | * @size: Size of the domain. See below | 860 | * @size: Size of the domain. See below |
758 | * @node: Optional device-tree node of the interrupt controller | 861 | * @fwnode: Optional fwnode of the interrupt controller |
759 | * @ops: Pointer to the interrupt domain callbacks | 862 | * @ops: Pointer to the interrupt domain callbacks |
760 | * @host_data: Controller private data pointer | 863 | * @host_data: Controller private data pointer |
761 | * | 864 | * |
@@ -765,19 +868,19 @@ static int irq_domain_alloc_descs(int virq, unsigned int cnt, | |||
765 | * domain flags are set. | 868 | * domain flags are set. |
766 | * Returns pointer to IRQ domain, or NULL on failure. | 869 | * Returns pointer to IRQ domain, or NULL on failure. |
767 | */ | 870 | */ |
768 | struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, | 871 | struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent, |
769 | unsigned int flags, | 872 | unsigned int flags, |
770 | unsigned int size, | 873 | unsigned int size, |
771 | struct device_node *node, | 874 | struct fwnode_handle *fwnode, |
772 | const struct irq_domain_ops *ops, | 875 | const struct irq_domain_ops *ops, |
773 | void *host_data) | 876 | void *host_data) |
774 | { | 877 | { |
775 | struct irq_domain *domain; | 878 | struct irq_domain *domain; |
776 | 879 | ||
777 | if (size) | 880 | if (size) |
778 | domain = irq_domain_add_linear(node, size, ops, host_data); | 881 | domain = irq_domain_create_linear(fwnode, size, ops, host_data); |
779 | else | 882 | else |
780 | domain = irq_domain_add_tree(node, ops, host_data); | 883 | domain = irq_domain_create_tree(fwnode, ops, host_data); |
781 | if (domain) { | 884 | if (domain) { |
782 | domain->parent = parent; | 885 | domain->parent = parent; |
783 | domain->flags |= flags; | 886 | domain->flags |= flags; |
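
A minimal sketch of the new mapping entry point: build an irq_fwspec by hand (a two-cell, DT-style specifier here) and let irq_create_fwspec_mapping() locate the domain and translate it via ->translate() or the legacy ->xlate(). The hwirq number and trigger type are made up for illustration.

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static unsigned int demo_map_irq(struct device_node *controller)
{
	struct irq_fwspec fwspec;

	fwspec.fwnode = of_node_to_fwnode(controller);
	fwspec.param_count = 2;
	fwspec.param[0] = 17;			/* hwirq */
	fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;	/* trigger type */

	return irq_create_fwspec_mapping(&fwspec);
}
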
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index f9a59f6cabd2..a71175ff98d5 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -258,37 +258,6 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) | |||
258 | } | 258 | } |
259 | EXPORT_SYMBOL_GPL(irq_set_affinity_hint); | 259 | EXPORT_SYMBOL_GPL(irq_set_affinity_hint); |
260 | 260 | ||
261 | /** | ||
262 | * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt | ||
263 | * @irq: interrupt number to set affinity | ||
264 | * @vcpu_info: vCPU specific data | ||
265 | * | ||
266 | * This function uses the vCPU specific data to set the vCPU | ||
267 | * affinity for an irq. The vCPU specific data is passed from | ||
268 | * outside, such as KVM. One example code path is as below: | ||
269 | * KVM -> IOMMU -> irq_set_vcpu_affinity(). | ||
270 | */ | ||
271 | int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info) | ||
272 | { | ||
273 | unsigned long flags; | ||
274 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); | ||
275 | struct irq_data *data; | ||
276 | struct irq_chip *chip; | ||
277 | int ret = -ENOSYS; | ||
278 | |||
279 | if (!desc) | ||
280 | return -EINVAL; | ||
281 | |||
282 | data = irq_desc_get_irq_data(desc); | ||
283 | chip = irq_data_get_irq_chip(data); | ||
284 | if (chip && chip->irq_set_vcpu_affinity) | ||
285 | ret = chip->irq_set_vcpu_affinity(data, vcpu_info); | ||
286 | irq_put_desc_unlock(desc, flags); | ||
287 | |||
288 | return ret; | ||
289 | } | ||
290 | EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity); | ||
291 | |||
292 | static void irq_affinity_notify(struct work_struct *work) | 261 | static void irq_affinity_notify(struct work_struct *work) |
293 | { | 262 | { |
294 | struct irq_affinity_notify *notify = | 263 | struct irq_affinity_notify *notify = |
@@ -424,6 +393,37 @@ setup_affinity(struct irq_desc *desc, struct cpumask *mask) | |||
424 | } | 393 | } |
425 | #endif | 394 | #endif |
426 | 395 | ||
396 | /** | ||
397 | * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt | ||
398 | * @irq: interrupt number to set affinity | ||
399 | * @vcpu_info: vCPU specific data | ||
400 | * | ||
401 | * This function uses the vCPU specific data to set the vCPU | ||
402 | * affinity for an irq. The vCPU specific data is passed from | ||
403 | * outside, such as KVM. One example code path is as below: | ||
404 | * KVM -> IOMMU -> irq_set_vcpu_affinity(). | ||
405 | */ | ||
406 | int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info) | ||
407 | { | ||
408 | unsigned long flags; | ||
409 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); | ||
410 | struct irq_data *data; | ||
411 | struct irq_chip *chip; | ||
412 | int ret = -ENOSYS; | ||
413 | |||
414 | if (!desc) | ||
415 | return -EINVAL; | ||
416 | |||
417 | data = irq_desc_get_irq_data(desc); | ||
418 | chip = irq_data_get_irq_chip(data); | ||
419 | if (chip && chip->irq_set_vcpu_affinity) | ||
420 | ret = chip->irq_set_vcpu_affinity(data, vcpu_info); | ||
421 | irq_put_desc_unlock(desc, flags); | ||
422 | |||
423 | return ret; | ||
424 | } | ||
425 | EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity); | ||
426 | |||
427 | void __disable_irq(struct irq_desc *desc) | 427 | void __disable_irq(struct irq_desc *desc) |
428 | { | 428 | { |
429 | if (!desc->depth++) | 429 | if (!desc->depth++) |
@@ -730,6 +730,12 @@ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id) | |||
730 | return IRQ_NONE; | 730 | return IRQ_NONE; |
731 | } | 731 | } |
732 | 732 | ||
733 | static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id) | ||
734 | { | ||
735 | WARN(1, "Secondary action handler called for irq %d\n", irq); | ||
736 | return IRQ_NONE; | ||
737 | } | ||
738 | |||
733 | static int irq_wait_for_interrupt(struct irqaction *action) | 739 | static int irq_wait_for_interrupt(struct irqaction *action) |
734 | { | 740 | { |
735 | set_current_state(TASK_INTERRUPTIBLE); | 741 | set_current_state(TASK_INTERRUPTIBLE); |
@@ -756,7 +762,8 @@ static int irq_wait_for_interrupt(struct irqaction *action) | |||
756 | static void irq_finalize_oneshot(struct irq_desc *desc, | 762 | static void irq_finalize_oneshot(struct irq_desc *desc, |
757 | struct irqaction *action) | 763 | struct irqaction *action) |
758 | { | 764 | { |
759 | if (!(desc->istate & IRQS_ONESHOT)) | 765 | if (!(desc->istate & IRQS_ONESHOT) || |
766 | action->handler == irq_forced_secondary_handler) | ||
760 | return; | 767 | return; |
761 | again: | 768 | again: |
762 | chip_bus_lock(desc); | 769 | chip_bus_lock(desc); |
@@ -910,6 +917,18 @@ static void irq_thread_dtor(struct callback_head *unused) | |||
910 | irq_finalize_oneshot(desc, action); | 917 | irq_finalize_oneshot(desc, action); |
911 | } | 918 | } |
912 | 919 | ||
920 | static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action) | ||
921 | { | ||
922 | struct irqaction *secondary = action->secondary; | ||
923 | |||
924 | if (WARN_ON_ONCE(!secondary)) | ||
925 | return; | ||
926 | |||
927 | raw_spin_lock_irq(&desc->lock); | ||
928 | __irq_wake_thread(desc, secondary); | ||
929 | raw_spin_unlock_irq(&desc->lock); | ||
930 | } | ||
931 | |||
913 | /* | 932 | /* |
914 | * Interrupt handler thread | 933 | * Interrupt handler thread |
915 | */ | 934 | */ |
@@ -940,6 +959,8 @@ static int irq_thread(void *data) | |||
940 | action_ret = handler_fn(desc, action); | 959 | action_ret = handler_fn(desc, action); |
941 | if (action_ret == IRQ_HANDLED) | 960 | if (action_ret == IRQ_HANDLED) |
942 | atomic_inc(&desc->threads_handled); | 961 | atomic_inc(&desc->threads_handled); |
962 | if (action_ret == IRQ_WAKE_THREAD) | ||
963 | irq_wake_secondary(desc, action); | ||
943 | 964 | ||
944 | wake_threads_waitq(desc); | 965 | wake_threads_waitq(desc); |
945 | } | 966 | } |
@@ -984,20 +1005,36 @@ void irq_wake_thread(unsigned int irq, void *dev_id) | |||
984 | } | 1005 | } |
985 | EXPORT_SYMBOL_GPL(irq_wake_thread); | 1006 | EXPORT_SYMBOL_GPL(irq_wake_thread); |
986 | 1007 | ||
987 | static void irq_setup_forced_threading(struct irqaction *new) | 1008 | static int irq_setup_forced_threading(struct irqaction *new) |
988 | { | 1009 | { |
989 | if (!force_irqthreads) | 1010 | if (!force_irqthreads) |
990 | return; | 1011 | return 0; |
991 | if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) | 1012 | if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) |
992 | return; | 1013 | return 0; |
993 | 1014 | ||
994 | new->flags |= IRQF_ONESHOT; | 1015 | new->flags |= IRQF_ONESHOT; |
995 | 1016 | ||
996 | if (!new->thread_fn) { | 1017 | /* |
997 | set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); | 1018 | * Handle the case where we have a real primary handler and a |
998 | new->thread_fn = new->handler; | 1019 | * thread handler. We force thread them as well by creating a |
999 | new->handler = irq_default_primary_handler; | 1020 | * secondary action. |
1021 | */ | ||
1022 | if (new->handler != irq_default_primary_handler && new->thread_fn) { | ||
1023 | /* Allocate the secondary action */ | ||
1024 | new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL); | ||
1025 | if (!new->secondary) | ||
1026 | return -ENOMEM; | ||
1027 | new->secondary->handler = irq_forced_secondary_handler; | ||
1028 | new->secondary->thread_fn = new->thread_fn; | ||
1029 | new->secondary->dev_id = new->dev_id; | ||
1030 | new->secondary->irq = new->irq; | ||
1031 | new->secondary->name = new->name; | ||
1000 | } | 1032 | } |
1033 | /* Deal with the primary handler */ | ||
1034 | set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); | ||
1035 | new->thread_fn = new->handler; | ||
1036 | new->handler = irq_default_primary_handler; | ||
1037 | return 0; | ||
1001 | } | 1038 | } |
1002 | 1039 | ||
1003 | static int irq_request_resources(struct irq_desc *desc) | 1040 | static int irq_request_resources(struct irq_desc *desc) |
@@ -1017,6 +1054,48 @@ static void irq_release_resources(struct irq_desc *desc) | |||
1017 | c->irq_release_resources(d); | 1054 | c->irq_release_resources(d); |
1018 | } | 1055 | } |
1019 | 1056 | ||
1057 | static int | ||
1058 | setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary) | ||
1059 | { | ||
1060 | struct task_struct *t; | ||
1061 | struct sched_param param = { | ||
1062 | .sched_priority = MAX_USER_RT_PRIO/2, | ||
1063 | }; | ||
1064 | |||
1065 | if (!secondary) { | ||
1066 | t = kthread_create(irq_thread, new, "irq/%d-%s", irq, | ||
1067 | new->name); | ||
1068 | } else { | ||
1069 | t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq, | ||
1070 | new->name); | ||
1071 | param.sched_priority -= 1; | ||
1072 | } | ||
1073 | |||
1074 | if (IS_ERR(t)) | ||
1075 | return PTR_ERR(t); | ||
1076 | |||
1077 | sched_setscheduler_nocheck(t, SCHED_FIFO, ¶m); | ||
1078 | |||
1079 | /* | ||
1080 | * We keep the reference to the task struct even if | ||
1081 | * the thread dies to avoid that the interrupt code | ||
1082 | * references an already freed task_struct. | ||
1083 | */ | ||
1084 | get_task_struct(t); | ||
1085 | new->thread = t; | ||
1086 | /* | ||
1087 | * Tell the thread to set its affinity. This is | ||
1088 | * important for shared interrupt handlers as we do | ||
1089 | * not invoke setup_affinity() for the secondary | ||
1090 | * handlers as everything is already set up. Even for | ||
1091 | * interrupts marked with IRQF_NO_BALANCE this is | ||
1092 | * correct as we want the thread to move to the cpu(s) | ||
1093 | * on which the requesting code placed the interrupt. | ||
1094 | */ | ||
1095 | set_bit(IRQTF_AFFINITY, &new->thread_flags); | ||
1096 | return 0; | ||
1097 | } | ||
1098 | |||
1020 | /* | 1099 | /* |
1021 | * Internal function to register an irqaction - typically used to | 1100 | * Internal function to register an irqaction - typically used to |
1022 | * allocate special interrupts that are part of the architecture. | 1101 | * allocate special interrupts that are part of the architecture. |
@@ -1037,6 +1116,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
1037 | if (!try_module_get(desc->owner)) | 1116 | if (!try_module_get(desc->owner)) |
1038 | return -ENODEV; | 1117 | return -ENODEV; |
1039 | 1118 | ||
1119 | new->irq = irq; | ||
1120 | |||
1040 | /* | 1121 | /* |
1041 | * Check whether the interrupt nests into another interrupt | 1122 | * Check whether the interrupt nests into another interrupt |
1042 | * thread. | 1123 | * thread. |
@@ -1054,8 +1135,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
1054 | */ | 1135 | */ |
1055 | new->handler = irq_nested_primary_handler; | 1136 | new->handler = irq_nested_primary_handler; |
1056 | } else { | 1137 | } else { |
1057 | if (irq_settings_can_thread(desc)) | 1138 | if (irq_settings_can_thread(desc)) { |
1058 | irq_setup_forced_threading(new); | 1139 | ret = irq_setup_forced_threading(new); |
1140 | if (ret) | ||
1141 | goto out_mput; | ||
1142 | } | ||
1059 | } | 1143 | } |
1060 | 1144 | ||
1061 | /* | 1145 | /* |
@@ -1064,37 +1148,14 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
1064 | * thread. | 1148 | * thread. |
1065 | */ | 1149 | */ |
1066 | if (new->thread_fn && !nested) { | 1150 | if (new->thread_fn && !nested) { |
1067 | struct task_struct *t; | 1151 | ret = setup_irq_thread(new, irq, false); |
1068 | static const struct sched_param param = { | 1152 | if (ret) |
1069 | .sched_priority = MAX_USER_RT_PRIO/2, | ||
1070 | }; | ||
1071 | |||
1072 | t = kthread_create(irq_thread, new, "irq/%d-%s", irq, | ||
1073 | new->name); | ||
1074 | if (IS_ERR(t)) { | ||
1075 | ret = PTR_ERR(t); | ||
1076 | goto out_mput; | 1153 | goto out_mput; |
1154 | if (new->secondary) { | ||
1155 | ret = setup_irq_thread(new->secondary, irq, true); | ||
1156 | if (ret) | ||
1157 | goto out_thread; | ||
1077 | } | 1158 | } |
1078 | |||
1079 | sched_setscheduler_nocheck(t, SCHED_FIFO, ¶m); | ||
1080 | |||
1081 | /* | ||
1082 | * We keep the reference to the task struct even if | ||
1083 | * the thread dies to avoid that the interrupt code | ||
1084 | * references an already freed task_struct. | ||
1085 | */ | ||
1086 | get_task_struct(t); | ||
1087 | new->thread = t; | ||
1088 | /* | ||
1089 | * Tell the thread to set its affinity. This is | ||
1090 | * important for shared interrupt handlers as we do | ||
1091 | * not invoke setup_affinity() for the secondary | ||
1092 | * handlers as everything is already set up. Even for | ||
1093 | * interrupts marked with IRQF_NO_BALANCE this is | ||
1094 | * correct as we want the thread to move to the cpu(s) | ||
1095 | * on which the requesting code placed the interrupt. | ||
1096 | */ | ||
1097 | set_bit(IRQTF_AFFINITY, &new->thread_flags); | ||
1098 | } | 1159 | } |
1099 | 1160 | ||
1100 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { | 1161 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { |
@@ -1267,7 +1328,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
1267 | irq, nmsk, omsk); | 1328 | irq, nmsk, omsk); |
1268 | } | 1329 | } |
1269 | 1330 | ||
1270 | new->irq = irq; | ||
1271 | *old_ptr = new; | 1331 | *old_ptr = new; |
1272 | 1332 | ||
1273 | irq_pm_install_action(desc, new); | 1333 | irq_pm_install_action(desc, new); |
@@ -1293,6 +1353,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
1293 | */ | 1353 | */ |
1294 | if (new->thread) | 1354 | if (new->thread) |
1295 | wake_up_process(new->thread); | 1355 | wake_up_process(new->thread); |
1356 | if (new->secondary) | ||
1357 | wake_up_process(new->secondary->thread); | ||
1296 | 1358 | ||
1297 | register_irq_proc(irq, desc); | 1359 | register_irq_proc(irq, desc); |
1298 | new->dir = NULL; | 1360 | new->dir = NULL; |
@@ -1323,6 +1385,13 @@ out_thread: | |||
1323 | kthread_stop(t); | 1385 | kthread_stop(t); |
1324 | put_task_struct(t); | 1386 | put_task_struct(t); |
1325 | } | 1387 | } |
1388 | if (new->secondary && new->secondary->thread) { | ||
1389 | struct task_struct *t = new->secondary->thread; | ||
1390 | |||
1391 | new->secondary->thread = NULL; | ||
1392 | kthread_stop(t); | ||
1393 | put_task_struct(t); | ||
1394 | } | ||
1326 | out_mput: | 1395 | out_mput: |
1327 | module_put(desc->owner); | 1396 | module_put(desc->owner); |
1328 | return ret; | 1397 | return ret; |
@@ -1394,6 +1463,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
1394 | 1463 | ||
1395 | /* If this was the last handler, shut down the IRQ line: */ | 1464 | /* If this was the last handler, shut down the IRQ line: */ |
1396 | if (!desc->action) { | 1465 | if (!desc->action) { |
1466 | irq_settings_clr_disable_unlazy(desc); | ||
1397 | irq_shutdown(desc); | 1467 | irq_shutdown(desc); |
1398 | irq_release_resources(desc); | 1468 | irq_release_resources(desc); |
1399 | } | 1469 | } |
@@ -1430,9 +1500,14 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
1430 | if (action->thread) { | 1500 | if (action->thread) { |
1431 | kthread_stop(action->thread); | 1501 | kthread_stop(action->thread); |
1432 | put_task_struct(action->thread); | 1502 | put_task_struct(action->thread); |
1503 | if (action->secondary && action->secondary->thread) { | ||
1504 | kthread_stop(action->secondary->thread); | ||
1505 | put_task_struct(action->secondary->thread); | ||
1506 | } | ||
1433 | } | 1507 | } |
1434 | 1508 | ||
1435 | module_put(desc->owner); | 1509 | module_put(desc->owner); |
1510 | kfree(action->secondary); | ||
1436 | return action; | 1511 | return action; |
1437 | } | 1512 | } |
1438 | 1513 | ||
@@ -1576,8 +1651,10 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, | |||
1576 | retval = __setup_irq(irq, desc, action); | 1651 | retval = __setup_irq(irq, desc, action); |
1577 | chip_bus_sync_unlock(desc); | 1652 | chip_bus_sync_unlock(desc); |
1578 | 1653 | ||
1579 | if (retval) | 1654 | if (retval) { |
1655 | kfree(action->secondary); | ||
1580 | kfree(action); | 1656 | kfree(action); |
1657 | } | ||
1581 | 1658 | ||
1582 | #ifdef CONFIG_DEBUG_SHIRQ_FIXME | 1659 | #ifdef CONFIG_DEBUG_SHIRQ_FIXME |
1583 | if (!retval && (irqflags & IRQF_SHARED)) { | 1660 | if (!retval && (irqflags & IRQF_SHARED)) { |
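At the driver level nothing changes with the secondary-action plumbing above: a device that wants a small hard-irq check plus a sleepable handler still calls request_threaded_irq(), and under forced threading (the threadirqs boot option) the core now gives the primary handler its own secondary thread behind the scenes. A minimal, hypothetical driver sketch follows; the foo device, its register layout and the handler bodies are invented for illustration.

#include <linux/interrupt.h>
#include <linux/io.h>

#define FOO_STAT        0x00    /* hypothetical status register */

struct foo_dev {
        void __iomem *regs;
        int irq;
};

/* Primary handler: hard irq context, or a secondary thread under threadirqs. */
static irqreturn_t foo_primary(int irq, void *dev_id)
{
        struct foo_dev *foo = dev_id;

        if (!readl(foo->regs + FOO_STAT))
                return IRQ_NONE;        /* not ours (shared line) */
        return IRQ_WAKE_THREAD;         /* defer the real work to the thread */
}

/* Threaded handler: runs in the "irq/<nr>-foo" kernel thread, may sleep. */
static irqreturn_t foo_thread(int irq, void *dev_id)
{
        struct foo_dev *foo = dev_id;

        writel(0, foo->regs + FOO_STAT);        /* pretend this clears events */
        return IRQ_HANDLED;
}

static int foo_request_irq(struct foo_dev *foo)
{
        /* IRQF_ONESHOT keeps the line masked until foo_thread() finishes. */
        return request_threaded_irq(foo->irq, foo_primary, foo_thread,
                                    IRQF_ONESHOT | IRQF_SHARED, "foo", foo);
}

The matching teardown is the usual free_irq(foo->irq, foo); the __free_irq() hunks above extend that path to stop and release the secondary thread and to kfree() the secondary action as well.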
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index be9149f62eb8..6b0c0b74a2a1 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c | |||
@@ -235,11 +235,11 @@ static void msi_domain_update_chip_ops(struct msi_domain_info *info) | |||
235 | 235 | ||
236 | /** | 236 | /** |
237 | * msi_create_irq_domain - Create a MSI interrupt domain | 237 | * msi_create_irq_domain - Create a MSI interrupt domain |
238 | * @of_node: Optional device-tree node of the interrupt controller | 238 | * @fwnode: Optional fwnode of the interrupt controller |
239 | * @info: MSI domain info | 239 | * @info: MSI domain info |
240 | * @parent: Parent irq domain | 240 | * @parent: Parent irq domain |
241 | */ | 241 | */ |
242 | struct irq_domain *msi_create_irq_domain(struct device_node *node, | 242 | struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode, |
243 | struct msi_domain_info *info, | 243 | struct msi_domain_info *info, |
244 | struct irq_domain *parent) | 244 | struct irq_domain *parent) |
245 | { | 245 | { |
@@ -248,8 +248,8 @@ struct irq_domain *msi_create_irq_domain(struct device_node *node, | |||
248 | if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) | 248 | if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) |
249 | msi_domain_update_chip_ops(info); | 249 | msi_domain_update_chip_ops(info); |
250 | 250 | ||
251 | return irq_domain_add_hierarchy(parent, 0, 0, node, &msi_domain_ops, | 251 | return irq_domain_create_hierarchy(parent, 0, 0, fwnode, |
252 | info); | 252 | &msi_domain_ops, info); |
253 | } | 253 | } |
254 | 254 | ||
255 | /** | 255 | /** |
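A caller of the reworked msi_create_irq_domain() now hands in a struct fwnode_handle rather than a device_node. A hypothetical DT-based stacked MSI driver might look like the sketch below; the chip callbacks, the flag selection and the of_node_to_fwnode() conversion are illustrative assumptions, not taken from this patch.

#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of.h>

static struct irq_chip foo_msi_chip = {
        .name           = "FOO-MSI",
        .irq_mask       = irq_chip_mask_parent,
        .irq_unmask     = irq_chip_unmask_parent,
};

static struct msi_domain_info foo_msi_info = {
        .flags  = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
        .chip   = &foo_msi_chip,
};

static struct irq_domain *foo_create_msi_domain(struct device_node *node,
                                                struct irq_domain *parent)
{
        /* The domain is now keyed by a fwnode_handle, not a device_node. */
        return msi_create_irq_domain(of_node_to_fwnode(node),
                                     &foo_msi_info, parent);
}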
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index a50ddc9417ff..a916cf144b65 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c | |||
@@ -475,7 +475,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
475 | for_each_online_cpu(j) | 475 | for_each_online_cpu(j) |
476 | any_count |= kstat_irqs_cpu(i, j); | 476 | any_count |= kstat_irqs_cpu(i, j); |
477 | action = desc->action; | 477 | action = desc->action; |
478 | if (!action && !any_count) | 478 | if ((!action || action == &chained_action) && !any_count) |
479 | goto out; | 479 | goto out; |
480 | 480 | ||
481 | seq_printf(p, "%*d: ", prec, i); | 481 | seq_printf(p, "%*d: ", prec, i); |
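For context on chained_action: it appears to be the dummy irqaction the core installs on lines that use a chained (demultiplexing) flow handler, so the check above keeps such lines out of /proc/interrupts until they have actually counted something. A chained setup typically looks roughly like the sketch below; the mux device, its status register and the 32-interrupt width are invented.

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>

struct foo_mux {
        struct irq_domain *domain;
        void __iomem *regs;
};

/* Demux handler for the parent line; never appears as a normal action. */
static void foo_mux_handler(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct foo_mux *mux = irq_desc_get_handler_data(desc);
        unsigned long pending;
        int hwirq;

        chained_irq_enter(chip, desc);
        pending = readl(mux->regs);             /* hypothetical status register */
        for_each_set_bit(hwirq, &pending, 32)
                generic_handle_irq(irq_find_mapping(mux->domain, hwirq));
        chained_irq_exit(chip, desc);
}

static void foo_mux_init(unsigned int parent_irq, struct foo_mux *mux)
{
        irq_set_chained_handler_and_data(parent_irq, foo_mux_handler, mux);
}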
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h index 3320b84cc60f..320579d89091 100644 --- a/kernel/irq/settings.h +++ b/kernel/irq/settings.h | |||
@@ -15,6 +15,7 @@ enum { | |||
15 | _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD, | 15 | _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD, |
16 | _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID, | 16 | _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID, |
17 | _IRQ_IS_POLLED = IRQ_IS_POLLED, | 17 | _IRQ_IS_POLLED = IRQ_IS_POLLED, |
18 | _IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY, | ||
18 | _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, | 19 | _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, |
19 | }; | 20 | }; |
20 | 21 | ||
@@ -28,6 +29,7 @@ enum { | |||
28 | #define IRQ_NESTED_THREAD GOT_YOU_MORON | 29 | #define IRQ_NESTED_THREAD GOT_YOU_MORON |
29 | #define IRQ_PER_CPU_DEVID GOT_YOU_MORON | 30 | #define IRQ_PER_CPU_DEVID GOT_YOU_MORON |
30 | #define IRQ_IS_POLLED GOT_YOU_MORON | 31 | #define IRQ_IS_POLLED GOT_YOU_MORON |
32 | #define IRQ_DISABLE_UNLAZY GOT_YOU_MORON | ||
31 | #undef IRQF_MODIFY_MASK | 33 | #undef IRQF_MODIFY_MASK |
32 | #define IRQF_MODIFY_MASK GOT_YOU_MORON | 34 | #define IRQF_MODIFY_MASK GOT_YOU_MORON |
33 | 35 | ||
@@ -154,3 +156,13 @@ static inline bool irq_settings_is_polled(struct irq_desc *desc) | |||
154 | { | 156 | { |
155 | return desc->status_use_accessors & _IRQ_IS_POLLED; | 157 | return desc->status_use_accessors & _IRQ_IS_POLLED; |
156 | } | 158 | } |
159 | |||
160 | static inline bool irq_settings_disable_unlazy(struct irq_desc *desc) | ||
161 | { | ||
162 | return desc->status_use_accessors & _IRQ_DISABLE_UNLAZY; | ||
163 | } | ||
164 | |||
165 | static inline void irq_settings_clr_disable_unlazy(struct irq_desc *desc) | ||
166 | { | ||
167 | desc->status_use_accessors &= ~_IRQ_DISABLE_UNLAZY; | ||
168 | } | ||
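These accessors back the new IRQ_DISABLE_UNLAZY status flag, which, judging by the name and the clearing hook added to __free_irq() above, lets a driver opt out of the lazy-disable optimisation so that disable_irq() masks the line at the interrupt chip immediately. A driver would presumably toggle it through the generic status helpers, for example:

#include <linux/interrupt.h>
#include <linux/irq.h>

static void foo_quiesce(unsigned int irq)
{
        /*
         * Ask the core to mask at the chip level right away instead of
         * lazily on the next interrupt (useful when the device keeps the
         * line asserted and spurious wakeups are expensive).
         */
        irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
        disable_irq(irq);
}

static void foo_resume(unsigned int irq)
{
        enable_irq(irq);
        irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
}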
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 66c66165e712..30489181922d 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c | |||
@@ -2137,7 +2137,7 @@ static int init_vgic_model(struct kvm *kvm, int type) | |||
2137 | case KVM_DEV_TYPE_ARM_VGIC_V2: | 2137 | case KVM_DEV_TYPE_ARM_VGIC_V2: |
2138 | vgic_v2_init_emulation(kvm); | 2138 | vgic_v2_init_emulation(kvm); |
2139 | break; | 2139 | break; |
2140 | #ifdef CONFIG_ARM_GIC_V3 | 2140 | #ifdef CONFIG_KVM_ARM_VGIC_V3 |
2141 | case KVM_DEV_TYPE_ARM_VGIC_V3: | 2141 | case KVM_DEV_TYPE_ARM_VGIC_V3: |
2142 | vgic_v3_init_emulation(kvm); | 2142 | vgic_v3_init_emulation(kvm); |
2143 | break; | 2143 | break; |
@@ -2299,7 +2299,7 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write) | |||
2299 | block_size = KVM_VGIC_V2_CPU_SIZE; | 2299 | block_size = KVM_VGIC_V2_CPU_SIZE; |
2300 | alignment = SZ_4K; | 2300 | alignment = SZ_4K; |
2301 | break; | 2301 | break; |
2302 | #ifdef CONFIG_ARM_GIC_V3 | 2302 | #ifdef CONFIG_KVM_ARM_VGIC_V3 |
2303 | case KVM_VGIC_V3_ADDR_TYPE_DIST: | 2303 | case KVM_VGIC_V3_ADDR_TYPE_DIST: |
2304 | type_needed = KVM_DEV_TYPE_ARM_VGIC_V3; | 2304 | type_needed = KVM_DEV_TYPE_ARM_VGIC_V3; |
2305 | addr_ptr = &vgic->vgic_dist_base; | 2305 | addr_ptr = &vgic->vgic_dist_base; |