author		Linus Torvalds <torvalds@linux-foundation.org>	2014-12-15 20:30:09 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-15 20:30:09 -0500
commit		60d7ef3fd34dca2eb7ef4c997ccf1ef76a9ba148 (patch)
tree		a5d9449b76af277c8f36008535c5d00036e021de
parent		9b8ec916df67ba31614796037caf606b763e2e79 (diff)
parent		c848126734e8621e81659d819922b20d93a2aa6d (diff)
Merge branch 'irq-irqdomain-arm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq domain ARM updates from Thomas Gleixner:
 "This set of changes makes use of hierarchical irqdomains to provide:

   - MSI/ITS support for GICv3

   - MSI support for GICv2m

   - Interrupt polarity extender for GICv1

  Marc has some more cleanups for the existing extension hooks of GIC in
  the pipeline, but they are going to be 3.20 material"

* 'irq-irqdomain-arm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (22 commits)
  irqchip: gicv3-its: Fix ITT allocation
  irqchip: gicv3-its: Move some alloc/free code to activate/deactivate
  irqchip: gicv3-its: Fix domain free in multi-MSI case
  irqchip: gic: Remove warning by including linux/irqdomain.h
  irqchip: gic-v2m: Add DT bindings for GICv2m
  irqchip: gic-v2m: Add support for ARM GICv2m MSI(-X) doorbell
  irqchip: mtk-sysirq: dt-bindings: Add bindings for mediatek sysirq
  irqchip: mtk-sysirq: Add sysirq interrupt polarity support
  irqchip: gic: Support hierarchy irq domain.
  irqchip: GICv3: Binding updates for ITS
  irqchip: GICv3: ITS: enable compilation of the ITS driver
  irqchip: GICv3: ITS: plug ITS init into main GICv3 code
  irqchip: GICv3: ITS: DT probing and initialization
  irqchip: GICv3: ITS: MSI support
  irqchip: GICv3: ITS: device allocation and configuration
  irqchip: GICv3: ITS: tables allocators
  irqchip: GICv3: ITS: LPI allocator
  irqchip: GICv3: ITS: irqchip implementation
  irqchip: GICv3: ITS command queue
  irqchip: GICv3: rework redistributor structure
  ...
-rw-r--r--  Documentation/devicetree/bindings/arm/gic-v3.txt                   |   39
-rw-r--r--  Documentation/devicetree/bindings/arm/gic.txt                      |   53
-rw-r--r--  Documentation/devicetree/bindings/arm/mediatek/mediatek,sysirq.txt |   28
-rw-r--r--  arch/arm64/Kconfig                                                 |    2
-rw-r--r--  arch/arm64/include/asm/Kbuild                                      |    1
-rw-r--r--  drivers/irqchip/Kconfig                                            |   12
-rw-r--r--  drivers/irqchip/Makefile                                           |    3
-rw-r--r--  drivers/irqchip/irq-gic-v2m.c                                      |  333
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c                                   | 1425
-rw-r--r--  drivers/irqchip/irq-gic-v3.c                                       |  156
-rw-r--r--  drivers/irqchip/irq-gic.c                                          |   81
-rw-r--r--  drivers/irqchip/irq-mtk-sysirq.c                                   |  163
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h                                 |  128
-rw-r--r--  include/linux/irqchip/arm-gic.h                                    |    4
14 files changed, 2364 insertions, 64 deletions
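All three new providers in this pull (GICv2m, the GICv3 ITS and the mediatek polarity extender) are built on the same hierarchical-irqdomain shape: a child domain does its own bookkeeping (MSI bitmap, polarity bit, ...) and forwards the actual interrupt allocation to the GIC parent domain. As an editorial sketch of that shape, not code taken from the patch (the my_* identifiers are invented, and hwirq selection and error handling are elided), a child domain's .alloc typically looks like this:

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_chip my_msi_chip = {
	.name		= "my-msi",
	.irq_mask	= irq_chip_mask_parent,
	.irq_unmask	= irq_chip_unmask_parent,
	.irq_eoi	= irq_chip_eoi_parent,
};

static int my_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			       unsigned int nr_irqs, void *arg)
{
	struct of_phandle_args args;
	irq_hw_number_t hwirq = 42;	/* would come from this frame's bitmap */
	int err;

	/* Describe the wanted interrupt in the parent's (GIC) cell format */
	args.np = domain->parent->of_node;
	args.args_count = 3;
	args.args[0] = 0;			/* SPI */
	args.args[1] = hwirq - 32;		/* GIC bindings number SPIs from 32 */
	args.args[2] = IRQ_TYPE_EDGE_RISING;

	/* Let the GIC parent domain allocate the real interrupt */
	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &args);
	if (err)
		return err;

	/* Stack this driver's chip and per-instance data on top of it */
	return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
					     &my_msi_chip, domain->host_data);
}

static const struct irq_domain_ops my_msi_domain_ops = {
	.alloc	= my_msi_domain_alloc,
	.free	= irq_domain_free_irqs_parent,	/* real drivers also release the hwirq */
};

The gicv2m_irq_domain_alloc() and its_irq_domain_alloc() functions in the new drivers further down are fleshed-out versions of exactly this pattern.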
diff --git a/Documentation/devicetree/bindings/arm/gic-v3.txt b/Documentation/devicetree/bindings/arm/gic-v3.txt
index 33cd05e6c125..ddfade40ac59 100644
--- a/Documentation/devicetree/bindings/arm/gic-v3.txt
+++ b/Documentation/devicetree/bindings/arm/gic-v3.txt
@@ -49,11 +49,29 @@ Optional
49 occupied by the redistributors. Required if more than one such
50 region is present.
51
52Sub-nodes:
53
54GICv3 has one or more Interrupt Translation Services (ITS) that are
55used to route Message Signalled Interrupts (MSI) to the CPUs.
56
57These nodes must have the following properties:
58- compatible : Should at least contain "arm,gic-v3-its".
59- msi-controller : Boolean property. Identifies the node as an MSI controller
60- reg: Specifies the base physical address and size of the ITS
61 registers.
62
63The main GIC node must contain the appropriate #address-cells,
64#size-cells and ranges properties for the reg property of all ITS
65nodes.
66
67 Examples:
68
69 	gic: interrupt-controller@2cf00000 {
70 		compatible = "arm,gic-v3";
71 		#interrupt-cells = <3>;
72 #address-cells = <2>;
73 #size-cells = <2>;
74 ranges;
75 		interrupt-controller;
76 		reg = <0x0 0x2f000000 0 0x10000>,	// GICD
77 		      <0x0 0x2f100000 0 0x200000>,	// GICR
@@ -61,11 +79,20 @@ Examples:
79 		      <0x0 0x2c010000 0 0x2000>,	// GICH
80 		      <0x0 0x2c020000 0 0x2000>;	// GICV
81 		interrupts = <1 9 4>;
82
83 gic-its@2c200000 {
84 compatible = "arm,gic-v3-its";
85 msi-controller;
86 reg = <0x0 0x2c200000 0 0x200000>;
87 };
88 	};
89
90 	gic: interrupt-controller@2c010000 {
91 		compatible = "arm,gic-v3";
92 		#interrupt-cells = <3>;
93 #address-cells = <2>;
94 #size-cells = <2>;
95 ranges;
96 		interrupt-controller;
97 		redistributor-stride = <0x0 0x40000>;	// 256kB stride
98 		#redistributor-regions = <2>;
@@ -76,4 +103,16 @@ Examples:
103 		      <0x0 0x2c060000 0 0x2000>,	// GICH
104 		      <0x0 0x2c080000 0 0x2000>;	// GICV
105 		interrupts = <1 9 4>;
106
107 gic-its@2c200000 {
108 compatible = "arm,gic-v3-its";
109 msi-controller;
110 reg = <0x0 0x2c200000 0 0x200000>;
111 };
112
113 gic-its@2c400000 {
114 compatible = "arm,gic-v3-its";
115 msi-controller;
116 reg = <0x0 0x2c400000 0 0x200000>;
117 };
118 	};
diff --git a/Documentation/devicetree/bindings/arm/gic.txt b/Documentation/devicetree/bindings/arm/gic.txt
index b38608af66db..8112d0c3675a 100644
--- a/Documentation/devicetree/bindings/arm/gic.txt
+++ b/Documentation/devicetree/bindings/arm/gic.txt
@@ -97,3 +97,56 @@ Example:
97 		      <0x2c006000 0x2000>;
98 		interrupts = <1 9 0xf04>;
99 	};
100
101
102* GICv2m extension for MSI/MSI-x support (Optional)
103
104Certain revisions of GIC-400 support MSI/MSI-x via V2M register frame(s).
105This is enabled by specifying v2m sub-node(s).
106
107Required properties:
108
109- compatible : The value here should contain "arm,gic-v2m-frame".
110
111- msi-controller : Identifies the node as an MSI controller.
112
113- reg : GICv2m MSI interface register base and size
114
115Optional properties:
116
117- arm,msi-base-spi : When the MSI_TYPER register contains an incorrect
118 value, this property should contain the SPI base of
119 the MSI frame, overriding the HW value.
120
121- arm,msi-num-spis : When the MSI_TYPER register contains an incorrect
122 value, this property should contain the number of
123 SPIs assigned to the frame, overriding the HW value.
124
125Example:
126
127 interrupt-controller@e1101000 {
128 compatible = "arm,gic-400";
129 #interrupt-cells = <3>;
130 #address-cells = <2>;
131 #size-cells = <2>;
132 interrupt-controller;
133 interrupts = <1 8 0xf04>;
134 ranges = <0 0 0 0xe1100000 0 0x100000>;
135 reg = <0x0 0xe1110000 0 0x01000>,
136 <0x0 0xe112f000 0 0x02000>,
137 <0x0 0xe1140000 0 0x10000>,
138 <0x0 0xe1160000 0 0x10000>;
139 v2m0: v2m@0x8000 {
140 compatible = "arm,gic-v2m-frame";
141 msi-controller;
142 reg = <0x0 0x80000 0 0x1000>;
143 };
144
145 ....
146
147 v2mN: v2m@0x9000 {
148 compatible = "arm,gic-v2m-frame";
149 msi-controller;
150 reg = <0x0 0x90000 0 0x1000>;
151 };
152 };
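For reference, the new irq-gic-v2m.c driver added later in this diff makes the override semantics concrete: both properties must be present for the override to take effect, otherwise the SPI window is read from the frame's MSI_TYPER register. Condensed from that driver, with node and base standing for the frame's device node and mapped registers:

	u32 spi_start, nr_spis;

	if (!of_property_read_u32(node, "arm,msi-base-spi", &spi_start) &&
	    !of_property_read_u32(node, "arm,msi-num-spis", &nr_spis)) {
		/* Firmware says MSI_TYPER is unreliable: trust the DT values */
		pr_info("Overriding V2M MSI_TYPER (base:%u, num:%u)\n",
			spi_start, nr_spis);
	} else {
		/* Otherwise the hardware register is authoritative */
		u32 typer = readl_relaxed(base + V2M_MSI_TYPER);

		spi_start = V2M_MSI_TYPER_BASE_SPI(typer);
		nr_spis = V2M_MSI_TYPER_NUM_SPI(typer);
	}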
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,sysirq.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,sysirq.txt
new file mode 100644
index 000000000000..d680b07ec6e8
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,sysirq.txt
@@ -0,0 +1,28 @@
1Mediatek 65xx/81xx sysirq
2
3Mediatek SOCs sysirq support controllable irq inverter for each GIC SPI
4interrupt.
5
6Required properties:
7- compatible: should be one of:
8 "mediatek,mt8135-sysirq"
9 "mediatek,mt8127-sysirq"
10 "mediatek,mt6589-sysirq"
11 "mediatek,mt6582-sysirq"
12 "mediatek,mt6577-sysirq"
13- interrupt-controller : Identifies the node as an interrupt controller
14- #interrupt-cells : Use the same format as specified by GIC in
15 Documentation/devicetree/bindings/arm/gic.txt
16- interrupt-parent: phandle of irq parent for sysirq. The parent must
17 use the same interrupt-cells format as GIC.
18- reg: Physical base address of the intpol registers and length of memory
19 mapped region.
20
21Example:
22 sysirq: interrupt-controller@10200100 {
23 compatible = "mediatek,mt6589-sysirq", "mediatek,mt6577-sysirq";
24 interrupt-controller;
25 #interrupt-cells = <3>;
26 interrupt-parent = <&gic>;
27 reg = <0 0x10200100 0 0x1c>;
28 };
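The matching driver, drivers/irqchip/irq-mtk-sysirq.c, is part of this merge but its body is not shown in this page. Roughly, and only as a hedged sketch (invented names, an assumed one-bit-per-SPI intpol register layout, not code lifted from the driver), such a polarity extender sits in a child domain, flips active-low/falling types to active-high/rising, and then hands set_type to the GIC parent:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>

static int my_intpol_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *intpol_base = irq_data_get_irq_chip_data(d);
	u32 offset = (d->hwirq / 32) * 4;	/* one 32-bit register per 32 SPIs (assumed) */
	u32 mask = BIT(d->hwirq % 32);
	u32 val = readl_relaxed(intpol_base + offset);

	if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING) {
		/* Invert in the sysirq block, then tell the GIC the inverted type */
		writel_relaxed(val | mask, intpol_base + offset);
		type = (type == IRQ_TYPE_LEVEL_LOW) ? IRQ_TYPE_LEVEL_HIGH
						    : IRQ_TYPE_EDGE_RISING;
	} else {
		writel_relaxed(val & ~mask, intpol_base + offset);
	}

	/* Forward the (possibly rewritten) type to the parent GIC domain */
	d = d->parent_data;
	return d->chip->irq_set_type(d, type);
}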
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 688db03ef5b8..b1f9a20a3677 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -14,7 +14,9 @@ config ARM64
14 	select ARM_ARCH_TIMER
15 	select ARM_GIC
16 	select AUDIT_ARCH_COMPAT_GENERIC
17 select ARM_GIC_V2M if PCI_MSI
18 	select ARM_GIC_V3
19 select ARM_GIC_V3_ITS if PCI_MSI
20 	select BUILDTIME_EXTABLE_SORT
21 	select CLONE_BACKWARDS
22 	select COMMON_CLK
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 6b61091c7f4c..55103e50c51b 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -27,6 +27,7 @@ generic-y += local64.h
27generic-y += mcs_spinlock.h
28generic-y += mman.h
29generic-y += msgbuf.h
30generic-y += msi.h
31generic-y += mutex.h
32generic-y += pci.h
33generic-y += pci-bridge.h
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index e12cb23d786c..cc79d2a5a8c2 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -5,8 +5,15 @@ config IRQCHIP
5config ARM_GIC
6 	bool
7 	select IRQ_DOMAIN
8 select IRQ_DOMAIN_HIERARCHY
9 	select MULTI_IRQ_HANDLER
10
11config ARM_GIC_V2M
12 bool
13 depends on ARM_GIC
14 depends on PCI && PCI_MSI
15 select PCI_MSI_IRQ_DOMAIN
16
17config GIC_NON_BANKED
18 	bool
19
@@ -14,6 +21,11 @@ config ARM_GIC_V3
21 	bool
22 	select IRQ_DOMAIN
23 	select MULTI_IRQ_HANDLER
24 select IRQ_DOMAIN_HIERARCHY
25
26config ARM_GIC_V3_ITS
27 bool
28 select PCI_MSI_IRQ_DOMAIN
29
30config ARM_NVIC
31 	bool
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 4954a314c31e..9516a324be6d 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -19,7 +19,9 @@ obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o
19obj-$(CONFIG_ARCH_SUNXI)		+= irq-sunxi-nmi.o
20obj-$(CONFIG_ARCH_SPEAR3XX)		+= spear-shirq.o
21obj-$(CONFIG_ARM_GIC)			+= irq-gic.o irq-gic-common.o
22obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
23obj-$(CONFIG_ARM_GIC_V3)		+= irq-gic-v3.o irq-gic-common.o
24obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o
25obj-$(CONFIG_ARM_NVIC)			+= irq-nvic.o
26obj-$(CONFIG_ARM_VIC)			+= irq-vic.o
27obj-$(CONFIG_ATMEL_AIC_IRQ)		+= irq-atmel-aic-common.o irq-atmel-aic.o
@@ -39,3 +41,4 @@ obj-$(CONFIG_BCM7120_L2_IRQ) += irq-bcm7120-l2.o
41obj-$(CONFIG_BRCMSTB_L2_IRQ)		+= irq-brcmstb-l2.o
42obj-$(CONFIG_KEYSTONE_IRQ)		+= irq-keystone.o
43obj-$(CONFIG_MIPS_GIC)			+= irq-mips-gic.o
44obj-$(CONFIG_ARCH_MEDIATEK) += irq-mtk-sysirq.o
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
new file mode 100644
index 000000000000..fdf706555d72
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -0,0 +1,333 @@
1/*
2 * ARM GIC v2m MSI(-X) support
3 * Support for Message Signaled Interrupts for systems that
4 * implement ARM Generic Interrupt Controller: GICv2m.
5 *
6 * Copyright (C) 2014 Advanced Micro Devices, Inc.
7 * Authors: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
8 * Harish Kasiviswanathan <harish.kasiviswanathan@amd.com>
9 * Brandon Anderson <brandon.anderson@amd.com>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License version 2 as published
13 * by the Free Software Foundation.
14 */
15
16#define pr_fmt(fmt) "GICv2m: " fmt
17
18#include <linux/irq.h>
19#include <linux/irqdomain.h>
20#include <linux/kernel.h>
21#include <linux/of_address.h>
22#include <linux/of_pci.h>
23#include <linux/slab.h>
24#include <linux/spinlock.h>
25
26/*
27* MSI_TYPER:
28* [31:26] Reserved
29* [25:16] lowest SPI assigned to MSI
30* [15:10] Reserved
31* [9:0]	Number of SPIs assigned to MSI
32*/
33#define V2M_MSI_TYPER 0x008
34#define V2M_MSI_TYPER_BASE_SHIFT 16
35#define V2M_MSI_TYPER_BASE_MASK 0x3FF
36#define V2M_MSI_TYPER_NUM_MASK 0x3FF
37#define V2M_MSI_SETSPI_NS 0x040
38#define V2M_MIN_SPI 32
39#define V2M_MAX_SPI 1019
40
41#define V2M_MSI_TYPER_BASE_SPI(x) \
42 (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK)
43
44#define V2M_MSI_TYPER_NUM_SPI(x) ((x) & V2M_MSI_TYPER_NUM_MASK)
45
46struct v2m_data {
47 spinlock_t msi_cnt_lock;
48 struct msi_controller mchip;
49 struct resource res; /* GICv2m resource */
50 void __iomem *base; /* GICv2m virt address */
51 u32 spi_start; /* The SPI number that MSIs start */
52 u32 nr_spis; /* The number of SPIs for MSIs */
53 unsigned long *bm; /* MSI vector bitmap */
54 struct irq_domain *domain;
55};
56
57static void gicv2m_mask_msi_irq(struct irq_data *d)
58{
59 pci_msi_mask_irq(d);
60 irq_chip_mask_parent(d);
61}
62
63static void gicv2m_unmask_msi_irq(struct irq_data *d)
64{
65 pci_msi_unmask_irq(d);
66 irq_chip_unmask_parent(d);
67}
68
69static struct irq_chip gicv2m_msi_irq_chip = {
70 .name = "MSI",
71 .irq_mask = gicv2m_mask_msi_irq,
72 .irq_unmask = gicv2m_unmask_msi_irq,
73 .irq_eoi = irq_chip_eoi_parent,
74 .irq_write_msi_msg = pci_msi_domain_write_msg,
75};
76
77static struct msi_domain_info gicv2m_msi_domain_info = {
78 .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
79 MSI_FLAG_PCI_MSIX),
80 .chip = &gicv2m_msi_irq_chip,
81};
82
83static int gicv2m_set_affinity(struct irq_data *irq_data,
84 const struct cpumask *mask, bool force)
85{
86 int ret;
87
88 ret = irq_chip_set_affinity_parent(irq_data, mask, force);
89 if (ret == IRQ_SET_MASK_OK)
90 ret = IRQ_SET_MASK_OK_DONE;
91
92 return ret;
93}
94
95static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
96{
97 struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
98 phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS;
99
100 msg->address_hi = (u32) (addr >> 32);
101 msg->address_lo = (u32) (addr);
102 msg->data = data->hwirq;
103}
104
105static struct irq_chip gicv2m_irq_chip = {
106 .name = "GICv2m",
107 .irq_mask = irq_chip_mask_parent,
108 .irq_unmask = irq_chip_unmask_parent,
109 .irq_eoi = irq_chip_eoi_parent,
110 .irq_set_affinity = gicv2m_set_affinity,
111 .irq_compose_msi_msg = gicv2m_compose_msi_msg,
112};
113
114static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
115 unsigned int virq,
116 irq_hw_number_t hwirq)
117{
118 struct of_phandle_args args;
119 struct irq_data *d;
120 int err;
121
122 args.np = domain->parent->of_node;
123 args.args_count = 3;
124 args.args[0] = 0;
125 args.args[1] = hwirq - 32;
126 args.args[2] = IRQ_TYPE_EDGE_RISING;
127
128 err = irq_domain_alloc_irqs_parent(domain, virq, 1, &args);
129 if (err)
130 return err;
131
132 /* Configure the interrupt line to be edge */
133 d = irq_domain_get_irq_data(domain->parent, virq);
134 d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
135 return 0;
136}
137
138static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq)
139{
140 int pos;
141
142 pos = hwirq - v2m->spi_start;
143 if (pos < 0 || pos >= v2m->nr_spis) {
144 pr_err("Failed to teardown msi. Invalid hwirq %d\n", hwirq);
145 return;
146 }
147
148 spin_lock(&v2m->msi_cnt_lock);
149 __clear_bit(pos, v2m->bm);
150 spin_unlock(&v2m->msi_cnt_lock);
151}
152
153static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
154 unsigned int nr_irqs, void *args)
155{
156 struct v2m_data *v2m = domain->host_data;
157 int hwirq, offset, err = 0;
158
159 spin_lock(&v2m->msi_cnt_lock);
160 offset = find_first_zero_bit(v2m->bm, v2m->nr_spis);
161 if (offset < v2m->nr_spis)
162 __set_bit(offset, v2m->bm);
163 else
164 err = -ENOSPC;
165 spin_unlock(&v2m->msi_cnt_lock);
166
167 if (err)
168 return err;
169
170 hwirq = v2m->spi_start + offset;
171
172 err = gicv2m_irq_gic_domain_alloc(domain, virq, hwirq);
173 if (err) {
174 gicv2m_unalloc_msi(v2m, hwirq);
175 return err;
176 }
177
178 irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
179 &gicv2m_irq_chip, v2m);
180
181 return 0;
182}
183
184static void gicv2m_irq_domain_free(struct irq_domain *domain,
185 unsigned int virq, unsigned int nr_irqs)
186{
187 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
188 struct v2m_data *v2m = irq_data_get_irq_chip_data(d);
189
190 BUG_ON(nr_irqs != 1);
191 gicv2m_unalloc_msi(v2m, d->hwirq);
192 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
193}
194
195static const struct irq_domain_ops gicv2m_domain_ops = {
196 .alloc = gicv2m_irq_domain_alloc,
197 .free = gicv2m_irq_domain_free,
198};
199
200static bool is_msi_spi_valid(u32 base, u32 num)
201{
202 if (base < V2M_MIN_SPI) {
203 pr_err("Invalid MSI base SPI (base:%u)\n", base);
204 return false;
205 }
206
207 if ((num == 0) || (base + num > V2M_MAX_SPI)) {
208 pr_err("Number of SPIs (%u) exceed maximum (%u)\n",
209 num, V2M_MAX_SPI - V2M_MIN_SPI + 1);
210 return false;
211 }
212
213 return true;
214}
215
216static int __init gicv2m_init_one(struct device_node *node,
217 struct irq_domain *parent)
218{
219 int ret;
220 struct v2m_data *v2m;
221
222 v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
223 if (!v2m) {
224 pr_err("Failed to allocate struct v2m_data.\n");
225 return -ENOMEM;
226 }
227
228 ret = of_address_to_resource(node, 0, &v2m->res);
229 if (ret) {
230 pr_err("Failed to allocate v2m resource.\n");
231 goto err_free_v2m;
232 }
233
234 v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res));
235 if (!v2m->base) {
236 pr_err("Failed to map GICv2m resource\n");
237 ret = -ENOMEM;
238 goto err_free_v2m;
239 }
240
241 if (!of_property_read_u32(node, "arm,msi-base-spi", &v2m->spi_start) &&
242 !of_property_read_u32(node, "arm,msi-num-spis", &v2m->nr_spis)) {
243 pr_info("Overriding V2M MSI_TYPER (base:%u, num:%u)\n",
244 v2m->spi_start, v2m->nr_spis);
245 } else {
246 u32 typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);
247
248 v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer);
249 v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer);
250 }
251
252 if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) {
253 ret = -EINVAL;
254 goto err_iounmap;
255 }
256
257 v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis),
258 GFP_KERNEL);
259 if (!v2m->bm) {
260 ret = -ENOMEM;
261 goto err_iounmap;
262 }
263
264 v2m->domain = irq_domain_add_tree(NULL, &gicv2m_domain_ops, v2m);
265 if (!v2m->domain) {
266 pr_err("Failed to create GICv2m domain\n");
267 ret = -ENOMEM;
268 goto err_free_bm;
269 }
270
271 v2m->domain->parent = parent;
272 v2m->mchip.of_node = node;
273 v2m->mchip.domain = pci_msi_create_irq_domain(node,
274 &gicv2m_msi_domain_info,
275 v2m->domain);
276 if (!v2m->mchip.domain) {
277 pr_err("Failed to create MSI domain\n");
278 ret = -ENOMEM;
279 goto err_free_domains;
280 }
281
282 spin_lock_init(&v2m->msi_cnt_lock);
283
284 ret = of_pci_msi_chip_add(&v2m->mchip);
285 if (ret) {
286 pr_err("Failed to add msi_chip.\n");
287 goto err_free_domains;
288 }
289
290 pr_info("Node %s: range[%#lx:%#lx], SPI[%d:%d]\n", node->name,
291 (unsigned long)v2m->res.start, (unsigned long)v2m->res.end,
292 v2m->spi_start, (v2m->spi_start + v2m->nr_spis));
293
294 return 0;
295
296err_free_domains:
297 if (v2m->mchip.domain)
298 irq_domain_remove(v2m->mchip.domain);
299 if (v2m->domain)
300 irq_domain_remove(v2m->domain);
301err_free_bm:
302 kfree(v2m->bm);
303err_iounmap:
304 iounmap(v2m->base);
305err_free_v2m:
306 kfree(v2m);
307 return ret;
308}
309
310static struct of_device_id gicv2m_device_id[] = {
311 { .compatible = "arm,gic-v2m-frame", },
312 {},
313};
314
315int __init gicv2m_of_init(struct device_node *node, struct irq_domain *parent)
316{
317 int ret = 0;
318 struct device_node *child;
319
320 for (child = of_find_matching_node(node, gicv2m_device_id); child;
321 child = of_find_matching_node(child, gicv2m_device_id)) {
322 if (!of_find_property(child, "msi-controller", NULL))
323 continue;
324
325 ret = gicv2m_init_one(child, parent);
326 if (ret) {
327 of_node_put(node);
328 break;
329 }
330 }
331
332 return ret;
333}
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
new file mode 100644
index 000000000000..86e4684adeb1
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -0,0 +1,1425 @@
1/*
2 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/bitmap.h>
19#include <linux/cpu.h>
20#include <linux/delay.h>
21#include <linux/interrupt.h>
22#include <linux/log2.h>
23#include <linux/mm.h>
24#include <linux/msi.h>
25#include <linux/of.h>
26#include <linux/of_address.h>
27#include <linux/of_irq.h>
28#include <linux/of_pci.h>
29#include <linux/of_platform.h>
30#include <linux/percpu.h>
31#include <linux/slab.h>
32
33#include <linux/irqchip/arm-gic-v3.h>
34
35#include <asm/cacheflush.h>
36#include <asm/cputype.h>
37#include <asm/exception.h>
38
39#include "irqchip.h"
40
41#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1 << 0)
42
43#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
44
45/*
46 * Collection structure - just an ID, and a redistributor address to
47 * ping. We use one per CPU as a bag of interrupts assigned to this
48 * CPU.
49 */
50struct its_collection {
51 u64 target_address;
52 u16 col_id;
53};
54
55/*
56 * The ITS structure - contains most of the infrastructure, with the
57 * msi_controller, the command queue, the collections, and the list of
58 * devices writing to it.
59 */
60struct its_node {
61 raw_spinlock_t lock;
62 struct list_head entry;
63 struct msi_controller msi_chip;
64 struct irq_domain *domain;
65 void __iomem *base;
66 unsigned long phys_base;
67 struct its_cmd_block *cmd_base;
68 struct its_cmd_block *cmd_write;
69 void *tables[GITS_BASER_NR_REGS];
70 struct its_collection *collections;
71 struct list_head its_device_list;
72 u64 flags;
73 u32 ite_size;
74};
75
76#define ITS_ITT_ALIGN SZ_256
77
78/*
79 * The ITS view of a device - belongs to an ITS, a collection, owns an
80 * interrupt translation table, and a list of interrupts.
81 */
82struct its_device {
83 struct list_head entry;
84 struct its_node *its;
85 struct its_collection *collection;
86 void *itt;
87 unsigned long *lpi_map;
88 irq_hw_number_t lpi_base;
89 int nr_lpis;
90 u32 nr_ites;
91 u32 device_id;
92};
93
94static LIST_HEAD(its_nodes);
95static DEFINE_SPINLOCK(its_lock);
96static struct device_node *gic_root_node;
97static struct rdists *gic_rdists;
98
99#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
100#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
101
102/*
103 * ITS command descriptors - parameters to be encoded in a command
104 * block.
105 */
106struct its_cmd_desc {
107 union {
108 struct {
109 struct its_device *dev;
110 u32 event_id;
111 } its_inv_cmd;
112
113 struct {
114 struct its_device *dev;
115 u32 event_id;
116 } its_int_cmd;
117
118 struct {
119 struct its_device *dev;
120 int valid;
121 } its_mapd_cmd;
122
123 struct {
124 struct its_collection *col;
125 int valid;
126 } its_mapc_cmd;
127
128 struct {
129 struct its_device *dev;
130 u32 phys_id;
131 u32 event_id;
132 } its_mapvi_cmd;
133
134 struct {
135 struct its_device *dev;
136 struct its_collection *col;
137 u32 id;
138 } its_movi_cmd;
139
140 struct {
141 struct its_device *dev;
142 u32 event_id;
143 } its_discard_cmd;
144
145 struct {
146 struct its_collection *col;
147 } its_invall_cmd;
148 };
149};
150
151/*
152 * The ITS command block, which is what the ITS actually parses.
153 */
154struct its_cmd_block {
155 u64 raw_cmd[4];
156};
157
158#define ITS_CMD_QUEUE_SZ SZ_64K
159#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
160
161typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
162 struct its_cmd_desc *);
163
164static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
165{
166 cmd->raw_cmd[0] &= ~0xffUL;
167 cmd->raw_cmd[0] |= cmd_nr;
168}
169
170static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
171{
172 cmd->raw_cmd[0] &= ~(0xffffUL << 32);
173 cmd->raw_cmd[0] |= ((u64)devid) << 32;
174}
175
176static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
177{
178 cmd->raw_cmd[1] &= ~0xffffffffUL;
179 cmd->raw_cmd[1] |= id;
180}
181
182static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
183{
184 cmd->raw_cmd[1] &= 0xffffffffUL;
185 cmd->raw_cmd[1] |= ((u64)phys_id) << 32;
186}
187
188static void its_encode_size(struct its_cmd_block *cmd, u8 size)
189{
190 cmd->raw_cmd[1] &= ~0x1fUL;
191 cmd->raw_cmd[1] |= size & 0x1f;
192}
193
194static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
195{
196 cmd->raw_cmd[2] &= ~0xffffffffffffUL;
197 cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00UL;
198}
199
200static void its_encode_valid(struct its_cmd_block *cmd, int valid)
201{
202 cmd->raw_cmd[2] &= ~(1UL << 63);
203 cmd->raw_cmd[2] |= ((u64)!!valid) << 63;
204}
205
206static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
207{
208 cmd->raw_cmd[2] &= ~(0xffffffffUL << 16);
209 cmd->raw_cmd[2] |= (target_addr & (0xffffffffUL << 16));
210}
211
212static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
213{
214 cmd->raw_cmd[2] &= ~0xffffUL;
215 cmd->raw_cmd[2] |= col;
216}
217
218static inline void its_fixup_cmd(struct its_cmd_block *cmd)
219{
220 /* Let's fixup BE commands */
221 cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
222 cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
223 cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
224 cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
225}
226
227static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
228 struct its_cmd_desc *desc)
229{
230 unsigned long itt_addr;
231 u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
232
233 itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
234 itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
235
236 its_encode_cmd(cmd, GITS_CMD_MAPD);
237 its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
238 its_encode_size(cmd, size - 1);
239 its_encode_itt(cmd, itt_addr);
240 its_encode_valid(cmd, desc->its_mapd_cmd.valid);
241
242 its_fixup_cmd(cmd);
243
244 return desc->its_mapd_cmd.dev->collection;
245}
246
247static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
248 struct its_cmd_desc *desc)
249{
250 its_encode_cmd(cmd, GITS_CMD_MAPC);
251 its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
252 its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
253 its_encode_valid(cmd, desc->its_mapc_cmd.valid);
254
255 its_fixup_cmd(cmd);
256
257 return desc->its_mapc_cmd.col;
258}
259
260static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd,
261 struct its_cmd_desc *desc)
262{
263 its_encode_cmd(cmd, GITS_CMD_MAPVI);
264 its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id);
265 its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id);
266 its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id);
267 its_encode_collection(cmd, desc->its_mapvi_cmd.dev->collection->col_id);
268
269 its_fixup_cmd(cmd);
270
271 return desc->its_mapvi_cmd.dev->collection;
272}
273
274static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
275 struct its_cmd_desc *desc)
276{
277 its_encode_cmd(cmd, GITS_CMD_MOVI);
278 its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
279 its_encode_event_id(cmd, desc->its_movi_cmd.id);
280 its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
281
282 its_fixup_cmd(cmd);
283
284 return desc->its_movi_cmd.dev->collection;
285}
286
287static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
288 struct its_cmd_desc *desc)
289{
290 its_encode_cmd(cmd, GITS_CMD_DISCARD);
291 its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
292 its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
293
294 its_fixup_cmd(cmd);
295
296 return desc->its_discard_cmd.dev->collection;
297}
298
299static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
300 struct its_cmd_desc *desc)
301{
302 its_encode_cmd(cmd, GITS_CMD_INV);
303 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
304 its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
305
306 its_fixup_cmd(cmd);
307
308 return desc->its_inv_cmd.dev->collection;
309}
310
311static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
312 struct its_cmd_desc *desc)
313{
314 its_encode_cmd(cmd, GITS_CMD_INVALL);
315 its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
316
317 its_fixup_cmd(cmd);
318
319 return NULL;
320}
321
322static u64 its_cmd_ptr_to_offset(struct its_node *its,
323 struct its_cmd_block *ptr)
324{
325 return (ptr - its->cmd_base) * sizeof(*ptr);
326}
327
328static int its_queue_full(struct its_node *its)
329{
330 int widx;
331 int ridx;
332
333 widx = its->cmd_write - its->cmd_base;
334 ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
335
336 /* This is incredibly unlikely to happen, unless the ITS locks up. */
337 if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
338 return 1;
339
340 return 0;
341}
342
343static struct its_cmd_block *its_allocate_entry(struct its_node *its)
344{
345 struct its_cmd_block *cmd;
346 u32 count = 1000000; /* 1s! */
347
348 while (its_queue_full(its)) {
349 count--;
350 if (!count) {
351 pr_err_ratelimited("ITS queue not draining\n");
352 return NULL;
353 }
354 cpu_relax();
355 udelay(1);
356 }
357
358 cmd = its->cmd_write++;
359
360 /* Handle queue wrapping */
361 if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
362 its->cmd_write = its->cmd_base;
363
364 return cmd;
365}
366
367static struct its_cmd_block *its_post_commands(struct its_node *its)
368{
369 u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
370
371 writel_relaxed(wr, its->base + GITS_CWRITER);
372
373 return its->cmd_write;
374}
375
376static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
377{
378 /*
379 * Make sure the commands written to memory are observable by
380 * the ITS.
381 */
382 if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
383 __flush_dcache_area(cmd, sizeof(*cmd));
384 else
385 dsb(ishst);
386}
387
388static void its_wait_for_range_completion(struct its_node *its,
389 struct its_cmd_block *from,
390 struct its_cmd_block *to)
391{
392 u64 rd_idx, from_idx, to_idx;
393 u32 count = 1000000; /* 1s! */
394
395 from_idx = its_cmd_ptr_to_offset(its, from);
396 to_idx = its_cmd_ptr_to_offset(its, to);
397
398 while (1) {
399 rd_idx = readl_relaxed(its->base + GITS_CREADR);
400 if (rd_idx >= to_idx || rd_idx < from_idx)
401 break;
402
403 count--;
404 if (!count) {
405 pr_err_ratelimited("ITS queue timeout\n");
406 return;
407 }
408 cpu_relax();
409 udelay(1);
410 }
411}
412
413static void its_send_single_command(struct its_node *its,
414 its_cmd_builder_t builder,
415 struct its_cmd_desc *desc)
416{
417 struct its_cmd_block *cmd, *sync_cmd, *next_cmd;
418 struct its_collection *sync_col;
419
420 raw_spin_lock(&its->lock);
421
422 cmd = its_allocate_entry(its);
423 if (!cmd) { /* We're soooooo screewed... */
424 pr_err_ratelimited("ITS can't allocate, dropping command\n");
425 raw_spin_unlock(&its->lock);
426 return;
427 }
428 sync_col = builder(cmd, desc);
429 its_flush_cmd(its, cmd);
430
431 if (sync_col) {
432 sync_cmd = its_allocate_entry(its);
433 if (!sync_cmd) {
434 pr_err_ratelimited("ITS can't SYNC, skipping\n");
435 goto post;
436 }
437 its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
438 its_encode_target(sync_cmd, sync_col->target_address);
439 its_fixup_cmd(sync_cmd);
440 its_flush_cmd(its, sync_cmd);
441 }
442
443post:
444 next_cmd = its_post_commands(its);
445 raw_spin_unlock(&its->lock);
446
447 its_wait_for_range_completion(its, cmd, next_cmd);
448}
449
450static void its_send_inv(struct its_device *dev, u32 event_id)
451{
452 struct its_cmd_desc desc;
453
454 desc.its_inv_cmd.dev = dev;
455 desc.its_inv_cmd.event_id = event_id;
456
457 its_send_single_command(dev->its, its_build_inv_cmd, &desc);
458}
459
460static void its_send_mapd(struct its_device *dev, int valid)
461{
462 struct its_cmd_desc desc;
463
464 desc.its_mapd_cmd.dev = dev;
465 desc.its_mapd_cmd.valid = !!valid;
466
467 its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
468}
469
470static void its_send_mapc(struct its_node *its, struct its_collection *col,
471 int valid)
472{
473 struct its_cmd_desc desc;
474
475 desc.its_mapc_cmd.col = col;
476 desc.its_mapc_cmd.valid = !!valid;
477
478 its_send_single_command(its, its_build_mapc_cmd, &desc);
479}
480
481static void its_send_mapvi(struct its_device *dev, u32 irq_id, u32 id)
482{
483 struct its_cmd_desc desc;
484
485 desc.its_mapvi_cmd.dev = dev;
486 desc.its_mapvi_cmd.phys_id = irq_id;
487 desc.its_mapvi_cmd.event_id = id;
488
489 its_send_single_command(dev->its, its_build_mapvi_cmd, &desc);
490}
491
492static void its_send_movi(struct its_device *dev,
493 struct its_collection *col, u32 id)
494{
495 struct its_cmd_desc desc;
496
497 desc.its_movi_cmd.dev = dev;
498 desc.its_movi_cmd.col = col;
499 desc.its_movi_cmd.id = id;
500
501 its_send_single_command(dev->its, its_build_movi_cmd, &desc);
502}
503
504static void its_send_discard(struct its_device *dev, u32 id)
505{
506 struct its_cmd_desc desc;
507
508 desc.its_discard_cmd.dev = dev;
509 desc.its_discard_cmd.event_id = id;
510
511 its_send_single_command(dev->its, its_build_discard_cmd, &desc);
512}
513
514static void its_send_invall(struct its_node *its, struct its_collection *col)
515{
516 struct its_cmd_desc desc;
517
518 desc.its_invall_cmd.col = col;
519
520 its_send_single_command(its, its_build_invall_cmd, &desc);
521}
522
523/*
524 * irqchip functions - assumes MSI, mostly.
525 */
526
527static inline u32 its_get_event_id(struct irq_data *d)
528{
529 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
530 return d->hwirq - its_dev->lpi_base;
531}
532
533static void lpi_set_config(struct irq_data *d, bool enable)
534{
535 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
536 irq_hw_number_t hwirq = d->hwirq;
537 u32 id = its_get_event_id(d);
538 u8 *cfg = page_address(gic_rdists->prop_page) + hwirq - 8192;
539
540 if (enable)
541 *cfg |= LPI_PROP_ENABLED;
542 else
543 *cfg &= ~LPI_PROP_ENABLED;
544
545 /*
546 * Make the above write visible to the redistributors.
547 * And yes, we're flushing exactly: One. Single. Byte.
548 * Humpf...
549 */
550 if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
551 __flush_dcache_area(cfg, sizeof(*cfg));
552 else
553 dsb(ishst);
554 its_send_inv(its_dev, id);
555}
556
557static void its_mask_irq(struct irq_data *d)
558{
559 lpi_set_config(d, false);
560}
561
562static void its_unmask_irq(struct irq_data *d)
563{
564 lpi_set_config(d, true);
565}
566
567static void its_eoi_irq(struct irq_data *d)
568{
569 gic_write_eoir(d->hwirq);
570}
571
572static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
573 bool force)
574{
575 unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
576 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
577 struct its_collection *target_col;
578 u32 id = its_get_event_id(d);
579
580 if (cpu >= nr_cpu_ids)
581 return -EINVAL;
582
583 target_col = &its_dev->its->collections[cpu];
584 its_send_movi(its_dev, target_col, id);
585 its_dev->collection = target_col;
586
587 return IRQ_SET_MASK_OK_DONE;
588}
589
590static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
591{
592 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
593 struct its_node *its;
594 u64 addr;
595
596 its = its_dev->its;
597 addr = its->phys_base + GITS_TRANSLATER;
598
599 msg->address_lo = addr & ((1UL << 32) - 1);
600 msg->address_hi = addr >> 32;
601 msg->data = its_get_event_id(d);
602}
603
604static struct irq_chip its_irq_chip = {
605 .name = "ITS",
606 .irq_mask = its_mask_irq,
607 .irq_unmask = its_unmask_irq,
608 .irq_eoi = its_eoi_irq,
609 .irq_set_affinity = its_set_affinity,
610 .irq_compose_msi_msg = its_irq_compose_msi_msg,
611};
612
613static void its_mask_msi_irq(struct irq_data *d)
614{
615 pci_msi_mask_irq(d);
616 irq_chip_mask_parent(d);
617}
618
619static void its_unmask_msi_irq(struct irq_data *d)
620{
621 pci_msi_unmask_irq(d);
622 irq_chip_unmask_parent(d);
623}
624
625static struct irq_chip its_msi_irq_chip = {
626 .name = "ITS-MSI",
627 .irq_unmask = its_unmask_msi_irq,
628 .irq_mask = its_mask_msi_irq,
629 .irq_eoi = irq_chip_eoi_parent,
630 .irq_write_msi_msg = pci_msi_domain_write_msg,
631};
632
633/*
634 * How we allocate LPIs:
635 *
636 * The GIC has id_bits bits for interrupt identifiers. From there, we
637 * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
638 * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
639 * bits to the right.
640 *
641 * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
642 */
643#define IRQS_PER_CHUNK_SHIFT 5
644#define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT)
645
646static unsigned long *lpi_bitmap;
647static u32 lpi_chunks;
648static DEFINE_SPINLOCK(lpi_lock);
649
650static int its_lpi_to_chunk(int lpi)
651{
652 return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
653}
654
655static int its_chunk_to_lpi(int chunk)
656{
657 return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
658}
659
660static int its_lpi_init(u32 id_bits)
661{
662 lpi_chunks = its_lpi_to_chunk(1UL << id_bits);
663
664 lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long),
665 GFP_KERNEL);
666 if (!lpi_bitmap) {
667 lpi_chunks = 0;
668 return -ENOMEM;
669 }
670
671 pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
672 return 0;
673}
674
675static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
676{
677 unsigned long *bitmap = NULL;
678 int chunk_id;
679 int nr_chunks;
680 int i;
681
682 nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);
683
684 spin_lock(&lpi_lock);
685
686 do {
687 chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
688 0, nr_chunks, 0);
689 if (chunk_id < lpi_chunks)
690 break;
691
692 nr_chunks--;
693 } while (nr_chunks > 0);
694
695 if (!nr_chunks)
696 goto out;
697
698 bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof (long),
699 GFP_ATOMIC);
700 if (!bitmap)
701 goto out;
702
703 for (i = 0; i < nr_chunks; i++)
704 set_bit(chunk_id + i, lpi_bitmap);
705
706 *base = its_chunk_to_lpi(chunk_id);
707 *nr_ids = nr_chunks * IRQS_PER_CHUNK;
708
709out:
710 spin_unlock(&lpi_lock);
711
712 return bitmap;
713}
714
715static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids)
716{
717 int lpi;
718
719 spin_lock(&lpi_lock);
720
721 for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
722 int chunk = its_lpi_to_chunk(lpi);
723 BUG_ON(chunk > lpi_chunks);
724 if (test_bit(chunk, lpi_bitmap)) {
725 clear_bit(chunk, lpi_bitmap);
726 } else {
727 pr_err("Bad LPI chunk %d\n", chunk);
728 }
729 }
730
731 spin_unlock(&lpi_lock);
732
733 kfree(bitmap);
734}
735
736/*
737 * We allocate 64kB for PROPBASE. That gives us at most 64K LPIs to
738 * deal with (one configuration byte per interrupt). PENDBASE has to
739 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
740 */
741#define LPI_PROPBASE_SZ SZ_64K
742#define LPI_PENDBASE_SZ (LPI_PROPBASE_SZ / 8 + SZ_1K)
743
744/*
745 * This is how many bits of ID we need, including the useless ones.
746 */
747#define LPI_NRBITS ilog2(LPI_PROPBASE_SZ + SZ_8K)
748
749#define LPI_PROP_DEFAULT_PRIO 0xa0
750
751static int __init its_alloc_lpi_tables(void)
752{
753 phys_addr_t paddr;
754
755 gic_rdists->prop_page = alloc_pages(GFP_NOWAIT,
756 get_order(LPI_PROPBASE_SZ));
757 if (!gic_rdists->prop_page) {
758 pr_err("Failed to allocate PROPBASE\n");
759 return -ENOMEM;
760 }
761
762 paddr = page_to_phys(gic_rdists->prop_page);
763 pr_info("GIC: using LPI property table @%pa\n", &paddr);
764
765 /* Priority 0xa0, Group-1, disabled */
766 memset(page_address(gic_rdists->prop_page),
767 LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
768 LPI_PROPBASE_SZ);
769
770 /* Make sure the GIC will observe the written configuration */
771 __flush_dcache_area(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);
772
773 return 0;
774}
775
776static const char *its_base_type_string[] = {
777 [GITS_BASER_TYPE_DEVICE] = "Devices",
778 [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
779 [GITS_BASER_TYPE_CPU] = "Physical CPUs",
780 [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
781 [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
782 [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
783 [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
784};
785
786static void its_free_tables(struct its_node *its)
787{
788 int i;
789
790 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
791 if (its->tables[i]) {
792 free_page((unsigned long)its->tables[i]);
793 its->tables[i] = NULL;
794 }
795 }
796}
797
798static int its_alloc_tables(struct its_node *its)
799{
800 int err;
801 int i;
802 int psz = PAGE_SIZE;
803 u64 shr = GITS_BASER_InnerShareable;
804
805 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
806 u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
807 u64 type = GITS_BASER_TYPE(val);
808 u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
809 u64 tmp;
810 void *base;
811
812 if (type == GITS_BASER_TYPE_NONE)
813 continue;
814
815 /* We're lazy and only allocate a single page for now */
816 base = (void *)get_zeroed_page(GFP_KERNEL);
817 if (!base) {
818 err = -ENOMEM;
819 goto out_free;
820 }
821
822 its->tables[i] = base;
823
824retry_baser:
825 val = (virt_to_phys(base) |
826 (type << GITS_BASER_TYPE_SHIFT) |
827 ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
828 GITS_BASER_WaWb |
829 shr |
830 GITS_BASER_VALID);
831
832 switch (psz) {
833 case SZ_4K:
834 val |= GITS_BASER_PAGE_SIZE_4K;
835 break;
836 case SZ_16K:
837 val |= GITS_BASER_PAGE_SIZE_16K;
838 break;
839 case SZ_64K:
840 val |= GITS_BASER_PAGE_SIZE_64K;
841 break;
842 }
843
844 val |= (PAGE_SIZE / psz) - 1;
845
846 writeq_relaxed(val, its->base + GITS_BASER + i * 8);
847 tmp = readq_relaxed(its->base + GITS_BASER + i * 8);
848
849 if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
850 /*
851 * Shareability didn't stick. Just use
852 * whatever the read reported, which is likely
853 * to be the only thing this redistributor
854 * supports.
855 */
856 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
857 goto retry_baser;
858 }
859
860 if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
861 /*
862 * Page size didn't stick. Let's try a smaller
863 * size and retry. If we reach 4K, then
864 * something is horribly wrong...
865 */
866 switch (psz) {
867 case SZ_16K:
868 psz = SZ_4K;
869 goto retry_baser;
870 case SZ_64K:
871 psz = SZ_16K;
872 goto retry_baser;
873 }
874 }
875
876 if (val != tmp) {
877 pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n",
878 its->msi_chip.of_node->full_name, i,
879 (unsigned long) val, (unsigned long) tmp);
880 err = -ENXIO;
881 goto out_free;
882 }
883
884 pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
885 (int)(PAGE_SIZE / entry_size),
886 its_base_type_string[type],
887 (unsigned long)virt_to_phys(base),
888 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
889 }
890
891 return 0;
892
893out_free:
894 its_free_tables(its);
895
896 return err;
897}
898
899static int its_alloc_collections(struct its_node *its)
900{
901 its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections),
902 GFP_KERNEL);
903 if (!its->collections)
904 return -ENOMEM;
905
906 return 0;
907}
908
909static void its_cpu_init_lpis(void)
910{
911 void __iomem *rbase = gic_data_rdist_rd_base();
912 struct page *pend_page;
913 u64 val, tmp;
914
915 /* If we didn't allocate the pending table yet, do it now */
916 pend_page = gic_data_rdist()->pend_page;
917 if (!pend_page) {
918 phys_addr_t paddr;
919 /*
920 * The pending pages have to be at least 64kB aligned,
921 * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
922 */
923 pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO,
924 get_order(max(LPI_PENDBASE_SZ, SZ_64K)));
925 if (!pend_page) {
926 pr_err("Failed to allocate PENDBASE for CPU%d\n",
927 smp_processor_id());
928 return;
929 }
930
931 /* Make sure the GIC will observe the zero-ed page */
932 __flush_dcache_area(page_address(pend_page), LPI_PENDBASE_SZ);
933
934 paddr = page_to_phys(pend_page);
935 pr_info("CPU%d: using LPI pending table @%pa\n",
936 smp_processor_id(), &paddr);
937 gic_data_rdist()->pend_page = pend_page;
938 }
939
940 /* Disable LPIs */
941 val = readl_relaxed(rbase + GICR_CTLR);
942 val &= ~GICR_CTLR_ENABLE_LPIS;
943 writel_relaxed(val, rbase + GICR_CTLR);
944
945 /*
946 * Make sure any change to the table is observable by the GIC.
947 */
948 dsb(sy);
949
950 /* set PROPBASE */
951 val = (page_to_phys(gic_rdists->prop_page) |
952 GICR_PROPBASER_InnerShareable |
953 GICR_PROPBASER_WaWb |
954 ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
955
956 writeq_relaxed(val, rbase + GICR_PROPBASER);
957 tmp = readq_relaxed(rbase + GICR_PROPBASER);
958
959 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
960 pr_info_once("GIC: using cache flushing for LPI property table\n");
961 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
962 }
963
964 /* set PENDBASE */
965 val = (page_to_phys(pend_page) |
966 GICR_PROPBASER_InnerShareable |
967 GICR_PROPBASER_WaWb);
968
969 writeq_relaxed(val, rbase + GICR_PENDBASER);
970
971 /* Enable LPIs */
972 val = readl_relaxed(rbase + GICR_CTLR);
973 val |= GICR_CTLR_ENABLE_LPIS;
974 writel_relaxed(val, rbase + GICR_CTLR);
975
976 /* Make sure the GIC has seen the above */
977 dsb(sy);
978}
979
980static void its_cpu_init_collection(void)
981{
982 struct its_node *its;
983 int cpu;
984
985 spin_lock(&its_lock);
986 cpu = smp_processor_id();
987
988 list_for_each_entry(its, &its_nodes, entry) {
989 u64 target;
990
991 /*
992 * We now have to bind each collection to its target
993 * redistributor.
994 */
995 if (readq_relaxed(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
996 /*
997 * This ITS wants the physical address of the
998 * redistributor.
999 */
1000 target = gic_data_rdist()->phys_base;
1001 } else {
1002 /*
1003 * This ITS wants a linear CPU number.
1004 */
1005 target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER);
1006 target = GICR_TYPER_CPU_NUMBER(target);
1007 }
1008
1009 /* Perform collection mapping */
1010 its->collections[cpu].target_address = target;
1011 its->collections[cpu].col_id = cpu;
1012
1013 its_send_mapc(its, &its->collections[cpu], 1);
1014 its_send_invall(its, &its->collections[cpu]);
1015 }
1016
1017 spin_unlock(&its_lock);
1018}
1019
1020static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
1021{
1022 struct its_device *its_dev = NULL, *tmp;
1023
1024 raw_spin_lock(&its->lock);
1025
1026 list_for_each_entry(tmp, &its->its_device_list, entry) {
1027 if (tmp->device_id == dev_id) {
1028 its_dev = tmp;
1029 break;
1030 }
1031 }
1032
1033 raw_spin_unlock(&its->lock);
1034
1035 return its_dev;
1036}
1037
1038static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1039 int nvecs)
1040{
1041 struct its_device *dev;
1042 unsigned long *lpi_map;
1043 void *itt;
1044 int lpi_base;
1045 int nr_lpis;
1046 int nr_ites;
1047 int cpu;
1048 int sz;
1049
1050 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1051 /*
1052 * At least one bit of EventID is being used, hence a minimum
1053 * of two entries. No, the architecture doesn't let you
1054 * express an ITT with a single entry.
1055 */
1056 nr_ites = max(2, roundup_pow_of_two(nvecs));
1057 sz = nr_ites * its->ite_size;
1058 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
1059 itt = kmalloc(sz, GFP_KERNEL);
1060 lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
1061
1062 if (!dev || !itt || !lpi_map) {
1063 kfree(dev);
1064 kfree(itt);
1065 kfree(lpi_map);
1066 return NULL;
1067 }
1068
1069 dev->its = its;
1070 dev->itt = itt;
1071 dev->nr_ites = nr_ites;
1072 dev->lpi_map = lpi_map;
1073 dev->lpi_base = lpi_base;
1074 dev->nr_lpis = nr_lpis;
1075 dev->device_id = dev_id;
1076 INIT_LIST_HEAD(&dev->entry);
1077
1078 raw_spin_lock(&its->lock);
1079 list_add(&dev->entry, &its->its_device_list);
1080 raw_spin_unlock(&its->lock);
1081
1082 /* Bind the device to the first possible CPU */
1083 cpu = cpumask_first(cpu_online_mask);
1084 dev->collection = &its->collections[cpu];
1085
1086 /* Map device to its ITT */
1087 its_send_mapd(dev, 1);
1088
1089 return dev;
1090}
1091
1092static void its_free_device(struct its_device *its_dev)
1093{
1094 raw_spin_lock(&its_dev->its->lock);
1095 list_del(&its_dev->entry);
1096 raw_spin_unlock(&its_dev->its->lock);
1097 kfree(its_dev->itt);
1098 kfree(its_dev);
1099}
1100
1101static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
1102{
1103 int idx;
1104
1105 idx = find_first_zero_bit(dev->lpi_map, dev->nr_lpis);
1106 if (idx == dev->nr_lpis)
1107 return -ENOSPC;
1108
1109 *hwirq = dev->lpi_base + idx;
1110 set_bit(idx, dev->lpi_map);
1111
1112 return 0;
1113}
1114
1115static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
1116 int nvec, msi_alloc_info_t *info)
1117{
1118 struct pci_dev *pdev;
1119 struct its_node *its;
1120 u32 dev_id;
1121 struct its_device *its_dev;
1122
1123 if (!dev_is_pci(dev))
1124 return -EINVAL;
1125
1126 pdev = to_pci_dev(dev);
1127 dev_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
1128 its = domain->parent->host_data;
1129
1130 its_dev = its_find_device(its, dev_id);
1131 if (WARN_ON(its_dev))
1132 return -EINVAL;
1133
1134 its_dev = its_create_device(its, dev_id, nvec);
1135 if (!its_dev)
1136 return -ENOMEM;
1137
1138 dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n", nvec, ilog2(nvec));
1139
1140 info->scratchpad[0].ptr = its_dev;
1141 info->scratchpad[1].ptr = dev;
1142 return 0;
1143}
1144
1145static struct msi_domain_ops its_pci_msi_ops = {
1146 .msi_prepare = its_msi_prepare,
1147};
1148
1149static struct msi_domain_info its_pci_msi_domain_info = {
1150 .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
1151 MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
1152 .ops = &its_pci_msi_ops,
1153 .chip = &its_msi_irq_chip,
1154};
1155
1156static int its_irq_gic_domain_alloc(struct irq_domain *domain,
1157 unsigned int virq,
1158 irq_hw_number_t hwirq)
1159{
1160 struct of_phandle_args args;
1161
1162 args.np = domain->parent->of_node;
1163 args.args_count = 3;
1164 args.args[0] = GIC_IRQ_TYPE_LPI;
1165 args.args[1] = hwirq;
1166 args.args[2] = IRQ_TYPE_EDGE_RISING;
1167
1168 return irq_domain_alloc_irqs_parent(domain, virq, 1, &args);
1169}
1170
1171static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1172 unsigned int nr_irqs, void *args)
1173{
1174 msi_alloc_info_t *info = args;
1175 struct its_device *its_dev = info->scratchpad[0].ptr;
1176 irq_hw_number_t hwirq;
1177 int err;
1178 int i;
1179
1180 for (i = 0; i < nr_irqs; i++) {
1181 err = its_alloc_device_irq(its_dev, &hwirq);
1182 if (err)
1183 return err;
1184
1185 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
1186 if (err)
1187 return err;
1188
1189 irq_domain_set_hwirq_and_chip(domain, virq + i,
1190 hwirq, &its_irq_chip, its_dev);
1191 dev_dbg(info->scratchpad[1].ptr, "ID:%d pID:%d vID:%d\n",
1192 (int)(hwirq - its_dev->lpi_base), (int)hwirq, virq + i);
1193 }
1194
1195 return 0;
1196}
1197
1198static void its_irq_domain_activate(struct irq_domain *domain,
1199 struct irq_data *d)
1200{
1201 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1202 u32 event = its_get_event_id(d);
1203
1204 /* Map the GIC IRQ and event to the device */
1205 its_send_mapvi(its_dev, d->hwirq, event);
1206}
1207
1208static void its_irq_domain_deactivate(struct irq_domain *domain,
1209 struct irq_data *d)
1210{
1211 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1212 u32 event = its_get_event_id(d);
1213
1214 /* Stop the delivery of interrupts */
1215 its_send_discard(its_dev, event);
1216}
1217
1218static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
1219 unsigned int nr_irqs)
1220{
1221 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
1222 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1223 int i;
1224
1225 for (i = 0; i < nr_irqs; i++) {
1226 struct irq_data *data = irq_domain_get_irq_data(domain,
1227 virq + i);
1228 u32 event = its_get_event_id(data);
1229
1230 /* Mark interrupt index as unused */
1231 clear_bit(event, its_dev->lpi_map);
1232
1233 /* Nuke the entry in the domain */
1234 irq_domain_reset_irq_data(data);
1235 }
1236
1237 /* If all interrupts have been freed, start mopping the floor */
1238 if (bitmap_empty(its_dev->lpi_map, its_dev->nr_lpis)) {
1239 its_lpi_free(its_dev->lpi_map,
1240 its_dev->lpi_base,
1241 its_dev->nr_lpis);
1242
1243 /* Unmap device/itt */
1244 its_send_mapd(its_dev, 0);
1245 its_free_device(its_dev);
1246 }
1247
1248 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
1249}
1250
1251static const struct irq_domain_ops its_domain_ops = {
1252 .alloc = its_irq_domain_alloc,
1253 .free = its_irq_domain_free,
1254 .activate = its_irq_domain_activate,
1255 .deactivate = its_irq_domain_deactivate,
1256};
1257
1258static int its_probe(struct device_node *node, struct irq_domain *parent)
1259{
1260 struct resource res;
1261 struct its_node *its;
1262 void __iomem *its_base;
1263 u32 val;
1264 u64 baser, tmp;
1265 int err;
1266
1267 err = of_address_to_resource(node, 0, &res);
1268 if (err) {
1269 pr_warn("%s: no regs?\n", node->full_name);
1270 return -ENXIO;
1271 }
1272
1273 its_base = ioremap(res.start, resource_size(&res));
1274 if (!its_base) {
1275 pr_warn("%s: unable to map registers\n", node->full_name);
1276 return -ENOMEM;
1277 }
1278
1279 val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
1280 if (val != 0x30 && val != 0x40) {
1281 pr_warn("%s: no ITS detected, giving up\n", node->full_name);
1282 err = -ENODEV;
1283 goto out_unmap;
1284 }
1285
1286 pr_info("ITS: %s\n", node->full_name);
1287
1288 its = kzalloc(sizeof(*its), GFP_KERNEL);
1289 if (!its) {
1290 err = -ENOMEM;
1291 goto out_unmap;
1292 }
1293
1294 raw_spin_lock_init(&its->lock);
1295 INIT_LIST_HEAD(&its->entry);
1296 INIT_LIST_HEAD(&its->its_device_list);
1297 its->base = its_base;
1298 its->phys_base = res.start;
1299 its->msi_chip.of_node = node;
1300 its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
1301
1302 its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
1303 if (!its->cmd_base) {
1304 err = -ENOMEM;
1305 goto out_free_its;
1306 }
1307 its->cmd_write = its->cmd_base;
1308
1309 err = its_alloc_tables(its);
1310 if (err)
1311 goto out_free_cmd;
1312
1313 err = its_alloc_collections(its);
1314 if (err)
1315 goto out_free_tables;
1316
1317 baser = (virt_to_phys(its->cmd_base) |
1318 GITS_CBASER_WaWb |
1319 GITS_CBASER_InnerShareable |
1320 (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
1321 GITS_CBASER_VALID);
1322
1323 writeq_relaxed(baser, its->base + GITS_CBASER);
1324 tmp = readq_relaxed(its->base + GITS_CBASER);
1325 writeq_relaxed(0, its->base + GITS_CWRITER);
1326 writel_relaxed(1, its->base + GITS_CTLR);
1327
1328 if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) {
1329 pr_info("ITS: using cache flushing for cmd queue\n");
1330 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
1331 }
1332
1333 if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) {
1334 its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its);
1335 if (!its->domain) {
1336 err = -ENOMEM;
1337 goto out_free_tables;
1338 }
1339
1340 its->domain->parent = parent;
1341
1342 its->msi_chip.domain = pci_msi_create_irq_domain(node,
1343 &its_pci_msi_domain_info,
1344 its->domain);
1345 if (!its->msi_chip.domain) {
1346 err = -ENOMEM;
1347 goto out_free_domains;
1348 }
1349
1350 err = of_pci_msi_chip_add(&its->msi_chip);
1351 if (err)
1352 goto out_free_domains;
1353 }
1354
1355 spin_lock(&its_lock);
1356 list_add(&its->entry, &its_nodes);
1357 spin_unlock(&its_lock);
1358
1359 return 0;
1360
1361out_free_domains:
1362 if (its->msi_chip.domain)
1363 irq_domain_remove(its->msi_chip.domain);
1364 if (its->domain)
1365 irq_domain_remove(its->domain);
1366out_free_tables:
1367 its_free_tables(its);
1368out_free_cmd:
1369 kfree(its->cmd_base);
1370out_free_its:
1371 kfree(its);
1372out_unmap:
1373 iounmap(its_base);
1374 pr_err("ITS: failed probing %s (%d)\n", node->full_name, err);
1375 return err;
1376}
1377
1378static bool gic_rdists_supports_plpis(void)
1379{
1380 return !!(readl_relaxed(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
1381}
1382
1383int its_cpu_init(void)
1384{
1385 if (!gic_rdists_supports_plpis()) {
1386 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
1387 return -ENXIO;
1388 }
1389
1390 if (!list_empty(&its_nodes)) {
1391 its_cpu_init_lpis();
1392 its_cpu_init_collection();
1393 }
1394
1395 return 0;
1396}
1397
1398static struct of_device_id its_device_id[] = {
1399 { .compatible = "arm,gic-v3-its", },
1400 {},
1401};
1402
1403int its_init(struct device_node *node, struct rdists *rdists,
1404 struct irq_domain *parent_domain)
1405{
1406 struct device_node *np;
1407
1408 for (np = of_find_matching_node(node, its_device_id); np;
1409 np = of_find_matching_node(np, its_device_id)) {
1410 its_probe(np, parent_domain);
1411 }
1412
1413 if (list_empty(&its_nodes)) {
1414 pr_warn("ITS: No ITS available, not enabling LPIs\n");
1415 return -ENXIO;
1416 }
1417
1418 gic_rdists = rdists;
1419 gic_root_node = node;
1420
1421 its_alloc_lpi_tables();
1422 its_lpi_init(rdists->id_bits);
1423
1424 return 0;
1425}
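
For reference, a standalone sketch (plain C, not part of the patch) of how its_probe() above composes the command-queue GITS_CBASER value before writing it; ITS_CMD_QUEUE_SZ and the physical address are assumed values chosen only for illustration.

#include <stdint.h>
#include <stdio.h>

#define SZ_4K                       0x1000ULL
#define ITS_CMD_QUEUE_SZ            (64 * 1024)     /* assumption: 64K command queue */
#define GITS_CBASER_VALID           (1ULL << 63)
#define GITS_CBASER_WaWb            (5ULL << 59)
#define GITS_CBASER_InnerShareable  (1ULL << 10)

int main(void)
{
	uint64_t cmd_queue_pa = 0x80000000ULL;      /* hypothetical physical address */
	uint64_t baser;

	baser = cmd_queue_pa |
		GITS_CBASER_WaWb |                  /* cacheability, as in its_probe() */
		GITS_CBASER_InnerShareable |        /* shareability, read back to decide flushing */
		(ITS_CMD_QUEUE_SZ / SZ_4K - 1) |    /* size field: number of 4K pages - 1 */
		GITS_CBASER_VALID;

	printf("GITS_CBASER = 0x%016llx\n", (unsigned long long)baser);
	return 0;
}
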
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index aa17ae805a70..1a146ccee701 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -34,20 +34,25 @@
34#include "irq-gic-common.h" 34#include "irq-gic-common.h"
35#include "irqchip.h" 35#include "irqchip.h"
36 36
37struct redist_region {
38 void __iomem *redist_base;
39 phys_addr_t phys_base;
40};
41
37struct gic_chip_data { 42struct gic_chip_data {
38 void __iomem *dist_base; 43 void __iomem *dist_base;
39 void __iomem **redist_base; 44 struct redist_region *redist_regions;
40 void __iomem * __percpu *rdist; 45 struct rdists rdists;
41 struct irq_domain *domain; 46 struct irq_domain *domain;
42 u64 redist_stride; 47 u64 redist_stride;
43 u32 redist_regions; 48 u32 nr_redist_regions;
44 unsigned int irq_nr; 49 unsigned int irq_nr;
45}; 50};
46 51
47static struct gic_chip_data gic_data __read_mostly; 52static struct gic_chip_data gic_data __read_mostly;
48 53
49#define gic_data_rdist() (this_cpu_ptr(gic_data.rdist)) 54#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist))
50#define gic_data_rdist_rd_base() (*gic_data_rdist()) 55#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
51#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K) 56#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
52 57
53/* Our default, arbitrary priority value. Linux only uses one anyway. */ 58/* Our default, arbitrary priority value. Linux only uses one anyway. */
@@ -71,9 +76,6 @@ static inline void __iomem *gic_dist_base(struct irq_data *d)
71 if (d->hwirq <= 1023) /* SPI -> dist_base */ 76 if (d->hwirq <= 1023) /* SPI -> dist_base */
72 return gic_data.dist_base; 77 return gic_data.dist_base;
73 78
74 if (d->hwirq >= 8192)
75 BUG(); /* LPI Detected!!! */
76
77 return NULL; 79 return NULL;
78} 80}
79 81
@@ -271,11 +273,11 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
271 do { 273 do {
272 irqnr = gic_read_iar(); 274 irqnr = gic_read_iar();
273 275
274 if (likely(irqnr > 15 && irqnr < 1020)) { 276 if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
275 int err; 277 int err;
276 err = handle_domain_irq(gic_data.domain, irqnr, regs); 278 err = handle_domain_irq(gic_data.domain, irqnr, regs);
277 if (err) { 279 if (err) {
278 WARN_ONCE(true, "Unexpected SPI received!\n"); 280 WARN_ONCE(true, "Unexpected interrupt received!\n");
279 gic_write_eoir(irqnr); 281 gic_write_eoir(irqnr);
280 } 282 }
281 continue; 283 continue;
@@ -333,8 +335,8 @@ static int gic_populate_rdist(void)
333 MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | 335 MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
334 MPIDR_AFFINITY_LEVEL(mpidr, 0)); 336 MPIDR_AFFINITY_LEVEL(mpidr, 0));
335 337
336 for (i = 0; i < gic_data.redist_regions; i++) { 338 for (i = 0; i < gic_data.nr_redist_regions; i++) {
337 void __iomem *ptr = gic_data.redist_base[i]; 339 void __iomem *ptr = gic_data.redist_regions[i].redist_base;
338 u32 reg; 340 u32 reg;
339 341
340 reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK; 342 reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
@@ -347,10 +349,13 @@ static int gic_populate_rdist(void)
347 do { 349 do {
348 typer = readq_relaxed(ptr + GICR_TYPER); 350 typer = readq_relaxed(ptr + GICR_TYPER);
349 if ((typer >> 32) == aff) { 351 if ((typer >> 32) == aff) {
352 u64 offset = ptr - gic_data.redist_regions[i].redist_base;
350 gic_data_rdist_rd_base() = ptr; 353 gic_data_rdist_rd_base() = ptr;
351 pr_info("CPU%d: found redistributor %llx @%p\n", 354 gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
355 pr_info("CPU%d: found redistributor %llx region %d:%pa\n",
352 smp_processor_id(), 356 smp_processor_id(),
353 (unsigned long long)mpidr, ptr); 357 (unsigned long long)mpidr,
358 i, &gic_data_rdist()->phys_base);
354 return 0; 359 return 0;
355 } 360 }
356 361
@@ -385,6 +390,11 @@ static void gic_cpu_sys_reg_init(void)
385 gic_write_grpen1(1); 390 gic_write_grpen1(1);
386} 391}
387 392
393static int gic_dist_supports_lpis(void)
394{
395 return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS);
396}
397
388static void gic_cpu_init(void) 398static void gic_cpu_init(void)
389{ 399{
390 void __iomem *rbase; 400 void __iomem *rbase;
@@ -399,6 +409,10 @@ static void gic_cpu_init(void)
399 409
400 gic_cpu_config(rbase, gic_redist_wait_for_rwp); 410 gic_cpu_config(rbase, gic_redist_wait_for_rwp);
401 411
412 /* Give LPIs a spin */
413 if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
414 its_cpu_init();
415
402 /* initialise system registers */ 416 /* initialise system registers */
403 gic_cpu_sys_reg_init(); 417 gic_cpu_sys_reg_init();
404} 418}
@@ -585,26 +599,43 @@ static struct irq_chip gic_chip = {
585 .irq_set_affinity = gic_set_affinity, 599 .irq_set_affinity = gic_set_affinity,
586}; 600};
587 601
602#define GIC_ID_NR (1U << gic_data.rdists.id_bits)
603
588static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, 604static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
589 irq_hw_number_t hw) 605 irq_hw_number_t hw)
590{ 606{
591 /* SGIs are private to the core kernel */ 607 /* SGIs are private to the core kernel */
592 if (hw < 16) 608 if (hw < 16)
593 return -EPERM; 609 return -EPERM;
610 /* Nothing here */
611 if (hw >= gic_data.irq_nr && hw < 8192)
612 return -EPERM;
613 /* Off limits */
614 if (hw >= GIC_ID_NR)
615 return -EPERM;
616
594 /* PPIs */ 617 /* PPIs */
595 if (hw < 32) { 618 if (hw < 32) {
596 irq_set_percpu_devid(irq); 619 irq_set_percpu_devid(irq);
597 irq_set_chip_and_handler(irq, &gic_chip, 620 irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
598 handle_percpu_devid_irq); 621 handle_percpu_devid_irq, NULL, NULL);
599 set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); 622 set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
600 } 623 }
601 /* SPIs */ 624 /* SPIs */
602 if (hw >= 32 && hw < gic_data.irq_nr) { 625 if (hw >= 32 && hw < gic_data.irq_nr) {
603 irq_set_chip_and_handler(irq, &gic_chip, 626 irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
604 handle_fasteoi_irq); 627 handle_fasteoi_irq, NULL, NULL);
605 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 628 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
606 } 629 }
607 irq_set_chip_data(irq, d->host_data); 630 /* LPIs */
631 if (hw >= 8192 && hw < GIC_ID_NR) {
632 if (!gic_dist_supports_lpis())
633 return -EPERM;
634 irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
635 handle_fasteoi_irq, NULL, NULL);
636 set_irq_flags(irq, IRQF_VALID);
637 }
638
608 return 0; 639 return 0;
609} 640}
610 641
@@ -625,6 +656,9 @@ static int gic_irq_domain_xlate(struct irq_domain *d,
625 case 1: /* PPI */ 656 case 1: /* PPI */
626 *out_hwirq = intspec[1] + 16; 657 *out_hwirq = intspec[1] + 16;
627 break; 658 break;
659 case GIC_IRQ_TYPE_LPI: /* LPI */
660 *out_hwirq = intspec[1];
661 break;
628 default: 662 default:
629 return -EINVAL; 663 return -EINVAL;
630 } 664 }
@@ -633,17 +667,50 @@ static int gic_irq_domain_xlate(struct irq_domain *d,
633 return 0; 667 return 0;
634} 668}
635 669
670static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
671 unsigned int nr_irqs, void *arg)
672{
673 int i, ret;
674 irq_hw_number_t hwirq;
675 unsigned int type = IRQ_TYPE_NONE;
676 struct of_phandle_args *irq_data = arg;
677
678 ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args,
679 irq_data->args_count, &hwirq, &type);
680 if (ret)
681 return ret;
682
683 for (i = 0; i < nr_irqs; i++)
684 gic_irq_domain_map(domain, virq + i, hwirq + i);
685
686 return 0;
687}
688
689static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
690 unsigned int nr_irqs)
691{
692 int i;
693
694 for (i = 0; i < nr_irqs; i++) {
695 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
696 irq_set_handler(virq + i, NULL);
697 irq_domain_reset_irq_data(d);
698 }
699}
700
636static const struct irq_domain_ops gic_irq_domain_ops = { 701static const struct irq_domain_ops gic_irq_domain_ops = {
637 .map = gic_irq_domain_map,
638 .xlate = gic_irq_domain_xlate, 702 .xlate = gic_irq_domain_xlate,
703 .alloc = gic_irq_domain_alloc,
704 .free = gic_irq_domain_free,
639}; 705};
640 706
641static int __init gic_of_init(struct device_node *node, struct device_node *parent) 707static int __init gic_of_init(struct device_node *node, struct device_node *parent)
642{ 708{
643 void __iomem *dist_base; 709 void __iomem *dist_base;
644 void __iomem **redist_base; 710 struct redist_region *rdist_regs;
645 u64 redist_stride; 711 u64 redist_stride;
646 u32 redist_regions; 712 u32 nr_redist_regions;
713 u32 typer;
647 u32 reg; 714 u32 reg;
648 int gic_irqs; 715 int gic_irqs;
649 int err; 716 int err;
@@ -664,54 +731,63 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
664 goto out_unmap_dist; 731 goto out_unmap_dist;
665 } 732 }
666 733
667 if (of_property_read_u32(node, "#redistributor-regions", &redist_regions)) 734 if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
668 redist_regions = 1; 735 nr_redist_regions = 1;
669 736
670 redist_base = kzalloc(sizeof(*redist_base) * redist_regions, GFP_KERNEL); 737 rdist_regs = kzalloc(sizeof(*rdist_regs) * nr_redist_regions, GFP_KERNEL);
671 if (!redist_base) { 738 if (!rdist_regs) {
672 err = -ENOMEM; 739 err = -ENOMEM;
673 goto out_unmap_dist; 740 goto out_unmap_dist;
674 } 741 }
675 742
676 for (i = 0; i < redist_regions; i++) { 743 for (i = 0; i < nr_redist_regions; i++) {
677 redist_base[i] = of_iomap(node, 1 + i); 744 struct resource res;
678 if (!redist_base[i]) { 745 int ret;
746
747 ret = of_address_to_resource(node, 1 + i, &res);
748 rdist_regs[i].redist_base = of_iomap(node, 1 + i);
749 if (ret || !rdist_regs[i].redist_base) {
679 pr_err("%s: couldn't map region %d\n", 750 pr_err("%s: couldn't map region %d\n",
680 node->full_name, i); 751 node->full_name, i);
681 err = -ENODEV; 752 err = -ENODEV;
682 goto out_unmap_rdist; 753 goto out_unmap_rdist;
683 } 754 }
755 rdist_regs[i].phys_base = res.start;
684 } 756 }
685 757
686 if (of_property_read_u64(node, "redistributor-stride", &redist_stride)) 758 if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
687 redist_stride = 0; 759 redist_stride = 0;
688 760
689 gic_data.dist_base = dist_base; 761 gic_data.dist_base = dist_base;
690 gic_data.redist_base = redist_base; 762 gic_data.redist_regions = rdist_regs;
691 gic_data.redist_regions = redist_regions; 763 gic_data.nr_redist_regions = nr_redist_regions;
692 gic_data.redist_stride = redist_stride; 764 gic_data.redist_stride = redist_stride;
693 765
694 /* 766 /*
695 * Find out how many interrupts are supported. 767 * Find out how many interrupts are supported.
696 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI) 768 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
697 */ 769 */
698 gic_irqs = readl_relaxed(gic_data.dist_base + GICD_TYPER) & 0x1f; 770 typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
699 gic_irqs = (gic_irqs + 1) * 32; 771 gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
772 gic_irqs = GICD_TYPER_IRQS(typer);
700 if (gic_irqs > 1020) 773 if (gic_irqs > 1020)
701 gic_irqs = 1020; 774 gic_irqs = 1020;
702 gic_data.irq_nr = gic_irqs; 775 gic_data.irq_nr = gic_irqs;
703 776
704 gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops, 777 gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops,
705 &gic_data); 778 &gic_data);
706 gic_data.rdist = alloc_percpu(typeof(*gic_data.rdist)); 779 gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
707 780
708 if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdist)) { 781 if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
709 err = -ENOMEM; 782 err = -ENOMEM;
710 goto out_free; 783 goto out_free;
711 } 784 }
712 785
713 set_handle_irq(gic_handle_irq); 786 set_handle_irq(gic_handle_irq);
714 787
788 if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
789 its_init(node, &gic_data.rdists, gic_data.domain);
790
715 gic_smp_init(); 791 gic_smp_init();
716 gic_dist_init(); 792 gic_dist_init();
717 gic_cpu_init(); 793 gic_cpu_init();
@@ -722,12 +798,12 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
722out_free: 798out_free:
723 if (gic_data.domain) 799 if (gic_data.domain)
724 irq_domain_remove(gic_data.domain); 800 irq_domain_remove(gic_data.domain);
725 free_percpu(gic_data.rdist); 801 free_percpu(gic_data.rdists.rdist);
726out_unmap_rdist: 802out_unmap_rdist:
727 for (i = 0; i < redist_regions; i++) 803 for (i = 0; i < nr_redist_regions; i++)
728 if (redist_base[i]) 804 if (rdist_regs[i].redist_base)
729 iounmap(redist_base[i]); 805 iounmap(rdist_regs[i].redist_base);
730 kfree(redist_base); 806 kfree(rdist_regs);
731out_unmap_dist: 807out_unmap_dist:
732 iounmap(dist_base); 808 iounmap(dist_base);
733 return err; 809 return err;
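
A standalone sketch (not part of the patch) of the GICD_TYPER decoding that the reworked gic_of_init() above now relies on, using the GICD_TYPER_ID_BITS/GICD_TYPER_IRQS macros added to arm-gic-v3.h further down; the register value is invented.

#include <stdint.h>
#include <stdio.h>

#define GICD_TYPER_ID_BITS(typer)  ((((typer) >> 19) & 0x1f) + 1)
#define GICD_TYPER_IRQS(typer)     ((((typer) & 0x1f) + 1) * 32)

int main(void)
{
	uint32_t typer = (15u << 19) | 0x1e;   /* hypothetical: 16 ID bits, 992 SGI+PPI+SPI IDs */
	unsigned int id_bits = GICD_TYPER_ID_BITS(typer);
	unsigned int gic_irqs = GICD_TYPER_IRQS(typer);

	if (gic_irqs > 1020)                   /* the driver clamps the SGI+PPI+SPI space */
		gic_irqs = 1020;

	/* LPIs live at hwirq >= 8192 and below 1 << id_bits */
	printf("irq_nr=%u, id_bits=%u, LPI range [8192, %u)\n",
	       gic_irqs, id_bits, 1u << id_bits);
	return 0;
}
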
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 7f9be0785c6a..d617ee5a3d8a 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -788,17 +788,16 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
788{ 788{
789 if (hw < 32) { 789 if (hw < 32) {
790 irq_set_percpu_devid(irq); 790 irq_set_percpu_devid(irq);
791 irq_set_chip_and_handler(irq, &gic_chip, 791 irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
792 handle_percpu_devid_irq); 792 handle_percpu_devid_irq, NULL, NULL);
793 set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); 793 set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
794 } else { 794 } else {
795 irq_set_chip_and_handler(irq, &gic_chip, 795 irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
796 handle_fasteoi_irq); 796 handle_fasteoi_irq, NULL, NULL);
797 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 797 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
798 798
799 gic_routable_irq_domain_ops->map(d, irq, hw); 799 gic_routable_irq_domain_ops->map(d, irq, hw);
800 } 800 }
801 irq_set_chip_data(irq, d->host_data);
802 return 0; 801 return 0;
803} 802}
804 803
@@ -858,6 +857,31 @@ static struct notifier_block gic_cpu_notifier = {
858}; 857};
859#endif 858#endif
860 859
860static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
861 unsigned int nr_irqs, void *arg)
862{
863 int i, ret;
864 irq_hw_number_t hwirq;
865 unsigned int type = IRQ_TYPE_NONE;
866 struct of_phandle_args *irq_data = arg;
867
868 ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args,
869 irq_data->args_count, &hwirq, &type);
870 if (ret)
871 return ret;
872
873 for (i = 0; i < nr_irqs; i++)
874 gic_irq_domain_map(domain, virq + i, hwirq + i);
875
876 return 0;
877}
878
879static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
880 .xlate = gic_irq_domain_xlate,
881 .alloc = gic_irq_domain_alloc,
882 .free = irq_domain_free_irqs_top,
883};
884
861static const struct irq_domain_ops gic_irq_domain_ops = { 885static const struct irq_domain_ops gic_irq_domain_ops = {
862 .map = gic_irq_domain_map, 886 .map = gic_irq_domain_map,
863 .unmap = gic_irq_domain_unmap, 887 .unmap = gic_irq_domain_unmap,
@@ -948,18 +972,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
948 gic_cpu_map[i] = 0xff; 972 gic_cpu_map[i] = 0xff;
949 973
950 /* 974 /*
951 * For primary GICs, skip over SGIs.
952 * For secondary GICs, skip over PPIs, too.
953 */
954 if (gic_nr == 0 && (irq_start & 31) > 0) {
955 hwirq_base = 16;
956 if (irq_start != -1)
957 irq_start = (irq_start & ~31) + 16;
958 } else {
959 hwirq_base = 32;
960 }
961
962 /*
963 * Find out how many interrupts are supported. 975 * Find out how many interrupts are supported.
964 * The GIC only supports up to 1020 interrupt sources. 976 * The GIC only supports up to 1020 interrupt sources.
965 */ 977 */
@@ -969,10 +981,31 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
969 gic_irqs = 1020; 981 gic_irqs = 1020;
970 gic->gic_irqs = gic_irqs; 982 gic->gic_irqs = gic_irqs;
971 983
972 gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ 984 if (node) { /* DT case */
985 const struct irq_domain_ops *ops = &gic_irq_domain_hierarchy_ops;
986
987 if (!of_property_read_u32(node, "arm,routable-irqs",
988 &nr_routable_irqs)) {
989 ops = &gic_irq_domain_ops;
990 gic_irqs = nr_routable_irqs;
991 }
992
993 gic->domain = irq_domain_add_linear(node, gic_irqs, ops, gic);
994 } else { /* Non-DT case */
995 /*
996 * For primary GICs, skip over SGIs.
997 * For secondary GICs, skip over PPIs, too.
998 */
999 if (gic_nr == 0 && (irq_start & 31) > 0) {
1000 hwirq_base = 16;
1001 if (irq_start != -1)
1002 irq_start = (irq_start & ~31) + 16;
1003 } else {
1004 hwirq_base = 32;
1005 }
1006
1007 gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
973 1008
974 if (of_property_read_u32(node, "arm,routable-irqs",
975 &nr_routable_irqs)) {
976 irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, 1009 irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
977 numa_node_id()); 1010 numa_node_id());
978 if (IS_ERR_VALUE(irq_base)) { 1011 if (IS_ERR_VALUE(irq_base)) {
@@ -983,10 +1016,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
983 1016
984 gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base, 1017 gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
985 hwirq_base, &gic_irq_domain_ops, gic); 1018 hwirq_base, &gic_irq_domain_ops, gic);
986 } else {
987 gic->domain = irq_domain_add_linear(node, nr_routable_irqs,
988 &gic_irq_domain_ops,
989 gic);
990 } 1019 }
991 1020
992 if (WARN_ON(!gic->domain)) 1021 if (WARN_ON(!gic->domain))
@@ -1037,6 +1066,10 @@ gic_of_init(struct device_node *node, struct device_node *parent)
1037 irq = irq_of_parse_and_map(node, 0); 1066 irq = irq_of_parse_and_map(node, 0);
1038 gic_cascade_irq(gic_cnt, irq); 1067 gic_cascade_irq(gic_cnt, irq);
1039 } 1068 }
1069
1070 if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
1071 gicv2m_of_init(node, gic_data[gic_cnt].domain);
1072
1040 gic_cnt++; 1073 gic_cnt++;
1041 return 0; 1074 return 0;
1042} 1075}
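
The new gic_irq_domain_alloc() above leans on the driver's existing gic_irq_domain_xlate() to turn the three GIC specifier cells into a hwirq before mapping each virq. A standalone sketch of that translation (first cell 0 = SPI, 1 = PPI) with an invented interrupt number; the offsets follow the standard GIC binding rather than code shown in this diff.

#include <stdio.h>

int main(void)
{
	unsigned int intspec[3] = { 0 /* SPI */, 29 /* hypothetical number */, 4 /* level high */ };
	unsigned long hwirq;

	if (intspec[0] == 0)          /* SPI -> hwirq = number + 32 */
		hwirq = intspec[1] + 32;
	else                          /* PPI -> hwirq = number + 16 */
		hwirq = intspec[1] + 16;

	printf("hwirq = %lu, trigger = %u\n", hwirq, intspec[2] & 0xf);
	return 0;
}
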
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c
new file mode 100644
index 000000000000..7e342df6a62f
--- /dev/null
+++ b/drivers/irqchip/irq-mtk-sysirq.c
@@ -0,0 +1,163 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: Joe.C <yingjoe.chen@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <linux/irq.h>
16#include <linux/irqdomain.h>
17#include <linux/of.h>
18#include <linux/of_irq.h>
19#include <linux/of_address.h>
20#include <linux/io.h>
21#include <linux/slab.h>
22#include <linux/spinlock.h>
23
24#include "irqchip.h"
25
26#define MT6577_SYS_INTPOL_NUM (224)
27
28struct mtk_sysirq_chip_data {
29 spinlock_t lock;
30 void __iomem *intpol_base;
31};
32
33static int mtk_sysirq_set_type(struct irq_data *data, unsigned int type)
34{
35 irq_hw_number_t hwirq = data->hwirq;
36 struct mtk_sysirq_chip_data *chip_data = data->chip_data;
37 u32 offset, reg_index, value;
38 unsigned long flags;
39 int ret;
40
41 offset = hwirq & 0x1f;
42 reg_index = hwirq >> 5;
43
44 spin_lock_irqsave(&chip_data->lock, flags);
45 value = readl_relaxed(chip_data->intpol_base + reg_index * 4);
46 if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING) {
47 if (type == IRQ_TYPE_LEVEL_LOW)
48 type = IRQ_TYPE_LEVEL_HIGH;
49 else
50 type = IRQ_TYPE_EDGE_RISING;
51 value |= (1 << offset);
52 } else {
53 value &= ~(1 << offset);
54 }
55 writel(value, chip_data->intpol_base + reg_index * 4);
56
57 data = data->parent_data;
58 ret = data->chip->irq_set_type(data, type);
59 spin_unlock_irqrestore(&chip_data->lock, flags);
60 return ret;
61}
62
63static struct irq_chip mtk_sysirq_chip = {
64 .name = "MT_SYSIRQ",
65 .irq_mask = irq_chip_mask_parent,
66 .irq_unmask = irq_chip_unmask_parent,
67 .irq_eoi = irq_chip_eoi_parent,
68 .irq_set_type = mtk_sysirq_set_type,
69 .irq_retrigger = irq_chip_retrigger_hierarchy,
70 .irq_set_affinity = irq_chip_set_affinity_parent,
71};
72
73static int mtk_sysirq_domain_xlate(struct irq_domain *d,
74 struct device_node *controller,
75 const u32 *intspec, unsigned int intsize,
76 unsigned long *out_hwirq,
77 unsigned int *out_type)
78{
79 if (intsize != 3)
80 return -EINVAL;
81
82 /* sysirq doesn't support PPI */
83 if (intspec[0])
84 return -EINVAL;
85
86 *out_hwirq = intspec[1];
87 *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
88 return 0;
89}
90
91static int mtk_sysirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
92 unsigned int nr_irqs, void *arg)
93{
94 int i;
95 irq_hw_number_t hwirq;
96 struct of_phandle_args *irq_data = arg;
97 struct of_phandle_args gic_data = *irq_data;
98
99 if (irq_data->args_count != 3)
100 return -EINVAL;
101
102 /* sysirq doesn't support PPI */
103 if (irq_data->args[0])
104 return -EINVAL;
105
106 hwirq = irq_data->args[1];
107 for (i = 0; i < nr_irqs; i++)
108 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
109 &mtk_sysirq_chip,
110 domain->host_data);
111
112 gic_data.np = domain->parent->of_node;
113 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_data);
114}
115
116static struct irq_domain_ops sysirq_domain_ops = {
117 .xlate = mtk_sysirq_domain_xlate,
118 .alloc = mtk_sysirq_domain_alloc,
119 .free = irq_domain_free_irqs_common,
120};
121
122static int __init mtk_sysirq_of_init(struct device_node *node,
123 struct device_node *parent)
124{
125 struct irq_domain *domain, *domain_parent;
126 struct mtk_sysirq_chip_data *chip_data;
127 int ret = 0;
128
129 domain_parent = irq_find_host(parent);
130 if (!domain_parent) {
131 pr_err("mtk_sysirq: interrupt-parent not found\n");
132 return -EINVAL;
133 }
134
135 chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
136 if (!chip_data)
137 return -ENOMEM;
138
139 chip_data->intpol_base = of_io_request_and_map(node, 0, "intpol");
140 if (!chip_data->intpol_base) {
141 pr_err("mtk_sysirq: unable to map sysirq register\n");
142 ret = -ENOMEM;
143 goto out_free;
144 }
145
146 domain = irq_domain_add_hierarchy(domain_parent, 0,
147 MT6577_SYS_INTPOL_NUM, node,
148 &sysirq_domain_ops, chip_data);
149 if (!domain) {
150 ret = -ENOMEM;
151 goto out_unmap;
152 }
153 spin_lock_init(&chip_data->lock);
154
155 return 0;
156
157out_unmap:
158 iounmap(chip_data->intpol_base);
159out_free:
160 kfree(chip_data);
161 return ret;
162}
163IRQCHIP_DECLARE(mtk_sysirq, "mediatek,mt6577-sysirq", mtk_sysirq_of_init);
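
A standalone sketch (not kernel code) of the INTPOL bookkeeping done by mtk_sysirq_set_type() above: which 32-bit polarity word and bit a given hwirq lands in, and how a falling-edge request is presented to the parent GIC as rising-edge once the inverter bit is set. The hwirq value is invented; the IRQ_TYPE_* constants mirror the generic kernel values.

#include <stdio.h>

#define IRQ_TYPE_EDGE_RISING   1
#define IRQ_TYPE_EDGE_FALLING  2

int main(void)
{
	unsigned long hwirq = 70;              /* hypothetical sysirq number */
	unsigned int offset = hwirq & 0x1f;    /* bit within a 32-bit INTPOL word */
	unsigned int reg_index = hwirq >> 5;   /* which INTPOL word */
	unsigned int type = IRQ_TYPE_EDGE_FALLING;

	/* The polarity inverter absorbs the inversion, so the parent GIC is
	 * asked for the equivalent "positive" trigger. */
	if (type == IRQ_TYPE_EDGE_FALLING)
		type = IRQ_TYPE_EDGE_RISING;

	printf("INTPOL word %u, bit %u, parent type %u\n", reg_index, offset, type);
	return 0;
}
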
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 03a4ea37ba86..1e8b0cf30792 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -49,6 +49,10 @@
49#define GICD_CTLR_ENABLE_G1A (1U << 1) 49#define GICD_CTLR_ENABLE_G1A (1U << 1)
50#define GICD_CTLR_ENABLE_G1 (1U << 0) 50#define GICD_CTLR_ENABLE_G1 (1U << 0)
51 51
52#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1)
53#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32)
54#define GICD_TYPER_LPIS (1U << 17)
55
52#define GICD_IROUTER_SPI_MODE_ONE (0U << 31) 56#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
53#define GICD_IROUTER_SPI_MODE_ANY (1U << 31) 57#define GICD_IROUTER_SPI_MODE_ANY (1U << 31)
54 58
@@ -76,9 +80,27 @@
76#define GICR_MOVALLR 0x0110 80#define GICR_MOVALLR 0x0110
77#define GICR_PIDR2 GICD_PIDR2 81#define GICR_PIDR2 GICD_PIDR2
78 82
83#define GICR_CTLR_ENABLE_LPIS (1UL << 0)
84
85#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff)
86
79#define GICR_WAKER_ProcessorSleep (1U << 1) 87#define GICR_WAKER_ProcessorSleep (1U << 1)
80#define GICR_WAKER_ChildrenAsleep (1U << 2) 88#define GICR_WAKER_ChildrenAsleep (1U << 2)
81 89
90#define GICR_PROPBASER_NonShareable (0U << 10)
91#define GICR_PROPBASER_InnerShareable (1U << 10)
92#define GICR_PROPBASER_OuterShareable (2U << 10)
93#define GICR_PROPBASER_SHAREABILITY_MASK (3UL << 10)
94#define GICR_PROPBASER_nCnB (0U << 7)
95#define GICR_PROPBASER_nC (1U << 7)
96#define GICR_PROPBASER_RaWt (2U << 7)
97#define GICR_PROPBASER_RaWb (3U << 7)
98#define GICR_PROPBASER_WaWt (4U << 7)
99#define GICR_PROPBASER_WaWb (5U << 7)
100#define GICR_PROPBASER_RaWaWt (6U << 7)
101#define GICR_PROPBASER_RaWaWb (7U << 7)
102#define GICR_PROPBASER_IDBITS_MASK (0x1f)
103
82/* 104/*
83 * Re-Distributor registers, offsets from SGI_base 105 * Re-Distributor registers, offsets from SGI_base
84 */ 106 */
@@ -91,9 +113,93 @@
91#define GICR_IPRIORITYR0 GICD_IPRIORITYR 113#define GICR_IPRIORITYR0 GICD_IPRIORITYR
92#define GICR_ICFGR0 GICD_ICFGR 114#define GICR_ICFGR0 GICD_ICFGR
93 115
116#define GICR_TYPER_PLPIS (1U << 0)
94#define GICR_TYPER_VLPIS (1U << 1) 117#define GICR_TYPER_VLPIS (1U << 1)
95#define GICR_TYPER_LAST (1U << 4) 118#define GICR_TYPER_LAST (1U << 4)
96 119
120#define LPI_PROP_GROUP1 (1 << 1)
121#define LPI_PROP_ENABLED (1 << 0)
122
123/*
124 * ITS registers, offsets from ITS_base
125 */
126#define GITS_CTLR 0x0000
127#define GITS_IIDR 0x0004
128#define GITS_TYPER 0x0008
129#define GITS_CBASER 0x0080
130#define GITS_CWRITER 0x0088
131#define GITS_CREADR 0x0090
132#define GITS_BASER 0x0100
133#define GITS_PIDR2 GICR_PIDR2
134
135#define GITS_TRANSLATER 0x10040
136
137#define GITS_TYPER_PTA (1UL << 19)
138
139#define GITS_CBASER_VALID (1UL << 63)
140#define GITS_CBASER_nCnB (0UL << 59)
141#define GITS_CBASER_nC (1UL << 59)
142#define GITS_CBASER_RaWt (2UL << 59)
143#define GITS_CBASER_RaWb (3UL << 59)
144#define GITS_CBASER_WaWt (4UL << 59)
145#define GITS_CBASER_WaWb (5UL << 59)
146#define GITS_CBASER_RaWaWt (6UL << 59)
147#define GITS_CBASER_RaWaWb (7UL << 59)
148#define GITS_CBASER_NonShareable (0UL << 10)
149#define GITS_CBASER_InnerShareable (1UL << 10)
150#define GITS_CBASER_OuterShareable (2UL << 10)
151#define GITS_CBASER_SHAREABILITY_MASK (3UL << 10)
152
153#define GITS_BASER_NR_REGS 8
154
155#define GITS_BASER_VALID (1UL << 63)
156#define GITS_BASER_nCnB (0UL << 59)
157#define GITS_BASER_nC (1UL << 59)
158#define GITS_BASER_RaWt (2UL << 59)
159#define GITS_BASER_RaWb (3UL << 59)
160#define GITS_BASER_WaWt (4UL << 59)
161#define GITS_BASER_WaWb (5UL << 59)
162#define GITS_BASER_RaWaWt (6UL << 59)
163#define GITS_BASER_RaWaWb (7UL << 59)
164#define GITS_BASER_TYPE_SHIFT (56)
165#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
166#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
167#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)
168#define GITS_BASER_NonShareable (0UL << 10)
169#define GITS_BASER_InnerShareable (1UL << 10)
170#define GITS_BASER_OuterShareable (2UL << 10)
171#define GITS_BASER_SHAREABILITY_SHIFT (10)
172#define GITS_BASER_SHAREABILITY_MASK (3UL << GITS_BASER_SHAREABILITY_SHIFT)
173#define GITS_BASER_PAGE_SIZE_SHIFT (8)
174#define GITS_BASER_PAGE_SIZE_4K (0UL << GITS_BASER_PAGE_SIZE_SHIFT)
175#define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT)
176#define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT)
177#define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT)
178
179#define GITS_BASER_TYPE_NONE 0
180#define GITS_BASER_TYPE_DEVICE 1
181#define GITS_BASER_TYPE_VCPU 2
182#define GITS_BASER_TYPE_CPU 3
183#define GITS_BASER_TYPE_COLLECTION 4
184#define GITS_BASER_TYPE_RESERVED5 5
185#define GITS_BASER_TYPE_RESERVED6 6
186#define GITS_BASER_TYPE_RESERVED7 7
187
188/*
189 * ITS commands
190 */
191#define GITS_CMD_MAPD 0x08
192#define GITS_CMD_MAPC 0x09
193#define GITS_CMD_MAPVI 0x0a
194#define GITS_CMD_MOVI 0x01
195#define GITS_CMD_DISCARD 0x0f
196#define GITS_CMD_INV 0x0c
197#define GITS_CMD_MOVALL 0x0e
198#define GITS_CMD_INVALL 0x0d
199#define GITS_CMD_INT 0x03
200#define GITS_CMD_CLEAR 0x04
201#define GITS_CMD_SYNC 0x05
202
97/* 203/*
98 * CPU interface registers 204 * CPU interface registers
99 */ 205 */
@@ -189,12 +295,34 @@
189 295
190#include <linux/stringify.h> 296#include <linux/stringify.h>
191 297
298/*
299 * We need a value to serve as a irq-type for LPIs. Choose one that will
300 * hopefully pique the interest of the reviewer.
301 */
302#define GIC_IRQ_TYPE_LPI 0xa110c8ed
303
304struct rdists {
305 struct {
306 void __iomem *rd_base;
307 struct page *pend_page;
308 phys_addr_t phys_base;
309 } __percpu *rdist;
310 struct page *prop_page;
311 int id_bits;
312 u64 flags;
313};
314
192static inline void gic_write_eoir(u64 irq) 315static inline void gic_write_eoir(u64 irq)
193{ 316{
194 asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq)); 317 asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
195 isb(); 318 isb();
196} 319}
197 320
321struct irq_domain;
322int its_cpu_init(void);
323int its_init(struct device_node *node, struct rdists *rdists,
324 struct irq_domain *domain);
325
198#endif 326#endif
199 327
200#endif 328#endif
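
A standalone sketch showing how the GITS_BASERn field macros added above decode a register value into a table type and entry size; the register value is invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define GITS_BASER_TYPE_SHIFT        (56)
#define GITS_BASER_TYPE(r)           (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
#define GITS_BASER_ENTRY_SIZE_SHIFT  (48)
#define GITS_BASER_ENTRY_SIZE(r)     ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)
#define GITS_BASER_TYPE_DEVICE       1

int main(void)
{
	/* hypothetical BASER value: device table, 8-byte entries */
	uint64_t baser = ((uint64_t)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT) |
			 (7ULL << GITS_BASER_ENTRY_SIZE_SHIFT);

	printf("type=%llu (1 = device table), entry size=%llu bytes\n",
	       (unsigned long long)GITS_BASER_TYPE(baser),
	       (unsigned long long)GITS_BASER_ENTRY_SIZE(baser));
	return 0;
}
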
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 13eed92c7d24..71d706d5f169 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -91,6 +91,8 @@
91 91
92#ifndef __ASSEMBLY__ 92#ifndef __ASSEMBLY__
93 93
94#include <linux/irqdomain.h>
95
94struct device_node; 96struct device_node;
95 97
96extern struct irq_chip gic_arch_extn; 98extern struct irq_chip gic_arch_extn;
@@ -106,6 +108,8 @@ static inline void gic_init(unsigned int nr, int start,
106 gic_init_bases(nr, start, dist, cpu, 0, NULL); 108 gic_init_bases(nr, start, dist, cpu, 0, NULL);
107} 109}
108 110
111int gicv2m_of_init(struct device_node *node, struct irq_domain *parent);
112
109void gic_send_sgi(unsigned int cpu_id, unsigned int irq); 113void gic_send_sgi(unsigned int cpu_id, unsigned int irq);
110int gic_get_cpu_id(unsigned int cpu); 114int gic_get_cpu_id(unsigned int cpu);
111void gic_migrate_target(unsigned int new_cpu_id); 115void gic_migrate_target(unsigned int new_cpu_id);