author    Thomas Gleixner <tglx@linutronix.de>  2017-08-31 14:12:51 -0400
committer Thomas Gleixner <tglx@linutronix.de>  2017-08-31 14:12:51 -0400
commit    9fbd7fd28d1a1053325967670915c12b4b246a61
tree      9a462e5af1e90ef6c68b96fff75cad141e1a784f
parent    b33394ba5c0974a578c24b2fecbb91a984da5e09
parent    ae3efabfadea92a7300f57792ebeb24b5d18469f
Merge tag 'irqchip-4.14' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into irq/core
Pull irqchip updates for 4.14 from Marc Zyngier:

 - irqchip-specific part of the monster GICv4 series
 - new UniPhier AIDET irqchip driver
 - new variants of some Freescale MSI widget
 - blanket removal of of_node->full_name in printk
 - random collection of fixes
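The full_name cleanups below all follow one pattern: instead of printing the cached node->full_name string through "%s", the device_node pointer is handed straight to printk's %pOF specifier and the printk core formats the path. A minimal illustrative sketch (hypothetical driver code, not part of this series):

	#include <linux/of.h>
	#include <linux/printk.h>

	static void report_probe(struct device_node *np)
	{
		/* old style: depends on the node's cached full_name string */
		pr_info("%s: probed\n", np->full_name);

		/* new style: let the printk core format the node path itself */
		pr_info("%pOF: probed\n", np);
	}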
Diffstat (limited to 'drivers/irqchip')
-rw-r--r--  drivers/irqchip/Kconfig                   |    8
-rw-r--r--  drivers/irqchip/Makefile                  |    3
-rw-r--r--  drivers/irqchip/irq-armada-370-xp.c       |    2
-rw-r--r--  drivers/irqchip/irq-bcm2835.c             |    9
-rw-r--r--  drivers/irqchip/irq-bcm2836.c             |    5
-rw-r--r--  drivers/irqchip/irq-bcm7120-l2.c          |   10
-rw-r--r--  drivers/irqchip/irq-crossbar.c            |    6
-rw-r--r--  drivers/irqchip/irq-digicolor.c           |    8
-rw-r--r--  drivers/irqchip/irq-dw-apb-ictl.c         |   12
-rw-r--r--  drivers/irqchip/irq-gic-v3-its-pci-msi.c  |    2
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c          | 1489
-rw-r--r--  drivers/irqchip/irq-gic-v3.c              |  106
-rw-r--r--  drivers/irqchip/irq-gic-v4.c              |  225
-rw-r--r--  drivers/irqchip/irq-gic.c                 |    2
-rw-r--r--  drivers/irqchip/irq-imx-gpcv2.c           |    4
-rw-r--r--  drivers/irqchip/irq-lpc32xx.c             |    2
-rw-r--r--  drivers/irqchip/irq-ls-scfg-msi.c         |  256
-rw-r--r--  drivers/irqchip/irq-mmp.c                 |    4
-rw-r--r--  drivers/irqchip/irq-mtk-sysirq.c          |    3
-rw-r--r--  drivers/irqchip/irq-mxs.c                 |    4
-rw-r--r--  drivers/irqchip/irq-stm32-exti.c          |    8
-rw-r--r--  drivers/irqchip/irq-sun4i.c               |    6
-rw-r--r--  drivers/irqchip/irq-tegra.c               |   16
-rw-r--r--  drivers/irqchip/irq-uniphier-aidet.c      |  261
-rw-r--r--  drivers/irqchip/irq-xilinx-intc.c         |    4
25 files changed, 2219 insertions, 236 deletions
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 1139de9da21a..9d8a1dd2e2c2 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -313,3 +313,11 @@ config QCOM_IRQ_COMBINER
 	help
 	  Say yes here to add support for the IRQ combiner devices embedded
 	  in Qualcomm Technologies chips.
+
+config IRQ_UNIPHIER_AIDET
+	bool "UniPhier AIDET support" if COMPILE_TEST
+	depends on ARCH_UNIPHIER || COMPILE_TEST
+	default ARCH_UNIPHIER
+	select IRQ_DOMAIN_HIERARCHY
+	help
+	  Support for the UniPhier AIDET (ARM Interrupt Detector).
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index e88d856cc09c..845abc107ad5 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -28,7 +28,7 @@ obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o
 obj-$(CONFIG_ARCH_REALVIEW)		+= irq-gic-realview.o
 obj-$(CONFIG_ARM_GIC_V2M)		+= irq-gic-v2m.o
 obj-$(CONFIG_ARM_GIC_V3)		+= irq-gic-v3.o irq-gic-common.o
-obj-$(CONFIG_ARM_GIC_V3_ITS)		+= irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o
+obj-$(CONFIG_ARM_GIC_V3_ITS)		+= irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
 obj-$(CONFIG_PARTITION_PERCPU)		+= irq-partition-percpu.o
 obj-$(CONFIG_HISILICON_IRQ_MBIGEN)	+= irq-mbigen.o
 obj-$(CONFIG_ARM_NVIC)			+= irq-nvic.o
@@ -78,3 +78,4 @@ obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o
 obj-$(CONFIG_ARCH_ASPEED)		+= irq-aspeed-vic.o irq-aspeed-i2c-ic.o
 obj-$(CONFIG_STM32_EXTI)		+= irq-stm32-exti.o
 obj-$(CONFIG_QCOM_IRQ_COMBINER)		+= qcom-irq-combiner.o
+obj-$(CONFIG_IRQ_UNIPHIER_AIDET)	+= irq-uniphier-aidet.o
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index eb815676c088..c9bdc5221b82 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -203,7 +203,7 @@ static struct irq_chip armada_370_xp_msi_irq_chip = {
 
 static struct msi_domain_info armada_370_xp_msi_domain_info = {
 	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
-		   MSI_FLAG_MULTI_PCI_MSI),
+		   MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
 	.chip	= &armada_370_xp_msi_irq_chip,
 };
 
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index 44d7c38dde47..d2da8a1e6b1b 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -147,13 +147,12 @@ static int __init armctrl_of_init(struct device_node *node,
 
 	base = of_iomap(node, 0);
 	if (!base)
-		panic("%s: unable to map IC registers\n",
-		      node->full_name);
+		panic("%pOF: unable to map IC registers\n", node);
 
 	intc.domain = irq_domain_add_linear(node, MAKE_HWIRQ(NR_BANKS, 0),
 					    &armctrl_ops, NULL);
 	if (!intc.domain)
-		panic("%s: unable to create IRQ domain\n", node->full_name);
+		panic("%pOF: unable to create IRQ domain\n", node);
 
 	for (b = 0; b < NR_BANKS; b++) {
 		intc.pending[b] = base + reg_pending[b];
@@ -173,8 +172,8 @@ static int __init armctrl_of_init(struct device_node *node,
 		int parent_irq = irq_of_parse_and_map(node, 0);
 
 		if (!parent_irq) {
-			panic("%s: unable to get parent interrupt.\n",
-			      node->full_name);
+			panic("%pOF: unable to get parent interrupt.\n",
+			      node);
 		}
 		irq_set_chained_handler(parent_irq, bcm2836_chained_handle_irq);
 	} else {
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index e7463e3c0814..dc8c1e3eafe7 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -282,8 +282,7 @@ static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
 {
 	intc.base = of_iomap(node, 0);
 	if (!intc.base) {
-		panic("%s: unable to map local interrupt registers\n",
-		      node->full_name);
+		panic("%pOF: unable to map local interrupt registers\n", node);
 	}
 
 	bcm2835_init_local_timer_frequency();
@@ -292,7 +291,7 @@ static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
 					    &bcm2836_arm_irqchip_intc_ops,
 					    NULL);
 	if (!intc.domain)
-		panic("%s: unable to create IRQ domain\n", node->full_name);
+		panic("%pOF: unable to create IRQ domain\n", node);
 
 	bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTPSIRQ,
 					 &bcm2836_arm_irqchip_timer);
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index 64c2692070ef..983640eba418 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -250,12 +250,6 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
 	if (ret < 0)
 		goto out_free_l1_data;
 
-	for (idx = 0; idx < data->n_words; idx++) {
-		__raw_writel(data->irq_fwd_mask[idx],
-			     data->pair_base[idx] +
-			     data->en_offset[idx]);
-	}
-
 	for (irq = 0; irq < data->num_parent_irqs; irq++) {
 		ret = bcm7120_l2_intc_init_one(dn, data, irq, valid_mask);
 		if (ret)
@@ -297,6 +291,10 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
 		gc->reg_base = data->pair_base[idx];
 		ct->regs.mask = data->en_offset[idx];
 
+		/* gc->reg_base is defined and so is gc->writel */
+		irq_reg_writel(gc, data->irq_fwd_mask[idx],
+			       data->en_offset[idx]);
+
 		ct->chip.irq_mask = irq_gc_mask_clr_bit;
 		ct->chip.irq_unmask = irq_gc_mask_set_bit;
 		ct->chip.irq_ack = irq_gc_noop;
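The hunk above replaces the early __raw_writel() loop with a write through the generic-chip accessor once gc->reg_base is known. For reference, irq_reg_writel() behaves roughly as sketched below (a paraphrase of the include/linux/irq.h helper of this era, not part of this patch):

	static inline void irq_reg_writel(struct irq_chip_generic *gc,
					  u32 val, int reg_offset)
	{
		/* honour a chip-specific accessor (e.g. big-endian MMIO)... */
		if (gc->reg_writel)
			gc->reg_writel(val, gc->reg_base + reg_offset);
		else	/* ...otherwise fall back to a plain writel() */
			writel(val, gc->reg_base + reg_offset);
	}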
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index f96601268f71..99d97d7e3fd7 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -341,13 +341,13 @@ static int __init irqcrossbar_init(struct device_node *node,
 	int err;
 
 	if (!parent) {
-		pr_err("%s: no parent, giving up\n", node->full_name);
+		pr_err("%pOF: no parent, giving up\n", node);
 		return -ENODEV;
 	}
 
 	parent_domain = irq_find_host(parent);
 	if (!parent_domain) {
-		pr_err("%s: unable to obtain parent domain\n", node->full_name);
+		pr_err("%pOF: unable to obtain parent domain\n", node);
 		return -ENXIO;
 	}
 
@@ -360,7 +360,7 @@ static int __init irqcrossbar_init(struct device_node *node,
 					  node, &crossbar_domain_ops,
 					  NULL);
 	if (!domain) {
-		pr_err("%s: failed to allocated domain\n", node->full_name);
+		pr_err("%pOF: failed to allocated domain\n", node);
 		return -ENOMEM;
 	}
 
diff --git a/drivers/irqchip/irq-digicolor.c b/drivers/irqchip/irq-digicolor.c
index 3aae015469a5..fc38d2da11b9 100644
--- a/drivers/irqchip/irq-digicolor.c
+++ b/drivers/irqchip/irq-digicolor.c
@@ -78,7 +78,7 @@ static int __init digicolor_of_init(struct device_node *node,
 
 	reg_base = of_iomap(node, 0);
 	if (!reg_base) {
-		pr_err("%s: unable to map IC registers\n", node->full_name);
+		pr_err("%pOF: unable to map IC registers\n", node);
 		return -ENXIO;
 	}
 
@@ -88,7 +88,7 @@ static int __init digicolor_of_init(struct device_node *node,
 
 	ucregs = syscon_regmap_lookup_by_phandle(node, "syscon");
 	if (IS_ERR(ucregs)) {
-		pr_err("%s: unable to map UC registers\n", node->full_name);
+		pr_err("%pOF: unable to map UC registers\n", node);
 		return PTR_ERR(ucregs);
 	}
 	/* channel 1, regular IRQs */
@@ -97,7 +97,7 @@ static int __init digicolor_of_init(struct device_node *node,
 	digicolor_irq_domain =
 		irq_domain_add_linear(node, 64, &irq_generic_chip_ops, NULL);
 	if (!digicolor_irq_domain) {
-		pr_err("%s: unable to create IRQ domain\n", node->full_name);
+		pr_err("%pOF: unable to create IRQ domain\n", node);
 		return -ENOMEM;
 	}
 
@@ -105,7 +105,7 @@ static int __init digicolor_of_init(struct device_node *node,
 					     "digicolor_irq", handle_level_irq,
 					     clr, 0, 0);
 	if (ret) {
-		pr_err("%s: unable to allocate IRQ gc\n", node->full_name);
+		pr_err("%pOF: unable to allocate IRQ gc\n", node);
 		return ret;
 	}
 
diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c
index 052f266364c0..0a19618ce2c8 100644
--- a/drivers/irqchip/irq-dw-apb-ictl.c
+++ b/drivers/irqchip/irq-dw-apb-ictl.c
@@ -79,24 +79,24 @@ static int __init dw_apb_ictl_init(struct device_node *np,
 	/* Map the parent interrupt for the chained handler */
 	irq = irq_of_parse_and_map(np, 0);
 	if (irq <= 0) {
-		pr_err("%s: unable to parse irq\n", np->full_name);
+		pr_err("%pOF: unable to parse irq\n", np);
 		return -EINVAL;
 	}
 
 	ret = of_address_to_resource(np, 0, &r);
 	if (ret) {
-		pr_err("%s: unable to get resource\n", np->full_name);
+		pr_err("%pOF: unable to get resource\n", np);
 		return ret;
 	}
 
 	if (!request_mem_region(r.start, resource_size(&r), np->full_name)) {
-		pr_err("%s: unable to request mem region\n", np->full_name);
+		pr_err("%pOF: unable to request mem region\n", np);
 		return -ENOMEM;
 	}
 
 	iobase = ioremap(r.start, resource_size(&r));
 	if (!iobase) {
-		pr_err("%s: unable to map resource\n", np->full_name);
+		pr_err("%pOF: unable to map resource\n", np);
 		ret = -ENOMEM;
 		goto err_release;
 	}
@@ -123,7 +123,7 @@ static int __init dw_apb_ictl_init(struct device_node *np,
 	domain = irq_domain_add_linear(np, nrirqs,
 				       &irq_generic_chip_ops, NULL);
 	if (!domain) {
-		pr_err("%s: unable to add irq domain\n", np->full_name);
+		pr_err("%pOF: unable to add irq domain\n", np);
 		ret = -ENOMEM;
 		goto err_unmap;
 	}
@@ -132,7 +132,7 @@ static int __init dw_apb_ictl_init(struct device_node *np,
 					     handle_level_irq, clr, 0,
 					     IRQ_GC_INIT_MASK_CACHE);
 	if (ret) {
-		pr_err("%s: unable to alloc irq domain gc\n", np->full_name);
+		pr_err("%pOF: unable to alloc irq domain gc\n", np);
 		goto err_unmap;
 	}
 
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index 77931214d954..14a8c0a7e095 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -138,7 +138,7 @@ static int __init its_pci_of_msi_init(void)
 		if (its_pci_msi_init_one(of_node_to_fwnode(np), np->full_name))
 			continue;
 
-		pr_info("PCI/MSI: %s domain created\n", np->full_name);
+		pr_info("PCI/MSI: %pOF domain created\n", np);
 	}
 
 	return 0;
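Most of the irq-gic-v3-its.c changes that follow add GICv4 command encoders (its_encode_vpeid(), its_encode_vpt_addr(), ...), all of which go through the driver's existing its_mask_encode() helper. As a rough, self-contained sketch of that bit-field packing (plain C with hypothetical names, not code from the patch):

	#include <stdint.h>

	/* set bits l..h, mirroring the kernel's GENMASK_ULL(h, l) */
	static inline uint64_t genmask_ull(int h, int l)
	{
		return (~0ULL >> (63 - h)) & (~0ULL << l);
	}

	/* clear the field, then insert the new value at bit offset l */
	static inline void mask_encode(uint64_t *raw_cmd, uint64_t val, int h, int l)
	{
		uint64_t mask = genmask_ull(h, l);

		*raw_cmd &= ~mask;
		*raw_cmd |= (val << l) & mask;
	}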
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 22e228500357..578837cdddef 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
+ * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
  * Author: Marc Zyngier <marc.zyngier@arm.com>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -36,6 +36,7 @@
 
 #include <linux/irqchip.h>
 #include <linux/irqchip/arm-gic-v3.h>
+#include <linux/irqchip/arm-gic-v4.h>
 
 #include <asm/cputype.h>
 #include <asm/exception.h>
@@ -48,6 +49,19 @@
 
 #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
 
+static u32 lpi_id_bits;
+
+/*
+ * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
+ * deal with (one configuration byte per interrupt). PENDBASE has to
+ * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
+ */
+#define LPI_NRBITS		lpi_id_bits
+#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
+#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
+
+#define LPI_PROP_DEFAULT_PRIO	0xa0
+
 /*
  * Collection structure - just an ID, and a redistributor address to
  * ping. We use one per CPU as a bag of interrupts assigned to this
@@ -88,6 +102,7 @@ struct its_node {
 	u32			ite_size;
 	u32			device_ids;
 	int			numa_node;
+	bool			is_v4;
 };
 
 #define ITS_ITT_ALIGN		SZ_256
@@ -100,11 +115,17 @@ struct event_lpi_map {
 	u16			*col_map;
 	irq_hw_number_t		lpi_base;
 	int			nr_lpis;
+	struct mutex		vlpi_lock;
+	struct its_vm		*vm;
+	struct its_vlpi_map	*vlpi_maps;
+	int			nr_vlpis;
 };
 
 /*
- * The ITS view of a device - belongs to an ITS, a collection, owns an
- * interrupt translation table, and a list of interrupts.
+ * The ITS view of a device - belongs to an ITS, owns an interrupt
+ * translation table, and a list of interrupts.  If it some of its
+ * LPIs are injected into a guest (GICv4), the event_map.vm field
+ * indicates which one.
  */
 struct its_device {
 	struct list_head	entry;
@@ -115,13 +136,33 @@ struct its_device {
 	u32			device_id;
 };
 
+static struct {
+	raw_spinlock_t		lock;
+	struct its_device	*dev;
+	struct its_vpe		**vpes;
+	int			next_victim;
+} vpe_proxy;
+
 static LIST_HEAD(its_nodes);
 static DEFINE_SPINLOCK(its_lock);
 static struct rdists *gic_rdists;
 static struct irq_domain *its_parent;
 
+/*
+ * We have a maximum number of 16 ITSs in the whole system if we're
+ * using the ITSList mechanism
+ */
+#define ITS_LIST_MAX		16
+
+static unsigned long its_list_map;
+static u16 vmovp_seq_num;
+static DEFINE_RAW_SPINLOCK(vmovp_lock);
+
+static DEFINE_IDA(its_vpeid_ida);
+
 #define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
 #define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
+#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)
 
 static struct its_collection *dev_event_to_col(struct its_device *its_dev,
 					       u32 event)
@@ -145,6 +186,11 @@ struct its_cmd_desc {
 	struct {
 		struct its_device *dev;
 		u32 event_id;
+	} its_clear_cmd;
+
+	struct {
+		struct its_device *dev;
+		u32 event_id;
 	} its_int_cmd;
 
 	struct {
@@ -177,6 +223,38 @@ struct its_cmd_desc {
 	struct {
 		struct its_collection *col;
 	} its_invall_cmd;
+
+	struct {
+		struct its_vpe *vpe;
+	} its_vinvall_cmd;
+
+	struct {
+		struct its_vpe *vpe;
+		struct its_collection *col;
+		bool valid;
+	} its_vmapp_cmd;
+
+	struct {
+		struct its_vpe *vpe;
+		struct its_device *dev;
+		u32 virt_id;
+		u32 event_id;
+		bool db_enabled;
+	} its_vmapti_cmd;
+
+	struct {
+		struct its_vpe *vpe;
+		struct its_device *dev;
+		u32 event_id;
+		bool db_enabled;
+	} its_vmovi_cmd;
+
+	struct {
+		struct its_vpe *vpe;
+		struct its_collection *col;
+		u16 seq_num;
+		u16 its_list;
+	} its_vmovp_cmd;
 	};
 };
 
@@ -193,6 +271,9 @@ struct its_cmd_block {
 typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
 						    struct its_cmd_desc *);
 
+typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_cmd_block *,
+					      struct its_cmd_desc *);
+
 static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
 {
 	u64 mask = GENMASK_ULL(h, l);
@@ -245,6 +326,46 @@ static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
 	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
 }
 
+static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
+{
+	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
+}
+
+static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
+{
+	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
+}
+
+static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
+{
+	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
+}
+
+static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
+{
+	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
+}
+
+static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
+{
+	its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
+}
+
+static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
+{
+	its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
+}
+
+static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
+{
+	its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 50, 16);
+}
+
+static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
+{
+	its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
+}
+
 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
 {
 	/* Let's fixup BE commands */
@@ -358,6 +479,40 @@ static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
 	return col;
 }
 
+static struct its_collection *its_build_int_cmd(struct its_cmd_block *cmd,
+						struct its_cmd_desc *desc)
+{
+	struct its_collection *col;
+
+	col = dev_event_to_col(desc->its_int_cmd.dev,
+			       desc->its_int_cmd.event_id);
+
+	its_encode_cmd(cmd, GITS_CMD_INT);
+	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
+	its_encode_event_id(cmd, desc->its_int_cmd.event_id);
+
+	its_fixup_cmd(cmd);
+
+	return col;
+}
+
+static struct its_collection *its_build_clear_cmd(struct its_cmd_block *cmd,
+						  struct its_cmd_desc *desc)
+{
+	struct its_collection *col;
+
+	col = dev_event_to_col(desc->its_clear_cmd.dev,
+			       desc->its_clear_cmd.event_id);
+
+	its_encode_cmd(cmd, GITS_CMD_CLEAR);
+	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
+	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
+
+	its_fixup_cmd(cmd);
+
+	return col;
+}
+
 static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
 						   struct its_cmd_desc *desc)
 {
@@ -369,6 +524,94 @@ static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
 	return NULL;
 }
 
+static struct its_vpe *its_build_vinvall_cmd(struct its_cmd_block *cmd,
+					     struct its_cmd_desc *desc)
+{
+	its_encode_cmd(cmd, GITS_CMD_VINVALL);
+	its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
+
+	its_fixup_cmd(cmd);
+
+	return desc->its_vinvall_cmd.vpe;
+}
+
+static struct its_vpe *its_build_vmapp_cmd(struct its_cmd_block *cmd,
+					   struct its_cmd_desc *desc)
+{
+	unsigned long vpt_addr;
+
+	vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
+
+	its_encode_cmd(cmd, GITS_CMD_VMAPP);
+	its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
+	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
+	its_encode_target(cmd, desc->its_vmapp_cmd.col->target_address);
+	its_encode_vpt_addr(cmd, vpt_addr);
+	its_encode_vpt_size(cmd, LPI_NRBITS - 1);
+
+	its_fixup_cmd(cmd);
+
+	return desc->its_vmapp_cmd.vpe;
+}
+
+static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd,
+					    struct its_cmd_desc *desc)
+{
+	u32 db;
+
+	if (desc->its_vmapti_cmd.db_enabled)
+		db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
+	else
+		db = 1023;
+
+	its_encode_cmd(cmd, GITS_CMD_VMAPTI);
+	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
+	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
+	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
+	its_encode_db_phys_id(cmd, db);
+	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
+
+	its_fixup_cmd(cmd);
+
+	return desc->its_vmapti_cmd.vpe;
+}
+
+static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd,
+					   struct its_cmd_desc *desc)
+{
+	u32 db;
+
+	if (desc->its_vmovi_cmd.db_enabled)
+		db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
+	else
+		db = 1023;
+
+	its_encode_cmd(cmd, GITS_CMD_VMOVI);
+	its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
+	its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
+	its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
+	its_encode_db_phys_id(cmd, db);
+	its_encode_db_valid(cmd, true);
+
+	its_fixup_cmd(cmd);
+
+	return desc->its_vmovi_cmd.vpe;
+}
+
+static struct its_vpe *its_build_vmovp_cmd(struct its_cmd_block *cmd,
+					   struct its_cmd_desc *desc)
+{
+	its_encode_cmd(cmd, GITS_CMD_VMOVP);
+	its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
+	its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
+	its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
+	its_encode_target(cmd, desc->its_vmovp_cmd.col->target_address);
+
+	its_fixup_cmd(cmd);
+
+	return desc->its_vmovp_cmd.vpe;
+}
+
 static u64 its_cmd_ptr_to_offset(struct its_node *its,
 				 struct its_cmd_block *ptr)
 {
@@ -453,7 +696,13 @@ static void its_wait_for_range_completion(struct its_node *its,
 
 	while (1) {
 		rd_idx = readl_relaxed(its->base + GITS_CREADR);
-		if (rd_idx >= to_idx || rd_idx < from_idx)
+
+		/* Direct case */
+		if (from_idx < to_idx && rd_idx >= to_idx)
+			break;
+
+		/* Wrapped case */
+		if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
 			break;
 
 		count--;
@@ -466,42 +715,84 @@ static void its_wait_for_range_completion(struct its_node *its,
 	}
 }
 
-static void its_send_single_command(struct its_node *its,
-				    its_cmd_builder_t builder,
-				    struct its_cmd_desc *desc)
+/* Warning, macro hell follows */
+#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)	\
+void name(struct its_node *its,						\
+	  buildtype builder,						\
+	  struct its_cmd_desc *desc)					\
+{									\
+	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;		\
+	synctype *sync_obj;						\
+	unsigned long flags;						\
+									\
+	raw_spin_lock_irqsave(&its->lock, flags);			\
+									\
+	cmd = its_allocate_entry(its);					\
+	if (!cmd) {		/* We're soooooo screewed... */		\
+		raw_spin_unlock_irqrestore(&its->lock, flags);		\
+		return;							\
+	}								\
+	sync_obj = builder(cmd, desc);					\
+	its_flush_cmd(its, cmd);					\
+									\
+	if (sync_obj) {							\
+		sync_cmd = its_allocate_entry(its);			\
+		if (!sync_cmd)						\
+			goto post;					\
+									\
+		buildfn(sync_cmd, sync_obj);				\
+		its_flush_cmd(its, sync_cmd);				\
+	}								\
+									\
+post:									\
+	next_cmd = its_post_commands(its);				\
+	raw_spin_unlock_irqrestore(&its->lock, flags);			\
+									\
+	its_wait_for_range_completion(its, cmd, next_cmd);		\
+}
+
+static void its_build_sync_cmd(struct its_cmd_block *sync_cmd,
+			       struct its_collection *sync_col)
+{
+	its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
+	its_encode_target(sync_cmd, sync_col->target_address);
+
+	its_fixup_cmd(sync_cmd);
+}
+
+static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
+			     struct its_collection, its_build_sync_cmd)
+
+static void its_build_vsync_cmd(struct its_cmd_block *sync_cmd,
+				struct its_vpe *sync_vpe)
+{
+	its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
+	its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
+
+	its_fixup_cmd(sync_cmd);
+}
+
+static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
+			     struct its_vpe, its_build_vsync_cmd)
+
+static void its_send_int(struct its_device *dev, u32 event_id)
 {
-	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;
-	struct its_collection *sync_col;
-	unsigned long flags;
+	struct its_cmd_desc desc;
 
-	raw_spin_lock_irqsave(&its->lock, flags);
+	desc.its_int_cmd.dev = dev;
+	desc.its_int_cmd.event_id = event_id;
 
-	cmd = its_allocate_entry(its);
-	if (!cmd) {		/* We're soooooo screewed... */
-		pr_err_ratelimited("ITS can't allocate, dropping command\n");
-		raw_spin_unlock_irqrestore(&its->lock, flags);
-		return;
-	}
-	sync_col = builder(cmd, desc);
-	its_flush_cmd(its, cmd);
+	its_send_single_command(dev->its, its_build_int_cmd, &desc);
+}
 
-	if (sync_col) {
-		sync_cmd = its_allocate_entry(its);
-		if (!sync_cmd) {
-			pr_err_ratelimited("ITS can't SYNC, skipping\n");
-			goto post;
-		}
-		its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
-		its_encode_target(sync_cmd, sync_col->target_address);
-		its_fixup_cmd(sync_cmd);
-		its_flush_cmd(its, sync_cmd);
-	}
+static void its_send_clear(struct its_device *dev, u32 event_id)
+{
+	struct its_cmd_desc desc;
 
-post:
-	next_cmd = its_post_commands(its);
-	raw_spin_unlock_irqrestore(&its->lock, flags);
+	desc.its_clear_cmd.dev = dev;
+	desc.its_clear_cmd.event_id = event_id;
 
-	its_wait_for_range_completion(its, cmd, next_cmd);
+	its_send_single_command(dev->its, its_build_clear_cmd, &desc);
 }
 
 static void its_send_inv(struct its_device *dev, u32 event_id)
@@ -577,6 +868,106 @@ static void its_send_invall(struct its_node *its, struct its_collection *col)
 	its_send_single_command(its, its_build_invall_cmd, &desc);
 }
 
+static void its_send_vmapti(struct its_device *dev, u32 id)
+{
+	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
+	struct its_cmd_desc desc;
+
+	desc.its_vmapti_cmd.vpe = map->vpe;
+	desc.its_vmapti_cmd.dev = dev;
+	desc.its_vmapti_cmd.virt_id = map->vintid;
+	desc.its_vmapti_cmd.event_id = id;
+	desc.its_vmapti_cmd.db_enabled = map->db_enabled;
+
+	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
+}
+
+static void its_send_vmovi(struct its_device *dev, u32 id)
+{
+	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
+	struct its_cmd_desc desc;
+
+	desc.its_vmovi_cmd.vpe = map->vpe;
+	desc.its_vmovi_cmd.dev = dev;
+	desc.its_vmovi_cmd.event_id = id;
+	desc.its_vmovi_cmd.db_enabled = map->db_enabled;
+
+	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
+}
+
+static void its_send_vmapp(struct its_vpe *vpe, bool valid)
+{
+	struct its_cmd_desc desc;
+	struct its_node *its;
+
+	desc.its_vmapp_cmd.vpe = vpe;
+	desc.its_vmapp_cmd.valid = valid;
+
+	list_for_each_entry(its, &its_nodes, entry) {
+		if (!its->is_v4)
+			continue;
+
+		desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
+		its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
+	}
+}
+
+static void its_send_vmovp(struct its_vpe *vpe)
+{
+	struct its_cmd_desc desc;
+	struct its_node *its;
+	unsigned long flags;
+	int col_id = vpe->col_idx;
+
+	desc.its_vmovp_cmd.vpe = vpe;
+	desc.its_vmovp_cmd.its_list = (u16)its_list_map;
+
+	if (!its_list_map) {
+		its = list_first_entry(&its_nodes, struct its_node, entry);
+		desc.its_vmovp_cmd.seq_num = 0;
+		desc.its_vmovp_cmd.col = &its->collections[col_id];
+		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
+		return;
+	}
+
+	/*
+	 * Yet another marvel of the architecture. If using the
+	 * its_list "feature", we need to make sure that all ITSs
+	 * receive all VMOVP commands in the same order. The only way
+	 * to guarantee this is to make vmovp a serialization point.
+	 *
+	 * Wall <-- Head.
+	 */
+	raw_spin_lock_irqsave(&vmovp_lock, flags);
+
+	desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
+
+	/* Emit VMOVPs */
+	list_for_each_entry(its, &its_nodes, entry) {
+		if (!its->is_v4)
+			continue;
+
+		desc.its_vmovp_cmd.col = &its->collections[col_id];
+		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
+	}
+
+	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
+}
+
+static void its_send_vinvall(struct its_vpe *vpe)
+{
+	struct its_cmd_desc desc;
+	struct its_node *its;
+
+	desc.its_vinvall_cmd.vpe = vpe;
+
+	list_for_each_entry(its, &its_nodes, entry) {
+		if (!its->is_v4)
+			continue;
+		its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
+	}
+}
+
 /*
  * irqchip functions - assumes MSI, mostly.
  */
@@ -587,17 +978,26 @@ static inline u32 its_get_event_id(struct irq_data *d)
 	return d->hwirq - its_dev->event_map.lpi_base;
 }
 
-static void lpi_set_config(struct irq_data *d, bool enable)
+static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
 {
-	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
-	irq_hw_number_t hwirq = d->hwirq;
-	u32 id = its_get_event_id(d);
-	u8 *cfg = page_address(gic_rdists->prop_page) + hwirq - 8192;
+	irq_hw_number_t hwirq;
+	struct page *prop_page;
+	u8 *cfg;
 
-	if (enable)
-		*cfg |= LPI_PROP_ENABLED;
-	else
-		*cfg &= ~LPI_PROP_ENABLED;
+	if (irqd_is_forwarded_to_vcpu(d)) {
+		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+		u32 event = its_get_event_id(d);
+
+		prop_page = its_dev->event_map.vm->vprop_page;
+		hwirq = its_dev->event_map.vlpi_maps[event].vintid;
+	} else {
+		prop_page = gic_rdists->prop_page;
+		hwirq = d->hwirq;
+	}
+
+	cfg = page_address(prop_page) + hwirq - 8192;
+	*cfg &= ~clr;
+	*cfg |= set | LPI_PROP_GROUP1;
 
 	/*
 	 * Make the above write visible to the redistributors.
@@ -608,17 +1008,53 @@ static void lpi_set_config(struct irq_data *d, bool enable)
 		gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
 	else
 		dsb(ishst);
-	its_send_inv(its_dev, id);
+}
+
+static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+	lpi_write_config(d, clr, set);
+	its_send_inv(its_dev, its_get_event_id(d));
+}
+
+static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	u32 event = its_get_event_id(d);
+
+	if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
+		return;
+
+	its_dev->event_map.vlpi_maps[event].db_enabled = enable;
+
+	/*
+	 * More fun with the architecture:
+	 *
+	 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
+	 * value or to 1023, depending on the enable bit. But that
+	 * would be issueing a mapping for an /existing/ DevID+EventID
+	 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
+	 * to the /same/ vPE, using this opportunity to adjust the
+	 * doorbell. Mouahahahaha. We loves it, Precious.
+	 */
+	its_send_vmovi(its_dev, event);
 }
 
 static void its_mask_irq(struct irq_data *d)
 {
-	lpi_set_config(d, false);
+	if (irqd_is_forwarded_to_vcpu(d))
+		its_vlpi_set_doorbell(d, false);
+
+	lpi_update_config(d, LPI_PROP_ENABLED, 0);
 }
 
 static void its_unmask_irq(struct irq_data *d)
 {
-	lpi_set_config(d, true);
+	if (irqd_is_forwarded_to_vcpu(d))
+		its_vlpi_set_doorbell(d, true);
+
+	lpi_update_config(d, 0, LPI_PROP_ENABLED);
 }
 
 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
@@ -630,6 +1066,10 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 	struct its_collection *target_col;
 	u32 id = its_get_event_id(d);
 
+	/* A forwarded interrupt should use irq_set_vcpu_affinity */
+	if (irqd_is_forwarded_to_vcpu(d))
+		return -EINVAL;
+
 	/* lpi cannot be routed to a redistributor that is on a foreign node */
 	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
 		if (its_dev->its->numa_node >= 0) {
@@ -671,6 +1111,179 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
 	iommu_dma_map_msi_msg(d->irq, msg);
 }
 
+static int its_irq_set_irqchip_state(struct irq_data *d,
+				     enum irqchip_irq_state which,
+				     bool state)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	u32 event = its_get_event_id(d);
+
+	if (which != IRQCHIP_STATE_PENDING)
+		return -EINVAL;
+
+	if (state)
+		its_send_int(its_dev, event);
+	else
+		its_send_clear(its_dev, event);
+
+	return 0;
+}
+
+static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	u32 event = its_get_event_id(d);
+	int ret = 0;
+
+	if (!info->map)
+		return -EINVAL;
+
+	mutex_lock(&its_dev->event_map.vlpi_lock);
+
+	if (!its_dev->event_map.vm) {
+		struct its_vlpi_map *maps;
+
+		maps = kzalloc(sizeof(*maps) * its_dev->event_map.nr_lpis,
+			       GFP_KERNEL);
+		if (!maps) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		its_dev->event_map.vm = info->map->vm;
+		its_dev->event_map.vlpi_maps = maps;
+	} else if (its_dev->event_map.vm != info->map->vm) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Get our private copy of the mapping information */
+	its_dev->event_map.vlpi_maps[event] = *info->map;
+
+	if (irqd_is_forwarded_to_vcpu(d)) {
+		/* Already mapped, move it around */
+		its_send_vmovi(its_dev, event);
+	} else {
+		/* Drop the physical mapping */
+		its_send_discard(its_dev, event);
+
+		/* and install the virtual one */
+		its_send_vmapti(its_dev, event);
+		irqd_set_forwarded_to_vcpu(d);
+
+		/* Increment the number of VLPIs */
+		its_dev->event_map.nr_vlpis++;
+	}
+
+out:
+	mutex_unlock(&its_dev->event_map.vlpi_lock);
+	return ret;
+}
+
+static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	u32 event = its_get_event_id(d);
+	int ret = 0;
+
+	mutex_lock(&its_dev->event_map.vlpi_lock);
+
+	if (!its_dev->event_map.vm ||
+	    !its_dev->event_map.vlpi_maps[event].vm) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Copy our mapping information to the incoming request */
+	*info->map = its_dev->event_map.vlpi_maps[event];
+
+out:
+	mutex_unlock(&its_dev->event_map.vlpi_lock);
+	return ret;
+}
+
+static int its_vlpi_unmap(struct irq_data *d)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	u32 event = its_get_event_id(d);
+	int ret = 0;
+
+	mutex_lock(&its_dev->event_map.vlpi_lock);
+
+	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Drop the virtual mapping */
+	its_send_discard(its_dev, event);
+
+	/* and restore the physical one */
+	irqd_clr_forwarded_to_vcpu(d);
+	its_send_mapti(its_dev, d->hwirq, event);
+	lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
+				    LPI_PROP_ENABLED |
+				    LPI_PROP_GROUP1));
+
+	/*
+	 * Drop the refcount and make the device available again if
+	 * this was the last VLPI.
+	 */
+	if (!--its_dev->event_map.nr_vlpis) {
+		its_dev->event_map.vm = NULL;
+		kfree(its_dev->event_map.vlpi_maps);
+	}
+
+out:
+	mutex_unlock(&its_dev->event_map.vlpi_lock);
+	return ret;
+}
+
+static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
+		return -EINVAL;
+
+	if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
+		lpi_update_config(d, 0xff, info->config);
+	else
+		lpi_write_config(d, 0xff, info->config);
+	its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
+
+	return 0;
+}
+
+static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	struct its_cmd_info *info = vcpu_info;
+
+	/* Need a v4 ITS */
+	if (!its_dev->its->is_v4)
+		return -EINVAL;
+
+	/* Unmap request? */
+	if (!info)
+		return its_vlpi_unmap(d);
+
+	switch (info->cmd_type) {
+	case MAP_VLPI:
+		return its_vlpi_map(d, info);
+
+	case GET_VLPI:
+		return its_vlpi_get(d, info);
+
+	case PROP_UPDATE_VLPI:
+	case PROP_UPDATE_AND_INV_VLPI:
+		return its_vlpi_prop_update(d, info);
+
+	default:
+		return -EINVAL;
+	}
+}
+
 static struct irq_chip its_irq_chip = {
 	.name			= "ITS",
 	.irq_mask		= its_mask_irq,
@@ -678,6 +1291,8 @@ static struct irq_chip its_irq_chip = {
 	.irq_eoi		= irq_chip_eoi_parent,
 	.irq_set_affinity	= its_set_affinity,
 	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
+	.irq_set_irqchip_state	= its_irq_set_irqchip_state,
+	.irq_set_vcpu_affinity	= its_irq_set_vcpu_affinity,
 };
 
 /*
@@ -696,7 +1311,6 @@ static struct irq_chip its_irq_chip = {
 
 static unsigned long *lpi_bitmap;
 static u32 lpi_chunks;
-static u32 lpi_id_bits;
 static DEFINE_SPINLOCK(lpi_lock);
 
 static int its_lpi_to_chunk(int lpi)
@@ -767,16 +1381,15 @@ out:
 	return bitmap;
 }
 
-static void its_lpi_free(struct event_lpi_map *map)
+static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids)
 {
-	int base = map->lpi_base;
-	int nr_ids = map->nr_lpis;
 	int lpi;
 
 	spin_lock(&lpi_lock);
 
 	for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
 		int chunk = its_lpi_to_chunk(lpi);
+
 		BUG_ON(chunk > lpi_chunks);
 		if (test_bit(chunk, lpi_bitmap)) {
 			clear_bit(chunk, lpi_bitmap);
@@ -787,28 +1400,40 @@ static void its_lpi_free(struct event_lpi_map *map)
 
 	spin_unlock(&lpi_lock);
 
-	kfree(map->lpi_map);
-	kfree(map->col_map);
+	kfree(bitmap);
 }
 
-/*
- * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
- * deal with (one configuration byte per interrupt). PENDBASE has to
- * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
- */
-#define LPI_NRBITS		lpi_id_bits
-#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
-#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
+static struct page *its_allocate_prop_table(gfp_t gfp_flags)
+{
+	struct page *prop_page;
 
-#define LPI_PROP_DEFAULT_PRIO	0xa0
+	prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
+	if (!prop_page)
+		return NULL;
+
+	/* Priority 0xa0, Group-1, disabled */
+	memset(page_address(prop_page),
+	       LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
+	       LPI_PROPBASE_SZ);
+
+	/* Make sure the GIC will observe the written configuration */
+	gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ);
+
+	return prop_page;
+}
+
+static void its_free_prop_table(struct page *prop_page)
+{
+	free_pages((unsigned long)page_address(prop_page),
+		   get_order(LPI_PROPBASE_SZ));
+}
 
 static int __init its_alloc_lpi_tables(void)
 {
 	phys_addr_t paddr;
 
 	lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS);
-	gic_rdists->prop_page = alloc_pages(GFP_NOWAIT,
-					    get_order(LPI_PROPBASE_SZ));
+	gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
 	if (!gic_rdists->prop_page) {
 		pr_err("Failed to allocate PROPBASE\n");
 		return -ENOMEM;
@@ -817,14 +1442,6 @@ static int __init its_alloc_lpi_tables(void)
 	paddr = page_to_phys(gic_rdists->prop_page);
 	pr_info("GIC: using LPI property table @%pa\n", &paddr);
 
-	/* Priority 0xa0, Group-1, disabled */
-	memset(page_address(gic_rdists->prop_page),
-	       LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
-	       LPI_PROPBASE_SZ);
-
-	/* Make sure the GIC will observe the written configuration */
-	gic_flush_dcache_to_poc(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);
-
 	return its_lpi_init(lpi_id_bits);
 }
 
@@ -963,10 +1580,13 @@ retry_baser:
 	return 0;
 }
 
-static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser,
-				   u32 psz, u32 *order)
+static bool its_parse_indirect_baser(struct its_node *its,
+				     struct its_baser *baser,
+				     u32 psz, u32 *order)
 {
-	u64 esz = GITS_BASER_ENTRY_SIZE(its_read_baser(its, baser));
+	u64 tmp = its_read_baser(its, baser);
+	u64 type = GITS_BASER_TYPE(tmp);
+	u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
 	u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
 	u32 ids = its->device_ids;
 	u32 new_order = *order;
@@ -1005,8 +1625,9 @@ static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser
 	if (new_order >= MAX_ORDER) {
 		new_order = MAX_ORDER - 1;
 		ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
-		pr_warn("ITS@%pa: Device Table too large, reduce ids %u->%u\n",
-			&its->phys_base, its->device_ids, ids);
+		pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n",
+			&its->phys_base, its_base_type_string[type],
+			its->device_ids, ids);
 	}
 
 	*order = new_order;
@@ -1054,11 +1675,16 @@ static int its_alloc_tables(struct its_node *its)
 		u32 order = get_order(psz);
 		bool indirect = false;
 
-		if (type == GITS_BASER_TYPE_NONE)
+		switch (type) {
+		case GITS_BASER_TYPE_NONE:
 			continue;
 
-		if (type == GITS_BASER_TYPE_DEVICE)
-			indirect = its_parse_baser_device(its, baser, psz, &order);
+		case GITS_BASER_TYPE_DEVICE:
+		case GITS_BASER_TYPE_VCPU:
+			indirect = its_parse_indirect_baser(its, baser,
+							    psz, &order);
+			break;
+		}
 
 		err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
 		if (err < 0) {
@@ -1085,6 +1711,30 @@ static int its_alloc_collections(struct its_node *its)
 	return 0;
 }
 
+static struct page *its_allocate_pending_table(gfp_t gfp_flags)
+{
+	struct page *pend_page;
+	/*
+	 * The pending pages have to be at least 64kB aligned,
+	 * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
+	 */
+	pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
+				get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
+	if (!pend_page)
+		return NULL;
+
+	/* Make sure the GIC will observe the zero-ed page */
+	gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
+
+	return pend_page;
+}
+
+static void its_free_pending_table(struct page *pt)
+{
+	free_pages((unsigned long)page_address(pt),
+		   get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
+}
+
 static void its_cpu_init_lpis(void)
 {
 	void __iomem *rbase = gic_data_rdist_rd_base();
@@ -1095,21 +1745,14 @@ static void its_cpu_init_lpis(void)
 	pend_page = gic_data_rdist()->pend_page;
 	if (!pend_page) {
 		phys_addr_t paddr;
-		/*
-		 * The pending pages have to be at least 64kB aligned,
-		 * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
-		 */
-		pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO,
-					get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
+
+		pend_page = its_allocate_pending_table(GFP_NOWAIT);
 		if (!pend_page) {
 			pr_err("Failed to allocate PENDBASE for CPU%d\n",
 			       smp_processor_id());
 			return;
 		}
 
-		/* Make sure the GIC will observe the zero-ed page */
-		gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
-
 		paddr = page_to_phys(pend_page);
 		pr_info("CPU%d: using LPI pending table @%pa\n",
 			smp_processor_id(), &paddr);
@@ -1260,26 +1903,19 @@ static struct its_baser *its_get_baser(struct its_node *its, u32 type)
 	return NULL;
 }
 
-static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
+static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
 {
-	struct its_baser *baser;
 	struct page *page;
 	u32 esz, idx;
 	__le64 *table;
 
-	baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
-
-	/* Don't allow device id that exceeds ITS hardware limit */
-	if (!baser)
-		return (ilog2(dev_id) < its->device_ids);
-
 	/* Don't allow device id that exceeds single, flat table limit */
 	esz = GITS_BASER_ENTRY_SIZE(baser->val);
 	if (!(baser->val & GITS_BASER_INDIRECT))
-		return (dev_id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
+		return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
 
 	/* Compute 1st level table index & check if that exceeds table limit */
-	idx = dev_id >> ilog2(baser->psz / esz);
+	idx = id >> ilog2(baser->psz / esz);
 	if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
 		return false;
 
@@ -1308,11 +1944,52 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
 	return true;
 }
 
+static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
+{
+	struct its_baser *baser;
+
+	baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
+
+	/* Don't allow device id that exceeds ITS hardware limit */
+	if (!baser)
+		return (ilog2(dev_id) < its->device_ids);
+
+	return its_alloc_table_entry(baser, dev_id);
+}
+
+static bool its_alloc_vpe_table(u32 vpe_id)
+{
+	struct its_node *its;
+
+	/*
+	 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
+	 * could try and only do it on ITSs corresponding to devices
+	 * that have interrupts targeted at this VPE, but the
+	 * complexity becomes crazy (and you have tons of memory
+	 * anyway, right?).
+	 */
+	list_for_each_entry(its, &its_nodes, entry) {
+		struct its_baser *baser;
+
+		if (!its->is_v4)
+			continue;
+
+		baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
+		if (!baser)
+			return false;
+
+		if (!its_alloc_table_entry(baser, vpe_id))
+			return false;
+	}
+
+	return true;
+}
+
 static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
-					    int nvecs)
+					    int nvecs, bool alloc_lpis)
 {
 	struct its_device *dev;
-	unsigned long *lpi_map;
+	unsigned long *lpi_map = NULL;
 	unsigned long flags;
 	u16 *col_map = NULL;
 	void *itt;
@@ -1334,11 +2011,18 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
 	sz = nr_ites * its->ite_size;
 	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
 	itt = kzalloc(sz, GFP_KERNEL);
-	lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
-	if (lpi_map)
-		col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL);
+	if (alloc_lpis) {
+		lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
+		if (lpi_map)
+			col_map = kzalloc(sizeof(*col_map) * nr_lpis,
+					  GFP_KERNEL);
+	} else {
+		col_map = kzalloc(sizeof(*col_map) * nr_ites, GFP_KERNEL);
+		nr_lpis = 0;
+		lpi_base = 0;
+	}
 
-	if (!dev || !itt || !lpi_map || !col_map) {
+	if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
 		kfree(dev);
 		kfree(itt);
 		kfree(lpi_map);
@@ -1355,6 +2039,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1355 dev->event_map.col_map = col_map; 2039 dev->event_map.col_map = col_map;
1356 dev->event_map.lpi_base = lpi_base; 2040 dev->event_map.lpi_base = lpi_base;
1357 dev->event_map.nr_lpis = nr_lpis; 2041 dev->event_map.nr_lpis = nr_lpis;
2042 mutex_init(&dev->event_map.vlpi_lock);
1358 dev->device_id = dev_id; 2043 dev->device_id = dev_id;
1359 INIT_LIST_HEAD(&dev->entry); 2044 INIT_LIST_HEAD(&dev->entry);
1360 2045
@@ -1413,6 +2098,16 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
1413 msi_info = msi_get_domain_info(domain); 2098 msi_info = msi_get_domain_info(domain);
1414 its = msi_info->data; 2099 its = msi_info->data;
1415 2100
2101 if (!gic_rdists->has_direct_lpi &&
2102 vpe_proxy.dev &&
2103 vpe_proxy.dev->its == its &&
2104 dev_id == vpe_proxy.dev->device_id) {
2105 /* Bad luck. Get yourself a better implementation */
2106 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
2107 dev_id);
2108 return -EINVAL;
2109 }
2110
1416 its_dev = its_find_device(its, dev_id); 2111 its_dev = its_find_device(its, dev_id);
1417 if (its_dev) { 2112 if (its_dev) {
1418 /* 2113 /*
@@ -1424,7 +2119,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
1424 goto out; 2119 goto out;
1425 } 2120 }
1426 2121
1427 its_dev = its_create_device(its, dev_id, nvec); 2122 its_dev = its_create_device(its, dev_id, nvec, true);
1428 if (!its_dev) 2123 if (!its_dev)
1429 return -ENOMEM; 2124 return -ENOMEM;
1430 2125
@@ -1544,7 +2239,10 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
1544 /* If all interrupts have been freed, start mopping the floor */ 2239 /* If all interrupts have been freed, start mopping the floor */
1545 if (bitmap_empty(its_dev->event_map.lpi_map, 2240 if (bitmap_empty(its_dev->event_map.lpi_map,
1546 its_dev->event_map.nr_lpis)) { 2241 its_dev->event_map.nr_lpis)) {
1547 its_lpi_free(&its_dev->event_map); 2242 its_lpi_free_chunks(its_dev->event_map.lpi_map,
2243 its_dev->event_map.lpi_base,
2244 its_dev->event_map.nr_lpis);
2245 kfree(its_dev->event_map.col_map);
1548 2246
1549 /* Unmap device/itt */ 2247 /* Unmap device/itt */
1550 its_send_mapd(its_dev, 0); 2248 its_send_mapd(its_dev, 0);
@@ -1561,6 +2259,451 @@ static const struct irq_domain_ops its_domain_ops = {
1561 .deactivate = its_irq_domain_deactivate, 2259 .deactivate = its_irq_domain_deactivate,
1562}; 2260};
1563 2261
2262/*
2263 * This is insane.
2264 *
2265 * If a GICv4 doesn't implement Direct LPIs (which is extremely
2266 * likely), the only way to perform an invalidate is to use a fake
2267 * device to issue an INV command, implying that the LPI has first
2268 * been mapped to some event on that device. Since this is not exactly
2269 * cheap, we try to keep that mapping around as long as possible, and
2270 * only issue an UNMAP if we're short on available slots.
2271 *
2272 * Broken by design(tm).
2273 */
2274static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
2275{
2276 /* Already unmapped? */
2277 if (vpe->vpe_proxy_event == -1)
2278 return;
2279
2280 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
2281 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
2282
2283 /*
2284 * We don't track empty slots at all, so let's move the
2285 * next_victim pointer if we can quickly reuse that slot
2286 * instead of nuking an existing entry. Not clear that this is
2287 * always a win though, and this might just generate a ripple
2288 * effect... Let's just hope VPEs don't migrate too often.
2289 */
2290 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2291 vpe_proxy.next_victim = vpe->vpe_proxy_event;
2292
2293 vpe->vpe_proxy_event = -1;
2294}
2295
2296static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
2297{
2298 if (!gic_rdists->has_direct_lpi) {
2299 unsigned long flags;
2300
2301 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2302 its_vpe_db_proxy_unmap_locked(vpe);
2303 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2304 }
2305}
2306
2307static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
2308{
2309 /* Already mapped? */
2310 if (vpe->vpe_proxy_event != -1)
2311 return;
2312
2313 /* This slot was already allocated. Kick the other VPE out. */
2314 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2315 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
2316
2317 /* Map the new VPE instead */
2318 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
2319 vpe->vpe_proxy_event = vpe_proxy.next_victim;
2320 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
2321
2322 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
2323 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
2324}
2325
2326static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
2327{
2328 unsigned long flags;
2329 struct its_collection *target_col;
2330
2331 if (gic_rdists->has_direct_lpi) {
2332 void __iomem *rdbase;
2333
2334 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
2335 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2336 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2337 cpu_relax();
2338
2339 return;
2340 }
2341
2342 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2343
2344 its_vpe_db_proxy_map_locked(vpe);
2345
2346 target_col = &vpe_proxy.dev->its->collections[to];
2347 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
2348 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
2349
2350 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2351}
2352
2353static int its_vpe_set_affinity(struct irq_data *d,
2354 const struct cpumask *mask_val,
2355 bool force)
2356{
2357 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2358 int cpu = cpumask_first(mask_val);
2359
2360 /*
2361 * Changing affinity is mega expensive, so let's be as lazy as
2362 * we can and only do it if we really have to. Also, if mapped
2363 * into the proxy device, we need to move the doorbell
2364 * interrupt to its new location.
2365 */
2366 if (vpe->col_idx != cpu) {
2367 int from = vpe->col_idx;
2368
2369 vpe->col_idx = cpu;
2370 its_send_vmovp(vpe);
2371 its_vpe_db_proxy_move(vpe, from, cpu);
2372 }
2373
2374 return IRQ_SET_MASK_OK_DONE;
2375}
2376
2377static void its_vpe_schedule(struct its_vpe *vpe)
2378{
 2379	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2380 u64 val;
2381
2382 /* Schedule the VPE */
2383 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
2384 GENMASK_ULL(51, 12);
2385 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2386 val |= GICR_VPROPBASER_RaWb;
2387 val |= GICR_VPROPBASER_InnerShareable;
2388 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2389
2390 val = virt_to_phys(page_address(vpe->vpt_page)) &
2391 GENMASK_ULL(51, 16);
2392 val |= GICR_VPENDBASER_RaWaWb;
2393 val |= GICR_VPENDBASER_NonShareable;
2394 /*
2395 * There is no good way of finding out if the pending table is
2396 * empty as we can race against the doorbell interrupt very
2397 * easily. So in the end, vpe->pending_last is only an
2398 * indication that the vcpu has something pending, not one
2399 * that the pending table is empty. A good implementation
2400 * would be able to read its coarse map pretty quickly anyway,
2401 * making this a tolerable issue.
2402 */
2403 val |= GICR_VPENDBASER_PendingLast;
2404 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
2405 val |= GICR_VPENDBASER_Valid;
2406 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2407}
2408
2409static void its_vpe_deschedule(struct its_vpe *vpe)
2410{
 2411	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2412 u32 count = 1000000; /* 1s! */
2413 bool clean;
2414 u64 val;
2415
2416 /* We're being scheduled out */
2417 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2418 val &= ~GICR_VPENDBASER_Valid;
2419 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2420
2421 do {
2422 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2423 clean = !(val & GICR_VPENDBASER_Dirty);
2424 if (!clean) {
2425 count--;
2426 cpu_relax();
2427 udelay(1);
2428 }
2429 } while (!clean && count);
2430
2431 if (unlikely(!clean && !count)) {
2432 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
2433 vpe->idai = false;
2434 vpe->pending_last = true;
2435 } else {
2436 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
2437 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
2438 }
2439}
2440
2441static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
2442{
2443 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2444 struct its_cmd_info *info = vcpu_info;
2445
2446 switch (info->cmd_type) {
2447 case SCHEDULE_VPE:
2448 its_vpe_schedule(vpe);
2449 return 0;
2450
2451 case DESCHEDULE_VPE:
2452 its_vpe_deschedule(vpe);
2453 return 0;
2454
2455 case INVALL_VPE:
2456 its_send_vinvall(vpe);
2457 return 0;
2458
2459 default:
2460 return -EINVAL;
2461 }
2462}
2463
2464static void its_vpe_send_cmd(struct its_vpe *vpe,
2465 void (*cmd)(struct its_device *, u32))
2466{
2467 unsigned long flags;
2468
2469 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2470
2471 its_vpe_db_proxy_map_locked(vpe);
2472 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
2473
2474 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2475}
2476
2477static void its_vpe_send_inv(struct irq_data *d)
2478{
2479 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2480
2481 if (gic_rdists->has_direct_lpi) {
2482 void __iomem *rdbase;
2483
2484 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
2485 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR);
2486 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2487 cpu_relax();
2488 } else {
2489 its_vpe_send_cmd(vpe, its_send_inv);
2490 }
2491}
2492
2493static void its_vpe_mask_irq(struct irq_data *d)
2494{
2495 /*
 2496	 * We need to mask the LPI, which is described by the parent
2497 * irq_data. Instead of calling into the parent (which won't
 2498	 * exactly do the right thing), let's simply use the
2499 * parent_data pointer. Yes, I'm naughty.
2500 */
2501 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
2502 its_vpe_send_inv(d);
2503}
2504
2505static void its_vpe_unmask_irq(struct irq_data *d)
2506{
2507 /* Same hack as above... */
2508 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
2509 its_vpe_send_inv(d);
2510}
2511
2512static int its_vpe_set_irqchip_state(struct irq_data *d,
2513 enum irqchip_irq_state which,
2514 bool state)
2515{
2516 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2517
2518 if (which != IRQCHIP_STATE_PENDING)
2519 return -EINVAL;
2520
2521 if (gic_rdists->has_direct_lpi) {
2522 void __iomem *rdbase;
2523
2524 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
2525 if (state) {
2526 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
2527 } else {
2528 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2529 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2530 cpu_relax();
2531 }
2532 } else {
2533 if (state)
2534 its_vpe_send_cmd(vpe, its_send_int);
2535 else
2536 its_vpe_send_cmd(vpe, its_send_clear);
2537 }
2538
2539 return 0;
2540}
2541
2542static struct irq_chip its_vpe_irq_chip = {
2543 .name = "GICv4-vpe",
2544 .irq_mask = its_vpe_mask_irq,
2545 .irq_unmask = its_vpe_unmask_irq,
2546 .irq_eoi = irq_chip_eoi_parent,
2547 .irq_set_affinity = its_vpe_set_affinity,
2548 .irq_set_irqchip_state = its_vpe_set_irqchip_state,
2549 .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
2550};
2551
2552static int its_vpe_id_alloc(void)
2553{
2554 return ida_simple_get(&its_vpeid_ida, 0, 1 << 16, GFP_KERNEL);
2555}
2556
2557static void its_vpe_id_free(u16 id)
2558{
2559 ida_simple_remove(&its_vpeid_ida, id);
2560}
2561
2562static int its_vpe_init(struct its_vpe *vpe)
2563{
2564 struct page *vpt_page;
2565 int vpe_id;
2566
2567 /* Allocate vpe_id */
2568 vpe_id = its_vpe_id_alloc();
2569 if (vpe_id < 0)
2570 return vpe_id;
2571
2572 /* Allocate VPT */
2573 vpt_page = its_allocate_pending_table(GFP_KERNEL);
2574 if (!vpt_page) {
2575 its_vpe_id_free(vpe_id);
2576 return -ENOMEM;
2577 }
2578
2579 if (!its_alloc_vpe_table(vpe_id)) {
2580 its_vpe_id_free(vpe_id);
2581 its_free_pending_table(vpe->vpt_page);
2582 return -ENOMEM;
2583 }
2584
2585 vpe->vpe_id = vpe_id;
2586 vpe->vpt_page = vpt_page;
2587 vpe->vpe_proxy_event = -1;
2588
2589 return 0;
2590}
2591
2592static void its_vpe_teardown(struct its_vpe *vpe)
2593{
2594 its_vpe_db_proxy_unmap(vpe);
2595 its_vpe_id_free(vpe->vpe_id);
2596 its_free_pending_table(vpe->vpt_page);
2597}
2598
2599static void its_vpe_irq_domain_free(struct irq_domain *domain,
2600 unsigned int virq,
2601 unsigned int nr_irqs)
2602{
2603 struct its_vm *vm = domain->host_data;
2604 int i;
2605
2606 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2607
2608 for (i = 0; i < nr_irqs; i++) {
2609 struct irq_data *data = irq_domain_get_irq_data(domain,
2610 virq + i);
2611 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
2612
2613 BUG_ON(vm != vpe->its_vm);
2614
2615 clear_bit(data->hwirq, vm->db_bitmap);
2616 its_vpe_teardown(vpe);
2617 irq_domain_reset_irq_data(data);
2618 }
2619
2620 if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
2621 its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
2622 its_free_prop_table(vm->vprop_page);
2623 }
2624}
2625
2626static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2627 unsigned int nr_irqs, void *args)
2628{
2629 struct its_vm *vm = args;
2630 unsigned long *bitmap;
2631 struct page *vprop_page;
2632 int base, nr_ids, i, err = 0;
2633
2634 BUG_ON(!vm);
2635
2636 bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids);
2637 if (!bitmap)
2638 return -ENOMEM;
2639
2640 if (nr_ids < nr_irqs) {
2641 its_lpi_free_chunks(bitmap, base, nr_ids);
2642 return -ENOMEM;
2643 }
2644
2645 vprop_page = its_allocate_prop_table(GFP_KERNEL);
2646 if (!vprop_page) {
2647 its_lpi_free_chunks(bitmap, base, nr_ids);
2648 return -ENOMEM;
2649 }
2650
2651 vm->db_bitmap = bitmap;
2652 vm->db_lpi_base = base;
2653 vm->nr_db_lpis = nr_ids;
2654 vm->vprop_page = vprop_page;
2655
2656 for (i = 0; i < nr_irqs; i++) {
2657 vm->vpes[i]->vpe_db_lpi = base + i;
2658 err = its_vpe_init(vm->vpes[i]);
2659 if (err)
2660 break;
2661 err = its_irq_gic_domain_alloc(domain, virq + i,
2662 vm->vpes[i]->vpe_db_lpi);
2663 if (err)
2664 break;
2665 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
2666 &its_vpe_irq_chip, vm->vpes[i]);
2667 set_bit(i, bitmap);
2668 }
2669
2670 if (err) {
2671 if (i > 0)
 2672			its_vpe_irq_domain_free(domain, virq, i);
2673
2674 its_lpi_free_chunks(bitmap, base, nr_ids);
2675 its_free_prop_table(vprop_page);
2676 }
2677
2678 return err;
2679}
2680
2681static void its_vpe_irq_domain_activate(struct irq_domain *domain,
2682 struct irq_data *d)
2683{
2684 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2685
2686 /* Map the VPE to the first possible CPU */
2687 vpe->col_idx = cpumask_first(cpu_online_mask);
2688 its_send_vmapp(vpe, true);
2689 its_send_vinvall(vpe);
2690}
2691
2692static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
2693 struct irq_data *d)
2694{
2695 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2696
2697 its_send_vmapp(vpe, false);
2698}
2699
2700static const struct irq_domain_ops its_vpe_domain_ops = {
2701 .alloc = its_vpe_irq_domain_alloc,
2702 .free = its_vpe_irq_domain_free,
2703 .activate = its_vpe_irq_domain_activate,
2704 .deactivate = its_vpe_irq_domain_deactivate,
2705};
2706
1564static int its_force_quiescent(void __iomem *base) 2707static int its_force_quiescent(void __iomem *base)
1565{ 2708{
1566 u32 count = 1000000; /* 1s */ 2709 u32 count = 1000000; /* 1s */
@@ -1576,7 +2719,7 @@ static int its_force_quiescent(void __iomem *base)
1576 return 0; 2719 return 0;
1577 2720
1578 /* Disable the generation of all interrupts to this ITS */ 2721 /* Disable the generation of all interrupts to this ITS */
1579 val &= ~GITS_CTLR_ENABLE; 2722 val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
1580 writel_relaxed(val, base + GITS_CTLR); 2723 writel_relaxed(val, base + GITS_CTLR);
1581 2724
1582 /* Poll GITS_CTLR and wait until ITS becomes quiescent */ 2725 /* Poll GITS_CTLR and wait until ITS becomes quiescent */
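
The SCHEDULE_VPE/DESCHEDULE_VPE/INVALL_VPE multiplexer added in the hunk above is only reachable through irq_set_vcpu_affinity() on a vPE's doorbell interrupt. A minimal, hedged sketch of a caller (the new irq-gic-v4.c layer later in this patch does essentially this; the example_ name is invented):

#include <linux/interrupt.h>
#include <linux/irqchip/arm-gic-v4.h>

/* Sketch only: ends up in its_vpe_set_vcpu_affinity() via the irq core. */
static int example_make_vpe_resident(struct its_vpe *vpe)
{
	struct its_cmd_info info = {
		.cmd_type = SCHEDULE_VPE,
	};

	return irq_set_vcpu_affinity(vpe->irq, &info);
}
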
@@ -1677,13 +2820,92 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
1677 return 0; 2820 return 0;
1678} 2821}
1679 2822
2823static int its_init_vpe_domain(void)
2824{
2825 struct its_node *its;
2826 u32 devid;
2827 int entries;
2828
2829 if (gic_rdists->has_direct_lpi) {
2830 pr_info("ITS: Using DirectLPI for VPE invalidation\n");
2831 return 0;
2832 }
2833
2834 /* Any ITS will do, even if not v4 */
2835 its = list_first_entry(&its_nodes, struct its_node, entry);
2836
2837 entries = roundup_pow_of_two(nr_cpu_ids);
2838 vpe_proxy.vpes = kzalloc(sizeof(*vpe_proxy.vpes) * entries,
2839 GFP_KERNEL);
2840 if (!vpe_proxy.vpes) {
2841 pr_err("ITS: Can't allocate GICv4 proxy device array\n");
2842 return -ENOMEM;
2843 }
2844
2845 /* Use the last possible DevID */
2846 devid = GENMASK(its->device_ids - 1, 0);
2847 vpe_proxy.dev = its_create_device(its, devid, entries, false);
2848 if (!vpe_proxy.dev) {
2849 kfree(vpe_proxy.vpes);
2850 pr_err("ITS: Can't allocate GICv4 proxy device\n");
2851 return -ENOMEM;
2852 }
2853
2854 BUG_ON(entries != vpe_proxy.dev->nr_ites);
2855
2856 raw_spin_lock_init(&vpe_proxy.lock);
2857 vpe_proxy.next_victim = 0;
2858 pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
2859 devid, vpe_proxy.dev->nr_ites);
2860
2861 return 0;
2862}
2863
2864static int __init its_compute_its_list_map(struct resource *res,
2865 void __iomem *its_base)
2866{
2867 int its_number;
2868 u32 ctlr;
2869
2870 /*
2871 * This is assumed to be done early enough that we're
2872 * guaranteed to be single-threaded, hence no
2873 * locking. Should this change, we should address
2874 * this.
2875 */
2876 its_number = find_first_zero_bit(&its_list_map, ITS_LIST_MAX);
2877 if (its_number >= ITS_LIST_MAX) {
2878 pr_err("ITS@%pa: No ITSList entry available!\n",
2879 &res->start);
2880 return -EINVAL;
2881 }
2882
2883 ctlr = readl_relaxed(its_base + GITS_CTLR);
2884 ctlr &= ~GITS_CTLR_ITS_NUMBER;
2885 ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
2886 writel_relaxed(ctlr, its_base + GITS_CTLR);
2887 ctlr = readl_relaxed(its_base + GITS_CTLR);
2888 if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
2889 its_number = ctlr & GITS_CTLR_ITS_NUMBER;
2890 its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
2891 }
2892
2893 if (test_and_set_bit(its_number, &its_list_map)) {
2894 pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
2895 &res->start, its_number);
2896 return -EINVAL;
2897 }
2898
2899 return its_number;
2900}
2901
1680static int __init its_probe_one(struct resource *res, 2902static int __init its_probe_one(struct resource *res,
1681 struct fwnode_handle *handle, int numa_node) 2903 struct fwnode_handle *handle, int numa_node)
1682{ 2904{
1683 struct its_node *its; 2905 struct its_node *its;
1684 void __iomem *its_base; 2906 void __iomem *its_base;
1685 u32 val; 2907 u32 val, ctlr;
1686 u64 baser, tmp; 2908 u64 baser, tmp, typer;
1687 int err; 2909 int err;
1688 2910
1689 its_base = ioremap(res->start, resource_size(res)); 2911 its_base = ioremap(res->start, resource_size(res));
@@ -1716,9 +2938,24 @@ static int __init its_probe_one(struct resource *res,
1716 raw_spin_lock_init(&its->lock); 2938 raw_spin_lock_init(&its->lock);
1717 INIT_LIST_HEAD(&its->entry); 2939 INIT_LIST_HEAD(&its->entry);
1718 INIT_LIST_HEAD(&its->its_device_list); 2940 INIT_LIST_HEAD(&its->its_device_list);
2941 typer = gic_read_typer(its_base + GITS_TYPER);
1719 its->base = its_base; 2942 its->base = its_base;
1720 its->phys_base = res->start; 2943 its->phys_base = res->start;
1721 its->ite_size = ((gic_read_typer(its_base + GITS_TYPER) >> 4) & 0xf) + 1; 2944 its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
2945 its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
2946 if (its->is_v4) {
2947 if (!(typer & GITS_TYPER_VMOVP)) {
2948 err = its_compute_its_list_map(res, its_base);
2949 if (err < 0)
2950 goto out_free_its;
2951
2952 pr_info("ITS@%pa: Using ITS number %d\n",
2953 &res->start, err);
2954 } else {
2955 pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
2956 }
2957 }
2958
1722 its->numa_node = numa_node; 2959 its->numa_node = numa_node;
1723 2960
1724 its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2961 its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
@@ -1765,7 +3002,11 @@ static int __init its_probe_one(struct resource *res,
1765 } 3002 }
1766 3003
1767 gits_write_cwriter(0, its->base + GITS_CWRITER); 3004 gits_write_cwriter(0, its->base + GITS_CWRITER);
1768 writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR); 3005 ctlr = readl_relaxed(its->base + GITS_CTLR);
3006 ctlr |= GITS_CTLR_ENABLE;
3007 if (its->is_v4)
3008 ctlr |= GITS_CTLR_ImDe;
3009 writel_relaxed(ctlr, its->base + GITS_CTLR);
1769 3010
1770 err = its_init_domain(handle, its); 3011 err = its_init_domain(handle, its);
1771 if (err) 3012 if (err)
@@ -1821,13 +3062,13 @@ static int __init its_of_probe(struct device_node *node)
1821 for (np = of_find_matching_node(node, its_device_id); np; 3062 for (np = of_find_matching_node(node, its_device_id); np;
1822 np = of_find_matching_node(np, its_device_id)) { 3063 np = of_find_matching_node(np, its_device_id)) {
1823 if (!of_property_read_bool(np, "msi-controller")) { 3064 if (!of_property_read_bool(np, "msi-controller")) {
1824 pr_warn("%s: no msi-controller property, ITS ignored\n", 3065 pr_warn("%pOF: no msi-controller property, ITS ignored\n",
1825 np->full_name); 3066 np);
1826 continue; 3067 continue;
1827 } 3068 }
1828 3069
1829 if (of_address_to_resource(np, 0, &res)) { 3070 if (of_address_to_resource(np, 0, &res)) {
1830 pr_warn("%s: no regs?\n", np->full_name); 3071 pr_warn("%pOF: no regs?\n", np);
1831 continue; 3072 continue;
1832 } 3073 }
1833 3074
@@ -1965,6 +3206,9 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
1965 struct irq_domain *parent_domain) 3206 struct irq_domain *parent_domain)
1966{ 3207{
1967 struct device_node *of_node; 3208 struct device_node *of_node;
3209 struct its_node *its;
3210 bool has_v4 = false;
3211 int err;
1968 3212
1969 its_parent = parent_domain; 3213 its_parent = parent_domain;
1970 of_node = to_of_node(handle); 3214 of_node = to_of_node(handle);
@@ -1979,5 +3223,20 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
1979 } 3223 }
1980 3224
1981 gic_rdists = rdists; 3225 gic_rdists = rdists;
1982 return its_alloc_lpi_tables(); 3226 err = its_alloc_lpi_tables();
3227 if (err)
3228 return err;
3229
3230 list_for_each_entry(its, &its_nodes, entry)
3231 has_v4 |= its->is_v4;
3232
 3233	if (has_v4 && rdists->has_vlpis) {
3234 if (its_init_vpe_domain() ||
3235 its_init_v4(parent_domain, &its_vpe_domain_ops)) {
3236 rdists->has_vlpis = false;
3237 pr_err("ITS: Disabling GICv4 support\n");
3238 }
3239 }
3240
3241 return 0;
1983} 3242}
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 511c290c4a26..985eb04698ed 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved. 2 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
3 * Author: Marc Zyngier <marc.zyngier@arm.com> 3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
@@ -421,24 +421,14 @@ static void __init gic_dist_init(void)
421 gic_write_irouter(affinity, base + GICD_IROUTER + i * 8); 421 gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
422} 422}
423 423
424static int gic_populate_rdist(void) 424static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
425{ 425{
426 unsigned long mpidr = cpu_logical_map(smp_processor_id()); 426 int ret = -ENODEV;
427 u64 typer;
428 u32 aff;
429 int i; 427 int i;
430 428
431 /*
432 * Convert affinity to a 32bit value that can be matched to
433 * GICR_TYPER bits [63:32].
434 */
435 aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
436 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
437 MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
438 MPIDR_AFFINITY_LEVEL(mpidr, 0));
439
440 for (i = 0; i < gic_data.nr_redist_regions; i++) { 429 for (i = 0; i < gic_data.nr_redist_regions; i++) {
441 void __iomem *ptr = gic_data.redist_regions[i].redist_base; 430 void __iomem *ptr = gic_data.redist_regions[i].redist_base;
431 u64 typer;
442 u32 reg; 432 u32 reg;
443 433
444 reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK; 434 reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
@@ -450,15 +440,9 @@ static int gic_populate_rdist(void)
450 440
451 do { 441 do {
452 typer = gic_read_typer(ptr + GICR_TYPER); 442 typer = gic_read_typer(ptr + GICR_TYPER);
453 if ((typer >> 32) == aff) { 443 ret = fn(gic_data.redist_regions + i, ptr);
454 u64 offset = ptr - gic_data.redist_regions[i].redist_base; 444 if (!ret)
455 gic_data_rdist_rd_base() = ptr;
456 gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
457 pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
458 smp_processor_id(), mpidr, i,
459 &gic_data_rdist()->phys_base);
460 return 0; 445 return 0;
461 }
462 446
463 if (gic_data.redist_regions[i].single_redist) 447 if (gic_data.redist_regions[i].single_redist)
464 break; 448 break;
@@ -473,12 +457,71 @@ static int gic_populate_rdist(void)
473 } while (!(typer & GICR_TYPER_LAST)); 457 } while (!(typer & GICR_TYPER_LAST));
474 } 458 }
475 459
460 return ret ? -ENODEV : 0;
461}
462
463static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
464{
465 unsigned long mpidr = cpu_logical_map(smp_processor_id());
466 u64 typer;
467 u32 aff;
468
469 /*
470 * Convert affinity to a 32bit value that can be matched to
471 * GICR_TYPER bits [63:32].
472 */
473 aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
474 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
475 MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
476 MPIDR_AFFINITY_LEVEL(mpidr, 0));
477
478 typer = gic_read_typer(ptr + GICR_TYPER);
479 if ((typer >> 32) == aff) {
480 u64 offset = ptr - region->redist_base;
481 gic_data_rdist_rd_base() = ptr;
482 gic_data_rdist()->phys_base = region->phys_base + offset;
483
484 pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
485 smp_processor_id(), mpidr,
486 (int)(region - gic_data.redist_regions),
487 &gic_data_rdist()->phys_base);
488 return 0;
489 }
490
491 /* Try next one */
492 return 1;
493}
494
495static int gic_populate_rdist(void)
496{
497 if (gic_iterate_rdists(__gic_populate_rdist) == 0)
498 return 0;
499
476 /* We couldn't even deal with ourselves... */ 500 /* We couldn't even deal with ourselves... */
477 WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n", 501 WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
478 smp_processor_id(), mpidr); 502 smp_processor_id(),
503 (unsigned long)cpu_logical_map(smp_processor_id()));
479 return -ENODEV; 504 return -ENODEV;
480} 505}
481 506
507static int __gic_update_vlpi_properties(struct redist_region *region,
508 void __iomem *ptr)
509{
510 u64 typer = gic_read_typer(ptr + GICR_TYPER);
511 gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
512 gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS);
513
514 return 1;
515}
516
517static void gic_update_vlpi_properties(void)
518{
519 gic_iterate_rdists(__gic_update_vlpi_properties);
520 pr_info("%sVLPI support, %sdirect LPI support\n",
521 !gic_data.rdists.has_vlpis ? "no " : "",
522 !gic_data.rdists.has_direct_lpi ? "no " : "");
523}
524
482static void gic_cpu_sys_reg_init(void) 525static void gic_cpu_sys_reg_init(void)
483{ 526{
484 /* 527 /*
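
The iterator introduced above factors the redistributor walk out of gic_populate_rdist(): a callback returning 0 stops the walk (as __gic_populate_rdist() does on an affinity match), while a non-zero return moves on to the next frame (as __gic_update_vlpi_properties() does). A hedged, purely illustrative extra callback:

/* Illustration only: count every redistributor frame in the system. */
static int nr_rdist_frames;

static int __gic_count_rdists(struct redist_region *region, void __iomem *ptr)
{
	nr_rdist_frames++;

	return 1;	/* non-zero: keep iterating */
}

/* Usage (sketch): gic_iterate_rdists(__gic_count_rdists); */
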
@@ -946,6 +989,8 @@ static int __init gic_init_bases(void __iomem *dist_base,
946 gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops, 989 gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
947 &gic_data); 990 &gic_data);
948 gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); 991 gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
992 gic_data.rdists.has_vlpis = true;
993 gic_data.rdists.has_direct_lpi = true;
949 994
950 if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { 995 if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
951 err = -ENOMEM; 996 err = -ENOMEM;
@@ -954,6 +999,8 @@ static int __init gic_init_bases(void __iomem *dist_base,
954 999
955 set_handle_irq(gic_handle_irq); 1000 set_handle_irq(gic_handle_irq);
956 1001
1002 gic_update_vlpi_properties();
1003
957 if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis()) 1004 if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
958 its_init(handle, &gic_data.rdists, gic_data.domain); 1005 its_init(handle, &gic_data.rdists, gic_data.domain);
959 1006
@@ -1060,7 +1107,7 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
1060 if (WARN_ON(cpu == -1)) 1107 if (WARN_ON(cpu == -1))
1061 continue; 1108 continue;
1062 1109
1063 pr_cont("%s[%d] ", cpu_node->full_name, cpu); 1110 pr_cont("%pOF[%d] ", cpu_node, cpu);
1064 1111
1065 cpumask_set_cpu(cpu, &part->mask); 1112 cpumask_set_cpu(cpu, &part->mask);
1066 } 1113 }
@@ -1115,6 +1162,7 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
1115 if (!ret) 1162 if (!ret)
1116 gic_v3_kvm_info.vcpu = r; 1163 gic_v3_kvm_info.vcpu = r;
1117 1164
1165 gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
1118 gic_set_kvm_info(&gic_v3_kvm_info); 1166 gic_set_kvm_info(&gic_v3_kvm_info);
1119} 1167}
1120 1168
@@ -1128,15 +1176,13 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
1128 1176
1129 dist_base = of_iomap(node, 0); 1177 dist_base = of_iomap(node, 0);
1130 if (!dist_base) { 1178 if (!dist_base) {
1131 pr_err("%s: unable to map gic dist registers\n", 1179 pr_err("%pOF: unable to map gic dist registers\n", node);
1132 node->full_name);
1133 return -ENXIO; 1180 return -ENXIO;
1134 } 1181 }
1135 1182
1136 err = gic_validate_dist_version(dist_base); 1183 err = gic_validate_dist_version(dist_base);
1137 if (err) { 1184 if (err) {
1138 pr_err("%s: no distributor detected, giving up\n", 1185 pr_err("%pOF: no distributor detected, giving up\n", node);
1139 node->full_name);
1140 goto out_unmap_dist; 1186 goto out_unmap_dist;
1141 } 1187 }
1142 1188
@@ -1156,8 +1202,7 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
1156 ret = of_address_to_resource(node, 1 + i, &res); 1202 ret = of_address_to_resource(node, 1 + i, &res);
1157 rdist_regs[i].redist_base = of_iomap(node, 1 + i); 1203 rdist_regs[i].redist_base = of_iomap(node, 1 + i);
1158 if (ret || !rdist_regs[i].redist_base) { 1204 if (ret || !rdist_regs[i].redist_base) {
1159 pr_err("%s: couldn't map region %d\n", 1205 pr_err("%pOF: couldn't map region %d\n", node, i);
1160 node->full_name, i);
1161 err = -ENODEV; 1206 err = -ENODEV;
1162 goto out_unmap_rdist; 1207 goto out_unmap_rdist;
1163 } 1208 }
@@ -1411,6 +1456,7 @@ static void __init gic_acpi_setup_kvm_info(void)
1411 vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1; 1456 vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
1412 } 1457 }
1413 1458
1459 gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
1414 gic_set_kvm_info(&gic_v3_kvm_info); 1460 gic_set_kvm_info(&gic_v3_kvm_info);
1415} 1461}
1416 1462
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
new file mode 100644
index 000000000000..2370e6d9e603
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -0,0 +1,225 @@
1/*
2 * Copyright (C) 2016,2017 ARM Limited, All Rights Reserved.
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/interrupt.h>
19#include <linux/irq.h>
20#include <linux/irqdomain.h>
21#include <linux/msi.h>
22#include <linux/sched.h>
23
24#include <linux/irqchip/arm-gic-v4.h>
25
26/*
27 * WARNING: The blurb below assumes that you understand the
28 * intricacies of GICv3, GICv4, and how a guest's view of a GICv3 gets
29 * translated into GICv4 commands. So it effectively targets at most
30 * two individuals. You know who you are.
31 *
32 * The core GICv4 code is designed to *avoid* exposing too much of the
33 * core GIC code (that would in turn leak into the hypervisor code),
34 * and instead provide a hypervisor agnostic interface to the HW (of
35 * course, the astute reader will quickly realize that hypervisor
36 * agnostic actually means KVM-specific - what were you thinking?).
37 *
38 * In order to achieve a modicum of isolation, we try to hide most of
39 * the GICv4 "stuff" behind normal irqchip operations:
40 *
41 * - Any guest-visible VLPI is backed by a Linux interrupt (and a
42 * physical LPI which gets unmapped when the guest maps the
43 * VLPI). This allows the same DevID/EventID pair to be either
44 * mapped to the LPI (host) or the VLPI (guest). Note that this is
45 * exclusive, and you cannot have both.
46 *
47 * - Enabling/disabling a VLPI is done by issuing mask/unmask calls.
48 *
49 * - Guest INT/CLEAR commands are implemented through
50 * irq_set_irqchip_state().
51 *
52 * - The *bizarre* stuff (mapping/unmapping an interrupt to a VLPI, or
53 * issuing an INV after changing a priority) gets shoved into the
54 * irq_set_vcpu_affinity() method. While this is quite horrible
55 * (let's face it, this is the irqchip version of an ioctl), it
56 * confines the crap to a single location. And map/unmap really is
57 * about setting the affinity of a VLPI to a vcpu, so only INV is
58 * majorly out of place. So there.
59 *
60 * A number of commands are simply not provided by this interface, as
61 * they do not make direct sense. For example, MAPD is purely local to
62 * the virtual ITS (because it references a virtual device, and the
63 * physical ITS is still very much in charge of the physical
64 * device). Same goes for things like MAPC (the physical ITS deals
65 * with the actual vPE affinity, and not the braindead concept of
66 * collection). SYNC is not provided either, as each and every command
67 * is followed by a VSYNC. This could be relaxed in the future, should
68 * this be seen as a bottleneck (yes, this means *never*).
69 *
70 * But handling VLPIs is only one side of the job of the GICv4
71 * code. The other (darker) side is to take care of the doorbell
72 * interrupts which are delivered when a VLPI targeting a non-running
73 * vcpu is being made pending.
74 *
75 * The choice made here is that each vcpu (VPE in old northern GICv4
76 * dialect) gets a single doorbell LPI, no matter how many interrupts
77 * are targeting it. This has a nice property, which is that the
78 * interrupt becomes a handle for the VPE, and that the hypervisor
79 * code can manipulate it through the normal interrupt API:
80 *
81 * - VMs (or rather the VM abstraction that matters to the GIC)
82 * contain an irq domain where each interrupt maps to a VPE. In
83 * turn, this domain sits on top of the normal LPI allocator, and a
84 * specially crafted irq_chip implementation.
85 *
86 * - mask/unmask do what is expected on the doorbell interrupt.
87 *
88 * - irq_set_affinity is used to move a VPE from one redistributor to
89 * another.
90 *
91 * - irq_set_vcpu_affinity once again gets hijacked for the purpose of
92 * creating a new sub-API, namely scheduling/descheduling a VPE
93 * (which involves programming GICR_V{PROP,PEND}BASER) and
94 * performing INVALL operations.
95 */
96
97static struct irq_domain *gic_domain;
98static const struct irq_domain_ops *vpe_domain_ops;
99
100int its_alloc_vcpu_irqs(struct its_vm *vm)
101{
102 int vpe_base_irq, i;
103
104 vm->fwnode = irq_domain_alloc_named_id_fwnode("GICv4-vpe",
105 task_pid_nr(current));
106 if (!vm->fwnode)
107 goto err;
108
109 vm->domain = irq_domain_create_hierarchy(gic_domain, 0, vm->nr_vpes,
110 vm->fwnode, vpe_domain_ops,
111 vm);
112 if (!vm->domain)
113 goto err;
114
115 for (i = 0; i < vm->nr_vpes; i++) {
116 vm->vpes[i]->its_vm = vm;
117 vm->vpes[i]->idai = true;
118 }
119
120 vpe_base_irq = __irq_domain_alloc_irqs(vm->domain, -1, vm->nr_vpes,
121 NUMA_NO_NODE, vm,
122 false, NULL);
123 if (vpe_base_irq <= 0)
124 goto err;
125
126 for (i = 0; i < vm->nr_vpes; i++)
127 vm->vpes[i]->irq = vpe_base_irq + i;
128
129 return 0;
130
131err:
132 if (vm->domain)
133 irq_domain_remove(vm->domain);
134 if (vm->fwnode)
135 irq_domain_free_fwnode(vm->fwnode);
136
137 return -ENOMEM;
138}
139
140void its_free_vcpu_irqs(struct its_vm *vm)
141{
142 irq_domain_free_irqs(vm->vpes[0]->irq, vm->nr_vpes);
143 irq_domain_remove(vm->domain);
144 irq_domain_free_fwnode(vm->fwnode);
145}
146
147static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
148{
149 return irq_set_vcpu_affinity(vpe->irq, info);
150}
151
152int its_schedule_vpe(struct its_vpe *vpe, bool on)
153{
154 struct its_cmd_info info;
155
156 WARN_ON(preemptible());
157
158 info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE;
159
160 return its_send_vpe_cmd(vpe, &info);
161}
162
163int its_invall_vpe(struct its_vpe *vpe)
164{
165 struct its_cmd_info info = {
166 .cmd_type = INVALL_VPE,
167 };
168
169 return its_send_vpe_cmd(vpe, &info);
170}
171
172int its_map_vlpi(int irq, struct its_vlpi_map *map)
173{
174 struct its_cmd_info info = {
175 .cmd_type = MAP_VLPI,
176 .map = map,
177 };
178
179 /*
180 * The host will never see that interrupt firing again, so it
181 * is vital that we don't do any lazy masking.
182 */
183 irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
184
185 return irq_set_vcpu_affinity(irq, &info);
186}
187
188int its_get_vlpi(int irq, struct its_vlpi_map *map)
189{
190 struct its_cmd_info info = {
191 .cmd_type = GET_VLPI,
192 .map = map,
193 };
194
195 return irq_set_vcpu_affinity(irq, &info);
196}
197
198int its_unmap_vlpi(int irq)
199{
200 irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
201 return irq_set_vcpu_affinity(irq, NULL);
202}
203
204int its_prop_update_vlpi(int irq, u8 config, bool inv)
205{
206 struct its_cmd_info info = {
207 .cmd_type = inv ? PROP_UPDATE_AND_INV_VLPI : PROP_UPDATE_VLPI,
208 .config = config,
209 };
210
211 return irq_set_vcpu_affinity(irq, &info);
212}
213
214int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops)
215{
216 if (domain) {
217 pr_info("ITS: Enabling GICv4 support\n");
218 gic_domain = domain;
219 vpe_domain_ops = ops;
220 return 0;
221 }
222
223 pr_err("ITS: No GICv4 VPE domain allocated\n");
224 return -ENODEV;
225}
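
A hedged usage sketch for the interface above (not part of the patch): how a hypervisor might allocate and consume the per-vPE doorbells. The my_* names are invented, error unwinding is kept minimal, and making a vPE resident with its_schedule_vpe() must additionally be done with preemption disabled, as the WARN_ON(preemptible()) above implies.

#include <linux/interrupt.h>
#include <linux/irqchip/arm-gic-v4.h>

static irqreturn_t my_doorbell_handler(int irq, void *data)
{
	/* A VLPI targeted a non-resident vPE; kick the matching vcpu. */
	return IRQ_HANDLED;
}

static int my_vm_enable_gicv4(struct its_vm *vm)
{
	int i, ret;

	/* One doorbell LPI per vPE, backed by the VPE irq domain. */
	ret = its_alloc_vcpu_irqs(vm);
	if (ret)
		return ret;

	for (i = 0; i < vm->nr_vpes; i++) {
		ret = request_irq(vm->vpes[i]->irq, my_doorbell_handler,
				  0, "vpe-doorbell", vm->vpes[i]);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	while (i--)
		free_irq(vm->vpes[i]->irq, vm->vpes[i]);
	its_free_vcpu_irqs(vm);
	return ret;
}
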
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 20dd2ba3d603..aff561e0c960 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -412,7 +412,7 @@ static void gic_handle_cascade_irq(struct irq_desc *desc)
412 chained_irq_exit(chip, desc); 412 chained_irq_exit(chip, desc);
413} 413}
414 414
415static struct irq_chip gic_chip = { 415static const struct irq_chip gic_chip = {
416 .irq_mask = gic_mask_irq, 416 .irq_mask = gic_mask_irq,
417 .irq_unmask = gic_unmask_irq, 417 .irq_unmask = gic_unmask_irq,
418 .irq_eoi = gic_eoi_irq, 418 .irq_eoi = gic_eoi_irq,
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index bb36f572e322..675eda5ff2b8 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -214,13 +214,13 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
214 int i; 214 int i;
215 215
216 if (!parent) { 216 if (!parent) {
217 pr_err("%s: no parent, giving up\n", node->full_name); 217 pr_err("%pOF: no parent, giving up\n", node);
218 return -ENODEV; 218 return -ENODEV;
219 } 219 }
220 220
221 parent_domain = irq_find_host(parent); 221 parent_domain = irq_find_host(parent);
222 if (!parent_domain) { 222 if (!parent_domain) {
223 pr_err("%s: unable to get parent domain\n", node->full_name); 223 pr_err("%pOF: unable to get parent domain\n", node);
224 return -ENXIO; 224 return -ENXIO;
225 } 225 }
226 226
diff --git a/drivers/irqchip/irq-lpc32xx.c b/drivers/irqchip/irq-lpc32xx.c
index 1034aeb2e98a..a48357d369b5 100644
--- a/drivers/irqchip/irq-lpc32xx.c
+++ b/drivers/irqchip/irq-lpc32xx.c
@@ -191,7 +191,7 @@ static int __init lpc32xx_of_ic_init(struct device_node *node,
191 191
192 irqc->base = of_iomap(node, 0); 192 irqc->base = of_iomap(node, 0);
193 if (!irqc->base) { 193 if (!irqc->base) {
194 pr_err("%s: unable to map registers\n", node->full_name); 194 pr_err("%pOF: unable to map registers\n", node);
195 kfree(irqc); 195 kfree(irqc);
196 return -EINVAL; 196 return -EINVAL;
197 } 197 }
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
index 02cca74cab94..119f4ef0d421 100644
--- a/drivers/irqchip/irq-ls-scfg-msi.c
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -17,13 +17,32 @@
17#include <linux/irq.h> 17#include <linux/irq.h>
18#include <linux/irqchip/chained_irq.h> 18#include <linux/irqchip/chained_irq.h>
19#include <linux/irqdomain.h> 19#include <linux/irqdomain.h>
20#include <linux/of_irq.h>
20#include <linux/of_pci.h> 21#include <linux/of_pci.h>
21#include <linux/of_platform.h> 22#include <linux/of_platform.h>
22#include <linux/spinlock.h> 23#include <linux/spinlock.h>
23 24
24#define MSI_MAX_IRQS 32 25#define MSI_IRQS_PER_MSIR 32
25#define MSI_IBS_SHIFT 3 26#define MSI_MSIR_OFFSET 4
26#define MSIR 4 27
28#define MSI_LS1043V1_1_IRQS_PER_MSIR 8
29#define MSI_LS1043V1_1_MSIR_OFFSET 0x10
30
31struct ls_scfg_msi_cfg {
32 u32 ibs_shift; /* Shift of interrupt bit select */
33 u32 msir_irqs; /* The irq number per MSIR */
34 u32 msir_base; /* The base address of MSIR */
35};
36
37struct ls_scfg_msir {
38 struct ls_scfg_msi *msi_data;
39 unsigned int index;
40 unsigned int gic_irq;
41 unsigned int bit_start;
42 unsigned int bit_end;
43 unsigned int srs; /* Shared interrupt register select */
44 void __iomem *reg;
45};
27 46
28struct ls_scfg_msi { 47struct ls_scfg_msi {
29 spinlock_t lock; 48 spinlock_t lock;
@@ -32,8 +51,11 @@ struct ls_scfg_msi {
32 struct irq_domain *msi_domain; 51 struct irq_domain *msi_domain;
33 void __iomem *regs; 52 void __iomem *regs;
34 phys_addr_t msiir_addr; 53 phys_addr_t msiir_addr;
35 int irq; 54 struct ls_scfg_msi_cfg *cfg;
36 DECLARE_BITMAP(used, MSI_MAX_IRQS); 55 u32 msir_num;
56 struct ls_scfg_msir *msir;
57 u32 irqs_num;
58 unsigned long *used;
37}; 59};
38 60
39static struct irq_chip ls_scfg_msi_irq_chip = { 61static struct irq_chip ls_scfg_msi_irq_chip = {
@@ -49,19 +71,56 @@ static struct msi_domain_info ls_scfg_msi_domain_info = {
49 .chip = &ls_scfg_msi_irq_chip, 71 .chip = &ls_scfg_msi_irq_chip,
50}; 72};
51 73
74static int msi_affinity_flag = 1;
75
76static int __init early_parse_ls_scfg_msi(char *p)
77{
78 if (p && strncmp(p, "no-affinity", 11) == 0)
79 msi_affinity_flag = 0;
80 else
81 msi_affinity_flag = 1;
82
83 return 0;
84}
85early_param("lsmsi", early_parse_ls_scfg_msi);
86
52static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) 87static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
53{ 88{
54 struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data); 89 struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data);
55 90
56 msg->address_hi = upper_32_bits(msi_data->msiir_addr); 91 msg->address_hi = upper_32_bits(msi_data->msiir_addr);
57 msg->address_lo = lower_32_bits(msi_data->msiir_addr); 92 msg->address_lo = lower_32_bits(msi_data->msiir_addr);
58 msg->data = data->hwirq << MSI_IBS_SHIFT; 93 msg->data = data->hwirq;
94
95 if (msi_affinity_flag)
96 msg->data |= cpumask_first(data->common->affinity);
59} 97}
60 98
61static int ls_scfg_msi_set_affinity(struct irq_data *irq_data, 99static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
62 const struct cpumask *mask, bool force) 100 const struct cpumask *mask, bool force)
63{ 101{
64 return -EINVAL; 102 struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(irq_data);
103 u32 cpu;
104
105 if (!msi_affinity_flag)
106 return -EINVAL;
107
108 if (!force)
109 cpu = cpumask_any_and(mask, cpu_online_mask);
110 else
111 cpu = cpumask_first(mask);
112
113 if (cpu >= msi_data->msir_num)
114 return -EINVAL;
115
116 if (msi_data->msir[cpu].gic_irq <= 0) {
117 pr_warn("cannot bind the irq to cpu%d\n", cpu);
118 return -EINVAL;
119 }
120
121 cpumask_copy(irq_data->common->affinity, mask);
122
123 return IRQ_SET_MASK_OK;
65} 124}
66 125
67static struct irq_chip ls_scfg_msi_parent_chip = { 126static struct irq_chip ls_scfg_msi_parent_chip = {
@@ -81,8 +140,8 @@ static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
81 WARN_ON(nr_irqs != 1); 140 WARN_ON(nr_irqs != 1);
82 141
83 spin_lock(&msi_data->lock); 142 spin_lock(&msi_data->lock);
84 pos = find_first_zero_bit(msi_data->used, MSI_MAX_IRQS); 143 pos = find_first_zero_bit(msi_data->used, msi_data->irqs_num);
85 if (pos < MSI_MAX_IRQS) 144 if (pos < msi_data->irqs_num)
86 __set_bit(pos, msi_data->used); 145 __set_bit(pos, msi_data->used);
87 else 146 else
88 err = -ENOSPC; 147 err = -ENOSPC;
@@ -106,7 +165,7 @@ static void ls_scfg_msi_domain_irq_free(struct irq_domain *domain,
106 int pos; 165 int pos;
107 166
108 pos = d->hwirq; 167 pos = d->hwirq;
109 if (pos < 0 || pos >= MSI_MAX_IRQS) { 168 if (pos < 0 || pos >= msi_data->irqs_num) {
110 pr_err("failed to teardown msi. Invalid hwirq %d\n", pos); 169 pr_err("failed to teardown msi. Invalid hwirq %d\n", pos);
111 return; 170 return;
112 } 171 }
@@ -123,15 +182,22 @@ static const struct irq_domain_ops ls_scfg_msi_domain_ops = {
123 182
124static void ls_scfg_msi_irq_handler(struct irq_desc *desc) 183static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
125{ 184{
126 struct ls_scfg_msi *msi_data = irq_desc_get_handler_data(desc); 185 struct ls_scfg_msir *msir = irq_desc_get_handler_data(desc);
186 struct ls_scfg_msi *msi_data = msir->msi_data;
127 unsigned long val; 187 unsigned long val;
128 int pos, virq; 188 int pos, size, virq, hwirq;
129 189
130 chained_irq_enter(irq_desc_get_chip(desc), desc); 190 chained_irq_enter(irq_desc_get_chip(desc), desc);
131 191
132 val = ioread32be(msi_data->regs + MSIR); 192 val = ioread32be(msir->reg);
133 for_each_set_bit(pos, &val, MSI_MAX_IRQS) { 193
134 virq = irq_find_mapping(msi_data->parent, (31 - pos)); 194 pos = msir->bit_start;
195 size = msir->bit_end + 1;
196
197 for_each_set_bit_from(pos, &val, size) {
198 hwirq = ((msir->bit_end - pos) << msi_data->cfg->ibs_shift) |
199 msir->srs;
200 virq = irq_find_mapping(msi_data->parent, hwirq);
135 if (virq) 201 if (virq)
136 generic_handle_irq(virq); 202 generic_handle_irq(virq);
137 } 203 }
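
To make the decode above concrete, a hedged worked example (values assumed, matching the LS1046A configuration: ibs_shift = 2 and a full 32-bit MSIR, i.e. bit_start = 0 and bit_end = 31): a set bit at position 18 of MSIR2 decodes to hwirq ((31 - 18) << 2) | 2 = 54, the low two bits carrying the shared register select (SRS) and the rest the interrupt bit select (IBS). This is also the value ls_scfg_msi_compose_msg() places in the MSI message data (with the CPU number ORed into the SRS bits when per-CPU affinity is in use).

/* Sketch only: the hwirq <-> (MSIR bit, SRS) mapping used by the handler. */
#define EXAMPLE_IBS_SHIFT	2	/* LS1046A-style layout, for illustration */

static unsigned int example_hwirq(unsigned int msir_bit, unsigned int srs)
{
	return ((31 - msir_bit) << EXAMPLE_IBS_SHIFT) | srs;	/* bit 18, srs 2 -> 54 */
}

static unsigned int example_srs(unsigned int hwirq)
{
	return hwirq & ((1 << EXAMPLE_IBS_SHIFT) - 1);		/* 54 -> 2 */
}
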
@@ -143,7 +209,7 @@ static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
143{ 209{
144 /* Initialize MSI domain parent */ 210 /* Initialize MSI domain parent */
145 msi_data->parent = irq_domain_add_linear(NULL, 211 msi_data->parent = irq_domain_add_linear(NULL,
146 MSI_MAX_IRQS, 212 msi_data->irqs_num,
147 &ls_scfg_msi_domain_ops, 213 &ls_scfg_msi_domain_ops,
148 msi_data); 214 msi_data);
149 if (!msi_data->parent) { 215 if (!msi_data->parent) {
@@ -164,16 +230,117 @@ static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
164 return 0; 230 return 0;
165} 231}
166 232
233static int ls_scfg_msi_setup_hwirq(struct ls_scfg_msi *msi_data, int index)
234{
235 struct ls_scfg_msir *msir;
236 int virq, i, hwirq;
237
238 virq = platform_get_irq(msi_data->pdev, index);
239 if (virq <= 0)
240 return -ENODEV;
241
242 msir = &msi_data->msir[index];
243 msir->index = index;
244 msir->msi_data = msi_data;
245 msir->gic_irq = virq;
246 msir->reg = msi_data->regs + msi_data->cfg->msir_base + 4 * index;
247
248 if (msi_data->cfg->msir_irqs == MSI_LS1043V1_1_IRQS_PER_MSIR) {
249 msir->bit_start = 32 - ((msir->index + 1) *
250 MSI_LS1043V1_1_IRQS_PER_MSIR);
251 msir->bit_end = msir->bit_start +
252 MSI_LS1043V1_1_IRQS_PER_MSIR - 1;
253 } else {
254 msir->bit_start = 0;
255 msir->bit_end = msi_data->cfg->msir_irqs - 1;
256 }
257
258 irq_set_chained_handler_and_data(msir->gic_irq,
259 ls_scfg_msi_irq_handler,
260 msir);
261
262 if (msi_affinity_flag) {
263 /* Associate MSIR interrupt to the cpu */
264 irq_set_affinity(msir->gic_irq, get_cpu_mask(index));
265 msir->srs = 0; /* This value is determined by the CPU */
266 } else
267 msir->srs = index;
268
269 /* Release the hwirqs corresponding to this MSIR */
270 if (!msi_affinity_flag || msir->index == 0) {
271 for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
272 hwirq = i << msi_data->cfg->ibs_shift | msir->index;
273 bitmap_clear(msi_data->used, hwirq, 1);
274 }
275 }
276
277 return 0;
278}
279
280static int ls_scfg_msi_teardown_hwirq(struct ls_scfg_msir *msir)
281{
282 struct ls_scfg_msi *msi_data = msir->msi_data;
283 int i, hwirq;
284
285 if (msir->gic_irq > 0)
286 irq_set_chained_handler_and_data(msir->gic_irq, NULL, NULL);
287
288 for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
289 hwirq = i << msi_data->cfg->ibs_shift | msir->index;
290 bitmap_set(msi_data->used, hwirq, 1);
291 }
292
293 return 0;
294}
295
296static struct ls_scfg_msi_cfg ls1021_msi_cfg = {
297 .ibs_shift = 3,
298 .msir_irqs = MSI_IRQS_PER_MSIR,
299 .msir_base = MSI_MSIR_OFFSET,
300};
301
302static struct ls_scfg_msi_cfg ls1046_msi_cfg = {
303 .ibs_shift = 2,
304 .msir_irqs = MSI_IRQS_PER_MSIR,
305 .msir_base = MSI_MSIR_OFFSET,
306};
307
308static struct ls_scfg_msi_cfg ls1043_v1_1_msi_cfg = {
309 .ibs_shift = 2,
310 .msir_irqs = MSI_LS1043V1_1_IRQS_PER_MSIR,
311 .msir_base = MSI_LS1043V1_1_MSIR_OFFSET,
312};
313
314static const struct of_device_id ls_scfg_msi_id[] = {
315 /* The following two misspelled compatibles are obsolete */
316 { .compatible = "fsl,1s1021a-msi", .data = &ls1021_msi_cfg},
317 { .compatible = "fsl,1s1043a-msi", .data = &ls1021_msi_cfg},
318
319 { .compatible = "fsl,ls1021a-msi", .data = &ls1021_msi_cfg },
320 { .compatible = "fsl,ls1043a-msi", .data = &ls1021_msi_cfg },
321 { .compatible = "fsl,ls1043a-v1.1-msi", .data = &ls1043_v1_1_msi_cfg },
322 { .compatible = "fsl,ls1046a-msi", .data = &ls1046_msi_cfg },
323 {},
324};
325MODULE_DEVICE_TABLE(of, ls_scfg_msi_id);
326
167static int ls_scfg_msi_probe(struct platform_device *pdev) 327static int ls_scfg_msi_probe(struct platform_device *pdev)
168{ 328{
329 const struct of_device_id *match;
169 struct ls_scfg_msi *msi_data; 330 struct ls_scfg_msi *msi_data;
170 struct resource *res; 331 struct resource *res;
171 int ret; 332 int i, ret;
333
334 match = of_match_device(ls_scfg_msi_id, &pdev->dev);
335 if (!match)
336 return -ENODEV;
172 337
173 msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL); 338 msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
174 if (!msi_data) 339 if (!msi_data)
175 return -ENOMEM; 340 return -ENOMEM;
176 341
342 msi_data->cfg = (struct ls_scfg_msi_cfg *) match->data;
343
177 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 344 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
178 msi_data->regs = devm_ioremap_resource(&pdev->dev, res); 345 msi_data->regs = devm_ioremap_resource(&pdev->dev, res);
179 if (IS_ERR(msi_data->regs)) { 346 if (IS_ERR(msi_data->regs)) {
@@ -182,23 +349,48 @@ static int ls_scfg_msi_probe(struct platform_device *pdev)
182 } 349 }
183 msi_data->msiir_addr = res->start; 350 msi_data->msiir_addr = res->start;
184 351
185 msi_data->irq = platform_get_irq(pdev, 0);
186 if (msi_data->irq <= 0) {
187 dev_err(&pdev->dev, "failed to get MSI irq\n");
188 return -ENODEV;
189 }
190
191 msi_data->pdev = pdev; 352 msi_data->pdev = pdev;
192 spin_lock_init(&msi_data->lock); 353 spin_lock_init(&msi_data->lock);
193 354
355 msi_data->irqs_num = MSI_IRQS_PER_MSIR *
356 (1 << msi_data->cfg->ibs_shift);
357 msi_data->used = devm_kcalloc(&pdev->dev,
358 BITS_TO_LONGS(msi_data->irqs_num),
359 sizeof(*msi_data->used),
360 GFP_KERNEL);
361 if (!msi_data->used)
362 return -ENOMEM;
363 /*
364 * Reserve all the hwirqs
365 * The available hwirqs will be released in ls1_msi_setup_hwirq()
366 */
367 bitmap_set(msi_data->used, 0, msi_data->irqs_num);
368
369 msi_data->msir_num = of_irq_count(pdev->dev.of_node);
370
371 if (msi_affinity_flag) {
372 u32 cpu_num;
373
374 cpu_num = num_possible_cpus();
375 if (msi_data->msir_num >= cpu_num)
376 msi_data->msir_num = cpu_num;
377 else
378 msi_affinity_flag = 0;
379 }
380
381 msi_data->msir = devm_kcalloc(&pdev->dev, msi_data->msir_num,
382 sizeof(*msi_data->msir),
383 GFP_KERNEL);
384 if (!msi_data->msir)
385 return -ENOMEM;
386
387 for (i = 0; i < msi_data->msir_num; i++)
388 ls_scfg_msi_setup_hwirq(msi_data, i);
389
194 ret = ls_scfg_msi_domains_init(msi_data); 390 ret = ls_scfg_msi_domains_init(msi_data);
195 if (ret) 391 if (ret)
196 return ret; 392 return ret;
197 393
198 irq_set_chained_handler_and_data(msi_data->irq,
199 ls_scfg_msi_irq_handler,
200 msi_data);
201
202 platform_set_drvdata(pdev, msi_data); 394 platform_set_drvdata(pdev, msi_data);
203 395
204 return 0; 396 return 0;
@@ -207,8 +399,10 @@ static int ls_scfg_msi_probe(struct platform_device *pdev)
207static int ls_scfg_msi_remove(struct platform_device *pdev) 399static int ls_scfg_msi_remove(struct platform_device *pdev)
208{ 400{
209 struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev); 401 struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);
402 int i;
210 403
211 irq_set_chained_handler_and_data(msi_data->irq, NULL, NULL); 404 for (i = 0; i < msi_data->msir_num; i++)
405 ls_scfg_msi_teardown_hwirq(&msi_data->msir[i]);
212 406
213 irq_domain_remove(msi_data->msi_domain); 407 irq_domain_remove(msi_data->msi_domain);
214 irq_domain_remove(msi_data->parent); 408 irq_domain_remove(msi_data->parent);
@@ -218,12 +412,6 @@ static int ls_scfg_msi_remove(struct platform_device *pdev)
218 return 0; 412 return 0;
219} 413}
220 414
221static const struct of_device_id ls_scfg_msi_id[] = {
222 { .compatible = "fsl,1s1021a-msi", },
223 { .compatible = "fsl,1s1043a-msi", },
224 {},
225};
226
227static struct platform_driver ls_scfg_msi_driver = { 415static struct platform_driver ls_scfg_msi_driver = {
228 .driver = { 416 .driver = {
229 .name = "ls-scfg-msi", 417 .name = "ls-scfg-msi",
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 013fc9659a84..25f32e1d7764 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -181,13 +181,13 @@ const struct irq_domain_ops mmp_irq_domain_ops = {
181 .xlate = mmp_irq_domain_xlate, 181 .xlate = mmp_irq_domain_xlate,
182}; 182};
183 183
184static struct mmp_intc_conf mmp_conf = { 184static const struct mmp_intc_conf mmp_conf = {
185 .conf_enable = 0x51, 185 .conf_enable = 0x51,
186 .conf_disable = 0x0, 186 .conf_disable = 0x0,
187 .conf_mask = 0x7f, 187 .conf_mask = 0x7f,
188}; 188};
189 189
190static struct mmp_intc_conf mmp2_conf = { 190static const struct mmp_intc_conf mmp2_conf = {
191 .conf_enable = 0x20, 191 .conf_enable = 0x20,
192 .conf_disable = 0x0, 192 .conf_disable = 0x0,
193 .conf_mask = 0x7f, 193 .conf_mask = 0x7f,
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c
index eeac512ec5a8..90aaf190157f 100644
--- a/drivers/irqchip/irq-mtk-sysirq.c
+++ b/drivers/irqchip/irq-mtk-sysirq.c
@@ -178,8 +178,7 @@ static int __init mtk_sysirq_of_init(struct device_node *node,
178 chip_data->intpol_words[i] = size / 4; 178 chip_data->intpol_words[i] = size / 4;
179 chip_data->intpol_bases[i] = of_iomap(node, i); 179 chip_data->intpol_bases[i] = of_iomap(node, i);
180 if (ret || !chip_data->intpol_bases[i]) { 180 if (ret || !chip_data->intpol_bases[i]) {
181 pr_err("%s: couldn't map region %d\n", 181 pr_err("%pOF: couldn't map region %d\n", node, i);
182 node->full_name, i);
183 ret = -ENODEV; 182 ret = -ENODEV;
184 goto out_free_intpol; 183 goto out_free_intpol;
185 } 184 }
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index 05fa9f7af53c..e8b31f52e071 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -179,7 +179,7 @@ static void __init icoll_add_domain(struct device_node *np,
179 &icoll_irq_domain_ops, NULL); 179 &icoll_irq_domain_ops, NULL);
180 180
181 if (!icoll_domain) 181 if (!icoll_domain)
182 panic("%s: unable to create irq domain", np->full_name); 182 panic("%pOF: unable to create irq domain", np);
183} 183}
184 184
185static void __iomem * __init icoll_init_iobase(struct device_node *np) 185static void __iomem * __init icoll_init_iobase(struct device_node *np)
@@ -188,7 +188,7 @@ static void __iomem * __init icoll_init_iobase(struct device_node *np)
188 188
189 icoll_base = of_io_request_and_map(np, 0, np->name); 189 icoll_base = of_io_request_and_map(np, 0, np->name);
190 if (IS_ERR(icoll_base)) 190 if (IS_ERR(icoll_base))
191 panic("%s: unable to map resource", np->full_name); 191 panic("%pOF: unable to map resource", np);
192 return icoll_base; 192 return icoll_base;
193} 193}
194 194
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 491568c95aa5..45363ff8d06f 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -140,7 +140,7 @@ static int __init stm32_exti_init(struct device_node *node,
140 140
141 base = of_iomap(node, 0); 141 base = of_iomap(node, 0);
142 if (!base) { 142 if (!base) {
143 pr_err("%s: Unable to map registers\n", node->full_name); 143 pr_err("%pOF: Unable to map registers\n", node);
144 return -ENOMEM; 144 return -ENOMEM;
145 } 145 }
146 146
@@ -149,7 +149,7 @@ static int __init stm32_exti_init(struct device_node *node,
149 nr_exti = fls(readl_relaxed(base + EXTI_RTSR)); 149 nr_exti = fls(readl_relaxed(base + EXTI_RTSR));
150 writel_relaxed(0, base + EXTI_RTSR); 150 writel_relaxed(0, base + EXTI_RTSR);
151 151
152 pr_info("%s: %d External IRQs detected\n", node->full_name, nr_exti); 152 pr_info("%pOF: %d External IRQs detected\n", node, nr_exti);
153 153
154 domain = irq_domain_add_linear(node, nr_exti, 154 domain = irq_domain_add_linear(node, nr_exti,
155 &irq_exti_domain_ops, NULL); 155 &irq_exti_domain_ops, NULL);
@@ -163,8 +163,8 @@ static int __init stm32_exti_init(struct device_node *node,
163 ret = irq_alloc_domain_generic_chips(domain, nr_exti, 1, "exti", 163 ret = irq_alloc_domain_generic_chips(domain, nr_exti, 1, "exti",
164 handle_edge_irq, clr, 0, 0); 164 handle_edge_irq, clr, 0, 0);
165 if (ret) { 165 if (ret) {
166 pr_err("%s: Could not allocate generic interrupt chip.\n", 166 pr_err("%pOF: Could not allocate generic interrupt chip.\n",
167 node->full_name); 167 node);
168 goto out_free_domain; 168 goto out_free_domain;
169 } 169 }
170 170
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
index 376b28074e0d..e3e5b9132b75 100644
--- a/drivers/irqchip/irq-sun4i.c
+++ b/drivers/irqchip/irq-sun4i.c
@@ -97,8 +97,8 @@ static int __init sun4i_of_init(struct device_node *node,
97{ 97{
98 sun4i_irq_base = of_iomap(node, 0); 98 sun4i_irq_base = of_iomap(node, 0);
99 if (!sun4i_irq_base) 99 if (!sun4i_irq_base)
100 panic("%s: unable to map IC registers\n", 100 panic("%pOF: unable to map IC registers\n",
101 node->full_name); 101 node);
102 102
103 /* Disable all interrupts */ 103 /* Disable all interrupts */
104 writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(0)); 104 writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(0));
@@ -124,7 +124,7 @@ static int __init sun4i_of_init(struct device_node *node,
124 sun4i_irq_domain = irq_domain_add_linear(node, 3 * 32, 124 sun4i_irq_domain = irq_domain_add_linear(node, 3 * 32,
125 &sun4i_irq_ops, NULL); 125 &sun4i_irq_ops, NULL);
126 if (!sun4i_irq_domain) 126 if (!sun4i_irq_domain)
127 panic("%s: unable to create IRQ domain\n", node->full_name); 127 panic("%pOF: unable to create IRQ domain\n", node);
128 128
129 set_handle_irq(sun4i_handle_irq); 129 set_handle_irq(sun4i_handle_irq);
130 130
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
index 3973a14bb15b..0abc0cd1c32e 100644
--- a/drivers/irqchip/irq-tegra.c
+++ b/drivers/irqchip/irq-tegra.c
@@ -291,13 +291,13 @@ static int __init tegra_ictlr_init(struct device_node *node,
291 int err; 291 int err;
292 292
293 if (!parent) { 293 if (!parent) {
294 pr_err("%s: no parent, giving up\n", node->full_name); 294 pr_err("%pOF: no parent, giving up\n", node);
295 return -ENODEV; 295 return -ENODEV;
296 } 296 }
297 297
298 parent_domain = irq_find_host(parent); 298 parent_domain = irq_find_host(parent);
299 if (!parent_domain) { 299 if (!parent_domain) {
300 pr_err("%s: unable to obtain parent domain\n", node->full_name); 300 pr_err("%pOF: unable to obtain parent domain\n", node);
301 return -ENXIO; 301 return -ENXIO;
302 } 302 }
303 303
@@ -329,29 +329,29 @@ static int __init tegra_ictlr_init(struct device_node *node,
329 } 329 }
330 330
331 if (!num_ictlrs) { 331 if (!num_ictlrs) {
332 pr_err("%s: no valid regions, giving up\n", node->full_name); 332 pr_err("%pOF: no valid regions, giving up\n", node);
333 err = -ENOMEM; 333 err = -ENOMEM;
334 goto out_free; 334 goto out_free;
335 } 335 }
336 336
337 WARN(num_ictlrs != soc->num_ictlrs, 337 WARN(num_ictlrs != soc->num_ictlrs,
338 "%s: Found %u interrupt controllers in DT; expected %u.\n", 338 "%pOF: Found %u interrupt controllers in DT; expected %u.\n",
339 node->full_name, num_ictlrs, soc->num_ictlrs); 339 node, num_ictlrs, soc->num_ictlrs);
340 340
341 341
342 domain = irq_domain_add_hierarchy(parent_domain, 0, num_ictlrs * 32, 342 domain = irq_domain_add_hierarchy(parent_domain, 0, num_ictlrs * 32,
343 node, &tegra_ictlr_domain_ops, 343 node, &tegra_ictlr_domain_ops,
344 lic); 344 lic);
345 if (!domain) { 345 if (!domain) {
346 pr_err("%s: failed to allocated domain\n", node->full_name); 346 pr_err("%pOF: failed to allocated domain\n", node);
347 err = -ENOMEM; 347 err = -ENOMEM;
348 goto out_unmap; 348 goto out_unmap;
349 } 349 }
350 350
351 tegra_ictlr_syscore_init(); 351 tegra_ictlr_syscore_init();
352 352
353 pr_info("%s: %d interrupts forwarded to %s\n", 353 pr_info("%pOF: %d interrupts forwarded to %pOF\n",
354 node->full_name, num_ictlrs * 32, parent->full_name); 354 node, num_ictlrs * 32, parent);
355 355
356 return 0; 356 return 0;
357 357
diff --git a/drivers/irqchip/irq-uniphier-aidet.c b/drivers/irqchip/irq-uniphier-aidet.c
new file mode 100644
index 000000000000..7ba7f253470e
--- /dev/null
+++ b/drivers/irqchip/irq-uniphier-aidet.c
@@ -0,0 +1,261 @@
1/*
2 * Driver for UniPhier AIDET (ARM Interrupt Detector)
3 *
4 * Copyright (C) 2017 Socionext Inc.
5 * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/bitops.h>
18#include <linux/init.h>
19#include <linux/irq.h>
20#include <linux/irqdomain.h>
21#include <linux/kernel.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/of_irq.h>
25#include <linux/platform_device.h>
26#include <linux/spinlock.h>
27
28#define UNIPHIER_AIDET_NR_IRQS 256
29
30#define UNIPHIER_AIDET_DETCONF 0x04 /* inverter register base */
31
32struct uniphier_aidet_priv {
33 struct irq_domain *domain;
34 void __iomem *reg_base;
35 spinlock_t lock;
36 u32 saved_vals[UNIPHIER_AIDET_NR_IRQS / 32];
37};
38
39static void uniphier_aidet_reg_update(struct uniphier_aidet_priv *priv,
40 unsigned int reg, u32 mask, u32 val)
41{
42 unsigned long flags;
43 u32 tmp;
44
45 spin_lock_irqsave(&priv->lock, flags);
46 tmp = readl_relaxed(priv->reg_base + reg);
47 tmp &= ~mask;
48 tmp |= mask & val;
49 writel_relaxed(tmp, priv->reg_base + reg);
50 spin_unlock_irqrestore(&priv->lock, flags);
51}
52
53static void uniphier_aidet_detconf_update(struct uniphier_aidet_priv *priv,
54 unsigned long index, unsigned int val)
55{
56 unsigned int reg;
57 u32 mask;
58
59 reg = UNIPHIER_AIDET_DETCONF + index / 32 * 4;
60 mask = BIT(index % 32);
61
62 uniphier_aidet_reg_update(priv, reg, mask, val ? mask : 0);
63}
64
65static int uniphier_aidet_irq_set_type(struct irq_data *data, unsigned int type)
66{
67 struct uniphier_aidet_priv *priv = data->chip_data;
68 unsigned int val;
69
70 /* enable inverter for active low triggers */
71 switch (type) {
72 case IRQ_TYPE_EDGE_RISING:
73 case IRQ_TYPE_LEVEL_HIGH:
74 val = 0;
75 break;
76 case IRQ_TYPE_EDGE_FALLING:
77 val = 1;
78 type = IRQ_TYPE_EDGE_RISING;
79 break;
80 case IRQ_TYPE_LEVEL_LOW:
81 val = 1;
82 type = IRQ_TYPE_LEVEL_HIGH;
83 break;
84 default:
85 return -EINVAL;
86 }
87
88 uniphier_aidet_detconf_update(priv, data->hwirq, val);
89
90 return irq_chip_set_type_parent(data, type);
91}
92
93static struct irq_chip uniphier_aidet_irq_chip = {
94 .name = "AIDET",
95 .irq_mask = irq_chip_mask_parent,
96 .irq_unmask = irq_chip_unmask_parent,
97 .irq_eoi = irq_chip_eoi_parent,
98 .irq_set_affinity = irq_chip_set_affinity_parent,
99 .irq_set_type = uniphier_aidet_irq_set_type,
100};
101
102static int uniphier_aidet_domain_translate(struct irq_domain *domain,
103 struct irq_fwspec *fwspec,
104 unsigned long *out_hwirq,
105 unsigned int *out_type)
106{
107 if (WARN_ON(fwspec->param_count < 2))
108 return -EINVAL;
109
110 *out_hwirq = fwspec->param[0];
111 *out_type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
112
113 return 0;
114}
115
116static int uniphier_aidet_domain_alloc(struct irq_domain *domain,
117 unsigned int virq, unsigned int nr_irqs,
118 void *arg)
119{
120 struct irq_fwspec parent_fwspec;
121 irq_hw_number_t hwirq;
122 unsigned int type;
123 int ret;
124
125 if (nr_irqs != 1)
126 return -EINVAL;
127
128 ret = uniphier_aidet_domain_translate(domain, arg, &hwirq, &type);
129 if (ret)
130 return ret;
131
132 switch (type) {
133 case IRQ_TYPE_EDGE_RISING:
134 case IRQ_TYPE_LEVEL_HIGH:
135 break;
136 case IRQ_TYPE_EDGE_FALLING:
137 type = IRQ_TYPE_EDGE_RISING;
138 break;
139 case IRQ_TYPE_LEVEL_LOW:
140 type = IRQ_TYPE_LEVEL_HIGH;
141 break;
142 default:
143 return -EINVAL;
144 }
145
146 if (hwirq >= UNIPHIER_AIDET_NR_IRQS)
147 return -ENXIO;
148
149 ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
150 &uniphier_aidet_irq_chip,
151 domain->host_data);
152 if (ret)
153 return ret;
154
155 /* parent is GIC */
156 parent_fwspec.fwnode = domain->parent->fwnode;
157 parent_fwspec.param_count = 3;
158 parent_fwspec.param[0] = 0; /* SPI */
159 parent_fwspec.param[1] = hwirq;
160 parent_fwspec.param[2] = type;
161
162 return irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
163}
164
165static const struct irq_domain_ops uniphier_aidet_domain_ops = {
166 .alloc = uniphier_aidet_domain_alloc,
167 .free = irq_domain_free_irqs_common,
168 .translate = uniphier_aidet_domain_translate,
169};
170
171static int uniphier_aidet_probe(struct platform_device *pdev)
172{
173 struct device *dev = &pdev->dev;
174 struct device_node *parent_np;
175 struct irq_domain *parent_domain;
176 struct uniphier_aidet_priv *priv;
177 struct resource *res;
178
179 parent_np = of_irq_find_parent(dev->of_node);
180 if (!parent_np)
181 return -ENXIO;
182
183 parent_domain = irq_find_host(parent_np);
184 of_node_put(parent_np);
185 if (!parent_domain)
186 return -EPROBE_DEFER;
187
188 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
189 if (!priv)
190 return -ENOMEM;
191
192 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
193 priv->reg_base = devm_ioremap_resource(dev, res);
194 if (IS_ERR(priv->reg_base))
195 return PTR_ERR(priv->reg_base);
196
197 spin_lock_init(&priv->lock);
198
199 priv->domain = irq_domain_create_hierarchy(
200 parent_domain, 0,
201 UNIPHIER_AIDET_NR_IRQS,
202 of_node_to_fwnode(dev->of_node),
203 &uniphier_aidet_domain_ops, priv);
204 if (!priv->domain)
205 return -ENOMEM;
206
207 platform_set_drvdata(pdev, priv);
208
209 return 0;
210}
211
212static int __maybe_unused uniphier_aidet_suspend(struct device *dev)
213{
214 struct uniphier_aidet_priv *priv = dev_get_drvdata(dev);
215 int i;
216
217 for (i = 0; i < ARRAY_SIZE(priv->saved_vals); i++)
218 priv->saved_vals[i] = readl_relaxed(
219 priv->reg_base + UNIPHIER_AIDET_DETCONF + i * 4);
220
221 return 0;
222}
223
224static int __maybe_unused uniphier_aidet_resume(struct device *dev)
225{
226 struct uniphier_aidet_priv *priv = dev_get_drvdata(dev);
227 int i;
228
229 for (i = 0; i < ARRAY_SIZE(priv->saved_vals); i++)
230 writel_relaxed(priv->saved_vals[i],
231 priv->reg_base + UNIPHIER_AIDET_DETCONF + i * 4);
232
233 return 0;
234}
235
236static const struct dev_pm_ops uniphier_aidet_pm_ops = {
237 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(uniphier_aidet_suspend,
238 uniphier_aidet_resume)
239};
240
241static const struct of_device_id uniphier_aidet_match[] = {
242 { .compatible = "socionext,uniphier-ld4-aidet" },
243 { .compatible = "socionext,uniphier-pro4-aidet" },
244 { .compatible = "socionext,uniphier-sld8-aidet" },
245 { .compatible = "socionext,uniphier-pro5-aidet" },
246 { .compatible = "socionext,uniphier-pxs2-aidet" },
247 { .compatible = "socionext,uniphier-ld11-aidet" },
248 { .compatible = "socionext,uniphier-ld20-aidet" },
249 { .compatible = "socionext,uniphier-pxs3-aidet" },
250 { /* sentinel */ }
251};
252
253static struct platform_driver uniphier_aidet_driver = {
254 .probe = uniphier_aidet_probe,
255 .driver = {
256 .name = "uniphier-aidet",
257 .of_match_table = uniphier_aidet_match,
258 .pm = &uniphier_aidet_pm_ops,
259 },
260};
261builtin_platform_driver(uniphier_aidet_driver);
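In the new driver above, uniphier_aidet_irq_set_type() handles active-low and falling-edge lines by setting the line's inverter bit in the DETCONF register bank and then passing the equivalent rising-edge/high-level type on to the parent GIC. A standalone sketch of the register/bit arithmetic performed by uniphier_aidet_detconf_update(), with 32 lines per 32-bit register (constant and function names here are illustrative, only the 0x04 base comes from the driver):

#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_AIDET_DETCONF	0x04	/* inverter register base */

static void example_detconf_layout(unsigned long hwirq,
				   unsigned int *reg, u32 *mask)
{
	*reg  = EXAMPLE_AIDET_DETCONF + hwirq / 32 * 4;	/* which 32-bit register */
	*mask = BIT(hwirq % 32);			/* which bit within it */
}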
diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
index 3db7ab1c9741..e3043ded8973 100644
--- a/drivers/irqchip/irq-xilinx-intc.c
+++ b/drivers/irqchip/irq-xilinx-intc.c
@@ -186,8 +186,8 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
186 if (irqc->intr_mask >> nr_irq) 186 if (irqc->intr_mask >> nr_irq)
187 pr_warn("irq-xilinx: mismatch in kind-of-intr param\n"); 187 pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
188 188
189 pr_info("irq-xilinx: %s: num_irq=%d, edge=0x%x\n", 189 pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",
190 intc->full_name, nr_irq, irqc->intr_mask); 190 intc, nr_irq, irqc->intr_mask);
191 191
192 192
193 /* 193 /*