author     Thomas Gleixner <tglx@linutronix.de>    2017-10-19 08:29:46 -0400
committer  Thomas Gleixner <tglx@linutronix.de>    2017-10-19 08:29:46 -0400
commit     e4844dede52f26132657e8dbd66d1b615653b0d6
tree       f51ac6c8e5f86fdeea99582bd3ece222567f451c
parent     c94fb639d5462027004ed8f5f71288955688a4ae
parent     7bdeb7f52b1b193bb50fc6d01c6110ba50bafb5b
Merge tag 'irqchip-4.15' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into irq/core
Pull irqchip updates for 4.15 from Marc Zyngier:
- GICv4 updates (improved performance, errata workarounds)
- Workaround for Socionext's pre-ITS erratum
- Meson GPIO interrupt controller
- BCM7271 L2 interrupt controller
- GICv3 range selector support
- Various cleanups
22 files changed, 981 insertions, 151 deletions
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt index 66e8ce14d23d..304bf22bb83c 100644 --- a/Documentation/arm64/silicon-errata.txt +++ b/Documentation/arm64/silicon-errata.txt | |||
@@ -70,6 +70,7 @@ stable kernels. | |||
70 | | | | | | | 70 | | | | | | |
71 | | Hisilicon | Hip0{5,6,7} | #161010101 | HISILICON_ERRATUM_161010101 | | 71 | | Hisilicon | Hip0{5,6,7} | #161010101 | HISILICON_ERRATUM_161010101 | |
72 | | Hisilicon | Hip0{6,7} | #161010701 | N/A | | 72 | | Hisilicon | Hip0{6,7} | #161010701 | N/A | |
73 | | Hisilicon | Hip07 | #161600802 | HISILICON_ERRATUM_161600802 | | ||
73 | | | | | | | 74 | | | | | | |
74 | | Qualcomm Tech. | Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 | | 75 | | Qualcomm Tech. | Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 | |
75 | | Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 | | 76 | | Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 | |
diff --git a/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.txt new file mode 100644 index 000000000000..633e21ce4b17 --- /dev/null +++ b/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.txt | |||
@@ -0,0 +1,35 @@ | |||
1 | Amlogic meson GPIO interrupt controller | ||
2 | |||
3 | Meson SoCs contain an interrupt controller which is able to watch the SoC | ||
4 | pads and generate an interrupt on edge or level. The controller is essentially | ||
5 | a multiplexer from 256 pads to 8 GIC interrupts, with a filter block to select | ||
6 | edge or level and polarity. It does not expose all 256 mux inputs because the | ||
7 | documentation shows that the upper part is not mapped to any pad. The actual | ||
8 | number of interrupts exposed depends on the SoC. | ||
9 | |||
10 | Required properties: | ||
11 | |||
12 | - compatible : must have "amlogic,meson-gpio-intc" and either | ||
13 | "amlogic,meson8b-gpio-intc" for meson8b SoCs (S805) or | ||
14 | "amlogic,meson-gxbb-gpio-intc" for GXBB SoCs (S905) or | ||
15 | "amlogic,meson-gxl-gpio-intc" for GXL SoCs (S905X, S912) | ||
16 | - interrupt-parent : a phandle to the GIC the interrupts are routed to. | ||
17 | Usually this is provided at the root level of the device tree as it is | ||
18 | common to most of the SoC. | ||
19 | - reg : Specifies base physical address and size of the registers. | ||
20 | - interrupt-controller : Identifies the node as an interrupt controller. | ||
21 | - #interrupt-cells : Specifies the number of cells needed to encode an | ||
22 | interrupt source. The value must be 2. | ||
23 | - meson,channel-interrupts: Array with the 8 upstream hwirq numbers. These | ||
24 | are the hwirqs used on the parent interrupt controller. | ||
25 | |||
26 | Example: | ||
27 | |||
28 | gpio_interrupt: interrupt-controller@9880 { | ||
29 | compatible = "amlogic,meson-gxbb-gpio-intc", | ||
30 | "amlogic,meson-gpio-intc"; | ||
31 | reg = <0x0 0x9880 0x0 0x10>; | ||
32 | interrupt-controller; | ||
33 | #interrupt-cells = <2>; | ||
34 | meson,channel-interrupts = <64 65 66 67 68 69 70 71>; | ||
35 | }; | ||
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt index 4c29cdab0ea5..c3e6092f3add 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt | |||
@@ -75,6 +75,10 @@ These nodes must have the following properties: | |||
75 | - reg: Specifies the base physical address and size of the ITS | 75 | - reg: Specifies the base physical address and size of the ITS |
76 | registers. | 76 | registers. |
77 | 77 | ||
78 | Optional: | ||
79 | - socionext,synquacer-pre-its: (u32, u32) tuple describing the untranslated | ||
80 | address and size of the pre-ITS window. | ||
81 | |||
78 | The main GIC node must contain the appropriate #address-cells, | 82 | The main GIC node must contain the appropriate #address-cells, |
79 | #size-cells and ranges properties for the reg property of all ITS | 83 | #size-cells and ranges properties for the reg property of all ITS |
80 | nodes. | 84 | nodes. |
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt index 448273a30a11..36df06c5c567 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt | |||
@@ -2,7 +2,8 @@ Broadcom Generic Level 2 Interrupt Controller | |||
2 | 2 | ||
3 | Required properties: | 3 | Required properties: |
4 | 4 | ||
5 | - compatible: should be "brcm,l2-intc" | 5 | - compatible: should be "brcm,l2-intc" for latched interrupt controllers |
6 | should be "brcm,bcm7271-l2-intc" for level interrupt controllers | ||
6 | - reg: specifies the base physical address and size of the registers | 7 | - reg: specifies the base physical address and size of the registers |
7 | - interrupt-controller: identifies the node as an interrupt controller | 8 | - interrupt-controller: identifies the node as an interrupt controller |
8 | - #interrupt-cells: specifies the number of cells needed to encode an | 9 | - #interrupt-cells: specifies the number of cells needed to encode an |
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt index e3f052d8c11a..33c9a10fdc91 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt | |||
@@ -13,6 +13,9 @@ Required properties: | |||
13 | - "renesas,irqc-r8a7793" (R-Car M2-N) | 13 | - "renesas,irqc-r8a7793" (R-Car M2-N) |
14 | - "renesas,irqc-r8a7794" (R-Car E2) | 14 | - "renesas,irqc-r8a7794" (R-Car E2) |
15 | - "renesas,intc-ex-r8a7795" (R-Car H3) | 15 | - "renesas,intc-ex-r8a7795" (R-Car H3) |
16 | - "renesas,intc-ex-r8a7796" (R-Car M3-W) | ||
17 | - "renesas,intc-ex-r8a77970" (R-Car V3M) | ||
18 | - "renesas,intc-ex-r8a77995" (R-Car D3) | ||
16 | - #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in | 19 | - #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in |
17 | interrupts.txt in this directory | 20 | interrupts.txt in this directory |
18 | - clocks: Must contain a reference to the functional clock. | 21 | - clocks: Must contain a reference to the functional clock. |
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h index eee269321923..1070044f5c3f 100644 --- a/arch/arm/include/asm/arch_gicv3.h +++ b/arch/arm/include/asm/arch_gicv3.h | |||
@@ -196,6 +196,11 @@ static inline void gic_write_ctlr(u32 val) | |||
196 | isb(); | 196 | isb(); |
197 | } | 197 | } |
198 | 198 | ||
199 | static inline u32 gic_read_ctlr(void) | ||
200 | { | ||
201 | return read_sysreg(ICC_CTLR); | ||
202 | } | ||
203 | |||
199 | static inline void gic_write_grpen1(u32 val) | 204 | static inline void gic_write_grpen1(u32 val) |
200 | { | 205 | { |
201 | write_sysreg(val, ICC_IGRPEN1); | 206 | write_sysreg(val, ICC_IGRPEN1); |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 0df64a6a56d4..22455e4168c1 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
@@ -539,6 +539,25 @@ config QCOM_QDF2400_ERRATUM_0065 | |||
539 | 539 | ||
540 | If unsure, say Y. | 540 | If unsure, say Y. |
541 | 541 | ||
542 | |||
543 | config SOCIONEXT_SYNQUACER_PREITS | ||
544 | bool "Socionext Synquacer: Workaround for GICv3 pre-ITS" | ||
545 | default y | ||
546 | help | ||
547 | Socionext Synquacer SoCs implement a separate h/w block to generate | ||
548 | MSI doorbell writes with non-zero values for the device ID. | ||
549 | |||
550 | If unsure, say Y. | ||
551 | |||
552 | config HISILICON_ERRATUM_161600802 | ||
553 | bool "Hip07 161600802: Erroneous redistributor VLPI base" | ||
554 | default y | ||
555 | help | ||
556 | The HiSilicon Hip07 SoC uses the wrong redistributor base | ||
557 | when it is issued ITS commands such as VMOVP and VMAPP, and requires | ||
558 | a 128kB offset to be applied to the target address in these commands. | ||
559 | |||
560 | If unsure, say Y. | ||
542 | endmenu | 561 | endmenu |
543 | 562 | ||
544 | 563 | ||
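The ITS-side handling of this erratum is outside the hunks quoted here. As a rough sketch (the function name is an assumption, not taken from this diff), the quirk init would set the vlpi_redist_offset field that the ITS driver changes below add to the VMAPP/VMOVP target address:

static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
{
	struct its_node *its = data;

	/* erratum 161600802: apply a 128kB offset to the VLPI target address */
	its->vlpi_redist_offset = SZ_128K;

	return true;	/* let gic_enable_quirks() report the workaround */
}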
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index b7e3f74822da..9becba9ab392 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h | |||
@@ -87,6 +87,11 @@ static inline void gic_write_ctlr(u32 val) | |||
87 | isb(); | 87 | isb(); |
88 | } | 88 | } |
89 | 89 | ||
90 | static inline u32 gic_read_ctlr(void) | ||
91 | { | ||
92 | return read_sysreg_s(SYS_ICC_CTLR_EL1); | ||
93 | } | ||
94 | |||
90 | static inline void gic_write_grpen1(u32 val) | 95 | static inline void gic_write_grpen1(u32 val) |
91 | { | 96 | { |
92 | write_sysreg_s(val, SYS_ICC_IGRPEN1_EL1); | 97 | write_sysreg_s(val, SYS_ICC_IGRPEN1_EL1); |
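For context on why a read accessor appears alongside the existing gic_write_ctlr(): the GICv3 range selector (RSS) support mentioned in the pull summary has to inspect ICC_CTLR_EL1. A minimal sketch, assuming the ICC_CTLR_EL1_RSS bit definition introduced by that series:

static bool gic_has_rss(void)
{
	/* RSS set: SGIs may target affinity level 0 values above 15 */
	return !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);
}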
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 77df38ed0050..1cb39405d7f8 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig | |||
@@ -324,4 +324,12 @@ config IRQ_UNIPHIER_AIDET | |||
324 | help | 324 | help |
325 | Support for the UniPhier AIDET (ARM Interrupt Detector). | 325 | Support for the UniPhier AIDET (ARM Interrupt Detector). |
326 | 326 | ||
327 | config MESON_IRQ_GPIO | ||
328 | bool "Meson GPIO Interrupt Multiplexer" | ||
329 | depends on ARCH_MESON || COMPILE_TEST | ||
330 | select IRQ_DOMAIN | ||
331 | select IRQ_DOMAIN_HIERARCHY | ||
332 | help | ||
333 | Support Meson SoC Family GPIO Interrupt Multiplexer | ||
334 | |||
327 | endmenu | 335 | endmenu |
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 845abc107ad5..065adf4102c9 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile | |||
@@ -79,3 +79,4 @@ obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o | |||
79 | obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o | 79 | obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o |
80 | obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o | 80 | obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o |
81 | obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o | 81 | obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o |
82 | obj-$(CONFIG_MESON_IRQ_GPIO) += irq-meson-gpio.o | ||
diff --git a/drivers/irqchip/irq-aspeed-i2c-ic.c b/drivers/irqchip/irq-aspeed-i2c-ic.c index 815b88dd18f2..f20200af0992 100644 --- a/drivers/irqchip/irq-aspeed-i2c-ic.c +++ b/drivers/irqchip/irq-aspeed-i2c-ic.c | |||
@@ -76,8 +76,8 @@ static int __init aspeed_i2c_ic_of_init(struct device_node *node, | |||
76 | return -ENOMEM; | 76 | return -ENOMEM; |
77 | 77 | ||
78 | i2c_ic->base = of_iomap(node, 0); | 78 | i2c_ic->base = of_iomap(node, 0); |
79 | if (IS_ERR(i2c_ic->base)) { | 79 | if (!i2c_ic->base) { |
80 | ret = PTR_ERR(i2c_ic->base); | 80 | ret = -ENOMEM; |
81 | goto err_free_ic; | 81 | goto err_free_ic; |
82 | } | 82 | } |
83 | 83 | ||
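The fix above works because of_iomap() returns NULL on failure rather than an ERR_PTR() value, so the previous IS_ERR()/PTR_ERR() pattern could never detect a failed mapping. The corrected idiom, in general form (sketch; label and variable names are illustrative):

	base = of_iomap(node, 0);	/* NULL on failure, never an ERR_PTR() */
	if (!base) {
		ret = -ENOMEM;
		goto err_free;		/* unwind earlier allocations */
	}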
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index b009b916a292..691d20eb0bec 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Generic Broadcom Set Top Box Level 2 Interrupt controller driver | 2 | * Generic Broadcom Set Top Box Level 2 Interrupt controller driver |
3 | * | 3 | * |
4 | * Copyright (C) 2014 Broadcom Corporation | 4 | * Copyright (C) 2014-2017 Broadcom |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
@@ -31,35 +31,82 @@ | |||
31 | #include <linux/irqchip.h> | 31 | #include <linux/irqchip.h> |
32 | #include <linux/irqchip/chained_irq.h> | 32 | #include <linux/irqchip/chained_irq.h> |
33 | 33 | ||
34 | /* Register offsets in the L2 interrupt controller */ | 34 | struct brcmstb_intc_init_params { |
35 | #define CPU_STATUS 0x00 | 35 | irq_flow_handler_t handler; |
36 | #define CPU_SET 0x04 | 36 | int cpu_status; |
37 | #define CPU_CLEAR 0x08 | 37 | int cpu_clear; |
38 | #define CPU_MASK_STATUS 0x0c | 38 | int cpu_mask_status; |
39 | #define CPU_MASK_SET 0x10 | 39 | int cpu_mask_set; |
40 | #define CPU_MASK_CLEAR 0x14 | 40 | int cpu_mask_clear; |
41 | }; | ||
42 | |||
43 | /* Register offsets in the L2 latched interrupt controller */ | ||
44 | static const struct brcmstb_intc_init_params l2_edge_intc_init = { | ||
45 | .handler = handle_edge_irq, | ||
46 | .cpu_status = 0x00, | ||
47 | .cpu_clear = 0x08, | ||
48 | .cpu_mask_status = 0x0c, | ||
49 | .cpu_mask_set = 0x10, | ||
50 | .cpu_mask_clear = 0x14 | ||
51 | }; | ||
52 | |||
53 | /* Register offsets in the L2 level interrupt controller */ | ||
54 | static const struct brcmstb_intc_init_params l2_lvl_intc_init = { | ||
55 | .handler = handle_level_irq, | ||
56 | .cpu_status = 0x00, | ||
57 | .cpu_clear = -1, /* Register not present */ | ||
58 | .cpu_mask_status = 0x04, | ||
59 | .cpu_mask_set = 0x08, | ||
60 | .cpu_mask_clear = 0x0C | ||
61 | }; | ||
41 | 62 | ||
42 | /* L2 intc private data structure */ | 63 | /* L2 intc private data structure */ |
43 | struct brcmstb_l2_intc_data { | 64 | struct brcmstb_l2_intc_data { |
44 | int parent_irq; | ||
45 | void __iomem *base; | ||
46 | struct irq_domain *domain; | 65 | struct irq_domain *domain; |
66 | struct irq_chip_generic *gc; | ||
67 | int status_offset; | ||
68 | int mask_offset; | ||
47 | bool can_wake; | 69 | bool can_wake; |
48 | u32 saved_mask; /* for suspend/resume */ | 70 | u32 saved_mask; /* for suspend/resume */ |
49 | }; | 71 | }; |
50 | 72 | ||
73 | /** | ||
74 | * brcmstb_l2_mask_and_ack - Mask and ack pending interrupt | ||
75 | * @d: irq_data | ||
76 | * | ||
77 | * The chip has separate enable/disable registers instead of a single mask | ||
78 | * register, and a pending interrupt is acknowledged by setting a bit. | ||
79 | * | ||
80 | * Note: This function is generic and could easily be added to the | ||
81 | * generic irqchip implementation if there is ever a will to do so. | ||
82 | * Perhaps with a name like irq_gc_mask_disable_and_ack_set(). | ||
83 | * | ||
84 | * e.g.: https://patchwork.kernel.org/patch/9831047/ | ||
85 | */ | ||
86 | static void brcmstb_l2_mask_and_ack(struct irq_data *d) | ||
87 | { | ||
88 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
89 | struct irq_chip_type *ct = irq_data_get_chip_type(d); | ||
90 | u32 mask = d->mask; | ||
91 | |||
92 | irq_gc_lock(gc); | ||
93 | irq_reg_writel(gc, mask, ct->regs.disable); | ||
94 | *ct->mask_cache &= ~mask; | ||
95 | irq_reg_writel(gc, mask, ct->regs.ack); | ||
96 | irq_gc_unlock(gc); | ||
97 | } | ||
98 | |||
51 | static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc) | 99 | static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc) |
52 | { | 100 | { |
53 | struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc); | 101 | struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc); |
54 | struct irq_chip_generic *gc = irq_get_domain_generic_chip(b->domain, 0); | ||
55 | struct irq_chip *chip = irq_desc_get_chip(desc); | 102 | struct irq_chip *chip = irq_desc_get_chip(desc); |
56 | unsigned int irq; | 103 | unsigned int irq; |
57 | u32 status; | 104 | u32 status; |
58 | 105 | ||
59 | chained_irq_enter(chip, desc); | 106 | chained_irq_enter(chip, desc); |
60 | 107 | ||
61 | status = irq_reg_readl(gc, CPU_STATUS) & | 108 | status = irq_reg_readl(b->gc, b->status_offset) & |
62 | ~(irq_reg_readl(gc, CPU_MASK_STATUS)); | 109 | ~(irq_reg_readl(b->gc, b->mask_offset)); |
63 | 110 | ||
64 | if (status == 0) { | 111 | if (status == 0) { |
65 | raw_spin_lock(&desc->lock); | 112 | raw_spin_lock(&desc->lock); |
@@ -70,10 +117,8 @@ static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc) | |||
70 | 117 | ||
71 | do { | 118 | do { |
72 | irq = ffs(status) - 1; | 119 | irq = ffs(status) - 1; |
73 | /* ack at our level */ | ||
74 | irq_reg_writel(gc, 1 << irq, CPU_CLEAR); | ||
75 | status &= ~(1 << irq); | 120 | status &= ~(1 << irq); |
76 | generic_handle_irq(irq_find_mapping(b->domain, irq)); | 121 | generic_handle_irq(irq_linear_revmap(b->domain, irq)); |
77 | } while (status); | 122 | } while (status); |
78 | out: | 123 | out: |
79 | chained_irq_exit(chip, desc); | 124 | chained_irq_exit(chip, desc); |
@@ -82,16 +127,17 @@ out: | |||
82 | static void brcmstb_l2_intc_suspend(struct irq_data *d) | 127 | static void brcmstb_l2_intc_suspend(struct irq_data *d) |
83 | { | 128 | { |
84 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | 129 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
130 | struct irq_chip_type *ct = irq_data_get_chip_type(d); | ||
85 | struct brcmstb_l2_intc_data *b = gc->private; | 131 | struct brcmstb_l2_intc_data *b = gc->private; |
86 | 132 | ||
87 | irq_gc_lock(gc); | 133 | irq_gc_lock(gc); |
88 | /* Save the current mask */ | 134 | /* Save the current mask */ |
89 | b->saved_mask = irq_reg_readl(gc, CPU_MASK_STATUS); | 135 | b->saved_mask = irq_reg_readl(gc, ct->regs.mask); |
90 | 136 | ||
91 | if (b->can_wake) { | 137 | if (b->can_wake) { |
92 | /* Program the wakeup mask */ | 138 | /* Program the wakeup mask */ |
93 | irq_reg_writel(gc, ~gc->wake_active, CPU_MASK_SET); | 139 | irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable); |
94 | irq_reg_writel(gc, gc->wake_active, CPU_MASK_CLEAR); | 140 | irq_reg_writel(gc, gc->wake_active, ct->regs.enable); |
95 | } | 141 | } |
96 | irq_gc_unlock(gc); | 142 | irq_gc_unlock(gc); |
97 | } | 143 | } |
@@ -99,49 +145,56 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d) | |||
99 | static void brcmstb_l2_intc_resume(struct irq_data *d) | 145 | static void brcmstb_l2_intc_resume(struct irq_data *d) |
100 | { | 146 | { |
101 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | 147 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
148 | struct irq_chip_type *ct = irq_data_get_chip_type(d); | ||
102 | struct brcmstb_l2_intc_data *b = gc->private; | 149 | struct brcmstb_l2_intc_data *b = gc->private; |
103 | 150 | ||
104 | irq_gc_lock(gc); | 151 | irq_gc_lock(gc); |
105 | /* Clear unmasked non-wakeup interrupts */ | 152 | if (ct->chip.irq_ack) { |
106 | irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active, CPU_CLEAR); | 153 | /* Clear unmasked non-wakeup interrupts */ |
154 | irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active, | ||
155 | ct->regs.ack); | ||
156 | } | ||
107 | 157 | ||
108 | /* Restore the saved mask */ | 158 | /* Restore the saved mask */ |
109 | irq_reg_writel(gc, b->saved_mask, CPU_MASK_SET); | 159 | irq_reg_writel(gc, b->saved_mask, ct->regs.disable); |
110 | irq_reg_writel(gc, ~b->saved_mask, CPU_MASK_CLEAR); | 160 | irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable); |
111 | irq_gc_unlock(gc); | 161 | irq_gc_unlock(gc); |
112 | } | 162 | } |
113 | 163 | ||
114 | static int __init brcmstb_l2_intc_of_init(struct device_node *np, | 164 | static int __init brcmstb_l2_intc_of_init(struct device_node *np, |
115 | struct device_node *parent) | 165 | struct device_node *parent, |
166 | const struct brcmstb_intc_init_params | ||
167 | *init_params) | ||
116 | { | 168 | { |
117 | unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; | 169 | unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; |
118 | struct brcmstb_l2_intc_data *data; | 170 | struct brcmstb_l2_intc_data *data; |
119 | struct irq_chip_generic *gc; | ||
120 | struct irq_chip_type *ct; | 171 | struct irq_chip_type *ct; |
121 | int ret; | 172 | int ret; |
122 | unsigned int flags; | 173 | unsigned int flags; |
174 | int parent_irq; | ||
175 | void __iomem *base; | ||
123 | 176 | ||
124 | data = kzalloc(sizeof(*data), GFP_KERNEL); | 177 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
125 | if (!data) | 178 | if (!data) |
126 | return -ENOMEM; | 179 | return -ENOMEM; |
127 | 180 | ||
128 | data->base = of_iomap(np, 0); | 181 | base = of_iomap(np, 0); |
129 | if (!data->base) { | 182 | if (!base) { |
130 | pr_err("failed to remap intc L2 registers\n"); | 183 | pr_err("failed to remap intc L2 registers\n"); |
131 | ret = -ENOMEM; | 184 | ret = -ENOMEM; |
132 | goto out_free; | 185 | goto out_free; |
133 | } | 186 | } |
134 | 187 | ||
135 | /* Disable all interrupts by default */ | 188 | /* Disable all interrupts by default */ |
136 | writel(0xffffffff, data->base + CPU_MASK_SET); | 189 | writel(0xffffffff, base + init_params->cpu_mask_set); |
137 | 190 | ||
138 | /* Wakeup interrupts may be retained from S5 (cold boot) */ | 191 | /* Wakeup interrupts may be retained from S5 (cold boot) */ |
139 | data->can_wake = of_property_read_bool(np, "brcm,irq-can-wake"); | 192 | data->can_wake = of_property_read_bool(np, "brcm,irq-can-wake"); |
140 | if (!data->can_wake) | 193 | if (!data->can_wake && (init_params->cpu_clear >= 0)) |
141 | writel(0xffffffff, data->base + CPU_CLEAR); | 194 | writel(0xffffffff, base + init_params->cpu_clear); |
142 | 195 | ||
143 | data->parent_irq = irq_of_parse_and_map(np, 0); | 196 | parent_irq = irq_of_parse_and_map(np, 0); |
144 | if (!data->parent_irq) { | 197 | if (!parent_irq) { |
145 | pr_err("failed to find parent interrupt\n"); | 198 | pr_err("failed to find parent interrupt\n"); |
146 | ret = -EINVAL; | 199 | ret = -EINVAL; |
147 | goto out_unmap; | 200 | goto out_unmap; |
@@ -163,29 +216,39 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np, | |||
163 | 216 | ||
164 | /* Allocate a single Generic IRQ chip for this node */ | 217 | /* Allocate a single Generic IRQ chip for this node */ |
165 | ret = irq_alloc_domain_generic_chips(data->domain, 32, 1, | 218 | ret = irq_alloc_domain_generic_chips(data->domain, 32, 1, |
166 | np->full_name, handle_edge_irq, clr, 0, flags); | 219 | np->full_name, init_params->handler, clr, 0, flags); |
167 | if (ret) { | 220 | if (ret) { |
168 | pr_err("failed to allocate generic irq chip\n"); | 221 | pr_err("failed to allocate generic irq chip\n"); |
169 | goto out_free_domain; | 222 | goto out_free_domain; |
170 | } | 223 | } |
171 | 224 | ||
172 | /* Set the IRQ chaining logic */ | 225 | /* Set the IRQ chaining logic */ |
173 | irq_set_chained_handler_and_data(data->parent_irq, | 226 | irq_set_chained_handler_and_data(parent_irq, |
174 | brcmstb_l2_intc_irq_handle, data); | 227 | brcmstb_l2_intc_irq_handle, data); |
175 | 228 | ||
176 | gc = irq_get_domain_generic_chip(data->domain, 0); | 229 | data->gc = irq_get_domain_generic_chip(data->domain, 0); |
177 | gc->reg_base = data->base; | 230 | data->gc->reg_base = base; |
178 | gc->private = data; | 231 | data->gc->private = data; |
179 | ct = gc->chip_types; | 232 | data->status_offset = init_params->cpu_status; |
180 | 233 | data->mask_offset = init_params->cpu_mask_status; | |
181 | ct->chip.irq_ack = irq_gc_ack_set_bit; | 234 | |
182 | ct->regs.ack = CPU_CLEAR; | 235 | ct = data->gc->chip_types; |
236 | |||
237 | if (init_params->cpu_clear >= 0) { | ||
238 | ct->regs.ack = init_params->cpu_clear; | ||
239 | ct->chip.irq_ack = irq_gc_ack_set_bit; | ||
240 | ct->chip.irq_mask_ack = brcmstb_l2_mask_and_ack; | ||
241 | } else { | ||
242 | /* No Ack - but still slightly more efficient to define this */ | ||
243 | ct->chip.irq_mask_ack = irq_gc_mask_disable_reg; | ||
244 | } | ||
183 | 245 | ||
184 | ct->chip.irq_mask = irq_gc_mask_disable_reg; | 246 | ct->chip.irq_mask = irq_gc_mask_disable_reg; |
185 | ct->regs.disable = CPU_MASK_SET; | 247 | ct->regs.disable = init_params->cpu_mask_set; |
248 | ct->regs.mask = init_params->cpu_mask_status; | ||
186 | 249 | ||
187 | ct->chip.irq_unmask = irq_gc_unmask_enable_reg; | 250 | ct->chip.irq_unmask = irq_gc_unmask_enable_reg; |
188 | ct->regs.enable = CPU_MASK_CLEAR; | 251 | ct->regs.enable = init_params->cpu_mask_clear; |
189 | 252 | ||
190 | ct->chip.irq_suspend = brcmstb_l2_intc_suspend; | 253 | ct->chip.irq_suspend = brcmstb_l2_intc_suspend; |
191 | ct->chip.irq_resume = brcmstb_l2_intc_resume; | 254 | ct->chip.irq_resume = brcmstb_l2_intc_resume; |
@@ -195,21 +258,35 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np, | |||
195 | /* This IRQ chip can wake the system, set all child interrupts | 258 | /* This IRQ chip can wake the system, set all child interrupts |
196 | * in wake_enabled mask | 259 | * in wake_enabled mask |
197 | */ | 260 | */ |
198 | gc->wake_enabled = 0xffffffff; | 261 | data->gc->wake_enabled = 0xffffffff; |
199 | ct->chip.irq_set_wake = irq_gc_set_wake; | 262 | ct->chip.irq_set_wake = irq_gc_set_wake; |
200 | } | 263 | } |
201 | 264 | ||
202 | pr_info("registered L2 intc (mem: 0x%p, parent irq: %d)\n", | 265 | pr_info("registered L2 intc (mem: 0x%p, parent irq: %d)\n", |
203 | data->base, data->parent_irq); | 266 | base, parent_irq); |
204 | 267 | ||
205 | return 0; | 268 | return 0; |
206 | 269 | ||
207 | out_free_domain: | 270 | out_free_domain: |
208 | irq_domain_remove(data->domain); | 271 | irq_domain_remove(data->domain); |
209 | out_unmap: | 272 | out_unmap: |
210 | iounmap(data->base); | 273 | iounmap(base); |
211 | out_free: | 274 | out_free: |
212 | kfree(data); | 275 | kfree(data); |
213 | return ret; | 276 | return ret; |
214 | } | 277 | } |
215 | IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_intc_of_init); | 278 | |
279 | int __init brcmstb_l2_edge_intc_of_init(struct device_node *np, | ||
280 | struct device_node *parent) | ||
281 | { | ||
282 | return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init); | ||
283 | } | ||
284 | IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_edge_intc_of_init); | ||
285 | |||
286 | int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np, | ||
287 | struct device_node *parent) | ||
288 | { | ||
289 | return brcmstb_l2_intc_of_init(np, parent, &l2_lvl_intc_init); | ||
290 | } | ||
291 | IRQCHIP_DECLARE(bcm7271_l2_intc, "brcm,bcm7271-l2-intc", | ||
292 | brcmstb_l2_lvl_intc_of_init); | ||
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c index 9ae71804b5dd..30017df5b54c 100644 --- a/drivers/irqchip/irq-gic-common.c +++ b/drivers/irqchip/irq-gic-common.c | |||
@@ -40,8 +40,9 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks, | |||
40 | for (; quirks->desc; quirks++) { | 40 | for (; quirks->desc; quirks++) { |
41 | if (quirks->iidr != (quirks->mask & iidr)) | 41 | if (quirks->iidr != (quirks->mask & iidr)) |
42 | continue; | 42 | continue; |
43 | quirks->init(data); | 43 | if (quirks->init(data)) |
44 | pr_info("GIC: enabling workaround for %s\n", quirks->desc); | 44 | pr_info("GIC: enabling workaround for %s\n", |
45 | quirks->desc); | ||
45 | } | 46 | } |
46 | } | 47 | } |
47 | 48 | ||
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h index 205e5fddf6da..3919cd7c5285 100644 --- a/drivers/irqchip/irq-gic-common.h +++ b/drivers/irqchip/irq-gic-common.h | |||
@@ -23,7 +23,7 @@ | |||
23 | 23 | ||
24 | struct gic_quirk { | 24 | struct gic_quirk { |
25 | const char *desc; | 25 | const char *desc; |
26 | void (*init)(void *data); | 26 | bool (*init)(void *data); |
27 | u32 iidr; | 27 | u32 iidr; |
28 | u32 mask; | 28 | u32 mask; |
29 | }; | 29 | }; |
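With the quirk init callback now returning bool, gic_enable_quirks() only prints the "enabling workaround" message when the init function reports success. A hedged illustration of how a quirk table entry uses the new signature (the function name, description, and IIDR value are illustrative, not taken from this patch):

static bool its_enable_quirk_example(void *data)
{
	struct its_node *its = data;

	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;	/* illustrative */
	return true;	/* false would leave the quirk unreported */
}

static const struct gic_quirk its_quirks[] = {
	{
		.desc	= "ITS: example erratum workaround",
		.iidr	= 0x00000000,	/* illustrative implementer/part match */
		.mask	= 0xffffffff,
		.init	= its_enable_quirk_example,
	},
	{
	}
};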
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 20e2b5fac7b9..6a74f0497f82 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
@@ -83,6 +83,8 @@ struct its_baser { | |||
83 | u32 psz; | 83 | u32 psz; |
84 | }; | 84 | }; |
85 | 85 | ||
86 | struct its_device; | ||
87 | |||
86 | /* | 88 | /* |
87 | * The ITS structure - contains most of the infrastructure, with the | 89 | * The ITS structure - contains most of the infrastructure, with the |
88 | * top-level MSI domain, the command queue, the collections, and the | 90 | * top-level MSI domain, the command queue, the collections, and the |
@@ -97,12 +99,18 @@ struct its_node { | |||
97 | struct its_cmd_block *cmd_write; | 99 | struct its_cmd_block *cmd_write; |
98 | struct its_baser tables[GITS_BASER_NR_REGS]; | 100 | struct its_baser tables[GITS_BASER_NR_REGS]; |
99 | struct its_collection *collections; | 101 | struct its_collection *collections; |
102 | struct fwnode_handle *fwnode_handle; | ||
103 | u64 (*get_msi_base)(struct its_device *its_dev); | ||
100 | struct list_head its_device_list; | 104 | struct list_head its_device_list; |
101 | u64 flags; | 105 | u64 flags; |
106 | unsigned long list_nr; | ||
102 | u32 ite_size; | 107 | u32 ite_size; |
103 | u32 device_ids; | 108 | u32 device_ids; |
104 | int numa_node; | 109 | int numa_node; |
110 | unsigned int msi_domain_flags; | ||
111 | u32 pre_its_base; /* for Socionext Synquacer */ | ||
105 | bool is_v4; | 112 | bool is_v4; |
113 | int vlpi_redist_offset; | ||
106 | }; | 114 | }; |
107 | 115 | ||
108 | #define ITS_ITT_ALIGN SZ_256 | 116 | #define ITS_ITT_ALIGN SZ_256 |
@@ -148,12 +156,6 @@ static DEFINE_SPINLOCK(its_lock); | |||
148 | static struct rdists *gic_rdists; | 156 | static struct rdists *gic_rdists; |
149 | static struct irq_domain *its_parent; | 157 | static struct irq_domain *its_parent; |
150 | 158 | ||
151 | /* | ||
152 | * We have a maximum number of 16 ITSs in the whole system if we're | ||
153 | * using the ITSList mechanism | ||
154 | */ | ||
155 | #define ITS_LIST_MAX 16 | ||
156 | |||
157 | static unsigned long its_list_map; | 159 | static unsigned long its_list_map; |
158 | static u16 vmovp_seq_num; | 160 | static u16 vmovp_seq_num; |
159 | static DEFINE_RAW_SPINLOCK(vmovp_lock); | 161 | static DEFINE_RAW_SPINLOCK(vmovp_lock); |
@@ -268,10 +270,12 @@ struct its_cmd_block { | |||
268 | #define ITS_CMD_QUEUE_SZ SZ_64K | 270 | #define ITS_CMD_QUEUE_SZ SZ_64K |
269 | #define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block)) | 271 | #define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block)) |
270 | 272 | ||
271 | typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *, | 273 | typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *, |
274 | struct its_cmd_block *, | ||
272 | struct its_cmd_desc *); | 275 | struct its_cmd_desc *); |
273 | 276 | ||
274 | typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_cmd_block *, | 277 | typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *, |
278 | struct its_cmd_block *, | ||
275 | struct its_cmd_desc *); | 279 | struct its_cmd_desc *); |
276 | 280 | ||
277 | static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l) | 281 | static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l) |
@@ -375,7 +379,8 @@ static inline void its_fixup_cmd(struct its_cmd_block *cmd) | |||
375 | cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]); | 379 | cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]); |
376 | } | 380 | } |
377 | 381 | ||
378 | static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd, | 382 | static struct its_collection *its_build_mapd_cmd(struct its_node *its, |
383 | struct its_cmd_block *cmd, | ||
379 | struct its_cmd_desc *desc) | 384 | struct its_cmd_desc *desc) |
380 | { | 385 | { |
381 | unsigned long itt_addr; | 386 | unsigned long itt_addr; |
@@ -395,7 +400,8 @@ static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd, | |||
395 | return NULL; | 400 | return NULL; |
396 | } | 401 | } |
397 | 402 | ||
398 | static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd, | 403 | static struct its_collection *its_build_mapc_cmd(struct its_node *its, |
404 | struct its_cmd_block *cmd, | ||
399 | struct its_cmd_desc *desc) | 405 | struct its_cmd_desc *desc) |
400 | { | 406 | { |
401 | its_encode_cmd(cmd, GITS_CMD_MAPC); | 407 | its_encode_cmd(cmd, GITS_CMD_MAPC); |
@@ -408,7 +414,8 @@ static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd, | |||
408 | return desc->its_mapc_cmd.col; | 414 | return desc->its_mapc_cmd.col; |
409 | } | 415 | } |
410 | 416 | ||
411 | static struct its_collection *its_build_mapti_cmd(struct its_cmd_block *cmd, | 417 | static struct its_collection *its_build_mapti_cmd(struct its_node *its, |
418 | struct its_cmd_block *cmd, | ||
412 | struct its_cmd_desc *desc) | 419 | struct its_cmd_desc *desc) |
413 | { | 420 | { |
414 | struct its_collection *col; | 421 | struct its_collection *col; |
@@ -427,7 +434,8 @@ static struct its_collection *its_build_mapti_cmd(struct its_cmd_block *cmd, | |||
427 | return col; | 434 | return col; |
428 | } | 435 | } |
429 | 436 | ||
430 | static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd, | 437 | static struct its_collection *its_build_movi_cmd(struct its_node *its, |
438 | struct its_cmd_block *cmd, | ||
431 | struct its_cmd_desc *desc) | 439 | struct its_cmd_desc *desc) |
432 | { | 440 | { |
433 | struct its_collection *col; | 441 | struct its_collection *col; |
@@ -445,7 +453,8 @@ static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd, | |||
445 | return col; | 453 | return col; |
446 | } | 454 | } |
447 | 455 | ||
448 | static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd, | 456 | static struct its_collection *its_build_discard_cmd(struct its_node *its, |
457 | struct its_cmd_block *cmd, | ||
449 | struct its_cmd_desc *desc) | 458 | struct its_cmd_desc *desc) |
450 | { | 459 | { |
451 | struct its_collection *col; | 460 | struct its_collection *col; |
@@ -462,7 +471,8 @@ static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd, | |||
462 | return col; | 471 | return col; |
463 | } | 472 | } |
464 | 473 | ||
465 | static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd, | 474 | static struct its_collection *its_build_inv_cmd(struct its_node *its, |
475 | struct its_cmd_block *cmd, | ||
466 | struct its_cmd_desc *desc) | 476 | struct its_cmd_desc *desc) |
467 | { | 477 | { |
468 | struct its_collection *col; | 478 | struct its_collection *col; |
@@ -479,7 +489,8 @@ static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd, | |||
479 | return col; | 489 | return col; |
480 | } | 490 | } |
481 | 491 | ||
482 | static struct its_collection *its_build_int_cmd(struct its_cmd_block *cmd, | 492 | static struct its_collection *its_build_int_cmd(struct its_node *its, |
493 | struct its_cmd_block *cmd, | ||
483 | struct its_cmd_desc *desc) | 494 | struct its_cmd_desc *desc) |
484 | { | 495 | { |
485 | struct its_collection *col; | 496 | struct its_collection *col; |
@@ -496,7 +507,8 @@ static struct its_collection *its_build_int_cmd(struct its_cmd_block *cmd, | |||
496 | return col; | 507 | return col; |
497 | } | 508 | } |
498 | 509 | ||
499 | static struct its_collection *its_build_clear_cmd(struct its_cmd_block *cmd, | 510 | static struct its_collection *its_build_clear_cmd(struct its_node *its, |
511 | struct its_cmd_block *cmd, | ||
500 | struct its_cmd_desc *desc) | 512 | struct its_cmd_desc *desc) |
501 | { | 513 | { |
502 | struct its_collection *col; | 514 | struct its_collection *col; |
@@ -513,7 +525,8 @@ static struct its_collection *its_build_clear_cmd(struct its_cmd_block *cmd, | |||
513 | return col; | 525 | return col; |
514 | } | 526 | } |
515 | 527 | ||
516 | static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd, | 528 | static struct its_collection *its_build_invall_cmd(struct its_node *its, |
529 | struct its_cmd_block *cmd, | ||
517 | struct its_cmd_desc *desc) | 530 | struct its_cmd_desc *desc) |
518 | { | 531 | { |
519 | its_encode_cmd(cmd, GITS_CMD_INVALL); | 532 | its_encode_cmd(cmd, GITS_CMD_INVALL); |
@@ -524,7 +537,8 @@ static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd, | |||
524 | return NULL; | 537 | return NULL; |
525 | } | 538 | } |
526 | 539 | ||
527 | static struct its_vpe *its_build_vinvall_cmd(struct its_cmd_block *cmd, | 540 | static struct its_vpe *its_build_vinvall_cmd(struct its_node *its, |
541 | struct its_cmd_block *cmd, | ||
528 | struct its_cmd_desc *desc) | 542 | struct its_cmd_desc *desc) |
529 | { | 543 | { |
530 | its_encode_cmd(cmd, GITS_CMD_VINVALL); | 544 | its_encode_cmd(cmd, GITS_CMD_VINVALL); |
@@ -535,17 +549,20 @@ static struct its_vpe *its_build_vinvall_cmd(struct its_cmd_block *cmd, | |||
535 | return desc->its_vinvall_cmd.vpe; | 549 | return desc->its_vinvall_cmd.vpe; |
536 | } | 550 | } |
537 | 551 | ||
538 | static struct its_vpe *its_build_vmapp_cmd(struct its_cmd_block *cmd, | 552 | static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, |
553 | struct its_cmd_block *cmd, | ||
539 | struct its_cmd_desc *desc) | 554 | struct its_cmd_desc *desc) |
540 | { | 555 | { |
541 | unsigned long vpt_addr; | 556 | unsigned long vpt_addr; |
557 | u64 target; | ||
542 | 558 | ||
543 | vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); | 559 | vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); |
560 | target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset; | ||
544 | 561 | ||
545 | its_encode_cmd(cmd, GITS_CMD_VMAPP); | 562 | its_encode_cmd(cmd, GITS_CMD_VMAPP); |
546 | its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id); | 563 | its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id); |
547 | its_encode_valid(cmd, desc->its_vmapp_cmd.valid); | 564 | its_encode_valid(cmd, desc->its_vmapp_cmd.valid); |
548 | its_encode_target(cmd, desc->its_vmapp_cmd.col->target_address); | 565 | its_encode_target(cmd, target); |
549 | its_encode_vpt_addr(cmd, vpt_addr); | 566 | its_encode_vpt_addr(cmd, vpt_addr); |
550 | its_encode_vpt_size(cmd, LPI_NRBITS - 1); | 567 | its_encode_vpt_size(cmd, LPI_NRBITS - 1); |
551 | 568 | ||
@@ -554,7 +571,8 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_cmd_block *cmd, | |||
554 | return desc->its_vmapp_cmd.vpe; | 571 | return desc->its_vmapp_cmd.vpe; |
555 | } | 572 | } |
556 | 573 | ||
557 | static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd, | 574 | static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, |
575 | struct its_cmd_block *cmd, | ||
558 | struct its_cmd_desc *desc) | 576 | struct its_cmd_desc *desc) |
559 | { | 577 | { |
560 | u32 db; | 578 | u32 db; |
@@ -576,7 +594,8 @@ static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd, | |||
576 | return desc->its_vmapti_cmd.vpe; | 594 | return desc->its_vmapti_cmd.vpe; |
577 | } | 595 | } |
578 | 596 | ||
579 | static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd, | 597 | static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, |
598 | struct its_cmd_block *cmd, | ||
580 | struct its_cmd_desc *desc) | 599 | struct its_cmd_desc *desc) |
581 | { | 600 | { |
582 | u32 db; | 601 | u32 db; |
@@ -598,14 +617,18 @@ static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd, | |||
598 | return desc->its_vmovi_cmd.vpe; | 617 | return desc->its_vmovi_cmd.vpe; |
599 | } | 618 | } |
600 | 619 | ||
601 | static struct its_vpe *its_build_vmovp_cmd(struct its_cmd_block *cmd, | 620 | static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, |
621 | struct its_cmd_block *cmd, | ||
602 | struct its_cmd_desc *desc) | 622 | struct its_cmd_desc *desc) |
603 | { | 623 | { |
624 | u64 target; | ||
625 | |||
626 | target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset; | ||
604 | its_encode_cmd(cmd, GITS_CMD_VMOVP); | 627 | its_encode_cmd(cmd, GITS_CMD_VMOVP); |
605 | its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num); | 628 | its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num); |
606 | its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list); | 629 | its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list); |
607 | its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id); | 630 | its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id); |
608 | its_encode_target(cmd, desc->its_vmovp_cmd.col->target_address); | 631 | its_encode_target(cmd, target); |
609 | 632 | ||
610 | its_fixup_cmd(cmd); | 633 | its_fixup_cmd(cmd); |
611 | 634 | ||
@@ -684,9 +707,9 @@ static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd) | |||
684 | dsb(ishst); | 707 | dsb(ishst); |
685 | } | 708 | } |
686 | 709 | ||
687 | static void its_wait_for_range_completion(struct its_node *its, | 710 | static int its_wait_for_range_completion(struct its_node *its, |
688 | struct its_cmd_block *from, | 711 | struct its_cmd_block *from, |
689 | struct its_cmd_block *to) | 712 | struct its_cmd_block *to) |
690 | { | 713 | { |
691 | u64 rd_idx, from_idx, to_idx; | 714 | u64 rd_idx, from_idx, to_idx; |
692 | u32 count = 1000000; /* 1s! */ | 715 | u32 count = 1000000; /* 1s! */ |
@@ -707,12 +730,15 @@ static void its_wait_for_range_completion(struct its_node *its, | |||
707 | 730 | ||
708 | count--; | 731 | count--; |
709 | if (!count) { | 732 | if (!count) { |
710 | pr_err_ratelimited("ITS queue timeout\n"); | 733 | pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n", |
711 | return; | 734 | from_idx, to_idx, rd_idx); |
735 | return -1; | ||
712 | } | 736 | } |
713 | cpu_relax(); | 737 | cpu_relax(); |
714 | udelay(1); | 738 | udelay(1); |
715 | } | 739 | } |
740 | |||
741 | return 0; | ||
716 | } | 742 | } |
717 | 743 | ||
718 | /* Warning, macro hell follows */ | 744 | /* Warning, macro hell follows */ |
@@ -732,7 +758,7 @@ void name(struct its_node *its, \ | |||
732 | raw_spin_unlock_irqrestore(&its->lock, flags); \ | 758 | raw_spin_unlock_irqrestore(&its->lock, flags); \ |
733 | return; \ | 759 | return; \ |
734 | } \ | 760 | } \ |
735 | sync_obj = builder(cmd, desc); \ | 761 | sync_obj = builder(its, cmd, desc); \ |
736 | its_flush_cmd(its, cmd); \ | 762 | its_flush_cmd(its, cmd); \ |
737 | \ | 763 | \ |
738 | if (sync_obj) { \ | 764 | if (sync_obj) { \ |
@@ -740,7 +766,7 @@ void name(struct its_node *its, \ | |||
740 | if (!sync_cmd) \ | 766 | if (!sync_cmd) \ |
741 | goto post; \ | 767 | goto post; \ |
742 | \ | 768 | \ |
743 | buildfn(sync_cmd, sync_obj); \ | 769 | buildfn(its, sync_cmd, sync_obj); \ |
744 | its_flush_cmd(its, sync_cmd); \ | 770 | its_flush_cmd(its, sync_cmd); \ |
745 | } \ | 771 | } \ |
746 | \ | 772 | \ |
@@ -748,10 +774,12 @@ post: \ | |||
748 | next_cmd = its_post_commands(its); \ | 774 | next_cmd = its_post_commands(its); \ |
749 | raw_spin_unlock_irqrestore(&its->lock, flags); \ | 775 | raw_spin_unlock_irqrestore(&its->lock, flags); \ |
750 | \ | 776 | \ |
751 | its_wait_for_range_completion(its, cmd, next_cmd); \ | 777 | if (its_wait_for_range_completion(its, cmd, next_cmd)) \ |
778 | pr_err_ratelimited("ITS cmd %ps failed\n", builder); \ | ||
752 | } | 779 | } |
753 | 780 | ||
754 | static void its_build_sync_cmd(struct its_cmd_block *sync_cmd, | 781 | static void its_build_sync_cmd(struct its_node *its, |
782 | struct its_cmd_block *sync_cmd, | ||
755 | struct its_collection *sync_col) | 783 | struct its_collection *sync_col) |
756 | { | 784 | { |
757 | its_encode_cmd(sync_cmd, GITS_CMD_SYNC); | 785 | its_encode_cmd(sync_cmd, GITS_CMD_SYNC); |
@@ -763,7 +791,8 @@ static void its_build_sync_cmd(struct its_cmd_block *sync_cmd, | |||
763 | static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t, | 791 | static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t, |
764 | struct its_collection, its_build_sync_cmd) | 792 | struct its_collection, its_build_sync_cmd) |
765 | 793 | ||
766 | static void its_build_vsync_cmd(struct its_cmd_block *sync_cmd, | 794 | static void its_build_vsync_cmd(struct its_node *its, |
795 | struct its_cmd_block *sync_cmd, | ||
767 | struct its_vpe *sync_vpe) | 796 | struct its_vpe *sync_vpe) |
768 | { | 797 | { |
769 | its_encode_cmd(sync_cmd, GITS_CMD_VSYNC); | 798 | its_encode_cmd(sync_cmd, GITS_CMD_VSYNC); |
@@ -895,21 +924,16 @@ static void its_send_vmovi(struct its_device *dev, u32 id) | |||
895 | its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc); | 924 | its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc); |
896 | } | 925 | } |
897 | 926 | ||
898 | static void its_send_vmapp(struct its_vpe *vpe, bool valid) | 927 | static void its_send_vmapp(struct its_node *its, |
928 | struct its_vpe *vpe, bool valid) | ||
899 | { | 929 | { |
900 | struct its_cmd_desc desc; | 930 | struct its_cmd_desc desc; |
901 | struct its_node *its; | ||
902 | 931 | ||
903 | desc.its_vmapp_cmd.vpe = vpe; | 932 | desc.its_vmapp_cmd.vpe = vpe; |
904 | desc.its_vmapp_cmd.valid = valid; | 933 | desc.its_vmapp_cmd.valid = valid; |
934 | desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx]; | ||
905 | 935 | ||
906 | list_for_each_entry(its, &its_nodes, entry) { | 936 | its_send_single_vcommand(its, its_build_vmapp_cmd, &desc); |
907 | if (!its->is_v4) | ||
908 | continue; | ||
909 | |||
910 | desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx]; | ||
911 | its_send_single_vcommand(its, its_build_vmapp_cmd, &desc); | ||
912 | } | ||
913 | } | 937 | } |
914 | 938 | ||
915 | static void its_send_vmovp(struct its_vpe *vpe) | 939 | static void its_send_vmovp(struct its_vpe *vpe) |
@@ -947,6 +971,9 @@ static void its_send_vmovp(struct its_vpe *vpe) | |||
947 | if (!its->is_v4) | 971 | if (!its->is_v4) |
948 | continue; | 972 | continue; |
949 | 973 | ||
974 | if (!vpe->its_vm->vlpi_count[its->list_nr]) | ||
975 | continue; | ||
976 | |||
950 | desc.its_vmovp_cmd.col = &its->collections[col_id]; | 977 | desc.its_vmovp_cmd.col = &its->collections[col_id]; |
951 | its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); | 978 | its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); |
952 | } | 979 | } |
@@ -954,18 +981,12 @@ static void its_send_vmovp(struct its_vpe *vpe) | |||
954 | raw_spin_unlock_irqrestore(&vmovp_lock, flags); | 981 | raw_spin_unlock_irqrestore(&vmovp_lock, flags); |
955 | } | 982 | } |
956 | 983 | ||
957 | static void its_send_vinvall(struct its_vpe *vpe) | 984 | static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) |
958 | { | 985 | { |
959 | struct its_cmd_desc desc; | 986 | struct its_cmd_desc desc; |
960 | struct its_node *its; | ||
961 | 987 | ||
962 | desc.its_vinvall_cmd.vpe = vpe; | 988 | desc.its_vinvall_cmd.vpe = vpe; |
963 | 989 | its_send_single_vcommand(its, its_build_vinvall_cmd, &desc); | |
964 | list_for_each_entry(its, &its_nodes, entry) { | ||
965 | if (!its->is_v4) | ||
966 | continue; | ||
967 | its_send_single_vcommand(its, its_build_vinvall_cmd, &desc); | ||
968 | } | ||
969 | } | 990 | } |
970 | 991 | ||
971 | /* | 992 | /* |
@@ -1095,6 +1116,13 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | |||
1095 | return IRQ_SET_MASK_OK_DONE; | 1116 | return IRQ_SET_MASK_OK_DONE; |
1096 | } | 1117 | } |
1097 | 1118 | ||
1119 | static u64 its_irq_get_msi_base(struct its_device *its_dev) | ||
1120 | { | ||
1121 | struct its_node *its = its_dev->its; | ||
1122 | |||
1123 | return its->phys_base + GITS_TRANSLATER; | ||
1124 | } | ||
1125 | |||
1098 | static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) | 1126 | static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) |
1099 | { | 1127 | { |
1100 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | 1128 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
@@ -1102,7 +1130,7 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) | |||
1102 | u64 addr; | 1130 | u64 addr; |
1103 | 1131 | ||
1104 | its = its_dev->its; | 1132 | its = its_dev->its; |
1105 | addr = its->phys_base + GITS_TRANSLATER; | 1133 | addr = its->get_msi_base(its_dev); |
1106 | 1134 | ||
1107 | msg->address_lo = lower_32_bits(addr); | 1135 | msg->address_lo = lower_32_bits(addr); |
1108 | msg->address_hi = upper_32_bits(addr); | 1136 | msg->address_hi = upper_32_bits(addr); |
@@ -1129,6 +1157,60 @@ static int its_irq_set_irqchip_state(struct irq_data *d, | |||
1129 | return 0; | 1157 | return 0; |
1130 | } | 1158 | } |
1131 | 1159 | ||
1160 | static void its_map_vm(struct its_node *its, struct its_vm *vm) | ||
1161 | { | ||
1162 | unsigned long flags; | ||
1163 | |||
1164 | /* Not using the ITS list? Everything is always mapped. */ | ||
1165 | if (!its_list_map) | ||
1166 | return; | ||
1167 | |||
1168 | raw_spin_lock_irqsave(&vmovp_lock, flags); | ||
1169 | |||
1170 | /* | ||
1171 | * If the VM wasn't mapped yet, iterate over the vpes and get | ||
1172 | * them mapped now. | ||
1173 | */ | ||
1174 | vm->vlpi_count[its->list_nr]++; | ||
1175 | |||
1176 | if (vm->vlpi_count[its->list_nr] == 1) { | ||
1177 | int i; | ||
1178 | |||
1179 | for (i = 0; i < vm->nr_vpes; i++) { | ||
1180 | struct its_vpe *vpe = vm->vpes[i]; | ||
1181 | struct irq_data *d = irq_get_irq_data(vpe->irq); | ||
1182 | |||
1183 | /* Map the VPE to the first possible CPU */ | ||
1184 | vpe->col_idx = cpumask_first(cpu_online_mask); | ||
1185 | its_send_vmapp(its, vpe, true); | ||
1186 | its_send_vinvall(its, vpe); | ||
1187 | irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); | ||
1188 | } | ||
1189 | } | ||
1190 | |||
1191 | raw_spin_unlock_irqrestore(&vmovp_lock, flags); | ||
1192 | } | ||
1193 | |||
1194 | static void its_unmap_vm(struct its_node *its, struct its_vm *vm) | ||
1195 | { | ||
1196 | unsigned long flags; | ||
1197 | |||
1198 | /* Not using the ITS list? Everything is always mapped. */ | ||
1199 | if (!its_list_map) | ||
1200 | return; | ||
1201 | |||
1202 | raw_spin_lock_irqsave(&vmovp_lock, flags); | ||
1203 | |||
1204 | if (!--vm->vlpi_count[its->list_nr]) { | ||
1205 | int i; | ||
1206 | |||
1207 | for (i = 0; i < vm->nr_vpes; i++) | ||
1208 | its_send_vmapp(its, vm->vpes[i], false); | ||
1209 | } | ||
1210 | |||
1211 | raw_spin_unlock_irqrestore(&vmovp_lock, flags); | ||
1212 | } | ||
1213 | |||
1132 | static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) | 1214 | static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) |
1133 | { | 1215 | { |
1134 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | 1216 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
@@ -1164,6 +1246,9 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) | |||
1164 | /* Already mapped, move it around */ | 1246 | /* Already mapped, move it around */ |
1165 | its_send_vmovi(its_dev, event); | 1247 | its_send_vmovi(its_dev, event); |
1166 | } else { | 1248 | } else { |
1249 | /* Ensure all the VPEs are mapped on this ITS */ | ||
1250 | its_map_vm(its_dev->its, info->map->vm); | ||
1251 | |||
1167 | /* Drop the physical mapping */ | 1252 | /* Drop the physical mapping */ |
1168 | its_send_discard(its_dev, event); | 1253 | its_send_discard(its_dev, event); |
1169 | 1254 | ||
@@ -1225,6 +1310,9 @@ static int its_vlpi_unmap(struct irq_data *d) | |||
1225 | LPI_PROP_ENABLED | | 1310 | LPI_PROP_ENABLED | |
1226 | LPI_PROP_GROUP1)); | 1311 | LPI_PROP_GROUP1)); |
1227 | 1312 | ||
1313 | /* Potentially unmap the VM from this ITS */ | ||
1314 | its_unmap_vm(its_dev->its, its_dev->event_map.vm); | ||
1315 | |||
1228 | /* | 1316 | /* |
1229 | * Drop the refcount and make the device available again if | 1317 | * Drop the refcount and make the device available again if |
1230 | * this was the last VLPI. | 1318 | * this was the last VLPI. |
@@ -1650,23 +1738,14 @@ static void its_free_tables(struct its_node *its) | |||
1650 | 1738 | ||
1651 | static int its_alloc_tables(struct its_node *its) | 1739 | static int its_alloc_tables(struct its_node *its) |
1652 | { | 1740 | { |
1653 | u64 typer = gic_read_typer(its->base + GITS_TYPER); | ||
1654 | u32 ids = GITS_TYPER_DEVBITS(typer); | ||
1655 | u64 shr = GITS_BASER_InnerShareable; | 1741 | u64 shr = GITS_BASER_InnerShareable; |
1656 | u64 cache = GITS_BASER_RaWaWb; | 1742 | u64 cache = GITS_BASER_RaWaWb; |
1657 | u32 psz = SZ_64K; | 1743 | u32 psz = SZ_64K; |
1658 | int err, i; | 1744 | int err, i; |
1659 | 1745 | ||
1660 | if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) { | 1746 | if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) |
1661 | /* | 1747 | /* erratum 24313: ignore memory access type */ |
1662 | * erratum 22375: only alloc 8MB table size | 1748 | cache = GITS_BASER_nCnB; |
1663 | * erratum 24313: ignore memory access type | ||
1664 | */ | ||
1665 | cache = GITS_BASER_nCnB; | ||
1666 | ids = 0x14; /* 20 bits, 8MB */ | ||
1667 | } | ||
1668 | |||
1669 | its->device_ids = ids; | ||
1670 | 1749 | ||
1671 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { | 1750 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { |
1672 | struct its_baser *baser = its->tables + i; | 1751 | struct its_baser *baser = its->tables + i; |
@@ -2372,6 +2451,8 @@ static int its_vpe_set_affinity(struct irq_data *d, | |||
2372 | its_vpe_db_proxy_move(vpe, from, cpu); | 2451 | its_vpe_db_proxy_move(vpe, from, cpu); |
2373 | } | 2452 | } |
2374 | 2453 | ||
2454 | irq_data_update_effective_affinity(d, cpumask_of(cpu)); | ||
2455 | |||
2375 | return IRQ_SET_MASK_OK_DONE; | 2456 | return IRQ_SET_MASK_OK_DONE; |
2376 | } | 2457 | } |
2377 | 2458 | ||
@@ -2439,6 +2520,26 @@ static void its_vpe_deschedule(struct its_vpe *vpe) | |||
2439 | } | 2520 | } |
2440 | } | 2521 | } |
2441 | 2522 | ||
2523 | static void its_vpe_invall(struct its_vpe *vpe) | ||
2524 | { | ||
2525 | struct its_node *its; | ||
2526 | |||
2527 | list_for_each_entry(its, &its_nodes, entry) { | ||
2528 | if (!its->is_v4) | ||
2529 | continue; | ||
2530 | |||
2531 | if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) | ||
2532 | continue; | ||
2533 | |||
2534 | /* | ||
2535 | * Sending a VINVALL to a single ITS is enough, as all | ||
2536 | * we need is to reach the redistributors. | ||
2537 | */ | ||
2538 | its_send_vinvall(its, vpe); | ||
2539 | return; | ||
2540 | } | ||
2541 | } | ||
2542 | |||
2442 | static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) | 2543 | static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) |
2443 | { | 2544 | { |
2444 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | 2545 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); |
@@ -2454,7 +2555,7 @@ static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) | |||
2454 | return 0; | 2555 | return 0; |
2455 | 2556 | ||
2456 | case INVALL_VPE: | 2557 | case INVALL_VPE: |
2457 | its_send_vinvall(vpe); | 2558 | its_vpe_invall(vpe); |
2458 | return 0; | 2559 | return 0; |
2459 | 2560 | ||
2460 | default: | 2561 | default: |
@@ -2683,11 +2784,25 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain, | |||
2683 | struct irq_data *d, bool early) | 2784 | struct irq_data *d, bool early) |
2684 | { | 2785 | { |
2685 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | 2786 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); |
2787 | struct its_node *its; | ||
2788 | |||
2789 | /* If we use the list map, we issue VMAPP on demand... */ | ||
2790 | if (its_list_map) | ||
2791 | return true; | ||
2686 | 2792 | ||
2687 | /* Map the VPE to the first possible CPU */ | 2793 | /* Map the VPE to the first possible CPU */ |
2688 | vpe->col_idx = cpumask_first(cpu_online_mask); | 2794 | vpe->col_idx = cpumask_first(cpu_online_mask); |
2689 | its_send_vmapp(vpe, true); | 2795 | |
2690 | its_send_vinvall(vpe); | 2796 | list_for_each_entry(its, &its_nodes, entry) { |
2797 | if (!its->is_v4) | ||
2798 | continue; | ||
2799 | |||
2800 | its_send_vmapp(its, vpe, true); | ||
2801 | its_send_vinvall(its, vpe); | ||
2802 | } | ||
2803 | |||
2804 | irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); | ||
2805 | |||
2691 | return 0; | 2806 | return 0; |
2692 | } | 2807 | } |
2693 | 2808 | ||
@@ -2695,8 +2810,21 @@ static void its_vpe_irq_domain_deactivate(struct irq_domain *domain, | |||
2695 | struct irq_data *d) | 2810 | struct irq_data *d) |
2696 | { | 2811 | { |
2697 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); | 2812 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); |
2813 | struct its_node *its; | ||
2698 | 2814 | ||
2699 | its_send_vmapp(vpe, false); | 2815 | /* |
2816 | * If we use the list map, we unmap the VPE once no VLPIs are | ||
2817 | * associated with the VM. | ||
2818 | */ | ||
2819 | if (its_list_map) | ||
2820 | return; | ||
2821 | |||
2822 | list_for_each_entry(its, &its_nodes, entry) { | ||
2823 | if (!its->is_v4) | ||
2824 | continue; | ||
2825 | |||
2826 | its_send_vmapp(its, vpe, false); | ||
2827 | } | ||
2700 | } | 2828 | } |
2701 | 2829 | ||
2702 | static const struct irq_domain_ops its_vpe_domain_ops = { | 2830 | static const struct irq_domain_ops its_vpe_domain_ops = { |
@@ -2739,26 +2867,85 @@ static int its_force_quiescent(void __iomem *base) | |||
2739 | } | 2867 | } |
2740 | } | 2868 | } |
2741 | 2869 | ||
2742 | static void __maybe_unused its_enable_quirk_cavium_22375(void *data) | 2870 | static bool __maybe_unused its_enable_quirk_cavium_22375(void *data) |
2743 | { | 2871 | { |
2744 | struct its_node *its = data; | 2872 | struct its_node *its = data; |
2745 | 2873 | ||
2874 | /* erratum 22375: only alloc 8MB table size */ | ||
2875 | its->device_ids = 0x14; /* 20 bits, 8MB */ | ||
2746 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; | 2876 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; |
2877 | |||
2878 | return true; | ||
2747 | } | 2879 | } |
2748 | 2880 | ||
2749 | static void __maybe_unused its_enable_quirk_cavium_23144(void *data) | 2881 | static bool __maybe_unused its_enable_quirk_cavium_23144(void *data) |
2750 | { | 2882 | { |
2751 | struct its_node *its = data; | 2883 | struct its_node *its = data; |
2752 | 2884 | ||
2753 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; | 2885 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; |
2886 | |||
2887 | return true; | ||
2754 | } | 2888 | } |
2755 | 2889 | ||
2756 | static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) | 2890 | static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) |
2757 | { | 2891 | { |
2758 | struct its_node *its = data; | 2892 | struct its_node *its = data; |
2759 | 2893 | ||
2760 | /* On QDF2400, the size of the ITE is 16Bytes */ | 2894 | /* On QDF2400, the size of the ITE is 16Bytes */ |
2761 | its->ite_size = 16; | 2895 | its->ite_size = 16; |
2896 | |||
2897 | return true; | ||
2898 | } | ||
2899 | |||
2900 | static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev) | ||
2901 | { | ||
2902 | struct its_node *its = its_dev->its; | ||
2903 | |||
2904 | /* | ||
2905 | * The Socionext Synquacer SoC has a so-called 'pre-ITS', | ||
2906 | * which maps 32-bit writes targeted at a separate window of | ||
2907 | * size '4 << device_id_bits' onto writes to GITS_TRANSLATER | ||
2908 | * with device ID taken from bits [device_id_bits + 1:2] of | ||
2909 | * the window offset. | ||
2910 | */ | ||
2911 | return its->pre_its_base + (its_dev->device_id << 2); | ||
2912 | } | ||
2913 | |||
2914 | static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data) | ||
2915 | { | ||
2916 | struct its_node *its = data; | ||
2917 | u32 pre_its_window[2]; | ||
2918 | u32 ids; | ||
2919 | |||
2920 | if (!fwnode_property_read_u32_array(its->fwnode_handle, | ||
2921 | "socionext,synquacer-pre-its", | ||
2922 | pre_its_window, | ||
2923 | ARRAY_SIZE(pre_its_window))) { | ||
2924 | |||
2925 | its->pre_its_base = pre_its_window[0]; | ||
2926 | its->get_msi_base = its_irq_get_msi_base_pre_its; | ||
2927 | |||
2928 | ids = ilog2(pre_its_window[1]) - 2; | ||
2929 | if (its->device_ids > ids) | ||
2930 | its->device_ids = ids; | ||
2931 | |||
2932 | /* the pre-ITS breaks isolation, so disable MSI remapping */ | ||
2933 | its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP; | ||
2934 | return true; | ||
2935 | } | ||
2936 | return false; | ||
2937 | } | ||
2938 | |||
2939 | static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data) | ||
2940 | { | ||
2941 | struct its_node *its = data; | ||
2942 | |||
2943 | /* | ||
2944 | * Hip07 insists on using the wrong address for the VLPI | ||
2945 | * page. Trick it into doing the right thing... | ||
2946 | */ | ||
2947 | its->vlpi_redist_offset = SZ_128K; | ||
2948 | return true; | ||
2762 | } | 2949 | } |
2763 | 2950 | ||
2764 | static const struct gic_quirk its_quirks[] = { | 2951 | static const struct gic_quirk its_quirks[] = { |
@@ -2786,6 +2973,27 @@ static const struct gic_quirk its_quirks[] = { | |||
2786 | .init = its_enable_quirk_qdf2400_e0065, | 2973 | .init = its_enable_quirk_qdf2400_e0065, |
2787 | }, | 2974 | }, |
2788 | #endif | 2975 | #endif |
2976 | #ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS | ||
2977 | { | ||
2978 | /* | ||
2979 | * The Socionext Synquacer SoC incorporates ARM's own GIC-500 | ||
2980 | * implementation, but with a 'pre-ITS' added that requires | ||
2981 | * special handling in software. | ||
2982 | */ | ||
2983 | .desc = "ITS: Socionext Synquacer pre-ITS", | ||
2984 | .iidr = 0x0001143b, | ||
2985 | .mask = 0xffffffff, | ||
2986 | .init = its_enable_quirk_socionext_synquacer, | ||
2987 | }, | ||
2988 | #endif | ||
2989 | #ifdef CONFIG_HISILICON_ERRATUM_161600802 | ||
2990 | { | ||
2991 | .desc = "ITS: Hip07 erratum 161600802", | ||
2992 | .iidr = 0x00000004, | ||
2993 | .mask = 0xffffffff, | ||
2994 | .init = its_enable_quirk_hip07_161600802, | ||
2995 | }, | ||
2996 | #endif | ||
2789 | { | 2997 | { |
2790 | } | 2998 | } |
2791 | }; | 2999 | }; |
@@ -2814,7 +3022,7 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) | |||
2814 | 3022 | ||
2815 | inner_domain->parent = its_parent; | 3023 | inner_domain->parent = its_parent; |
2816 | irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); | 3024 | irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); |
2817 | inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP; | 3025 | inner_domain->flags |= its->msi_domain_flags; |
2818 | info->ops = &its_msi_domain_ops; | 3026 | info->ops = &its_msi_domain_ops; |
2819 | info->data = its; | 3027 | info->data = its; |
2820 | inner_domain->host_data = info; | 3028 | inner_domain->host_data = info; |
@@ -2875,8 +3083,8 @@ static int __init its_compute_its_list_map(struct resource *res, | |||
2875 | * locking. Should this change, we should address | 3083 | * locking. Should this change, we should address |
2876 | * this. | 3084 | * this. |
2877 | */ | 3085 | */ |
2878 | its_number = find_first_zero_bit(&its_list_map, ITS_LIST_MAX); | 3086 | its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX); |
2879 | if (its_number >= ITS_LIST_MAX) { | 3087 | if (its_number >= GICv4_ITS_LIST_MAX) { |
2880 | pr_err("ITS@%pa: No ITSList entry available!\n", | 3088 | pr_err("ITS@%pa: No ITSList entry available!\n", |
2881 | &res->start); | 3089 | &res->start); |
2882 | return -EINVAL; | 3090 | return -EINVAL; |
@@ -2944,6 +3152,7 @@ static int __init its_probe_one(struct resource *res, | |||
2944 | its->base = its_base; | 3152 | its->base = its_base; |
2945 | its->phys_base = res->start; | 3153 | its->phys_base = res->start; |
2946 | its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer); | 3154 | its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer); |
3155 | its->device_ids = GITS_TYPER_DEVBITS(typer); | ||
2947 | its->is_v4 = !!(typer & GITS_TYPER_VLPIS); | 3156 | its->is_v4 = !!(typer & GITS_TYPER_VLPIS); |
2948 | if (its->is_v4) { | 3157 | if (its->is_v4) { |
2949 | if (!(typer & GITS_TYPER_VMOVP)) { | 3158 | if (!(typer & GITS_TYPER_VMOVP)) { |
@@ -2951,6 +3160,8 @@ static int __init its_probe_one(struct resource *res, | |||
2951 | if (err < 0) | 3160 | if (err < 0) |
2952 | goto out_free_its; | 3161 | goto out_free_its; |
2953 | 3162 | ||
3163 | its->list_nr = err; | ||
3164 | |||
2954 | pr_info("ITS@%pa: Using ITS number %d\n", | 3165 | pr_info("ITS@%pa: Using ITS number %d\n", |
2955 | &res->start, err); | 3166 | &res->start, err); |
2956 | } else { | 3167 | } else { |
@@ -2967,6 +3178,9 @@ static int __init its_probe_one(struct resource *res, | |||
2967 | goto out_free_its; | 3178 | goto out_free_its; |
2968 | } | 3179 | } |
2969 | its->cmd_write = its->cmd_base; | 3180 | its->cmd_write = its->cmd_base; |
3181 | its->fwnode_handle = handle; | ||
3182 | its->get_msi_base = its_irq_get_msi_base; | ||
3183 | its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP; | ||
2970 | 3184 | ||
2971 | its_enable_quirks(its); | 3185 | its_enable_quirks(its); |
2972 | 3186 | ||
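
The device-ID arithmetic in the Synquacer pre-ITS quirk above is terse: the usable device-ID width is derived from the window size as ilog2(size) - 2, and each device's MSI doorbell sits at base + (device_id << 2). The stand-alone sketch below reproduces that arithmetic outside the kernel; the window base, window size and device ID are hypothetical example values, not taken from the patch or any real platform.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical pre-ITS window as it would appear in the
 * "socionext,synquacer-pre-its" property: <base size>. */
#define PRE_ITS_BASE   0x58000000u
#define PRE_ITS_SIZE   0x00200000u      /* 2 MiB, example only */

/* ilog2() of a power-of-two value, mirroring the kernel helper. */
static unsigned int ilog2_u32(uint32_t v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        /* As in the quirk: each device owns a 4-byte slot in the window,
         * so the window supports ilog2(size) - 2 device-ID bits. */
        unsigned int device_id_bits = ilog2_u32(PRE_ITS_SIZE) - 2;  /* 21 - 2 = 19 */

        /* Doorbell address as computed by its_irq_get_msi_base_pre_its():
         * base + (device_id << 2). Device ID 0x1234 is an arbitrary example. */
        uint32_t device_id = 0x1234;
        uint64_t doorbell = (uint64_t)PRE_ITS_BASE + ((uint64_t)device_id << 2);

        printf("device-ID bits: %u (max ID 0x%x)\n",
               device_id_bits, (1u << device_id_bits) - 1);
        printf("doorbell for ID 0x%x: 0x%llx\n",
               device_id, (unsigned long long)doorbell);
        return 0;
}

With these example values the sketch prints 19 device-ID bits and a doorbell of 0x580048d0, which is why the quirk clamps its->device_ids whenever the discovered width exceeds what the window can address.
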
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index b5df99c6f680..b54b55597ffb 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c | |||
@@ -55,6 +55,7 @@ struct gic_chip_data { | |||
55 | struct irq_domain *domain; | 55 | struct irq_domain *domain; |
56 | u64 redist_stride; | 56 | u64 redist_stride; |
57 | u32 nr_redist_regions; | 57 | u32 nr_redist_regions; |
58 | bool has_rss; | ||
58 | unsigned int irq_nr; | 59 | unsigned int irq_nr; |
59 | struct partition_desc *ppi_descs[16]; | 60 | struct partition_desc *ppi_descs[16]; |
60 | }; | 61 | }; |
@@ -63,7 +64,9 @@ static struct gic_chip_data gic_data __read_mostly; | |||
63 | static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE; | 64 | static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE; |
64 | 65 | ||
65 | static struct gic_kvm_info gic_v3_kvm_info; | 66 | static struct gic_kvm_info gic_v3_kvm_info; |
67 | static DEFINE_PER_CPU(bool, has_rss); | ||
66 | 68 | ||
69 | #define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4) | ||
67 | #define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist)) | 70 | #define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist)) |
68 | #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) | 71 | #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) |
69 | #define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K) | 72 | #define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K) |
@@ -526,6 +529,10 @@ static void gic_update_vlpi_properties(void) | |||
526 | 529 | ||
527 | static void gic_cpu_sys_reg_init(void) | 530 | static void gic_cpu_sys_reg_init(void) |
528 | { | 531 | { |
532 | int i, cpu = smp_processor_id(); | ||
533 | u64 mpidr = cpu_logical_map(cpu); | ||
534 | u64 need_rss = MPIDR_RS(mpidr); | ||
535 | |||
529 | /* | 536 | /* |
530 | * Need to check that the SRE bit has actually been set. If | 537 | * Need to check that the SRE bit has actually been set. If |
531 | * not, it means that SRE is disabled at EL2. We're going to | 538 | * not, it means that SRE is disabled at EL2. We're going to |
@@ -557,6 +564,30 @@ static void gic_cpu_sys_reg_init(void) | |||
557 | 564 | ||
558 | /* ... and let's hit the road... */ | 565 | /* ... and let's hit the road... */ |
559 | gic_write_grpen1(1); | 566 | gic_write_grpen1(1); |
567 | |||
568 | /* Keep the RSS capability status in a per_cpu variable */ | ||
569 | per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS); | ||
570 | |||
571 | /* Check that all the CPUs are capable of sending SGIs to other CPUs */ | ||
572 | for_each_online_cpu(i) { | ||
573 | bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu); | ||
574 | |||
575 | need_rss |= MPIDR_RS(cpu_logical_map(i)); | ||
576 | if (need_rss && (!have_rss)) | ||
577 | pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n", | ||
578 | cpu, (unsigned long)mpidr, | ||
579 | i, (unsigned long)cpu_logical_map(i)); | ||
580 | } | ||
581 | |||
582 | /* | ||
583 | * The GIC spec says that when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0, | ||
584 | * writing the ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED | ||
585 | * UNPREDICTABLE choice of: | ||
586 | * - The write is ignored. | ||
587 | * - The RS field is treated as 0. | ||
588 | */ | ||
589 | if (need_rss && (!gic_data.has_rss)) | ||
590 | pr_crit_once("RSS is required but GICD doesn't support it\n"); | ||
560 | } | 591 | } |
561 | 592 | ||
562 | static int gic_dist_supports_lpis(void) | 593 | static int gic_dist_supports_lpis(void) |
@@ -591,6 +622,9 @@ static void gic_cpu_init(void) | |||
591 | 622 | ||
592 | #ifdef CONFIG_SMP | 623 | #ifdef CONFIG_SMP |
593 | 624 | ||
625 | #define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT) | ||
626 | #define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL) | ||
627 | |||
594 | static int gic_starting_cpu(unsigned int cpu) | 628 | static int gic_starting_cpu(unsigned int cpu) |
595 | { | 629 | { |
596 | gic_cpu_init(); | 630 | gic_cpu_init(); |
@@ -605,13 +639,6 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, | |||
605 | u16 tlist = 0; | 639 | u16 tlist = 0; |
606 | 640 | ||
607 | while (cpu < nr_cpu_ids) { | 641 | while (cpu < nr_cpu_ids) { |
608 | /* | ||
609 | * If we ever get a cluster of more than 16 CPUs, just | ||
610 | * scream and skip that CPU. | ||
611 | */ | ||
612 | if (WARN_ON((mpidr & 0xff) >= 16)) | ||
613 | goto out; | ||
614 | |||
615 | tlist |= 1 << (mpidr & 0xf); | 642 | tlist |= 1 << (mpidr & 0xf); |
616 | 643 | ||
617 | next_cpu = cpumask_next(cpu, mask); | 644 | next_cpu = cpumask_next(cpu, mask); |
@@ -621,7 +648,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, | |||
621 | 648 | ||
622 | mpidr = cpu_logical_map(cpu); | 649 | mpidr = cpu_logical_map(cpu); |
623 | 650 | ||
624 | if (cluster_id != (mpidr & ~0xffUL)) { | 651 | if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) { |
625 | cpu--; | 652 | cpu--; |
626 | goto out; | 653 | goto out; |
627 | } | 654 | } |
@@ -643,6 +670,7 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) | |||
643 | MPIDR_TO_SGI_AFFINITY(cluster_id, 2) | | 670 | MPIDR_TO_SGI_AFFINITY(cluster_id, 2) | |
644 | irq << ICC_SGI1R_SGI_ID_SHIFT | | 671 | irq << ICC_SGI1R_SGI_ID_SHIFT | |
645 | MPIDR_TO_SGI_AFFINITY(cluster_id, 1) | | 672 | MPIDR_TO_SGI_AFFINITY(cluster_id, 1) | |
673 | MPIDR_TO_SGI_RS(cluster_id) | | ||
646 | tlist << ICC_SGI1R_TARGET_LIST_SHIFT); | 674 | tlist << ICC_SGI1R_TARGET_LIST_SHIFT); |
647 | 675 | ||
648 | pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); | 676 | pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); |
@@ -663,7 +691,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) | |||
663 | smp_wmb(); | 691 | smp_wmb(); |
664 | 692 | ||
665 | for_each_cpu(cpu, mask) { | 693 | for_each_cpu(cpu, mask) { |
666 | unsigned long cluster_id = cpu_logical_map(cpu) & ~0xffUL; | 694 | u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu)); |
667 | u16 tlist; | 695 | u16 tlist; |
668 | 696 | ||
669 | tlist = gic_compute_target_list(&cpu, mask, cluster_id); | 697 | tlist = gic_compute_target_list(&cpu, mask, cluster_id); |
@@ -1007,6 +1035,10 @@ static int __init gic_init_bases(void __iomem *dist_base, | |||
1007 | goto out_free; | 1035 | goto out_free; |
1008 | } | 1036 | } |
1009 | 1037 | ||
1038 | gic_data.has_rss = !!(typer & GICD_TYPER_RSS); | ||
1039 | pr_info("Distributor has %sRange Selector support\n", | ||
1040 | gic_data.has_rss ? "" : "no "); | ||
1041 | |||
1010 | set_handle_irq(gic_handle_irq); | 1042 | set_handle_irq(gic_handle_irq); |
1011 | 1043 | ||
1012 | gic_update_vlpi_properties(); | 1044 | gic_update_vlpi_properties(); |
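
Since the range selector now allows Aff0 values above 15, it may help to see how a single MPIDR splits into the RS field, the cluster ID and the target-list bit used for the SGI register write. The macros below simply mirror the ones introduced in the hunks above, and the MPIDR value is an arbitrary example, not taken from real hardware.

#include <stdint.h>
#include <stdio.h>

/* Mirrors of the macros added in the patch, for illustration only. */
#define MPIDR_RS(mpidr)                 (((mpidr) & 0xF0UL) >> 4)
#define MPIDR_TO_SGI_CLUSTER_ID(mpidr)  ((mpidr) & ~0xFUL)

int main(void)
{
        /* Example MPIDR with Aff1 = 1 and Aff0 = 0x35, i.e. CPU 53 of its
         * cluster, which is unreachable without RSS. */
        uint64_t mpidr = 0x135;

        uint64_t rs      = MPIDR_RS(mpidr);                 /* 0x3: CPUs 48-63 */
        uint64_t cluster = MPIDR_TO_SGI_CLUSTER_ID(mpidr);  /* 0x130 */
        unsigned int tlist_bit = 1u << (mpidr & 0xf);       /* bit 5 of the list */

        printf("RS=%llu cluster=0x%llx target-list bit=0x%04x\n",
               (unsigned long long)rs, (unsigned long long)cluster, tlist_bit);
        return 0;
}

The RS value ends up in ICC_SGI1R_EL1 bits [47:44] via MPIDR_TO_SGI_RS(), while the low four Aff0 bits still pick a position in the 16-bit target list, which is why the old "cluster of more than 16 CPUs" warning could be dropped.
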
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c new file mode 100644 index 000000000000..c7cc7e37a23c --- /dev/null +++ b/drivers/irqchip/irq-meson-gpio.c | |||
@@ -0,0 +1,414 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2015 Endless Mobile, Inc. | ||
3 | * Author: Carlo Caione <carlo@endlessm.com> | ||
4 | * Copyright (c) 2016 BayLibre, SAS. | ||
5 | * Author: Jerome Brunet <jbrunet@baylibre.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of version 2 of the GNU General Public License as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, see <http://www.gnu.org/licenses/>. | ||
18 | * The full GNU General Public License is included in this distribution | ||
19 | * in the file called COPYING. | ||
20 | */ | ||
21 | |||
22 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
23 | |||
24 | #include <linux/io.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/irq.h> | ||
27 | #include <linux/irqdomain.h> | ||
28 | #include <linux/irqchip.h> | ||
29 | #include <linux/of.h> | ||
30 | #include <linux/of_address.h> | ||
31 | |||
32 | #define NUM_CHANNEL 8 | ||
33 | #define MAX_INPUT_MUX 256 | ||
34 | |||
35 | #define REG_EDGE_POL 0x00 | ||
36 | #define REG_PIN_03_SEL 0x04 | ||
37 | #define REG_PIN_47_SEL 0x08 | ||
38 | #define REG_FILTER_SEL 0x0c | ||
39 | |||
40 | #define REG_EDGE_POL_MASK(x) (BIT(x) | BIT(16 + (x))) | ||
41 | #define REG_EDGE_POL_EDGE(x) BIT(x) | ||
42 | #define REG_EDGE_POL_LOW(x) BIT(16 + (x)) | ||
43 | #define REG_PIN_SEL_SHIFT(x) (((x) % 4) * 8) | ||
44 | #define REG_FILTER_SEL_SHIFT(x) ((x) * 4) | ||
45 | |||
46 | struct meson_gpio_irq_params { | ||
47 | unsigned int nr_hwirq; | ||
48 | }; | ||
49 | |||
50 | static const struct meson_gpio_irq_params meson8b_params = { | ||
51 | .nr_hwirq = 119, | ||
52 | }; | ||
53 | |||
54 | static const struct meson_gpio_irq_params gxbb_params = { | ||
55 | .nr_hwirq = 133, | ||
56 | }; | ||
57 | |||
58 | static const struct meson_gpio_irq_params gxl_params = { | ||
59 | .nr_hwirq = 110, | ||
60 | }; | ||
61 | |||
62 | static const struct of_device_id meson_irq_gpio_matches[] = { | ||
63 | { .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params }, | ||
64 | { .compatible = "amlogic,meson-gxbb-gpio-intc", .data = &gxbb_params }, | ||
65 | { .compatible = "amlogic,meson-gxl-gpio-intc", .data = &gxl_params }, | ||
66 | { } | ||
67 | }; | ||
68 | |||
69 | struct meson_gpio_irq_controller { | ||
70 | unsigned int nr_hwirq; | ||
71 | void __iomem *base; | ||
72 | u32 channel_irqs[NUM_CHANNEL]; | ||
73 | DECLARE_BITMAP(channel_map, NUM_CHANNEL); | ||
74 | spinlock_t lock; | ||
75 | }; | ||
76 | |||
77 | static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl, | ||
78 | unsigned int reg, u32 mask, u32 val) | ||
79 | { | ||
80 | u32 tmp; | ||
81 | |||
82 | tmp = readl_relaxed(ctl->base + reg); | ||
83 | tmp &= ~mask; | ||
84 | tmp |= val; | ||
85 | writel_relaxed(tmp, ctl->base + reg); | ||
86 | } | ||
87 | |||
88 | static unsigned int meson_gpio_irq_channel_to_reg(unsigned int channel) | ||
89 | { | ||
90 | return (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL; | ||
91 | } | ||
92 | |||
93 | static int | ||
94 | meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl, | ||
95 | unsigned long hwirq, | ||
96 | u32 **channel_hwirq) | ||
97 | { | ||
98 | unsigned int reg, idx; | ||
99 | |||
100 | spin_lock(&ctl->lock); | ||
101 | |||
102 | /* Find a free channel */ | ||
103 | idx = find_first_zero_bit(ctl->channel_map, NUM_CHANNEL); | ||
104 | if (idx >= NUM_CHANNEL) { | ||
105 | spin_unlock(&ctl->lock); | ||
106 | pr_err("No channel available\n"); | ||
107 | return -ENOSPC; | ||
108 | } | ||
109 | |||
110 | /* Mark the channel as used */ | ||
111 | set_bit(idx, ctl->channel_map); | ||
112 | |||
113 | /* | ||
114 | * Setup the mux of the channel to route the signal of the pad | ||
115 | * to the appropriate input of the GIC | ||
116 | */ | ||
117 | reg = meson_gpio_irq_channel_to_reg(idx); | ||
118 | meson_gpio_irq_update_bits(ctl, reg, | ||
119 | 0xff << REG_PIN_SEL_SHIFT(idx), | ||
120 | hwirq << REG_PIN_SEL_SHIFT(idx)); | ||
121 | |||
122 | /* | ||
123 | * Get the hwirq number assigned to this channel through | ||
124 | * a pointer into the channel_irqs table. The added benefit of | ||
125 | * this method is that we can also retrieve the channel index | ||
126 | * from it, using the table base. | ||
127 | */ | ||
128 | *channel_hwirq = &(ctl->channel_irqs[idx]); | ||
129 | |||
130 | spin_unlock(&ctl->lock); | ||
131 | |||
132 | pr_debug("hwirq %lu assigned to channel %d - irq %u\n", | ||
133 | hwirq, idx, **channel_hwirq); | ||
134 | |||
135 | return 0; | ||
136 | } | ||
137 | |||
138 | static unsigned int | ||
139 | meson_gpio_irq_get_channel_idx(struct meson_gpio_irq_controller *ctl, | ||
140 | u32 *channel_hwirq) | ||
141 | { | ||
142 | return channel_hwirq - ctl->channel_irqs; | ||
143 | } | ||
144 | |||
145 | static void | ||
146 | meson_gpio_irq_release_channel(struct meson_gpio_irq_controller *ctl, | ||
147 | u32 *channel_hwirq) | ||
148 | { | ||
149 | unsigned int idx; | ||
150 | |||
151 | idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq); | ||
152 | clear_bit(idx, ctl->channel_map); | ||
153 | } | ||
154 | |||
155 | static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl, | ||
156 | unsigned int type, | ||
157 | u32 *channel_hwirq) | ||
158 | { | ||
159 | u32 val = 0; | ||
160 | unsigned int idx; | ||
161 | |||
162 | idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq); | ||
163 | |||
164 | /* | ||
165 | * The controller has a filter block to operate in either LEVEL or | ||
166 | * EDGE mode before the signal is sent to the GIC. To enable LEVEL_LOW and | ||
167 | * EDGE_FALLING support (which the GIC does not support), the filter | ||
168 | * block is also able to invert the input signal it gets before | ||
169 | * providing it to the GIC. | ||
170 | */ | ||
171 | type &= IRQ_TYPE_SENSE_MASK; | ||
172 | |||
173 | if (type == IRQ_TYPE_EDGE_BOTH) | ||
174 | return -EINVAL; | ||
175 | |||
176 | if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) | ||
177 | val |= REG_EDGE_POL_EDGE(idx); | ||
178 | |||
179 | if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) | ||
180 | val |= REG_EDGE_POL_LOW(idx); | ||
181 | |||
182 | spin_lock(&ctl->lock); | ||
183 | |||
184 | meson_gpio_irq_update_bits(ctl, REG_EDGE_POL, | ||
185 | REG_EDGE_POL_MASK(idx), val); | ||
186 | |||
187 | spin_unlock(&ctl->lock); | ||
188 | |||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | static unsigned int meson_gpio_irq_type_output(unsigned int type) | ||
193 | { | ||
194 | unsigned int sense = type & IRQ_TYPE_SENSE_MASK; | ||
195 | |||
196 | type &= ~IRQ_TYPE_SENSE_MASK; | ||
197 | |||
198 | /* | ||
199 | * The polarity of the signal provided to the GIC should always | ||
200 | * be high. | ||
201 | */ | ||
202 | if (sense & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) | ||
203 | type |= IRQ_TYPE_LEVEL_HIGH; | ||
204 | else if (sense & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) | ||
205 | type |= IRQ_TYPE_EDGE_RISING; | ||
206 | |||
207 | return type; | ||
208 | } | ||
209 | |||
210 | static int meson_gpio_irq_set_type(struct irq_data *data, unsigned int type) | ||
211 | { | ||
212 | struct meson_gpio_irq_controller *ctl = data->domain->host_data; | ||
213 | u32 *channel_hwirq = irq_data_get_irq_chip_data(data); | ||
214 | int ret; | ||
215 | |||
216 | ret = meson_gpio_irq_type_setup(ctl, type, channel_hwirq); | ||
217 | if (ret) | ||
218 | return ret; | ||
219 | |||
220 | return irq_chip_set_type_parent(data, | ||
221 | meson_gpio_irq_type_output(type)); | ||
222 | } | ||
223 | |||
224 | static struct irq_chip meson_gpio_irq_chip = { | ||
225 | .name = "meson-gpio-irqchip", | ||
226 | .irq_mask = irq_chip_mask_parent, | ||
227 | .irq_unmask = irq_chip_unmask_parent, | ||
228 | .irq_eoi = irq_chip_eoi_parent, | ||
229 | .irq_set_type = meson_gpio_irq_set_type, | ||
230 | .irq_retrigger = irq_chip_retrigger_hierarchy, | ||
231 | #ifdef CONFIG_SMP | ||
232 | .irq_set_affinity = irq_chip_set_affinity_parent, | ||
233 | #endif | ||
234 | .flags = IRQCHIP_SET_TYPE_MASKED, | ||
235 | }; | ||
236 | |||
237 | static int meson_gpio_irq_domain_translate(struct irq_domain *domain, | ||
238 | struct irq_fwspec *fwspec, | ||
239 | unsigned long *hwirq, | ||
240 | unsigned int *type) | ||
241 | { | ||
242 | if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) { | ||
243 | *hwirq = fwspec->param[0]; | ||
244 | *type = fwspec->param[1]; | ||
245 | return 0; | ||
246 | } | ||
247 | |||
248 | return -EINVAL; | ||
249 | } | ||
250 | |||
251 | static int meson_gpio_irq_allocate_gic_irq(struct irq_domain *domain, | ||
252 | unsigned int virq, | ||
253 | u32 hwirq, | ||
254 | unsigned int type) | ||
255 | { | ||
256 | struct irq_fwspec fwspec; | ||
257 | |||
258 | fwspec.fwnode = domain->parent->fwnode; | ||
259 | fwspec.param_count = 3; | ||
260 | fwspec.param[0] = 0; /* SPI */ | ||
261 | fwspec.param[1] = hwirq; | ||
262 | fwspec.param[2] = meson_gpio_irq_type_output(type); | ||
263 | |||
264 | return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); | ||
265 | } | ||
266 | |||
267 | static int meson_gpio_irq_domain_alloc(struct irq_domain *domain, | ||
268 | unsigned int virq, | ||
269 | unsigned int nr_irqs, | ||
270 | void *data) | ||
271 | { | ||
272 | struct irq_fwspec *fwspec = data; | ||
273 | struct meson_gpio_irq_controller *ctl = domain->host_data; | ||
274 | unsigned long hwirq; | ||
275 | u32 *channel_hwirq; | ||
276 | unsigned int type; | ||
277 | int ret; | ||
278 | |||
279 | if (WARN_ON(nr_irqs != 1)) | ||
280 | return -EINVAL; | ||
281 | |||
282 | ret = meson_gpio_irq_domain_translate(domain, fwspec, &hwirq, &type); | ||
283 | if (ret) | ||
284 | return ret; | ||
285 | |||
286 | ret = meson_gpio_irq_request_channel(ctl, hwirq, &channel_hwirq); | ||
287 | if (ret) | ||
288 | return ret; | ||
289 | |||
290 | ret = meson_gpio_irq_allocate_gic_irq(domain, virq, | ||
291 | *channel_hwirq, type); | ||
292 | if (ret < 0) { | ||
293 | pr_err("failed to allocate gic irq %u\n", *channel_hwirq); | ||
294 | meson_gpio_irq_release_channel(ctl, channel_hwirq); | ||
295 | return ret; | ||
296 | } | ||
297 | |||
298 | irq_domain_set_hwirq_and_chip(domain, virq, hwirq, | ||
299 | &meson_gpio_irq_chip, channel_hwirq); | ||
300 | |||
301 | return 0; | ||
302 | } | ||
303 | |||
304 | static void meson_gpio_irq_domain_free(struct irq_domain *domain, | ||
305 | unsigned int virq, | ||
306 | unsigned int nr_irqs) | ||
307 | { | ||
308 | struct meson_gpio_irq_controller *ctl = domain->host_data; | ||
309 | struct irq_data *irq_data; | ||
310 | u32 *channel_hwirq; | ||
311 | |||
312 | if (WARN_ON(nr_irqs != 1)) | ||
313 | return; | ||
314 | |||
315 | irq_domain_free_irqs_parent(domain, virq, 1); | ||
316 | |||
317 | irq_data = irq_domain_get_irq_data(domain, virq); | ||
318 | channel_hwirq = irq_data_get_irq_chip_data(irq_data); | ||
319 | |||
320 | meson_gpio_irq_release_channel(ctl, channel_hwirq); | ||
321 | } | ||
322 | |||
323 | static const struct irq_domain_ops meson_gpio_irq_domain_ops = { | ||
324 | .alloc = meson_gpio_irq_domain_alloc, | ||
325 | .free = meson_gpio_irq_domain_free, | ||
326 | .translate = meson_gpio_irq_domain_translate, | ||
327 | }; | ||
328 | |||
329 | static int __init meson_gpio_irq_parse_dt(struct device_node *node, | ||
330 | struct meson_gpio_irq_controller *ctl) | ||
331 | { | ||
332 | const struct of_device_id *match; | ||
333 | const struct meson_gpio_irq_params *params; | ||
334 | int ret; | ||
335 | |||
336 | match = of_match_node(meson_irq_gpio_matches, node); | ||
337 | if (!match) | ||
338 | return -ENODEV; | ||
339 | |||
340 | params = match->data; | ||
341 | ctl->nr_hwirq = params->nr_hwirq; | ||
342 | |||
343 | ret = of_property_read_variable_u32_array(node, | ||
344 | "amlogic,channel-interrupts", | ||
345 | ctl->channel_irqs, | ||
346 | NUM_CHANNEL, | ||
347 | NUM_CHANNEL); | ||
348 | if (ret < 0) { | ||
349 | pr_err("can't get %d channel interrupts\n", NUM_CHANNEL); | ||
350 | return ret; | ||
351 | } | ||
352 | |||
353 | return 0; | ||
354 | } | ||
355 | |||
356 | static int __init meson_gpio_irq_of_init(struct device_node *node, | ||
357 | struct device_node *parent) | ||
358 | { | ||
359 | struct irq_domain *domain, *parent_domain; | ||
360 | struct meson_gpio_irq_controller *ctl; | ||
361 | int ret; | ||
362 | |||
363 | if (!parent) { | ||
364 | pr_err("missing parent interrupt node\n"); | ||
365 | return -ENODEV; | ||
366 | } | ||
367 | |||
368 | parent_domain = irq_find_host(parent); | ||
369 | if (!parent_domain) { | ||
370 | pr_err("unable to obtain parent domain\n"); | ||
371 | return -ENXIO; | ||
372 | } | ||
373 | |||
374 | ctl = kzalloc(sizeof(*ctl), GFP_KERNEL); | ||
375 | if (!ctl) | ||
376 | return -ENOMEM; | ||
377 | |||
378 | spin_lock_init(&ctl->lock); | ||
379 | |||
380 | ctl->base = of_iomap(node, 0); | ||
381 | if (!ctl->base) { | ||
382 | ret = -ENOMEM; | ||
383 | goto free_ctl; | ||
384 | } | ||
385 | |||
386 | ret = meson_gpio_irq_parse_dt(node, ctl); | ||
387 | if (ret) | ||
388 | goto free_channel_irqs; | ||
389 | |||
390 | domain = irq_domain_create_hierarchy(parent_domain, 0, ctl->nr_hwirq, | ||
391 | of_node_to_fwnode(node), | ||
392 | &meson_gpio_irq_domain_ops, | ||
393 | ctl); | ||
394 | if (!domain) { | ||
395 | pr_err("failed to add domain\n"); | ||
396 | ret = -ENODEV; | ||
397 | goto free_channel_irqs; | ||
398 | } | ||
399 | |||
400 | pr_info("%d to %d gpio interrupt mux initialized\n", | ||
401 | ctl->nr_hwirq, NUM_CHANNEL); | ||
402 | |||
403 | return 0; | ||
404 | |||
405 | free_channel_irqs: | ||
406 | iounmap(ctl->base); | ||
407 | free_ctl: | ||
408 | kfree(ctl); | ||
409 | |||
410 | return ret; | ||
411 | } | ||
412 | |||
413 | IRQCHIP_DECLARE(meson_gpio_intc, "amlogic,meson-gpio-intc", | ||
414 | meson_gpio_irq_of_init); | ||
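
To make the channel-mux arithmetic in the driver above easier to follow, here is a small stand-alone sketch of how a pad hwirq is programmed into one of the eight mux channels; the hwirq and channel numbers are arbitrary examples, and the register offsets are copied from the driver for illustration only.

#include <stdio.h>

/* Register layout as defined in the driver above. */
#define REG_PIN_03_SEL          0x04
#define REG_PIN_47_SEL          0x08
#define REG_PIN_SEL_SHIFT(x)    (((x) % 4) * 8)

int main(void)
{
        /* Arbitrary example: pad hwirq 100 routed through mux channel 5. */
        unsigned int hwirq = 100, channel = 5;

        /* Channels 0-3 live in one select register, channels 4-7 in the
         * other; each channel owns one byte of its register. */
        unsigned int reg   = (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL;
        unsigned int shift = REG_PIN_SEL_SHIFT(channel);
        unsigned int mask  = 0xffu << shift;
        unsigned int val   = hwirq << shift;

        printf("channel %u: register 0x%02x, mask 0x%08x, value 0x%08x\n",
               channel, reg, mask, val);
        return 0;
}

This is the same read-modify-write that meson_gpio_irq_request_channel() performs through meson_gpio_irq_update_bits() once a free channel has been found in the bitmap.
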
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c index 713177d97c7a..06f29cf5018a 100644 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c | |||
@@ -389,9 +389,8 @@ MODULE_DEVICE_TABLE(of, intc_irqpin_dt_ids); | |||
389 | 389 | ||
390 | static int intc_irqpin_probe(struct platform_device *pdev) | 390 | static int intc_irqpin_probe(struct platform_device *pdev) |
391 | { | 391 | { |
392 | const struct intc_irqpin_config *config = NULL; | 392 | const struct intc_irqpin_config *config; |
393 | struct device *dev = &pdev->dev; | 393 | struct device *dev = &pdev->dev; |
394 | const struct of_device_id *of_id; | ||
395 | struct intc_irqpin_priv *p; | 394 | struct intc_irqpin_priv *p; |
396 | struct intc_irqpin_iomem *i; | 395 | struct intc_irqpin_iomem *i; |
397 | struct resource *io[INTC_IRQPIN_REG_NR]; | 396 | struct resource *io[INTC_IRQPIN_REG_NR]; |
@@ -422,11 +421,9 @@ static int intc_irqpin_probe(struct platform_device *pdev) | |||
422 | p->pdev = pdev; | 421 | p->pdev = pdev; |
423 | platform_set_drvdata(pdev, p); | 422 | platform_set_drvdata(pdev, p); |
424 | 423 | ||
425 | of_id = of_match_device(intc_irqpin_dt_ids, dev); | 424 | config = of_device_get_match_data(dev); |
426 | if (of_id && of_id->data) { | 425 | if (config) |
427 | config = of_id->data; | ||
428 | p->needs_clk = config->needs_clk; | 426 | p->needs_clk = config->needs_clk; |
429 | } | ||
430 | 427 | ||
431 | p->clk = devm_clk_get(dev, NULL); | 428 | p->clk = devm_clk_get(dev, NULL); |
432 | if (IS_ERR(p->clk)) { | 429 | if (IS_ERR(p->clk)) { |
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 1ea576c8126f..b8b59989bd73 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h | |||
@@ -68,6 +68,7 @@ | |||
68 | #define GICD_CTLR_ENABLE_SS_G1 (1U << 1) | 68 | #define GICD_CTLR_ENABLE_SS_G1 (1U << 1) |
69 | #define GICD_CTLR_ENABLE_SS_G0 (1U << 0) | 69 | #define GICD_CTLR_ENABLE_SS_G0 (1U << 0) |
70 | 70 | ||
71 | #define GICD_TYPER_RSS (1U << 26) | ||
71 | #define GICD_TYPER_LPIS (1U << 17) | 72 | #define GICD_TYPER_LPIS (1U << 17) |
72 | #define GICD_TYPER_MBIS (1U << 16) | 73 | #define GICD_TYPER_MBIS (1U << 16) |
73 | 74 | ||
@@ -459,6 +460,7 @@ | |||
459 | #define ICC_CTLR_EL1_SEIS_MASK (0x1 << ICC_CTLR_EL1_SEIS_SHIFT) | 460 | #define ICC_CTLR_EL1_SEIS_MASK (0x1 << ICC_CTLR_EL1_SEIS_SHIFT) |
460 | #define ICC_CTLR_EL1_A3V_SHIFT 15 | 461 | #define ICC_CTLR_EL1_A3V_SHIFT 15 |
461 | #define ICC_CTLR_EL1_A3V_MASK (0x1 << ICC_CTLR_EL1_A3V_SHIFT) | 462 | #define ICC_CTLR_EL1_A3V_MASK (0x1 << ICC_CTLR_EL1_A3V_SHIFT) |
463 | #define ICC_CTLR_EL1_RSS (0x1 << 18) | ||
462 | #define ICC_PMR_EL1_SHIFT 0 | 464 | #define ICC_PMR_EL1_SHIFT 0 |
463 | #define ICC_PMR_EL1_MASK (0xff << ICC_PMR_EL1_SHIFT) | 465 | #define ICC_PMR_EL1_MASK (0xff << ICC_PMR_EL1_SHIFT) |
464 | #define ICC_BPR0_EL1_SHIFT 0 | 466 | #define ICC_BPR0_EL1_SHIFT 0 |
@@ -547,6 +549,8 @@ | |||
547 | #define ICC_SGI1R_AFFINITY_2_SHIFT 32 | 549 | #define ICC_SGI1R_AFFINITY_2_SHIFT 32 |
548 | #define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT) | 550 | #define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT) |
549 | #define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 | 551 | #define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 |
552 | #define ICC_SGI1R_RS_SHIFT 44 | ||
553 | #define ICC_SGI1R_RS_MASK (0xfULL << ICC_SGI1R_RS_SHIFT) | ||
550 | #define ICC_SGI1R_AFFINITY_3_SHIFT 48 | 554 | #define ICC_SGI1R_AFFINITY_3_SHIFT 48 |
551 | #define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT) | 555 | #define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT) |
552 | 556 | ||
diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index 58a4d89aa82c..43cde15f221b 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h | |||
@@ -20,6 +20,12 @@ | |||
20 | 20 | ||
21 | struct its_vpe; | 21 | struct its_vpe; |
22 | 22 | ||
23 | /* | ||
24 | * Maximum number of ITSs when GITS_TYPER.VMOVP == 0, using the | ||
25 | * ITSList mechanism to perform inter-ITS synchronization. | ||
26 | */ | ||
27 | #define GICv4_ITS_LIST_MAX 16 | ||
28 | |||
23 | /* Embedded in kvm.arch */ | 29 | /* Embedded in kvm.arch */ |
24 | struct its_vm { | 30 | struct its_vm { |
25 | struct fwnode_handle *fwnode; | 31 | struct fwnode_handle *fwnode; |
@@ -30,6 +36,7 @@ struct its_vm { | |||
30 | irq_hw_number_t db_lpi_base; | 36 | irq_hw_number_t db_lpi_base; |
31 | unsigned long *db_bitmap; | 37 | unsigned long *db_bitmap; |
32 | int nr_db_lpis; | 38 | int nr_db_lpis; |
39 | u32 vlpi_count[GICv4_ITS_LIST_MAX]; | ||
33 | }; | 40 | }; |
34 | 41 | ||
35 | /* Embedded in kvm_vcpu.arch */ | 42 | /* Embedded in kvm_vcpu.arch */ |
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 7d0c6c144708..df162f7a4aad 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/types.h> | 32 | #include <linux/types.h> |
33 | #include <linux/irqhandler.h> | 33 | #include <linux/irqhandler.h> |
34 | #include <linux/of.h> | 34 | #include <linux/of.h> |
35 | #include <linux/mutex.h> | ||
35 | #include <linux/radix-tree.h> | 36 | #include <linux/radix-tree.h> |
36 | 37 | ||
37 | struct device_node; | 38 | struct device_node; |
@@ -176,6 +177,7 @@ struct irq_domain { | |||
176 | unsigned int revmap_direct_max_irq; | 177 | unsigned int revmap_direct_max_irq; |
177 | unsigned int revmap_size; | 178 | unsigned int revmap_size; |
178 | struct radix_tree_root revmap_tree; | 179 | struct radix_tree_root revmap_tree; |
180 | struct mutex revmap_tree_mutex; | ||
179 | unsigned int linear_revmap[]; | 181 | unsigned int linear_revmap[]; |
180 | }; | 182 | }; |
181 | 183 | ||
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index b50f737574ae..fbbf34293b17 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c | |||
@@ -21,7 +21,6 @@ | |||
21 | static LIST_HEAD(irq_domain_list); | 21 | static LIST_HEAD(irq_domain_list); |
22 | static DEFINE_MUTEX(irq_domain_mutex); | 22 | static DEFINE_MUTEX(irq_domain_mutex); |
23 | 23 | ||
24 | static DEFINE_MUTEX(revmap_trees_mutex); | ||
25 | static struct irq_domain *irq_default_domain; | 24 | static struct irq_domain *irq_default_domain; |
26 | 25 | ||
27 | static void irq_domain_check_hierarchy(struct irq_domain *domain); | 26 | static void irq_domain_check_hierarchy(struct irq_domain *domain); |
@@ -211,6 +210,7 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size, | |||
211 | 210 | ||
212 | /* Fill structure */ | 211 | /* Fill structure */ |
213 | INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL); | 212 | INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL); |
213 | mutex_init(&domain->revmap_tree_mutex); | ||
214 | domain->ops = ops; | 214 | domain->ops = ops; |
215 | domain->host_data = host_data; | 215 | domain->host_data = host_data; |
216 | domain->hwirq_max = hwirq_max; | 216 | domain->hwirq_max = hwirq_max; |
@@ -462,9 +462,9 @@ static void irq_domain_clear_mapping(struct irq_domain *domain, | |||
462 | if (hwirq < domain->revmap_size) { | 462 | if (hwirq < domain->revmap_size) { |
463 | domain->linear_revmap[hwirq] = 0; | 463 | domain->linear_revmap[hwirq] = 0; |
464 | } else { | 464 | } else { |
465 | mutex_lock(&revmap_trees_mutex); | 465 | mutex_lock(&domain->revmap_tree_mutex); |
466 | radix_tree_delete(&domain->revmap_tree, hwirq); | 466 | radix_tree_delete(&domain->revmap_tree, hwirq); |
467 | mutex_unlock(&revmap_trees_mutex); | 467 | mutex_unlock(&domain->revmap_tree_mutex); |
468 | } | 468 | } |
469 | } | 469 | } |
470 | 470 | ||
@@ -475,9 +475,9 @@ static void irq_domain_set_mapping(struct irq_domain *domain, | |||
475 | if (hwirq < domain->revmap_size) { | 475 | if (hwirq < domain->revmap_size) { |
476 | domain->linear_revmap[hwirq] = irq_data->irq; | 476 | domain->linear_revmap[hwirq] = irq_data->irq; |
477 | } else { | 477 | } else { |
478 | mutex_lock(&revmap_trees_mutex); | 478 | mutex_lock(&domain->revmap_tree_mutex); |
479 | radix_tree_insert(&domain->revmap_tree, hwirq, irq_data); | 479 | radix_tree_insert(&domain->revmap_tree, hwirq, irq_data); |
480 | mutex_unlock(&revmap_trees_mutex); | 480 | mutex_unlock(&domain->revmap_tree_mutex); |
481 | } | 481 | } |
482 | } | 482 | } |
483 | 483 | ||
@@ -945,7 +945,7 @@ static int virq_debug_show(struct seq_file *m, void *private) | |||
945 | struct irq_desc *desc; | 945 | struct irq_desc *desc; |
946 | struct irq_domain *domain; | 946 | struct irq_domain *domain; |
947 | struct radix_tree_iter iter; | 947 | struct radix_tree_iter iter; |
948 | void **slot; | 948 | void __rcu **slot; |
949 | int i; | 949 | int i; |
950 | 950 | ||
951 | seq_printf(m, " %-16s %-6s %-10s %-10s %s\n", | 951 | seq_printf(m, " %-16s %-6s %-10s %-10s %s\n", |
@@ -1453,17 +1453,17 @@ out_free_desc: | |||
1453 | /* The irq_data was moved, fix the revmap to refer to the new location */ | 1453 | /* The irq_data was moved, fix the revmap to refer to the new location */ |
1454 | static void irq_domain_fix_revmap(struct irq_data *d) | 1454 | static void irq_domain_fix_revmap(struct irq_data *d) |
1455 | { | 1455 | { |
1456 | void **slot; | 1456 | void __rcu **slot; |
1457 | 1457 | ||
1458 | if (d->hwirq < d->domain->revmap_size) | 1458 | if (d->hwirq < d->domain->revmap_size) |
1459 | return; /* Not using radix tree. */ | 1459 | return; /* Not using radix tree. */ |
1460 | 1460 | ||
1461 | /* Fix up the revmap. */ | 1461 | /* Fix up the revmap. */ |
1462 | mutex_lock(&revmap_trees_mutex); | 1462 | mutex_lock(&d->domain->revmap_tree_mutex); |
1463 | slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq); | 1463 | slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq); |
1464 | if (slot) | 1464 | if (slot) |
1465 | radix_tree_replace_slot(&d->domain->revmap_tree, slot, d); | 1465 | radix_tree_replace_slot(&d->domain->revmap_tree, slot, d); |
1466 | mutex_unlock(&revmap_trees_mutex); | 1466 | mutex_unlock(&d->domain->revmap_tree_mutex); |
1467 | } | 1467 | } |
1468 | 1468 | ||
1469 | /** | 1469 | /** |