Diffstat (limited to 'drivers/irqchip')
-rw-r--r--  drivers/irqchip/Kconfig                 |  23
-rw-r--r--  drivers/irqchip/Makefile                |   6
-rw-r--r--  drivers/irqchip/irq-atmel-aic-common.c  | 254
-rw-r--r--  drivers/irqchip/irq-atmel-aic-common.h  |  39
-rw-r--r--  drivers/irqchip/irq-atmel-aic.c         | 262
-rw-r--r--  drivers/irqchip/irq-atmel-aic5.c        | 353
-rw-r--r--  drivers/irqchip/irq-crossbar.c          | 168
-rw-r--r--  drivers/irqchip/irq-gic-common.c        | 115
-rw-r--r--  drivers/irqchip/irq-gic-common.h        |  29
-rw-r--r--  drivers/irqchip/irq-gic-v3.c            | 692
-rw-r--r--  drivers/irqchip/irq-gic.c               |  68
-rw-r--r--  drivers/irqchip/irq-nvic.c              |  13
-rw-r--r--  drivers/irqchip/irq-or1k-pic.c          | 182
-rw-r--r--  drivers/irqchip/spear-shirq.c           | 304
14 files changed, 2248 insertions, 260 deletions
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index e7028681aa70..b8632bf9a7f3 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -10,6 +10,11 @@ config ARM_GIC
 config GIC_NON_BANKED
 	bool
 
+config ARM_GIC_V3
+	bool
+	select IRQ_DOMAIN
+	select MULTI_IRQ_HANDLER
+
 config ARM_NVIC
 	bool
 	select IRQ_DOMAIN
@@ -29,6 +34,20 @@ config ARM_VIC_NR
 	  The maximum number of VICs available in the system, for
 	  power management.
 
+config ATMEL_AIC_IRQ
+	bool
+	select GENERIC_IRQ_CHIP
+	select IRQ_DOMAIN
+	select MULTI_IRQ_HANDLER
+	select SPARSE_IRQ
+
+config ATMEL_AIC5_IRQ
+	bool
+	select GENERIC_IRQ_CHIP
+	select IRQ_DOMAIN
+	select MULTI_IRQ_HANDLER
+	select SPARSE_IRQ
+
 config BRCMSTB_L2_IRQ
 	bool
 	depends on ARM
@@ -52,6 +71,10 @@ config CLPS711X_IRQCHIP
 	select SPARSE_IRQ
 	default y
 
+config OR1K_PIC
+	bool
+	select IRQ_DOMAIN
+
 config ORION_IRQCHIP
 	bool
 	select IRQ_DOMAIN
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 62a13e5ef98f..73052ba9ca62 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -11,13 +11,17 @@ obj-$(CONFIG_METAG) += irq-metag-ext.o
 obj-$(CONFIG_METAG_PERFCOUNTER_IRQS)	+= irq-metag.o
 obj-$(CONFIG_ARCH_MOXART)		+= irq-moxart.o
 obj-$(CONFIG_CLPS711X_IRQCHIP)		+= irq-clps711x.o
+obj-$(CONFIG_OR1K_PIC)			+= irq-or1k-pic.o
 obj-$(CONFIG_ORION_IRQCHIP)		+= irq-orion.o
 obj-$(CONFIG_ARCH_SUNXI)		+= irq-sun4i.o
 obj-$(CONFIG_ARCH_SUNXI)		+= irq-sunxi-nmi.o
 obj-$(CONFIG_ARCH_SPEAR3XX)		+= spear-shirq.o
-obj-$(CONFIG_ARM_GIC)			+= irq-gic.o
+obj-$(CONFIG_ARM_GIC)			+= irq-gic.o irq-gic-common.o
+obj-$(CONFIG_ARM_GIC_V3)		+= irq-gic-v3.o irq-gic-common.o
 obj-$(CONFIG_ARM_NVIC)			+= irq-nvic.o
 obj-$(CONFIG_ARM_VIC)			+= irq-vic.o
+obj-$(CONFIG_ATMEL_AIC_IRQ)		+= irq-atmel-aic-common.o irq-atmel-aic.o
+obj-$(CONFIG_ATMEL_AIC5_IRQ)		+= irq-atmel-aic-common.o irq-atmel-aic5.o
 obj-$(CONFIG_IMGPDC_IRQ)		+= irq-imgpdc.o
 obj-$(CONFIG_SIRF_IRQ)			+= irq-sirfsoc.o
 obj-$(CONFIG_RENESAS_INTC_IRQPIN)	+= irq-renesas-intc-irqpin.o
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
new file mode 100644
index 000000000000..6ae3cdee0681
--- /dev/null
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -0,0 +1,254 @@
1/*
2 * Atmel AT91 common AIC (Advanced Interrupt Controller) code shared by
3 * irq-atmel-aic and irq-atmel-aic5 drivers
4 *
5 * Copyright (C) 2004 SAN People
6 * Copyright (C) 2004 ATMEL
7 * Copyright (C) Rick Bronson
8 * Copyright (C) 2014 Free Electrons
9 *
10 * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
11 *
12 * This file is licensed under the terms of the GNU General Public
13 * License version 2. This program is licensed "as is" without any
14 * warranty of any kind, whether express or implied.
15 */
16
17#include <linux/errno.h>
18#include <linux/io.h>
19#include <linux/irq.h>
20#include <linux/irqdomain.h>
21#include <linux/of.h>
22#include <linux/of_address.h>
23#include <linux/slab.h>
24
25#include "irq-atmel-aic-common.h"
26
27#define AT91_AIC_PRIOR GENMASK(2, 0)
28#define AT91_AIC_IRQ_MIN_PRIORITY 0
29#define AT91_AIC_IRQ_MAX_PRIORITY 7
30
31#define AT91_AIC_SRCTYPE GENMASK(7, 6)
32#define AT91_AIC_SRCTYPE_LOW (0 << 5)
33#define AT91_AIC_SRCTYPE_FALLING (1 << 5)
34#define AT91_AIC_SRCTYPE_HIGH (2 << 5)
35#define AT91_AIC_SRCTYPE_RISING (3 << 5)
36
37struct aic_chip_data {
38 u32 ext_irqs;
39};
40
41static void aic_common_shutdown(struct irq_data *d)
42{
43 struct irq_chip_type *ct = irq_data_get_chip_type(d);
44
45 ct->chip.irq_mask(d);
46}
47
48int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val)
49{
50 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
51 struct aic_chip_data *aic = gc->private;
52 unsigned aic_type;
53
54 switch (type) {
55 case IRQ_TYPE_LEVEL_HIGH:
56 aic_type = AT91_AIC_SRCTYPE_HIGH;
57 break;
58 case IRQ_TYPE_EDGE_RISING:
59 aic_type = AT91_AIC_SRCTYPE_RISING;
60 break;
61 case IRQ_TYPE_LEVEL_LOW:
62 if (!(d->mask & aic->ext_irqs))
63 return -EINVAL;
64
65 aic_type = AT91_AIC_SRCTYPE_LOW;
66 break;
67 case IRQ_TYPE_EDGE_FALLING:
68 if (!(d->mask & aic->ext_irqs))
69 return -EINVAL;
70
71 aic_type = AT91_AIC_SRCTYPE_FALLING;
72 break;
73 default:
74 return -EINVAL;
75 }
76
77 *val &= AT91_AIC_SRCTYPE;
78 *val |= aic_type;
79
80 return 0;
81}
82
83int aic_common_set_priority(int priority, unsigned *val)
84{
85 if (priority < AT91_AIC_IRQ_MIN_PRIORITY ||
86 priority > AT91_AIC_IRQ_MAX_PRIORITY)
87 return -EINVAL;
88
89 *val &= AT91_AIC_PRIOR;
90 *val |= priority;
91
92 return 0;
93}
94
95int aic_common_irq_domain_xlate(struct irq_domain *d,
96 struct device_node *ctrlr,
97 const u32 *intspec,
98 unsigned int intsize,
99 irq_hw_number_t *out_hwirq,
100 unsigned int *out_type)
101{
102 if (WARN_ON(intsize < 3))
103 return -EINVAL;
104
105 if (WARN_ON((intspec[2] < AT91_AIC_IRQ_MIN_PRIORITY) ||
106 (intspec[2] > AT91_AIC_IRQ_MAX_PRIORITY)))
107 return -EINVAL;
108
109 *out_hwirq = intspec[0];
110 *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
111
112 return 0;
113}
114
115static void __init aic_common_ext_irq_of_init(struct irq_domain *domain)
116{
117 struct device_node *node = domain->of_node;
118 struct irq_chip_generic *gc;
119 struct aic_chip_data *aic;
120 struct property *prop;
121 const __be32 *p;
122 u32 hwirq;
123
124 gc = irq_get_domain_generic_chip(domain, 0);
125
126 aic = gc->private;
127 aic->ext_irqs |= 1;
128
129 of_property_for_each_u32(node, "atmel,external-irqs", prop, p, hwirq) {
130 gc = irq_get_domain_generic_chip(domain, hwirq);
131 if (!gc) {
132 pr_warn("AIC: external irq %d >= %d skip it\n",
133 hwirq, domain->revmap_size);
134 continue;
135 }
136
137 aic = gc->private;
138 aic->ext_irqs |= (1 << (hwirq % 32));
139 }
140}
141
142#define AT91_RTC_IDR 0x24
143#define AT91_RTC_IMR 0x28
144#define AT91_RTC_IRQ_MASK 0x1f
145
146void __init aic_common_rtc_irq_fixup(struct device_node *root)
147{
148 struct device_node *np;
149 void __iomem *regs;
150
151 np = of_find_compatible_node(root, NULL, "atmel,at91rm9200-rtc");
152 if (!np)
153 np = of_find_compatible_node(root, NULL,
154 "atmel,at91sam9x5-rtc");
155
156 if (!np)
157 return;
158
159 regs = of_iomap(np, 0);
160 of_node_put(np);
161
162 if (!regs)
163 return;
164
165 writel(AT91_RTC_IRQ_MASK, regs + AT91_RTC_IDR);
166
167 iounmap(regs);
168}
169
170void __init aic_common_irq_fixup(const struct of_device_id *matches)
171{
172 struct device_node *root = of_find_node_by_path("/");
173 const struct of_device_id *match;
174
175 if (!root)
176 return;
177
178 match = of_match_node(matches, root);
179 of_node_put(root);
180
181 if (match) {
182 void (*fixup)(struct device_node *) = match->data;
183 fixup(root);
184 }
185
186 of_node_put(root);
187}
188
189struct irq_domain *__init aic_common_of_init(struct device_node *node,
190 const struct irq_domain_ops *ops,
191 const char *name, int nirqs)
192{
193 struct irq_chip_generic *gc;
194 struct irq_domain *domain;
195 struct aic_chip_data *aic;
196 void __iomem *reg_base;
197 int nchips;
198 int ret;
199 int i;
200
201 nchips = DIV_ROUND_UP(nirqs, 32);
202
203 reg_base = of_iomap(node, 0);
204 if (!reg_base)
205 return ERR_PTR(-ENOMEM);
206
207 aic = kcalloc(nchips, sizeof(*aic), GFP_KERNEL);
208 if (!aic) {
209 ret = -ENOMEM;
210 goto err_iounmap;
211 }
212
213 domain = irq_domain_add_linear(node, nchips * 32, ops, aic);
214 if (!domain) {
215 ret = -ENOMEM;
216 goto err_free_aic;
217 }
218
219 ret = irq_alloc_domain_generic_chips(domain, 32, 1, name,
220 handle_level_irq, 0, 0,
221 IRQCHIP_SKIP_SET_WAKE);
222 if (ret)
223 goto err_domain_remove;
224
225 for (i = 0; i < nchips; i++) {
226 gc = irq_get_domain_generic_chip(domain, i * 32);
227
228 gc->reg_base = reg_base;
229
230 gc->unused = 0;
231 gc->wake_enabled = ~0;
232 gc->chip_types[0].type = IRQ_TYPE_SENSE_MASK;
233 gc->chip_types[0].handler = handle_fasteoi_irq;
234 gc->chip_types[0].chip.irq_eoi = irq_gc_eoi;
235 gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
236 gc->chip_types[0].chip.irq_shutdown = aic_common_shutdown;
237 gc->private = &aic[i];
238 }
239
240 aic_common_ext_irq_of_init(domain);
241
242 return domain;
243
244err_domain_remove:
245 irq_domain_remove(domain);
246
247err_free_aic:
248 kfree(aic);
249
250err_iounmap:
251 iounmap(reg_base);
252
253 return ERR_PTR(ret);
254}
diff --git a/drivers/irqchip/irq-atmel-aic-common.h b/drivers/irqchip/irq-atmel-aic-common.h
new file mode 100644
index 000000000000..90aa00e918d6
--- /dev/null
+++ b/drivers/irqchip/irq-atmel-aic-common.h
@@ -0,0 +1,39 @@
1/*
2 * Atmel AT91 common AIC (Advanced Interrupt Controller) header file
3 *
4 * Copyright (C) 2004 SAN People
5 * Copyright (C) 2004 ATMEL
6 * Copyright (C) Rick Bronson
7 * Copyright (C) 2014 Free Electrons
8 *
9 * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
10 *
11 * This file is licensed under the terms of the GNU General Public
12 * License version 2. This program is licensed "as is" without any
13 * warranty of any kind, whether express or implied.
14 */
15
16#ifndef __IRQ_ATMEL_AIC_COMMON_H
17#define __IRQ_ATMEL_AIC_COMMON_H
18
19
20int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val);
21
22int aic_common_set_priority(int priority, unsigned *val);
23
24int aic_common_irq_domain_xlate(struct irq_domain *d,
25 struct device_node *ctrlr,
26 const u32 *intspec,
27 unsigned int intsize,
28 irq_hw_number_t *out_hwirq,
29 unsigned int *out_type);
30
31struct irq_domain *__init aic_common_of_init(struct device_node *node,
32 const struct irq_domain_ops *ops,
33 const char *name, int nirqs);
34
35void __init aic_common_rtc_irq_fixup(struct device_node *root);
36
37void __init aic_common_irq_fixup(const struct of_device_id *matches);
38
39#endif /* __IRQ_ATMEL_AIC_COMMON_H */
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
new file mode 100644
index 000000000000..a82869e9fb26
--- /dev/null
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -0,0 +1,262 @@
1/*
2 * Atmel AT91 AIC (Advanced Interrupt Controller) driver
3 *
4 * Copyright (C) 2004 SAN People
5 * Copyright (C) 2004 ATMEL
6 * Copyright (C) Rick Bronson
7 * Copyright (C) 2014 Free Electrons
8 *
9 * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
10 *
11 * This file is licensed under the terms of the GNU General Public
12 * License version 2. This program is licensed "as is" without any
13 * warranty of any kind, whether express or implied.
14 */
15
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/mm.h>
19#include <linux/bitmap.h>
20#include <linux/types.h>
21#include <linux/irq.h>
22#include <linux/of.h>
23#include <linux/of_address.h>
24#include <linux/of_irq.h>
25#include <linux/irqdomain.h>
26#include <linux/err.h>
27#include <linux/slab.h>
28#include <linux/io.h>
29
30#include <asm/exception.h>
31#include <asm/mach/irq.h>
32
33#include "irq-atmel-aic-common.h"
34#include "irqchip.h"
35
36/* Number of irq lines managed by AIC */
37#define NR_AIC_IRQS 32
38
39#define AT91_AIC_SMR(n) ((n) * 4)
40
41#define AT91_AIC_SVR(n) (0x80 + ((n) * 4))
42#define AT91_AIC_IVR 0x100
43#define AT91_AIC_FVR 0x104
44#define AT91_AIC_ISR 0x108
45
46#define AT91_AIC_IPR 0x10c
47#define AT91_AIC_IMR 0x110
48#define AT91_AIC_CISR 0x114
49
50#define AT91_AIC_IECR 0x120
51#define AT91_AIC_IDCR 0x124
52#define AT91_AIC_ICCR 0x128
53#define AT91_AIC_ISCR 0x12c
54#define AT91_AIC_EOICR 0x130
55#define AT91_AIC_SPU 0x134
56#define AT91_AIC_DCR 0x138
57
58static struct irq_domain *aic_domain;
59
60static asmlinkage void __exception_irq_entry
61aic_handle(struct pt_regs *regs)
62{
63 struct irq_domain_chip_generic *dgc = aic_domain->gc;
64 struct irq_chip_generic *gc = dgc->gc[0];
65 u32 irqnr;
66 u32 irqstat;
67
68 irqnr = irq_reg_readl(gc->reg_base + AT91_AIC_IVR);
69 irqstat = irq_reg_readl(gc->reg_base + AT91_AIC_ISR);
70
71 irqnr = irq_find_mapping(aic_domain, irqnr);
72
73 if (!irqstat)
74 irq_reg_writel(0, gc->reg_base + AT91_AIC_EOICR);
75 else
76 handle_IRQ(irqnr, regs);
77}
78
79static int aic_retrigger(struct irq_data *d)
80{
81 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
82
83 /* Enable interrupt on AIC5 */
84 irq_gc_lock(gc);
85 irq_reg_writel(d->mask, gc->reg_base + AT91_AIC_ISCR);
86 irq_gc_unlock(gc);
87
88 return 0;
89}
90
91static int aic_set_type(struct irq_data *d, unsigned type)
92{
93 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
94 unsigned int smr;
95 int ret;
96
97 smr = irq_reg_readl(gc->reg_base + AT91_AIC_SMR(d->hwirq));
98 ret = aic_common_set_type(d, type, &smr);
99 if (ret)
100 return ret;
101
102 irq_reg_writel(smr, gc->reg_base + AT91_AIC_SMR(d->hwirq));
103
104 return 0;
105}
106
107#ifdef CONFIG_PM
108static void aic_suspend(struct irq_data *d)
109{
110 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
111
112 irq_gc_lock(gc);
113 irq_reg_writel(gc->mask_cache, gc->reg_base + AT91_AIC_IDCR);
114 irq_reg_writel(gc->wake_active, gc->reg_base + AT91_AIC_IECR);
115 irq_gc_unlock(gc);
116}
117
118static void aic_resume(struct irq_data *d)
119{
120 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
121
122 irq_gc_lock(gc);
123 irq_reg_writel(gc->wake_active, gc->reg_base + AT91_AIC_IDCR);
124 irq_reg_writel(gc->mask_cache, gc->reg_base + AT91_AIC_IECR);
125 irq_gc_unlock(gc);
126}
127
128static void aic_pm_shutdown(struct irq_data *d)
129{
130 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
131
132 irq_gc_lock(gc);
133 irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_IDCR);
134 irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_ICCR);
135 irq_gc_unlock(gc);
136}
137#else
138#define aic_suspend NULL
139#define aic_resume NULL
140#define aic_pm_shutdown NULL
141#endif /* CONFIG_PM */
142
143static void __init aic_hw_init(struct irq_domain *domain)
144{
145 struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0);
146 int i;
147
148 /*
149 * Perform 8 End Of Interrupt Command to make sure AIC
150 * will not Lock out nIRQ
151 */
152 for (i = 0; i < 8; i++)
153 irq_reg_writel(0, gc->reg_base + AT91_AIC_EOICR);
154
155 /*
156 * Spurious Interrupt ID in Spurious Vector Register.
157 * When there is no current interrupt, the IRQ Vector Register
158 * reads the value stored in AIC_SPU
159 */
160 irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_SPU);
161
162 /* No debugging in AIC: Debug (Protect) Control Register */
163 irq_reg_writel(0, gc->reg_base + AT91_AIC_DCR);
164
165 /* Disable and clear all interrupts initially */
166 irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_IDCR);
167 irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_ICCR);
168
169 for (i = 0; i < 32; i++)
170 irq_reg_writel(i, gc->reg_base + AT91_AIC_SVR(i));
171}
172
173static int aic_irq_domain_xlate(struct irq_domain *d,
174 struct device_node *ctrlr,
175 const u32 *intspec, unsigned int intsize,
176 irq_hw_number_t *out_hwirq,
177 unsigned int *out_type)
178{
179 struct irq_domain_chip_generic *dgc = d->gc;
180 struct irq_chip_generic *gc;
181 unsigned smr;
182 int idx;
183 int ret;
184
185 if (!dgc)
186 return -EINVAL;
187
188 ret = aic_common_irq_domain_xlate(d, ctrlr, intspec, intsize,
189 out_hwirq, out_type);
190 if (ret)
191 return ret;
192
193 idx = intspec[0] / dgc->irqs_per_chip;
194 if (idx >= dgc->num_chips)
195 return -EINVAL;
196
197 gc = dgc->gc[idx];
198
199 irq_gc_lock(gc);
200 smr = irq_reg_readl(gc->reg_base + AT91_AIC_SMR(*out_hwirq));
201 ret = aic_common_set_priority(intspec[2], &smr);
202 if (!ret)
203 irq_reg_writel(smr, gc->reg_base + AT91_AIC_SMR(*out_hwirq));
204 irq_gc_unlock(gc);
205
206 return ret;
207}
208
209static const struct irq_domain_ops aic_irq_ops = {
210 .map = irq_map_generic_chip,
211 .xlate = aic_irq_domain_xlate,
212};
213
214static void __init at91sam9_aic_irq_fixup(struct device_node *root)
215{
216 aic_common_rtc_irq_fixup(root);
217}
218
219static const struct of_device_id __initdata aic_irq_fixups[] = {
220 { .compatible = "atmel,at91sam9g45", .data = at91sam9_aic_irq_fixup },
221 { .compatible = "atmel,at91sam9n12", .data = at91sam9_aic_irq_fixup },
222 { .compatible = "atmel,at91sam9rl", .data = at91sam9_aic_irq_fixup },
223 { .compatible = "atmel,at91sam9x5", .data = at91sam9_aic_irq_fixup },
224 { /* sentinel */ },
225};
226
227static int __init aic_of_init(struct device_node *node,
228 struct device_node *parent)
229{
230 struct irq_chip_generic *gc;
231 struct irq_domain *domain;
232
233 if (aic_domain)
234 return -EEXIST;
235
236 domain = aic_common_of_init(node, &aic_irq_ops, "atmel-aic",
237 NR_AIC_IRQS);
238 if (IS_ERR(domain))
239 return PTR_ERR(domain);
240
241 aic_common_irq_fixup(aic_irq_fixups);
242
243 aic_domain = domain;
244 gc = irq_get_domain_generic_chip(domain, 0);
245
246 gc->chip_types[0].regs.eoi = AT91_AIC_EOICR;
247 gc->chip_types[0].regs.enable = AT91_AIC_IECR;
248 gc->chip_types[0].regs.disable = AT91_AIC_IDCR;
249 gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
250 gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
251 gc->chip_types[0].chip.irq_retrigger = aic_retrigger;
252 gc->chip_types[0].chip.irq_set_type = aic_set_type;
253 gc->chip_types[0].chip.irq_suspend = aic_suspend;
254 gc->chip_types[0].chip.irq_resume = aic_resume;
255 gc->chip_types[0].chip.irq_pm_shutdown = aic_pm_shutdown;
256
257 aic_hw_init(domain);
258 set_handle_irq(aic_handle);
259
260 return 0;
261}
262IRQCHIP_DECLARE(at91rm9200_aic, "atmel,at91rm9200-aic", aic_of_init);
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
new file mode 100644
index 000000000000..edb227081524
--- /dev/null
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -0,0 +1,353 @@
1/*
2 * Atmel AT91 AIC5 (Advanced Interrupt Controller) driver
3 *
4 * Copyright (C) 2004 SAN People
5 * Copyright (C) 2004 ATMEL
6 * Copyright (C) Rick Bronson
7 * Copyright (C) 2014 Free Electrons
8 *
9 * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
10 *
11 * This file is licensed under the terms of the GNU General Public
12 * License version 2. This program is licensed "as is" without any
13 * warranty of any kind, whether express or implied.
14 */
15
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/mm.h>
19#include <linux/bitmap.h>
20#include <linux/types.h>
21#include <linux/irq.h>
22#include <linux/of.h>
23#include <linux/of_address.h>
24#include <linux/of_irq.h>
25#include <linux/irqdomain.h>
26#include <linux/err.h>
27#include <linux/slab.h>
28#include <linux/io.h>
29
30#include <asm/exception.h>
31#include <asm/mach/irq.h>
32
33#include "irq-atmel-aic-common.h"
34#include "irqchip.h"
35
36/* Number of irq lines managed by AIC */
37#define NR_AIC5_IRQS 128
38
39#define AT91_AIC5_SSR 0x0
40#define AT91_AIC5_INTSEL_MSK (0x7f << 0)
41
42#define AT91_AIC5_SMR 0x4
43
44#define AT91_AIC5_SVR 0x8
45#define AT91_AIC5_IVR 0x10
46#define AT91_AIC5_FVR 0x14
47#define AT91_AIC5_ISR 0x18
48
49#define AT91_AIC5_IPR0 0x20
50#define AT91_AIC5_IPR1 0x24
51#define AT91_AIC5_IPR2 0x28
52#define AT91_AIC5_IPR3 0x2c
53#define AT91_AIC5_IMR 0x30
54#define AT91_AIC5_CISR 0x34
55
56#define AT91_AIC5_IECR 0x40
57#define AT91_AIC5_IDCR 0x44
58#define AT91_AIC5_ICCR 0x48
59#define AT91_AIC5_ISCR 0x4c
60#define AT91_AIC5_EOICR 0x38
61#define AT91_AIC5_SPU 0x3c
62#define AT91_AIC5_DCR 0x6c
63
64#define AT91_AIC5_FFER 0x50
65#define AT91_AIC5_FFDR 0x54
66#define AT91_AIC5_FFSR 0x58
67
68static struct irq_domain *aic5_domain;
69
70static asmlinkage void __exception_irq_entry
71aic5_handle(struct pt_regs *regs)
72{
73 struct irq_domain_chip_generic *dgc = aic5_domain->gc;
74 struct irq_chip_generic *gc = dgc->gc[0];
75 u32 irqnr;
76 u32 irqstat;
77
78 irqnr = irq_reg_readl(gc->reg_base + AT91_AIC5_IVR);
79 irqstat = irq_reg_readl(gc->reg_base + AT91_AIC5_ISR);
80
81 irqnr = irq_find_mapping(aic5_domain, irqnr);
82
83 if (!irqstat)
84 irq_reg_writel(0, gc->reg_base + AT91_AIC5_EOICR);
85 else
86 handle_IRQ(irqnr, regs);
87}
88
89static void aic5_mask(struct irq_data *d)
90{
91 struct irq_domain *domain = d->domain;
92 struct irq_domain_chip_generic *dgc = domain->gc;
93 struct irq_chip_generic *gc = dgc->gc[0];
94
95 /* Disable interrupt on AIC5 */
96 irq_gc_lock(gc);
97 irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR);
98 irq_reg_writel(1, gc->reg_base + AT91_AIC5_IDCR);
99 gc->mask_cache &= ~d->mask;
100 irq_gc_unlock(gc);
101}
102
103static void aic5_unmask(struct irq_data *d)
104{
105 struct irq_domain *domain = d->domain;
106 struct irq_domain_chip_generic *dgc = domain->gc;
107 struct irq_chip_generic *gc = dgc->gc[0];
108
109 /* Enable interrupt on AIC5 */
110 irq_gc_lock(gc);
111 irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR);
112 irq_reg_writel(1, gc->reg_base + AT91_AIC5_IECR);
113 gc->mask_cache |= d->mask;
114 irq_gc_unlock(gc);
115}
116
117static int aic5_retrigger(struct irq_data *d)
118{
119 struct irq_domain *domain = d->domain;
120 struct irq_domain_chip_generic *dgc = domain->gc;
121 struct irq_chip_generic *gc = dgc->gc[0];
122
123 /* Enable interrupt on AIC5 */
124 irq_gc_lock(gc);
125 irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR);
126 irq_reg_writel(1, gc->reg_base + AT91_AIC5_ISCR);
127 irq_gc_unlock(gc);
128
129 return 0;
130}
131
132static int aic5_set_type(struct irq_data *d, unsigned type)
133{
134 struct irq_domain *domain = d->domain;
135 struct irq_domain_chip_generic *dgc = domain->gc;
136 struct irq_chip_generic *gc = dgc->gc[0];
137 unsigned int smr;
138 int ret;
139
140 irq_gc_lock(gc);
141 irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR);
142 smr = irq_reg_readl(gc->reg_base + AT91_AIC5_SMR);
143 ret = aic_common_set_type(d, type, &smr);
144 if (!ret)
145 irq_reg_writel(smr, gc->reg_base + AT91_AIC5_SMR);
146 irq_gc_unlock(gc);
147
148 return ret;
149}
150
151#ifdef CONFIG_PM
152static void aic5_suspend(struct irq_data *d)
153{
154 struct irq_domain *domain = d->domain;
155 struct irq_domain_chip_generic *dgc = domain->gc;
156 struct irq_chip_generic *bgc = dgc->gc[0];
157 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
158 int i;
159 u32 mask;
160
161 irq_gc_lock(bgc);
162 for (i = 0; i < dgc->irqs_per_chip; i++) {
163 mask = 1 << i;
164 if ((mask & gc->mask_cache) == (mask & gc->wake_active))
165 continue;
166
167 irq_reg_writel(i + gc->irq_base,
168 bgc->reg_base + AT91_AIC5_SSR);
169 if (mask & gc->wake_active)
170 irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IECR);
171 else
172 irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IDCR);
173 }
174 irq_gc_unlock(bgc);
175}
176
177static void aic5_resume(struct irq_data *d)
178{
179 struct irq_domain *domain = d->domain;
180 struct irq_domain_chip_generic *dgc = domain->gc;
181 struct irq_chip_generic *bgc = dgc->gc[0];
182 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
183 int i;
184 u32 mask;
185
186 irq_gc_lock(bgc);
187 for (i = 0; i < dgc->irqs_per_chip; i++) {
188 mask = 1 << i;
189 if ((mask & gc->mask_cache) == (mask & gc->wake_active))
190 continue;
191
192 irq_reg_writel(i + gc->irq_base,
193 bgc->reg_base + AT91_AIC5_SSR);
194 if (mask & gc->mask_cache)
195 irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IECR);
196 else
197 irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IDCR);
198 }
199 irq_gc_unlock(bgc);
200}
201
202static void aic5_pm_shutdown(struct irq_data *d)
203{
204 struct irq_domain *domain = d->domain;
205 struct irq_domain_chip_generic *dgc = domain->gc;
206 struct irq_chip_generic *bgc = dgc->gc[0];
207 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
208 int i;
209
210 irq_gc_lock(bgc);
211 for (i = 0; i < dgc->irqs_per_chip; i++) {
212 irq_reg_writel(i + gc->irq_base,
213 bgc->reg_base + AT91_AIC5_SSR);
214 irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IDCR);
215 irq_reg_writel(1, bgc->reg_base + AT91_AIC5_ICCR);
216 }
217 irq_gc_unlock(bgc);
218}
219#else
220#define aic5_suspend NULL
221#define aic5_resume NULL
222#define aic5_pm_shutdown NULL
223#endif /* CONFIG_PM */
224
225static void __init aic5_hw_init(struct irq_domain *domain)
226{
227 struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0);
228 int i;
229
230 /*
231 * Perform 8 End Of Interrupt Command to make sure AIC
232 * will not Lock out nIRQ
233 */
234 for (i = 0; i < 8; i++)
235 irq_reg_writel(0, gc->reg_base + AT91_AIC5_EOICR);
236
237 /*
238 * Spurious Interrupt ID in Spurious Vector Register.
239 * When there is no current interrupt, the IRQ Vector Register
240 * reads the value stored in AIC_SPU
241 */
242 irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC5_SPU);
243
244 /* No debugging in AIC: Debug (Protect) Control Register */
245 irq_reg_writel(0, gc->reg_base + AT91_AIC5_DCR);
246
247 /* Disable and clear all interrupts initially */
248 for (i = 0; i < domain->revmap_size; i++) {
249 irq_reg_writel(i, gc->reg_base + AT91_AIC5_SSR);
250 irq_reg_writel(i, gc->reg_base + AT91_AIC5_SVR);
251 irq_reg_writel(1, gc->reg_base + AT91_AIC5_IDCR);
252 irq_reg_writel(1, gc->reg_base + AT91_AIC5_ICCR);
253 }
254}
255
256static int aic5_irq_domain_xlate(struct irq_domain *d,
257 struct device_node *ctrlr,
258 const u32 *intspec, unsigned int intsize,
259 irq_hw_number_t *out_hwirq,
260 unsigned int *out_type)
261{
262 struct irq_domain_chip_generic *dgc = d->gc;
263 struct irq_chip_generic *gc;
264 unsigned smr;
265 int ret;
266
267 if (!dgc)
268 return -EINVAL;
269
270 ret = aic_common_irq_domain_xlate(d, ctrlr, intspec, intsize,
271 out_hwirq, out_type);
272 if (ret)
273 return ret;
274
275 gc = dgc->gc[0];
276
277 irq_gc_lock(gc);
278 irq_reg_writel(*out_hwirq, gc->reg_base + AT91_AIC5_SSR);
279 smr = irq_reg_readl(gc->reg_base + AT91_AIC5_SMR);
280 ret = aic_common_set_priority(intspec[2], &smr);
281 if (!ret)
282 irq_reg_writel(intspec[2] | smr, gc->reg_base + AT91_AIC5_SMR);
283 irq_gc_unlock(gc);
284
285 return ret;
286}
287
288static const struct irq_domain_ops aic5_irq_ops = {
289 .map = irq_map_generic_chip,
290 .xlate = aic5_irq_domain_xlate,
291};
292
293static void __init sama5d3_aic_irq_fixup(struct device_node *root)
294{
295 aic_common_rtc_irq_fixup(root);
296}
297
298static const struct of_device_id __initdata aic5_irq_fixups[] = {
299 { .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup },
300 { /* sentinel */ },
301};
302
303static int __init aic5_of_init(struct device_node *node,
304 struct device_node *parent,
305 int nirqs)
306{
307 struct irq_chip_generic *gc;
308 struct irq_domain *domain;
309 int nchips;
310 int i;
311
312 if (nirqs > NR_AIC5_IRQS)
313 return -EINVAL;
314
315 if (aic5_domain)
316 return -EEXIST;
317
318 domain = aic_common_of_init(node, &aic5_irq_ops, "atmel-aic5",
319 nirqs);
320 if (IS_ERR(domain))
321 return PTR_ERR(domain);
322
323 aic_common_irq_fixup(aic5_irq_fixups);
324
325 aic5_domain = domain;
326 nchips = aic5_domain->revmap_size / 32;
327 for (i = 0; i < nchips; i++) {
328 gc = irq_get_domain_generic_chip(domain, i * 32);
329
330 gc->chip_types[0].regs.eoi = AT91_AIC5_EOICR;
331 gc->chip_types[0].chip.irq_mask = aic5_mask;
332 gc->chip_types[0].chip.irq_unmask = aic5_unmask;
333 gc->chip_types[0].chip.irq_retrigger = aic5_retrigger;
334 gc->chip_types[0].chip.irq_set_type = aic5_set_type;
335 gc->chip_types[0].chip.irq_suspend = aic5_suspend;
336 gc->chip_types[0].chip.irq_resume = aic5_resume;
337 gc->chip_types[0].chip.irq_pm_shutdown = aic5_pm_shutdown;
338 }
339
340 aic5_hw_init(domain);
341 set_handle_irq(aic5_handle);
342
343 return 0;
344}
345
346#define NR_SAMA5D3_IRQS 50
347
348static int __init sama5d3_aic5_of_init(struct device_node *node,
349 struct device_node *parent)
350{
351 return aic5_of_init(node, parent, NR_SAMA5D3_IRQS);
352}
353IRQCHIP_DECLARE(sama5d3_aic5, "atmel,sama5d3-aic", sama5d3_aic5_of_init);
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index 3d15d16a7088..85c2985d8bcb 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -15,22 +15,31 @@
 #include <linux/of_irq.h>
 #include <linux/slab.h>
 #include <linux/irqchip/arm-gic.h>
+#include <linux/irqchip/irq-crossbar.h>
 
 #define IRQ_FREE	-1
+#define IRQ_RESERVED	-2
+#define IRQ_SKIP	-3
 #define GIC_IRQ_START	32
 
-/*
+/**
+ * struct crossbar_device - crossbar device description
  * @int_max: maximum number of supported interrupts
+ * @safe_map: safe default value to initialize the crossbar
+ * @max_crossbar_sources: Maximum number of crossbar sources
  * @irq_map: array of interrupts to crossbar number mapping
  * @crossbar_base: crossbar base address
  * @register_offsets: offsets for each irq number
+ * @write: register write function pointer
  */
 struct crossbar_device {
 	uint int_max;
+	uint safe_map;
+	uint max_crossbar_sources;
 	uint *irq_map;
 	void __iomem *crossbar_base;
 	int *register_offsets;
-	void (*write) (int, int);
+	void (*write)(int, int);
 };
 
 static struct crossbar_device *cb;
@@ -50,11 +59,22 @@ static inline void crossbar_writeb(int irq_no, int cb_no)
 	writeb(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
 }
 
+static inline int get_prev_map_irq(int cb_no)
+{
+	int i;
+
+	for (i = cb->int_max - 1; i >= 0; i--)
+		if (cb->irq_map[i] == cb_no)
+			return i;
+
+	return -ENODEV;
+}
+
 static inline int allocate_free_irq(int cb_no)
 {
 	int i;
 
-	for (i = 0; i < cb->int_max; i++) {
+	for (i = cb->int_max - 1; i >= 0; i--) {
 		if (cb->irq_map[i] == IRQ_FREE) {
 			cb->irq_map[i] = cb_no;
 			return i;
@@ -64,19 +84,47 @@ static inline int allocate_free_irq(int cb_no)
 	return -ENODEV;
 }
 
+static inline bool needs_crossbar_write(irq_hw_number_t hw)
+{
+	int cb_no;
+
+	if (hw > GIC_IRQ_START) {
+		cb_no = cb->irq_map[hw - GIC_IRQ_START];
+		if (cb_no != IRQ_RESERVED && cb_no != IRQ_SKIP)
+			return true;
+	}
+
+	return false;
+}
+
 static int crossbar_domain_map(struct irq_domain *d, unsigned int irq,
 			       irq_hw_number_t hw)
 {
-	cb->write(hw - GIC_IRQ_START, cb->irq_map[hw - GIC_IRQ_START]);
+	if (needs_crossbar_write(hw))
+		cb->write(hw - GIC_IRQ_START, cb->irq_map[hw - GIC_IRQ_START]);
+
 	return 0;
 }
 
+/**
+ * crossbar_domain_unmap - unmap a crossbar<->irq connection
+ * @d: domain of irq to unmap
+ * @irq: virq number
+ *
+ * We do not maintain a use count of total number of map/unmap
+ * calls for a particular irq to find out if a irq can be really
+ * unmapped. This is because unmap is called during irq_dispose_mapping(irq),
+ * after which irq is anyways unusable. So an explicit map has to be called
+ * after that.
+ */
 static void crossbar_domain_unmap(struct irq_domain *d, unsigned int irq)
 {
 	irq_hw_number_t hw = irq_get_irq_data(irq)->hwirq;
 
-	if (hw > GIC_IRQ_START)
+	if (needs_crossbar_write(hw)) {
 		cb->irq_map[hw - GIC_IRQ_START] = IRQ_FREE;
+		cb->write(hw - GIC_IRQ_START, cb->safe_map);
+	}
 }
 
 static int crossbar_domain_xlate(struct irq_domain *d,
@@ -85,18 +133,41 @@ static int crossbar_domain_xlate(struct irq_domain *d,
 				 unsigned long *out_hwirq,
 				 unsigned int *out_type)
 {
-	unsigned long ret;
+	int ret;
+	int req_num = intspec[1];
+	int direct_map_num;
+
+	if (req_num >= cb->max_crossbar_sources) {
+		direct_map_num = req_num - cb->max_crossbar_sources;
+		if (direct_map_num < cb->int_max) {
+			ret = cb->irq_map[direct_map_num];
+			if (ret == IRQ_RESERVED || ret == IRQ_SKIP) {
+				/* We use the interrupt num as h/w irq num */
+				ret = direct_map_num;
+				goto found;
+			}
+		}
+
+		pr_err("%s: requested crossbar number %d > max %d\n",
+		       __func__, req_num, cb->max_crossbar_sources);
+		return -EINVAL;
+	}
 
-	ret = allocate_free_irq(intspec[1]);
+	ret = get_prev_map_irq(req_num);
+	if (ret >= 0)
+		goto found;
 
-	if (IS_ERR_VALUE(ret))
+	ret = allocate_free_irq(req_num);
+
+	if (ret < 0)
 		return ret;
 
+found:
 	*out_hwirq = ret + GIC_IRQ_START;
 	return 0;
 }
 
-const struct irq_domain_ops routable_irq_domain_ops = {
+static const struct irq_domain_ops routable_irq_domain_ops = {
 	.map = crossbar_domain_map,
 	.unmap = crossbar_domain_unmap,
 	.xlate = crossbar_domain_xlate
@@ -104,22 +175,36 @@ const struct irq_domain_ops routable_irq_domain_ops = {
 
 static int __init crossbar_of_init(struct device_node *node)
 {
-	int i, size, max, reserved = 0, entry;
+	int i, size, max = 0, reserved = 0, entry;
 	const __be32 *irqsr;
+	int ret = -ENOMEM;
 
 	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
 
 	if (!cb)
-		return -ENOMEM;
+		return ret;
 
 	cb->crossbar_base = of_iomap(node, 0);
 	if (!cb->crossbar_base)
-		goto err1;
+		goto err_cb;
+
+	of_property_read_u32(node, "ti,max-crossbar-sources",
+			     &cb->max_crossbar_sources);
+	if (!cb->max_crossbar_sources) {
+		pr_err("missing 'ti,max-crossbar-sources' property\n");
+		ret = -EINVAL;
+		goto err_base;
+	}
 
 	of_property_read_u32(node, "ti,max-irqs", &max);
-	cb->irq_map = kzalloc(max * sizeof(int), GFP_KERNEL);
+	if (!max) {
+		pr_err("missing 'ti,max-irqs' property\n");
+		ret = -EINVAL;
+		goto err_base;
+	}
+	cb->irq_map = kcalloc(max, sizeof(int), GFP_KERNEL);
 	if (!cb->irq_map)
-		goto err2;
+		goto err_base;
 
 	cb->int_max = max;
 
@@ -137,15 +222,35 @@ static int __init crossbar_of_init(struct device_node *node)
 						   i, &entry);
 			if (entry > max) {
 				pr_err("Invalid reserved entry\n");
-				goto err3;
+				ret = -EINVAL;
+				goto err_irq_map;
+			}
+			cb->irq_map[entry] = IRQ_RESERVED;
+		}
+	}
+
+	/* Skip irqs hardwired to bypass the crossbar */
+	irqsr = of_get_property(node, "ti,irqs-skip", &size);
+	if (irqsr) {
+		size /= sizeof(__be32);
+
+		for (i = 0; i < size; i++) {
+			of_property_read_u32_index(node,
+						   "ti,irqs-skip",
+						   i, &entry);
+			if (entry > max) {
+				pr_err("Invalid skip entry\n");
+				ret = -EINVAL;
+				goto err_irq_map;
 			}
-			cb->irq_map[entry] = 0;
+			cb->irq_map[entry] = IRQ_SKIP;
 		}
 	}
 
-	cb->register_offsets = kzalloc(max * sizeof(int), GFP_KERNEL);
+
+	cb->register_offsets = kcalloc(max, sizeof(int), GFP_KERNEL);
 	if (!cb->register_offsets)
-		goto err3;
+		goto err_irq_map;
 
 	of_property_read_u32(node, "ti,reg-size", &size);
 
@@ -161,7 +266,8 @@ static int __init crossbar_of_init(struct device_node *node)
 		break;
 	default:
 		pr_err("Invalid reg-size property\n");
-		goto err4;
+		ret = -EINVAL;
+		goto err_reg_offset;
 		break;
 	}
 
@@ -170,25 +276,37 @@ static int __init crossbar_of_init(struct device_node *node)
 	 * reserved irqs. so find and store the offsets once.
 	 */
 	for (i = 0; i < max; i++) {
-		if (!cb->irq_map[i])
+		if (cb->irq_map[i] == IRQ_RESERVED)
 			continue;
 
 		cb->register_offsets[i] = reserved;
 		reserved += size;
 	}
 
+	of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map);
+	/* Initialize the crossbar with safe map to start with */
+	for (i = 0; i < max; i++) {
+		if (cb->irq_map[i] == IRQ_RESERVED ||
+		    cb->irq_map[i] == IRQ_SKIP)
+			continue;
+
+		cb->write(i, cb->safe_map);
+	}
+
 	register_routable_domain_ops(&routable_irq_domain_ops);
 	return 0;
 
-err4:
+err_reg_offset:
 	kfree(cb->register_offsets);
-err3:
+err_irq_map:
 	kfree(cb->irq_map);
-err2:
+err_base:
 	iounmap(cb->crossbar_base);
-err1:
+err_cb:
 	kfree(cb);
-	return -ENOMEM;
+
+	cb = NULL;
+	return ret;
 }
 
 static const struct of_device_id crossbar_match[] __initconst = {
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
new file mode 100644
index 000000000000..60ac704d2090
--- /dev/null
+++ b/drivers/irqchip/irq-gic-common.c
@@ -0,0 +1,115 @@
1/*
2 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/interrupt.h>
18#include <linux/io.h>
19#include <linux/irq.h>
20#include <linux/irqchip/arm-gic.h>
21
22#include "irq-gic-common.h"
23
24void gic_configure_irq(unsigned int irq, unsigned int type,
25 void __iomem *base, void (*sync_access)(void))
26{
27 u32 enablemask = 1 << (irq % 32);
28 u32 enableoff = (irq / 32) * 4;
29 u32 confmask = 0x2 << ((irq % 16) * 2);
30 u32 confoff = (irq / 16) * 4;
31 bool enabled = false;
32 u32 val;
33
34 /*
35 * Read current configuration register, and insert the config
36 * for "irq", depending on "type".
37 */
38 val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
39 if (type == IRQ_TYPE_LEVEL_HIGH)
40 val &= ~confmask;
41 else if (type == IRQ_TYPE_EDGE_RISING)
42 val |= confmask;
43
44 /*
45 * As recommended by the spec, disable the interrupt before changing
46 * the configuration
47 */
48 if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
49 writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
50 if (sync_access)
51 sync_access();
52 enabled = true;
53 }
54
55 /*
56 * Write back the new configuration, and possibly re-enable
57 * the interrupt.
58 */
59 writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
60
61 if (enabled)
62 writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
63
64 if (sync_access)
65 sync_access();
66}
67
68void __init gic_dist_config(void __iomem *base, int gic_irqs,
69 void (*sync_access)(void))
70{
71 unsigned int i;
72
73 /*
74 * Set all global interrupts to be level triggered, active low.
75 */
76 for (i = 32; i < gic_irqs; i += 16)
77 writel_relaxed(0, base + GIC_DIST_CONFIG + i / 4);
78
79 /*
80 * Set priority on all global interrupts.
81 */
82 for (i = 32; i < gic_irqs; i += 4)
83 writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i);
84
85 /*
86 * Disable all interrupts. Leave the PPI and SGIs alone
87 * as they are enabled by redistributor registers.
88 */
89 for (i = 32; i < gic_irqs; i += 32)
90 writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i / 8);
91
92 if (sync_access)
93 sync_access();
94}
95
96void gic_cpu_config(void __iomem *base, void (*sync_access)(void))
97{
98 int i;
99
100 /*
101 * Deal with the banked PPI and SGI interrupts - disable all
102 * PPI interrupts, ensure all SGI interrupts are enabled.
103 */
104 writel_relaxed(0xffff0000, base + GIC_DIST_ENABLE_CLEAR);
105 writel_relaxed(0x0000ffff, base + GIC_DIST_ENABLE_SET);
106
107 /*
108 * Set priority on PPI and SGI interrupts
109 */
110 for (i = 0; i < 32; i += 4)
111 writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
112
113 if (sync_access)
114 sync_access();
115}
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
new file mode 100644
index 000000000000..b41f02481c3a
--- /dev/null
+++ b/drivers/irqchip/irq-gic-common.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef _IRQ_GIC_COMMON_H
18#define _IRQ_GIC_COMMON_H
19
20#include <linux/of.h>
21#include <linux/irqdomain.h>
22
23void gic_configure_irq(unsigned int irq, unsigned int type,
24 void __iomem *base, void (*sync_access)(void));
25void gic_dist_config(void __iomem *base, int gic_irqs,
26 void (*sync_access)(void));
27void gic_cpu_config(void __iomem *base, void (*sync_access)(void));
28
29#endif /* _IRQ_GIC_COMMON_H */
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
new file mode 100644
index 000000000000..57eaa5a0b1e3
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -0,0 +1,692 @@
1/*
2 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/cpu.h>
19#include <linux/delay.h>
20#include <linux/interrupt.h>
21#include <linux/of.h>
22#include <linux/of_address.h>
23#include <linux/of_irq.h>
24#include <linux/percpu.h>
25#include <linux/slab.h>
26
27#include <linux/irqchip/arm-gic-v3.h>
28
29#include <asm/cputype.h>
30#include <asm/exception.h>
31#include <asm/smp_plat.h>
32
33#include "irq-gic-common.h"
34#include "irqchip.h"
35
36struct gic_chip_data {
37 void __iomem *dist_base;
38 void __iomem **redist_base;
39 void __percpu __iomem **rdist;
40 struct irq_domain *domain;
41 u64 redist_stride;
42 u32 redist_regions;
43 unsigned int irq_nr;
44};
45
46static struct gic_chip_data gic_data __read_mostly;
47
48#define gic_data_rdist() (this_cpu_ptr(gic_data.rdist))
49#define gic_data_rdist_rd_base() (*gic_data_rdist())
50#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
51
52/* Our default, arbitrary priority value. Linux only uses one anyway. */
53#define DEFAULT_PMR_VALUE 0xf0
54
55static inline unsigned int gic_irq(struct irq_data *d)
56{
57 return d->hwirq;
58}
59
60static inline int gic_irq_in_rdist(struct irq_data *d)
61{
62 return gic_irq(d) < 32;
63}
64
65static inline void __iomem *gic_dist_base(struct irq_data *d)
66{
67 if (gic_irq_in_rdist(d)) /* SGI+PPI -> SGI_base for this CPU */
68 return gic_data_rdist_sgi_base();
69
70 if (d->hwirq <= 1023) /* SPI -> dist_base */
71 return gic_data.dist_base;
72
73 if (d->hwirq >= 8192)
74 BUG(); /* LPI Detected!!! */
75
76 return NULL;
77}
78
79static void gic_do_wait_for_rwp(void __iomem *base)
80{
81 u32 count = 1000000; /* 1s! */
82
83 while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
84 count--;
85 if (!count) {
86 pr_err_ratelimited("RWP timeout, gone fishing\n");
87 return;
88 }
89 cpu_relax();
90 udelay(1);
91 };
92}
93
94/* Wait for completion of a distributor change */
95static void gic_dist_wait_for_rwp(void)
96{
97 gic_do_wait_for_rwp(gic_data.dist_base);
98}
99
100/* Wait for completion of a redistributor change */
101static void gic_redist_wait_for_rwp(void)
102{
103 gic_do_wait_for_rwp(gic_data_rdist_rd_base());
104}
105
106/* Low level accessors */
107static u64 gic_read_iar(void)
108{
109 u64 irqstat;
110
111 asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
112 return irqstat;
113}
114
115static void gic_write_pmr(u64 val)
116{
117 asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
118}
119
120static void gic_write_ctlr(u64 val)
121{
122 asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
123 isb();
124}
125
126static void gic_write_grpen1(u64 val)
127{
128 asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
129 isb();
130}
131
132static void gic_write_sgi1r(u64 val)
133{
134 asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
135}
136
137static void gic_enable_sre(void)
138{
139 u64 val;
140
141 asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
142 val |= ICC_SRE_EL1_SRE;
143 asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val));
144 isb();
145
146 /*
147 * Need to check that the SRE bit has actually been set. If
148 * not, it means that SRE is disabled at EL2. We're going to
149 * die painfully, and there is nothing we can do about it.
150 *
151 * Kindly inform the luser.
152 */
153 asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
154 if (!(val & ICC_SRE_EL1_SRE))
155 pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
156}
157
158static void gic_enable_redist(void)
159{
160 void __iomem *rbase;
161 u32 count = 1000000; /* 1s! */
162 u32 val;
163
164 rbase = gic_data_rdist_rd_base();
165
166 /* Wake up this CPU redistributor */
167 val = readl_relaxed(rbase + GICR_WAKER);
168 val &= ~GICR_WAKER_ProcessorSleep;
169 writel_relaxed(val, rbase + GICR_WAKER);
170
171 while (readl_relaxed(rbase + GICR_WAKER) & GICR_WAKER_ChildrenAsleep) {
172 count--;
173 if (!count) {
174 pr_err_ratelimited("redist didn't wake up...\n");
175 return;
176 }
177 cpu_relax();
178 udelay(1);
179 };
180}
181
182/*
183 * Routines to disable, enable, EOI and route interrupts
184 */
185static void gic_poke_irq(struct irq_data *d, u32 offset)
186{
187 u32 mask = 1 << (gic_irq(d) % 32);
188 void (*rwp_wait)(void);
189 void __iomem *base;
190
191 if (gic_irq_in_rdist(d)) {
192 base = gic_data_rdist_sgi_base();
193 rwp_wait = gic_redist_wait_for_rwp;
194 } else {
195 base = gic_data.dist_base;
196 rwp_wait = gic_dist_wait_for_rwp;
197 }
198
199 writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
200 rwp_wait();
201}
202
203static int gic_peek_irq(struct irq_data *d, u32 offset)
204{
205 u32 mask = 1 << (gic_irq(d) % 32);
206 void __iomem *base;
207
208 if (gic_irq_in_rdist(d))
209 base = gic_data_rdist_sgi_base();
210 else
211 base = gic_data.dist_base;
212
213 return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
214}
215
216static void gic_mask_irq(struct irq_data *d)
217{
218 gic_poke_irq(d, GICD_ICENABLER);
219}
220
221static void gic_unmask_irq(struct irq_data *d)
222{
223 gic_poke_irq(d, GICD_ISENABLER);
224}
225
226static void gic_eoi_irq(struct irq_data *d)
227{
228 gic_write_eoir(gic_irq(d));
229}
230
231static int gic_set_type(struct irq_data *d, unsigned int type)
232{
233 unsigned int irq = gic_irq(d);
234 void (*rwp_wait)(void);
235 void __iomem *base;
236
237 /* Interrupt configuration for SGIs can't be changed */
238 if (irq < 16)
239 return -EINVAL;
240
241 if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
242 return -EINVAL;
243
244 if (gic_irq_in_rdist(d)) {
245 base = gic_data_rdist_sgi_base();
246 rwp_wait = gic_redist_wait_for_rwp;
247 } else {
248 base = gic_data.dist_base;
249 rwp_wait = gic_dist_wait_for_rwp;
250 }
251
252 gic_configure_irq(irq, type, base, rwp_wait);
253
254 return 0;
255}
256
257static u64 gic_mpidr_to_affinity(u64 mpidr)
258{
259 u64 aff;
260
261 aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
262 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
263 MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
264 MPIDR_AFFINITY_LEVEL(mpidr, 0));
265
266 return aff;
267}
268
269static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
270{
271 u64 irqnr;
272
273 do {
274 irqnr = gic_read_iar();
275
276 if (likely(irqnr > 15 && irqnr < 1020)) {
277 u64 irq = irq_find_mapping(gic_data.domain, irqnr);
278 if (likely(irq)) {
279 handle_IRQ(irq, regs);
280 continue;
281 }
282
283 WARN_ONCE(true, "Unexpected SPI received!\n");
284 gic_write_eoir(irqnr);
285 }
286 if (irqnr < 16) {
287 gic_write_eoir(irqnr);
288#ifdef CONFIG_SMP
289 handle_IPI(irqnr, regs);
290#else
291 WARN_ONCE(true, "Unexpected SGI received!\n");
292#endif
293 continue;
294 }
295 } while (irqnr != ICC_IAR1_EL1_SPURIOUS);
296}
297
298static void __init gic_dist_init(void)
299{
300 unsigned int i;
301 u64 affinity;
302 void __iomem *base = gic_data.dist_base;
303
304 /* Disable the distributor */
305 writel_relaxed(0, base + GICD_CTLR);
306 gic_dist_wait_for_rwp();
307
308 gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);
309
310 /* Enable distributor with ARE, Group1 */
311 writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
312 base + GICD_CTLR);
313
314 /*
315 * Set all global interrupts to the boot CPU only. ARE must be
316 * enabled.
317 */
318 affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
319 for (i = 32; i < gic_data.irq_nr; i++)
320 writeq_relaxed(affinity, base + GICD_IROUTER + i * 8);
321}
322
323static int gic_populate_rdist(void)
324{
325 u64 mpidr = cpu_logical_map(smp_processor_id());
326 u64 typer;
327 u32 aff;
328 int i;
329
330 /*
331 * Convert affinity to a 32bit value that can be matched to
332 * GICR_TYPER bits [63:32].
333 */
334 aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
335 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
336 MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
337 MPIDR_AFFINITY_LEVEL(mpidr, 0));
338
339 for (i = 0; i < gic_data.redist_regions; i++) {
340 void __iomem *ptr = gic_data.redist_base[i];
341 u32 reg;
342
343 reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
344 if (reg != GIC_PIDR2_ARCH_GICv3 &&
345 reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
346 pr_warn("No redistributor present @%p\n", ptr);
347 break;
348 }
349
350 do {
351 typer = readq_relaxed(ptr + GICR_TYPER);
352 if ((typer >> 32) == aff) {
353 gic_data_rdist_rd_base() = ptr;
354 pr_info("CPU%d: found redistributor %llx @%p\n",
355 smp_processor_id(),
356 (unsigned long long)mpidr, ptr);
357 return 0;
358 }
359
360 if (gic_data.redist_stride) {
361 ptr += gic_data.redist_stride;
362 } else {
363 ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
364 if (typer & GICR_TYPER_VLPIS)
365 ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
366 }
367 } while (!(typer & GICR_TYPER_LAST));
368 }
369
370 /* We couldn't even deal with ourselves... */
371 WARN(true, "CPU%d: mpidr %llx has no re-distributor!\n",
372 smp_processor_id(), (unsigned long long)mpidr);
373 return -ENODEV;
374}
375
376static void gic_cpu_init(void)
377{
378 void __iomem *rbase;
379
380 /* Register ourselves with the rest of the world */
381 if (gic_populate_rdist())
382 return;
383
384 gic_enable_redist();
385
386 rbase = gic_data_rdist_sgi_base();
387
388 gic_cpu_config(rbase, gic_redist_wait_for_rwp);
389
390 /* Enable system registers */
391 gic_enable_sre();
392
393 /* Set priority mask register */
394 gic_write_pmr(DEFAULT_PMR_VALUE);
395
396 /* EOI deactivates interrupt too (mode 0) */
397 gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
398
399 /* ... and let's hit the road... */
400 gic_write_grpen1(1);
401}
402
403#ifdef CONFIG_SMP
404static int gic_secondary_init(struct notifier_block *nfb,
405 unsigned long action, void *hcpu)
406{
407 if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
408 gic_cpu_init();
409 return NOTIFY_OK;
410}
411
412/*
413 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
414 * priority because the GIC needs to be up before the ARM generic timers.
415 */
416static struct notifier_block gic_cpu_notifier = {
417 .notifier_call = gic_secondary_init,
418 .priority = 100,
419};
420
421static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
422 u64 cluster_id)
423{
424 int cpu = *base_cpu;
425 u64 mpidr = cpu_logical_map(cpu);
426 u16 tlist = 0;
427
428 while (cpu < nr_cpu_ids) {
429 /*
430 * If we ever get a cluster of more than 16 CPUs, just
431 * scream and skip that CPU.
432 */
433 if (WARN_ON((mpidr & 0xff) >= 16))
434 goto out;
435
436 tlist |= 1 << (mpidr & 0xf);
437
438 cpu = cpumask_next(cpu, mask);
439 if (cpu == nr_cpu_ids)
440 goto out;
441
442 mpidr = cpu_logical_map(cpu);
443
444 if (cluster_id != (mpidr & ~0xffUL)) {
445 cpu--;
446 goto out;
447 }
448 }
449out:
450 *base_cpu = cpu;
451 return tlist;
452}
453
454static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
455{
456 u64 val;
457
458 val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48 |
459 MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32 |
460 irq << 24 |
461 MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16 |
462 tlist);
463
464 pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
465 gic_write_sgi1r(val);
466}
467
468static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
469{
470 int cpu;
471
472 if (WARN_ON(irq >= 16))
473 return;
474
475 /*
476 * Ensure that stores to Normal memory are visible to the
477 * other CPUs before issuing the IPI.
478 */
479 smp_wmb();
480
481 for_each_cpu_mask(cpu, *mask) {
482 u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
483 u16 tlist;
484
485 tlist = gic_compute_target_list(&cpu, mask, cluster_id);
486 gic_send_sgi(cluster_id, tlist, irq);
487 }
488
489 /* Force the above writes to ICC_SGI1R_EL1 to be executed */
490 isb();
491}
492
493static void gic_smp_init(void)
494{
495 set_smp_cross_call(gic_raise_softirq);
496 register_cpu_notifier(&gic_cpu_notifier);
497}
498
499static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
500 bool force)
501{
502 unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
503 void __iomem *reg;
504 int enabled;
505 u64 val;
506
507 if (gic_irq_in_rdist(d))
508 return -EINVAL;
509
510 /* If interrupt was enabled, disable it first */
511 enabled = gic_peek_irq(d, GICD_ISENABLER);
512 if (enabled)
513 gic_mask_irq(d);
514
515 reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
516 val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
517
518 writeq_relaxed(val, reg);
519
520 /*
521	 * If the interrupt was enabled, enable it again. Otherwise,
522 * just wait for the distributor to have digested our changes.
523 */
524 if (enabled)
525 gic_unmask_irq(d);
526 else
527 gic_dist_wait_for_rwp();
528
529 return IRQ_SET_MASK_OK;
530}
531#else
532#define gic_set_affinity NULL
533#define gic_smp_init() do { } while(0)
534#endif
535
536static struct irq_chip gic_chip = {
537 .name = "GICv3",
538 .irq_mask = gic_mask_irq,
539 .irq_unmask = gic_unmask_irq,
540 .irq_eoi = gic_eoi_irq,
541 .irq_set_type = gic_set_type,
542 .irq_set_affinity = gic_set_affinity,
543};
544
545static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
546 irq_hw_number_t hw)
547{
548 /* SGIs are private to the core kernel */
549 if (hw < 16)
550 return -EPERM;
551 /* PPIs */
552 if (hw < 32) {
553 irq_set_percpu_devid(irq);
554 irq_set_chip_and_handler(irq, &gic_chip,
555 handle_percpu_devid_irq);
556 set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
557 }
558 /* SPIs */
559 if (hw >= 32 && hw < gic_data.irq_nr) {
560 irq_set_chip_and_handler(irq, &gic_chip,
561 handle_fasteoi_irq);
562 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
563 }
564 irq_set_chip_data(irq, d->host_data);
565 return 0;
566}
567
568static int gic_irq_domain_xlate(struct irq_domain *d,
569 struct device_node *controller,
570 const u32 *intspec, unsigned int intsize,
571 unsigned long *out_hwirq, unsigned int *out_type)
572{
573 if (d->of_node != controller)
574 return -EINVAL;
575 if (intsize < 3)
576 return -EINVAL;
577
578 switch(intspec[0]) {
579 case 0: /* SPI */
580 *out_hwirq = intspec[1] + 32;
581 break;
582 case 1: /* PPI */
583 *out_hwirq = intspec[1] + 16;
584 break;
585 default:
586 return -EINVAL;
587 }
588
589 *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
590 return 0;
591}
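/*
 * For illustration, a hypothetical DT specifier interrupts = <0 45 4>
 * is translated above to hwirq 45 + 32 = 77 (an SPI) with
 * IRQ_TYPE_LEVEL_HIGH as the trigger type.
 */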
592
593static const struct irq_domain_ops gic_irq_domain_ops = {
594 .map = gic_irq_domain_map,
595 .xlate = gic_irq_domain_xlate,
596};
597
598static int __init gic_of_init(struct device_node *node, struct device_node *parent)
599{
600 void __iomem *dist_base;
601 void __iomem **redist_base;
602 u64 redist_stride;
603 u32 redist_regions;
604 u32 reg;
605 int gic_irqs;
606 int err;
607 int i;
608
609 dist_base = of_iomap(node, 0);
610 if (!dist_base) {
611 pr_err("%s: unable to map gic dist registers\n",
612 node->full_name);
613 return -ENXIO;
614 }
615
616 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
617 if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) {
618 pr_err("%s: no distributor detected, giving up\n",
619 node->full_name);
620 err = -ENODEV;
621 goto out_unmap_dist;
622 }
623
624 if (of_property_read_u32(node, "#redistributor-regions", &redist_regions))
625 redist_regions = 1;
626
627 redist_base = kzalloc(sizeof(*redist_base) * redist_regions, GFP_KERNEL);
628 if (!redist_base) {
629 err = -ENOMEM;
630 goto out_unmap_dist;
631 }
632
633 for (i = 0; i < redist_regions; i++) {
634 redist_base[i] = of_iomap(node, 1 + i);
635 if (!redist_base[i]) {
636 pr_err("%s: couldn't map region %d\n",
637 node->full_name, i);
638 err = -ENODEV;
639 goto out_unmap_rdist;
640 }
641 }
642
643 if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
644 redist_stride = 0;
645
646 gic_data.dist_base = dist_base;
647 gic_data.redist_base = redist_base;
648 gic_data.redist_regions = redist_regions;
649 gic_data.redist_stride = redist_stride;
650
651 /*
652 * Find out how many interrupts are supported.
653 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
654 */
655 gic_irqs = readl_relaxed(gic_data.dist_base + GICD_TYPER) & 0x1f;
656 gic_irqs = (gic_irqs + 1) * 32;
657 if (gic_irqs > 1020)
658 gic_irqs = 1020;
659 gic_data.irq_nr = gic_irqs;
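	/*
	 * Worked example: GICD_TYPER.ITLinesNumber == 3 gives
	 * (3 + 1) * 32 = 128 interrupt IDs; the maximum field value 0x1f
	 * gives 1024, clamped to 1020 above.
	 */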
660
661 gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops,
662 &gic_data);
663 gic_data.rdist = alloc_percpu(typeof(*gic_data.rdist));
664
665 if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdist)) {
666 err = -ENOMEM;
667 goto out_free;
668 }
669
670 set_handle_irq(gic_handle_irq);
671
672 gic_smp_init();
673 gic_dist_init();
674 gic_cpu_init();
675
676 return 0;
677
678out_free:
679 if (gic_data.domain)
680 irq_domain_remove(gic_data.domain);
681 free_percpu(gic_data.rdist);
682out_unmap_rdist:
683 for (i = 0; i < redist_regions; i++)
684 if (redist_base[i])
685 iounmap(redist_base[i]);
686 kfree(redist_base);
687out_unmap_dist:
688 iounmap(dist_base);
689 return err;
690}
691
692IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 7e11c9d6ae8c..4b959e606fe8 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * linux/arch/arm/common/gic.c
3 *
4 * Copyright (C) 2002 ARM Limited, All Rights Reserved. 2 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
5 * 3 *
6 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
@@ -42,10 +40,12 @@
42#include <linux/irqchip/chained_irq.h> 40#include <linux/irqchip/chained_irq.h>
43#include <linux/irqchip/arm-gic.h> 41#include <linux/irqchip/arm-gic.h>
44 42
43#include <asm/cputype.h>
45#include <asm/irq.h> 44#include <asm/irq.h>
46#include <asm/exception.h> 45#include <asm/exception.h>
47#include <asm/smp_plat.h> 46#include <asm/smp_plat.h>
48 47
48#include "irq-gic-common.h"
49#include "irqchip.h" 49#include "irqchip.h"
50 50
51union gic_base { 51union gic_base {
@@ -188,12 +188,6 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
188{ 188{
189 void __iomem *base = gic_dist_base(d); 189 void __iomem *base = gic_dist_base(d);
190 unsigned int gicirq = gic_irq(d); 190 unsigned int gicirq = gic_irq(d);
191 u32 enablemask = 1 << (gicirq % 32);
192 u32 enableoff = (gicirq / 32) * 4;
193 u32 confmask = 0x2 << ((gicirq % 16) * 2);
194 u32 confoff = (gicirq / 16) * 4;
195 bool enabled = false;
196 u32 val;
197 191
198 /* Interrupt configuration for SGIs can't be changed */ 192 /* Interrupt configuration for SGIs can't be changed */
199 if (gicirq < 16) 193 if (gicirq < 16)
@@ -207,25 +201,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
207 if (gic_arch_extn.irq_set_type) 201 if (gic_arch_extn.irq_set_type)
208 gic_arch_extn.irq_set_type(d, type); 202 gic_arch_extn.irq_set_type(d, type);
209 203
210 val = readl_relaxed(base + GIC_DIST_CONFIG + confoff); 204 gic_configure_irq(gicirq, type, base, NULL);
211 if (type == IRQ_TYPE_LEVEL_HIGH)
212 val &= ~confmask;
213 else if (type == IRQ_TYPE_EDGE_RISING)
214 val |= confmask;
215
216 /*
217 * As recommended by the spec, disable the interrupt before changing
218 * the configuration
219 */
220 if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
221 writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
222 enabled = true;
223 }
224
225 writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
226
227 if (enabled)
228 writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
229 205
230 raw_spin_unlock(&irq_controller_lock); 206 raw_spin_unlock(&irq_controller_lock);
231 207
@@ -387,12 +363,6 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
387 writel_relaxed(0, base + GIC_DIST_CTRL); 363 writel_relaxed(0, base + GIC_DIST_CTRL);
388 364
389 /* 365 /*
390 * Set all global interrupts to be level triggered, active low.
391 */
392 for (i = 32; i < gic_irqs; i += 16)
393 writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);
394
395 /*
396 * Set all global interrupts to this CPU only. 366 * Set all global interrupts to this CPU only.
397 */ 367 */
398 cpumask = gic_get_cpumask(gic); 368 cpumask = gic_get_cpumask(gic);
@@ -401,18 +371,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
401 for (i = 32; i < gic_irqs; i += 4) 371 for (i = 32; i < gic_irqs; i += 4)
402 writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); 372 writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
403 373
404 /* 374 gic_dist_config(base, gic_irqs, NULL);
405 * Set priority on all global interrupts.
406 */
407 for (i = 32; i < gic_irqs; i += 4)
408 writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
409
410 /*
411 * Disable all interrupts. Leave the PPI and SGIs alone
412 * as these enables are banked registers.
413 */
414 for (i = 32; i < gic_irqs; i += 32)
415 writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
416 375
417 writel_relaxed(1, base + GIC_DIST_CTRL); 376 writel_relaxed(1, base + GIC_DIST_CTRL);
418} 377}
@@ -439,18 +398,7 @@ static void gic_cpu_init(struct gic_chip_data *gic)
439 if (i != cpu) 398 if (i != cpu)
440 gic_cpu_map[i] &= ~cpu_mask; 399 gic_cpu_map[i] &= ~cpu_mask;
441 400
442 /* 401 gic_cpu_config(dist_base, NULL);
443 * Deal with the banked PPI and SGI interrupts - disable all
444 * PPI interrupts, ensure all SGI interrupts are enabled.
445 */
446 writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
447 writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
448
449 /*
450 * Set priority on PPI and SGI interrupts
451 */
452 for (i = 0; i < 32; i += 4)
453 writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
454 402
455 writel_relaxed(0xf0, base + GIC_CPU_PRIMASK); 403 writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
456 writel_relaxed(1, base + GIC_CPU_CTRL); 404 writel_relaxed(1, base + GIC_CPU_CTRL);
@@ -954,7 +902,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
954 } 902 }
955 903
956 for_each_possible_cpu(cpu) { 904 for_each_possible_cpu(cpu) {
957 unsigned long offset = percpu_offset * cpu_logical_map(cpu); 905 u32 mpidr = cpu_logical_map(cpu);
906 u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
907 unsigned long offset = percpu_offset * core_id;
958 *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset; 908 *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
959 *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset; 909 *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
960 } 910 }
@@ -1071,8 +1021,10 @@ gic_of_init(struct device_node *node, struct device_node *parent)
1071 gic_cnt++; 1021 gic_cnt++;
1072 return 0; 1022 return 0;
1073} 1023}
1024IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
1074IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init); 1025IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
1075IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init); 1026IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
1027IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
1076IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init); 1028IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
1077IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init); 1029IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
1078 1030
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
index 70bdf6edb7bb..4ff0805fca01 100644
--- a/drivers/irqchip/irq-nvic.c
+++ b/drivers/irqchip/irq-nvic.c
@@ -49,14 +49,6 @@ nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs)
49 handle_IRQ(irq, regs); 49 handle_IRQ(irq, regs);
50} 50}
51 51
52static void nvic_eoi(struct irq_data *d)
53{
54 /*
55 * This is a no-op as end of interrupt is signaled by the exception
56 * return sequence.
57 */
58}
59
60static int __init nvic_of_init(struct device_node *node, 52static int __init nvic_of_init(struct device_node *node,
61 struct device_node *parent) 53 struct device_node *parent)
62{ 54{
@@ -102,7 +94,10 @@ static int __init nvic_of_init(struct device_node *node,
102 gc->chip_types[0].regs.disable = NVIC_ICER; 94 gc->chip_types[0].regs.disable = NVIC_ICER;
103 gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg; 95 gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
104 gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg; 96 gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
105 gc->chip_types[0].chip.irq_eoi = nvic_eoi; 97 /* This is a no-op as end of interrupt is signaled by the
98 * exception return sequence.
99 */
100 gc->chip_types[0].chip.irq_eoi = irq_gc_noop;
106 101
107 /* disable interrupts */ 102 /* disable interrupts */
108 writel_relaxed(~0, gc->reg_base + NVIC_ICER); 103 writel_relaxed(~0, gc->reg_base + NVIC_ICER);
diff --git a/drivers/irqchip/irq-or1k-pic.c b/drivers/irqchip/irq-or1k-pic.c
new file mode 100644
index 000000000000..17ff033d9925
--- /dev/null
+++ b/drivers/irqchip/irq-or1k-pic.c
@@ -0,0 +1,182 @@
1/*
2 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
  3 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 */
10
11#include <linux/irq.h>
12#include <linux/of.h>
13#include <linux/of_irq.h>
14#include <linux/of_address.h>
15
16#include "irqchip.h"
17
18/* OR1K PIC implementation */
19
20struct or1k_pic_dev {
21 struct irq_chip chip;
22 irq_flow_handler_t handle;
23 unsigned long flags;
24};
25
26/*
27 * We're a couple of cycles faster than the generic implementations with
28 * these 'fast' versions.
29 */
30
31static void or1k_pic_mask(struct irq_data *data)
32{
33 mtspr(SPR_PICMR, mfspr(SPR_PICMR) & ~(1UL << data->hwirq));
34}
35
36static void or1k_pic_unmask(struct irq_data *data)
37{
38 mtspr(SPR_PICMR, mfspr(SPR_PICMR) | (1UL << data->hwirq));
39}
40
41static void or1k_pic_ack(struct irq_data *data)
42{
43 mtspr(SPR_PICSR, (1UL << data->hwirq));
44}
45
46static void or1k_pic_mask_ack(struct irq_data *data)
47{
48 mtspr(SPR_PICMR, mfspr(SPR_PICMR) & ~(1UL << data->hwirq));
49 mtspr(SPR_PICSR, (1UL << data->hwirq));
50}
51
52/*
53 * There are two oddities with the OR1200 PIC implementation:
54 * i) LEVEL-triggered interrupts are latched and need to be cleared
55 * ii) the interrupt latch is cleared by writing a 0 to the bit,
56 * as opposed to a 1 as mandated by the spec
57 */
58static void or1k_pic_or1200_ack(struct irq_data *data)
59{
60 mtspr(SPR_PICSR, mfspr(SPR_PICSR) & ~(1UL << data->hwirq));
61}
62
63static void or1k_pic_or1200_mask_ack(struct irq_data *data)
64{
65 mtspr(SPR_PICMR, mfspr(SPR_PICMR) & ~(1UL << data->hwirq));
66 mtspr(SPR_PICSR, mfspr(SPR_PICSR) & ~(1UL << data->hwirq));
67}
68
69static struct or1k_pic_dev or1k_pic_level = {
70 .chip = {
71 .name = "or1k-PIC-level",
72 .irq_unmask = or1k_pic_unmask,
73 .irq_mask = or1k_pic_mask,
74 .irq_mask_ack = or1k_pic_mask,
75 },
76 .handle = handle_level_irq,
77 .flags = IRQ_LEVEL | IRQ_NOPROBE,
78};
79
80static struct or1k_pic_dev or1k_pic_edge = {
81 .chip = {
82 .name = "or1k-PIC-edge",
83 .irq_unmask = or1k_pic_unmask,
84 .irq_mask = or1k_pic_mask,
85 .irq_ack = or1k_pic_ack,
86 .irq_mask_ack = or1k_pic_mask_ack,
87 },
88 .handle = handle_edge_irq,
89 .flags = IRQ_LEVEL | IRQ_NOPROBE,
90};
91
92static struct or1k_pic_dev or1k_pic_or1200 = {
93 .chip = {
94 .name = "or1200-PIC",
95 .irq_unmask = or1k_pic_unmask,
96 .irq_mask = or1k_pic_mask,
97 .irq_ack = or1k_pic_or1200_ack,
98 .irq_mask_ack = or1k_pic_or1200_mask_ack,
99 },
100 .handle = handle_level_irq,
101 .flags = IRQ_LEVEL | IRQ_NOPROBE,
102};
103
104static struct irq_domain *root_domain;
105
106static inline int pic_get_irq(int first)
107{
108 int hwirq;
109
110 hwirq = ffs(mfspr(SPR_PICSR) >> first);
111 if (!hwirq)
112 return NO_IRQ;
113 else
114 hwirq = hwirq + first - 1;
115
116 return irq_find_mapping(root_domain, hwirq);
117}
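/*
 * Worked example: with PICSR = 0b1010 and first = 0, ffs() returns 2,
 * so the lowest pending hwirq is 2 + 0 - 1 = 1; the handler below then
 * rescans from hwirq + 1 until nothing is pending.
 */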
118
119static void or1k_pic_handle_irq(struct pt_regs *regs)
120{
121 int irq = -1;
122
123 while ((irq = pic_get_irq(irq + 1)) != NO_IRQ)
124 handle_IRQ(irq, regs);
125}
126
127static int or1k_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
128{
129 struct or1k_pic_dev *pic = d->host_data;
130
131 irq_set_chip_and_handler(irq, &pic->chip, pic->handle);
132 irq_set_status_flags(irq, pic->flags);
133
134 return 0;
135}
136
137static const struct irq_domain_ops or1k_irq_domain_ops = {
138 .xlate = irq_domain_xlate_onecell,
139 .map = or1k_map,
140};
141
142/*
143 * This sets up the IRQ domain for the PIC built in to the OpenRISC
144 * 1000 CPU. This is the "root" domain as these are the interrupts
145 * that directly trigger an exception in the CPU.
146 */
147static int __init or1k_pic_init(struct device_node *node,
148 struct or1k_pic_dev *pic)
149{
150 /* Disable all interrupts until explicitly requested */
151 mtspr(SPR_PICMR, (0UL));
152
153 root_domain = irq_domain_add_linear(node, 32, &or1k_irq_domain_ops,
154 pic);
155
156 set_handle_irq(or1k_pic_handle_irq);
157
158 return 0;
159}
160
161static int __init or1k_pic_or1200_init(struct device_node *node,
162 struct device_node *parent)
163{
164 return or1k_pic_init(node, &or1k_pic_or1200);
165}
166IRQCHIP_DECLARE(or1k_pic_or1200, "opencores,or1200-pic", or1k_pic_or1200_init);
167IRQCHIP_DECLARE(or1k_pic, "opencores,or1k-pic", or1k_pic_or1200_init);
168
169static int __init or1k_pic_level_init(struct device_node *node,
170 struct device_node *parent)
171{
172 return or1k_pic_init(node, &or1k_pic_level);
173}
174IRQCHIP_DECLARE(or1k_pic_level, "opencores,or1k-pic-level",
175 or1k_pic_level_init);
176
177static int __init or1k_pic_edge_init(struct device_node *node,
178 struct device_node *parent)
179{
180 return or1k_pic_init(node, &or1k_pic_edge);
181}
182IRQCHIP_DECLARE(or1k_pic_edge, "opencores,or1k-pic-edge", or1k_pic_edge_init);
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
index 6ce6bd3441bf..9c145a7cb056 100644
--- a/drivers/irqchip/spear-shirq.c
+++ b/drivers/irqchip/spear-shirq.c
@@ -19,7 +19,6 @@
19#include <linux/io.h> 19#include <linux/io.h>
20#include <linux/irq.h> 20#include <linux/irq.h>
21#include <linux/irqdomain.h> 21#include <linux/irqdomain.h>
22#include <linux/irqchip/spear-shirq.h>
23#include <linux/of.h> 22#include <linux/of.h>
24#include <linux/of_address.h> 23#include <linux/of_address.h>
25#include <linux/of_irq.h> 24#include <linux/of_irq.h>
@@ -27,20 +26,73 @@
27 26
28#include "irqchip.h" 27#include "irqchip.h"
29 28
30static DEFINE_SPINLOCK(lock); 29/*
30 * struct spear_shirq: shared irq structure
31 *
32 * base: Base register address
33 * status_reg: Status register offset for chained interrupt handler
34 * mask_reg: Mask register offset for irq chip
35 * mask: Mask to apply to the status register
36 * virq_base: Base virtual interrupt number
37 * nr_irqs: Number of interrupts handled by this block
38 * offset: Bit offset of the first interrupt
39 * irq_chip: Interrupt controller chip used for this instance,
 40 *			if NULL, the group is disabled but still accounted for
41 */
42struct spear_shirq {
43 void __iomem *base;
44 u32 status_reg;
45 u32 mask_reg;
46 u32 mask;
47 u32 virq_base;
48 u32 nr_irqs;
49 u32 offset;
50 struct irq_chip *irq_chip;
51};
31 52
32/* spear300 shared irq registers offsets and masks */ 53/* spear300 shared irq registers offsets and masks */
33#define SPEAR300_INT_ENB_MASK_REG 0x54 54#define SPEAR300_INT_ENB_MASK_REG 0x54
34#define SPEAR300_INT_STS_MASK_REG 0x58 55#define SPEAR300_INT_STS_MASK_REG 0x58
35 56
57static DEFINE_RAW_SPINLOCK(shirq_lock);
58
59static void shirq_irq_mask(struct irq_data *d)
60{
61 struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
62 u32 val, shift = d->irq - shirq->virq_base + shirq->offset;
63 u32 __iomem *reg = shirq->base + shirq->mask_reg;
64
65 raw_spin_lock(&shirq_lock);
66 val = readl(reg) & ~(0x1 << shift);
67 writel(val, reg);
68 raw_spin_unlock(&shirq_lock);
69}
70
71static void shirq_irq_unmask(struct irq_data *d)
72{
73 struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
74 u32 val, shift = d->irq - shirq->virq_base + shirq->offset;
75 u32 __iomem *reg = shirq->base + shirq->mask_reg;
76
77 raw_spin_lock(&shirq_lock);
78 val = readl(reg) | (0x1 << shift);
79 writel(val, reg);
80 raw_spin_unlock(&shirq_lock);
81}
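/*
 * For illustration, the shift is relative to the whole shared register:
 * a block with offset = 7 toggles bit 9 of mask_reg for the interrupt
 * at virq_base + 2.
 */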
82
83static struct irq_chip shirq_chip = {
84 .name = "spear-shirq",
85 .irq_mask = shirq_irq_mask,
86 .irq_unmask = shirq_irq_unmask,
87};
88
36static struct spear_shirq spear300_shirq_ras1 = { 89static struct spear_shirq spear300_shirq_ras1 = {
37 .irq_nr = 9, 90 .offset = 0,
38 .irq_bit_off = 0, 91 .nr_irqs = 9,
39 .regs = { 92 .mask = ((0x1 << 9) - 1) << 0,
40 .enb_reg = SPEAR300_INT_ENB_MASK_REG, 93 .irq_chip = &shirq_chip,
41 .status_reg = SPEAR300_INT_STS_MASK_REG, 94 .status_reg = SPEAR300_INT_STS_MASK_REG,
42 .clear_reg = -1, 95 .mask_reg = SPEAR300_INT_ENB_MASK_REG,
43 },
44}; 96};
45 97
46static struct spear_shirq *spear300_shirq_blocks[] = { 98static struct spear_shirq *spear300_shirq_blocks[] = {
@@ -51,43 +103,35 @@ static struct spear_shirq *spear300_shirq_blocks[] = {
51#define SPEAR310_INT_STS_MASK_REG 0x04 103#define SPEAR310_INT_STS_MASK_REG 0x04
52 104
53static struct spear_shirq spear310_shirq_ras1 = { 105static struct spear_shirq spear310_shirq_ras1 = {
54 .irq_nr = 8, 106 .offset = 0,
55 .irq_bit_off = 0, 107 .nr_irqs = 8,
56 .regs = { 108 .mask = ((0x1 << 8) - 1) << 0,
57 .enb_reg = -1, 109 .irq_chip = &dummy_irq_chip,
58 .status_reg = SPEAR310_INT_STS_MASK_REG, 110 .status_reg = SPEAR310_INT_STS_MASK_REG,
59 .clear_reg = -1,
60 },
61}; 111};
62 112
63static struct spear_shirq spear310_shirq_ras2 = { 113static struct spear_shirq spear310_shirq_ras2 = {
64 .irq_nr = 5, 114 .offset = 8,
65 .irq_bit_off = 8, 115 .nr_irqs = 5,
66 .regs = { 116 .mask = ((0x1 << 5) - 1) << 8,
67 .enb_reg = -1, 117 .irq_chip = &dummy_irq_chip,
68 .status_reg = SPEAR310_INT_STS_MASK_REG, 118 .status_reg = SPEAR310_INT_STS_MASK_REG,
69 .clear_reg = -1,
70 },
71}; 119};
72 120
73static struct spear_shirq spear310_shirq_ras3 = { 121static struct spear_shirq spear310_shirq_ras3 = {
74 .irq_nr = 1, 122 .offset = 13,
75 .irq_bit_off = 13, 123 .nr_irqs = 1,
76 .regs = { 124 .mask = ((0x1 << 1) - 1) << 13,
77 .enb_reg = -1, 125 .irq_chip = &dummy_irq_chip,
78 .status_reg = SPEAR310_INT_STS_MASK_REG, 126 .status_reg = SPEAR310_INT_STS_MASK_REG,
79 .clear_reg = -1,
80 },
81}; 127};
82 128
83static struct spear_shirq spear310_shirq_intrcomm_ras = { 129static struct spear_shirq spear310_shirq_intrcomm_ras = {
84 .irq_nr = 3, 130 .offset = 14,
85 .irq_bit_off = 14, 131 .nr_irqs = 3,
86 .regs = { 132 .mask = ((0x1 << 3) - 1) << 14,
87 .enb_reg = -1, 133 .irq_chip = &dummy_irq_chip,
88 .status_reg = SPEAR310_INT_STS_MASK_REG, 134 .status_reg = SPEAR310_INT_STS_MASK_REG,
89 .clear_reg = -1,
90 },
91}; 135};
92 136
93static struct spear_shirq *spear310_shirq_blocks[] = { 137static struct spear_shirq *spear310_shirq_blocks[] = {
@@ -102,50 +146,34 @@ static struct spear_shirq *spear310_shirq_blocks[] = {
102#define SPEAR320_INT_CLR_MASK_REG 0x04 146#define SPEAR320_INT_CLR_MASK_REG 0x04
103#define SPEAR320_INT_ENB_MASK_REG 0x08 147#define SPEAR320_INT_ENB_MASK_REG 0x08
104 148
105static struct spear_shirq spear320_shirq_ras1 = { 149static struct spear_shirq spear320_shirq_ras3 = {
106 .irq_nr = 3, 150 .offset = 0,
107 .irq_bit_off = 7, 151 .nr_irqs = 7,
108 .regs = { 152 .mask = ((0x1 << 7) - 1) << 0,
109 .enb_reg = -1,
110 .status_reg = SPEAR320_INT_STS_MASK_REG,
111 .clear_reg = SPEAR320_INT_CLR_MASK_REG,
112 .reset_to_clear = 1,
113 },
114}; 153};
115 154
116static struct spear_shirq spear320_shirq_ras2 = { 155static struct spear_shirq spear320_shirq_ras1 = {
117 .irq_nr = 1, 156 .offset = 7,
118 .irq_bit_off = 10, 157 .nr_irqs = 3,
119 .regs = { 158 .mask = ((0x1 << 3) - 1) << 7,
120 .enb_reg = -1, 159 .irq_chip = &dummy_irq_chip,
121 .status_reg = SPEAR320_INT_STS_MASK_REG, 160 .status_reg = SPEAR320_INT_STS_MASK_REG,
122 .clear_reg = SPEAR320_INT_CLR_MASK_REG,
123 .reset_to_clear = 1,
124 },
125}; 161};
126 162
127static struct spear_shirq spear320_shirq_ras3 = { 163static struct spear_shirq spear320_shirq_ras2 = {
128 .irq_nr = 7, 164 .offset = 10,
129 .irq_bit_off = 0, 165 .nr_irqs = 1,
130 .invalid_irq = 1, 166 .mask = ((0x1 << 1) - 1) << 10,
131 .regs = { 167 .irq_chip = &dummy_irq_chip,
132 .enb_reg = SPEAR320_INT_ENB_MASK_REG, 168 .status_reg = SPEAR320_INT_STS_MASK_REG,
133 .reset_to_enb = 1,
134 .status_reg = SPEAR320_INT_STS_MASK_REG,
135 .clear_reg = SPEAR320_INT_CLR_MASK_REG,
136 .reset_to_clear = 1,
137 },
138}; 169};
139 170
140static struct spear_shirq spear320_shirq_intrcomm_ras = { 171static struct spear_shirq spear320_shirq_intrcomm_ras = {
141 .irq_nr = 11, 172 .offset = 11,
142 .irq_bit_off = 11, 173 .nr_irqs = 11,
143 .regs = { 174 .mask = ((0x1 << 11) - 1) << 11,
144 .enb_reg = -1, 175 .irq_chip = &dummy_irq_chip,
145 .status_reg = SPEAR320_INT_STS_MASK_REG, 176 .status_reg = SPEAR320_INT_STS_MASK_REG,
146 .clear_reg = SPEAR320_INT_CLR_MASK_REG,
147 .reset_to_clear = 1,
148 },
149}; 177};
150 178
151static struct spear_shirq *spear320_shirq_blocks[] = { 179static struct spear_shirq *spear320_shirq_blocks[] = {
@@ -155,104 +183,46 @@ static struct spear_shirq *spear320_shirq_blocks[] = {
155 &spear320_shirq_intrcomm_ras, 183 &spear320_shirq_intrcomm_ras,
156}; 184};
157 185
158static void shirq_irq_mask_unmask(struct irq_data *d, bool mask)
159{
160 struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
161 u32 val, offset = d->irq - shirq->irq_base;
162 unsigned long flags;
163
164 if (shirq->regs.enb_reg == -1)
165 return;
166
167 spin_lock_irqsave(&lock, flags);
168 val = readl(shirq->base + shirq->regs.enb_reg);
169
170 if (mask ^ shirq->regs.reset_to_enb)
171 val &= ~(0x1 << shirq->irq_bit_off << offset);
172 else
173 val |= 0x1 << shirq->irq_bit_off << offset;
174
175 writel(val, shirq->base + shirq->regs.enb_reg);
176 spin_unlock_irqrestore(&lock, flags);
177
178}
179
180static void shirq_irq_mask(struct irq_data *d)
181{
182 shirq_irq_mask_unmask(d, 1);
183}
184
185static void shirq_irq_unmask(struct irq_data *d)
186{
187 shirq_irq_mask_unmask(d, 0);
188}
189
190static struct irq_chip shirq_chip = {
191 .name = "spear-shirq",
192 .irq_ack = shirq_irq_mask,
193 .irq_mask = shirq_irq_mask,
194 .irq_unmask = shirq_irq_unmask,
195};
196
197static void shirq_handler(unsigned irq, struct irq_desc *desc) 186static void shirq_handler(unsigned irq, struct irq_desc *desc)
198{ 187{
199 u32 i, j, val, mask, tmp;
200 struct irq_chip *chip;
201 struct spear_shirq *shirq = irq_get_handler_data(irq); 188 struct spear_shirq *shirq = irq_get_handler_data(irq);
189 u32 pend;
202 190
203 chip = irq_get_chip(irq); 191 pend = readl(shirq->base + shirq->status_reg) & shirq->mask;
204 chip->irq_ack(&desc->irq_data); 192 pend >>= shirq->offset;
205
206 mask = ((0x1 << shirq->irq_nr) - 1) << shirq->irq_bit_off;
207 while ((val = readl(shirq->base + shirq->regs.status_reg) &
208 mask)) {
209
210 val >>= shirq->irq_bit_off;
211 for (i = 0, j = 1; i < shirq->irq_nr; i++, j <<= 1) {
212
213 if (!(j & val))
214 continue;
215 193
216 generic_handle_irq(shirq->irq_base + i); 194 while (pend) {
195 int irq = __ffs(pend);
217 196
218 /* clear interrupt */ 197 pend &= ~(0x1 << irq);
219 if (shirq->regs.clear_reg == -1) 198 generic_handle_irq(shirq->virq_base + irq);
220 continue;
221
222 tmp = readl(shirq->base + shirq->regs.clear_reg);
223 if (shirq->regs.reset_to_clear)
224 tmp &= ~(j << shirq->irq_bit_off);
225 else
226 tmp |= (j << shirq->irq_bit_off);
227 writel(tmp, shirq->base + shirq->regs.clear_reg);
228 }
229 } 199 }
230 chip->irq_unmask(&desc->irq_data);
231} 200}
232 201
233static void __init spear_shirq_register(struct spear_shirq *shirq) 202static void __init spear_shirq_register(struct spear_shirq *shirq,
203 int parent_irq)
234{ 204{
235 int i; 205 int i;
236 206
237 if (shirq->invalid_irq) 207 if (!shirq->irq_chip)
238 return; 208 return;
239 209
240 irq_set_chained_handler(shirq->irq, shirq_handler); 210 irq_set_chained_handler(parent_irq, shirq_handler);
241 for (i = 0; i < shirq->irq_nr; i++) { 211 irq_set_handler_data(parent_irq, shirq);
242 irq_set_chip_and_handler(shirq->irq_base + i,
243 &shirq_chip, handle_simple_irq);
244 set_irq_flags(shirq->irq_base + i, IRQF_VALID);
245 irq_set_chip_data(shirq->irq_base + i, shirq);
246 }
247 212
248 irq_set_handler_data(shirq->irq, shirq); 213 for (i = 0; i < shirq->nr_irqs; i++) {
214 irq_set_chip_and_handler(shirq->virq_base + i,
215 shirq->irq_chip, handle_simple_irq);
216 set_irq_flags(shirq->virq_base + i, IRQF_VALID);
217 irq_set_chip_data(shirq->virq_base + i, shirq);
218 }
249} 219}
250 220
251static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr, 221static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
252 struct device_node *np) 222 struct device_node *np)
253{ 223{
254 int i, irq_base, hwirq = 0, irq_nr = 0; 224 int i, parent_irq, virq_base, hwirq = 0, nr_irqs = 0;
255 static struct irq_domain *shirq_domain; 225 struct irq_domain *shirq_domain;
256 void __iomem *base; 226 void __iomem *base;
257 227
258 base = of_iomap(np, 0); 228 base = of_iomap(np, 0);
@@ -262,15 +232,15 @@ static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
262 } 232 }
263 233
264 for (i = 0; i < block_nr; i++) 234 for (i = 0; i < block_nr; i++)
265 irq_nr += shirq_blocks[i]->irq_nr; 235 nr_irqs += shirq_blocks[i]->nr_irqs;
266 236
267 irq_base = irq_alloc_descs(-1, 0, irq_nr, 0); 237 virq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
268 if (IS_ERR_VALUE(irq_base)) { 238 if (IS_ERR_VALUE(virq_base)) {
269 pr_err("%s: irq desc alloc failed\n", __func__); 239 pr_err("%s: irq desc alloc failed\n", __func__);
270 goto err_unmap; 240 goto err_unmap;
271 } 241 }
272 242
273 shirq_domain = irq_domain_add_legacy(np, irq_nr, irq_base, 0, 243 shirq_domain = irq_domain_add_legacy(np, nr_irqs, virq_base, 0,
274 &irq_domain_simple_ops, NULL); 244 &irq_domain_simple_ops, NULL);
275 if (WARN_ON(!shirq_domain)) { 245 if (WARN_ON(!shirq_domain)) {
276 pr_warn("%s: irq domain init failed\n", __func__); 246 pr_warn("%s: irq domain init failed\n", __func__);
@@ -279,41 +249,41 @@ static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
279 249
280 for (i = 0; i < block_nr; i++) { 250 for (i = 0; i < block_nr; i++) {
281 shirq_blocks[i]->base = base; 251 shirq_blocks[i]->base = base;
282 shirq_blocks[i]->irq_base = irq_find_mapping(shirq_domain, 252 shirq_blocks[i]->virq_base = irq_find_mapping(shirq_domain,
283 hwirq); 253 hwirq);
284 shirq_blocks[i]->irq = irq_of_parse_and_map(np, i);
285 254
286 spear_shirq_register(shirq_blocks[i]); 255 parent_irq = irq_of_parse_and_map(np, i);
287 hwirq += shirq_blocks[i]->irq_nr; 256 spear_shirq_register(shirq_blocks[i], parent_irq);
257 hwirq += shirq_blocks[i]->nr_irqs;
288 } 258 }
289 259
290 return 0; 260 return 0;
291 261
292err_free_desc: 262err_free_desc:
293 irq_free_descs(irq_base, irq_nr); 263 irq_free_descs(virq_base, nr_irqs);
294err_unmap: 264err_unmap:
295 iounmap(base); 265 iounmap(base);
296 return -ENXIO; 266 return -ENXIO;
297} 267}
298 268
299int __init spear300_shirq_of_init(struct device_node *np, 269static int __init spear300_shirq_of_init(struct device_node *np,
300 struct device_node *parent) 270 struct device_node *parent)
301{ 271{
302 return shirq_init(spear300_shirq_blocks, 272 return shirq_init(spear300_shirq_blocks,
303 ARRAY_SIZE(spear300_shirq_blocks), np); 273 ARRAY_SIZE(spear300_shirq_blocks), np);
304} 274}
305IRQCHIP_DECLARE(spear300_shirq, "st,spear300-shirq", spear300_shirq_of_init); 275IRQCHIP_DECLARE(spear300_shirq, "st,spear300-shirq", spear300_shirq_of_init);
306 276
307int __init spear310_shirq_of_init(struct device_node *np, 277static int __init spear310_shirq_of_init(struct device_node *np,
308 struct device_node *parent) 278 struct device_node *parent)
309{ 279{
310 return shirq_init(spear310_shirq_blocks, 280 return shirq_init(spear310_shirq_blocks,
311 ARRAY_SIZE(spear310_shirq_blocks), np); 281 ARRAY_SIZE(spear310_shirq_blocks), np);
312} 282}
313IRQCHIP_DECLARE(spear310_shirq, "st,spear310-shirq", spear310_shirq_of_init); 283IRQCHIP_DECLARE(spear310_shirq, "st,spear310-shirq", spear310_shirq_of_init);
314 284
315int __init spear320_shirq_of_init(struct device_node *np, 285static int __init spear320_shirq_of_init(struct device_node *np,
316 struct device_node *parent) 286 struct device_node *parent)
317{ 287{
318 return shirq_init(spear320_shirq_blocks, 288 return shirq_init(spear320_shirq_blocks,
319 ARRAY_SIZE(spear320_shirq_blocks), np); 289 ARRAY_SIZE(spear320_shirq_blocks), np);