aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm/plat-spear/shirq.c
diff options
context:
space:
mode:
authorShiraz Hashim <shiraz.hashim@st.com>2012-08-03 06:03:10 -0400
committerViresh Kumar <viresh.kumar@linaro.org>2012-11-26 06:25:32 -0500
commit80515a5a2e3c35e2994105f19af27650e8a16c51 (patch)
treeb117ab7aa811d0da139fc03aa4f906cbc5b560cb /arch/arm/plat-spear/shirq.c
parent300a6856324a56955ab909e1dca93dabb8464c8a (diff)
ARM: SPEAr3xx: shirq: simplify and move the shared irq multiplexor to DT
The SPEAr3xx architecture includes shared/multiplexed irqs for a certain set of devices. The multiplexor provides a single interrupt to the parent interrupt controller (VIC) on behalf of a group of devices. There can be multiple groups available on SPEAr3xx variants, but not exceeding 4. The number of devices in a group can differ; further, they may share the same set of status/mask registers spanning different bit masks. Also, in some cases the group may not have enable or other registers. This makes the software a little complex. The present implementation was non-DT and had a few complex data structures to decipher banks, the number of irqs supported, and the masks and registers involved. This patch simplifies the overall design and converts it to DT. It also removes all registration from individual SoC files and brings it into the common shirq.c. Also updated the corresponding documentation for the DT binding of shirq. Signed-off-by: Shiraz Hashim <shiraz.hashim@st.com> Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Diffstat (limited to 'arch/arm/plat-spear/shirq.c')
-rw-r--r--arch/arm/plat-spear/shirq.c297
1 files changed, 247 insertions, 50 deletions
diff --git a/arch/arm/plat-spear/shirq.c b/arch/arm/plat-spear/shirq.c
index 853e891e1184..955c7249a5c1 100644
--- a/arch/arm/plat-spear/shirq.c
+++ b/arch/arm/plat-spear/shirq.c
@@ -10,56 +10,182 @@
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied. 11 * warranty of any kind, whether express or implied.
12 */ 12 */
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 14
14#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/export.h>
17#include <linux/interrupt.h>
15#include <linux/io.h> 18#include <linux/io.h>
16#include <linux/irq.h> 19#include <linux/irq.h>
20#include <linux/irqdomain.h>
21#include <linux/of.h>
22#include <linux/of_address.h>
23#include <linux/of_irq.h>
17#include <linux/spinlock.h> 24#include <linux/spinlock.h>
18#include <plat/shirq.h> 25#include <plat/shirq.h>
19 26
20struct spear_shirq *shirq;
21static DEFINE_SPINLOCK(lock); 27static DEFINE_SPINLOCK(lock);
22 28
23static void shirq_irq_mask(struct irq_data *d) 29/* spear300 shared irq registers offsets and masks */
30#define SPEAR300_INT_ENB_MASK_REG 0x54
31#define SPEAR300_INT_STS_MASK_REG 0x58
32
33static struct spear_shirq spear300_shirq_ras1 = {
34 .irq_nr = 9,
35 .irq_bit_off = 0,
36 .regs = {
37 .enb_reg = SPEAR300_INT_ENB_MASK_REG,
38 .status_reg = SPEAR300_INT_STS_MASK_REG,
39 .clear_reg = -1,
40 },
41};
42
43static struct spear_shirq *spear300_shirq_blocks[] = {
44 &spear300_shirq_ras1,
45};
46
47/* spear310 shared irq registers offsets and masks */
48#define SPEAR310_INT_STS_MASK_REG 0x04
49
50static struct spear_shirq spear310_shirq_ras1 = {
51 .irq_nr = 8,
52 .irq_bit_off = 0,
53 .regs = {
54 .enb_reg = -1,
55 .status_reg = SPEAR310_INT_STS_MASK_REG,
56 .clear_reg = -1,
57 },
58};
59
60static struct spear_shirq spear310_shirq_ras2 = {
61 .irq_nr = 5,
62 .irq_bit_off = 8,
63 .regs = {
64 .enb_reg = -1,
65 .status_reg = SPEAR310_INT_STS_MASK_REG,
66 .clear_reg = -1,
67 },
68};
69
70static struct spear_shirq spear310_shirq_ras3 = {
71 .irq_nr = 1,
72 .irq_bit_off = 13,
73 .regs = {
74 .enb_reg = -1,
75 .status_reg = SPEAR310_INT_STS_MASK_REG,
76 .clear_reg = -1,
77 },
78};
79
80static struct spear_shirq spear310_shirq_intrcomm_ras = {
81 .irq_nr = 3,
82 .irq_bit_off = 14,
83 .regs = {
84 .enb_reg = -1,
85 .status_reg = SPEAR310_INT_STS_MASK_REG,
86 .clear_reg = -1,
87 },
88};
89
90static struct spear_shirq *spear310_shirq_blocks[] = {
91 &spear310_shirq_ras1,
92 &spear310_shirq_ras2,
93 &spear310_shirq_ras3,
94 &spear310_shirq_intrcomm_ras,
95};
96
97/* spear320 shared irq registers offsets and masks */
98#define SPEAR320_INT_STS_MASK_REG 0x04
99#define SPEAR320_INT_CLR_MASK_REG 0x04
100#define SPEAR320_INT_ENB_MASK_REG 0x08
101
102static struct spear_shirq spear320_shirq_ras1 = {
103 .irq_nr = 3,
104 .irq_bit_off = 7,
105 .regs = {
106 .enb_reg = -1,
107 .status_reg = SPEAR320_INT_STS_MASK_REG,
108 .clear_reg = SPEAR320_INT_CLR_MASK_REG,
109 .reset_to_clear = 1,
110 },
111};
112
113static struct spear_shirq spear320_shirq_ras2 = {
114 .irq_nr = 1,
115 .irq_bit_off = 10,
116 .regs = {
117 .enb_reg = -1,
118 .status_reg = SPEAR320_INT_STS_MASK_REG,
119 .clear_reg = SPEAR320_INT_CLR_MASK_REG,
120 .reset_to_clear = 1,
121 },
122};
123
124static struct spear_shirq spear320_shirq_ras3 = {
125 .irq_nr = 3,
126 .irq_bit_off = 0,
127 .invalid_irq = 1,
128 .regs = {
129 .enb_reg = SPEAR320_INT_ENB_MASK_REG,
130 .reset_to_enb = 1,
131 .status_reg = SPEAR320_INT_STS_MASK_REG,
132 .clear_reg = SPEAR320_INT_CLR_MASK_REG,
133 .reset_to_clear = 1,
134 },
135};
136
137static struct spear_shirq spear320_shirq_intrcomm_ras = {
138 .irq_nr = 11,
139 .irq_bit_off = 11,
140 .regs = {
141 .enb_reg = -1,
142 .status_reg = SPEAR320_INT_STS_MASK_REG,
143 .clear_reg = SPEAR320_INT_CLR_MASK_REG,
144 .reset_to_clear = 1,
145 },
146};
147
148static struct spear_shirq *spear320_shirq_blocks[] = {
149 &spear320_shirq_ras3,
150 &spear320_shirq_ras1,
151 &spear320_shirq_ras2,
152 &spear320_shirq_intrcomm_ras,
153};
154
155static void shirq_irq_mask_unmask(struct irq_data *d, bool mask)
24{ 156{
25 struct spear_shirq *shirq = irq_data_get_irq_chip_data(d); 157 struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
26 u32 val, id = d->irq - shirq->dev_config[0].virq; 158 u32 val, offset = d->irq - shirq->irq_base;
27 unsigned long flags; 159 unsigned long flags;
28 160
29 if ((shirq->regs.enb_reg == -1) || shirq->dev_config[id].enb_mask == -1) 161 if (shirq->regs.enb_reg == -1)
30 return; 162 return;
31 163
32 spin_lock_irqsave(&lock, flags); 164 spin_lock_irqsave(&lock, flags);
33 val = readl(shirq->regs.base + shirq->regs.enb_reg); 165 val = readl(shirq->base + shirq->regs.enb_reg);
34 if (shirq->regs.reset_to_enb) 166
35 val |= shirq->dev_config[id].enb_mask; 167 if (mask ^ shirq->regs.reset_to_enb)
168 val &= ~(0x1 << shirq->irq_bit_off << offset);
36 else 169 else
37 val &= ~(shirq->dev_config[id].enb_mask); 170 val |= 0x1 << shirq->irq_bit_off << offset;
38 writel(val, shirq->regs.base + shirq->regs.enb_reg); 171
172 writel(val, shirq->base + shirq->regs.enb_reg);
39 spin_unlock_irqrestore(&lock, flags); 173 spin_unlock_irqrestore(&lock, flags);
174
40} 175}
41 176
42static void shirq_irq_unmask(struct irq_data *d) 177static void shirq_irq_mask(struct irq_data *d)
43{ 178{
44 struct spear_shirq *shirq = irq_data_get_irq_chip_data(d); 179 shirq_irq_mask_unmask(d, 1);
45 u32 val, id = d->irq - shirq->dev_config[0].virq; 180}
46 unsigned long flags;
47
48 if ((shirq->regs.enb_reg == -1) || shirq->dev_config[id].enb_mask == -1)
49 return;
50 181
51 spin_lock_irqsave(&lock, flags); 182static void shirq_irq_unmask(struct irq_data *d)
52 val = readl(shirq->regs.base + shirq->regs.enb_reg); 183{
53 if (shirq->regs.reset_to_enb) 184 shirq_irq_mask_unmask(d, 0);
54 val &= ~(shirq->dev_config[id].enb_mask);
55 else
56 val |= shirq->dev_config[id].enb_mask;
57 writel(val, shirq->regs.base + shirq->regs.enb_reg);
58 spin_unlock_irqrestore(&lock, flags);
59} 185}
60 186
61static struct irq_chip shirq_chip = { 187static struct irq_chip shirq_chip = {
62 .name = "spear_shirq", 188 .name = "spear-shirq",
63 .irq_ack = shirq_irq_mask, 189 .irq_ack = shirq_irq_mask,
64 .irq_mask = shirq_irq_mask, 190 .irq_mask = shirq_irq_mask,
65 .irq_unmask = shirq_irq_unmask, 191 .irq_unmask = shirq_irq_unmask,
@@ -67,52 +193,123 @@ static struct irq_chip shirq_chip = {
67 193
68static void shirq_handler(unsigned irq, struct irq_desc *desc) 194static void shirq_handler(unsigned irq, struct irq_desc *desc)
69{ 195{
70 u32 i, val, mask; 196 u32 i, j, val, mask, tmp;
197 struct irq_chip *chip;
71 struct spear_shirq *shirq = irq_get_handler_data(irq); 198 struct spear_shirq *shirq = irq_get_handler_data(irq);
72 199
73 desc->irq_data.chip->irq_ack(&desc->irq_data); 200 chip = irq_get_chip(irq);
74 while ((val = readl(shirq->regs.base + shirq->regs.status_reg) & 201 chip->irq_ack(&desc->irq_data);
75 shirq->regs.status_reg_mask)) { 202
76 for (i = 0; (i < shirq->dev_count) && val; i++) { 203 mask = ((0x1 << shirq->irq_nr) - 1) << shirq->irq_bit_off;
77 if (!(shirq->dev_config[i].status_mask & val)) 204 while ((val = readl(shirq->base + shirq->regs.status_reg) &
205 mask)) {
206
207 val >>= shirq->irq_bit_off;
208 for (i = 0, j = 1; i < shirq->irq_nr; i++, j <<= 1) {
209
210 if (!(j & val))
78 continue; 211 continue;
79 212
80 generic_handle_irq(shirq->dev_config[i].virq); 213 generic_handle_irq(shirq->irq_base + i);
81 214
82 /* clear interrupt */ 215 /* clear interrupt */
83 val &= ~shirq->dev_config[i].status_mask; 216 if (shirq->regs.clear_reg == -1)
84 if ((shirq->regs.clear_reg == -1) ||
85 shirq->dev_config[i].clear_mask == -1)
86 continue; 217 continue;
87 mask = readl(shirq->regs.base + shirq->regs.clear_reg); 218
219 tmp = readl(shirq->base + shirq->regs.clear_reg);
88 if (shirq->regs.reset_to_clear) 220 if (shirq->regs.reset_to_clear)
89 mask &= ~shirq->dev_config[i].clear_mask; 221 tmp &= ~(j << shirq->irq_bit_off);
90 else 222 else
91 mask |= shirq->dev_config[i].clear_mask; 223 tmp |= (j << shirq->irq_bit_off);
92 writel(mask, shirq->regs.base + shirq->regs.clear_reg); 224 writel(tmp, shirq->base + shirq->regs.clear_reg);
93 } 225 }
94 } 226 }
95 desc->irq_data.chip->irq_unmask(&desc->irq_data); 227 chip->irq_unmask(&desc->irq_data);
96} 228}
97 229
98int spear_shirq_register(struct spear_shirq *shirq) 230static void __init spear_shirq_register(struct spear_shirq *shirq)
99{ 231{
100 int i; 232 int i;
101 233
102 if (!shirq || !shirq->dev_config || !shirq->regs.base) 234 if (shirq->invalid_irq)
103 return -EFAULT; 235 return;
104
105 if (!shirq->dev_count)
106 return -EINVAL;
107 236
108 irq_set_chained_handler(shirq->irq, shirq_handler); 237 irq_set_chained_handler(shirq->irq, shirq_handler);
109 for (i = 0; i < shirq->dev_count; i++) { 238 for (i = 0; i < shirq->irq_nr; i++) {
110 irq_set_chip_and_handler(shirq->dev_config[i].virq, 239 irq_set_chip_and_handler(shirq->irq_base + i,
111 &shirq_chip, handle_simple_irq); 240 &shirq_chip, handle_simple_irq);
112 set_irq_flags(shirq->dev_config[i].virq, IRQF_VALID); 241 set_irq_flags(shirq->irq_base + i, IRQF_VALID);
113 irq_set_chip_data(shirq->dev_config[i].virq, shirq); 242 irq_set_chip_data(shirq->irq_base + i, shirq);
114 } 243 }
115 244
116 irq_set_handler_data(shirq->irq, shirq); 245 irq_set_handler_data(shirq->irq, shirq);
246}
247
248static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
249 struct device_node *np)
250{
251 int i, irq_base, hwirq = 0, irq_nr = 0;
252 static struct irq_domain *shirq_domain;
253 void __iomem *base;
254
255 base = of_iomap(np, 0);
256 if (!base) {
257 pr_err("%s: failed to map shirq registers\n", __func__);
258 return -ENXIO;
259 }
260
261 for (i = 0; i < block_nr; i++)
262 irq_nr += shirq_blocks[i]->irq_nr;
263
264 irq_base = irq_alloc_descs(-1, 0, irq_nr, 0);
265 if (IS_ERR_VALUE(irq_base)) {
266 pr_err("%s: irq desc alloc failed\n", __func__);
267 goto err_unmap;
268 }
269
270 shirq_domain = irq_domain_add_legacy(np, irq_nr, irq_base, 0,
271 &irq_domain_simple_ops, NULL);
272 if (WARN_ON(!shirq_domain)) {
273 pr_warn("%s: irq domain init failed\n", __func__);
274 goto err_free_desc;
275 }
276
277 for (i = 0; i < block_nr; i++) {
278 shirq_blocks[i]->base = base;
279 shirq_blocks[i]->irq_base = irq_find_mapping(shirq_domain,
280 hwirq);
281 shirq_blocks[i]->irq = irq_of_parse_and_map(np, i);
282
283 spear_shirq_register(shirq_blocks[i]);
284 hwirq += shirq_blocks[i]->irq_nr;
285 }
286
117 return 0; 287 return 0;
288
289err_free_desc:
290 irq_free_descs(irq_base, irq_nr);
291err_unmap:
292 iounmap(base);
293 return -ENXIO;
294}
295
296int __init spear300_shirq_of_init(struct device_node *np,
297 struct device_node *parent)
298{
299 return shirq_init(spear300_shirq_blocks,
300 ARRAY_SIZE(spear300_shirq_blocks), np);
301}
302
303int __init spear310_shirq_of_init(struct device_node *np,
304 struct device_node *parent)
305{
306 return shirq_init(spear310_shirq_blocks,
307 ARRAY_SIZE(spear310_shirq_blocks), np);
308}
309
310int __init spear320_shirq_of_init(struct device_node *np,
311 struct device_node *parent)
312{
313 return shirq_init(spear320_shirq_blocks,
314 ARRAY_SIZE(spear320_shirq_blocks), np);
118} 315}