author	Paul Mundt <lethal@linux-sh.org>	2010-10-05 09:10:30 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2010-10-05 09:10:30 -0400
commit	2be6bb0c79c7fbda3425b65ee51c558bbaf4cf91 (patch)
tree	db0dafd7e7f83945edc2c50c358a3d81fca960c3 /drivers
parent	d74310d3b18aabbb7d0549ea9e3fd3259c1dce00 (diff)
sh: intc: Split up the INTC code.
This splits up the sh intc core into something more vaguely resembling a subsystem. Most of the functionality was already fairly well compartmentalized, and there were only a handful of interdependencies that needed to be resolved in the process.

This also serves as future-proofing for the genirq and sparseirq rework, which will make some of the split-out functionality wholly generic, allowing things to be killed off in place with minimal migration pain.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/sh/Kconfig             |   34
-rw-r--r--  drivers/sh/Makefile            |    2
-rw-r--r--  drivers/sh/intc.c              | 1776
-rw-r--r--  drivers/sh/intc/access.c       |  237
-rw-r--r--  drivers/sh/intc/balancing.c    |   97
-rw-r--r--  drivers/sh/intc/chip.c         |  215
-rw-r--r--  drivers/sh/intc/core.c         |  469
-rw-r--r--  drivers/sh/intc/dynamic.c      |  135
-rw-r--r--  drivers/sh/intc/handle.c       |  307
-rw-r--r--  drivers/sh/intc/internals.h    |  185
-rw-r--r--  drivers/sh/intc/userimask.c    |   83
-rw-r--r--  drivers/sh/intc/virq-debugfs.c |   64
-rw-r--r--  drivers/sh/intc/virq.c         |  255
13 files changed, 2051 insertions, 1808 deletions
diff --git a/drivers/sh/Kconfig b/drivers/sh/Kconfig
index e01ae42774af..f168a6159961 100644
--- a/drivers/sh/Kconfig
+++ b/drivers/sh/Kconfig
@@ -1,33 +1,5 @@
-config INTC_USERIMASK
-	bool "Userspace interrupt masking support"
-	depends on ARCH_SHMOBILE || (SUPERH && CPU_SH4A)
-	help
-	  This enables support for hardware-assisted userspace hardirq
-	  masking.
-
-	  SH-4A and newer interrupt blocks all support a special shadowed
-	  page with all non-masking registers obscured when mapped in to
-	  userspace. This is primarily for use by userspace device
-	  drivers that are using special priority levels.
-
-	  If in doubt, say N.
-
-config INTC_BALANCING
-	bool "Hardware IRQ balancing support"
-	depends on SMP && SUPERH && CPU_SHX3
-	help
-	  This enables support for IRQ auto-distribution mode on SH-X3
-	  SMP parts. All of the balancing and CPU wakeup decisions are
-	  taken care of automatically by hardware for distributed
-	  vectors.
-
-	  If in doubt, say N.
-
-config INTC_MAPPING_DEBUG
-	bool "Expose IRQ to per-controller id mapping via debugfs"
-	depends on DEBUG_FS
-	help
-	  This will create a debugfs entry for showing the relationship
-	  between system IRQs and the per-controller id tables.
-
-	  If in doubt, say N.
+menu "SuperH / SH-Mobile Driver Options"
+
+source "drivers/sh/intc/Kconfig"
+
+endmenu
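Assuming the three options are moved verbatim into drivers/sh/intc/Kconfig (that file is not shown in this diff), their names are unchanged, so existing board configurations keep working. An illustrative defconfig fragment that opts in to all of them:

	CONFIG_INTC_USERIMASK=y
	CONFIG_INTC_BALANCING=y
	CONFIG_INTC_MAPPING_DEBUG=y

The depends clauses still apply: balancing needs SMP SH-X3 parts, and the mapping-debug entry needs DEBUG_FS.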
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index 08fc653a825c..50dd5a65f72e 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -1,7 +1,7 @@
 #
 # Makefile for the SuperH specific drivers.
 #
-obj-y		:= clk.o intc.o
+obj-y		:= clk.o intc/

 obj-$(CONFIG_SUPERHYWAY)	+= superhyway/
 obj-$(CONFIG_MAPLE)		+= maple/
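The new drivers/sh/intc/Makefile is not part of this diffstat, but for the obj-y := clk.o intc/ change above to work, kbuild only needs the subdirectory to provide its own rules. A plausible sketch (editor's assumption, not the commit's actual file), with each optional object gated on its Kconfig symbol:

	obj-y	:= access.o chip.o core.o handle.o virq.o

	obj-$(CONFIG_INTC_BALANCING)		+= balancing.o
	obj-$(CONFIG_INTC_USERIMASK)		+= userimask.o
	obj-$(CONFIG_INTC_MAPPING_DEBUG)	+= virq-debugfs.o

dynamic.o is left out of the sketch since its gating is not visible in this diff; the remaining files line up one-to-one with the diffstat above.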
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
deleted file mode 100644
index d4325c70cf61..000000000000
--- a/drivers/sh/intc.c
+++ /dev/null
@@ -1,1776 +0,0 @@
1/*
2 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
3 *
4 * Copyright (C) 2007, 2008 Magnus Damm
5 * Copyright (C) 2009, 2010 Paul Mundt
6 *
7 * Based on intc2.c and ipr.c
8 *
9 * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
10 * Copyright (C) 2000 Kazumoto Kojima
11 * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
12 * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
13 * Copyright (C) 2005, 2006 Paul Mundt
14 *
15 * This file is subject to the terms and conditions of the GNU General Public
16 * License. See the file "COPYING" in the main directory of this archive
17 * for more details.
18 */
19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
21#include <linux/init.h>
22#include <linux/irq.h>
23#include <linux/module.h>
24#include <linux/io.h>
25#include <linux/slab.h>
26#include <linux/interrupt.h>
27#include <linux/sh_intc.h>
28#include <linux/sysdev.h>
29#include <linux/list.h>
30#include <linux/topology.h>
31#include <linux/bitmap.h>
32#include <linux/cpumask.h>
33#include <linux/spinlock.h>
34#include <linux/debugfs.h>
35#include <linux/seq_file.h>
36#include <linux/radix-tree.h>
37#include <linux/mutex.h>
38#include <linux/rcupdate.h>
39#include <asm/sizes.h>
40
41#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
42 ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
43 ((addr_e) << 16) | ((addr_d << 24)))
44
45#define _INTC_SHIFT(h) (h & 0x1f)
46#define _INTC_WIDTH(h) ((h >> 5) & 0xf)
47#define _INTC_FN(h) ((h >> 9) & 0xf)
48#define _INTC_MODE(h) ((h >> 13) & 0x7)
49#define _INTC_ADDR_E(h) ((h >> 16) & 0xff)
50#define _INTC_ADDR_D(h) ((h >> 24) & 0xff)
51
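/*
 * Editor's note (illustrative, not in the original file): _INTC_MK()
 * packs six fields into a single handle word and the accessors above
 * unpack them again, e.g.:
 *
 *	h = _INTC_MK(REG_FN_MODIFY_BASE, MODE_ENABLE_REG, 3, 3, 1, 7);
 *
 * then _INTC_FN(h) == REG_FN_MODIFY_BASE, _INTC_MODE(h) ==
 * MODE_ENABLE_REG, _INTC_ADDR_E(h) == _INTC_ADDR_D(h) == 3,
 * _INTC_WIDTH(h) == 1 and _INTC_SHIFT(h) == 7.
 */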
52struct intc_handle_int {
53 unsigned int irq;
54 unsigned long handle;
55};
56
57struct intc_window {
58 phys_addr_t phys;
59 void __iomem *virt;
60 unsigned long size;
61};
62
63struct intc_map_entry {
64 intc_enum enum_id;
65 struct intc_desc_int *desc;
66};
67
68struct intc_subgroup_entry {
69 unsigned int pirq;
70 intc_enum enum_id;
71 unsigned long handle;
72};
73
74struct intc_desc_int {
75 struct list_head list;
76 struct sys_device sysdev;
77 struct radix_tree_root tree;
78 pm_message_t state;
79 spinlock_t lock;
80 unsigned int index;
81 unsigned long *reg;
82#ifdef CONFIG_SMP
83 unsigned long *smp;
84#endif
85 unsigned int nr_reg;
86 struct intc_handle_int *prio;
87 unsigned int nr_prio;
88 struct intc_handle_int *sense;
89 unsigned int nr_sense;
90 struct intc_window *window;
91 unsigned int nr_windows;
92 struct irq_chip chip;
93};
94
95static LIST_HEAD(intc_list);
96static unsigned int nr_intc_controllers;
97
98/*
99 * The intc_irq_map provides a global map of bound IRQ vectors for a
 100 * given platform. Allocation of IRQs is either static through the CPU
101 * vector map, or dynamic in the case of board mux vectors or MSI.
102 *
103 * As this is a central point for all IRQ controllers on the system,
 104 * each of the available sources is mapped out here. This, combined with
 105 * sparseirq, makes it quite trivial to keep the vector map tightly packed
106 * when dynamically creating IRQs, as well as tying in to otherwise
107 * unused irq_desc positions in the sparse array.
108 */
109static DECLARE_BITMAP(intc_irq_map, NR_IRQS);
110static struct intc_map_entry intc_irq_xlate[NR_IRQS];
111static DEFINE_SPINLOCK(vector_lock);
112static DEFINE_SPINLOCK(xlate_lock);
113
114#ifdef CONFIG_SMP
115#define IS_SMP(x) x.smp
116#define INTC_REG(d, x, c) (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c))
117#define SMP_NR(d, x) ((d->smp[(x)] >> 8) ? (d->smp[(x)] >> 8) : 1)
118#else
119#define IS_SMP(x) 0
120#define INTC_REG(d, x, c) (d->reg[(x)])
121#define SMP_NR(d, x) 1
122#endif
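/*
 * Editor's note (illustrative): d->smp[] encodes a per-CPU register
 * stride in the low byte and the number of per-CPU register copies in
 * the upper bits. With d->smp[x] = (4 << 8) | 4, SMP_NR(d, x) is 4 and
 * INTC_REG(d, x, cpu) resolves to d->reg[x] + 4 * cpu, i.e. four
 * register copies laid out back to back.
 */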
123
124static unsigned int intc_prio_level[NR_IRQS]; /* for now */
125static unsigned int default_prio_level = 2; /* 2 - 16 */
126static unsigned long ack_handle[NR_IRQS];
127#ifdef CONFIG_INTC_BALANCING
128static unsigned long dist_handle[NR_IRQS];
129#endif
130
131struct intc_virq_list {
132 unsigned int irq;
133 struct intc_virq_list *next;
134};
135
136#define for_each_virq(entry, head) \
137 for (entry = head; entry; entry = entry->next)
138
139static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
140{
141 struct irq_chip *chip = get_irq_chip(irq);
142
143 return container_of(chip, struct intc_desc_int, chip);
144}
145
146static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
147{
148 generic_handle_irq((unsigned int)get_irq_data(irq));
149}
150
151static inline void activate_irq(int irq)
152{
153#ifdef CONFIG_ARM
154 /* ARM requires an extra step to clear IRQ_NOREQUEST, which it
155 * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE.
156 */
157 set_irq_flags(irq, IRQF_VALID);
158#else
159 /* same effect on other architectures */
160 set_irq_noprobe(irq);
161#endif
162}
163
164static unsigned long intc_phys_to_virt(struct intc_desc_int *d,
165 unsigned long address)
166{
167 struct intc_window *window;
168 int k;
169
170 /* scan through physical windows and convert address */
171 for (k = 0; k < d->nr_windows; k++) {
172 window = d->window + k;
173
174 if (address < window->phys)
175 continue;
176
177 if (address >= (window->phys + window->size))
178 continue;
179
180 address -= window->phys;
181 address += (unsigned long)window->virt;
182
183 return address;
184 }
185
186 /* no windows defined, register must be 1:1 mapped virt:phys */
187 return address;
188}
189
190static unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address)
191{
192 unsigned int k;
193
194 address = intc_phys_to_virt(d, address);
195
196 for (k = 0; k < d->nr_reg; k++) {
197 if (d->reg[k] == address)
198 return k;
199 }
200
201 BUG();
202 return 0;
203}
204
205static inline unsigned int set_field(unsigned int value,
206 unsigned int field_value,
207 unsigned int handle)
208{
209 unsigned int width = _INTC_WIDTH(handle);
210 unsigned int shift = _INTC_SHIFT(handle);
211
212 value &= ~(((1 << width) - 1) << shift);
213 value |= field_value << shift;
214 return value;
215}
216
217static inline unsigned long get_field(unsigned int value, unsigned int handle)
218{
219 unsigned int width = _INTC_WIDTH(handle);
220 unsigned int shift = _INTC_SHIFT(handle);
221 unsigned int mask = ((1 << width) - 1) << shift;
222
223 return (value & mask) >> shift;
224}
225
226static unsigned long test_8(unsigned long addr, unsigned long h,
227 unsigned long ignore)
228{
229 return get_field(__raw_readb(addr), h);
230}
231
232static unsigned long test_16(unsigned long addr, unsigned long h,
233 unsigned long ignore)
234{
235 return get_field(__raw_readw(addr), h);
236}
237
238static unsigned long test_32(unsigned long addr, unsigned long h,
239 unsigned long ignore)
240{
241 return get_field(__raw_readl(addr), h);
242}
243
244static unsigned long write_8(unsigned long addr, unsigned long h,
245 unsigned long data)
246{
247 __raw_writeb(set_field(0, data, h), addr);
248 (void)__raw_readb(addr); /* Defeat write posting */
249 return 0;
250}
251
252static unsigned long write_16(unsigned long addr, unsigned long h,
253 unsigned long data)
254{
255 __raw_writew(set_field(0, data, h), addr);
256 (void)__raw_readw(addr); /* Defeat write posting */
257 return 0;
258}
259
260static unsigned long write_32(unsigned long addr, unsigned long h,
261 unsigned long data)
262{
263 __raw_writel(set_field(0, data, h), addr);
264 (void)__raw_readl(addr); /* Defeat write posting */
265 return 0;
266}
267
268static unsigned long modify_8(unsigned long addr, unsigned long h,
269 unsigned long data)
270{
271 unsigned long flags;
272 local_irq_save(flags);
273 __raw_writeb(set_field(__raw_readb(addr), data, h), addr);
274 (void)__raw_readb(addr); /* Defeat write posting */
275 local_irq_restore(flags);
276 return 0;
277}
278
279static unsigned long modify_16(unsigned long addr, unsigned long h,
280 unsigned long data)
281{
282 unsigned long flags;
283 local_irq_save(flags);
284 __raw_writew(set_field(__raw_readw(addr), data, h), addr);
285 (void)__raw_readw(addr); /* Defeat write posting */
286 local_irq_restore(flags);
287 return 0;
288}
289
290static unsigned long modify_32(unsigned long addr, unsigned long h,
291 unsigned long data)
292{
293 unsigned long flags;
294 local_irq_save(flags);
295 __raw_writel(set_field(__raw_readl(addr), data, h), addr);
296 (void)__raw_readl(addr); /* Defeat write posting */
297 local_irq_restore(flags);
298 return 0;
299}
300
301enum {
302 REG_FN_ERR = 0,
303 REG_FN_TEST_BASE = 1,
304 REG_FN_WRITE_BASE = 5,
305 REG_FN_MODIFY_BASE = 9
306};
307
308static unsigned long (*intc_reg_fns[])(unsigned long addr,
309 unsigned long h,
310 unsigned long data) = {
311 [REG_FN_TEST_BASE + 0] = test_8,
312 [REG_FN_TEST_BASE + 1] = test_16,
313 [REG_FN_TEST_BASE + 3] = test_32,
314 [REG_FN_WRITE_BASE + 0] = write_8,
315 [REG_FN_WRITE_BASE + 1] = write_16,
316 [REG_FN_WRITE_BASE + 3] = write_32,
317 [REG_FN_MODIFY_BASE + 0] = modify_8,
318 [REG_FN_MODIFY_BASE + 1] = modify_16,
319 [REG_FN_MODIFY_BASE + 3] = modify_32,
320};
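/*
 * Editor's note: callers index this table with BASE + (reg_width >> 3) - 1,
 * so 8-bit registers land on BASE + 0, 16-bit on BASE + 1 and 32-bit on
 * BASE + 3; the BASE + 2 slots (24-bit) are intentionally left empty.
 */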
321
322enum { MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */
323 MODE_MASK_REG, /* Bit(s) set -> interrupt disabled */
324 MODE_DUAL_REG, /* Two registers, set bit to enable / disable */
325 MODE_PRIO_REG, /* Priority value written to enable interrupt */
326 MODE_PCLR_REG, /* Above plus all bits set to disable interrupt */
327};
328
329static unsigned long intc_mode_field(unsigned long addr,
330 unsigned long handle,
331 unsigned long (*fn)(unsigned long,
332 unsigned long,
333 unsigned long),
334 unsigned int irq)
335{
336 return fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1));
337}
338
339static unsigned long intc_mode_zero(unsigned long addr,
340 unsigned long handle,
341 unsigned long (*fn)(unsigned long,
342 unsigned long,
343 unsigned long),
344 unsigned int irq)
345{
346 return fn(addr, handle, 0);
347}
348
349static unsigned long intc_mode_prio(unsigned long addr,
350 unsigned long handle,
351 unsigned long (*fn)(unsigned long,
352 unsigned long,
353 unsigned long),
354 unsigned int irq)
355{
356 return fn(addr, handle, intc_prio_level[irq]);
357}
358
359static unsigned long (*intc_enable_fns[])(unsigned long addr,
360 unsigned long handle,
361 unsigned long (*fn)(unsigned long,
362 unsigned long,
363 unsigned long),
364 unsigned int irq) = {
365 [MODE_ENABLE_REG] = intc_mode_field,
366 [MODE_MASK_REG] = intc_mode_zero,
367 [MODE_DUAL_REG] = intc_mode_field,
368 [MODE_PRIO_REG] = intc_mode_prio,
369 [MODE_PCLR_REG] = intc_mode_prio,
370};
371
372static unsigned long (*intc_disable_fns[])(unsigned long addr,
373 unsigned long handle,
374 unsigned long (*fn)(unsigned long,
375 unsigned long,
376 unsigned long),
377 unsigned int irq) = {
378 [MODE_ENABLE_REG] = intc_mode_zero,
379 [MODE_MASK_REG] = intc_mode_field,
380 [MODE_DUAL_REG] = intc_mode_field,
381 [MODE_PRIO_REG] = intc_mode_zero,
382 [MODE_PCLR_REG] = intc_mode_field,
383};
384
385#ifdef CONFIG_INTC_BALANCING
386static inline void intc_balancing_enable(unsigned int irq)
387{
388 struct intc_desc_int *d = get_intc_desc(irq);
389 unsigned long handle = dist_handle[irq];
390 unsigned long addr;
391
392 if (irq_balancing_disabled(irq) || !handle)
393 return;
394
395 addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
396 intc_reg_fns[_INTC_FN(handle)](addr, handle, 1);
397}
398
399static inline void intc_balancing_disable(unsigned int irq)
400{
401 struct intc_desc_int *d = get_intc_desc(irq);
402 unsigned long handle = dist_handle[irq];
403 unsigned long addr;
404
405 if (irq_balancing_disabled(irq) || !handle)
406 return;
407
408 addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
409 intc_reg_fns[_INTC_FN(handle)](addr, handle, 0);
410}
411
412static unsigned int intc_dist_data(struct intc_desc *desc,
413 struct intc_desc_int *d,
414 intc_enum enum_id)
415{
416 struct intc_mask_reg *mr = desc->hw.mask_regs;
417 unsigned int i, j, fn, mode;
418 unsigned long reg_e, reg_d;
419
420 for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) {
421 mr = desc->hw.mask_regs + i;
422
423 /*
424 * Skip this entry if there's no auto-distribution
425 * register associated with it.
426 */
427 if (!mr->dist_reg)
428 continue;
429
430 for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
431 if (mr->enum_ids[j] != enum_id)
432 continue;
433
434 fn = REG_FN_MODIFY_BASE;
435 mode = MODE_ENABLE_REG;
436 reg_e = mr->dist_reg;
437 reg_d = mr->dist_reg;
438
439 fn += (mr->reg_width >> 3) - 1;
440 return _INTC_MK(fn, mode,
441 intc_get_reg(d, reg_e),
442 intc_get_reg(d, reg_d),
443 1,
444 (mr->reg_width - 1) - j);
445 }
446 }
447
448 /*
449 * It's possible we've gotten here with no distribution options
450 * available for the IRQ in question, so we just skip over those.
451 */
452 return 0;
453}
454#else
455static inline void intc_balancing_enable(unsigned int irq)
456{
457}
458
459static inline void intc_balancing_disable(unsigned int irq)
460{
461}
462#endif
463
464static inline void _intc_enable(unsigned int irq, unsigned long handle)
465{
466 struct intc_desc_int *d = get_intc_desc(irq);
467 unsigned long addr;
468 unsigned int cpu;
469
470 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
471#ifdef CONFIG_SMP
472 if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
473 continue;
474#endif
475 addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
476 intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
477 [_INTC_FN(handle)], irq);
478 }
479
480 intc_balancing_enable(irq);
481}
482
483static void intc_enable(unsigned int irq)
484{
485 _intc_enable(irq, (unsigned long)get_irq_chip_data(irq));
486}
487
488static void intc_disable(unsigned int irq)
489{
490 struct intc_desc_int *d = get_intc_desc(irq);
491 unsigned long handle = (unsigned long)get_irq_chip_data(irq);
492 unsigned long addr;
493 unsigned int cpu;
494
495 intc_balancing_disable(irq);
496
497 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
498#ifdef CONFIG_SMP
499 if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
500 continue;
501#endif
502 addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
503 intc_disable_fns[_INTC_MODE(handle)](addr, handle,intc_reg_fns\
504 [_INTC_FN(handle)], irq);
505 }
506}
507
508static unsigned long
509(*intc_enable_noprio_fns[])(unsigned long addr,
510 unsigned long handle,
511 unsigned long (*fn)(unsigned long,
512 unsigned long,
513 unsigned long),
514 unsigned int irq) = {
515 [MODE_ENABLE_REG] = intc_mode_field,
516 [MODE_MASK_REG] = intc_mode_zero,
517 [MODE_DUAL_REG] = intc_mode_field,
518 [MODE_PRIO_REG] = intc_mode_field,
519 [MODE_PCLR_REG] = intc_mode_field,
520};
521
522static void intc_enable_disable(struct intc_desc_int *d,
523 unsigned long handle, int do_enable)
524{
525 unsigned long addr;
526 unsigned int cpu;
527 unsigned long (*fn)(unsigned long, unsigned long,
528 unsigned long (*)(unsigned long, unsigned long,
529 unsigned long),
530 unsigned int);
531
532 if (do_enable) {
533 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
534 addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
535 fn = intc_enable_noprio_fns[_INTC_MODE(handle)];
536 fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
537 }
538 } else {
539 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
540 addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
541 fn = intc_disable_fns[_INTC_MODE(handle)];
542 fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
543 }
544 }
545}
546
547static int intc_set_wake(unsigned int irq, unsigned int on)
548{
549 return 0; /* allow wakeup, but setup hardware in intc_suspend() */
550}
551
552#ifdef CONFIG_SMP
553/*
 554 * This is called with the irq desc lock held, so we don't require any
555 * additional locking here at the intc desc level. The affinity mask is
556 * later tested in the enable/disable paths.
557 */
558static int intc_set_affinity(unsigned int irq, const struct cpumask *cpumask)
559{
560 if (!cpumask_intersects(cpumask, cpu_online_mask))
561 return -1;
562
563 cpumask_copy(irq_to_desc(irq)->affinity, cpumask);
564
565 return 0;
566}
567#endif
568
569static void intc_mask_ack(unsigned int irq)
570{
571 struct intc_desc_int *d = get_intc_desc(irq);
572 unsigned long handle = ack_handle[irq];
573 unsigned long addr;
574
575 intc_disable(irq);
576
577 /* read register and write zero only to the associated bit */
578 if (handle) {
579 addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
580 switch (_INTC_FN(handle)) {
581 case REG_FN_MODIFY_BASE + 0: /* 8bit */
582 __raw_readb(addr);
583 __raw_writeb(0xff ^ set_field(0, 1, handle), addr);
584 break;
585 case REG_FN_MODIFY_BASE + 1: /* 16bit */
586 __raw_readw(addr);
587 __raw_writew(0xffff ^ set_field(0, 1, handle), addr);
588 break;
589 case REG_FN_MODIFY_BASE + 3: /* 32bit */
590 __raw_readl(addr);
591 __raw_writel(0xffffffff ^ set_field(0, 1, handle), addr);
592 break;
593 default:
594 BUG();
595 break;
596 }
597 }
598}
599
600static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
601 unsigned int nr_hp,
602 unsigned int irq)
603{
604 int i;
605
606 /*
607 * this doesn't scale well, but...
608 *
 609 * this function should only be used for certain uncommon
610 * operations such as intc_set_priority() and intc_set_sense()
611 * and in those rare cases performance doesn't matter that much.
612 * keeping the memory footprint low is more important.
613 *
614 * one rather simple way to speed this up and still keep the
615 * memory footprint down is to make sure the array is sorted
616 * and then perform a bisect to lookup the irq.
617 */
618 for (i = 0; i < nr_hp; i++) {
619 if ((hp + i)->irq != irq)
620 continue;
621
622 return hp + i;
623 }
624
625 return NULL;
626}
627
628int intc_set_priority(unsigned int irq, unsigned int prio)
629{
630 struct intc_desc_int *d = get_intc_desc(irq);
631 struct intc_handle_int *ihp;
632
633 if (!intc_prio_level[irq] || prio <= 1)
634 return -EINVAL;
635
636 ihp = intc_find_irq(d->prio, d->nr_prio, irq);
637 if (ihp) {
638 if (prio >= (1 << _INTC_WIDTH(ihp->handle)))
639 return -EINVAL;
640
641 intc_prio_level[irq] = prio;
642
643 /*
644 * only set secondary masking method directly
645 * primary masking method is using intc_prio_level[irq]
646 * priority level will be set during next enable()
647 */
648 if (_INTC_FN(ihp->handle) != REG_FN_ERR)
649 _intc_enable(irq, ihp->handle);
650 }
651 return 0;
652}
653
654#define VALID(x) (x | 0x80)
655
656static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
657 [IRQ_TYPE_EDGE_FALLING] = VALID(0),
658 [IRQ_TYPE_EDGE_RISING] = VALID(1),
659 [IRQ_TYPE_LEVEL_LOW] = VALID(2),
 660 /* SH7706, SH7707 and SH7709 do not support high level triggering */
661#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \
662 !defined(CONFIG_CPU_SUBTYPE_SH7707) && \
663 !defined(CONFIG_CPU_SUBTYPE_SH7709)
664 [IRQ_TYPE_LEVEL_HIGH] = VALID(3),
665#endif
666};
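/*
 * Editor's note (illustrative): the low bits are the hardware sense
 * code, e.g. IRQ_TYPE_EDGE_RISING maps to VALID(1), sense code 1. The
 * 0x80 from VALID() simply marks an entry as supported so that
 * intc_set_sense() can tell it apart from the zero-initialized holes
 * in this table.
 */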
667
668static int intc_set_sense(unsigned int irq, unsigned int type)
669{
670 struct intc_desc_int *d = get_intc_desc(irq);
671 unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
672 struct intc_handle_int *ihp;
673 unsigned long addr;
674
675 if (!value)
676 return -EINVAL;
677
678 ihp = intc_find_irq(d->sense, d->nr_sense, irq);
679 if (ihp) {
680 addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
681 intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
682 }
683 return 0;
684}
685
686static intc_enum __init intc_grp_id(struct intc_desc *desc,
687 intc_enum enum_id)
688{
689 struct intc_group *g = desc->hw.groups;
690 unsigned int i, j;
691
692 for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) {
693 g = desc->hw.groups + i;
694
695 for (j = 0; g->enum_ids[j]; j++) {
696 if (g->enum_ids[j] != enum_id)
697 continue;
698
699 return g->enum_id;
700 }
701 }
702
703 return 0;
704}
705
706static unsigned int __init _intc_mask_data(struct intc_desc *desc,
707 struct intc_desc_int *d,
708 intc_enum enum_id,
709 unsigned int *reg_idx,
710 unsigned int *fld_idx)
711{
712 struct intc_mask_reg *mr = desc->hw.mask_regs;
713 unsigned int fn, mode;
714 unsigned long reg_e, reg_d;
715
716 while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) {
717 mr = desc->hw.mask_regs + *reg_idx;
718
719 for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) {
720 if (mr->enum_ids[*fld_idx] != enum_id)
721 continue;
722
723 if (mr->set_reg && mr->clr_reg) {
724 fn = REG_FN_WRITE_BASE;
725 mode = MODE_DUAL_REG;
726 reg_e = mr->clr_reg;
727 reg_d = mr->set_reg;
728 } else {
729 fn = REG_FN_MODIFY_BASE;
730 if (mr->set_reg) {
731 mode = MODE_ENABLE_REG;
732 reg_e = mr->set_reg;
733 reg_d = mr->set_reg;
734 } else {
735 mode = MODE_MASK_REG;
736 reg_e = mr->clr_reg;
737 reg_d = mr->clr_reg;
738 }
739 }
740
741 fn += (mr->reg_width >> 3) - 1;
742 return _INTC_MK(fn, mode,
743 intc_get_reg(d, reg_e),
744 intc_get_reg(d, reg_d),
745 1,
746 (mr->reg_width - 1) - *fld_idx);
747 }
748
749 *fld_idx = 0;
750 (*reg_idx)++;
751 }
752
753 return 0;
754}
755
756static unsigned int __init intc_mask_data(struct intc_desc *desc,
757 struct intc_desc_int *d,
758 intc_enum enum_id, int do_grps)
759{
760 unsigned int i = 0;
761 unsigned int j = 0;
762 unsigned int ret;
763
764 ret = _intc_mask_data(desc, d, enum_id, &i, &j);
765 if (ret)
766 return ret;
767
768 if (do_grps)
769 return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0);
770
771 return 0;
772}
773
774static unsigned int __init _intc_prio_data(struct intc_desc *desc,
775 struct intc_desc_int *d,
776 intc_enum enum_id,
777 unsigned int *reg_idx,
778 unsigned int *fld_idx)
779{
780 struct intc_prio_reg *pr = desc->hw.prio_regs;
781 unsigned int fn, n, mode, bit;
782 unsigned long reg_e, reg_d;
783
784 while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) {
785 pr = desc->hw.prio_regs + *reg_idx;
786
787 for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) {
788 if (pr->enum_ids[*fld_idx] != enum_id)
789 continue;
790
791 if (pr->set_reg && pr->clr_reg) {
792 fn = REG_FN_WRITE_BASE;
793 mode = MODE_PCLR_REG;
794 reg_e = pr->set_reg;
795 reg_d = pr->clr_reg;
796 } else {
797 fn = REG_FN_MODIFY_BASE;
798 mode = MODE_PRIO_REG;
799 if (!pr->set_reg)
800 BUG();
801 reg_e = pr->set_reg;
802 reg_d = pr->set_reg;
803 }
804
805 fn += (pr->reg_width >> 3) - 1;
806 n = *fld_idx + 1;
807
808 BUG_ON(n * pr->field_width > pr->reg_width);
809
810 bit = pr->reg_width - (n * pr->field_width);
811
812 return _INTC_MK(fn, mode,
813 intc_get_reg(d, reg_e),
814 intc_get_reg(d, reg_d),
815 pr->field_width, bit);
816 }
817
818 *fld_idx = 0;
819 (*reg_idx)++;
820 }
821
822 return 0;
823}
824
825static unsigned int __init intc_prio_data(struct intc_desc *desc,
826 struct intc_desc_int *d,
827 intc_enum enum_id, int do_grps)
828{
829 unsigned int i = 0;
830 unsigned int j = 0;
831 unsigned int ret;
832
833 ret = _intc_prio_data(desc, d, enum_id, &i, &j);
834 if (ret)
835 return ret;
836
837 if (do_grps)
838 return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0);
839
840 return 0;
841}
842
843static void __init intc_enable_disable_enum(struct intc_desc *desc,
844 struct intc_desc_int *d,
845 intc_enum enum_id, int enable)
846{
847 unsigned int i, j, data;
848
849 /* go through and enable/disable all mask bits */
850 i = j = 0;
851 do {
852 data = _intc_mask_data(desc, d, enum_id, &i, &j);
853 if (data)
854 intc_enable_disable(d, data, enable);
855 j++;
856 } while (data);
857
858 /* go through and enable/disable all priority fields */
859 i = j = 0;
860 do {
861 data = _intc_prio_data(desc, d, enum_id, &i, &j);
862 if (data)
863 intc_enable_disable(d, data, enable);
864
865 j++;
866 } while (data);
867}
868
869static unsigned int __init intc_ack_data(struct intc_desc *desc,
870 struct intc_desc_int *d,
871 intc_enum enum_id)
872{
873 struct intc_mask_reg *mr = desc->hw.ack_regs;
874 unsigned int i, j, fn, mode;
875 unsigned long reg_e, reg_d;
876
877 for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) {
878 mr = desc->hw.ack_regs + i;
879
880 for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
881 if (mr->enum_ids[j] != enum_id)
882 continue;
883
884 fn = REG_FN_MODIFY_BASE;
885 mode = MODE_ENABLE_REG;
886 reg_e = mr->set_reg;
887 reg_d = mr->set_reg;
888
889 fn += (mr->reg_width >> 3) - 1;
890 return _INTC_MK(fn, mode,
891 intc_get_reg(d, reg_e),
892 intc_get_reg(d, reg_d),
893 1,
894 (mr->reg_width - 1) - j);
895 }
896 }
897
898 return 0;
899}
900
901static unsigned int __init intc_sense_data(struct intc_desc *desc,
902 struct intc_desc_int *d,
903 intc_enum enum_id)
904{
905 struct intc_sense_reg *sr = desc->hw.sense_regs;
906 unsigned int i, j, fn, bit;
907
908 for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) {
909 sr = desc->hw.sense_regs + i;
910
911 for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
912 if (sr->enum_ids[j] != enum_id)
913 continue;
914
915 fn = REG_FN_MODIFY_BASE;
916 fn += (sr->reg_width >> 3) - 1;
917
918 BUG_ON((j + 1) * sr->field_width > sr->reg_width);
919
920 bit = sr->reg_width - ((j + 1) * sr->field_width);
921
922 return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
923 0, sr->field_width, bit);
924 }
925 }
926
927 return 0;
928}
929
930#define INTC_TAG_VIRQ_NEEDS_ALLOC 0
931
932int intc_irq_lookup(const char *chipname, intc_enum enum_id)
933{
934 struct intc_map_entry *ptr;
935 struct intc_desc_int *d;
936 int irq = -1;
937
938 list_for_each_entry(d, &intc_list, list) {
939 int tagged;
940
941 if (strcmp(d->chip.name, chipname) != 0)
942 continue;
943
944 /*
945 * Catch early lookups for subgroup VIRQs that have not
946 * yet been allocated an IRQ. This already includes a
947 * fast-path out if the tree is untagged, so there is no
948 * need to explicitly test the root tree.
949 */
950 tagged = radix_tree_tag_get(&d->tree, enum_id,
951 INTC_TAG_VIRQ_NEEDS_ALLOC);
952 if (unlikely(tagged))
953 break;
954
955 ptr = radix_tree_lookup(&d->tree, enum_id);
956 if (ptr) {
957 irq = ptr - intc_irq_xlate;
958 break;
959 }
960 }
961
962 return irq;
963}
964EXPORT_SYMBOL_GPL(intc_irq_lookup);
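/*
 * Editor's sketch (hypothetical controller name and enum id): a
 * consumer that knows its signal only by controller name and enum id
 * can resolve the Linux IRQ with:
 *
 *	int irq = intc_irq_lookup("intca", 0x0a00);
 *	if (irq > 0)
 *		err = request_irq(irq, my_handler, 0, "mydev", NULL);
 *
 * A negative return means the enum id is unknown or its VIRQ has not
 * been allocated yet (see intc_finalize() below).
 */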
965
966static int add_virq_to_pirq(unsigned int irq, unsigned int virq)
967{
968 struct intc_virq_list **last, *entry;
969 struct irq_desc *desc = irq_to_desc(irq);
970
971 /* scan for duplicates */
972 last = (struct intc_virq_list **)&desc->handler_data;
973 for_each_virq(entry, desc->handler_data) {
974 if (entry->irq == virq)
975 return 0;
976 last = &entry->next;
977 }
978
979 entry = kzalloc(sizeof(struct intc_virq_list), GFP_ATOMIC);
980 if (!entry) {
981 pr_err("can't allocate VIRQ mapping for %d\n", virq);
982 return -ENOMEM;
983 }
984
985 entry->irq = virq;
986
987 *last = entry;
988
989 return 0;
990}
991
992static void intc_virq_handler(unsigned int irq, struct irq_desc *desc)
993{
994 struct intc_virq_list *entry, *vlist = get_irq_data(irq);
995 struct intc_desc_int *d = get_intc_desc(irq);
996
997 desc->chip->mask_ack(irq);
998
999 for_each_virq(entry, vlist) {
1000 unsigned long addr, handle;
1001
1002 handle = (unsigned long)get_irq_data(entry->irq);
1003 addr = INTC_REG(d, _INTC_ADDR_E(handle), 0);
1004
1005 if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0))
1006 generic_handle_irq(entry->irq);
1007 }
1008
1009 desc->chip->unmask(irq);
1010}
1011
1012static unsigned long __init intc_subgroup_data(struct intc_subgroup *subgroup,
1013 struct intc_desc_int *d,
1014 unsigned int index)
1015{
1016 unsigned int fn = REG_FN_TEST_BASE + (subgroup->reg_width >> 3) - 1;
1017
1018 return _INTC_MK(fn, MODE_ENABLE_REG, intc_get_reg(d, subgroup->reg),
1019 0, 1, (subgroup->reg_width - 1) - index);
1020}
1021
1022static void __init intc_subgroup_init_one(struct intc_desc *desc,
1023 struct intc_desc_int *d,
1024 struct intc_subgroup *subgroup)
1025{
1026 struct intc_map_entry *mapped;
1027 unsigned int pirq;
1028 unsigned long flags;
1029 int i;
1030
1031 mapped = radix_tree_lookup(&d->tree, subgroup->parent_id);
1032 if (!mapped) {
1033 WARN_ON(1);
1034 return;
1035 }
1036
1037 pirq = mapped - intc_irq_xlate;
1038
1039 spin_lock_irqsave(&d->lock, flags);
1040
1041 for (i = 0; i < ARRAY_SIZE(subgroup->enum_ids); i++) {
1042 struct intc_subgroup_entry *entry;
1043 int err;
1044
1045 if (!subgroup->enum_ids[i])
1046 continue;
1047
1048 entry = kmalloc(sizeof(*entry), GFP_NOWAIT);
1049 if (!entry)
1050 break;
1051
1052 entry->pirq = pirq;
1053 entry->enum_id = subgroup->enum_ids[i];
1054 entry->handle = intc_subgroup_data(subgroup, d, i);
1055
1056 err = radix_tree_insert(&d->tree, entry->enum_id, entry);
1057 if (unlikely(err < 0))
1058 break;
1059
1060 radix_tree_tag_set(&d->tree, entry->enum_id,
1061 INTC_TAG_VIRQ_NEEDS_ALLOC);
1062 }
1063
1064 spin_unlock_irqrestore(&d->lock, flags);
1065}
1066
1067static void __init intc_subgroup_init(struct intc_desc *desc,
1068 struct intc_desc_int *d)
1069{
1070 int i;
1071
1072 if (!desc->hw.subgroups)
1073 return;
1074
1075 for (i = 0; i < desc->hw.nr_subgroups; i++)
1076 intc_subgroup_init_one(desc, d, desc->hw.subgroups + i);
1077}
1078
1079static void __init intc_subgroup_map(struct intc_desc_int *d)
1080{
1081 struct intc_subgroup_entry *entries[32];
1082 unsigned long flags;
1083 unsigned int nr_found;
1084 int i;
1085
1086 spin_lock_irqsave(&d->lock, flags);
1087
1088restart:
1089 nr_found = radix_tree_gang_lookup_tag_slot(&d->tree,
1090 (void ***)entries, 0, ARRAY_SIZE(entries),
1091 INTC_TAG_VIRQ_NEEDS_ALLOC);
1092
1093 for (i = 0; i < nr_found; i++) {
1094 struct intc_subgroup_entry *entry;
1095 int irq;
1096
1097 entry = radix_tree_deref_slot((void **)entries[i]);
1098 if (unlikely(!entry))
1099 continue;
1100 if (unlikely(entry == RADIX_TREE_RETRY))
1101 goto restart;
1102
1103 irq = create_irq();
1104 if (unlikely(irq < 0)) {
1105 pr_err("no more free IRQs, bailing..\n");
1106 break;
1107 }
1108
1109 pr_info("Setting up a chained VIRQ from %d -> %d\n",
1110 irq, entry->pirq);
1111
1112 spin_lock(&xlate_lock);
1113 intc_irq_xlate[irq].desc = d;
1114 intc_irq_xlate[irq].enum_id = entry->enum_id;
1115 spin_unlock(&xlate_lock);
1116
1117 set_irq_chip_and_handler_name(irq, get_irq_chip(entry->pirq),
1118 handle_simple_irq, "virq");
1119 set_irq_chip_data(irq, get_irq_chip_data(entry->pirq));
1120
1121 set_irq_data(irq, (void *)entry->handle);
1122
1123 set_irq_chained_handler(entry->pirq, intc_virq_handler);
1124 add_virq_to_pirq(entry->pirq, irq);
1125
1126 radix_tree_tag_clear(&d->tree, entry->enum_id,
1127 INTC_TAG_VIRQ_NEEDS_ALLOC);
1128 radix_tree_replace_slot((void **)entries[i],
1129 &intc_irq_xlate[irq]);
1130 }
1131
1132 spin_unlock_irqrestore(&d->lock, flags);
1133}
1134
1135void __init intc_finalize(void)
1136{
1137 struct intc_desc_int *d;
1138
1139 list_for_each_entry(d, &intc_list, list)
1140 if (radix_tree_tagged(&d->tree, INTC_TAG_VIRQ_NEEDS_ALLOC))
1141 intc_subgroup_map(d);
1142}
1143
1144static void __init intc_register_irq(struct intc_desc *desc,
1145 struct intc_desc_int *d,
1146 intc_enum enum_id,
1147 unsigned int irq)
1148{
1149 struct intc_handle_int *hp;
1150 unsigned int data[2], primary;
1151 unsigned long flags;
1152
1153 /*
1154 * Register the IRQ position with the global IRQ map, then insert
1155 * it in to the radix tree.
1156 */
1157 set_bit(irq, intc_irq_map);
1158
1159 spin_lock_irqsave(&xlate_lock, flags);
1160 radix_tree_insert(&d->tree, enum_id, &intc_irq_xlate[irq]);
1161 spin_unlock_irqrestore(&xlate_lock, flags);
1162
1163 /*
1164 * Prefer single interrupt source bitmap over other combinations:
1165 *
1166 * 1. bitmap, single interrupt source
1167 * 2. priority, single interrupt source
1168 * 3. bitmap, multiple interrupt sources (groups)
1169 * 4. priority, multiple interrupt sources (groups)
1170 */
1171 data[0] = intc_mask_data(desc, d, enum_id, 0);
1172 data[1] = intc_prio_data(desc, d, enum_id, 0);
1173
1174 primary = 0;
1175 if (!data[0] && data[1])
1176 primary = 1;
1177
1178 if (!data[0] && !data[1])
1179 pr_warning("missing unique irq mask for irq %d (vect 0x%04x)\n",
1180 irq, irq2evt(irq));
1181
1182 data[0] = data[0] ? data[0] : intc_mask_data(desc, d, enum_id, 1);
1183 data[1] = data[1] ? data[1] : intc_prio_data(desc, d, enum_id, 1);
1184
1185 if (!data[primary])
1186 primary ^= 1;
1187
1188 BUG_ON(!data[primary]); /* must have primary masking method */
1189
1190 disable_irq_nosync(irq);
1191 set_irq_chip_and_handler_name(irq, &d->chip,
1192 handle_level_irq, "level");
1193 set_irq_chip_data(irq, (void *)data[primary]);
1194
1195 /*
1196 * set priority level
1197 * - this needs to be at least 2 for 5-bit priorities on 7780
1198 */
1199 intc_prio_level[irq] = default_prio_level;
1200
1201 /* enable secondary masking method if present */
1202 if (data[!primary])
1203 _intc_enable(irq, data[!primary]);
1204
1205 /* add irq to d->prio list if priority is available */
1206 if (data[1]) {
1207 hp = d->prio + d->nr_prio;
1208 hp->irq = irq;
1209 hp->handle = data[1];
1210
1211 if (primary) {
1212 /*
1213 * only secondary priority should access registers, so
1214 * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
1215 */
1216 hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
1217 hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
1218 }
1219 d->nr_prio++;
1220 }
1221
1222 /* add irq to d->sense list if sense is available */
1223 data[0] = intc_sense_data(desc, d, enum_id);
1224 if (data[0]) {
1225 (d->sense + d->nr_sense)->irq = irq;
1226 (d->sense + d->nr_sense)->handle = data[0];
1227 d->nr_sense++;
1228 }
1229
1230 /* irq should be disabled by default */
1231 d->chip.mask(irq);
1232
1233 if (desc->hw.ack_regs)
1234 ack_handle[irq] = intc_ack_data(desc, d, enum_id);
1235
1236#ifdef CONFIG_INTC_BALANCING
1237 if (desc->hw.mask_regs)
1238 dist_handle[irq] = intc_dist_data(desc, d, enum_id);
1239#endif
1240
1241 activate_irq(irq);
1242}
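/*
 * Editor's note (worked example of the preference order above): for a
 * vector present in both a mask register and a priority register,
 * data[0] and data[1] are both non-zero, so the bitmap handle becomes
 * primary (stored as irq_chip_data) and the priority handle is enabled
 * once here as the secondary method and kept in d->prio for
 * intc_set_priority(). Only when priority masking itself is primary is
 * the d->prio handle mangled to REG_FN_ERR, since the level is then
 * applied through intc_prio_level[] on the next enable instead.
 */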
1243
1244static unsigned int __init save_reg(struct intc_desc_int *d,
1245 unsigned int cnt,
1246 unsigned long value,
1247 unsigned int smp)
1248{
1249 if (value) {
1250 value = intc_phys_to_virt(d, value);
1251
1252 d->reg[cnt] = value;
1253#ifdef CONFIG_SMP
1254 d->smp[cnt] = smp;
1255#endif
1256 return 1;
1257 }
1258
1259 return 0;
1260}
1261
1262int __init register_intc_controller(struct intc_desc *desc)
1263{
1264 unsigned int i, k, smp;
1265 struct intc_hw_desc *hw = &desc->hw;
1266 struct intc_desc_int *d;
1267 struct resource *res;
1268
1269 pr_info("Registered controller '%s' with %u IRQs\n",
1270 desc->name, hw->nr_vectors);
1271
1272 d = kzalloc(sizeof(*d), GFP_NOWAIT);
1273 if (!d)
1274 goto err0;
1275
1276 INIT_LIST_HEAD(&d->list);
1277 list_add_tail(&d->list, &intc_list);
1278
1279 spin_lock_init(&d->lock);
1280
1281 d->index = nr_intc_controllers;
1282
1283 if (desc->num_resources) {
1284 d->nr_windows = desc->num_resources;
1285 d->window = kzalloc(d->nr_windows * sizeof(*d->window),
1286 GFP_NOWAIT);
1287 if (!d->window)
1288 goto err1;
1289
1290 for (k = 0; k < d->nr_windows; k++) {
1291 res = desc->resource + k;
1292 WARN_ON(resource_type(res) != IORESOURCE_MEM);
1293 d->window[k].phys = res->start;
1294 d->window[k].size = resource_size(res);
1295 d->window[k].virt = ioremap_nocache(res->start,
1296 resource_size(res));
1297 if (!d->window[k].virt)
1298 goto err2;
1299 }
1300 }
1301
1302 d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
1303#ifdef CONFIG_INTC_BALANCING
1304 if (d->nr_reg)
1305 d->nr_reg += hw->nr_mask_regs;
1306#endif
1307 d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
1308 d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
1309 d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;
1310 d->nr_reg += hw->subgroups ? hw->nr_subgroups : 0;
1311
1312 d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
1313 if (!d->reg)
1314 goto err2;
1315
1316#ifdef CONFIG_SMP
1317 d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
1318 if (!d->smp)
1319 goto err3;
1320#endif
1321 k = 0;
1322
1323 if (hw->mask_regs) {
1324 for (i = 0; i < hw->nr_mask_regs; i++) {
1325 smp = IS_SMP(hw->mask_regs[i]);
1326 k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
1327 k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
1328#ifdef CONFIG_INTC_BALANCING
1329 k += save_reg(d, k, hw->mask_regs[i].dist_reg, 0);
1330#endif
1331 }
1332 }
1333
1334 if (hw->prio_regs) {
1335 d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
1336 GFP_NOWAIT);
1337 if (!d->prio)
1338 goto err4;
1339
1340 for (i = 0; i < hw->nr_prio_regs; i++) {
1341 smp = IS_SMP(hw->prio_regs[i]);
1342 k += save_reg(d, k, hw->prio_regs[i].set_reg, smp);
1343 k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp);
1344 }
1345 }
1346
1347 if (hw->sense_regs) {
1348 d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
1349 GFP_NOWAIT);
1350 if (!d->sense)
1351 goto err5;
1352
1353 for (i = 0; i < hw->nr_sense_regs; i++)
1354 k += save_reg(d, k, hw->sense_regs[i].reg, 0);
1355 }
1356
1357 if (hw->subgroups)
1358 for (i = 0; i < hw->nr_subgroups; i++)
1359 if (hw->subgroups[i].reg)
 1360 k += save_reg(d, k, hw->subgroups[i].reg, 0);
1361
1362 d->chip.name = desc->name;
1363 d->chip.mask = intc_disable;
1364 d->chip.unmask = intc_enable;
1365 d->chip.mask_ack = intc_disable;
1366 d->chip.enable = intc_enable;
1367 d->chip.disable = intc_disable;
1368 d->chip.shutdown = intc_disable;
1369 d->chip.set_type = intc_set_sense;
1370 d->chip.set_wake = intc_set_wake;
1371#ifdef CONFIG_SMP
1372 d->chip.set_affinity = intc_set_affinity;
1373#endif
1374
1375 if (hw->ack_regs) {
1376 for (i = 0; i < hw->nr_ack_regs; i++)
1377 k += save_reg(d, k, hw->ack_regs[i].set_reg, 0);
1378
1379 d->chip.mask_ack = intc_mask_ack;
1380 }
1381
1382 /* disable bits matching force_disable before registering irqs */
1383 if (desc->force_disable)
1384 intc_enable_disable_enum(desc, d, desc->force_disable, 0);
1385
1386 /* disable bits matching force_enable before registering irqs */
1387 if (desc->force_enable)
1388 intc_enable_disable_enum(desc, d, desc->force_enable, 0);
1389
1390 BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */
1391
1392 /* register the vectors one by one */
1393 for (i = 0; i < hw->nr_vectors; i++) {
1394 struct intc_vect *vect = hw->vectors + i;
1395 unsigned int irq = evt2irq(vect->vect);
1396 unsigned long flags;
1397 struct irq_desc *irq_desc;
1398
1399 if (!vect->enum_id)
1400 continue;
1401
1402 irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
1403 if (unlikely(!irq_desc)) {
1404 pr_err("can't get irq_desc for %d\n", irq);
1405 continue;
1406 }
1407
1408 spin_lock_irqsave(&xlate_lock, flags);
1409 intc_irq_xlate[irq].enum_id = vect->enum_id;
1410 intc_irq_xlate[irq].desc = d;
1411 spin_unlock_irqrestore(&xlate_lock, flags);
1412
1413 intc_register_irq(desc, d, vect->enum_id, irq);
1414
1415 for (k = i + 1; k < hw->nr_vectors; k++) {
1416 struct intc_vect *vect2 = hw->vectors + k;
1417 unsigned int irq2 = evt2irq(vect2->vect);
1418
1419 if (vect->enum_id != vect2->enum_id)
1420 continue;
1421
1422 /*
1423 * In the case of multi-evt handling and sparse
1424 * IRQ support, each vector still needs to have
1425 * its own backing irq_desc.
1426 */
1427 irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id());
1428 if (unlikely(!irq_desc)) {
1429 pr_err("can't get irq_desc for %d\n", irq2);
1430 continue;
1431 }
1432
1433 vect2->enum_id = 0;
1434
 1435 /* redirect these interrupts to the first one */
1436 set_irq_chip(irq2, &dummy_irq_chip);
1437 set_irq_chained_handler(irq2, intc_redirect_irq);
1438 set_irq_data(irq2, (void *)irq);
1439 }
1440 }
1441
1442 intc_subgroup_init(desc, d);
1443
1444 /* enable bits matching force_enable after registering irqs */
1445 if (desc->force_enable)
1446 intc_enable_disable_enum(desc, d, desc->force_enable, 1);
1447
1448 nr_intc_controllers++;
1449
1450 return 0;
1451err5:
1452 kfree(d->prio);
1453err4:
1454#ifdef CONFIG_SMP
1455 kfree(d->smp);
1456err3:
1457#endif
1458 kfree(d->reg);
1459err2:
1460 for (k = 0; k < d->nr_windows; k++)
1461 if (d->window[k].virt)
1462 iounmap(d->window[k].virt);
1463
1464 kfree(d->window);
1465err1:
1466 kfree(d);
1467err0:
1468 pr_err("unable to allocate INTC memory\n");
1469
1470 return -ENOMEM;
1471}
1472
1473#ifdef CONFIG_INTC_USERIMASK
1474static void __iomem *uimask;
1475
1476int register_intc_userimask(unsigned long addr)
1477{
1478 if (unlikely(uimask))
1479 return -EBUSY;
1480
1481 uimask = ioremap_nocache(addr, SZ_4K);
1482 if (unlikely(!uimask))
1483 return -ENOMEM;
1484
1485 pr_info("userimask support registered for levels 0 -> %d\n",
1486 default_prio_level - 1);
1487
1488 return 0;
1489}
1490
1491static ssize_t
1492show_intc_userimask(struct sysdev_class *cls,
1493 struct sysdev_class_attribute *attr, char *buf)
1494{
1495 return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
1496}
1497
1498static ssize_t
1499store_intc_userimask(struct sysdev_class *cls,
1500 struct sysdev_class_attribute *attr,
1501 const char *buf, size_t count)
1502{
1503 unsigned long level;
1504
1505 level = simple_strtoul(buf, NULL, 10);
1506
1507 /*
1508 * Minimal acceptable IRQ levels are in the 2 - 16 range, but
1509 * these are chomped so as to not interfere with normal IRQs.
1510 *
1511 * Level 1 is a special case on some CPUs in that it's not
1512 * directly settable, but given that USERIMASK cuts off below a
1513 * certain level, we don't care about this limitation here.
1514 * Level 0 on the other hand equates to user masking disabled.
1515 *
1516 * We use default_prio_level as a cut off so that only special
1517 * case opt-in IRQs can be mangled.
1518 */
1519 if (level >= default_prio_level)
1520 return -EINVAL;
1521
1522 __raw_writel(0xa5 << 24 | level << 4, uimask);
1523
1524 return count;
1525}
1526
1527static SYSDEV_CLASS_ATTR(userimask, S_IRUSR | S_IWUSR,
1528 show_intc_userimask, store_intc_userimask);
1529#endif
1530
1531#ifdef CONFIG_INTC_MAPPING_DEBUG
1532static int intc_irq_xlate_debug(struct seq_file *m, void *priv)
1533{
1534 int i;
1535
1536 seq_printf(m, "%-5s %-7s %-15s\n", "irq", "enum", "chip name");
1537
1538 for (i = 1; i < nr_irqs; i++) {
1539 struct intc_desc_int *desc = intc_irq_xlate[i].desc;
1540
1541 if (!desc)
1542 continue;
1543
1544 seq_printf(m, "%5d ", i);
1545 seq_printf(m, "0x%05x ", intc_irq_xlate[i].enum_id);
1546 seq_printf(m, "%-15s\n", desc->chip.name);
1547 }
1548
1549 return 0;
1550}
1551
1552static int intc_irq_xlate_open(struct inode *inode, struct file *file)
1553{
1554 return single_open(file, intc_irq_xlate_debug, inode->i_private);
1555}
1556
1557static const struct file_operations intc_irq_xlate_fops = {
1558 .open = intc_irq_xlate_open,
1559 .read = seq_read,
1560 .llseek = seq_lseek,
1561 .release = single_release,
1562};
1563
1564static int __init intc_irq_xlate_init(void)
1565{
1566 /*
1567 * XXX.. use arch_debugfs_dir here when all of the intc users are
1568 * converted.
1569 */
1570 if (debugfs_create_file("intc_irq_xlate", S_IRUGO, NULL, NULL,
1571 &intc_irq_xlate_fops) == NULL)
1572 return -ENOMEM;
1573
1574 return 0;
1575}
1576fs_initcall(intc_irq_xlate_init);
1577#endif
1578
1579static ssize_t
1580show_intc_name(struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
1581{
1582 struct intc_desc_int *d;
1583
1584 d = container_of(dev, struct intc_desc_int, sysdev);
1585
1586 return sprintf(buf, "%s\n", d->chip.name);
1587}
1588
1589static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);
1590
1591static int intc_suspend(struct sys_device *dev, pm_message_t state)
1592{
1593 struct intc_desc_int *d;
1594 struct irq_desc *desc;
1595 int irq;
1596
1597 /* get intc controller associated with this sysdev */
1598 d = container_of(dev, struct intc_desc_int, sysdev);
1599
1600 switch (state.event) {
1601 case PM_EVENT_ON:
1602 if (d->state.event != PM_EVENT_FREEZE)
1603 break;
1604 for_each_irq_desc(irq, desc) {
1605 if (desc->handle_irq == intc_redirect_irq)
1606 continue;
1607 if (desc->chip != &d->chip)
1608 continue;
1609 if (desc->status & IRQ_DISABLED)
1610 intc_disable(irq);
1611 else
1612 intc_enable(irq);
1613 }
1614 break;
1615 case PM_EVENT_FREEZE:
1616 /* nothing has to be done */
1617 break;
1618 case PM_EVENT_SUSPEND:
1619 /* enable wakeup irqs belonging to this intc controller */
1620 for_each_irq_desc(irq, desc) {
1621 if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip))
1622 intc_enable(irq);
1623 }
1624 break;
1625 }
1626 d->state = state;
1627
1628 return 0;
1629}
1630
1631static int intc_resume(struct sys_device *dev)
1632{
1633 return intc_suspend(dev, PMSG_ON);
1634}
1635
1636static struct sysdev_class intc_sysdev_class = {
1637 .name = "intc",
1638 .suspend = intc_suspend,
1639 .resume = intc_resume,
1640};
1641
1642/* register this intc as sysdev to allow suspend/resume */
1643static int __init register_intc_sysdevs(void)
1644{
1645 struct intc_desc_int *d;
1646 int error;
1647
1648 error = sysdev_class_register(&intc_sysdev_class);
1649#ifdef CONFIG_INTC_USERIMASK
1650 if (!error && uimask)
1651 error = sysdev_class_create_file(&intc_sysdev_class,
1652 &attr_userimask);
1653#endif
1654 if (!error) {
1655 list_for_each_entry(d, &intc_list, list) {
1656 d->sysdev.id = d->index;
1657 d->sysdev.cls = &intc_sysdev_class;
1658 error = sysdev_register(&d->sysdev);
1659 if (error == 0)
1660 error = sysdev_create_file(&d->sysdev,
1661 &attr_name);
1662 if (error)
1663 break;
1664 }
1665 }
1666
1667 if (error)
1668 pr_err("sysdev registration error\n");
1669
1670 return error;
1671}
1672device_initcall(register_intc_sysdevs);
1673
1674/*
1675 * Dynamic IRQ allocation and deallocation
1676 */
1677unsigned int create_irq_nr(unsigned int irq_want, int node)
1678{
1679 unsigned int irq = 0, new;
1680 unsigned long flags;
1681 struct irq_desc *desc;
1682
1683 spin_lock_irqsave(&vector_lock, flags);
1684
1685 /*
1686 * First try the wanted IRQ
1687 */
1688 if (test_and_set_bit(irq_want, intc_irq_map) == 0) {
1689 new = irq_want;
1690 } else {
1691 /* .. then fall back to scanning. */
1692 new = find_first_zero_bit(intc_irq_map, nr_irqs);
1693 if (unlikely(new == nr_irqs))
1694 goto out_unlock;
1695
1696 __set_bit(new, intc_irq_map);
1697 }
1698
1699 desc = irq_to_desc_alloc_node(new, node);
1700 if (unlikely(!desc)) {
1701 pr_err("can't get irq_desc for %d\n", new);
1702 goto out_unlock;
1703 }
1704
1705 desc = move_irq_desc(desc, node);
1706 irq = new;
1707
1708out_unlock:
1709 spin_unlock_irqrestore(&vector_lock, flags);
1710
1711 if (irq > 0) {
1712 dynamic_irq_init(irq);
1713 activate_irq(irq);
1714 }
1715
1716 return irq;
1717}
1718
1719int create_irq(void)
1720{
1721 int nid = cpu_to_node(smp_processor_id());
1722 int irq;
1723
1724 irq = create_irq_nr(NR_IRQS_LEGACY, nid);
1725 if (irq == 0)
1726 irq = -1;
1727
1728 return irq;
1729}
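/*
 * Editor's sketch: typical dynamic IRQ usage, as exercised by the
 * subgroup VIRQ mapping above:
 *
 *	int virq = create_irq();
 *	if (virq < 0)
 *		return -ENOSPC;
 *	...
 *	destroy_irq(virq);
 */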
1730
1731void destroy_irq(unsigned int irq)
1732{
1733 unsigned long flags;
1734
1735 dynamic_irq_cleanup(irq);
1736
1737 spin_lock_irqsave(&vector_lock, flags);
1738 __clear_bit(irq, intc_irq_map);
1739 spin_unlock_irqrestore(&vector_lock, flags);
1740}
1741
1742int reserve_irq_vector(unsigned int irq)
1743{
1744 unsigned long flags;
1745 int ret = 0;
1746
1747 spin_lock_irqsave(&vector_lock, flags);
1748 if (test_and_set_bit(irq, intc_irq_map))
1749 ret = -EBUSY;
1750 spin_unlock_irqrestore(&vector_lock, flags);
1751
1752 return ret;
1753}
1754
1755void reserve_intc_vectors(struct intc_vect *vectors, unsigned int nr_vecs)
1756{
1757 unsigned long flags;
1758 int i;
1759
1760 spin_lock_irqsave(&vector_lock, flags);
1761 for (i = 0; i < nr_vecs; i++)
1762 __set_bit(evt2irq(vectors[i].vect), intc_irq_map);
1763 spin_unlock_irqrestore(&vector_lock, flags);
1764}
1765
1766void reserve_irq_legacy(void)
1767{
1768 unsigned long flags;
1769 int i, j;
1770
1771 spin_lock_irqsave(&vector_lock, flags);
1772 j = find_first_bit(intc_irq_map, nr_irqs);
1773 for (i = 0; i < j; i++)
1774 __set_bit(i, intc_irq_map);
1775 spin_unlock_irqrestore(&vector_lock, flags);
1776}
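For orientation before the per-file diffs that follow: register_intc_controller() is driven by __initdata tables supplied from CPU setup code in arch/sh (or ARM SH-Mobile), usually via the DECLARE_INTC_DESC() helper from linux/sh_intc.h. A minimal caller looks roughly like the sketch below; the enum ids, register address and vector numbers are hypothetical, and a real setup file would normally also pass group, priority and sense tables.

	#include <linux/sh_intc.h>

	enum { UNUSED = 0, MYDEV0, MYDEV1 };	/* hypothetical sources */

	static struct intc_vect vectors[] __initdata = {
		INTC_VECT(MYDEV0, 0x400),
		INTC_VECT(MYDEV1, 0x420),
	};

	static struct intc_mask_reg mask_registers[] __initdata = {
		{ 0xffd00044, 0, 32,	/* enable register, no clear pair */
		  { MYDEV0, MYDEV1 } },
	};

	static DECLARE_INTC_DESC(intc_desc, "mychip", vectors, NULL,
				 mask_registers, NULL, NULL);

	void __init plat_irq_setup(void)
	{
		register_intc_controller(&intc_desc);
	}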
diff --git a/drivers/sh/intc/access.c b/drivers/sh/intc/access.c
new file mode 100644
index 000000000000..f892ae1d212a
--- /dev/null
+++ b/drivers/sh/intc/access.c
@@ -0,0 +1,237 @@
1/*
2 * Common INTC2 register accessors
3 *
4 * Copyright (C) 2007, 2008 Magnus Damm
5 * Copyright (C) 2009, 2010 Paul Mundt
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11#include <linux/io.h>
12#include "internals.h"
13
14unsigned long intc_phys_to_virt(struct intc_desc_int *d, unsigned long address)
15{
16 struct intc_window *window;
17 int k;
18
19 /* scan through physical windows and convert address */
20 for (k = 0; k < d->nr_windows; k++) {
21 window = d->window + k;
22
23 if (address < window->phys)
24 continue;
25
26 if (address >= (window->phys + window->size))
27 continue;
28
29 address -= window->phys;
30 address += (unsigned long)window->virt;
31
32 return address;
33 }
34
35 /* no windows defined, register must be 1:1 mapped virt:phys */
36 return address;
37}
38
39unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address)
40{
41 unsigned int k;
42
43 address = intc_phys_to_virt(d, address);
44
45 for (k = 0; k < d->nr_reg; k++) {
46 if (d->reg[k] == address)
47 return k;
48 }
49
50 BUG();
51 return 0;
52}
53
54unsigned int intc_set_field_from_handle(unsigned int value,
55 unsigned int field_value,
56 unsigned int handle)
57{
58 unsigned int width = _INTC_WIDTH(handle);
59 unsigned int shift = _INTC_SHIFT(handle);
60
61 value &= ~(((1 << width) - 1) << shift);
62 value |= field_value << shift;
63 return value;
64}
65
66unsigned long intc_get_field_from_handle(unsigned int value, unsigned int handle)
67{
68 unsigned int width = _INTC_WIDTH(handle);
69 unsigned int shift = _INTC_SHIFT(handle);
70 unsigned int mask = ((1 << width) - 1) << shift;
71
72 return (value & mask) >> shift;
73}
74
75static unsigned long test_8(unsigned long addr, unsigned long h,
76 unsigned long ignore)
77{
78 return intc_get_field_from_handle(__raw_readb(addr), h);
79}
80
81static unsigned long test_16(unsigned long addr, unsigned long h,
82 unsigned long ignore)
83{
84 return intc_get_field_from_handle(__raw_readw(addr), h);
85}
86
87static unsigned long test_32(unsigned long addr, unsigned long h,
88 unsigned long ignore)
89{
90 return intc_get_field_from_handle(__raw_readl(addr), h);
91}
92
93static unsigned long write_8(unsigned long addr, unsigned long h,
94 unsigned long data)
95{
96 __raw_writeb(intc_set_field_from_handle(0, data, h), addr);
97 (void)__raw_readb(addr); /* Defeat write posting */
98 return 0;
99}
100
101static unsigned long write_16(unsigned long addr, unsigned long h,
102 unsigned long data)
103{
104 __raw_writew(intc_set_field_from_handle(0, data, h), addr);
105 (void)__raw_readw(addr); /* Defeat write posting */
106 return 0;
107}
108
109static unsigned long write_32(unsigned long addr, unsigned long h,
110 unsigned long data)
111{
112 __raw_writel(intc_set_field_from_handle(0, data, h), addr);
113 (void)__raw_readl(addr); /* Defeat write posting */
114 return 0;
115}
116
117static unsigned long modify_8(unsigned long addr, unsigned long h,
118 unsigned long data)
119{
120 unsigned long flags;
121 unsigned int value;
122 local_irq_save(flags);
123 value = intc_set_field_from_handle(__raw_readb(addr), data, h);
124 __raw_writeb(value, addr);
125 (void)__raw_readb(addr); /* Defeat write posting */
126 local_irq_restore(flags);
127 return 0;
128}
129
130static unsigned long modify_16(unsigned long addr, unsigned long h,
131 unsigned long data)
132{
133 unsigned long flags;
134 unsigned int value;
135 local_irq_save(flags);
136 value = intc_set_field_from_handle(__raw_readw(addr), data, h);
137 __raw_writew(value, addr);
138 (void)__raw_readw(addr); /* Defeat write posting */
139 local_irq_restore(flags);
140 return 0;
141}
142
143static unsigned long modify_32(unsigned long addr, unsigned long h,
144 unsigned long data)
145{
146 unsigned long flags;
147 unsigned int value;
148 local_irq_save(flags);
149 value = intc_set_field_from_handle(__raw_readl(addr), data, h);
150 __raw_writel(value, addr);
151 (void)__raw_readl(addr); /* Defeat write posting */
152 local_irq_restore(flags);
153 return 0;
154}
155
156static unsigned long intc_mode_field(unsigned long addr,
157 unsigned long handle,
158 unsigned long (*fn)(unsigned long,
159 unsigned long,
160 unsigned long),
161 unsigned int irq)
162{
163 return fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1));
164}
165
166static unsigned long intc_mode_zero(unsigned long addr,
167 unsigned long handle,
168 unsigned long (*fn)(unsigned long,
169 unsigned long,
170 unsigned long),
171 unsigned int irq)
172{
173 return fn(addr, handle, 0);
174}
175
176static unsigned long intc_mode_prio(unsigned long addr,
177 unsigned long handle,
178 unsigned long (*fn)(unsigned long,
179 unsigned long,
180 unsigned long),
181 unsigned int irq)
182{
183 return fn(addr, handle, intc_get_prio_level(irq));
184}
185
186unsigned long (*intc_reg_fns[])(unsigned long addr,
187 unsigned long h,
188 unsigned long data) = {
189 [REG_FN_TEST_BASE + 0] = test_8,
190 [REG_FN_TEST_BASE + 1] = test_16,
191 [REG_FN_TEST_BASE + 3] = test_32,
192 [REG_FN_WRITE_BASE + 0] = write_8,
193 [REG_FN_WRITE_BASE + 1] = write_16,
194 [REG_FN_WRITE_BASE + 3] = write_32,
195 [REG_FN_MODIFY_BASE + 0] = modify_8,
196 [REG_FN_MODIFY_BASE + 1] = modify_16,
197 [REG_FN_MODIFY_BASE + 3] = modify_32,
198};
199
200unsigned long (*intc_enable_fns[])(unsigned long addr,
201 unsigned long handle,
202 unsigned long (*fn)(unsigned long,
203 unsigned long,
204 unsigned long),
205 unsigned int irq) = {
206 [MODE_ENABLE_REG] = intc_mode_field,
207 [MODE_MASK_REG] = intc_mode_zero,
208 [MODE_DUAL_REG] = intc_mode_field,
209 [MODE_PRIO_REG] = intc_mode_prio,
210 [MODE_PCLR_REG] = intc_mode_prio,
211};
212
213unsigned long (*intc_disable_fns[])(unsigned long addr,
214 unsigned long handle,
215 unsigned long (*fn)(unsigned long,
216 unsigned long,
217 unsigned long),
218 unsigned int irq) = {
219 [MODE_ENABLE_REG] = intc_mode_zero,
220 [MODE_MASK_REG] = intc_mode_field,
221 [MODE_DUAL_REG] = intc_mode_field,
222 [MODE_PRIO_REG] = intc_mode_zero,
223 [MODE_PCLR_REG] = intc_mode_field,
224};
225
226unsigned long (*intc_enable_noprio_fns[])(unsigned long addr,
227 unsigned long handle,
228 unsigned long (*fn)(unsigned long,
229 unsigned long,
230 unsigned long),
231 unsigned int irq) = {
232 [MODE_ENABLE_REG] = intc_mode_field,
233 [MODE_MASK_REG] = intc_mode_zero,
234 [MODE_DUAL_REG] = intc_mode_field,
235 [MODE_PRIO_REG] = intc_mode_field,
236 [MODE_PCLR_REG] = intc_mode_field,
237};
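
A note on the sparse indices above: the handle builders in handle.c compute the function slot as base + (reg_width >> 3) - 1, which yields offsets 0, 1 and 3 for 8-, 16- and 32-bit registers, so slot 2 of each table is intentionally left unpopulated. A minimal standalone sketch of that arithmetic (the base values mirror internals.h; nothing here is kernel code):

#include <stdio.h>

enum { REG_FN_TEST_BASE = 1, REG_FN_WRITE_BASE = 5, REG_FN_MODIFY_BASE = 9 };

/* Same arithmetic as the handle builders: 8/16/32-bit -> +0/+1/+3 */
static unsigned int reg_fn_index(unsigned int base, unsigned int reg_width)
{
	return base + (reg_width >> 3) - 1;
}

int main(void)
{
	unsigned int widths[] = { 8, 16, 32 };
	int i;

	for (i = 0; i < 3; i++)
		printf("width %2u -> write fn slot %u\n", widths[i],
		       reg_fn_index(REG_FN_WRITE_BASE, widths[i]));
	return 0;
}
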
diff --git a/drivers/sh/intc/balancing.c b/drivers/sh/intc/balancing.c
new file mode 100644
index 000000000000..cec7a96f2c09
--- /dev/null
+++ b/drivers/sh/intc/balancing.c
@@ -0,0 +1,97 @@
1/*
2 * Support for hardware-managed IRQ auto-distribution.
3 *
4 * Copyright (C) 2010 Paul Mundt
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10#include "internals.h"
11
12static unsigned long dist_handle[NR_IRQS];
13
14void intc_balancing_enable(unsigned int irq)
15{
16 struct intc_desc_int *d = get_intc_desc(irq);
17 unsigned long handle = dist_handle[irq];
18 unsigned long addr;
19
20 if (irq_balancing_disabled(irq) || !handle)
21 return;
22
23 addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
24 intc_reg_fns[_INTC_FN(handle)](addr, handle, 1);
25}
26
27void intc_balancing_disable(unsigned int irq)
28{
29 struct intc_desc_int *d = get_intc_desc(irq);
30 unsigned long handle = dist_handle[irq];
31 unsigned long addr;
32
33 if (irq_balancing_disabled(irq) || !handle)
34 return;
35
36 addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
37 intc_reg_fns[_INTC_FN(handle)](addr, handle, 0);
38}
39
40static unsigned int intc_dist_data(struct intc_desc *desc,
41 struct intc_desc_int *d,
42 intc_enum enum_id)
43{
44 struct intc_mask_reg *mr = desc->hw.mask_regs;
45 unsigned int i, j, fn, mode;
46 unsigned long reg_e, reg_d;
47
48 for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) {
49 mr = desc->hw.mask_regs + i;
50
51 /*
52 * Skip this entry if there's no auto-distribution
53 * register associated with it.
54 */
55 if (!mr->dist_reg)
56 continue;
57
58 for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
59 if (mr->enum_ids[j] != enum_id)
60 continue;
61
62 fn = REG_FN_MODIFY_BASE;
63 mode = MODE_ENABLE_REG;
64 reg_e = mr->dist_reg;
65 reg_d = mr->dist_reg;
66
67 fn += (mr->reg_width >> 3) - 1;
68 return _INTC_MK(fn, mode,
69 intc_get_reg(d, reg_e),
70 intc_get_reg(d, reg_d),
71 1,
72 (mr->reg_width - 1) - j);
73 }
74 }
75
76 /*
77 * It's possible we've gotten here with no distribution options
 78 * available for the IRQ in question, so we simply return 0 here.
79 */
80 return 0;
81}
82
83void intc_set_dist_handle(unsigned int irq, struct intc_desc *desc,
84 struct intc_desc_int *d, intc_enum id)
85{
86 unsigned long flags;
87
88 /*
89 * Nothing to do for this IRQ.
90 */
91 if (!desc->hw.mask_regs)
92 return;
93
94 raw_spin_lock_irqsave(&intc_big_lock, flags);
95 dist_handle[irq] = intc_dist_data(desc, d, id);
96 raw_spin_unlock_irqrestore(&intc_big_lock, flags);
97}
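
For context, a CPU opts in to auto-distribution by filling in the dist_reg member of its struct intc_mask_reg entries (the member only exists under CONFIG_INTC_BALANCING). The following self-contained sketch uses a mocked-up struct; the addresses and IDs are invented, not taken from any SH-X3 datasheet:

#include <stdio.h>

/* Mock of the intc_mask_reg members relevant to balancing */
struct mock_mask_reg {
	unsigned long set_reg, clr_reg, reg_width;
	unsigned int enum_ids[32];
	unsigned long dist_reg;
};

enum { UNUSED = 0, IRQ0, IRQ1, IRQ2, IRQ3 };	/* stand-in enum IDs */

static struct mock_mask_reg mask_registers[] = {
	{ 0xfe410030, 0xfe410050, 32,		/* set/clr, invented */
	  { IRQ0, IRQ1, IRQ2, IRQ3 },		/* MSB-first field map */
	  .dist_reg = 0xfe4100d0 },		/* auto-distribution reg */
};

int main(void)
{
	/* intc_dist_data() maps field j to bit (reg_width - 1) - j */
	unsigned int j = 2;	/* IRQ2 */

	printf("IRQ2 -> bit %lu of dist_reg 0x%08lx\n",
	       (mask_registers[0].reg_width - 1) - j,
	       mask_registers[0].dist_reg);
	return 0;
}
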
diff --git a/drivers/sh/intc/chip.c b/drivers/sh/intc/chip.c
new file mode 100644
index 000000000000..35c03706cc21
--- /dev/null
+++ b/drivers/sh/intc/chip.c
@@ -0,0 +1,215 @@
1/*
2 * IRQ chip definitions for INTC IRQs.
3 *
4 * Copyright (C) 2007, 2008 Magnus Damm
5 * Copyright (C) 2009, 2010 Paul Mundt
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11#include <linux/cpumask.h>
12#include <linux/io.h>
13#include "internals.h"
14
15void _intc_enable(unsigned int irq, unsigned long handle)
16{
17 struct intc_desc_int *d = get_intc_desc(irq);
18 unsigned long addr;
19 unsigned int cpu;
20
21 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
22#ifdef CONFIG_SMP
23 if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
24 continue;
25#endif
26 addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
27 intc_enable_fns[_INTC_MODE(handle)](addr, handle,
28 intc_reg_fns[_INTC_FN(handle)], irq);
29 }
30
31 intc_balancing_enable(irq);
32}
33
34static void intc_enable(unsigned int irq)
35{
36 _intc_enable(irq, (unsigned long)get_irq_chip_data(irq));
37}
38
39static void intc_disable(unsigned int irq)
40{
41 struct intc_desc_int *d = get_intc_desc(irq);
42 unsigned long handle = (unsigned long)get_irq_chip_data(irq);
43 unsigned long addr;
44 unsigned int cpu;
45
46 intc_balancing_disable(irq);
47
48 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
49#ifdef CONFIG_SMP
50 if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
51 continue;
52#endif
53 addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
54 intc_disable_fns[_INTC_MODE(handle)](addr, handle,
55 intc_reg_fns[_INTC_FN(handle)], irq);
56 }
57}
58
59static int intc_set_wake(unsigned int irq, unsigned int on)
60{
61 return 0; /* allow wakeup, but set up hardware in intc_suspend() */
62}
63
64#ifdef CONFIG_SMP
65/*
66 * This is called with the irq desc lock held, so we don't require any
67 * additional locking here at the intc desc level. The affinity mask is
68 * later tested in the enable/disable paths.
69 */
70static int intc_set_affinity(unsigned int irq, const struct cpumask *cpumask)
71{
72 if (!cpumask_intersects(cpumask, cpu_online_mask))
73 return -1;
74
75 cpumask_copy(irq_to_desc(irq)->affinity, cpumask);
76
77 return 0;
78}
79#endif
80
81static void intc_mask_ack(unsigned int irq)
82{
83 struct intc_desc_int *d = get_intc_desc(irq);
84 unsigned long handle = intc_get_ack_handle(irq);
85 unsigned long addr;
86
87 intc_disable(irq);
88
89 /* read register and write zero only to the associated bit */
90 if (handle) {
91 unsigned int value;
92
93 addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
94 value = intc_set_field_from_handle(0, 1, handle);
95
96 switch (_INTC_FN(handle)) {
97 case REG_FN_MODIFY_BASE + 0: /* 8bit */
98 __raw_readb(addr);
99 __raw_writeb(0xff ^ value, addr);
100 break;
101 case REG_FN_MODIFY_BASE + 1: /* 16bit */
102 __raw_readw(addr);
103 __raw_writew(0xffff ^ value, addr);
104 break;
105 case REG_FN_MODIFY_BASE + 3: /* 32bit */
106 __raw_readl(addr);
107 __raw_writel(0xffffffff ^ value, addr);
108 break;
109 default:
110 BUG();
111 break;
112 }
113 }
114}
115
116static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
117 unsigned int nr_hp,
118 unsigned int irq)
119{
120 int i;
121
122 /*
123 * this doesn't scale well, but...
124 *
125 * this function should only be used for certain uncommon
126 * operations such as intc_set_priority() and intc_set_type()
127 * and in those rare cases performance doesn't matter that much.
128 * keeping the memory footprint low is more important.
129 *
130 * one rather simple way to speed this up and still keep the
131 * memory footprint down is to make sure the array is sorted
132 * and then perform a bisect to look up the irq.
133 */
134 for (i = 0; i < nr_hp; i++) {
135 if ((hp + i)->irq != irq)
136 continue;
137
138 return hp + i;
139 }
140
141 return NULL;
142}
143
144int intc_set_priority(unsigned int irq, unsigned int prio)
145{
146 struct intc_desc_int *d = get_intc_desc(irq);
147 struct intc_handle_int *ihp;
148
149 if (!intc_get_prio_level(irq) || prio <= 1)
150 return -EINVAL;
151
152 ihp = intc_find_irq(d->prio, d->nr_prio, irq);
153 if (ihp) {
154 if (prio >= (1 << _INTC_WIDTH(ihp->handle)))
155 return -EINVAL;
156
157 intc_set_prio_level(irq, prio);
158
159 /*
160 * only set secondary masking method directly
161 * primary masking method is using intc_prio_level[irq]
162 * priority level will be set during next enable()
163 */
164 if (_INTC_FN(ihp->handle) != REG_FN_ERR)
165 _intc_enable(irq, ihp->handle);
166 }
167 return 0;
168}
169
170#define VALID(x) ((x) | 0x80)
171
172static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
173 [IRQ_TYPE_EDGE_FALLING] = VALID(0),
174 [IRQ_TYPE_EDGE_RISING] = VALID(1),
175 [IRQ_TYPE_LEVEL_LOW] = VALID(2),
176 /* SH7706, SH7707 and SH7709 do not support high level triggered */
177#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \
178 !defined(CONFIG_CPU_SUBTYPE_SH7707) && \
179 !defined(CONFIG_CPU_SUBTYPE_SH7709)
180 [IRQ_TYPE_LEVEL_HIGH] = VALID(3),
181#endif
182};
183
184static int intc_set_type(unsigned int irq, unsigned int type)
185{
186 struct intc_desc_int *d = get_intc_desc(irq);
187 unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
188 struct intc_handle_int *ihp;
189 unsigned long addr;
190
191 if (!value)
192 return -EINVAL;
193
194 ihp = intc_find_irq(d->sense, d->nr_sense, irq);
195 if (ihp) {
196 addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
197 intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
198 }
199
200 return 0;
201}
202
203struct irq_chip intc_irq_chip = {
204 .mask = intc_disable,
205 .unmask = intc_enable,
206 .mask_ack = intc_mask_ack,
207 .enable = intc_enable,
208 .disable = intc_disable,
209 .shutdown = intc_disable,
210 .set_type = intc_set_type,
211 .set_wake = intc_set_wake,
212#ifdef CONFIG_SMP
213 .set_affinity = intc_set_affinity,
214#endif
215};
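
The VALID() encoding above deserves a sentence: IRQ_TYPE_EDGE_FALLING maps to hardware value 0, so a table of raw values could not tell "configured as 0" apart from an absent entry. Bit 7 marks an entry as present, and the low bits are what eventually reaches the sense register; intc_set_type() never strips the marker explicitly, because intc_set_field_from_handle() masks the value down to the field width encoded in the handle, which discards it. A standalone restatement of the encoding (all names here are local stand-ins):

#include <stdio.h>

#define VALID(x)	((x) | 0x80)	/* bit 7 = "entry present" */

enum { EDGE_FALLING, EDGE_RISING, LEVEL_LOW, LEVEL_HIGH, NR_SENSE };

static unsigned char sense_table[NR_SENSE] = {
	[EDGE_FALLING]	= VALID(0),	/* value 0, still distinguishable */
	[EDGE_RISING]	= VALID(1),
	[LEVEL_LOW]	= VALID(2),
	/* LEVEL_HIGH deliberately left 0 -> unsupported */
};

int main(void)
{
	int t;

	for (t = 0; t < NR_SENSE; t++) {
		if (!sense_table[t])
			printf("type %d: unsupported\n", t);
		else	/* field-width masking strips bit 7 in the driver */
			printf("type %d: write %u\n", t, sense_table[t] & 0x7f);
	}
	return 0;
}
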
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
new file mode 100644
index 000000000000..306ed287077a
--- /dev/null
+++ b/drivers/sh/intc/core.c
@@ -0,0 +1,469 @@
1/*
2 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
3 *
4 * Copyright (C) 2007, 2008 Magnus Damm
5 * Copyright (C) 2009, 2010 Paul Mundt
6 *
7 * Based on intc2.c and ipr.c
8 *
9 * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
10 * Copyright (C) 2000 Kazumoto Kojima
11 * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
12 * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
13 * Copyright (C) 2005, 2006 Paul Mundt
14 *
15 * This file is subject to the terms and conditions of the GNU General Public
16 * License. See the file "COPYING" in the main directory of this archive
17 * for more details.
18 */
19#define pr_fmt(fmt) "intc: " fmt
20
21#include <linux/init.h>
22#include <linux/irq.h>
23#include <linux/io.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26#include <linux/sh_intc.h>
27#include <linux/sysdev.h>
28#include <linux/list.h>
29#include <linux/spinlock.h>
30#include <linux/radix-tree.h>
31#include "internals.h"
32
33LIST_HEAD(intc_list);
34DEFINE_RAW_SPINLOCK(intc_big_lock);
35unsigned int nr_intc_controllers;
36
37/*
38 * Default priority level
39 * - this needs to be at least 2 for 5-bit priorities on 7780
40 */
41static unsigned int default_prio_level = 2; /* 2 - 16 */
42static unsigned int intc_prio_level[NR_IRQS]; /* for now */
43
44unsigned int intc_get_dfl_prio_level(void)
45{
46 return default_prio_level;
47}
48
49unsigned int intc_get_prio_level(unsigned int irq)
50{
51 return intc_prio_level[irq];
52}
53
54void intc_set_prio_level(unsigned int irq, unsigned int level)
55{
56 unsigned long flags;
57
58 raw_spin_lock_irqsave(&intc_big_lock, flags);
59 intc_prio_level[irq] = level;
60 raw_spin_unlock_irqrestore(&intc_big_lock, flags);
61}
62
63static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
64{
65 generic_handle_irq((unsigned int)get_irq_data(irq));
66}
67
68static void __init intc_register_irq(struct intc_desc *desc,
69 struct intc_desc_int *d,
70 intc_enum enum_id,
71 unsigned int irq)
72{
73 struct intc_handle_int *hp;
74 unsigned int data[2], primary;
75 unsigned long flags;
76
77 /*
78 * Register the IRQ position with the global IRQ map, then insert
79 * it in to the radix tree.
80 */
81 reserve_irq_vector(irq);
82
83 raw_spin_lock_irqsave(&intc_big_lock, flags);
84 radix_tree_insert(&d->tree, enum_id, intc_irq_xlate_get(irq));
85 raw_spin_unlock_irqrestore(&intc_big_lock, flags);
86
87 /*
88 * Prefer single interrupt source bitmap over other combinations:
89 *
90 * 1. bitmap, single interrupt source
91 * 2. priority, single interrupt source
92 * 3. bitmap, multiple interrupt sources (groups)
93 * 4. priority, multiple interrupt sources (groups)
94 */
95 data[0] = intc_get_mask_handle(desc, d, enum_id, 0);
96 data[1] = intc_get_prio_handle(desc, d, enum_id, 0);
97
98 primary = 0;
99 if (!data[0] && data[1])
100 primary = 1;
101
102 if (!data[0] && !data[1])
103 pr_warning("missing unique irq mask for irq %d (vect 0x%04x)\n",
104 irq, irq2evt(irq));
105
106 data[0] = data[0] ? data[0] : intc_get_mask_handle(desc, d, enum_id, 1);
107 data[1] = data[1] ? data[1] : intc_get_prio_handle(desc, d, enum_id, 1);
108
109 if (!data[primary])
110 primary ^= 1;
111
112 BUG_ON(!data[primary]); /* must have primary masking method */
113
114 disable_irq_nosync(irq);
115 set_irq_chip_and_handler_name(irq, &d->chip,
116 handle_level_irq, "level");
117 set_irq_chip_data(irq, (void *)data[primary]);
118
119 /*
120 * set priority level
121 */
122 intc_set_prio_level(irq, intc_get_dfl_prio_level());
123
124 /* enable secondary masking method if present */
125 if (data[!primary])
126 _intc_enable(irq, data[!primary]);
127
128 /* add irq to d->prio list if priority is available */
129 if (data[1]) {
130 hp = d->prio + d->nr_prio;
131 hp->irq = irq;
132 hp->handle = data[1];
133
134 if (primary) {
135 /*
136 * only secondary priority should access registers, so
137 * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
138 */
139 hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
140 hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
141 }
142 d->nr_prio++;
143 }
144
145 /* add irq to d->sense list if sense is available */
146 data[0] = intc_get_sense_handle(desc, d, enum_id);
147 if (data[0]) {
148 (d->sense + d->nr_sense)->irq = irq;
149 (d->sense + d->nr_sense)->handle = data[0];
150 d->nr_sense++;
151 }
152
153 /* irq should be disabled by default */
154 d->chip.mask(irq);
155
156 intc_set_ack_handle(irq, desc, d, enum_id);
157 intc_set_dist_handle(irq, desc, d, enum_id);
158
159 activate_irq(irq);
160}
161
162static unsigned int __init save_reg(struct intc_desc_int *d,
163 unsigned int cnt,
164 unsigned long value,
165 unsigned int smp)
166{
167 if (value) {
168 value = intc_phys_to_virt(d, value);
169
170 d->reg[cnt] = value;
171#ifdef CONFIG_SMP
172 d->smp[cnt] = smp;
173#endif
174 return 1;
175 }
176
177 return 0;
178}
179
180int __init register_intc_controller(struct intc_desc *desc)
181{
182 unsigned int i, k, smp;
183 struct intc_hw_desc *hw = &desc->hw;
184 struct intc_desc_int *d;
185 struct resource *res;
186
187 pr_info("Registered controller '%s' with %u IRQs\n",
188 desc->name, hw->nr_vectors);
189
190 d = kzalloc(sizeof(*d), GFP_NOWAIT);
191 if (!d)
192 goto err0;
193
194 INIT_LIST_HEAD(&d->list);
195 list_add_tail(&d->list, &intc_list);
196
197 raw_spin_lock_init(&d->lock);
198
199 d->index = nr_intc_controllers;
200
201 if (desc->num_resources) {
202 d->nr_windows = desc->num_resources;
203 d->window = kzalloc(d->nr_windows * sizeof(*d->window),
204 GFP_NOWAIT);
205 if (!d->window)
206 goto err1;
207
208 for (k = 0; k < d->nr_windows; k++) {
209 res = desc->resource + k;
210 WARN_ON(resource_type(res) != IORESOURCE_MEM);
211 d->window[k].phys = res->start;
212 d->window[k].size = resource_size(res);
213 d->window[k].virt = ioremap_nocache(res->start,
214 resource_size(res));
215 if (!d->window[k].virt)
216 goto err2;
217 }
218 }
219
220 d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
221#ifdef CONFIG_INTC_BALANCING
222 if (d->nr_reg)
223 d->nr_reg += hw->nr_mask_regs;
224#endif
225 d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
226 d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
227 d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;
228 d->nr_reg += hw->subgroups ? hw->nr_subgroups : 0;
229
230 d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
231 if (!d->reg)
232 goto err2;
233
234#ifdef CONFIG_SMP
235 d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
236 if (!d->smp)
237 goto err3;
238#endif
239 k = 0;
240
241 if (hw->mask_regs) {
242 for (i = 0; i < hw->nr_mask_regs; i++) {
243 smp = IS_SMP(hw->mask_regs[i]);
244 k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
245 k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
246#ifdef CONFIG_INTC_BALANCING
247 k += save_reg(d, k, hw->mask_regs[i].dist_reg, 0);
248#endif
249 }
250 }
251
252 if (hw->prio_regs) {
253 d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
254 GFP_NOWAIT);
255 if (!d->prio)
256 goto err4;
257
258 for (i = 0; i < hw->nr_prio_regs; i++) {
259 smp = IS_SMP(hw->prio_regs[i]);
260 k += save_reg(d, k, hw->prio_regs[i].set_reg, smp);
261 k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp);
262 }
263 }
264
265 if (hw->sense_regs) {
266 d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
267 GFP_NOWAIT);
268 if (!d->sense)
269 goto err5;
270
271 for (i = 0; i < hw->nr_sense_regs; i++)
272 k += save_reg(d, k, hw->sense_regs[i].reg, 0);
273 }
274
275 if (hw->subgroups)
276 for (i = 0; i < hw->nr_subgroups; i++)
277 if (hw->subgroups[i].reg)
278 k += save_reg(d, k, hw->subgroups[i].reg, 0);
279
280 memcpy(&d->chip, &intc_irq_chip, sizeof(struct irq_chip));
281 d->chip.name = desc->name;
282
283 if (hw->ack_regs)
284 for (i = 0; i < hw->nr_ack_regs; i++)
285 k += save_reg(d, k, hw->ack_regs[i].set_reg, 0);
286 else
287 d->chip.mask_ack = d->chip.disable;
288
289 /* disable bits matching force_disable before registering irqs */
290 if (desc->force_disable)
291 intc_enable_disable_enum(desc, d, desc->force_disable, 0);
292
293 /* disable bits matching force_enable before registering irqs */
294 if (desc->force_enable)
295 intc_enable_disable_enum(desc, d, desc->force_enable, 0);
296
297 BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */
298
299 /* register the vectors one by one */
300 for (i = 0; i < hw->nr_vectors; i++) {
301 struct intc_vect *vect = hw->vectors + i;
302 unsigned int irq = evt2irq(vect->vect);
303 struct irq_desc *irq_desc;
304
305 if (!vect->enum_id)
306 continue;
307
308 irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
309 if (unlikely(!irq_desc)) {
310 pr_err("can't get irq_desc for %d\n", irq);
311 continue;
312 }
313
314 intc_irq_xlate_set(irq, vect->enum_id, d);
315 intc_register_irq(desc, d, vect->enum_id, irq);
316
317 for (k = i + 1; k < hw->nr_vectors; k++) {
318 struct intc_vect *vect2 = hw->vectors + k;
319 unsigned int irq2 = evt2irq(vect2->vect);
320
321 if (vect->enum_id != vect2->enum_id)
322 continue;
323
324 /*
325 * In the case of multi-evt handling and sparse
326 * IRQ support, each vector still needs to have
327 * its own backing irq_desc.
328 */
329 irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id());
330 if (unlikely(!irq_desc)) {
331 pr_err("can't get irq_desc for %d\n", irq2);
332 continue;
333 }
334
335 vect2->enum_id = 0;
336
337 /* redirect this interrupt to the first one */
338 set_irq_chip(irq2, &dummy_irq_chip);
339 set_irq_chained_handler(irq2, intc_redirect_irq);
340 set_irq_data(irq2, (void *)irq);
341 }
342 }
343
344 intc_subgroup_init(desc, d);
345
346 /* enable bits matching force_enable after registering irqs */
347 if (desc->force_enable)
348 intc_enable_disable_enum(desc, d, desc->force_enable, 1);
349
350 nr_intc_controllers++;
351
352 return 0;
353err5:
354 kfree(d->prio);
355err4:
356#ifdef CONFIG_SMP
357 kfree(d->smp);
358err3:
359#endif
360 kfree(d->reg);
361err2:
362 for (k = 0; k < d->nr_windows; k++)
363 if (d->window[k].virt)
364 iounmap(d->window[k].virt);
365
366 kfree(d->window);
367err1:
368 kfree(d);
369err0:
370 pr_err("unable to allocate INTC memory\n");
371
372 return -ENOMEM;
373}
374
375static ssize_t
376show_intc_name(struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
377{
378 struct intc_desc_int *d;
379
380 d = container_of(dev, struct intc_desc_int, sysdev);
381
382 return sprintf(buf, "%s\n", d->chip.name);
383}
384
385static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);
386
387static int intc_suspend(struct sys_device *dev, pm_message_t state)
388{
389 struct intc_desc_int *d;
390 struct irq_desc *desc;
391 int irq;
392
393 /* get intc controller associated with this sysdev */
394 d = container_of(dev, struct intc_desc_int, sysdev);
395
396 switch (state.event) {
397 case PM_EVENT_ON:
398 if (d->state.event != PM_EVENT_FREEZE)
399 break;
400
401 for_each_irq_desc(irq, desc) {
402 /*
403 * This will catch the redirect and VIRQ cases
404 * due to the dummy_irq_chip being inserted.
405 */
406 if (desc->chip != &d->chip)
407 continue;
408 if (desc->status & IRQ_DISABLED)
409 desc->chip->disable(irq);
410 else
411 desc->chip->enable(irq);
412 }
413 break;
414 case PM_EVENT_FREEZE:
415 /* nothing has to be done */
416 break;
417 case PM_EVENT_SUSPEND:
418 /* enable wakeup irqs belonging to this intc controller */
419 for_each_irq_desc(irq, desc) {
420 if (desc->chip != &d->chip)
421 continue;
422 if ((desc->status & IRQ_WAKEUP))
423 desc->chip->enable(irq);
424 }
425 break;
426 }
427
428 d->state = state;
429
430 return 0;
431}
432
433static int intc_resume(struct sys_device *dev)
434{
435 return intc_suspend(dev, PMSG_ON);
436}
437
438struct sysdev_class intc_sysdev_class = {
439 .name = "intc",
440 .suspend = intc_suspend,
441 .resume = intc_resume,
442};
443
444/* register this intc as sysdev to allow suspend/resume */
445static int __init register_intc_sysdevs(void)
446{
447 struct intc_desc_int *d;
448 int error;
449
450 error = sysdev_class_register(&intc_sysdev_class);
451 if (!error) {
452 list_for_each_entry(d, &intc_list, list) {
453 d->sysdev.id = d->index;
454 d->sysdev.cls = &intc_sysdev_class;
455 error = sysdev_register(&d->sysdev);
456 if (error == 0)
457 error = sysdev_create_file(&d->sysdev,
458 &attr_name);
459 if (error)
460 break;
461 }
462 }
463
464 if (error)
465 pr_err("sysdev registration error\n");
466
467 return error;
468}
469device_initcall(register_intc_sysdevs);
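
register_intc_controller() is normally fed from the CPU setup code via DECLARE_INTC_DESC() in <linux/sh_intc.h>. A hedged sketch of the shape of such a description, loosely modelled on the arch/sh setup files — the vector numbers, register address and enum IDs below are illustrative only, not from any real part:

#include <linux/sh_intc.h>

enum { UNUSED = 0, TMU0, TMU1, RTC };	/* per-controller enum IDs */

static struct intc_vect vectors[] __initdata = {
	INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
	INTC_VECT(RTC, 0x480),
};

static struct intc_prio_reg prio_registers[] __initdata = {
	/* set_reg, clr_reg, reg_width, field_width, enum_ids (MSB first) */
	{ 0xffd00004, 0, 16, 4, { TMU0, TMU1, RTC } },
};

static DECLARE_INTC_DESC(intc_desc, "sketch", vectors, NULL,
			 NULL, prio_registers, NULL);

void __init plat_irq_setup(void)
{
	register_intc_controller(&intc_desc);
}
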
diff --git a/drivers/sh/intc/dynamic.c b/drivers/sh/intc/dynamic.c
new file mode 100644
index 000000000000..6caecdffe201
--- /dev/null
+++ b/drivers/sh/intc/dynamic.c
@@ -0,0 +1,135 @@
1/*
2 * Dynamic IRQ management
3 *
4 * Copyright (C) 2010 Paul Mundt
5 *
6 * Modelled after arch/x86/kernel/apic/io_apic.c
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#define pr_fmt(fmt) "intc: " fmt
13
14#include <linux/irq.h>
15#include <linux/bitmap.h>
16#include <linux/spinlock.h>
17#include "internals.h" /* only for activate_irq() damage.. */
18
19/*
20 * The intc_irq_map provides a global map of bound IRQ vectors for a
21 * given platform. Allocation of IRQs is either static through the CPU
22 * vector map, or dynamic in the case of board mux vectors or MSI.
23 *
24 * As this is a central point for all IRQ controllers on the system,
25 * each of the available sources is mapped out here. This, combined with
26 * sparseirq, makes it quite trivial to keep the vector map tightly packed
27 * when dynamically creating IRQs, as well as tying in to otherwise
28 * unused irq_desc positions in the sparse array.
29 */
30static DECLARE_BITMAP(intc_irq_map, NR_IRQS);
31static DEFINE_RAW_SPINLOCK(vector_lock);
32
33/*
34 * Dynamic IRQ allocation and deallocation
35 */
36unsigned int create_irq_nr(unsigned int irq_want, int node)
37{
38 unsigned int irq = 0, new;
39 unsigned long flags;
40 struct irq_desc *desc;
41
42 raw_spin_lock_irqsave(&vector_lock, flags);
43
44 /*
45 * First try the wanted IRQ
46 */
47 if (test_and_set_bit(irq_want, intc_irq_map) == 0) {
48 new = irq_want;
49 } else {
50 /* .. then fall back to scanning. */
51 new = find_first_zero_bit(intc_irq_map, nr_irqs);
52 if (unlikely(new == nr_irqs))
53 goto out_unlock;
54
55 __set_bit(new, intc_irq_map);
56 }
57
58 desc = irq_to_desc_alloc_node(new, node);
59 if (unlikely(!desc)) {
60 pr_err("can't get irq_desc for %d\n", new);
61 goto out_unlock;
62 }
63
64 desc = move_irq_desc(desc, node);
65 irq = new;
66
67out_unlock:
68 raw_spin_unlock_irqrestore(&vector_lock, flags);
69
70 if (irq > 0) {
71 dynamic_irq_init(irq);
72 activate_irq(irq);
73 }
74
75 return irq;
76}
77
78int create_irq(void)
79{
80 int nid = cpu_to_node(smp_processor_id());
81 int irq;
82
83 irq = create_irq_nr(NR_IRQS_LEGACY, nid);
84 if (irq == 0)
85 irq = -1;
86
87 return irq;
88}
89
90void destroy_irq(unsigned int irq)
91{
92 unsigned long flags;
93
94 dynamic_irq_cleanup(irq);
95
96 raw_spin_lock_irqsave(&vector_lock, flags);
97 __clear_bit(irq, intc_irq_map);
98 raw_spin_unlock_irqrestore(&vector_lock, flags);
99}
100
101int reserve_irq_vector(unsigned int irq)
102{
103 unsigned long flags;
104 int ret = 0;
105
106 raw_spin_lock_irqsave(&vector_lock, flags);
107 if (test_and_set_bit(irq, intc_irq_map))
108 ret = -EBUSY;
109 raw_spin_unlock_irqrestore(&vector_lock, flags);
110
111 return ret;
112}
113
114void reserve_intc_vectors(struct intc_vect *vectors, unsigned int nr_vecs)
115{
116 unsigned long flags;
117 int i;
118
119 raw_spin_lock_irqsave(&vector_lock, flags);
120 for (i = 0; i < nr_vecs; i++)
121 __set_bit(evt2irq(vectors[i].vect), intc_irq_map);
122 raw_spin_unlock_irqrestore(&vector_lock, flags);
123}
124
125void reserve_irq_legacy(void)
126{
127 unsigned long flags;
128 int i, j;
129
130 raw_spin_lock_irqsave(&vector_lock, flags);
131 j = find_first_bit(intc_irq_map, nr_irqs);
132 for (i = 0; i < j; i++)
133 __set_bit(i, intc_irq_map);
134 raw_spin_unlock_irqrestore(&vector_lock, flags);
135}
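
From a consumer's point of view the dynamic API mirrors the io_apic code it was modelled on: allocate a vector, hook a handler to it, and tear both down in reverse order. A hypothetical usage sketch — the handler, name and error handling are invented for illustration:

#include <linux/interrupt.h>
#include <linux/irq.h>

static irqreturn_t mux_demux_handler(int irq, void *dev_id)
{
	/* demux a board-specific mux register here */
	return IRQ_HANDLED;
}

static int __init board_mux_setup(void)
{
	int irq, ret;

	irq = create_irq();		/* any free vector, -1 on failure */
	if (irq < 0)
		return -ENOSPC;

	ret = request_irq(irq, mux_demux_handler, 0, "board-mux", NULL);
	if (ret) {
		destroy_irq(irq);	/* give the vector back */
		return ret;
	}
	return 0;
}
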
diff --git a/drivers/sh/intc/handle.c b/drivers/sh/intc/handle.c
new file mode 100644
index 000000000000..057ce56829bf
--- /dev/null
+++ b/drivers/sh/intc/handle.c
@@ -0,0 +1,307 @@
1/*
2 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
3 *
4 * Copyright (C) 2007, 2008 Magnus Damm
5 * Copyright (C) 2009, 2010 Paul Mundt
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11#include <linux/init.h>
12#include <linux/irq.h>
13#include <linux/spinlock.h>
14#include "internals.h"
15
16static unsigned long ack_handle[NR_IRQS];
17
18static intc_enum __init intc_grp_id(struct intc_desc *desc,
19 intc_enum enum_id)
20{
21 struct intc_group *g = desc->hw.groups;
22 unsigned int i, j;
23
24 for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) {
25 g = desc->hw.groups + i;
26
27 for (j = 0; g->enum_ids[j]; j++) {
28 if (g->enum_ids[j] != enum_id)
29 continue;
30
31 return g->enum_id;
32 }
33 }
34
35 return 0;
36}
37
38static unsigned int __init _intc_mask_data(struct intc_desc *desc,
39 struct intc_desc_int *d,
40 intc_enum enum_id,
41 unsigned int *reg_idx,
42 unsigned int *fld_idx)
43{
44 struct intc_mask_reg *mr = desc->hw.mask_regs;
45 unsigned int fn, mode;
46 unsigned long reg_e, reg_d;
47
48 while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) {
49 mr = desc->hw.mask_regs + *reg_idx;
50
51 for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) {
52 if (mr->enum_ids[*fld_idx] != enum_id)
53 continue;
54
55 if (mr->set_reg && mr->clr_reg) {
56 fn = REG_FN_WRITE_BASE;
57 mode = MODE_DUAL_REG;
58 reg_e = mr->clr_reg;
59 reg_d = mr->set_reg;
60 } else {
61 fn = REG_FN_MODIFY_BASE;
62 if (mr->set_reg) {
63 mode = MODE_ENABLE_REG;
64 reg_e = mr->set_reg;
65 reg_d = mr->set_reg;
66 } else {
67 mode = MODE_MASK_REG;
68 reg_e = mr->clr_reg;
69 reg_d = mr->clr_reg;
70 }
71 }
72
73 fn += (mr->reg_width >> 3) - 1;
74 return _INTC_MK(fn, mode,
75 intc_get_reg(d, reg_e),
76 intc_get_reg(d, reg_d),
77 1,
78 (mr->reg_width - 1) - *fld_idx);
79 }
80
81 *fld_idx = 0;
82 (*reg_idx)++;
83 }
84
85 return 0;
86}
87
88unsigned int __init
89intc_get_mask_handle(struct intc_desc *desc, struct intc_desc_int *d,
90 intc_enum enum_id, int do_grps)
91{
92 unsigned int i = 0;
93 unsigned int j = 0;
94 unsigned int ret;
95
96 ret = _intc_mask_data(desc, d, enum_id, &i, &j);
97 if (ret)
98 return ret;
99
100 if (do_grps)
101 return intc_get_mask_handle(desc, d, intc_grp_id(desc, enum_id), 0);
102
103 return 0;
104}
105
106static unsigned int __init _intc_prio_data(struct intc_desc *desc,
107 struct intc_desc_int *d,
108 intc_enum enum_id,
109 unsigned int *reg_idx,
110 unsigned int *fld_idx)
111{
112 struct intc_prio_reg *pr = desc->hw.prio_regs;
113 unsigned int fn, n, mode, bit;
114 unsigned long reg_e, reg_d;
115
116 while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) {
117 pr = desc->hw.prio_regs + *reg_idx;
118
119 for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) {
120 if (pr->enum_ids[*fld_idx] != enum_id)
121 continue;
122
123 if (pr->set_reg && pr->clr_reg) {
124 fn = REG_FN_WRITE_BASE;
125 mode = MODE_PCLR_REG;
126 reg_e = pr->set_reg;
127 reg_d = pr->clr_reg;
128 } else {
129 fn = REG_FN_MODIFY_BASE;
130 mode = MODE_PRIO_REG;
131 if (!pr->set_reg)
132 BUG();
133 reg_e = pr->set_reg;
134 reg_d = pr->set_reg;
135 }
136
137 fn += (pr->reg_width >> 3) - 1;
138 n = *fld_idx + 1;
139
140 BUG_ON(n * pr->field_width > pr->reg_width);
141
142 bit = pr->reg_width - (n * pr->field_width);
143
144 return _INTC_MK(fn, mode,
145 intc_get_reg(d, reg_e),
146 intc_get_reg(d, reg_d),
147 pr->field_width, bit);
148 }
149
150 *fld_idx = 0;
151 (*reg_idx)++;
152 }
153
154 return 0;
155}
156
157unsigned int __init
158intc_get_prio_handle(struct intc_desc *desc, struct intc_desc_int *d,
159 intc_enum enum_id, int do_grps)
160{
161 unsigned int i = 0;
162 unsigned int j = 0;
163 unsigned int ret;
164
165 ret = _intc_prio_data(desc, d, enum_id, &i, &j);
166 if (ret)
167 return ret;
168
169 if (do_grps)
170 return intc_get_prio_handle(desc, d, intc_grp_id(desc, enum_id), 0);
171
172 return 0;
173}
174
175static unsigned int __init intc_ack_data(struct intc_desc *desc,
176 struct intc_desc_int *d,
177 intc_enum enum_id)
178{
179 struct intc_mask_reg *mr = desc->hw.ack_regs;
180 unsigned int i, j, fn, mode;
181 unsigned long reg_e, reg_d;
182
183 for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) {
184 mr = desc->hw.ack_regs + i;
185
186 for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
187 if (mr->enum_ids[j] != enum_id)
188 continue;
189
190 fn = REG_FN_MODIFY_BASE;
191 mode = MODE_ENABLE_REG;
192 reg_e = mr->set_reg;
193 reg_d = mr->set_reg;
194
195 fn += (mr->reg_width >> 3) - 1;
196 return _INTC_MK(fn, mode,
197 intc_get_reg(d, reg_e),
198 intc_get_reg(d, reg_d),
199 1,
200 (mr->reg_width - 1) - j);
201 }
202 }
203
204 return 0;
205}
206
207static void intc_enable_disable(struct intc_desc_int *d,
208 unsigned long handle, int do_enable)
209{
210 unsigned long addr;
211 unsigned int cpu;
212 unsigned long (*fn)(unsigned long, unsigned long,
213 unsigned long (*)(unsigned long, unsigned long,
214 unsigned long),
215 unsigned int);
216
217 if (do_enable) {
218 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
219 addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
220 fn = intc_enable_noprio_fns[_INTC_MODE(handle)];
221 fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
222 }
223 } else {
224 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
225 addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
226 fn = intc_disable_fns[_INTC_MODE(handle)];
227 fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
228 }
229 }
230}
231
232void __init intc_enable_disable_enum(struct intc_desc *desc,
233 struct intc_desc_int *d,
234 intc_enum enum_id, int enable)
235{
236 unsigned int i, j, data;
237
238 /* go through and enable/disable all mask bits */
239 i = j = 0;
240 do {
241 data = _intc_mask_data(desc, d, enum_id, &i, &j);
242 if (data)
243 intc_enable_disable(d, data, enable);
244 j++;
245 } while (data);
246
247 /* go through and enable/disable all priority fields */
248 i = j = 0;
249 do {
250 data = _intc_prio_data(desc, d, enum_id, &i, &j);
251 if (data)
252 intc_enable_disable(d, data, enable);
253
254 j++;
255 } while (data);
256}
257
258unsigned int __init
259intc_get_sense_handle(struct intc_desc *desc, struct intc_desc_int *d,
260 intc_enum enum_id)
261{
262 struct intc_sense_reg *sr = desc->hw.sense_regs;
263 unsigned int i, j, fn, bit;
264
265 for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) {
266 sr = desc->hw.sense_regs + i;
267
268 for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
269 if (sr->enum_ids[j] != enum_id)
270 continue;
271
272 fn = REG_FN_MODIFY_BASE;
273 fn += (sr->reg_width >> 3) - 1;
274
275 BUG_ON((j + 1) * sr->field_width > sr->reg_width);
276
277 bit = sr->reg_width - ((j + 1) * sr->field_width);
278
279 return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
280 0, sr->field_width, bit);
281 }
282 }
283
284 return 0;
285}
286
287
288void intc_set_ack_handle(unsigned int irq, struct intc_desc *desc,
289 struct intc_desc_int *d, intc_enum id)
290{
291 unsigned long flags;
292
293 /*
294 * Nothing to do for this IRQ.
295 */
296 if (!desc->hw.ack_regs)
297 return;
298
299 raw_spin_lock_irqsave(&intc_big_lock, flags);
300 ack_handle[irq] = intc_ack_data(desc, d, id);
301 raw_spin_unlock_irqrestore(&intc_big_lock, flags);
302}
303
304unsigned long intc_get_ack_handle(unsigned int irq)
305{
306 return ack_handle[irq];
307}
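
The mode selection in _intc_mask_data() and _intc_prio_data() boils down to one rule: with both a set and a clear register, single bits can be written blindly (MODE_DUAL_REG with REG_FN_WRITE_BASE); with a single register a read-modify-write is required (REG_FN_MODIFY_BASE), and the polarity depends on whether that register enables (MODE_ENABLE_REG) or masks (MODE_MASK_REG). The same decision tree, restated as a standalone sketch:

#include <stdio.h>

enum mode { MODE_ENABLE_REG, MODE_MASK_REG, MODE_DUAL_REG };

/* Reduced form of the register/mode choice in _intc_mask_data() */
static enum mode pick_mode(unsigned long set_reg, unsigned long clr_reg)
{
	if (set_reg && clr_reg)
		return MODE_DUAL_REG;	/* blind single-bit writes */
	return set_reg ? MODE_ENABLE_REG : MODE_MASK_REG; /* RMW path */
}

int main(void)
{
	printf("both: %d, set only: %d, clr only: %d\n",
	       pick_mode(0x10, 0x14), pick_mode(0x10, 0), pick_mode(0, 0x14));
	return 0;
}
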
diff --git a/drivers/sh/intc/internals.h b/drivers/sh/intc/internals.h
new file mode 100644
index 000000000000..f02a47f74930
--- /dev/null
+++ b/drivers/sh/intc/internals.h
@@ -0,0 +1,185 @@
1#include <linux/sh_intc.h>
2#include <linux/irq.h>
3#include <linux/list.h>
4#include <linux/kernel.h>
5#include <linux/types.h>
6#include <linux/radix-tree.h>
7#include <linux/sysdev.h>
8
9#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
10 ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
11 ((addr_e) << 16) | ((addr_d) << 24))
12
13#define _INTC_SHIFT(h) (h & 0x1f)
14#define _INTC_WIDTH(h) ((h >> 5) & 0xf)
15#define _INTC_FN(h) ((h >> 9) & 0xf)
16#define _INTC_MODE(h) ((h >> 13) & 0x7)
17#define _INTC_ADDR_E(h) ((h >> 16) & 0xff)
18#define _INTC_ADDR_D(h) ((h >> 24) & 0xff)
19
20#ifdef CONFIG_SMP
21#define IS_SMP(x) (x.smp)
22#define INTC_REG(d, x, c) (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c))
23#define SMP_NR(d, x) ((d->smp[(x)] >> 8) ? (d->smp[(x)] >> 8) : 1)
24#else
25#define IS_SMP(x) 0
26#define INTC_REG(d, x, c) (d->reg[(x)])
27#define SMP_NR(d, x) 1
28#endif
29
30struct intc_handle_int {
31 unsigned int irq;
32 unsigned long handle;
33};
34
35struct intc_window {
36 phys_addr_t phys;
37 void __iomem *virt;
38 unsigned long size;
39};
40
41struct intc_map_entry {
42 intc_enum enum_id;
43 struct intc_desc_int *desc;
44};
45
46struct intc_subgroup_entry {
47 unsigned int pirq;
48 intc_enum enum_id;
49 unsigned long handle;
50};
51
52struct intc_desc_int {
53 struct list_head list;
54 struct sys_device sysdev;
55 struct radix_tree_root tree;
56 pm_message_t state;
57 raw_spinlock_t lock;
58 unsigned int index;
59 unsigned long *reg;
60#ifdef CONFIG_SMP
61 unsigned long *smp;
62#endif
63 unsigned int nr_reg;
64 struct intc_handle_int *prio;
65 unsigned int nr_prio;
66 struct intc_handle_int *sense;
67 unsigned int nr_sense;
68 struct intc_window *window;
69 unsigned int nr_windows;
70 struct irq_chip chip;
71};
72
73
74enum {
75 REG_FN_ERR = 0,
76 REG_FN_TEST_BASE = 1,
77 REG_FN_WRITE_BASE = 5,
78 REG_FN_MODIFY_BASE = 9
79};
80
81enum { MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */
82 MODE_MASK_REG, /* Bit(s) set -> interrupt disabled */
83 MODE_DUAL_REG, /* Two registers, set bit to enable / disable */
84 MODE_PRIO_REG, /* Priority value written to enable interrupt */
85 MODE_PCLR_REG, /* Above plus all bits set to disable interrupt */
86};
87
88static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
89{
90 struct irq_chip *chip = get_irq_chip(irq);
91
92 return container_of(chip, struct intc_desc_int, chip);
93}
94
95/*
96 * Grumble.
97 */
98static inline void activate_irq(int irq)
99{
100#ifdef CONFIG_ARM
101 /* ARM requires an extra step to clear IRQ_NOREQUEST, which it
102 * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE.
103 */
104 set_irq_flags(irq, IRQF_VALID);
105#else
106 /* same effect on other architectures */
107 set_irq_noprobe(irq);
108#endif
109}
110
111/* access.c */
112extern unsigned long
113(*intc_reg_fns[])(unsigned long addr, unsigned long h, unsigned long data);
114
115extern unsigned long
116(*intc_enable_fns[])(unsigned long addr, unsigned long handle,
117 unsigned long (*fn)(unsigned long,
118 unsigned long, unsigned long),
119 unsigned int irq);
120extern unsigned long
121(*intc_disable_fns[])(unsigned long addr, unsigned long handle,
122 unsigned long (*fn)(unsigned long,
123 unsigned long, unsigned long),
124 unsigned int irq);
125extern unsigned long
126(*intc_enable_noprio_fns[])(unsigned long addr, unsigned long handle,
127 unsigned long (*fn)(unsigned long,
128 unsigned long, unsigned long),
129 unsigned int irq);
130
131unsigned long intc_phys_to_virt(struct intc_desc_int *d, unsigned long address);
132unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address);
133unsigned int intc_set_field_from_handle(unsigned int value,
134 unsigned int field_value,
135 unsigned int handle);
136unsigned long intc_get_field_from_handle(unsigned int value,
137 unsigned int handle);
138
139/* balancing.c */
140#ifdef CONFIG_INTC_BALANCING
141void intc_balancing_enable(unsigned int irq);
142void intc_balancing_disable(unsigned int irq);
143void intc_set_dist_handle(unsigned int irq, struct intc_desc *desc,
144 struct intc_desc_int *d, intc_enum id);
145#else
146static inline void intc_balancing_enable(unsigned int irq) { }
147static inline void intc_balancing_disable(unsigned int irq) { }
148static inline void intc_set_dist_handle(unsigned int irq, struct intc_desc *desc,
149 struct intc_desc_int *d, intc_enum id) { }
150#endif
151
152/* chip.c */
153extern struct irq_chip intc_irq_chip;
154void _intc_enable(unsigned int irq, unsigned long handle);
155
156/* core.c */
157extern struct list_head intc_list;
158extern raw_spinlock_t intc_big_lock;
159extern unsigned int nr_intc_controllers;
160extern struct sysdev_class intc_sysdev_class;
161
162unsigned int intc_get_dfl_prio_level(void);
163unsigned int intc_get_prio_level(unsigned int irq);
164void intc_set_prio_level(unsigned int irq, unsigned int level);
165
166/* handle.c */
167unsigned int intc_get_mask_handle(struct intc_desc *desc,
168 struct intc_desc_int *d,
169 intc_enum enum_id, int do_grps);
170unsigned int intc_get_prio_handle(struct intc_desc *desc,
171 struct intc_desc_int *d,
172 intc_enum enum_id, int do_grps);
173unsigned int intc_get_sense_handle(struct intc_desc *desc,
174 struct intc_desc_int *d,
175 intc_enum enum_id);
176void intc_set_ack_handle(unsigned int irq, struct intc_desc *desc,
177 struct intc_desc_int *d, intc_enum id);
178unsigned long intc_get_ack_handle(unsigned int irq);
179void intc_enable_disable_enum(struct intc_desc *desc, struct intc_desc_int *d,
180 intc_enum enum_id, int enable);
181
182/* virq.c */
183void intc_subgroup_init(struct intc_desc *desc, struct intc_desc_int *d);
184void intc_irq_xlate_set(unsigned int irq, intc_enum id, struct intc_desc_int *d);
185struct intc_map_entry *intc_irq_xlate_get(unsigned int irq);
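
Everything in the subsystem travels through this packed handle word, so the layout is worth a round-trip check: shift lives in bits 0-4, width in 5-8, fn in 9-12, mode in 13-15, and the two 8-bit register indices above that. A standalone sketch using the same macros, copied here (with the addr_d parenthesization fix applied) so it builds outside the kernel:

#include <assert.h>
#include <stdio.h>

#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
	((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
	 ((addr_e) << 16) | ((addr_d) << 24))
#define _INTC_SHIFT(h)	((h) & 0x1f)
#define _INTC_WIDTH(h)	(((h) >> 5) & 0xf)
#define _INTC_FN(h)	(((h) >> 9) & 0xf)
#define _INTC_MODE(h)	(((h) >> 13) & 0x7)
#define _INTC_ADDR_E(h)	(((h) >> 16) & 0xff)
#define _INTC_ADDR_D(h)	(((h) >> 24) & 0xff)

int main(void)
{
	unsigned long h = _INTC_MK(9, 2, 7, 8, 1, 30);

	assert(_INTC_FN(h) == 9 && _INTC_MODE(h) == 2);
	assert(_INTC_ADDR_E(h) == 7 && _INTC_ADDR_D(h) == 8);
	assert(_INTC_WIDTH(h) == 1 && _INTC_SHIFT(h) == 30);
	printf("handle = 0x%08lx\n", h);
	return 0;
}
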
diff --git a/drivers/sh/intc/userimask.c b/drivers/sh/intc/userimask.c
new file mode 100644
index 000000000000..e32304b66cf1
--- /dev/null
+++ b/drivers/sh/intc/userimask.c
@@ -0,0 +1,83 @@
1/*
2 * Support for hardware-assisted userspace interrupt masking.
3 *
4 * Copyright (C) 2010 Paul Mundt
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10#define pr_fmt(fmt) "intc: " fmt
11
12#include <linux/errno.h>
13#include <linux/sysdev.h>
14#include <linux/init.h>
15#include <linux/io.h>
16#include <asm/sizes.h>
17#include "internals.h"
18
19static void __iomem *uimask;
20
21static ssize_t
22show_intc_userimask(struct sysdev_class *cls,
23 struct sysdev_class_attribute *attr, char *buf)
24{
25 return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
26}
27
28static ssize_t
29store_intc_userimask(struct sysdev_class *cls,
30 struct sysdev_class_attribute *attr,
31 const char *buf, size_t count)
32{
33 unsigned long level;
34
35 level = simple_strtoul(buf, NULL, 10);
36
37 /*
38 * Minimal acceptable IRQ levels are in the 2 - 16 range, but
39 * these are chomped so as to not interfere with normal IRQs.
40 *
41 * Level 1 is a special case on some CPUs in that it's not
42 * directly settable, but given that USERIMASK cuts off below a
43 * certain level, we don't care about this limitation here.
44 * Level 0 on the other hand equates to user masking disabled.
45 *
46 * We use the default priority level as a cut off so that only
47 * special case opt-in IRQs can be mangled.
48 */
49 if (level >= intc_get_dfl_prio_level())
50 return -EINVAL;
51
52 __raw_writel(0xa5 << 24 | level << 4, uimask);
53
54 return count;
55}
56
57static SYSDEV_CLASS_ATTR(userimask, S_IRUSR | S_IWUSR,
58 show_intc_userimask, store_intc_userimask);
59
60
61static int __init userimask_sysdev_init(void)
62{
63 if (unlikely(!uimask))
64 return -ENXIO;
65
66 return sysdev_class_create_file(&intc_sysdev_class, &attr_userimask);
67}
68late_initcall(userimask_sysdev_init);
69
70int register_intc_userimask(unsigned long addr)
71{
72 if (unlikely(uimask))
73 return -EBUSY;
74
75 uimask = ioremap_nocache(addr, SZ_4K);
76 if (unlikely(!uimask))
77 return -ENOMEM;
78
79 pr_info("userimask support registered for levels 0 -> %d\n",
80 intc_get_dfl_prio_level() - 1);
81
82 return 0;
83}
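
Hooking this up is a one-liner from CPU or board setup code; the mapping below uses an invented address rather than one from a datasheet. Once registered, the cut-off level is exposed read/write through the intc sysdev class, i.e. the userimask attribute under /sys/devices/system/intc/.

/* Illustrative hook-up from CPU/board setup code */
void __init board_init_irq(void)
{
	/* base of the shadowed USERIMASK page; address is made up */
	register_intc_userimask(0xfe782000);
}
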
diff --git a/drivers/sh/intc/virq-debugfs.c b/drivers/sh/intc/virq-debugfs.c
new file mode 100644
index 000000000000..9e62ba9311f0
--- /dev/null
+++ b/drivers/sh/intc/virq-debugfs.c
@@ -0,0 +1,64 @@
1/*
2 * Support for virtual IRQ subgroups debugfs mapping.
3 *
4 * Copyright (C) 2010 Paul Mundt
5 *
6 * Modelled after arch/powerpc/kernel/irq.c.
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/seq_file.h>
13#include <linux/fs.h>
14#include <linux/init.h>
15#include <linux/irq.h>
16#include <linux/debugfs.h>
17#include "internals.h"
18
19static int intc_irq_xlate_debug(struct seq_file *m, void *priv)
20{
21 int i;
22
23 seq_printf(m, "%-5s %-7s %-15s\n", "irq", "enum", "chip name");
24
25 for (i = 1; i < nr_irqs; i++) {
26 struct intc_map_entry *entry = intc_irq_xlate_get(i);
27 struct intc_desc_int *desc = entry->desc;
28
29 if (!desc)
30 continue;
31
32 seq_printf(m, "%5d ", i);
33 seq_printf(m, "0x%05x ", entry->enum_id);
34 seq_printf(m, "%-15s\n", desc->chip.name);
35 }
36
37 return 0;
38}
39
40static int intc_irq_xlate_open(struct inode *inode, struct file *file)
41{
42 return single_open(file, intc_irq_xlate_debug, inode->i_private);
43}
44
45static const struct file_operations intc_irq_xlate_fops = {
46 .open = intc_irq_xlate_open,
47 .read = seq_read,
48 .llseek = seq_lseek,
49 .release = single_release,
50};
51
52static int __init intc_irq_xlate_init(void)
53{
54 /*
55 * XXX.. use arch_debugfs_dir here when all of the intc users are
56 * converted.
57 */
58 if (debugfs_create_file("intc_irq_xlate", S_IRUGO, NULL, NULL,
59 &intc_irq_xlate_fops) == NULL)
60 return -ENOMEM;
61
62 return 0;
63}
64fs_initcall(intc_irq_xlate_init);
diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c
new file mode 100644
index 000000000000..643dfd4d2057
--- /dev/null
+++ b/drivers/sh/intc/virq.c
@@ -0,0 +1,255 @@
1/*
2 * Support for virtual IRQ subgroups.
3 *
4 * Copyright (C) 2010 Paul Mundt
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10#define pr_fmt(fmt) "intc: " fmt
11
12#include <linux/slab.h>
13#include <linux/irq.h>
14#include <linux/list.h>
15#include <linux/radix-tree.h>
16#include <linux/spinlock.h>
17#include "internals.h"
18
19static struct intc_map_entry intc_irq_xlate[NR_IRQS];
20
21struct intc_virq_list {
22 unsigned int irq;
23 struct intc_virq_list *next;
24};
25
26#define for_each_virq(entry, head) \
27 for (entry = head; entry; entry = entry->next)
28
29/*
30 * Tags for the radix tree
31 */
32#define INTC_TAG_VIRQ_NEEDS_ALLOC 0
33
34void intc_irq_xlate_set(unsigned int irq, intc_enum id, struct intc_desc_int *d)
35{
36 unsigned long flags;
37
38 raw_spin_lock_irqsave(&intc_big_lock, flags);
39 intc_irq_xlate[irq].enum_id = id;
40 intc_irq_xlate[irq].desc = d;
41 raw_spin_unlock_irqrestore(&intc_big_lock, flags);
42}
43
44struct intc_map_entry *intc_irq_xlate_get(unsigned int irq)
45{
46 return intc_irq_xlate + irq;
47}
48
49int intc_irq_lookup(const char *chipname, intc_enum enum_id)
50{
51 struct intc_map_entry *ptr;
52 struct intc_desc_int *d;
53 int irq = -1;
54
55 list_for_each_entry(d, &intc_list, list) {
56 int tagged;
57
58 if (strcmp(d->chip.name, chipname) != 0)
59 continue;
60
61 /*
62 * Catch early lookups for subgroup VIRQs that have not
63 * yet been allocated an IRQ. This already includes a
64 * fast-path out if the tree is untagged, so there is no
65 * need to explicitly test the root tree.
66 */
67 tagged = radix_tree_tag_get(&d->tree, enum_id,
68 INTC_TAG_VIRQ_NEEDS_ALLOC);
69 if (unlikely(tagged))
70 break;
71
72 ptr = radix_tree_lookup(&d->tree, enum_id);
73 if (ptr) {
74 irq = ptr - intc_irq_xlate;
75 break;
76 }
77 }
78
79 return irq;
80}
81EXPORT_SYMBOL_GPL(intc_irq_lookup);
82
83static int add_virq_to_pirq(unsigned int irq, unsigned int virq)
84{
85 struct intc_virq_list **last, *entry;
86 struct irq_desc *desc = irq_to_desc(irq);
87
88 /* scan for duplicates */
89 last = (struct intc_virq_list **)&desc->handler_data;
90 for_each_virq(entry, desc->handler_data) {
91 if (entry->irq == virq)
92 return 0;
93 last = &entry->next;
94 }
95
96 entry = kzalloc(sizeof(struct intc_virq_list), GFP_ATOMIC);
97 if (!entry) {
98 pr_err("can't allocate VIRQ mapping for %d\n", virq);
99 return -ENOMEM;
100 }
101
102 entry->irq = virq;
103
104 *last = entry;
105
106 return 0;
107}
108
109static void intc_virq_handler(unsigned int irq, struct irq_desc *desc)
110{
111 struct intc_virq_list *entry, *vlist = get_irq_data(irq);
112 struct intc_desc_int *d = get_intc_desc(irq);
113
114 desc->chip->mask_ack(irq);
115
116 for_each_virq(entry, vlist) {
117 unsigned long addr, handle;
118
119 handle = (unsigned long)get_irq_data(entry->irq);
120 addr = INTC_REG(d, _INTC_ADDR_E(handle), 0);
121
122 if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0))
123 generic_handle_irq(entry->irq);
124 }
125
126 desc->chip->unmask(irq);
127}
128
129static unsigned long __init intc_subgroup_data(struct intc_subgroup *subgroup,
130 struct intc_desc_int *d,
131 unsigned int index)
132{
133 unsigned int fn = REG_FN_TEST_BASE + (subgroup->reg_width >> 3) - 1;
134
135 return _INTC_MK(fn, MODE_ENABLE_REG, intc_get_reg(d, subgroup->reg),
136 0, 1, (subgroup->reg_width - 1) - index);
137}
138
139static void __init intc_subgroup_init_one(struct intc_desc *desc,
140 struct intc_desc_int *d,
141 struct intc_subgroup *subgroup)
142{
143 struct intc_map_entry *mapped;
144 unsigned int pirq;
145 unsigned long flags;
146 int i;
147
148 mapped = radix_tree_lookup(&d->tree, subgroup->parent_id);
149 if (!mapped) {
150 WARN_ON(1);
151 return;
152 }
153
154 pirq = mapped - intc_irq_xlate;
155
156 raw_spin_lock_irqsave(&d->lock, flags);
157
158 for (i = 0; i < ARRAY_SIZE(subgroup->enum_ids); i++) {
159 struct intc_subgroup_entry *entry;
160 int err;
161
162 if (!subgroup->enum_ids[i])
163 continue;
164
165 entry = kmalloc(sizeof(*entry), GFP_NOWAIT);
166 if (!entry)
167 break;
168
169 entry->pirq = pirq;
170 entry->enum_id = subgroup->enum_ids[i];
171 entry->handle = intc_subgroup_data(subgroup, d, i);
172
173 err = radix_tree_insert(&d->tree, entry->enum_id, entry);
174 if (unlikely(err < 0))
175 break;
176
177 radix_tree_tag_set(&d->tree, entry->enum_id,
178 INTC_TAG_VIRQ_NEEDS_ALLOC);
179 }
180
181 raw_spin_unlock_irqrestore(&d->lock, flags);
182}
183
184void __init intc_subgroup_init(struct intc_desc *desc, struct intc_desc_int *d)
185{
186 int i;
187
188 if (!desc->hw.subgroups)
189 return;
190
191 for (i = 0; i < desc->hw.nr_subgroups; i++)
192 intc_subgroup_init_one(desc, d, desc->hw.subgroups + i);
193}
194
195static void __init intc_subgroup_map(struct intc_desc_int *d)
196{
197 struct intc_subgroup_entry *entries[32];
198 unsigned long flags;
199 unsigned int nr_found;
200 int i;
201
202 raw_spin_lock_irqsave(&d->lock, flags);
203
204restart:
205 nr_found = radix_tree_gang_lookup_tag_slot(&d->tree,
206 (void ***)entries, 0, ARRAY_SIZE(entries),
207 INTC_TAG_VIRQ_NEEDS_ALLOC);
208
209 for (i = 0; i < nr_found; i++) {
210 struct intc_subgroup_entry *entry;
211 int irq;
212
213 entry = radix_tree_deref_slot((void **)entries[i]);
214 if (unlikely(!entry))
215 continue;
216 if (unlikely(entry == RADIX_TREE_RETRY))
217 goto restart;
218
219 irq = create_irq();
220 if (unlikely(irq < 0)) {
221 pr_err("no more free IRQs, bailing..\n");
222 break;
223 }
224
225 pr_info("Setting up a chained VIRQ from %d -> %d\n",
226 irq, entry->pirq);
227
228 intc_irq_xlate_set(irq, entry->enum_id, d);
229
230 set_irq_chip_and_handler_name(irq, get_irq_chip(entry->pirq),
231 handle_simple_irq, "virq");
232 set_irq_chip_data(irq, get_irq_chip_data(entry->pirq));
233
234 set_irq_data(irq, (void *)entry->handle);
235
236 set_irq_chained_handler(entry->pirq, intc_virq_handler);
237 add_virq_to_pirq(entry->pirq, irq);
238
239 radix_tree_tag_clear(&d->tree, entry->enum_id,
240 INTC_TAG_VIRQ_NEEDS_ALLOC);
241 radix_tree_replace_slot((void **)entries[i],
242 &intc_irq_xlate[irq]);
243 }
244
245 raw_spin_unlock_irqrestore(&d->lock, flags);
246}
247
248void __init intc_finalize(void)
249{
250 struct intc_desc_int *d;
251
252 list_for_each_entry(d, &intc_list, list)
253 if (radix_tree_tagged(&d->tree, INTC_TAG_VIRQ_NEEDS_ALLOC))
254 intc_subgroup_map(d);
255}
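
None of the radix-tree machinery above is visible to consumers: a platform carves VIRQ subgroups out of a parent vector's status register, and a driver later resolves its enum ID by controller name once intc_finalize() has run. A hedged sketch of both halves — the register address, widths and every name below are invented:

#include <linux/sh_intc.h>

enum { UNUSED = 0, USBI, USB_OHCI, USB_EHCI };	/* stand-in enum IDs */

/* Platform side: split bits of the parent's status register into VIRQs */
static struct intc_subgroup subgroups[] __initdata = {
	{ .reg = 0xfe780024, .reg_width = 32,	/* parent status reg */
	  .parent_id = USBI,			/* IRQ the group chains off */
	  .enum_ids = { USB_OHCI, USB_EHCI } },	/* MSB-first, as usual */
};

/* Driver side: resolve the enum ID to a Linux IRQ after finalization */
static int __init usb_glue_setup(void)
{
	int irq = intc_irq_lookup("sketch-intc", USB_OHCI);

	if (irq < 0)
		return -ENODEV;
	/* request_irq(irq, ...) as usual from here on */
	return 0;
}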