aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/sh/intc
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/sh/intc')
-rw-r--r--drivers/sh/intc/access.c237
-rw-r--r--drivers/sh/intc/balancing.c97
-rw-r--r--drivers/sh/intc/chip.c215
-rw-r--r--drivers/sh/intc/core.c469
-rw-r--r--drivers/sh/intc/dynamic.c135
-rw-r--r--drivers/sh/intc/handle.c307
-rw-r--r--drivers/sh/intc/internals.h185
-rw-r--r--drivers/sh/intc/userimask.c83
-rw-r--r--drivers/sh/intc/virq-debugfs.c64
-rw-r--r--drivers/sh/intc/virq.c255
10 files changed, 2047 insertions, 0 deletions
diff --git a/drivers/sh/intc/access.c b/drivers/sh/intc/access.c
new file mode 100644
index 000000000000..f892ae1d212a
--- /dev/null
+++ b/drivers/sh/intc/access.c
@@ -0,0 +1,237 @@
1/*
2 * Common INTC2 register accessors
3 *
4 * Copyright (C) 2007, 2008 Magnus Damm
5 * Copyright (C) 2009, 2010 Paul Mundt
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11#include <linux/io.h>
12#include "internals.h"
13
14unsigned long intc_phys_to_virt(struct intc_desc_int *d, unsigned long address)
15{
16 struct intc_window *window;
17 int k;
18
19 /* scan through physical windows and convert address */
20 for (k = 0; k < d->nr_windows; k++) {
21 window = d->window + k;
22
23 if (address < window->phys)
24 continue;
25
26 if (address >= (window->phys + window->size))
27 continue;
28
29 address -= window->phys;
30 address += (unsigned long)window->virt;
31
32 return address;
33 }
34
35 /* no windows defined, register must be 1:1 mapped virt:phys */
36 return address;
37}
38
39unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address)
40{
41 unsigned int k;
42
43 address = intc_phys_to_virt(d, address);
44
45 for (k = 0; k < d->nr_reg; k++) {
46 if (d->reg[k] == address)
47 return k;
48 }
49
50 BUG();
51 return 0;
52}
53
/*
 * Insert field_value into the bitfield described by the handle's
 * width/shift encoding, leaving all other bits of value untouched.
 */
unsigned int intc_set_field_from_handle(unsigned int value,
					unsigned int field_value,
					unsigned int handle)
{
	unsigned int shift = _INTC_SHIFT(handle);
	unsigned int mask = ((1 << _INTC_WIDTH(handle)) - 1) << shift;

	return (value & ~mask) | (field_value << shift);
}
65
/*
 * Extract the bitfield described by the handle's width/shift encoding
 * from value and return it right-justified.
 */
unsigned long intc_get_field_from_handle(unsigned int value, unsigned int handle)
{
	unsigned int shift = _INTC_SHIFT(handle);
	unsigned int mask = (1 << _INTC_WIDTH(handle)) - 1;

	return (value >> shift) & mask;
}
74
/* Read an 8-bit register and return the field selected by handle h. */
static unsigned long test_8(unsigned long addr, unsigned long h,
			    unsigned long ignore)
{
	unsigned int value = __raw_readb(addr);

	return intc_get_field_from_handle(value, h);
}
80
/* Read a 16-bit register and return the field selected by handle h. */
static unsigned long test_16(unsigned long addr, unsigned long h,
			     unsigned long ignore)
{
	unsigned int value = __raw_readw(addr);

	return intc_get_field_from_handle(value, h);
}
86
/* Read a 32-bit register and return the field selected by handle h. */
static unsigned long test_32(unsigned long addr, unsigned long h,
			     unsigned long ignore)
{
	unsigned int value = __raw_readl(addr);

	return intc_get_field_from_handle(value, h);
}
92
/* Write data into the handle-selected field of an 8-bit register. */
static unsigned long write_8(unsigned long addr, unsigned long h,
			     unsigned long data)
{
	unsigned int value = intc_set_field_from_handle(0, data, h);

	__raw_writeb(value, addr);
	(void)__raw_readb(addr);	/* Defeat write posting */
	return 0;
}
100
/* Write data into the handle-selected field of a 16-bit register. */
static unsigned long write_16(unsigned long addr, unsigned long h,
			      unsigned long data)
{
	unsigned int value = intc_set_field_from_handle(0, data, h);

	__raw_writew(value, addr);
	(void)__raw_readw(addr);	/* Defeat write posting */
	return 0;
}
108
/* Write data into the handle-selected field of a 32-bit register. */
static unsigned long write_32(unsigned long addr, unsigned long h,
			      unsigned long data)
{
	unsigned int value = intc_set_field_from_handle(0, data, h);

	__raw_writel(value, addr);
	(void)__raw_readl(addr);	/* Defeat write posting */
	return 0;
}
116
/*
 * Read-modify-write the handle-selected field of an 8-bit register.
 * Local interrupts are disabled across the sequence so the update is
 * atomic with respect to other register users on this CPU.
 */
static unsigned long modify_8(unsigned long addr, unsigned long h,
			      unsigned long data)
{
	unsigned long flags;
	unsigned int value;
	local_irq_save(flags);
	value = intc_set_field_from_handle(__raw_readb(addr), data, h);
	__raw_writeb(value, addr);
	(void)__raw_readb(addr); /* Defeat write posting */
	local_irq_restore(flags);
	return 0;
}
129
/*
 * Read-modify-write the handle-selected field of a 16-bit register.
 * Local interrupts are disabled across the sequence so the update is
 * atomic with respect to other register users on this CPU.
 */
static unsigned long modify_16(unsigned long addr, unsigned long h,
			       unsigned long data)
{
	unsigned long flags;
	unsigned int value;
	local_irq_save(flags);
	value = intc_set_field_from_handle(__raw_readw(addr), data, h);
	__raw_writew(value, addr);
	(void)__raw_readw(addr); /* Defeat write posting */
	local_irq_restore(flags);
	return 0;
}
142
/*
 * Read-modify-write the handle-selected field of a 32-bit register.
 * Local interrupts are disabled across the sequence so the update is
 * atomic with respect to other register users on this CPU.
 */
static unsigned long modify_32(unsigned long addr, unsigned long h,
			       unsigned long data)
{
	unsigned long flags;
	unsigned int value;
	local_irq_save(flags);
	value = intc_set_field_from_handle(__raw_readl(addr), data, h);
	__raw_writel(value, addr);
	(void)__raw_readl(addr); /* Defeat write posting */
	local_irq_restore(flags);
	return 0;
}
155
/* Write an all-ones field value (fully asserted) via the accessor fn. */
static unsigned long intc_mode_field(unsigned long addr,
				     unsigned long handle,
				     unsigned long (*fn)(unsigned long,
							 unsigned long,
							 unsigned long),
				     unsigned int irq)
{
	unsigned long all_set = (1 << _INTC_WIDTH(handle)) - 1;

	return fn(addr, handle, all_set);
}
165
/* Write a zero field value (fully deasserted) via the accessor fn. */
static unsigned long intc_mode_zero(unsigned long addr,
				    unsigned long handle,
				    unsigned long (*fn)(unsigned long,
							unsigned long,
							unsigned long),
				    unsigned int irq)
{
	unsigned long cleared = 0;

	return fn(addr, handle, cleared);
}
175
/* Write the IRQ's cached priority level via the accessor fn. */
static unsigned long intc_mode_prio(unsigned long addr,
				    unsigned long handle,
				    unsigned long (*fn)(unsigned long,
							unsigned long,
							unsigned long),
				    unsigned int irq)
{
	unsigned long prio = intc_get_prio_level(irq);

	return fn(addr, handle, prio);
}
185
/*
 * Register accessor table, indexed by REG_FN_*_BASE plus
 * (reg_width >> 3) - 1.  Widths of 8/16/32 bits therefore map to
 * offsets 0, 1 and 3 respectively; offset 2 is intentionally unused.
 */
unsigned long (*intc_reg_fns[])(unsigned long addr,
				unsigned long h,
				unsigned long data) = {
	[REG_FN_TEST_BASE + 0] = test_8,
	[REG_FN_TEST_BASE + 1] = test_16,
	[REG_FN_TEST_BASE + 3] = test_32,
	[REG_FN_WRITE_BASE + 0] = write_8,
	[REG_FN_WRITE_BASE + 1] = write_16,
	[REG_FN_WRITE_BASE + 3] = write_32,
	[REG_FN_MODIFY_BASE + 0] = modify_8,
	[REG_FN_MODIFY_BASE + 1] = modify_16,
	[REG_FN_MODIFY_BASE + 3] = modify_32,
};
199
/*
 * Enable strategies, indexed by register MODE_*.  Enable registers
 * assert the field, mask registers clear it, and priority registers
 * restore the IRQ's cached priority level.
 */
unsigned long (*intc_enable_fns[])(unsigned long addr,
				   unsigned long handle,
				   unsigned long (*fn)(unsigned long,
						       unsigned long,
						       unsigned long),
				   unsigned int irq) = {
	[MODE_ENABLE_REG] = intc_mode_field,
	[MODE_MASK_REG] = intc_mode_zero,
	[MODE_DUAL_REG] = intc_mode_field,
	[MODE_PRIO_REG] = intc_mode_prio,
	[MODE_PCLR_REG] = intc_mode_prio,
};
212
/*
 * Disable strategies, indexed by register MODE_* — the mirror image of
 * intc_enable_fns: enable registers are zeroed, mask/clear registers
 * have their field asserted.
 */
unsigned long (*intc_disable_fns[])(unsigned long addr,
				    unsigned long handle,
				    unsigned long (*fn)(unsigned long,
							unsigned long,
							unsigned long),
				    unsigned int irq) = {
	[MODE_ENABLE_REG] = intc_mode_zero,
	[MODE_MASK_REG] = intc_mode_field,
	[MODE_DUAL_REG] = intc_mode_field,
	[MODE_PRIO_REG] = intc_mode_zero,
	[MODE_PCLR_REG] = intc_mode_field,
};
225
/*
 * Enable strategies that deliberately skip priority programming:
 * priority-style registers get the full field written instead of the
 * cached priority level.
 */
unsigned long (*intc_enable_noprio_fns[])(unsigned long addr,
					  unsigned long handle,
					  unsigned long (*fn)(unsigned long,
							      unsigned long,
							      unsigned long),
					  unsigned int irq) = {
	[MODE_ENABLE_REG] = intc_mode_field,
	[MODE_MASK_REG] = intc_mode_zero,
	[MODE_DUAL_REG] = intc_mode_field,
	[MODE_PRIO_REG] = intc_mode_field,
	[MODE_PCLR_REG] = intc_mode_field,
};
diff --git a/drivers/sh/intc/balancing.c b/drivers/sh/intc/balancing.c
new file mode 100644
index 000000000000..cec7a96f2c09
--- /dev/null
+++ b/drivers/sh/intc/balancing.c
@@ -0,0 +1,97 @@
1/*
2 * Support for hardware-managed IRQ auto-distribution.
3 *
4 * Copyright (C) 2010 Paul Mundt
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10#include "internals.h"
11
12static unsigned long dist_handle[NR_IRQS];
13
14void intc_balancing_enable(unsigned int irq)
15{
16 struct intc_desc_int *d = get_intc_desc(irq);
17 unsigned long handle = dist_handle[irq];
18 unsigned long addr;
19
20 if (irq_balancing_disabled(irq) || !handle)
21 return;
22
23 addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
24 intc_reg_fns[_INTC_FN(handle)](addr, handle, 1);
25}
26
27void intc_balancing_disable(unsigned int irq)
28{
29 struct intc_desc_int *d = get_intc_desc(irq);
30 unsigned long handle = dist_handle[irq];
31 unsigned long addr;
32
33 if (irq_balancing_disabled(irq) || !handle)
34 return;
35
36 addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
37 intc_reg_fns[_INTC_FN(handle)](addr, handle, 0);
38}
39
/*
 * Build the packed _INTC_MK() handle describing the auto-distribution
 * register bit for enum_id, or 0 when the vector has no distribution
 * register.  The bit position is counted from the MSB of the register,
 * matching the mask register layout.
 */
static unsigned int intc_dist_data(struct intc_desc *desc,
				   struct intc_desc_int *d,
				   intc_enum enum_id)
{
	struct intc_mask_reg *mr = desc->hw.mask_regs;
	unsigned int i, j, fn, mode;
	unsigned long reg_e, reg_d;

	for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) {
		mr = desc->hw.mask_regs + i;

		/*
		 * Skip this entry if there's no auto-distribution
		 * register associated with it.
		 */
		if (!mr->dist_reg)
			continue;

		for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
			if (mr->enum_ids[j] != enum_id)
				continue;

			fn = REG_FN_MODIFY_BASE;
			mode = MODE_ENABLE_REG;
			/* dist reg serves as both enable and disable reg */
			reg_e = mr->dist_reg;
			reg_d = mr->dist_reg;

			/* select the 8/16/32-bit accessor for reg_width */
			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					1,
					(mr->reg_width - 1) - j);
		}
	}

	/*
	 * It's possible we've gotten here with no distribution options
	 * available for the IRQ in question, so we just skip over those.
	 */
	return 0;
}
82
/*
 * Record (under intc_big_lock) the distribution handle for an IRQ so
 * that intc_balancing_enable()/disable() can later act on it.
 */
void intc_set_dist_handle(unsigned int irq, struct intc_desc *desc,
			  struct intc_desc_int *d, intc_enum id)
{
	unsigned long flags;

	/*
	 * Nothing to do for this IRQ.
	 */
	if (!desc->hw.mask_regs)
		return;

	raw_spin_lock_irqsave(&intc_big_lock, flags);
	dist_handle[irq] = intc_dist_data(desc, d, id);
	raw_spin_unlock_irqrestore(&intc_big_lock, flags);
}
diff --git a/drivers/sh/intc/chip.c b/drivers/sh/intc/chip.c
new file mode 100644
index 000000000000..35c03706cc21
--- /dev/null
+++ b/drivers/sh/intc/chip.c
@@ -0,0 +1,215 @@
1/*
2 * IRQ chip definitions for INTC IRQs.
3 *
4 * Copyright (C) 2007, 2008 Magnus Damm
5 * Copyright (C) 2009, 2010 Paul Mundt
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11#include <linux/cpumask.h>
12#include <linux/io.h>
13#include "internals.h"
14
/*
 * Enable an IRQ via the given enable handle on each CPU the register
 * block spans.  On SMP, per-CPU register copies are skipped unless the
 * CPU is in the IRQ's affinity mask (set via intc_set_affinity()).
 * Hardware balancing, if configured, is re-enabled afterwards.
 */
void _intc_enable(unsigned int irq, unsigned long handle)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long addr;
	unsigned int cpu;

	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
#ifdef CONFIG_SMP
		if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
			continue;
#endif
		addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
		intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
						    [_INTC_FN(handle)], irq);
	}

	intc_balancing_enable(irq);
}
33
/* irq_chip enable/unmask hook: enable using the primary handle stored
 * in the IRQ's chip data. */
static void intc_enable(unsigned int irq)
{
	unsigned long handle = (unsigned long)get_irq_chip_data(irq);

	_intc_enable(irq, handle);
}
38
/*
 * irq_chip disable/mask hook: stop hardware balancing first, then
 * disable the IRQ via its primary handle on each CPU the register
 * block spans (honouring the affinity mask on SMP).
 */
static void intc_disable(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = (unsigned long)get_irq_chip_data(irq);
	unsigned long addr;
	unsigned int cpu;

	intc_balancing_disable(irq);

	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
#ifdef CONFIG_SMP
		if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
			continue;
#endif
		addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
		intc_disable_fns[_INTC_MODE(handle)](addr, handle,intc_reg_fns\
						     [_INTC_FN(handle)], irq);
	}
}
58
/*
 * irq_chip set_wake hook.  Always succeeds: the actual wakeup source
 * programming happens later, in intc_suspend().
 */
static int intc_set_wake(unsigned int irq, unsigned int on)
{
	return 0;
}
63
#ifdef CONFIG_SMP
/*
 * This is held with the irq desc lock held, so we don't require any
 * additional locking here at the intc desc level. The affinity mask is
 * later tested in the enable/disable paths.
 */
static int intc_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	/* refuse masks with no online CPU */
	if (!cpumask_intersects(cpumask, cpu_online_mask))
		return -1;	/* NOTE(review): -1 rather than -EINVAL — confirm callers only test < 0 */

	cpumask_copy(irq_to_desc(irq)->affinity, cpumask);

	return 0;
}
#endif
80
/*
 * irq_chip mask_ack hook: disable the IRQ, then acknowledge it by
 * writing 0 to its bit in the ack register (all other bits written as
 * 1, so only the associated source is acked).  The read before the
 * write follows the hardware's read-then-write ack sequence.
 */
static void intc_mask_ack(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = intc_get_ack_handle(irq);
	unsigned long addr;

	intc_disable(irq);

	/* read register and write zero only to the associated bit */
	if (handle) {
		unsigned int value;

		addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
		value = intc_set_field_from_handle(0, 1, handle);

		switch (_INTC_FN(handle)) {
		case REG_FN_MODIFY_BASE + 0:	/* 8bit */
			__raw_readb(addr);
			__raw_writeb(0xff ^ value, addr);
			break;
		case REG_FN_MODIFY_BASE + 1:	/* 16bit */
			__raw_readw(addr);
			__raw_writew(0xffff ^ value, addr);
			break;
		case REG_FN_MODIFY_BASE + 3:	/* 32bit */
			__raw_readl(addr);
			__raw_writel(0xffffffff ^ value, addr);
			break;
		default:
			BUG();
			break;
		}
	}
}
115
116static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
117 unsigned int nr_hp,
118 unsigned int irq)
119{
120 int i;
121
122 /*
123 * this doesn't scale well, but...
124 *
125 * this function should only be used for cerain uncommon
126 * operations such as intc_set_priority() and intc_set_type()
127 * and in those rare cases performance doesn't matter that much.
128 * keeping the memory footprint low is more important.
129 *
130 * one rather simple way to speed this up and still keep the
131 * memory footprint down is to make sure the array is sorted
132 * and then perform a bisect to lookup the irq.
133 */
134 for (i = 0; i < nr_hp; i++) {
135 if ((hp + i)->irq != irq)
136 continue;
137
138 return hp + i;
139 }
140
141 return NULL;
142}
143
/*
 * Set the priority level for an IRQ that has a priority handle.
 * Returns -EINVAL when the IRQ has no priority level, prio is out of
 * the sensible range (<= 1), or prio exceeds the register field width.
 */
int intc_set_priority(unsigned int irq, unsigned int prio)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	struct intc_handle_int *ihp;

	if (!intc_get_prio_level(irq) || prio <= 1)
		return -EINVAL;

	ihp = intc_find_irq(d->prio, d->nr_prio, irq);
	if (ihp) {
		/* prio must fit in the register field */
		if (prio >= (1 << _INTC_WIDTH(ihp->handle)))
			return -EINVAL;

		intc_set_prio_level(irq, prio);

		/*
		 * only set secondary masking method directly
		 * primary masking method is using intc_prio_level[irq]
		 * priority level will be set during next enable()
		 */
		if (_INTC_FN(ihp->handle) != REG_FN_ERR)
			_intc_enable(irq, ihp->handle);
	}
	return 0;
}
169
170#define VALID(x) (x | 0x80)
171
172static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
173 [IRQ_TYPE_EDGE_FALLING] = VALID(0),
174 [IRQ_TYPE_EDGE_RISING] = VALID(1),
175 [IRQ_TYPE_LEVEL_LOW] = VALID(2),
176 /* SH7706, SH7707 and SH7709 do not support high level triggered */
177#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \
178 !defined(CONFIG_CPU_SUBTYPE_SH7707) && \
179 !defined(CONFIG_CPU_SUBTYPE_SH7709)
180 [IRQ_TYPE_LEVEL_HIGH] = VALID(3),
181#endif
182};
183
184static int intc_set_type(unsigned int irq, unsigned int type)
185{
186 struct intc_desc_int *d = get_intc_desc(irq);
187 unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
188 struct intc_handle_int *ihp;
189 unsigned long addr;
190
191 if (!value)
192 return -EINVAL;
193
194 ihp = intc_find_irq(d->sense, d->nr_sense, irq);
195 if (ihp) {
196 addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
197 intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
198 }
199
200 return 0;
201}
202
/*
 * Template irq_chip for INTC IRQs; register_intc_controller() copies
 * this per-controller and fills in the name (and possibly mask_ack).
 */
struct irq_chip intc_irq_chip = {
	.mask		= intc_disable,
	.unmask		= intc_enable,
	.mask_ack	= intc_mask_ack,
	.enable		= intc_enable,
	.disable	= intc_disable,
	.shutdown	= intc_disable,
	.set_type	= intc_set_type,
	.set_wake	= intc_set_wake,
#ifdef CONFIG_SMP
	.set_affinity	= intc_set_affinity,
#endif
};
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
new file mode 100644
index 000000000000..306ed287077a
--- /dev/null
+++ b/drivers/sh/intc/core.c
@@ -0,0 +1,469 @@
1/*
2 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
3 *
4 * Copyright (C) 2007, 2008 Magnus Damm
5 * Copyright (C) 2009, 2010 Paul Mundt
6 *
7 * Based on intc2.c and ipr.c
8 *
9 * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
10 * Copyright (C) 2000 Kazumoto Kojima
11 * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
12 * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
13 * Copyright (C) 2005, 2006 Paul Mundt
14 *
15 * This file is subject to the terms and conditions of the GNU General Public
16 * License. See the file "COPYING" in the main directory of this archive
17 * for more details.
18 */
19#define pr_fmt(fmt) "intc: " fmt
20
21#include <linux/init.h>
22#include <linux/irq.h>
23#include <linux/io.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26#include <linux/sh_intc.h>
27#include <linux/sysdev.h>
28#include <linux/list.h>
29#include <linux/spinlock.h>
30#include <linux/radix-tree.h>
31#include "internals.h"
32
/* All registered controllers; additions are serialized by intc_big_lock. */
LIST_HEAD(intc_list);
DEFINE_RAW_SPINLOCK(intc_big_lock);
unsigned int nr_intc_controllers;

/*
 * Default priority level
 * - this needs to be at least 2 for 5-bit priorities on 7780
 */
static unsigned int default_prio_level = 2;	/* 2 - 16 */
static unsigned int intc_prio_level[NR_IRQS];	/* for now */
43
/* Return the controller-wide default priority level. */
unsigned int intc_get_dfl_prio_level(void)
{
	return default_prio_level;
}
48
/* Return the cached priority level for an IRQ. */
unsigned int intc_get_prio_level(unsigned int irq)
{
	return intc_prio_level[irq];
}
53
/* Update the cached priority level for an IRQ under intc_big_lock. */
void intc_set_prio_level(unsigned int irq, unsigned int level)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&intc_big_lock, flags);
	intc_prio_level[irq] = level;
	raw_spin_unlock_irqrestore(&intc_big_lock, flags);
}
62
/*
 * Chained handler for secondary multi-evt vectors: re-dispatch to the
 * primary IRQ number that register_intc_controller() stashed in this
 * IRQ's data via set_irq_data().
 */
static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
{
	generic_handle_irq((unsigned int)get_irq_data(irq));
}
67
/*
 * Wire up a single IRQ: reserve its vector, insert it in the enum ->
 * IRQ radix tree, pick primary/secondary masking methods, register the
 * chip and handler, and record priority/sense/ack/distribution handles.
 * The IRQ is left disabled; drivers enable it via request_irq().
 */
static void __init intc_register_irq(struct intc_desc *desc,
				     struct intc_desc_int *d,
				     intc_enum enum_id,
				     unsigned int irq)
{
	struct intc_handle_int *hp;
	unsigned int data[2], primary;
	unsigned long flags;

	/*
	 * Register the IRQ position with the global IRQ map, then insert
	 * it in to the radix tree.
	 */
	reserve_irq_vector(irq);

	raw_spin_lock_irqsave(&intc_big_lock, flags);
	radix_tree_insert(&d->tree, enum_id, intc_irq_xlate_get(irq));
	raw_spin_unlock_irqrestore(&intc_big_lock, flags);

	/*
	 * Prefer single interrupt source bitmap over other combinations:
	 *
	 * 1. bitmap, single interrupt source
	 * 2. priority, single interrupt source
	 * 3. bitmap, multiple interrupt sources (groups)
	 * 4. priority, multiple interrupt sources (groups)
	 */
	data[0] = intc_get_mask_handle(desc, d, enum_id, 0);
	data[1] = intc_get_prio_handle(desc, d, enum_id, 0);

	primary = 0;
	if (!data[0] && data[1])
		primary = 1;

	if (!data[0] && !data[1])
		pr_warning("missing unique irq mask for irq %d (vect 0x%04x)\n",
			   irq, irq2evt(irq));

	/* fall back to group (multi-source) handles where needed */
	data[0] = data[0] ? data[0] : intc_get_mask_handle(desc, d, enum_id, 1);
	data[1] = data[1] ? data[1] : intc_get_prio_handle(desc, d, enum_id, 1);

	if (!data[primary])
		primary ^= 1;

	BUG_ON(!data[primary]); /* must have primary masking method */

	disable_irq_nosync(irq);
	set_irq_chip_and_handler_name(irq, &d->chip,
				      handle_level_irq, "level");
	set_irq_chip_data(irq, (void *)data[primary]);

	/*
	 * set priority level
	 */
	intc_set_prio_level(irq, intc_get_dfl_prio_level());

	/* enable secondary masking method if present */
	if (data[!primary])
		_intc_enable(irq, data[!primary]);

	/* add irq to d->prio list if priority is available */
	if (data[1]) {
		hp = d->prio + d->nr_prio;
		hp->irq = irq;
		hp->handle = data[1];

		if (primary) {
			/*
			 * only secondary priority should access registers, so
			 * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
			 */
			hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
			hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
		}
		d->nr_prio++;
	}

	/* add irq to d->sense list if sense is available */
	data[0] = intc_get_sense_handle(desc, d, enum_id);
	if (data[0]) {
		(d->sense + d->nr_sense)->irq = irq;
		(d->sense + d->nr_sense)->handle = data[0];
		d->nr_sense++;
	}

	/* irq should be disabled by default */
	d->chip.mask(irq);

	intc_set_ack_handle(irq, desc, d, enum_id);
	intc_set_dist_handle(irq, desc, d, enum_id);

	activate_irq(irq);
}
161
162static unsigned int __init save_reg(struct intc_desc_int *d,
163 unsigned int cnt,
164 unsigned long value,
165 unsigned int smp)
166{
167 if (value) {
168 value = intc_phys_to_virt(d, value);
169
170 d->reg[cnt] = value;
171#ifdef CONFIG_SMP
172 d->smp[cnt] = smp;
173#endif
174 return 1;
175 }
176
177 return 0;
178}
179
/*
 * Register an INTC controller described by desc: map its register
 * windows, cache all register addresses, copy the template irq_chip,
 * and register every vector (redirecting duplicate multi-evt vectors
 * to their primary IRQ).  Returns 0 on success or -ENOMEM, unwinding
 * all partial allocations on failure.
 */
int __init register_intc_controller(struct intc_desc *desc)
{
	unsigned int i, k, smp;
	struct intc_hw_desc *hw = &desc->hw;
	struct intc_desc_int *d;
	struct resource *res;

	pr_info("Registered controller '%s' with %u IRQs\n",
		desc->name, hw->nr_vectors);

	d = kzalloc(sizeof(*d), GFP_NOWAIT);
	if (!d)
		goto err0;

	INIT_LIST_HEAD(&d->list);
	list_add_tail(&d->list, &intc_list);

	raw_spin_lock_init(&d->lock);

	d->index = nr_intc_controllers;

	/* ioremap each memory resource as a register window */
	if (desc->num_resources) {
		d->nr_windows = desc->num_resources;
		d->window = kzalloc(d->nr_windows * sizeof(*d->window),
				    GFP_NOWAIT);
		if (!d->window)
			goto err1;

		for (k = 0; k < d->nr_windows; k++) {
			res = desc->resource + k;
			WARN_ON(resource_type(res) != IORESOURCE_MEM);
			d->window[k].phys = res->start;
			d->window[k].size = resource_size(res);
			d->window[k].virt = ioremap_nocache(res->start,
							 resource_size(res));
			if (!d->window[k].virt)
				goto err2;
		}
	}

	/* size the register cache: mask regs have set+clr (+dist) entries */
	d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
#ifdef CONFIG_INTC_BALANCING
	if (d->nr_reg)
		d->nr_reg += hw->nr_mask_regs;
#endif
	d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
	d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
	d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;
	d->nr_reg += hw->subgroups ? hw->nr_subgroups : 0;

	d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
	if (!d->reg)
		goto err2;

#ifdef CONFIG_SMP
	d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
	if (!d->smp)
		goto err3;
#endif
	k = 0;

	/* k counts cached registers; save_reg() skips absent addresses */
	if (hw->mask_regs) {
		for (i = 0; i < hw->nr_mask_regs; i++) {
			smp = IS_SMP(hw->mask_regs[i]);
			k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
			k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
#ifdef CONFIG_INTC_BALANCING
			k += save_reg(d, k, hw->mask_regs[i].dist_reg, 0);
#endif
		}
	}

	if (hw->prio_regs) {
		d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
				  GFP_NOWAIT);
		if (!d->prio)
			goto err4;

		for (i = 0; i < hw->nr_prio_regs; i++) {
			smp = IS_SMP(hw->prio_regs[i]);
			k += save_reg(d, k, hw->prio_regs[i].set_reg, smp);
			k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp);
		}
	}

	if (hw->sense_regs) {
		d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
				   GFP_NOWAIT);
		if (!d->sense)
			goto err5;

		for (i = 0; i < hw->nr_sense_regs; i++)
			k += save_reg(d, k, hw->sense_regs[i].reg, 0);
	}

	if (hw->subgroups)
		for (i = 0; i < hw->nr_subgroups; i++)
			if (hw->subgroups[i].reg)
				k+= save_reg(d, k, hw->subgroups[i].reg, 0);

	/* per-controller chip copy, named after the controller */
	memcpy(&d->chip, &intc_irq_chip, sizeof(struct irq_chip));
	d->chip.name = desc->name;

	if (hw->ack_regs)
		for (i = 0; i < hw->nr_ack_regs; i++)
			k += save_reg(d, k, hw->ack_regs[i].set_reg, 0);
	else
		d->chip.mask_ack = d->chip.disable;

	/* disable bits matching force_disable before registering irqs */
	if (desc->force_disable)
		intc_enable_disable_enum(desc, d, desc->force_disable, 0);

	/* disable bits matching force_enable before registering irqs */
	if (desc->force_enable)
		intc_enable_disable_enum(desc, d, desc->force_enable, 0);

	BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */

	/* register the vectors one by one */
	for (i = 0; i < hw->nr_vectors; i++) {
		struct intc_vect *vect = hw->vectors + i;
		unsigned int irq = evt2irq(vect->vect);
		struct irq_desc *irq_desc;

		if (!vect->enum_id)
			continue;

		irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
		if (unlikely(!irq_desc)) {
			pr_err("can't get irq_desc for %d\n", irq);
			continue;
		}

		intc_irq_xlate_set(irq, vect->enum_id, d);
		intc_register_irq(desc, d, vect->enum_id, irq);

		/* vectors sharing an enum id become redirects to this irq */
		for (k = i + 1; k < hw->nr_vectors; k++) {
			struct intc_vect *vect2 = hw->vectors + k;
			unsigned int irq2 = evt2irq(vect2->vect);

			if (vect->enum_id != vect2->enum_id)
				continue;

			/*
			 * In the case of multi-evt handling and sparse
			 * IRQ support, each vector still needs to have
			 * its own backing irq_desc.
			 */
			irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id());
			if (unlikely(!irq_desc)) {
				pr_err("can't get irq_desc for %d\n", irq2);
				continue;
			}

			vect2->enum_id = 0;

			/* redirect this interrupts to the first one */
			set_irq_chip(irq2, &dummy_irq_chip);
			set_irq_chained_handler(irq2, intc_redirect_irq);
			set_irq_data(irq2, (void *)irq);
		}
	}

	intc_subgroup_init(desc, d);

	/* enable bits matching force_enable after registering irqs */
	if (desc->force_enable)
		intc_enable_disable_enum(desc, d, desc->force_enable, 1);

	nr_intc_controllers++;

	return 0;
err5:
	kfree(d->prio);
err4:
#ifdef CONFIG_SMP
	kfree(d->smp);
err3:
#endif
	kfree(d->reg);
err2:
	for (k = 0; k < d->nr_windows; k++)
		if (d->window[k].virt)
			iounmap(d->window[k].virt);

	kfree(d->window);
err1:
	kfree(d);
err0:
	pr_err("unable to allocate INTC memory\n");

	return -ENOMEM;
}
374
375static ssize_t
376show_intc_name(struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
377{
378 struct intc_desc_int *d;
379
380 d = container_of(dev, struct intc_desc_int, sysdev);
381
382 return sprintf(buf, "%s\n", d->chip.name);
383}
384
385static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);
386
/*
 * sysdev suspend hook, also reused for resume via intc_suspend(dev,
 * PMSG_ON).  On resume (PM_EVENT_ON after a freeze) the enable state
 * of every IRQ on this controller is re-applied; on suspend only
 * wakeup IRQs are left enabled.  The previous state is recorded so
 * resume knows whether a freeze actually happened.
 */
static int intc_suspend(struct sys_device *dev, pm_message_t state)
{
	struct intc_desc_int *d;
	struct irq_desc *desc;
	int irq;

	/* get intc controller associated with this sysdev */
	d = container_of(dev, struct intc_desc_int, sysdev);

	switch (state.event) {
	case PM_EVENT_ON:
		if (d->state.event != PM_EVENT_FREEZE)
			break;

		for_each_irq_desc(irq, desc) {
			/*
			 * This will catch the redirect and VIRQ cases
			 * due to the dummy_irq_chip being inserted.
			 */
			if (desc->chip != &d->chip)
				continue;
			if (desc->status & IRQ_DISABLED)
				desc->chip->disable(irq);
			else
				desc->chip->enable(irq);
		}
		break;
	case PM_EVENT_FREEZE:
		/* nothing has to be done */
		break;
	case PM_EVENT_SUSPEND:
		/* enable wakeup irqs belonging to this intc controller */
		for_each_irq_desc(irq, desc) {
			if (desc->chip != &d->chip)
				continue;
			if ((desc->status & IRQ_WAKEUP))
				desc->chip->enable(irq);
		}
		break;
	}

	/* remember the last PM event for the resume path above */
	d->state = state;

	return 0;
}
432
/* sysdev resume hook: re-run the suspend logic with the ON event. */
static int intc_resume(struct sys_device *dev)
{
	return intc_suspend(dev, PMSG_ON);
}
437
/* sysdev class exposing each controller under /sys/devices/system/intc. */
struct sysdev_class intc_sysdev_class = {
	.name		= "intc",
	.suspend	= intc_suspend,
	.resume		= intc_resume,
};
443
444/* register this intc as sysdev to allow suspend/resume */
/* register this intc as sysdev to allow suspend/resume */
static int __init register_intc_sysdevs(void)
{
	struct intc_desc_int *d;
	int error;

	error = sysdev_class_register(&intc_sysdev_class);
	if (!error) {
		/* one sysdev per registered controller, plus a 'name' file */
		list_for_each_entry(d, &intc_list, list) {
			d->sysdev.id = d->index;
			d->sysdev.cls = &intc_sysdev_class;
			error = sysdev_register(&d->sysdev);
			if (error == 0)
				error = sysdev_create_file(&d->sysdev,
							   &attr_name);
			if (error)
				break;
		}
	}

	if (error)
		pr_err("sysdev registration error\n");

	return error;
}
469device_initcall(register_intc_sysdevs);
diff --git a/drivers/sh/intc/dynamic.c b/drivers/sh/intc/dynamic.c
new file mode 100644
index 000000000000..6caecdffe201
--- /dev/null
+++ b/drivers/sh/intc/dynamic.c
@@ -0,0 +1,135 @@
1/*
2 * Dynamic IRQ management
3 *
4 * Copyright (C) 2010 Paul Mundt
5 *
6 * Modelled after arch/x86/kernel/apic/io_apic.c
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#define pr_fmt(fmt) "intc: " fmt
13
14#include <linux/irq.h>
15#include <linux/bitmap.h>
16#include <linux/spinlock.h>
17#include "internals.h" /* only for activate_irq() damage.. */
18
19/*
20 * The intc_irq_map provides a global map of bound IRQ vectors for a
21 * given platform. Allocation of IRQs are either static through the CPU
22 * vector map, or dynamic in the case of board mux vectors or MSI.
23 *
24 * As this is a central point for all IRQ controllers on the system,
25 * each of the available sources are mapped out here. This combined with
26 * sparseirq makes it quite trivial to keep the vector map tightly packed
27 * when dynamically creating IRQs, as well as tying in to otherwise
28 * unused irq_desc positions in the sparse array.
29 */
30static DECLARE_BITMAP(intc_irq_map, NR_IRQS);
31static DEFINE_RAW_SPINLOCK(vector_lock);
32
33/*
34 * Dynamic IRQ allocation and deallocation
35 */
/*
 * Allocate a dynamic IRQ, preferring irq_want and falling back to the
 * first free slot in the vector map.  Returns the allocated IRQ, or 0
 * when the map is exhausted or no irq_desc could be allocated.
 * vector_lock protects the bitmap; irq init happens outside the lock.
 */
unsigned int create_irq_nr(unsigned int irq_want, int node)
{
	unsigned int irq = 0, new;
	unsigned long flags;
	struct irq_desc *desc;

	raw_spin_lock_irqsave(&vector_lock, flags);

	/*
	 * First try the wanted IRQ
	 */
	if (test_and_set_bit(irq_want, intc_irq_map) == 0) {
		new = irq_want;
	} else {
		/* .. then fall back to scanning. */
		new = find_first_zero_bit(intc_irq_map, nr_irqs);
		if (unlikely(new == nr_irqs))
			goto out_unlock;

		__set_bit(new, intc_irq_map);
	}

	desc = irq_to_desc_alloc_node(new, node);
	if (unlikely(!desc)) {
		pr_err("can't get irq_desc for %d\n", new);
		goto out_unlock;
	}

	desc = move_irq_desc(desc, node);
	irq = new;

out_unlock:
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	if (irq > 0) {
		dynamic_irq_init(irq);
		activate_irq(irq);
	}

	return irq;
}
77
78int create_irq(void)
79{
80 int nid = cpu_to_node(smp_processor_id());
81 int irq;
82
83 irq = create_irq_nr(NR_IRQS_LEGACY, nid);
84 if (irq == 0)
85 irq = -1;
86
87 return irq;
88}
89
/* Tear down a dynamically created IRQ and return it to the vector map. */
void destroy_irq(unsigned int irq)
{
	unsigned long flags;

	dynamic_irq_cleanup(irq);

	raw_spin_lock_irqsave(&vector_lock, flags);
	__clear_bit(irq, intc_irq_map);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
}
100
101int reserve_irq_vector(unsigned int irq)
102{
103 unsigned long flags;
104 int ret = 0;
105
106 raw_spin_lock_irqsave(&vector_lock, flags);
107 if (test_and_set_bit(irq, intc_irq_map))
108 ret = -EBUSY;
109 raw_spin_unlock_irqrestore(&vector_lock, flags);
110
111 return ret;
112}
113
114void reserve_intc_vectors(struct intc_vect *vectors, unsigned int nr_vecs)
115{
116 unsigned long flags;
117 int i;
118
119 raw_spin_lock_irqsave(&vector_lock, flags);
120 for (i = 0; i < nr_vecs; i++)
121 __set_bit(evt2irq(vectors[i].vect), intc_irq_map);
122 raw_spin_unlock_irqrestore(&vector_lock, flags);
123}
124
125void reserve_irq_legacy(void)
126{
127 unsigned long flags;
128 int i, j;
129
130 raw_spin_lock_irqsave(&vector_lock, flags);
131 j = find_first_bit(intc_irq_map, nr_irqs);
132 for (i = 0; i < j; i++)
133 __set_bit(i, intc_irq_map);
134 raw_spin_unlock_irqrestore(&vector_lock, flags);
135}
diff --git a/drivers/sh/intc/handle.c b/drivers/sh/intc/handle.c
new file mode 100644
index 000000000000..057ce56829bf
--- /dev/null
+++ b/drivers/sh/intc/handle.c
@@ -0,0 +1,307 @@
1/*
2 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
3 *
4 * Copyright (C) 2007, 2008 Magnus Damm
5 * Copyright (C) 2009, 2010 Paul Mundt
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11#include <linux/init.h>
12#include <linux/irq.h>
13#include <linux/spinlock.h>
14#include "internals.h"
15
16static unsigned long ack_handle[NR_IRQS];
17
18static intc_enum __init intc_grp_id(struct intc_desc *desc,
19 intc_enum enum_id)
20{
21 struct intc_group *g = desc->hw.groups;
22 unsigned int i, j;
23
24 for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) {
25 g = desc->hw.groups + i;
26
27 for (j = 0; g->enum_ids[j]; j++) {
28 if (g->enum_ids[j] != enum_id)
29 continue;
30
31 return g->enum_id;
32 }
33 }
34
35 return 0;
36}
37
/*
 * Find the mask register bit that controls @enum_id, resuming the scan
 * from *reg_idx/*fld_idx so callers can iterate over every match.
 *
 * On a match, returns a packed _INTC_MK() handle describing the access
 * function, mode and register indices; the indices are left pointing
 * at the match so a subsequent call (after bumping *fld_idx) finds the
 * next occurrence.  Returns 0 when no further match exists.
 */
static unsigned int __init _intc_mask_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   intc_enum enum_id,
					   unsigned int *reg_idx,
					   unsigned int *fld_idx)
{
	struct intc_mask_reg *mr = desc->hw.mask_regs;
	unsigned int fn, mode;
	unsigned long reg_e, reg_d;

	while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) {
		mr = desc->hw.mask_regs + *reg_idx;

		for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) {
			if (mr->enum_ids[*fld_idx] != enum_id)
				continue;

			if (mr->set_reg && mr->clr_reg) {
				/* separate set/clear register pair;
				 * writing the clear reg enables (unmasks) */
				fn = REG_FN_WRITE_BASE;
				mode = MODE_DUAL_REG;
				reg_e = mr->clr_reg;
				reg_d = mr->set_reg;
			} else {
				/* single register, read-modify-write */
				fn = REG_FN_MODIFY_BASE;
				if (mr->set_reg) {
					/* bit set -> enabled */
					mode = MODE_ENABLE_REG;
					reg_e = mr->set_reg;
					reg_d = mr->set_reg;
				} else {
					/* bit set -> masked */
					mode = MODE_MASK_REG;
					reg_e = mr->clr_reg;
					reg_d = mr->clr_reg;
				}
			}

			/* select the 8/16/32-bit access variant */
			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					1,
					(mr->reg_width - 1) - *fld_idx);
		}

		*fld_idx = 0;
		(*reg_idx)++;
	}

	return 0;
}
87
88unsigned int __init
89intc_get_mask_handle(struct intc_desc *desc, struct intc_desc_int *d,
90 intc_enum enum_id, int do_grps)
91{
92 unsigned int i = 0;
93 unsigned int j = 0;
94 unsigned int ret;
95
96 ret = _intc_mask_data(desc, d, enum_id, &i, &j);
97 if (ret)
98 return ret;
99
100 if (do_grps)
101 return intc_get_mask_handle(desc, d, intc_grp_id(desc, enum_id), 0);
102
103 return 0;
104}
105
/*
 * Find the priority register field that controls @enum_id, resuming
 * the scan from *reg_idx/*fld_idx so callers can iterate over every
 * match.  On success returns a packed _INTC_MK() handle and leaves the
 * indices pointing at the match; returns 0 when no further match
 * exists.
 */
static unsigned int __init _intc_prio_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   intc_enum enum_id,
					   unsigned int *reg_idx,
					   unsigned int *fld_idx)
{
	struct intc_prio_reg *pr = desc->hw.prio_regs;
	unsigned int fn, n, mode, bit;
	unsigned long reg_e, reg_d;

	while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) {
		pr = desc->hw.prio_regs + *reg_idx;

		for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) {
			if (pr->enum_ids[*fld_idx] != enum_id)
				continue;

			if (pr->set_reg && pr->clr_reg) {
				/* separate set/clear register pair */
				fn = REG_FN_WRITE_BASE;
				mode = MODE_PCLR_REG;
				reg_e = pr->set_reg;
				reg_d = pr->clr_reg;
			} else {
				fn = REG_FN_MODIFY_BASE;
				mode = MODE_PRIO_REG;
				/* a prio register must have a set_reg */
				if (!pr->set_reg)
					BUG();
				reg_e = pr->set_reg;
				reg_d = pr->set_reg;
			}

			/* select the 8/16/32-bit access variant */
			fn += (pr->reg_width >> 3) - 1;
			n = *fld_idx + 1;

			/* field must fit inside the register */
			BUG_ON(n * pr->field_width > pr->reg_width);

			/* fields are packed from the MSB downwards */
			bit = pr->reg_width - (n * pr->field_width);

			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					pr->field_width, bit);
		}

		*fld_idx = 0;
		(*reg_idx)++;
	}

	return 0;
}
156
157unsigned int __init
158intc_get_prio_handle(struct intc_desc *desc, struct intc_desc_int *d,
159 intc_enum enum_id, int do_grps)
160{
161 unsigned int i = 0;
162 unsigned int j = 0;
163 unsigned int ret;
164
165 ret = _intc_prio_data(desc, d, enum_id, &i, &j);
166 if (ret)
167 return ret;
168
169 if (do_grps)
170 return intc_get_prio_handle(desc, d, intc_grp_id(desc, enum_id), 0);
171
172 return 0;
173}
174
/*
 * Look up the ACK register bit for @enum_id and pack it into a
 * register handle.  ACK registers always use the single set_reg,
 * read-modify-write form.  Returns 0 when @enum_id has no ACK bit.
 */
static unsigned int __init intc_ack_data(struct intc_desc *desc,
					 struct intc_desc_int *d,
					 intc_enum enum_id)
{
	struct intc_mask_reg *mr = desc->hw.ack_regs;
	unsigned int i, j, fn, mode;
	unsigned long reg_e, reg_d;

	for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) {
		mr = desc->hw.ack_regs + i;

		for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
			if (mr->enum_ids[j] != enum_id)
				continue;

			fn = REG_FN_MODIFY_BASE;
			mode = MODE_ENABLE_REG;
			reg_e = mr->set_reg;
			reg_d = mr->set_reg;

			/* select the 8/16/32-bit access variant */
			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					1,
					(mr->reg_width - 1) - j);
		}
	}

	return 0;
}
206
/*
 * Apply the enable or disable side of a packed register handle to
 * every SMP copy of the corresponding register.  The access routine is
 * picked from intc_enable_noprio_fns[] / intc_disable_fns[] by mode;
 * the trailing irq argument is unused here and passed as 0.
 */
static void intc_enable_disable(struct intc_desc_int *d,
				unsigned long handle, int do_enable)
{
	unsigned long addr;
	unsigned int cpu;
	unsigned long (*fn)(unsigned long, unsigned long,
		   unsigned long (*)(unsigned long, unsigned long,
				     unsigned long),
		   unsigned int);

	if (do_enable) {
		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
			addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
			fn = intc_enable_noprio_fns[_INTC_MODE(handle)];
			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
		}
	} else {
		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
			addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
			fn = intc_disable_fns[_INTC_MODE(handle)];
			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
		}
	}
}
231
/*
 * Enable or disable every mask bit and priority field associated with
 * @enum_id.  _intc_mask_data()/_intc_prio_data() leave their search
 * indices pointing at the last match, so the field index is bumped
 * after each hit to resume the scan past it.
 */
void __init intc_enable_disable_enum(struct intc_desc *desc,
				     struct intc_desc_int *d,
				     intc_enum enum_id, int enable)
{
	unsigned int i, j, data;

	/* go through and enable/disable all mask bits */
	i = j = 0;
	do {
		data = _intc_mask_data(desc, d, enum_id, &i, &j);
		if (data)
			intc_enable_disable(d, data, enable);
		j++;
	} while (data);

	/* go through and enable/disable all priority fields */
	i = j = 0;
	do {
		data = _intc_prio_data(desc, d, enum_id, &i, &j);
		if (data)
			intc_enable_disable(d, data, enable);

		j++;
	} while (data);
}
257
/*
 * Look up the sense register field for @enum_id and pack it into a
 * register handle.  Sense registers are single-register only, so the
 * disable address slot is left 0.  Returns 0 if @enum_id has no
 * configurable sense field.
 */
unsigned int __init
intc_get_sense_handle(struct intc_desc *desc, struct intc_desc_int *d,
		      intc_enum enum_id)
{
	struct intc_sense_reg *sr = desc->hw.sense_regs;
	unsigned int i, j, fn, bit;

	for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) {
		sr = desc->hw.sense_regs + i;

		for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
			if (sr->enum_ids[j] != enum_id)
				continue;

			/* select the 8/16/32-bit access variant */
			fn = REG_FN_MODIFY_BASE;
			fn += (sr->reg_width >> 3) - 1;

			/* field must fit inside the register */
			BUG_ON((j + 1) * sr->field_width > sr->reg_width);

			/* fields are packed from the MSB downwards */
			bit = sr->reg_width - ((j + 1) * sr->field_width);

			return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
					0, sr->field_width, bit);
		}
	}

	return 0;
}
286
287
/*
 * Compute and cache the ACK register handle for @irq so it can be
 * fetched cheaply later via intc_get_ack_handle().
 */
void intc_set_ack_handle(unsigned int irq, struct intc_desc *desc,
			 struct intc_desc_int *d, intc_enum id)
{
	unsigned long flags;

	/*
	 * Nothing to do for this IRQ.
	 */
	if (!desc->hw.ack_regs)
		return;

	raw_spin_lock_irqsave(&intc_big_lock, flags);
	ack_handle[irq] = intc_ack_data(desc, d, id);
	raw_spin_unlock_irqrestore(&intc_big_lock, flags);
}
303
/* Fetch the ACK handle cached by intc_set_ack_handle(). */
unsigned long intc_get_ack_handle(unsigned int irq)
{
	return ack_handle[irq];
}
diff --git a/drivers/sh/intc/internals.h b/drivers/sh/intc/internals.h
new file mode 100644
index 000000000000..f02a47f74930
--- /dev/null
+++ b/drivers/sh/intc/internals.h
@@ -0,0 +1,185 @@
1#include <linux/sh_intc.h>
2#include <linux/irq.h>
3#include <linux/list.h>
4#include <linux/kernel.h>
5#include <linux/types.h>
6#include <linux/radix-tree.h>
7#include <linux/sysdev.h>
8
/*
 * Pack a register access description into a single handle word:
 *
 *   [31:24] addr_d - disable-side register index (see intc_get_reg())
 *   [23:16] addr_e - enable-side register index
 *   [15:13] mode   - MODE_* register semantics
 *   [12:9]  fn     - REG_FN_* access function
 *   [8:5]   width  - field width in bits
 *   [4:0]   shift  - field position within the register
 *
 * All macro arguments are fully parenthesized so expression arguments
 * expand safely (the original left addr_d and the extractor argument
 * bare, which mis-expands for compound expressions).
 */
#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
	((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
	 ((addr_e) << 16) | ((addr_d) << 24))

#define _INTC_SHIFT(h)	((h) & 0x1f)
#define _INTC_WIDTH(h)	(((h) >> 5) & 0xf)
#define _INTC_FN(h)	(((h) >> 9) & 0xf)
#define _INTC_MODE(h)	(((h) >> 13) & 0x7)
#define _INTC_ADDR_E(h)	(((h) >> 16) & 0xff)
#define _INTC_ADDR_D(h)	(((h) >> 24) & 0xff)
19
#ifdef CONFIG_SMP
/*
 * d->smp[] packs, per register index, the per-CPU register stride in
 * the low byte and the number of per-CPU register copies in the bits
 * above it (0 copies means a single shared register).
 */
#define IS_SMP(x)	(x.smp)
#define INTC_REG(d, x, c)	(d->reg[(x)] + ((d->smp[(x)] & 0xff) * c))
#define SMP_NR(d, x)	((d->smp[(x)] >> 8) ? (d->smp[(x)] >> 8) : 1)
#else
#define IS_SMP(x)	0
#define INTC_REG(d, x, c)	(d->reg[(x)])
#define SMP_NR(d, x)	1
#endif
29
/* irq -> packed register handle pairing (see _INTC_MK()) */
struct intc_handle_int {
	unsigned int irq;
	unsigned long handle;
};
34
/* ioremapped register window, used for phys -> virt translation */
struct intc_window {
	phys_addr_t phys;	/* physical base of the window */
	void __iomem *virt;	/* matching mapped virtual base */
	unsigned long size;	/* window length in bytes */
};
40
/* per-IRQ reverse mapping: enum id plus owning controller */
struct intc_map_entry {
	intc_enum enum_id;
	struct intc_desc_int *desc;
};

/* subgroup member waiting for (or describing) a virtual IRQ */
struct intc_subgroup_entry {
	unsigned int pirq;	/* parent (physical) IRQ */
	intc_enum enum_id;
	unsigned long handle;	/* packed register handle (_INTC_MK()) */
};
51
/*
 * Runtime state for one registered interrupt controller.
 */
struct intc_desc_int {
	struct list_head list;		/* entry in global intc_list */
	struct sys_device sysdev;
	struct radix_tree_root tree;	/* enum id -> map/subgroup entry */
	pm_message_t state;
	raw_spinlock_t lock;
	unsigned int index;
	unsigned long *reg;		/* virtual register addresses */
#ifdef CONFIG_SMP
	unsigned long *smp;		/* per-CPU stride/copies, see SMP_NR() */
#endif
	unsigned int nr_reg;
	struct intc_handle_int *prio;	/* priority handle table */
	unsigned int nr_prio;
	struct intc_handle_int *sense;	/* sense handle table */
	unsigned int nr_sense;
	struct intc_window *window;	/* ioremapped register windows */
	unsigned int nr_windows;
	struct irq_chip chip;		/* embedded; see get_intc_desc() */
};
72
73
/*
 * Register access function bases; the concrete 8/16/32-bit variant is
 * selected by adding (reg_width >> 3) - 1 to the base value before
 * packing it into a handle.
 */
enum {
	REG_FN_ERR = 0,
	REG_FN_TEST_BASE = 1,
	REG_FN_WRITE_BASE = 5,
	REG_FN_MODIFY_BASE = 9
};

enum {	MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */
	MODE_MASK_REG,       /* Bit(s) set -> interrupt disabled */
	MODE_DUAL_REG,       /* Two registers, set bit to enable / disable */
	MODE_PRIO_REG,       /* Priority value written to enable interrupt */
	MODE_PCLR_REG,       /* Above plus all bits set to disable interrupt */
};
87
88static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
89{
90 struct irq_chip *chip = get_irq_chip(irq);
91
92 return container_of(chip, struct intc_desc_int, chip);
93}
94
/*
 * Make a freshly created dynamic IRQ requestable by drivers.
 */
static inline void activate_irq(int irq)
{
#ifdef CONFIG_ARM
	/* ARM requires an extra step to clear IRQ_NOREQUEST, which it
	 * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE.
	 */
	set_irq_flags(irq, IRQF_VALID);
#else
	/* same effect on other architectures */
	set_irq_noprobe(irq);
#endif
}
110
111/* access.c */
112extern unsigned long
113(*intc_reg_fns[])(unsigned long addr, unsigned long h, unsigned long data);
114
115extern unsigned long
116(*intc_enable_fns[])(unsigned long addr, unsigned long handle,
117 unsigned long (*fn)(unsigned long,
118 unsigned long, unsigned long),
119 unsigned int irq);
120extern unsigned long
121(*intc_disable_fns[])(unsigned long addr, unsigned long handle,
122 unsigned long (*fn)(unsigned long,
123 unsigned long, unsigned long),
124 unsigned int irq);
125extern unsigned long
126(*intc_enable_noprio_fns[])(unsigned long addr, unsigned long handle,
127 unsigned long (*fn)(unsigned long,
128 unsigned long, unsigned long),
129 unsigned int irq);
130
131unsigned long intc_phys_to_virt(struct intc_desc_int *d, unsigned long address);
132unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address);
133unsigned int intc_set_field_from_handle(unsigned int value,
134 unsigned int field_value,
135 unsigned int handle);
136unsigned long intc_get_field_from_handle(unsigned int value,
137 unsigned int handle);
138
139/* balancing.c */
140#ifdef CONFIG_INTC_BALANCING
141void intc_balancing_enable(unsigned int irq);
142void intc_balancing_disable(unsigned int irq);
143void intc_set_dist_handle(unsigned int irq, struct intc_desc *desc,
144 struct intc_desc_int *d, intc_enum id);
145#else
146void intc_balancing_enable(unsigned int irq) { }
147void intc_balancing_disable(unsigned int irq) { }
148void intc_set_dist_handle(unsigned int irq, struct intc_desc *desc,
149 struct intc_desc_int *d, intc_enum id) { }
150#endif
151
152/* chip.c */
153extern struct irq_chip intc_irq_chip;
154void _intc_enable(unsigned int irq, unsigned long handle);
155
156/* core.c */
157extern struct list_head intc_list;
158extern raw_spinlock_t intc_big_lock;
159extern unsigned int nr_intc_controllers;
160extern struct sysdev_class intc_sysdev_class;
161
162unsigned int intc_get_dfl_prio_level(void);
163unsigned int intc_get_prio_level(unsigned int irq);
164void intc_set_prio_level(unsigned int irq, unsigned int level);
165
166/* handle.c */
167unsigned int intc_get_mask_handle(struct intc_desc *desc,
168 struct intc_desc_int *d,
169 intc_enum enum_id, int do_grps);
170unsigned int intc_get_prio_handle(struct intc_desc *desc,
171 struct intc_desc_int *d,
172 intc_enum enum_id, int do_grps);
173unsigned int intc_get_sense_handle(struct intc_desc *desc,
174 struct intc_desc_int *d,
175 intc_enum enum_id);
176void intc_set_ack_handle(unsigned int irq, struct intc_desc *desc,
177 struct intc_desc_int *d, intc_enum id);
178unsigned long intc_get_ack_handle(unsigned int irq);
179void intc_enable_disable_enum(struct intc_desc *desc, struct intc_desc_int *d,
180 intc_enum enum_id, int enable);
181
182/* virq.c */
183void intc_subgroup_init(struct intc_desc *desc, struct intc_desc_int *d);
184void intc_irq_xlate_set(unsigned int irq, intc_enum id, struct intc_desc_int *d);
185struct intc_map_entry *intc_irq_xlate_get(unsigned int irq);
diff --git a/drivers/sh/intc/userimask.c b/drivers/sh/intc/userimask.c
new file mode 100644
index 000000000000..e32304b66cf1
--- /dev/null
+++ b/drivers/sh/intc/userimask.c
@@ -0,0 +1,83 @@
1/*
2 * Support for hardware-assisted userspace interrupt masking.
3 *
4 * Copyright (C) 2010 Paul Mundt
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10#define pr_fmt(fmt) "intc: " fmt
11
12#include <linux/errno.h>
13#include <linux/sysdev.h>
14#include <linux/init.h>
15#include <linux/io.h>
16#include <asm/sizes.h>
17#include "internals.h"
18
19static void __iomem *uimask;
20
21static ssize_t
22show_intc_userimask(struct sysdev_class *cls,
23 struct sysdev_class_attribute *attr, char *buf)
24{
25 return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
26}
27
28static ssize_t
29store_intc_userimask(struct sysdev_class *cls,
30 struct sysdev_class_attribute *attr,
31 const char *buf, size_t count)
32{
33 unsigned long level;
34
35 level = simple_strtoul(buf, NULL, 10);
36
37 /*
38 * Minimal acceptable IRQ levels are in the 2 - 16 range, but
39 * these are chomped so as to not interfere with normal IRQs.
40 *
41 * Level 1 is a special case on some CPUs in that it's not
42 * directly settable, but given that USERIMASK cuts off below a
43 * certain level, we don't care about this limitation here.
44 * Level 0 on the other hand equates to user masking disabled.
45 *
46 * We use the default priority level as a cut off so that only
47 * special case opt-in IRQs can be mangled.
48 */
49 if (level >= intc_get_dfl_prio_level())
50 return -EINVAL;
51
52 __raw_writel(0xa5 << 24 | level << 4, uimask);
53
54 return count;
55}
56
57static SYSDEV_CLASS_ATTR(userimask, S_IRUSR | S_IWUSR,
58 show_intc_userimask, store_intc_userimask);
59
60
/*
 * Expose the 'userimask' attribute on the INTC sysdev class late in
 * boot.  Bails with -ENXIO if no platform registered a USERIMASK
 * register via register_intc_userimask().
 */
static int __init userimask_sysdev_init(void)
{
	if (unlikely(!uimask))
		return -ENXIO;

	return sysdev_class_create_file(&intc_sysdev_class, &attr_userimask);
}
late_initcall(userimask_sysdev_init);
69
/*
 * Register the platform's USERIMASK control register.
 *
 * Maps a 4K window at @addr for use by the sysdev attribute.  Only
 * one registration is permitted: returns -EBUSY on a second attempt
 * and -ENOMEM if the mapping fails.
 */
int register_intc_userimask(unsigned long addr)
{
	if (unlikely(uimask))
		return -EBUSY;

	uimask = ioremap_nocache(addr, SZ_4K);
	if (unlikely(!uimask))
		return -ENOMEM;

	pr_info("userimask support registered for levels 0 -> %d\n",
		intc_get_dfl_prio_level() - 1);

	return 0;
}
diff --git a/drivers/sh/intc/virq-debugfs.c b/drivers/sh/intc/virq-debugfs.c
new file mode 100644
index 000000000000..9e62ba9311f0
--- /dev/null
+++ b/drivers/sh/intc/virq-debugfs.c
@@ -0,0 +1,64 @@
1/*
2 * Support for virtual IRQ subgroups debugfs mapping.
3 *
4 * Copyright (C) 2010 Paul Mundt
5 *
6 * Modelled after arch/powerpc/kernel/irq.c.
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/seq_file.h>
13#include <linux/fs.h>
14#include <linux/init.h>
15#include <linux/irq.h>
16#include <linux/debugfs.h>
17#include "internals.h"
18
19static int intc_irq_xlate_debug(struct seq_file *m, void *priv)
20{
21 int i;
22
23 seq_printf(m, "%-5s %-7s %-15s\n", "irq", "enum", "chip name");
24
25 for (i = 1; i < nr_irqs; i++) {
26 struct intc_map_entry *entry = intc_irq_xlate_get(i);
27 struct intc_desc_int *desc = entry->desc;
28
29 if (!desc)
30 continue;
31
32 seq_printf(m, "%5d ", i);
33 seq_printf(m, "0x%05x ", entry->enum_id);
34 seq_printf(m, "%-15s\n", desc->chip.name);
35 }
36
37 return 0;
38}
39
/* Open hook: bind the xlate dump to a single-shot seq_file show. */
static int intc_irq_xlate_open(struct inode *inode, struct file *file)
{
	return single_open(file, intc_irq_xlate_debug, inode->i_private);
}
44
/* file_operations for the intc_irq_xlate debugfs entry */
static const struct file_operations intc_irq_xlate_fops = {
	.open = intc_irq_xlate_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
51
52static int __init intc_irq_xlate_init(void)
53{
54 /*
55 * XXX.. use arch_debugfs_dir here when all of the intc users are
56 * converted.
57 */
58 if (debugfs_create_file("intc_irq_xlate", S_IRUGO, NULL, NULL,
59 &intc_irq_xlate_fops) == NULL)
60 return -ENOMEM;
61
62 return 0;
63}
64fs_initcall(intc_irq_xlate_init);
diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c
new file mode 100644
index 000000000000..643dfd4d2057
--- /dev/null
+++ b/drivers/sh/intc/virq.c
@@ -0,0 +1,255 @@
1/*
2 * Support for virtual IRQ subgroups.
3 *
4 * Copyright (C) 2010 Paul Mundt
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10#define pr_fmt(fmt) "intc: " fmt
11
12#include <linux/slab.h>
13#include <linux/irq.h>
14#include <linux/list.h>
15#include <linux/radix-tree.h>
16#include <linux/spinlock.h>
17#include "internals.h"
18
19static struct intc_map_entry intc_irq_xlate[NR_IRQS];
20
21struct intc_virq_list {
22 unsigned int irq;
23 struct intc_virq_list *next;
24};
25
26#define for_each_virq(entry, head) \
27 for (entry = head; entry; entry = entry->next)
28
29/*
30 * Tags for the radix tree
31 */
32#define INTC_TAG_VIRQ_NEEDS_ALLOC 0
33
34void intc_irq_xlate_set(unsigned int irq, intc_enum id, struct intc_desc_int *d)
35{
36 unsigned long flags;
37
38 raw_spin_lock_irqsave(&intc_big_lock, flags);
39 intc_irq_xlate[irq].enum_id = id;
40 intc_irq_xlate[irq].desc = d;
41 raw_spin_unlock_irqrestore(&intc_big_lock, flags);
42}
43
44struct intc_map_entry *intc_irq_xlate_get(unsigned int irq)
45{
46 return intc_irq_xlate + irq;
47}
48
/*
 * Map a (controller name, enum id) pair back to its Linux IRQ number.
 *
 * Returns -1 if the id is unknown on the named controller, or if the
 * id is a subgroup VIRQ that has not been allocated an IRQ yet (its
 * radix tree entry is still tagged INTC_TAG_VIRQ_NEEDS_ALLOC).
 */
int intc_irq_lookup(const char *chipname, intc_enum enum_id)
{
	struct intc_map_entry *ptr;
	struct intc_desc_int *d;
	int irq = -1;

	list_for_each_entry(d, &intc_list, list) {
		int tagged;

		if (strcmp(d->chip.name, chipname) != 0)
			continue;

		/*
		 * Catch early lookups for subgroup VIRQs that have not
		 * yet been allocated an IRQ. This already includes a
		 * fast-path out if the tree is untagged, so there is no
		 * need to explicitly test the root tree.
		 */
		tagged = radix_tree_tag_get(&d->tree, enum_id,
					    INTC_TAG_VIRQ_NEEDS_ALLOC);
		if (unlikely(tagged))
			break;

		/* mapped entries point into intc_irq_xlate[]; index == IRQ */
		ptr = radix_tree_lookup(&d->tree, enum_id);
		if (ptr) {
			irq = ptr - intc_irq_xlate;
			break;
		}
	}

	return irq;
}
EXPORT_SYMBOL_GPL(intc_irq_lookup);
82
/*
 * Append @virq to the parent @irq's list of virtual children.
 *
 * The singly-linked list head lives in the parent's
 * irq_desc->handler_data; @last tracks the link to patch so the new
 * entry lands at the tail.  Duplicate registrations return 0 without
 * modification; -ENOMEM if the list node cannot be allocated.
 */
static int add_virq_to_pirq(unsigned int irq, unsigned int virq)
{
	struct intc_virq_list **last, *entry;
	struct irq_desc *desc = irq_to_desc(irq);

	/* scan for duplicates */
	last = (struct intc_virq_list **)&desc->handler_data;
	for_each_virq(entry, desc->handler_data) {
		if (entry->irq == virq)
			return 0;
		last = &entry->next;
	}

	/* GFP_ATOMIC: may be called with a raw spinlock held */
	entry = kzalloc(sizeof(struct intc_virq_list), GFP_ATOMIC);
	if (!entry) {
		pr_err("can't allocate VIRQ mapping for %d\n", virq);
		return -ENOMEM;
	}

	entry->irq = virq;

	*last = entry;

	return 0;
}
108
/*
 * Chained handler for a parent IRQ with virtual subgroup children.
 *
 * Masks and acks the parent, then walks the child list: each child's
 * packed register handle (stashed as its irq data) is used to test the
 * subgroup status bit, and pending children are dispatched through
 * generic_handle_irq() before the parent is unmasked again.
 */
static void intc_virq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct intc_virq_list *entry, *vlist = get_irq_data(irq);
	struct intc_desc_int *d = get_intc_desc(irq);

	desc->chip->mask_ack(irq);

	for_each_virq(entry, vlist) {
		unsigned long addr, handle;

		handle = (unsigned long)get_irq_data(entry->irq);
		addr = INTC_REG(d, _INTC_ADDR_E(handle), 0);

		/* non-zero test result -> this child is pending */
		if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0))
			generic_handle_irq(entry->irq);
	}

	desc->chip->unmask(irq);
}
128
129static unsigned long __init intc_subgroup_data(struct intc_subgroup *subgroup,
130 struct intc_desc_int *d,
131 unsigned int index)
132{
133 unsigned int fn = REG_FN_TEST_BASE + (subgroup->reg_width >> 3) - 1;
134
135 return _INTC_MK(fn, MODE_ENABLE_REG, intc_get_reg(d, subgroup->reg),
136 0, 1, (subgroup->reg_width - 1) - index);
137}
138
/*
 * Register one subgroup's members in the controller's radix tree.
 *
 * Each member enum gets an intc_subgroup_entry carrying the parent IRQ
 * and its packed status-bit handle, inserted under the member's enum
 * id and tagged INTC_TAG_VIRQ_NEEDS_ALLOC so a later
 * intc_subgroup_map() pass can allocate a real VIRQ for it.  The
 * subgroup's parent enum must already be mapped in the tree.
 */
static void __init intc_subgroup_init_one(struct intc_desc *desc,
					  struct intc_desc_int *d,
					  struct intc_subgroup *subgroup)
{
	struct intc_map_entry *mapped;
	unsigned int pirq;
	unsigned long flags;
	int i;

	mapped = radix_tree_lookup(&d->tree, subgroup->parent_id);
	if (!mapped) {
		WARN_ON(1);
		return;
	}

	/* mapped entries point into intc_irq_xlate[]; index == IRQ */
	pirq = mapped - intc_irq_xlate;

	raw_spin_lock_irqsave(&d->lock, flags);

	for (i = 0; i < ARRAY_SIZE(subgroup->enum_ids); i++) {
		struct intc_subgroup_entry *entry;
		int err;

		if (!subgroup->enum_ids[i])
			continue;

		/* GFP_NOWAIT: allocating under d->lock */
		entry = kmalloc(sizeof(*entry), GFP_NOWAIT);
		if (!entry)
			break;

		entry->pirq = pirq;
		entry->enum_id = subgroup->enum_ids[i];
		entry->handle = intc_subgroup_data(subgroup, d, i);

		err = radix_tree_insert(&d->tree, entry->enum_id, entry);
		if (unlikely(err < 0))
			break;

		radix_tree_tag_set(&d->tree, entry->enum_id,
				   INTC_TAG_VIRQ_NEEDS_ALLOC);
	}

	raw_spin_unlock_irqrestore(&d->lock, flags);
}
183
184void __init intc_subgroup_init(struct intc_desc *desc, struct intc_desc_int *d)
185{
186 int i;
187
188 if (!desc->hw.subgroups)
189 return;
190
191 for (i = 0; i < desc->hw.nr_subgroups; i++)
192 intc_subgroup_init_one(desc, d, desc->hw.subgroups + i);
193}
194
/*
 * Allocate real IRQs for every subgroup entry still tagged
 * INTC_TAG_VIRQ_NEEDS_ALLOC in @d's radix tree.
 *
 * For each pending entry: create a fresh IRQ, inherit the parent's
 * chip and chip data, stash the packed status handle as the VIRQ's
 * irq data, chain the parent through intc_virq_handler(), then
 * repoint the radix tree slot at the intc_irq_xlate[] entry and clear
 * the tag.  Lookups are batched 32 slots at a time.
 */
static void __init intc_subgroup_map(struct intc_desc_int *d)
{
	struct intc_subgroup_entry *entries[32];
	unsigned long flags;
	unsigned int nr_found;
	int i;

	raw_spin_lock_irqsave(&d->lock, flags);

restart:
	nr_found = radix_tree_gang_lookup_tag_slot(&d->tree,
			(void ***)entries, 0, ARRAY_SIZE(entries),
			INTC_TAG_VIRQ_NEEDS_ALLOC);

	for (i = 0; i < nr_found; i++) {
		struct intc_subgroup_entry *entry;
		int irq;

		entry = radix_tree_deref_slot((void **)entries[i]);
		if (unlikely(!entry))
			continue;
		/* slot moved under us - rescan from the start */
		if (unlikely(entry == RADIX_TREE_RETRY))
			goto restart;

		irq = create_irq();
		if (unlikely(irq < 0)) {
			pr_err("no more free IRQs, bailing..\n");
			break;
		}

		pr_info("Setting up a chained VIRQ from %d -> %d\n",
			irq, entry->pirq);

		intc_irq_xlate_set(irq, entry->enum_id, d);

		set_irq_chip_and_handler_name(irq, get_irq_chip(entry->pirq),
					      handle_simple_irq, "virq");
		set_irq_chip_data(irq, get_irq_chip_data(entry->pirq));

		/* the packed handle doubles as the VIRQ's irq data */
		set_irq_data(irq, (void *)entry->handle);

		set_irq_chained_handler(entry->pirq, intc_virq_handler);
		add_virq_to_pirq(entry->pirq, irq);

		radix_tree_tag_clear(&d->tree, entry->enum_id,
				     INTC_TAG_VIRQ_NEEDS_ALLOC);
		radix_tree_replace_slot((void **)entries[i],
					&intc_irq_xlate[irq]);
	}

	raw_spin_unlock_irqrestore(&d->lock, flags);
}
247
/*
 * Late init: walk all registered controllers and allocate VIRQs for
 * any subgroup entries still waiting on one (tree still tagged).
 */
void __init intc_finalize(void)
{
	struct intc_desc_int *d;

	list_for_each_entry(d, &intc_list, list)
		if (radix_tree_tagged(&d->tree, INTC_TAG_VIRQ_NEEDS_ALLOC))
			intc_subgroup_map(d);
}