author     Paul Mundt <lethal@linux-sh.org>    2010-10-04 15:47:03 -0400
committer  Paul Mundt <lethal@linux-sh.org>    2010-10-04 15:47:03 -0400
commit     c1e30ad98fe210688edca872686db4a715c2fb23 (patch)
tree       1ff151ccc9658d7a2d89da9cfc6a2d6817913a79 /drivers
parent     44629f57accccbb8e6d443246fe6f51b42f7f781 (diff)
sh: intc: Support virtual mappings for IRQ subgroups.
Many interrupts that share a single mask source but are on different
hardware vectors will have an associated register tied to an INTEVT that
denotes the precise cause for the interrupt exception being triggered.
This introduces the concept of IRQ subgroups in the intc core, where
a virtual IRQ map is constructed for each of the pre-defined cause bits,
and a higher level chained handler takes control of the parent INTEVT.
This enables CPUs with heavily muxed IRQ vectors (especially across
disjoint blocks) to break things out into a series of managed chained
handlers while being able to dynamically look up and adopt the IRQs
created for them.
This is largely an opt-in interface, requiring CPUs to manually submit
IRQs for subgroup splitting, in addition to providing identifiers in
their enum maps that can be used for lazy lookup via the radix tree.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
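As a rough illustration of the opt-in described above (not part of this patch:
the register address, the SUBGROUP_PARENT/CAUSE_A/CAUSE_B identifiers and the
"INTC" chip name are invented, and only the struct intc_subgroup fields that
the patch itself reads -- reg, reg_width, parent_id, enum_ids[] -- are
assumed), a CPU definition might submit one subgroup per cause register and a
driver could later adopt the resulting virtual IRQ by enum id:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/sh_intc.h>

/*
 * Hypothetical sketch: one subgroup describing a 32-bit cause register
 * whose bits distinguish the interrupts muxed behind a single parent
 * INTEVT.  SUBGROUP_PARENT, CAUSE_A, CAUSE_B and the register address
 * are made up for illustration.
 */
static struct intc_subgroup example_subgroups[] __initdata = {
	{
		.reg		= 0xfe782024,		/* cause register for the parent INTEVT */
		.reg_width	= 32,
		.parent_id	= SUBGROUP_PARENT,	/* enum id of the shared mask source */
		.enum_ids	= { CAUSE_A, CAUSE_B },	/* one id per cause bit of interest */
	},
};

/*
 * The controller descriptor would point at this table through
 * hw.subgroups/hw.nr_subgroups.  Once intc_finalize() has mapped the
 * subgroups to virtual IRQs, a driver can look its IRQ up lazily by
 * enum id and adopt it:
 */
static int example_adopt_virq(void)
{
	unsigned int virq = intc_irq_lookup("INTC", CAUSE_A);

	return virq ? 0 : -ENOENT;	/* assuming 0 means "no mapping found" */
}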
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/sh/intc.c | 404 |
1 files changed, 338 insertions, 66 deletions
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index a27dcb4254c7..c81fe23db7f7 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/seq_file.h> | 35 | #include <linux/seq_file.h> |
36 | #include <linux/radix-tree.h> | 36 | #include <linux/radix-tree.h> |
37 | #include <linux/mutex.h> | 37 | #include <linux/mutex.h> |
38 | #include <linux/rcupdate.h> | ||
38 | #include <asm/sizes.h> | 39 | #include <asm/sizes.h> |
39 | 40 | ||
40 | #define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \ | 41 | #define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \ |
@@ -64,11 +65,19 @@ struct intc_map_entry { | |||
64 | struct intc_desc_int *desc; | 65 | struct intc_desc_int *desc; |
65 | }; | 66 | }; |
66 | 67 | ||
68 | struct intc_subgroup_entry { | ||
69 | unsigned int pirq; | ||
70 | intc_enum enum_id; | ||
71 | unsigned long handle; | ||
72 | }; | ||
73 | |||
67 | struct intc_desc_int { | 74 | struct intc_desc_int { |
68 | struct list_head list; | 75 | struct list_head list; |
69 | struct sys_device sysdev; | 76 | struct sys_device sysdev; |
70 | struct radix_tree_root tree; | 77 | struct radix_tree_root tree; |
71 | pm_message_t state; | 78 | pm_message_t state; |
79 | spinlock_t lock; | ||
80 | unsigned int index; | ||
72 | unsigned long *reg; | 81 | unsigned long *reg; |
73 | #ifdef CONFIG_SMP | 82 | #ifdef CONFIG_SMP |
74 | unsigned long *smp; | 83 | unsigned long *smp; |
@@ -84,6 +93,7 @@ struct intc_desc_int { | |||
84 | }; | 93 | }; |
85 | 94 | ||
86 | static LIST_HEAD(intc_list); | 95 | static LIST_HEAD(intc_list); |
96 | static unsigned int nr_intc_controllers; | ||
87 | 97 | ||
88 | /* | 98 | /* |
89 | * The intc_irq_map provides a global map of bound IRQ vectors for a | 99 | * The intc_irq_map provides a global map of bound IRQ vectors for a |
@@ -99,7 +109,7 @@ static LIST_HEAD(intc_list); | |||
99 | static DECLARE_BITMAP(intc_irq_map, NR_IRQS); | 109 | static DECLARE_BITMAP(intc_irq_map, NR_IRQS); |
100 | static struct intc_map_entry intc_irq_xlate[NR_IRQS]; | 110 | static struct intc_map_entry intc_irq_xlate[NR_IRQS]; |
101 | static DEFINE_SPINLOCK(vector_lock); | 111 | static DEFINE_SPINLOCK(vector_lock); |
102 | static DEFINE_MUTEX(irq_xlate_mutex); | 112 | static DEFINE_SPINLOCK(xlate_lock); |
103 | 113 | ||
104 | #ifdef CONFIG_SMP | 114 | #ifdef CONFIG_SMP |
105 | #define IS_SMP(x) x.smp | 115 | #define IS_SMP(x) x.smp |
@@ -118,12 +128,39 @@ static unsigned long ack_handle[NR_IRQS]; | |||
118 | static unsigned long dist_handle[NR_IRQS]; | 128 | static unsigned long dist_handle[NR_IRQS]; |
119 | #endif | 129 | #endif |
120 | 130 | ||
131 | struct intc_virq_list { | ||
132 | unsigned int irq; | ||
133 | struct intc_virq_list *next; | ||
134 | }; | ||
135 | |||
136 | #define for_each_virq(entry, head) \ | ||
137 | for (entry = head; entry; entry = entry->next) | ||
138 | |||
121 | static inline struct intc_desc_int *get_intc_desc(unsigned int irq) | 139 | static inline struct intc_desc_int *get_intc_desc(unsigned int irq) |
122 | { | 140 | { |
123 | struct irq_chip *chip = get_irq_chip(irq); | 141 | struct irq_chip *chip = get_irq_chip(irq); |
142 | |||
124 | return container_of(chip, struct intc_desc_int, chip); | 143 | return container_of(chip, struct intc_desc_int, chip); |
125 | } | 144 | } |
126 | 145 | ||
146 | static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc) | ||
147 | { | ||
148 | generic_handle_irq((unsigned int)get_irq_data(irq)); | ||
149 | } | ||
150 | |||
151 | static inline void activate_irq(int irq) | ||
152 | { | ||
153 | #ifdef CONFIG_ARM | ||
154 | /* ARM requires an extra step to clear IRQ_NOREQUEST, which it | ||
155 | * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE. | ||
156 | */ | ||
157 | set_irq_flags(irq, IRQF_VALID); | ||
158 | #else | ||
159 | /* same effect on other architectures */ | ||
160 | set_irq_noprobe(irq); | ||
161 | #endif | ||
162 | } | ||
163 | |||
127 | static unsigned long intc_phys_to_virt(struct intc_desc_int *d, | 164 | static unsigned long intc_phys_to_virt(struct intc_desc_int *d, |
128 | unsigned long address) | 165 | unsigned long address) |
129 | { | 166 | { |
@@ -177,56 +214,103 @@ static inline unsigned int set_field(unsigned int value, | |||
177 | return value; | 214 | return value; |
178 | } | 215 | } |
179 | 216 | ||
180 | static void write_8(unsigned long addr, unsigned long h, unsigned long data) | 217 | static inline unsigned long get_field(unsigned int value, unsigned int handle) |
218 | { | ||
219 | unsigned int width = _INTC_WIDTH(handle); | ||
220 | unsigned int shift = _INTC_SHIFT(handle); | ||
221 | unsigned int mask = ((1 << width) - 1) << shift; | ||
222 | |||
223 | return (value & mask) >> shift; | ||
224 | } | ||
225 | |||
226 | static unsigned long test_8(unsigned long addr, unsigned long h, | ||
227 | unsigned long ignore) | ||
228 | { | ||
229 | return get_field(__raw_readb(addr), h); | ||
230 | } | ||
231 | |||
232 | static unsigned long test_16(unsigned long addr, unsigned long h, | ||
233 | unsigned long ignore) | ||
234 | { | ||
235 | return get_field(__raw_readw(addr), h); | ||
236 | } | ||
237 | |||
238 | static unsigned long test_32(unsigned long addr, unsigned long h, | ||
239 | unsigned long ignore) | ||
240 | { | ||
241 | return get_field(__raw_readl(addr), h); | ||
242 | } | ||
243 | |||
244 | static unsigned long write_8(unsigned long addr, unsigned long h, | ||
245 | unsigned long data) | ||
181 | { | 246 | { |
182 | __raw_writeb(set_field(0, data, h), addr); | 247 | __raw_writeb(set_field(0, data, h), addr); |
183 | (void)__raw_readb(addr); /* Defeat write posting */ | 248 | (void)__raw_readb(addr); /* Defeat write posting */ |
249 | return 0; | ||
184 | } | 250 | } |
185 | 251 | ||
186 | static void write_16(unsigned long addr, unsigned long h, unsigned long data) | 252 | static unsigned long write_16(unsigned long addr, unsigned long h, |
253 | unsigned long data) | ||
187 | { | 254 | { |
188 | __raw_writew(set_field(0, data, h), addr); | 255 | __raw_writew(set_field(0, data, h), addr); |
189 | (void)__raw_readw(addr); /* Defeat write posting */ | 256 | (void)__raw_readw(addr); /* Defeat write posting */ |
257 | return 0; | ||
190 | } | 258 | } |
191 | 259 | ||
192 | static void write_32(unsigned long addr, unsigned long h, unsigned long data) | 260 | static unsigned long write_32(unsigned long addr, unsigned long h, |
261 | unsigned long data) | ||
193 | { | 262 | { |
194 | __raw_writel(set_field(0, data, h), addr); | 263 | __raw_writel(set_field(0, data, h), addr); |
195 | (void)__raw_readl(addr); /* Defeat write posting */ | 264 | (void)__raw_readl(addr); /* Defeat write posting */ |
265 | return 0; | ||
196 | } | 266 | } |
197 | 267 | ||
198 | static void modify_8(unsigned long addr, unsigned long h, unsigned long data) | 268 | static unsigned long modify_8(unsigned long addr, unsigned long h, |
269 | unsigned long data) | ||
199 | { | 270 | { |
200 | unsigned long flags; | 271 | unsigned long flags; |
201 | local_irq_save(flags); | 272 | local_irq_save(flags); |
202 | __raw_writeb(set_field(__raw_readb(addr), data, h), addr); | 273 | __raw_writeb(set_field(__raw_readb(addr), data, h), addr); |
203 | (void)__raw_readb(addr); /* Defeat write posting */ | 274 | (void)__raw_readb(addr); /* Defeat write posting */ |
204 | local_irq_restore(flags); | 275 | local_irq_restore(flags); |
276 | return 0; | ||
205 | } | 277 | } |
206 | 278 | ||
207 | static void modify_16(unsigned long addr, unsigned long h, unsigned long data) | 279 | static unsigned long modify_16(unsigned long addr, unsigned long h, |
280 | unsigned long data) | ||
208 | { | 281 | { |
209 | unsigned long flags; | 282 | unsigned long flags; |
210 | local_irq_save(flags); | 283 | local_irq_save(flags); |
211 | __raw_writew(set_field(__raw_readw(addr), data, h), addr); | 284 | __raw_writew(set_field(__raw_readw(addr), data, h), addr); |
212 | (void)__raw_readw(addr); /* Defeat write posting */ | 285 | (void)__raw_readw(addr); /* Defeat write posting */ |
213 | local_irq_restore(flags); | 286 | local_irq_restore(flags); |
287 | return 0; | ||
214 | } | 288 | } |
215 | 289 | ||
216 | static void modify_32(unsigned long addr, unsigned long h, unsigned long data) | 290 | static unsigned long modify_32(unsigned long addr, unsigned long h, |
291 | unsigned long data) | ||
217 | { | 292 | { |
218 | unsigned long flags; | 293 | unsigned long flags; |
219 | local_irq_save(flags); | 294 | local_irq_save(flags); |
220 | __raw_writel(set_field(__raw_readl(addr), data, h), addr); | 295 | __raw_writel(set_field(__raw_readl(addr), data, h), addr); |
221 | (void)__raw_readl(addr); /* Defeat write posting */ | 296 | (void)__raw_readl(addr); /* Defeat write posting */ |
222 | local_irq_restore(flags); | 297 | local_irq_restore(flags); |
298 | return 0; | ||
223 | } | 299 | } |
224 | 300 | ||
225 | enum { REG_FN_ERR = 0, REG_FN_WRITE_BASE = 1, REG_FN_MODIFY_BASE = 5 }; | 301 | enum { |
302 | REG_FN_ERR = 0, | ||
303 | REG_FN_TEST_BASE = 1, | ||
304 | REG_FN_WRITE_BASE = 5, | ||
305 | REG_FN_MODIFY_BASE = 9 | ||
306 | }; | ||
226 | 307 | ||
227 | static void (*intc_reg_fns[])(unsigned long addr, | 308 | static unsigned long (*intc_reg_fns[])(unsigned long addr, |
228 | unsigned long h, | 309 | unsigned long h, |
229 | unsigned long data) = { | 310 | unsigned long data) = { |
311 | [REG_FN_TEST_BASE + 0] = test_8, | ||
312 | [REG_FN_TEST_BASE + 1] = test_16, | ||
313 | [REG_FN_TEST_BASE + 3] = test_32, | ||
230 | [REG_FN_WRITE_BASE + 0] = write_8, | 314 | [REG_FN_WRITE_BASE + 0] = write_8, |
231 | [REG_FN_WRITE_BASE + 1] = write_16, | 315 | [REG_FN_WRITE_BASE + 1] = write_16, |
232 | [REG_FN_WRITE_BASE + 3] = write_32, | 316 | [REG_FN_WRITE_BASE + 3] = write_32, |
@@ -242,42 +326,42 @@ enum { MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */ | |||
242 | MODE_PCLR_REG, /* Above plus all bits set to disable interrupt */ | 326 | MODE_PCLR_REG, /* Above plus all bits set to disable interrupt */ |
243 | }; | 327 | }; |
244 | 328 | ||
245 | static void intc_mode_field(unsigned long addr, | 329 | static unsigned long intc_mode_field(unsigned long addr, |
246 | unsigned long handle, | 330 | unsigned long handle, |
247 | void (*fn)(unsigned long, | 331 | unsigned long (*fn)(unsigned long, |
248 | unsigned long, | 332 | unsigned long, |
249 | unsigned long), | 333 | unsigned long), |
250 | unsigned int irq) | 334 | unsigned int irq) |
251 | { | 335 | { |
252 | fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1)); | 336 | return fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1)); |
253 | } | 337 | } |
254 | 338 | ||
255 | static void intc_mode_zero(unsigned long addr, | 339 | static unsigned long intc_mode_zero(unsigned long addr, |
256 | unsigned long handle, | 340 | unsigned long handle, |
257 | void (*fn)(unsigned long, | 341 | unsigned long (*fn)(unsigned long, |
258 | unsigned long, | 342 | unsigned long, |
259 | unsigned long), | 343 | unsigned long), |
260 | unsigned int irq) | 344 | unsigned int irq) |
261 | { | 345 | { |
262 | fn(addr, handle, 0); | 346 | return fn(addr, handle, 0); |
263 | } | 347 | } |
264 | 348 | ||
265 | static void intc_mode_prio(unsigned long addr, | 349 | static unsigned long intc_mode_prio(unsigned long addr, |
266 | unsigned long handle, | 350 | unsigned long handle, |
267 | void (*fn)(unsigned long, | 351 | unsigned long (*fn)(unsigned long, |
268 | unsigned long, | 352 | unsigned long, |
269 | unsigned long), | 353 | unsigned long), |
270 | unsigned int irq) | 354 | unsigned int irq) |
271 | { | 355 | { |
272 | fn(addr, handle, intc_prio_level[irq]); | 356 | return fn(addr, handle, intc_prio_level[irq]); |
273 | } | 357 | } |
274 | 358 | ||
275 | static void (*intc_enable_fns[])(unsigned long addr, | 359 | static unsigned long (*intc_enable_fns[])(unsigned long addr, |
276 | unsigned long handle, | 360 | unsigned long handle, |
277 | void (*fn)(unsigned long, | 361 | unsigned long (*fn)(unsigned long, |
278 | unsigned long, | 362 | unsigned long, |
279 | unsigned long), | 363 | unsigned long), |
280 | unsigned int irq) = { | 364 | unsigned int irq) = { |
281 | [MODE_ENABLE_REG] = intc_mode_field, | 365 | [MODE_ENABLE_REG] = intc_mode_field, |
282 | [MODE_MASK_REG] = intc_mode_zero, | 366 | [MODE_MASK_REG] = intc_mode_zero, |
283 | [MODE_DUAL_REG] = intc_mode_field, | 367 | [MODE_DUAL_REG] = intc_mode_field, |
@@ -285,9 +369,9 @@ static void (*intc_enable_fns[])(unsigned long addr, | |||
285 | [MODE_PCLR_REG] = intc_mode_prio, | 369 | [MODE_PCLR_REG] = intc_mode_prio, |
286 | }; | 370 | }; |
287 | 371 | ||
288 | static void (*intc_disable_fns[])(unsigned long addr, | 372 | static unsigned long (*intc_disable_fns[])(unsigned long addr, |
289 | unsigned long handle, | 373 | unsigned long handle, |
290 | void (*fn)(unsigned long, | 374 | unsigned long (*fn)(unsigned long, |
291 | unsigned long, | 375 | unsigned long, |
292 | unsigned long), | 376 | unsigned long), |
293 | unsigned int irq) = { | 377 | unsigned int irq) = { |
@@ -421,12 +505,13 @@ static void intc_disable(unsigned int irq) | |||
421 | } | 505 | } |
422 | } | 506 | } |
423 | 507 | ||
424 | static void (*intc_enable_noprio_fns[])(unsigned long addr, | 508 | static unsigned long |
425 | unsigned long handle, | 509 | (*intc_enable_noprio_fns[])(unsigned long addr, |
426 | void (*fn)(unsigned long, | 510 | unsigned long handle, |
427 | unsigned long, | 511 | unsigned long (*fn)(unsigned long, |
428 | unsigned long), | 512 | unsigned long, |
429 | unsigned int irq) = { | 513 | unsigned long), |
514 | unsigned int irq) = { | ||
430 | [MODE_ENABLE_REG] = intc_mode_field, | 515 | [MODE_ENABLE_REG] = intc_mode_field, |
431 | [MODE_MASK_REG] = intc_mode_zero, | 516 | [MODE_MASK_REG] = intc_mode_zero, |
432 | [MODE_DUAL_REG] = intc_mode_field, | 517 | [MODE_DUAL_REG] = intc_mode_field, |
@@ -439,8 +524,9 @@ static void intc_enable_disable(struct intc_desc_int *d, | |||
439 | { | 524 | { |
440 | unsigned long addr; | 525 | unsigned long addr; |
441 | unsigned int cpu; | 526 | unsigned int cpu; |
442 | void (*fn)(unsigned long, unsigned long, | 527 | unsigned long (*fn)(unsigned long, unsigned long, |
443 | void (*)(unsigned long, unsigned long, unsigned long), | 528 | unsigned long (*)(unsigned long, unsigned long, |
529 | unsigned long), | ||
444 | unsigned int); | 530 | unsigned int); |
445 | 531 | ||
446 | if (do_enable) { | 532 | if (do_enable) { |
@@ -861,6 +947,186 @@ unsigned int intc_irq_lookup(const char *chipname, intc_enum enum_id) | |||
861 | } | 947 | } |
862 | EXPORT_SYMBOL_GPL(intc_irq_lookup); | 948 | EXPORT_SYMBOL_GPL(intc_irq_lookup); |
863 | 949 | ||
950 | static int add_virq_to_pirq(unsigned int irq, unsigned int virq) | ||
951 | { | ||
952 | struct intc_virq_list **last, *entry; | ||
953 | struct irq_desc *desc = irq_to_desc(irq); | ||
954 | |||
955 | /* scan for duplicates */ | ||
956 | last = (struct intc_virq_list **)&desc->handler_data; | ||
957 | for_each_virq(entry, desc->handler_data) { | ||
958 | if (entry->irq == virq) | ||
959 | return 0; | ||
960 | last = &entry->next; | ||
961 | } | ||
962 | |||
963 | entry = kzalloc(sizeof(struct intc_virq_list), GFP_ATOMIC); | ||
964 | if (!entry) { | ||
965 | pr_err("can't allocate VIRQ mapping for %d\n", virq); | ||
966 | return -ENOMEM; | ||
967 | } | ||
968 | |||
969 | entry->irq = virq; | ||
970 | |||
971 | *last = entry; | ||
972 | |||
973 | return 0; | ||
974 | } | ||
975 | |||
976 | static void intc_virq_handler(unsigned int irq, struct irq_desc *desc) | ||
977 | { | ||
978 | struct intc_virq_list *entry, *vlist = get_irq_data(irq); | ||
979 | struct intc_desc_int *d = get_intc_desc(irq); | ||
980 | |||
981 | desc->chip->mask_ack(irq); | ||
982 | |||
983 | for_each_virq(entry, vlist) { | ||
984 | unsigned long addr, handle; | ||
985 | |||
986 | handle = (unsigned long)get_irq_data(entry->irq); | ||
987 | addr = INTC_REG(d, _INTC_ADDR_E(handle), 0); | ||
988 | |||
989 | if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0)) | ||
990 | generic_handle_irq(entry->irq); | ||
991 | } | ||
992 | |||
993 | desc->chip->unmask(irq); | ||
994 | } | ||
995 | |||
996 | static unsigned long __init intc_subgroup_data(struct intc_subgroup *subgroup, | ||
997 | struct intc_desc_int *d, | ||
998 | unsigned int index) | ||
999 | { | ||
1000 | unsigned int fn = REG_FN_TEST_BASE + (subgroup->reg_width >> 3) - 1; | ||
1001 | |||
1002 | return _INTC_MK(fn, MODE_ENABLE_REG, intc_get_reg(d, subgroup->reg), | ||
1003 | 0, 1, (subgroup->reg_width - 1) - index); | ||
1004 | } | ||
1005 | |||
1006 | #define INTC_TAG_VIRQ_NEEDS_ALLOC 0 | ||
1007 | |||
1008 | static void __init intc_subgroup_init_one(struct intc_desc *desc, | ||
1009 | struct intc_desc_int *d, | ||
1010 | struct intc_subgroup *subgroup) | ||
1011 | { | ||
1012 | struct intc_map_entry *mapped; | ||
1013 | unsigned int pirq; | ||
1014 | unsigned long flags; | ||
1015 | int i; | ||
1016 | |||
1017 | mapped = radix_tree_lookup(&d->tree, subgroup->parent_id); | ||
1018 | if (!mapped) { | ||
1019 | WARN_ON(1); | ||
1020 | return; | ||
1021 | } | ||
1022 | |||
1023 | pirq = mapped - intc_irq_xlate; | ||
1024 | |||
1025 | spin_lock_irqsave(&d->lock, flags); | ||
1026 | |||
1027 | for (i = 0; i < ARRAY_SIZE(subgroup->enum_ids); i++) { | ||
1028 | struct intc_subgroup_entry *entry; | ||
1029 | int err; | ||
1030 | |||
1031 | if (!subgroup->enum_ids[i]) | ||
1032 | continue; | ||
1033 | |||
1034 | entry = kmalloc(sizeof(*entry), GFP_NOWAIT); | ||
1035 | if (!entry) | ||
1036 | break; | ||
1037 | |||
1038 | entry->pirq = pirq; | ||
1039 | entry->enum_id = subgroup->enum_ids[i]; | ||
1040 | entry->handle = intc_subgroup_data(subgroup, d, i); | ||
1041 | |||
1042 | err = radix_tree_insert(&d->tree, entry->enum_id, entry); | ||
1043 | if (unlikely(err < 0)) | ||
1044 | break; | ||
1045 | |||
1046 | radix_tree_tag_set(&d->tree, entry->enum_id, | ||
1047 | INTC_TAG_VIRQ_NEEDS_ALLOC); | ||
1048 | } | ||
1049 | |||
1050 | spin_unlock_irqrestore(&d->lock, flags); | ||
1051 | } | ||
1052 | |||
1053 | static void __init intc_subgroup_init(struct intc_desc *desc, | ||
1054 | struct intc_desc_int *d) | ||
1055 | { | ||
1056 | int i; | ||
1057 | |||
1058 | if (!desc->hw.subgroups) | ||
1059 | return; | ||
1060 | |||
1061 | for (i = 0; i < desc->hw.nr_subgroups; i++) | ||
1062 | intc_subgroup_init_one(desc, d, desc->hw.subgroups + i); | ||
1063 | } | ||
1064 | |||
1065 | static void __init intc_subgroup_map(struct intc_desc_int *d) | ||
1066 | { | ||
1067 | struct intc_subgroup_entry *entries[32]; | ||
1068 | unsigned long flags; | ||
1069 | unsigned int nr_found; | ||
1070 | int i; | ||
1071 | |||
1072 | spin_lock_irqsave(&d->lock, flags); | ||
1073 | |||
1074 | restart: | ||
1075 | nr_found = radix_tree_gang_lookup_tag_slot(&d->tree, | ||
1076 | (void ***)entries, 0, ARRAY_SIZE(entries), | ||
1077 | INTC_TAG_VIRQ_NEEDS_ALLOC); | ||
1078 | |||
1079 | for (i = 0; i < nr_found; i++) { | ||
1080 | struct intc_subgroup_entry *entry; | ||
1081 | int irq; | ||
1082 | |||
1083 | entry = radix_tree_deref_slot((void **)entries[i]); | ||
1084 | if (unlikely(!entry)) | ||
1085 | continue; | ||
1086 | if (unlikely(entry == RADIX_TREE_RETRY)) | ||
1087 | goto restart; | ||
1088 | |||
1089 | irq = create_irq(); | ||
1090 | if (unlikely(irq < 0)) { | ||
1091 | pr_err("no more free IRQs, bailing..\n"); | ||
1092 | break; | ||
1093 | } | ||
1094 | |||
1095 | pr_info("Setting up a chained VIRQ from %d -> %d\n", | ||
1096 | irq, entry->pirq); | ||
1097 | |||
1098 | spin_lock(&xlate_lock); | ||
1099 | intc_irq_xlate[irq].desc = d; | ||
1100 | intc_irq_xlate[irq].enum_id = entry->enum_id; | ||
1101 | spin_unlock(&xlate_lock); | ||
1102 | |||
1103 | set_irq_chip_and_handler_name(irq, get_irq_chip(entry->pirq), | ||
1104 | handle_simple_irq, "virq"); | ||
1105 | set_irq_chip_data(irq, get_irq_chip_data(entry->pirq)); | ||
1106 | |||
1107 | set_irq_data(irq, (void *)entry->handle); | ||
1108 | |||
1109 | set_irq_chained_handler(entry->pirq, intc_virq_handler); | ||
1110 | add_virq_to_pirq(entry->pirq, irq); | ||
1111 | |||
1112 | radix_tree_tag_clear(&d->tree, entry->enum_id, | ||
1113 | INTC_TAG_VIRQ_NEEDS_ALLOC); | ||
1114 | radix_tree_replace_slot((void **)entries[i], | ||
1115 | &intc_irq_xlate[irq]); | ||
1116 | } | ||
1117 | |||
1118 | spin_unlock_irqrestore(&d->lock, flags); | ||
1119 | } | ||
1120 | |||
1121 | void __init intc_finalize(void) | ||
1122 | { | ||
1123 | struct intc_desc_int *d; | ||
1124 | |||
1125 | list_for_each_entry(d, &intc_list, list) | ||
1126 | if (radix_tree_tagged(&d->tree, INTC_TAG_VIRQ_NEEDS_ALLOC)) | ||
1127 | intc_subgroup_map(d); | ||
1128 | } | ||
1129 | |||
864 | static void __init intc_register_irq(struct intc_desc *desc, | 1130 | static void __init intc_register_irq(struct intc_desc *desc, |
865 | struct intc_desc_int *d, | 1131 | struct intc_desc_int *d, |
866 | intc_enum enum_id, | 1132 | intc_enum enum_id, |
@@ -868,6 +1134,7 @@ static void __init intc_register_irq(struct intc_desc *desc, | |||
868 | { | 1134 | { |
869 | struct intc_handle_int *hp; | 1135 | struct intc_handle_int *hp; |
870 | unsigned int data[2], primary; | 1136 | unsigned int data[2], primary; |
1137 | unsigned long flags; | ||
871 | 1138 | ||
872 | /* | 1139 | /* |
873 | * Register the IRQ position with the global IRQ map, then insert | 1140 | * Register the IRQ position with the global IRQ map, then insert |
@@ -875,9 +1142,9 @@ static void __init intc_register_irq(struct intc_desc *desc, | |||
875 | */ | 1142 | */ |
876 | set_bit(irq, intc_irq_map); | 1143 | set_bit(irq, intc_irq_map); |
877 | 1144 | ||
878 | mutex_lock(&irq_xlate_mutex); | 1145 | spin_lock_irqsave(&xlate_lock, flags); |
879 | radix_tree_insert(&d->tree, enum_id, &intc_irq_xlate[irq]); | 1146 | radix_tree_insert(&d->tree, enum_id, &intc_irq_xlate[irq]); |
880 | mutex_unlock(&irq_xlate_mutex); | 1147 | spin_unlock_irqrestore(&xlate_lock, flags); |
881 | 1148 | ||
882 | /* | 1149 | /* |
883 | * Prefer single interrupt source bitmap over other combinations: | 1150 | * Prefer single interrupt source bitmap over other combinations: |
@@ -957,9 +1224,7 @@ static void __init intc_register_irq(struct intc_desc *desc, | |||
957 | dist_handle[irq] = intc_dist_data(desc, d, enum_id); | 1224 | dist_handle[irq] = intc_dist_data(desc, d, enum_id); |
958 | #endif | 1225 | #endif |
959 | 1226 | ||
960 | #ifdef CONFIG_ARM | 1227 | activate_irq(irq); |
961 | set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */ | ||
962 | #endif | ||
963 | } | 1228 | } |
964 | 1229 | ||
965 | static unsigned int __init save_reg(struct intc_desc_int *d, | 1230 | static unsigned int __init save_reg(struct intc_desc_int *d, |
@@ -980,11 +1245,6 @@ static unsigned int __init save_reg(struct intc_desc_int *d, | |||
980 | return 0; | 1245 | return 0; |
981 | } | 1246 | } |
982 | 1247 | ||
983 | static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc) | ||
984 | { | ||
985 | generic_handle_irq((unsigned int)get_irq_data(irq)); | ||
986 | } | ||
987 | |||
988 | int __init register_intc_controller(struct intc_desc *desc) | 1248 | int __init register_intc_controller(struct intc_desc *desc) |
989 | { | 1249 | { |
990 | unsigned int i, k, smp; | 1250 | unsigned int i, k, smp; |
@@ -1000,7 +1260,11 @@ int __init register_intc_controller(struct intc_desc *desc) | |||
1000 | goto err0; | 1260 | goto err0; |
1001 | 1261 | ||
1002 | INIT_LIST_HEAD(&d->list); | 1262 | INIT_LIST_HEAD(&d->list); |
1003 | list_add(&d->list, &intc_list); | 1263 | list_add_tail(&d->list, &intc_list); |
1264 | |||
1265 | spin_lock_init(&d->lock); | ||
1266 | |||
1267 | d->index = nr_intc_controllers; | ||
1004 | 1268 | ||
1005 | if (desc->num_resources) { | 1269 | if (desc->num_resources) { |
1006 | d->nr_windows = desc->num_resources; | 1270 | d->nr_windows = desc->num_resources; |
@@ -1029,6 +1293,7 @@ int __init register_intc_controller(struct intc_desc *desc) | |||
1029 | d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0; | 1293 | d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0; |
1030 | d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0; | 1294 | d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0; |
1031 | d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0; | 1295 | d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0; |
1296 | d->nr_reg += hw->subgroups ? hw->nr_subgroups : 0; | ||
1032 | 1297 | ||
1033 | d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT); | 1298 | d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT); |
1034 | if (!d->reg) | 1299 | if (!d->reg) |
@@ -1075,6 +1340,11 @@ int __init register_intc_controller(struct intc_desc *desc) | |||
1075 | k += save_reg(d, k, hw->sense_regs[i].reg, 0); | 1340 | k += save_reg(d, k, hw->sense_regs[i].reg, 0); |
1076 | } | 1341 | } |
1077 | 1342 | ||
1343 | if (hw->subgroups) | ||
1344 | for (i = 0; i < hw->nr_subgroups; i++) | ||
1345 | if (hw->subgroups[i].reg) | ||
1346 | k+= save_reg(d, k, hw->subgroups[i].reg, 0); | ||
1347 | |||
1078 | d->chip.name = desc->name; | 1348 | d->chip.name = desc->name; |
1079 | d->chip.mask = intc_disable; | 1349 | d->chip.mask = intc_disable; |
1080 | d->chip.unmask = intc_enable; | 1350 | d->chip.unmask = intc_enable; |
@@ -1109,6 +1379,7 @@ int __init register_intc_controller(struct intc_desc *desc) | |||
1109 | for (i = 0; i < hw->nr_vectors; i++) { | 1379 | for (i = 0; i < hw->nr_vectors; i++) { |
1110 | struct intc_vect *vect = hw->vectors + i; | 1380 | struct intc_vect *vect = hw->vectors + i; |
1111 | unsigned int irq = evt2irq(vect->vect); | 1381 | unsigned int irq = evt2irq(vect->vect); |
1382 | unsigned long flags; | ||
1112 | struct irq_desc *irq_desc; | 1383 | struct irq_desc *irq_desc; |
1113 | 1384 | ||
1114 | if (!vect->enum_id) | 1385 | if (!vect->enum_id) |
@@ -1120,8 +1391,10 @@ int __init register_intc_controller(struct intc_desc *desc) | |||
1120 | continue; | 1391 | continue; |
1121 | } | 1392 | } |
1122 | 1393 | ||
1394 | spin_lock_irqsave(&xlate_lock, flags); | ||
1123 | intc_irq_xlate[irq].enum_id = vect->enum_id; | 1395 | intc_irq_xlate[irq].enum_id = vect->enum_id; |
1124 | intc_irq_xlate[irq].desc = d; | 1396 | intc_irq_xlate[irq].desc = d; |
1397 | spin_unlock_irqrestore(&xlate_lock, flags); | ||
1125 | 1398 | ||
1126 | intc_register_irq(desc, d, vect->enum_id, irq); | 1399 | intc_register_irq(desc, d, vect->enum_id, irq); |
1127 | 1400 | ||
@@ -1152,10 +1425,14 @@ int __init register_intc_controller(struct intc_desc *desc) | |||
1152 | } | 1425 | } |
1153 | } | 1426 | } |
1154 | 1427 | ||
1428 | intc_subgroup_init(desc, d); | ||
1429 | |||
1155 | /* enable bits matching force_enable after registering irqs */ | 1430 | /* enable bits matching force_enable after registering irqs */ |
1156 | if (desc->force_enable) | 1431 | if (desc->force_enable) |
1157 | intc_enable_disable_enum(desc, d, desc->force_enable, 1); | 1432 | intc_enable_disable_enum(desc, d, desc->force_enable, 1); |
1158 | 1433 | ||
1434 | nr_intc_controllers++; | ||
1435 | |||
1159 | return 0; | 1436 | return 0; |
1160 | err5: | 1437 | err5: |
1161 | kfree(d->prio); | 1438 | kfree(d->prio); |
@@ -1353,7 +1630,6 @@ static int __init register_intc_sysdevs(void) | |||
1353 | { | 1630 | { |
1354 | struct intc_desc_int *d; | 1631 | struct intc_desc_int *d; |
1355 | int error; | 1632 | int error; |
1356 | int id = 0; | ||
1357 | 1633 | ||
1358 | error = sysdev_class_register(&intc_sysdev_class); | 1634 | error = sysdev_class_register(&intc_sysdev_class); |
1359 | #ifdef CONFIG_INTC_USERIMASK | 1635 | #ifdef CONFIG_INTC_USERIMASK |
@@ -1363,7 +1639,7 @@ static int __init register_intc_sysdevs(void) | |||
1363 | #endif | 1639 | #endif |
1364 | if (!error) { | 1640 | if (!error) { |
1365 | list_for_each_entry(d, &intc_list, list) { | 1641 | list_for_each_entry(d, &intc_list, list) { |
1366 | d->sysdev.id = id; | 1642 | d->sysdev.id = d->index; |
1367 | d->sysdev.cls = &intc_sysdev_class; | 1643 | d->sysdev.cls = &intc_sysdev_class; |
1368 | error = sysdev_register(&d->sysdev); | 1644 | error = sysdev_register(&d->sysdev); |
1369 | if (error == 0) | 1645 | if (error == 0) |
@@ -1371,8 +1647,6 @@ static int __init register_intc_sysdevs(void) | |||
1371 | &attr_name); | 1647 | &attr_name); |
1372 | if (error) | 1648 | if (error) |
1373 | break; | 1649 | break; |
1374 | |||
1375 | id++; | ||
1376 | } | 1650 | } |
1377 | } | 1651 | } |
1378 | 1652 | ||
@@ -1422,9 +1696,7 @@ out_unlock: | |||
1422 | 1696 | ||
1423 | if (irq > 0) { | 1697 | if (irq > 0) { |
1424 | dynamic_irq_init(irq); | 1698 | dynamic_irq_init(irq); |
1425 | #ifdef CONFIG_ARM | 1699 | activate_irq(irq); |
1426 | set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */ | ||
1427 | #endif | ||
1428 | } | 1700 | } |
1429 | 1701 | ||
1430 | return irq; | 1702 | return irq; |