Diffstat (limited to 'drivers/sh/intc.c')
-rw-r--r--  drivers/sh/intc.c | 401
1 file changed, 329 insertions(+), 72 deletions(-)
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index 559b5fe9dc0f..94ad6bd86a00 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -2,6 +2,7 @@
  * Shared interrupt handling code for IPR and INTC2 types of IRQs.
  *
  * Copyright (C) 2007, 2008 Magnus Damm
 + * Copyright (C) 2009, 2010 Paul Mundt
  *
  * Based on intc2.c and ipr.c
  *
@@ -19,11 +20,14 @@
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/io.h>
+#include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/sh_intc.h>
 #include <linux/sysdev.h>
 #include <linux/list.h>
 #include <linux/topology.h>
+#include <linux/bitmap.h>
+#include <linux/cpumask.h>
 
 #define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
 	((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
@@ -59,6 +63,20 @@ struct intc_desc_int {
 
 static LIST_HEAD(intc_list);
 
+/*
+ * The intc_irq_map provides a global map of bound IRQ vectors for a
+ * given platform. Allocation of IRQs is either static through the CPU
+ * vector map, or dynamic in the case of board mux vectors or MSI.
+ *
+ * As this is a central point for all IRQ controllers on the system,
+ * each of the available sources is mapped out here. This combined with
+ * sparseirq makes it quite trivial to keep the vector map tightly packed
+ * when dynamically creating IRQs, as well as tying in to otherwise
+ * unused irq_desc positions in the sparse array.
+ */
+static DECLARE_BITMAP(intc_irq_map, NR_IRQS);
+static DEFINE_SPINLOCK(vector_lock);
+
 #ifdef CONFIG_SMP
 #define IS_SMP(x) x.smp
 #define INTC_REG(d, x, c) (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c))
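
The intc_irq_map/vector_lock pair above is the heart of the dynamic vector allocator added at the end of this patch. A minimal userspace model of that bookkeeping, for illustration only (all demo_* names are invented; the kernel uses DECLARE_BITMAP, find_first_zero_bit() and test_and_set_bit() under vector_lock instead):

#include <limits.h>
#include <stdio.h>

#define DEMO_NR_IRQS	64
#define DEMO_BITS	(sizeof(unsigned long) * CHAR_BIT)

static unsigned long demo_irq_map[(DEMO_NR_IRQS + DEMO_BITS - 1) / DEMO_BITS];

/* Claim the first clear bit; returns DEMO_NR_IRQS when the map is full. */
static unsigned int demo_alloc_vector(void)
{
	unsigned int i;

	for (i = 0; i < DEMO_NR_IRQS; i++) {
		unsigned long bit = 1UL << (i % DEMO_BITS);

		if (!(demo_irq_map[i / DEMO_BITS] & bit)) {
			demo_irq_map[i / DEMO_BITS] |= bit;
			return i;
		}
	}

	return DEMO_NR_IRQS;
}

int main(void)
{
	demo_irq_map[0] = 0x7;	/* pretend vectors 0-2 are statically bound */
	printf("allocated vector %u\n", demo_alloc_vector());	/* prints 3 */
	return 0;
}

Keeping the map packed from bit 0 upward is what lets sparseirq reuse otherwise idle irq_desc slots, as the comment in the hunk above notes.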
@@ -70,9 +88,7 @@ static LIST_HEAD(intc_list);
 #endif
 
 static unsigned int intc_prio_level[NR_IRQS];	/* for now */
-#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
 static unsigned long ack_handle[NR_IRQS];
-#endif
 
 static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
 {
@@ -220,6 +236,10 @@ static inline void _intc_enable(unsigned int irq, unsigned long handle)
 	unsigned int cpu;
 
 	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
+#ifdef CONFIG_SMP
+		if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
+			continue;
+#endif
 		addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
 		intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
 						    [_INTC_FN(handle)], irq);
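
The cpumask test inserted above means the per-CPU enable register is now only written for CPUs inside the IRQ's affinity mask. A rough standalone model of that gating (demo_* names invented; the real code tests cpumask_test_cpu() against irq_to_desc(irq)->affinity):

#include <stdio.h>

#define DEMO_NR_CPUS 4

static void demo_enable_irq(unsigned long affinity)
{
	unsigned int cpu;

	for (cpu = 0; cpu < DEMO_NR_CPUS; cpu++) {
		if (!(affinity & (1UL << cpu)))
			continue;	/* CPU not in the mask: skip its register */
		printf("write enable reg for cpu %u\n", cpu);
	}
}

int main(void)
{
	demo_enable_irq(0x5);	/* CPUs 0 and 2 only */
	return 0;
}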
@@ -239,18 +259,75 @@ static void intc_disable(unsigned int irq)
 	unsigned int cpu;
 
 	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
+#ifdef CONFIG_SMP
+		if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
+			continue;
+#endif
 		addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
 		intc_disable_fns[_INTC_MODE(handle)](addr, handle,intc_reg_fns\
 						    [_INTC_FN(handle)], irq);
 	}
 }
 
+static void (*intc_enable_noprio_fns[])(unsigned long addr,
+					unsigned long handle,
+					void (*fn)(unsigned long,
+						   unsigned long,
+						   unsigned long),
+					unsigned int irq) = {
+	[MODE_ENABLE_REG] = intc_mode_field,
+	[MODE_MASK_REG] = intc_mode_zero,
+	[MODE_DUAL_REG] = intc_mode_field,
+	[MODE_PRIO_REG] = intc_mode_field,
+	[MODE_PCLR_REG] = intc_mode_field,
+};
+
+static void intc_enable_disable(struct intc_desc_int *d,
+				unsigned long handle, int do_enable)
+{
+	unsigned long addr;
+	unsigned int cpu;
+	void (*fn)(unsigned long, unsigned long,
+		   void (*)(unsigned long, unsigned long, unsigned long),
+		   unsigned int);
+
+	if (do_enable) {
+		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
+			addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
+			fn = intc_enable_noprio_fns[_INTC_MODE(handle)];
+			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
+		}
+	} else {
+		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
+			addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
+			fn = intc_disable_fns[_INTC_MODE(handle)];
+			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
+		}
+	}
+}
+
 static int intc_set_wake(unsigned int irq, unsigned int on)
 {
 	return 0; /* allow wakeup, but setup hardware in intc_suspend() */
 }
 
-#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
+#ifdef CONFIG_SMP
+/*
+ * This is called with the irq desc lock held, so we don't require any
+ * additional locking here at the intc desc level. The affinity mask is
+ * later tested in the enable/disable paths.
+ */
+static int intc_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+	if (!cpumask_intersects(cpumask, cpu_online_mask))
+		return -1;
+
+	cpumask_copy(irq_to_desc(irq)->affinity, cpumask);
+
+	return 0;
+}
+#endif
+
 static void intc_mask_ack(unsigned int irq)
 {
 	struct intc_desc_int *d = get_intc_desc(irq);
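
intc_enable_noprio_fns above follows the same idiom as the driver's existing intc_enable_fns/intc_disable_fns tables: an array of function pointers indexed by the mode bits that _INTC_MK() packs into the handle. A stripped-down sketch of the pattern (demo_* names invented for illustration):

#include <stdio.h>

enum demo_mode { DEMO_MODE_FIELD, DEMO_MODE_ZERO, DEMO_NR_MODES };

static void demo_mode_field(unsigned long addr)
{
	printf("field write @ %#lx\n", addr);
}

static void demo_mode_zero(unsigned long addr)
{
	printf("zero write  @ %#lx\n", addr);
}

/* Designated initializers keep the table in sync with the enum. */
static void (*demo_fns[DEMO_NR_MODES])(unsigned long) = {
	[DEMO_MODE_FIELD]	= demo_mode_field,
	[DEMO_MODE_ZERO]	= demo_mode_zero,
};

int main(void)
{
	unsigned int mode = DEMO_MODE_ZERO;	/* as decoded from a handle */

	demo_fns[mode](0xfe410000UL);		/* dispatch without a switch */
	return 0;
}

The separate "noprio" table exists because a plain enable must not touch priority fields; MODE_MASK_REG entries write zeroes instead of the stored field value.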
@@ -282,7 +359,6 @@ static void intc_mask_ack(unsigned int irq)
 		}
 	}
 }
-#endif
 
 static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
 					     unsigned int nr_hp,
@@ -388,11 +464,11 @@ static unsigned int __init intc_get_reg(struct intc_desc_int *d,
 static intc_enum __init intc_grp_id(struct intc_desc *desc,
 				    intc_enum enum_id)
 {
-	struct intc_group *g = desc->groups;
+	struct intc_group *g = desc->hw.groups;
 	unsigned int i, j;
 
-	for (i = 0; g && enum_id && i < desc->nr_groups; i++) {
-		g = desc->groups + i;
+	for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) {
+		g = desc->hw.groups + i;
 
 		for (j = 0; g->enum_ids[j]; j++) {
 			if (g->enum_ids[j] != enum_id)
@@ -405,19 +481,21 @@ static intc_enum __init intc_grp_id(struct intc_desc *desc,
 	return 0;
 }
 
-static unsigned int __init intc_mask_data(struct intc_desc *desc,
-					  struct intc_desc_int *d,
-					  intc_enum enum_id, int do_grps)
+static unsigned int __init _intc_mask_data(struct intc_desc *desc,
+					   struct intc_desc_int *d,
+					   intc_enum enum_id,
+					   unsigned int *reg_idx,
+					   unsigned int *fld_idx)
 {
-	struct intc_mask_reg *mr = desc->mask_regs;
-	unsigned int i, j, fn, mode;
+	struct intc_mask_reg *mr = desc->hw.mask_regs;
+	unsigned int fn, mode;
 	unsigned long reg_e, reg_d;
 
-	for (i = 0; mr && enum_id && i < desc->nr_mask_regs; i++) {
-		mr = desc->mask_regs + i;
+	while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) {
+		mr = desc->hw.mask_regs + *reg_idx;
 
-		for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
-			if (mr->enum_ids[j] != enum_id)
+		for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) {
+			if (mr->enum_ids[*fld_idx] != enum_id)
 				continue;
 
 			if (mr->set_reg && mr->clr_reg) {
@@ -443,29 +521,49 @@ static unsigned int __init intc_mask_data(struct intc_desc *desc,
 					intc_get_reg(d, reg_e),
 					intc_get_reg(d, reg_d),
 					1,
-					(mr->reg_width - 1) - j);
+					(mr->reg_width - 1) - *fld_idx);
 		}
+
+		*fld_idx = 0;
+		(*reg_idx)++;
 	}
 
+	return 0;
+}
+
+static unsigned int __init intc_mask_data(struct intc_desc *desc,
+					  struct intc_desc_int *d,
+					  intc_enum enum_id, int do_grps)
+{
+	unsigned int i = 0;
+	unsigned int j = 0;
+	unsigned int ret;
+
+	ret = _intc_mask_data(desc, d, enum_id, &i, &j);
+	if (ret)
+		return ret;
+
 	if (do_grps)
 		return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0);
 
 	return 0;
 }
 
-static unsigned int __init intc_prio_data(struct intc_desc *desc,
-					  struct intc_desc_int *d,
-					  intc_enum enum_id, int do_grps)
+static unsigned int __init _intc_prio_data(struct intc_desc *desc,
+					   struct intc_desc_int *d,
+					   intc_enum enum_id,
+					   unsigned int *reg_idx,
+					   unsigned int *fld_idx)
 {
-	struct intc_prio_reg *pr = desc->prio_regs;
-	unsigned int i, j, fn, mode, bit;
+	struct intc_prio_reg *pr = desc->hw.prio_regs;
+	unsigned int fn, n, mode, bit;
 	unsigned long reg_e, reg_d;
 
-	for (i = 0; pr && enum_id && i < desc->nr_prio_regs; i++) {
-		pr = desc->prio_regs + i;
+	while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) {
+		pr = desc->hw.prio_regs + *reg_idx;
 
-		for (j = 0; j < ARRAY_SIZE(pr->enum_ids); j++) {
-			if (pr->enum_ids[j] != enum_id)
+		for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) {
+			if (pr->enum_ids[*fld_idx] != enum_id)
 				continue;
 
 			if (pr->set_reg && pr->clr_reg) {
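
The refactoring above turns the old self-contained search into _intc_mask_data()/_intc_prio_data(), where the caller owns the register/field cursor. A repeated call resumes scanning behind the previous match, which is what lets intc_enable_disable_enum() (added below) walk every field tied to a single enum id. A self-contained sketch of the cursor pattern (demo_* names invented):

#include <stdio.h>

#define DEMO_REGS 2
#define DEMO_FLDS 4

static int demo_ids[DEMO_REGS][DEMO_FLDS] = {
	{ 1, 7, 3, 7 },
	{ 7, 2, 0, 0 },
};

/* Resumable search: the cursor is parked on the hit so the caller can
 * step past it and call again, mirroring _intc_mask_data() above.
 */
static int demo_find(int id, unsigned int *reg, unsigned int *fld)
{
	while (*reg < DEMO_REGS) {
		for (; *fld < DEMO_FLDS; (*fld)++)
			if (demo_ids[*reg][*fld] == id)
				return 1;
		*fld = 0;
		(*reg)++;
	}

	return 0;
}

int main(void)
{
	unsigned int reg = 0, fld = 0;

	/* Mirrors the do { ... j++; } while (data) loops in the driver. */
	while (demo_find(7, &reg, &fld)) {
		printf("id 7 at reg %u field %u\n", reg, fld);
		fld++;	/* step past the match before resuming */
	}

	return 0;
}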
@@ -483,35 +581,79 @@ static unsigned int __init intc_prio_data(struct intc_desc *desc,
 			}
 
 			fn += (pr->reg_width >> 3) - 1;
+			n = *fld_idx + 1;
 
-			BUG_ON((j + 1) * pr->field_width > pr->reg_width);
+			BUG_ON(n * pr->field_width > pr->reg_width);
 
-			bit = pr->reg_width - ((j + 1) * pr->field_width);
+			bit = pr->reg_width - (n * pr->field_width);
 
 			return _INTC_MK(fn, mode,
 					intc_get_reg(d, reg_e),
 					intc_get_reg(d, reg_d),
 					pr->field_width, bit);
 		}
+
+		*fld_idx = 0;
+		(*reg_idx)++;
 	}
 
+	return 0;
+}
+
+static unsigned int __init intc_prio_data(struct intc_desc *desc,
+					  struct intc_desc_int *d,
+					  intc_enum enum_id, int do_grps)
+{
+	unsigned int i = 0;
+	unsigned int j = 0;
+	unsigned int ret;
+
+	ret = _intc_prio_data(desc, d, enum_id, &i, &j);
+	if (ret)
+		return ret;
+
 	if (do_grps)
 		return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0);
 
 	return 0;
 }
 
-#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
+static void __init intc_enable_disable_enum(struct intc_desc *desc,
+					    struct intc_desc_int *d,
+					    intc_enum enum_id, int enable)
+{
+	unsigned int i, j, data;
+
+	/* go through and enable/disable all mask bits */
+	i = j = 0;
+	do {
+		data = _intc_mask_data(desc, d, enum_id, &i, &j);
+		if (data)
+			intc_enable_disable(d, data, enable);
+		j++;
+	} while (data);
+
+	/* go through and enable/disable all priority fields */
+	i = j = 0;
+	do {
+		data = _intc_prio_data(desc, d, enum_id, &i, &j);
+		if (data)
+			intc_enable_disable(d, data, enable);
+
+		j++;
+	} while (data);
+}
+
 static unsigned int __init intc_ack_data(struct intc_desc *desc,
 					 struct intc_desc_int *d,
 					 intc_enum enum_id)
 {
-	struct intc_mask_reg *mr = desc->ack_regs;
+	struct intc_mask_reg *mr = desc->hw.ack_regs;
 	unsigned int i, j, fn, mode;
 	unsigned long reg_e, reg_d;
 
-	for (i = 0; mr && enum_id && i < desc->nr_ack_regs; i++) {
-		mr = desc->ack_regs + i;
+	for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) {
+		mr = desc->hw.ack_regs + i;
 
 		for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
 			if (mr->enum_ids[j] != enum_id)
@@ -533,17 +675,16 @@ static unsigned int __init intc_ack_data(struct intc_desc *desc,
 
 	return 0;
 }
-#endif
 
 static unsigned int __init intc_sense_data(struct intc_desc *desc,
 					   struct intc_desc_int *d,
 					   intc_enum enum_id)
 {
-	struct intc_sense_reg *sr = desc->sense_regs;
+	struct intc_sense_reg *sr = desc->hw.sense_regs;
 	unsigned int i, j, fn, bit;
 
-	for (i = 0; sr && enum_id && i < desc->nr_sense_regs; i++) {
-		sr = desc->sense_regs + i;
+	for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) {
+		sr = desc->hw.sense_regs + i;
 
 		for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
 			if (sr->enum_ids[j] != enum_id)
@@ -572,6 +713,11 @@ static void __init intc_register_irq(struct intc_desc *desc,
 	struct intc_handle_int *hp;
 	unsigned int data[2], primary;
 
+	/*
+	 * Register the IRQ position with the global IRQ map
+	 */
+	set_bit(irq, intc_irq_map);
+
 	/* Prefer single interrupt source bitmap over other combinations:
 	 * 1. bitmap, single interrupt source
 	 * 2. priority, single interrupt source
@@ -641,9 +787,11 @@ static void __init intc_register_irq(struct intc_desc *desc,
 	/* irq should be disabled by default */
 	d->chip.mask(irq);
 
-#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
-	if (desc->ack_regs)
+	if (desc->hw.ack_regs)
 		ack_handle[irq] = intc_ack_data(desc, d, enum_id);
+
+#ifdef CONFIG_ARM
+	set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */
 #endif
 }
 
@@ -671,6 +819,7 @@ static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
 void __init register_intc_controller(struct intc_desc *desc)
 {
 	unsigned int i, k, smp;
+	struct intc_hw_desc *hw = &desc->hw;
 	struct intc_desc_int *d;
 
 	d = kzalloc(sizeof(*d), GFP_NOWAIT);
@@ -678,43 +827,42 @@ void __init register_intc_controller(struct intc_desc *desc)
 	INIT_LIST_HEAD(&d->list);
 	list_add(&d->list, &intc_list);
 
-	d->nr_reg = desc->mask_regs ? desc->nr_mask_regs * 2 : 0;
-	d->nr_reg += desc->prio_regs ? desc->nr_prio_regs * 2 : 0;
-	d->nr_reg += desc->sense_regs ? desc->nr_sense_regs : 0;
+	d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
+	d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
+	d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
+	d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;
 
-#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
-	d->nr_reg += desc->ack_regs ? desc->nr_ack_regs : 0;
-#endif
 	d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
 #ifdef CONFIG_SMP
 	d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
 #endif
 	k = 0;
 
-	if (desc->mask_regs) {
-		for (i = 0; i < desc->nr_mask_regs; i++) {
-			smp = IS_SMP(desc->mask_regs[i]);
-			k += save_reg(d, k, desc->mask_regs[i].set_reg, smp);
-			k += save_reg(d, k, desc->mask_regs[i].clr_reg, smp);
+	if (hw->mask_regs) {
+		for (i = 0; i < hw->nr_mask_regs; i++) {
+			smp = IS_SMP(hw->mask_regs[i]);
+			k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
+			k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
 		}
 	}
 
-	if (desc->prio_regs) {
-		d->prio = kzalloc(desc->nr_vectors * sizeof(*d->prio), GFP_NOWAIT);
+	if (hw->prio_regs) {
+		d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
+				  GFP_NOWAIT);
 
-		for (i = 0; i < desc->nr_prio_regs; i++) {
-			smp = IS_SMP(desc->prio_regs[i]);
-			k += save_reg(d, k, desc->prio_regs[i].set_reg, smp);
-			k += save_reg(d, k, desc->prio_regs[i].clr_reg, smp);
+		for (i = 0; i < hw->nr_prio_regs; i++) {
+			smp = IS_SMP(hw->prio_regs[i]);
+			k += save_reg(d, k, hw->prio_regs[i].set_reg, smp);
+			k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp);
 		}
 	}
 
-	if (desc->sense_regs) {
-		d->sense = kzalloc(desc->nr_vectors * sizeof(*d->sense), GFP_NOWAIT);
+	if (hw->sense_regs) {
+		d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
+				   GFP_NOWAIT);
 
-		for (i = 0; i < desc->nr_sense_regs; i++) {
-			k += save_reg(d, k, desc->sense_regs[i].reg, 0);
-		}
+		for (i = 0; i < hw->nr_sense_regs; i++)
+			k += save_reg(d, k, hw->sense_regs[i].reg, 0);
 	}
 
 	d->chip.name = desc->name;
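
With the #ifdef gone, the ack register slots are now counted unconditionally alongside the others. A worked example of the sizing arithmetic (register counts invented for illustration): mask and priority registers contribute a set/clear pair of slots each, sense and ack registers one slot apiece.

#include <stdio.h>

int main(void)
{
	unsigned int nr_mask = 4, nr_prio = 6, nr_sense = 2, nr_ack = 1;
	unsigned int nr_reg;

	nr_reg  = nr_mask * 2;	/* set_reg + clr_reg */
	nr_reg += nr_prio * 2;	/* set_reg + clr_reg */
	nr_reg += nr_sense;	/* one reg each */
	nr_reg += nr_ack;	/* one set_reg each */

	/* 4*2 + 6*2 + 2 + 1 = 23 slots, well under the BUG_ON(k > 256) cap */
	printf("%u register slots\n", nr_reg);
	return 0;
}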
@@ -726,21 +874,30 @@ void __init register_intc_controller(struct intc_desc *desc)
 	d->chip.shutdown = intc_disable;
 	d->chip.set_type = intc_set_sense;
 	d->chip.set_wake = intc_set_wake;
+#ifdef CONFIG_SMP
+	d->chip.set_affinity = intc_set_affinity;
+#endif
 
-#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
-	if (desc->ack_regs) {
-		for (i = 0; i < desc->nr_ack_regs; i++)
-			k += save_reg(d, k, desc->ack_regs[i].set_reg, 0);
+	if (hw->ack_regs) {
+		for (i = 0; i < hw->nr_ack_regs; i++)
+			k += save_reg(d, k, hw->ack_regs[i].set_reg, 0);
 
 		d->chip.mask_ack = intc_mask_ack;
 	}
-#endif
+
+	/* disable bits matching force_disable before registering irqs */
+	if (desc->force_disable)
+		intc_enable_disable_enum(desc, d, desc->force_disable, 0);
+
+	/* disable bits matching force_enable before registering irqs */
+	if (desc->force_enable)
+		intc_enable_disable_enum(desc, d, desc->force_enable, 0);
 
 	BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */
 
 	/* register the vectors one by one */
-	for (i = 0; i < desc->nr_vectors; i++) {
-		struct intc_vect *vect = desc->vectors + i;
+	for (i = 0; i < hw->nr_vectors; i++) {
+		struct intc_vect *vect = hw->vectors + i;
 		unsigned int irq = evt2irq(vect->vect);
 		struct irq_desc *irq_desc;
 
@@ -755,8 +912,8 @@ void __init register_intc_controller(struct intc_desc *desc)
 
 		intc_register_irq(desc, d, vect->enum_id, irq);
 
-		for (k = i + 1; k < desc->nr_vectors; k++) {
-			struct intc_vect *vect2 = desc->vectors + k;
+		for (k = i + 1; k < hw->nr_vectors; k++) {
+			struct intc_vect *vect2 = hw->vectors + k;
 			unsigned int irq2 = evt2irq(vect2->vect);
 
 			if (vect->enum_id != vect2->enum_id)
@@ -776,11 +933,15 @@ void __init register_intc_controller(struct intc_desc *desc)
 			vect2->enum_id = 0;
 
 			/* redirect this interrupts to the first one */
-			set_irq_chip_and_handler_name(irq2, &d->chip,
-					intc_redirect_irq, "redirect");
+			set_irq_chip(irq2, &dummy_irq_chip);
+			set_irq_chained_handler(irq2, intc_redirect_irq);
 			set_irq_data(irq2, (void *)irq);
 		}
 	}
+
+	/* enable bits matching force_enable after registering irqs */
+	if (desc->force_enable)
+		intc_enable_disable_enum(desc, d, desc->force_enable, 1);
 }
 
 static int intc_suspend(struct sys_device *dev, pm_message_t state)
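
Taken together with the hunk at +874 above, the force_enable sequence is: mask the matching bits before any vector is registered, then unmask them only once registration completes, while force_disable bits are simply left masked. A hypothetical platform descriptor wiring this up (DEMO_* enum ids and the demo_* tables are invented, and the INTC_HW_DESC initializer from <linux/sh_intc.h> is assumed):

/*
 * Hypothetical platform descriptor; not taken from any real SoC.
 */
static struct intc_desc demo_intc_desc __initdata = {
	.name		= "demo-intc",
	.force_enable	= DEMO_ALWAYS_ON,	/* unmasked after registration */
	.force_disable	= DEMO_NEVER_ON,	/* left masked permanently */
	.hw		= INTC_HW_DESC(demo_vectors, demo_groups,
				       demo_mask_regs, demo_prio_regs,
				       demo_sense_regs, NULL),
};

Board setup code would then hand this to register_intc_controller(), which performs the mask-early/unmask-late dance shown in the hunks above.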
@@ -797,6 +958,8 @@ static int intc_suspend(struct sys_device *dev, pm_message_t state)
 		if (d->state.event != PM_EVENT_FREEZE)
 			break;
 		for_each_irq_desc(irq, desc) {
+			if (desc->handle_irq == intc_redirect_irq)
+				continue;
 			if (desc->chip != &d->chip)
 				continue;
 			if (desc->status & IRQ_DISABLED)
@@ -856,5 +1019,99 @@ static int __init register_intc_sysdevs(void)
 
 	return error;
 }
-
 device_initcall(register_intc_sysdevs);
+
+/*
+ * Dynamic IRQ allocation and deallocation
+ */
+unsigned int create_irq_nr(unsigned int irq_want, int node)
+{
+	unsigned int irq = 0, new;
+	unsigned long flags;
+	struct irq_desc *desc;
+
+	spin_lock_irqsave(&vector_lock, flags);
+
+	/*
+	 * First try the wanted IRQ
+	 */
+	if (test_and_set_bit(irq_want, intc_irq_map) == 0) {
+		new = irq_want;
+	} else {
+		/* .. then fall back to scanning. */
+		new = find_first_zero_bit(intc_irq_map, nr_irqs);
+		if (unlikely(new == nr_irqs))
+			goto out_unlock;
+
+		__set_bit(new, intc_irq_map);
+	}
+
+	desc = irq_to_desc_alloc_node(new, node);
+	if (unlikely(!desc)) {
+		pr_info("can't get irq_desc for %d\n", new);
+		goto out_unlock;
+	}
+
+	desc = move_irq_desc(desc, node);
+	irq = new;
+
+out_unlock:
+	spin_unlock_irqrestore(&vector_lock, flags);
+
+	if (irq > 0) {
+		dynamic_irq_init(irq);
+#ifdef CONFIG_ARM
+		set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */
+#endif
+	}
+
+	return irq;
+}
+
+int create_irq(void)
+{
+	int nid = cpu_to_node(smp_processor_id());
+	int irq;
+
+	irq = create_irq_nr(NR_IRQS_LEGACY, nid);
+	if (irq == 0)
+		irq = -1;
+
+	return irq;
+}
+
+void destroy_irq(unsigned int irq)
+{
+	unsigned long flags;
+
+	dynamic_irq_cleanup(irq);
+
+	spin_lock_irqsave(&vector_lock, flags);
+	__clear_bit(irq, intc_irq_map);
+	spin_unlock_irqrestore(&vector_lock, flags);
+}
+
+int reserve_irq_vector(unsigned int irq)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&vector_lock, flags);
+	if (test_and_set_bit(irq, intc_irq_map))
+		ret = -EBUSY;
+	spin_unlock_irqrestore(&vector_lock, flags);
+
+	return ret;
+}
+
+void reserve_irq_legacy(void)
+{
+	unsigned long flags;
+	int i, j;
+
+	spin_lock_irqsave(&vector_lock, flags);
+	j = find_first_bit(intc_irq_map, nr_irqs);
+	for (i = 0; i < j; i++)
+		__set_bit(i, intc_irq_map);
+	spin_unlock_irqrestore(&vector_lock, flags);
+}
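
Finally, a hedged usage sketch of the new allocator as a board mux or MSI layer might consume it (demo_* names invented; create_irq() and destroy_irq() are the functions added above):

static int demo_setup_mux_irq(void)
{
	int irq = create_irq();	/* any free vector above the legacy range */

	if (irq < 0)
		return irq;

	/* ... set_irq_chip_and_handler(), request_irq(), etc. ... */

	return irq;
}

static void demo_teardown_mux_irq(unsigned int irq)
{
	destroy_irq(irq);	/* returns the vector to intc_irq_map */
}

Because create_irq_nr() scans from the first zero bit, freed vectors are reused immediately, keeping the sparse irq_desc array as dense as the comment at the top of the file promises.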