author		Russell King <rmk+kernel@arm.linux.org.uk>	2012-01-05 08:24:33 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2012-01-05 08:24:33 -0500
commit		2e0e943436912ffe0848ece58167edfe754edb96 (patch)
tree		b91919095c74742fa06e2105db6d859bee39b2b4 /arch/arm/common
parent		a32737e1ca650504f172292dd344eb64c02311f3 (diff)
parent		ef3a0bf5bfadbace156fa2a3b9c753df2de41df2 (diff)

Merge branch 'devel-stable' into for-linus

Conflicts:
	arch/arm/kernel/setup.c
	arch/arm/mach-shmobile/board-kota2.c

Diffstat (limited to 'arch/arm/common')
-rw-r--r--	arch/arm/common/Kconfig	  6
-rw-r--r--	arch/arm/common/gic.c	165
-rw-r--r--	arch/arm/common/vic.c	148
3 files changed, 265 insertions(+), 54 deletions(-)
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 74df9ca2be31..81a933eb0903 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -1,8 +1,14 @@
 config ARM_GIC
 	select IRQ_DOMAIN
+	select MULTI_IRQ_HANDLER
+	bool
+
+config GIC_NON_BANKED
 	bool
 
 config ARM_VIC
+	select IRQ_DOMAIN
+	select MULTI_IRQ_HANDLER
 	bool
 
 config ARM_VIC_NR
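
Both controllers now select IRQ_DOMAIN and MULTI_IRQ_HANDLER, so the low-level interrupt entry comes from a C handler hung off the machine descriptor rather than from per-machine assembly macros. A minimal sketch of how a board file might wire this up; the machine name, the example_* functions and the other fields are placeholders, not part of this patch:

	/* Hypothetical board file: only .handle_irq is the point here; the
	 * example_* helpers and board name are illustrative assumptions. */
	#include <asm/mach/arch.h>
	#include <asm/hardware/gic.h>

	MACHINE_START(EXAMPLE_BOARD, "Example GIC-based board")
		.init_irq	= example_init_irq,	/* calls gic_init()/gic_init_bases() */
		.handle_irq	= gic_handle_irq,	/* C entry point added in gic.c below */
		.init_machine	= example_init_machine,
	MACHINE_END
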
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 410a546060a2..b2dc2dd7f1df 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -40,13 +40,36 @@
 #include <linux/slab.h>
 
 #include <asm/irq.h>
+#include <asm/exception.h>
 #include <asm/mach/irq.h>
 #include <asm/hardware/gic.h>
 
-static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+union gic_base {
+	void __iomem *common_base;
+	void __percpu __iomem **percpu_base;
+};
 
-/* Address of GIC 0 CPU interface */
-void __iomem *gic_cpu_base_addr __read_mostly;
+struct gic_chip_data {
+	unsigned int irq_offset;
+	union gic_base dist_base;
+	union gic_base cpu_base;
+#ifdef CONFIG_CPU_PM
+	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
+	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
+	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
+	u32 __percpu *saved_ppi_enable;
+	u32 __percpu *saved_ppi_conf;
+#endif
+#ifdef CONFIG_IRQ_DOMAIN
+	struct irq_domain domain;
+#endif
+	unsigned int gic_irqs;
+#ifdef CONFIG_GIC_NON_BANKED
+	void __iomem *(*get_base)(union gic_base *);
+#endif
+};
+
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
 
 /*
  * Supported arch specific GIC irq extension.
@@ -67,16 +90,48 @@ struct irq_chip gic_arch_extn = {
 
 static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
 
+#ifdef CONFIG_GIC_NON_BANKED
+static void __iomem *gic_get_percpu_base(union gic_base *base)
+{
+	return *__this_cpu_ptr(base->percpu_base);
+}
+
+static void __iomem *gic_get_common_base(union gic_base *base)
+{
+	return base->common_base;
+}
+
+static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
+{
+	return data->get_base(&data->dist_base);
+}
+
+static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
+{
+	return data->get_base(&data->cpu_base);
+}
+
+static inline void gic_set_base_accessor(struct gic_chip_data *data,
+					 void __iomem *(*f)(union gic_base *))
+{
+	data->get_base = f;
+}
+#else
+#define gic_data_dist_base(d)	((d)->dist_base.common_base)
+#define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
+#define gic_set_base_accessor(d,f)
+#endif
+
 static inline void __iomem *gic_dist_base(struct irq_data *d)
 {
 	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
-	return gic_data->dist_base;
+	return gic_data_dist_base(gic_data);
 }
 
 static inline void __iomem *gic_cpu_base(struct irq_data *d)
 {
 	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
-	return gic_data->cpu_base;
+	return gic_data_cpu_base(gic_data);
 }
 
 static inline unsigned int gic_irq(struct irq_data *d)
@@ -215,6 +270,32 @@ static int gic_set_wake(struct irq_data *d, unsigned int on)
 #define gic_set_wake	NULL
 #endif
 
+asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+{
+	u32 irqstat, irqnr;
+	struct gic_chip_data *gic = &gic_data[0];
+	void __iomem *cpu_base = gic_data_cpu_base(gic);
+
+	do {
+		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
+		irqnr = irqstat & ~0x1c00;
+
+		if (likely(irqnr > 15 && irqnr < 1021)) {
+			irqnr = irq_domain_to_irq(&gic->domain, irqnr);
+			handle_IRQ(irqnr, regs);
+			continue;
+		}
+		if (irqnr < 16) {
+			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+#ifdef CONFIG_SMP
+			handle_IPI(irqnr, regs);
+#endif
+			continue;
+		}
+		break;
+	} while (1);
+}
+
 static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
 {
 	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
@@ -225,7 +306,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
 	chained_irq_enter(chip, desc);
 
 	raw_spin_lock(&irq_controller_lock);
-	status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK);
+	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
 	raw_spin_unlock(&irq_controller_lock);
 
 	gic_irq = (status & 0x3ff);
@@ -270,7 +351,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
 	u32 cpumask;
 	unsigned int gic_irqs = gic->gic_irqs;
 	struct irq_domain *domain = &gic->domain;
-	void __iomem *base = gic->dist_base;
+	void __iomem *base = gic_data_dist_base(gic);
 	u32 cpu = 0;
 
 #ifdef CONFIG_SMP
@@ -330,8 +411,8 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
 
 static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
 {
-	void __iomem *dist_base = gic->dist_base;
-	void __iomem *base = gic->cpu_base;
+	void __iomem *dist_base = gic_data_dist_base(gic);
+	void __iomem *base = gic_data_cpu_base(gic);
 	int i;
 
 	/*
@@ -368,7 +449,7 @@ static void gic_dist_save(unsigned int gic_nr)
 		BUG();
 
 	gic_irqs = gic_data[gic_nr].gic_irqs;
-	dist_base = gic_data[gic_nr].dist_base;
+	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
 
 	if (!dist_base)
 		return;
@@ -403,7 +484,7 @@ static void gic_dist_restore(unsigned int gic_nr)
 		BUG();
 
 	gic_irqs = gic_data[gic_nr].gic_irqs;
-	dist_base = gic_data[gic_nr].dist_base;
+	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
 
 	if (!dist_base)
 		return;
@@ -439,8 +520,8 @@ static void gic_cpu_save(unsigned int gic_nr)
 	if (gic_nr >= MAX_GIC_NR)
 		BUG();
 
-	dist_base = gic_data[gic_nr].dist_base;
-	cpu_base = gic_data[gic_nr].cpu_base;
+	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
 
 	if (!dist_base || !cpu_base)
 		return;
@@ -465,8 +546,8 @@ static void gic_cpu_restore(unsigned int gic_nr)
 	if (gic_nr >= MAX_GIC_NR)
 		BUG();
 
-	dist_base = gic_data[gic_nr].dist_base;
-	cpu_base = gic_data[gic_nr].cpu_base;
+	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
 
 	if (!dist_base || !cpu_base)
 		return;
@@ -491,6 +572,11 @@ static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 	int i;
 
 	for (i = 0; i < MAX_GIC_NR; i++) {
+#ifdef CONFIG_GIC_NON_BANKED
+		/* Skip over unused GICs */
+		if (!gic_data[i].get_base)
+			continue;
+#endif
 		switch (cmd) {
 		case CPU_PM_ENTER:
 			gic_cpu_save(i);
@@ -564,8 +650,9 @@ const struct irq_domain_ops gic_irq_domain_ops = {
 #endif
 };
 
-void __init gic_init(unsigned int gic_nr, int irq_start,
-	void __iomem *dist_base, void __iomem *cpu_base)
+void __init gic_init_bases(unsigned int gic_nr, int irq_start,
+			   void __iomem *dist_base, void __iomem *cpu_base,
+			   u32 percpu_offset)
 {
 	struct gic_chip_data *gic;
 	struct irq_domain *domain;
@@ -575,8 +662,36 @@ void __init gic_init(unsigned int gic_nr, int irq_start,
 
 	gic = &gic_data[gic_nr];
 	domain = &gic->domain;
-	gic->dist_base = dist_base;
-	gic->cpu_base = cpu_base;
+#ifdef CONFIG_GIC_NON_BANKED
+	if (percpu_offset) { /* Frankein-GIC without banked registers... */
+		unsigned int cpu;
+
+		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
+		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
+		if (WARN_ON(!gic->dist_base.percpu_base ||
+			    !gic->cpu_base.percpu_base)) {
+			free_percpu(gic->dist_base.percpu_base);
+			free_percpu(gic->cpu_base.percpu_base);
+			return;
+		}
+
+		for_each_possible_cpu(cpu) {
+			unsigned long offset = percpu_offset * cpu_logical_map(cpu);
+			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
+			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
+		}
+
+		gic_set_base_accessor(gic, gic_get_percpu_base);
+	} else
+#endif
+	{			/* Normal, sane GIC... */
+		WARN(percpu_offset,
+		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
+		     percpu_offset);
+		gic->dist_base.common_base = dist_base;
+		gic->cpu_base.common_base = cpu_base;
+		gic_set_base_accessor(gic, gic_get_common_base);
+	}
 
 	/*
 	 * For primary GICs, skip over SGIs.
@@ -584,8 +699,6 @@ void __init gic_init(unsigned int gic_nr, int irq_start,
 	 */
 	domain->hwirq_base = 32;
 	if (gic_nr == 0) {
-		gic_cpu_base_addr = cpu_base;
-
 		if ((irq_start & 31) > 0) {
 			domain->hwirq_base = 16;
 			if (irq_start != -1)
@@ -597,7 +710,7 @@ void __init gic_init(unsigned int gic_nr, int irq_start,
 	 * Find out how many interrupts are supported.
 	 * The GIC only supports up to 1020 interrupt sources.
 	 */
-	gic_irqs = readl_relaxed(dist_base + GIC_DIST_CTR) & 0x1f;
+	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
 	gic_irqs = (gic_irqs + 1) * 32;
 	if (gic_irqs > 1020)
 		gic_irqs = 1020;
@@ -645,7 +758,7 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 	dsb();
 
 	/* this always happens on GIC0 */
-	writel_relaxed(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
+	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
 }
 #endif
 
@@ -656,6 +769,7 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
 {
 	void __iomem *cpu_base;
 	void __iomem *dist_base;
+	u32 percpu_offset;
 	int irq;
 	struct irq_domain *domain = &gic_data[gic_cnt].domain;
 
@@ -668,9 +782,12 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
 	cpu_base = of_iomap(node, 1);
 	WARN(!cpu_base, "unable to map gic cpu registers\n");
 
+	if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
+		percpu_offset = 0;
+
 	domain->of_node = of_node_get(node);
 
-	gic_init(gic_cnt, -1, dist_base, cpu_base);
+	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset);
 
 	if (parent) {
 		irq = irq_of_parse_and_map(node, 0);
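
The key functional change above is gic_init_bases(): with percpu_offset == 0 it behaves like the old gic_init(), while a non-zero offset (only honoured when CONFIG_GIC_NON_BANKED is set, and taken from the "cpu-offset" device tree property in gic_of_init()) makes each CPU access its registers at base + percpu_offset * cpu_logical_map(cpu). A hedged sketch of a non-DT caller; the EXAMPLE_* bases, the IRQ start value and the 0x8000 stride are illustrative assumptions, not values from this patch:

	/* Hypothetical platform init; EXAMPLE_GIC_DIST_BASE/EXAMPLE_GIC_CPU_BASE
	 * stand in for the platform's mappings of the distributor and CPU
	 * interface. */
	#include <asm/hardware/gic.h>

	static void __init example_init_irq(void)
	{
		/* Banked GIC: equivalent to the old gic_init(0, 29, dist, cpu). */
		gic_init_bases(0, 29, EXAMPLE_GIC_DIST_BASE, EXAMPLE_GIC_CPU_BASE, 0);

		/* A non-banked GIC would instead pass the per-CPU stride, e.g.
		 *   gic_init_bases(0, 29, dist, cpu, 0x8000);
		 * so CPU n uses dist/cpu + 0x8000 * cpu_logical_map(n). */
	}
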
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index 01f18a421b17..dcb004a804c7 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -19,17 +19,22 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/syscore_ops.h>
 #include <linux/device.h>
 #include <linux/amba/bus.h>
 
+#include <asm/exception.h>
 #include <asm/mach/irq.h>
 #include <asm/hardware/vic.h>
 
-#ifdef CONFIG_PM
 /**
  * struct vic_device - VIC PM device
  * @irq: The IRQ number for the base of the VIC.
@@ -40,6 +45,7 @@
  * @int_enable: Save for VIC_INT_ENABLE.
  * @soft_int: Save for VIC_INT_SOFT.
  * @protect: Save for VIC_PROTECT.
+ * @domain: The IRQ domain for the VIC.
  */
 struct vic_device {
 	void __iomem	*base;
@@ -50,13 +56,13 @@ struct vic_device {
 	u32		int_enable;
 	u32		soft_int;
 	u32		protect;
+	struct irq_domain domain;
 };
 
 /* we cannot allocate memory when VICs are initially registered */
 static struct vic_device vic_devices[CONFIG_ARM_VIC_NR];
 
 static int vic_id;
-#endif	/* CONFIG_PM */
 
 /**
  * vic_init2 - common initialisation code
@@ -156,39 +162,50 @@ static int __init vic_pm_init(void)
 	return 0;
 }
 late_initcall(vic_pm_init);
+#endif /* CONFIG_PM */
 
 /**
- * vic_pm_register - Register a VIC for later power management control
+ * vic_register() - Register a VIC.
  * @base: The base address of the VIC.
  * @irq: The base IRQ for the VIC.
  * @resume_sources: bitmask of interrupts allowed for resume sources.
+ * @node: The device tree node associated with the VIC.
  *
  * Register the VIC with the system device tree so that it can be notified
 * of suspend and resume requests and ensure that the correct actions are
 * taken to re-instate the settings on resume.
+ *
+ * This also configures the IRQ domain for the VIC.
 */
-static void __init vic_pm_register(void __iomem *base, unsigned int irq, u32 resume_sources)
+static void __init vic_register(void __iomem *base, unsigned int irq,
+				u32 resume_sources, struct device_node *node)
 {
 	struct vic_device *v;
 
-	if (vic_id >= ARRAY_SIZE(vic_devices))
+	if (vic_id >= ARRAY_SIZE(vic_devices)) {
 		printk(KERN_ERR "%s: too few VICs, increase CONFIG_ARM_VIC_NR\n", __func__);
-	else {
-		v = &vic_devices[vic_id];
-		v->base = base;
-		v->resume_sources = resume_sources;
-		v->irq = irq;
-		vic_id++;
+		return;
 	}
+
+	v = &vic_devices[vic_id];
+	v->base = base;
+	v->resume_sources = resume_sources;
+	v->irq = irq;
+	vic_id++;
+
+	v->domain.irq_base = irq;
+	v->domain.nr_irq = 32;
+#ifdef CONFIG_OF_IRQ
+	v->domain.of_node = of_node_get(node);
+#endif /* CONFIG_OF */
+	v->domain.ops = &irq_domain_simple_ops;
+	irq_domain_add(&v->domain);
 }
-#else
-static inline void vic_pm_register(void __iomem *base, unsigned int irq, u32 arg1) { }
-#endif /* CONFIG_PM */
 
 static void vic_ack_irq(struct irq_data *d)
 {
 	void __iomem *base = irq_data_get_irq_chip_data(d);
-	unsigned int irq = d->irq & 31;
+	unsigned int irq = d->hwirq;
 	writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
 	/* moreover, clear the soft-triggered, in case it was the reason */
 	writel(1 << irq, base + VIC_INT_SOFT_CLEAR);
@@ -197,14 +214,14 @@ static void vic_ack_irq(struct irq_data *d)
 static void vic_mask_irq(struct irq_data *d)
 {
 	void __iomem *base = irq_data_get_irq_chip_data(d);
-	unsigned int irq = d->irq & 31;
+	unsigned int irq = d->hwirq;
 	writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
 }
 
 static void vic_unmask_irq(struct irq_data *d)
 {
 	void __iomem *base = irq_data_get_irq_chip_data(d);
-	unsigned int irq = d->irq & 31;
+	unsigned int irq = d->hwirq;
 	writel(1 << irq, base + VIC_INT_ENABLE);
 }
 
@@ -226,7 +243,7 @@ static struct vic_device *vic_from_irq(unsigned int irq)
 static int vic_set_wake(struct irq_data *d, unsigned int on)
 {
 	struct vic_device *v = vic_from_irq(d->irq);
-	unsigned int off = d->irq & 31;
+	unsigned int off = d->hwirq;
 	u32 bit = 1 << off;
 
 	if (!v)
@@ -301,7 +318,7 @@ static void __init vic_set_irq_sources(void __iomem *base,
  * and 020 within the page. We call this "second block".
  */
 static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
-			       u32 vic_sources)
+			       u32 vic_sources, struct device_node *node)
 {
 	unsigned int i;
 	int vic_2nd_block = ((unsigned long)base & ~PAGE_MASK) != 0;
@@ -328,17 +345,12 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
 	}
 
 	vic_set_irq_sources(base, irq_start, vic_sources);
+	vic_register(base, irq_start, 0, node);
 }
 
-/**
- * vic_init - initialise a vectored interrupt controller
- * @base: iomem base address
- * @irq_start: starting interrupt number, must be muliple of 32
- * @vic_sources: bitmask of interrupt sources to allow
- * @resume_sources: bitmask of interrupt sources to allow for resume
- */
-void __init vic_init(void __iomem *base, unsigned int irq_start,
-		     u32 vic_sources, u32 resume_sources)
+static void __init __vic_init(void __iomem *base, unsigned int irq_start,
+			      u32 vic_sources, u32 resume_sources,
+			      struct device_node *node)
 {
 	unsigned int i;
 	u32 cellid = 0;
@@ -356,7 +368,7 @@ void __init vic_init(void __iomem *base, unsigned int irq_start,
 
 	switch(vendor) {
 	case AMBA_VENDOR_ST:
-		vic_init_st(base, irq_start, vic_sources);
+		vic_init_st(base, irq_start, vic_sources, node);
 		return;
 	default:
 		printk(KERN_WARNING "VIC: unknown vendor, continuing anyways\n");
@@ -375,5 +387,81 @@ void __init vic_init(void __iomem *base, unsigned int irq_start,
 
 	vic_set_irq_sources(base, irq_start, vic_sources);
 
-	vic_pm_register(base, irq_start, resume_sources);
+	vic_register(base, irq_start, resume_sources, node);
+}
+
+/**
+ * vic_init() - initialise a vectored interrupt controller
+ * @base: iomem base address
+ * @irq_start: starting interrupt number, must be muliple of 32
+ * @vic_sources: bitmask of interrupt sources to allow
+ * @resume_sources: bitmask of interrupt sources to allow for resume
+ */
+void __init vic_init(void __iomem *base, unsigned int irq_start,
+		     u32 vic_sources, u32 resume_sources)
+{
+	__vic_init(base, irq_start, vic_sources, resume_sources, NULL);
+}
+
+#ifdef CONFIG_OF
+int __init vic_of_init(struct device_node *node, struct device_node *parent)
+{
+	void __iomem *regs;
+	int irq_base;
+
+	if (WARN(parent, "non-root VICs are not supported"))
+		return -EINVAL;
+
+	regs = of_iomap(node, 0);
+	if (WARN_ON(!regs))
+		return -EIO;
+
+	irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id());
+	if (WARN_ON(irq_base < 0))
+		goto out_unmap;
+
+	__vic_init(regs, irq_base, ~0, ~0, node);
+
+	return 0;
+
+ out_unmap:
+	iounmap(regs);
+
+	return -EIO;
+}
+#endif /* CONFIG OF */
+
+/*
+ * Handle each interrupt in a single VIC.  Returns non-zero if we've
+ * handled at least one interrupt.  This does a single read of the
+ * status register and handles all interrupts in order from LSB first.
+ */
+static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
+{
+	u32 stat, irq;
+	int handled = 0;
+
+	stat = readl_relaxed(vic->base + VIC_IRQ_STATUS);
+	while (stat) {
+		irq = ffs(stat) - 1;
+		handle_IRQ(irq_domain_to_irq(&vic->domain, irq), regs);
+		stat &= ~(1 << irq);
+		handled = 1;
+	}
+
+	return handled;
+}
+
+/*
+ * Keep iterating over all registered VIC's until there are no pending
+ * interrupts.
+ */
+asmlinkage void __exception_irq_entry vic_handle_irq(struct pt_regs *regs)
+{
+	int i, handled;
+
+	do {
+		for (i = 0, handled = 0; i < vic_id; ++i)
+			handled |= handle_one_vic(&vic_devices[i], regs);
+	} while (handled);
 }
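
With the VIC now registering an irq_domain and providing vic_handle_irq(), a DT platform can probe it through vic_of_init() and take interrupts via the MULTI_IRQ_HANDLER entry point. A sketch under the assumption that the board routes probing through an of_irq_init()-style match table; the compatible string, machine name and example_* symbols are illustrative, not part of this patch:

	/* Hypothetical DT board code. */
	#include <linux/of_irq.h>
	#include <asm/hardware/vic.h>
	#include <asm/mach/arch.h>

	static const struct of_device_id example_irq_match[] __initconst = {
		{ .compatible = "arm,pl192-vic", .data = vic_of_init },
		{ /* sentinel */ }
	};

	static void __init example_init_irq(void)
	{
		of_irq_init(example_irq_match);	/* ends up calling vic_of_init() */
	}

	DT_MACHINE_START(EXAMPLE_DT, "Example VIC-based board (DT)")
		.init_irq	= example_init_irq,
		.handle_irq	= vic_handle_irq,	/* entry point added in vic.c above */
	MACHINE_END
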