diff options
author | Arnd Bergmann <arnd@arndb.de> | 2011-10-31 09:08:10 -0400 |
---|---|---|
committer | Arnd Bergmann <arnd@arndb.de> | 2011-10-31 09:08:10 -0400 |
commit | 08cab72f91c8b28ffabfd143119bccdd4a115ad7 (patch) | |
tree | ccd5583971caecd82bf2d1e62691bf6e0362d650 /arch/arm/common | |
parent | 86c1e5a74af11e4817ffa6d7748d9ac1353b5b53 (diff) | |
parent | f37a53cc5d8a8fb199e41386d125d8c2ed9e54ef (diff) |
Merge branch 'dt/gic' into next/dt
Conflicts:
arch/arm/include/asm/localtimer.h
arch/arm/mach-msm/board-msm8x60.c
arch/arm/mach-omap2/board-generic.c
Diffstat (limited to 'arch/arm/common')
-rw-r--r-- | arch/arm/common/Kconfig | 1 | ||||
-rw-r--r-- | arch/arm/common/gic.c | 374 |
2 files changed, 329 insertions, 46 deletions
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig index 4b71766fb21d..74df9ca2be31 100644 --- a/arch/arm/common/Kconfig +++ b/arch/arm/common/Kconfig | |||
@@ -1,4 +1,5 @@ | |||
1 | config ARM_GIC | 1 | config ARM_GIC |
2 | select IRQ_DOMAIN | ||
2 | bool | 3 | bool |
3 | 4 | ||
4 | config ARM_VIC | 5 | config ARM_VIC |
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c index 3227ca952a12..9d77777076f0 100644 --- a/arch/arm/common/gic.c +++ b/arch/arm/common/gic.c | |||
@@ -24,10 +24,20 @@ | |||
24 | */ | 24 | */ |
25 | #include <linux/init.h> | 25 | #include <linux/init.h> |
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/err.h> | ||
28 | #include <linux/export.h> | ||
27 | #include <linux/list.h> | 29 | #include <linux/list.h> |
28 | #include <linux/smp.h> | 30 | #include <linux/smp.h> |
31 | #include <linux/cpu_pm.h> | ||
29 | #include <linux/cpumask.h> | 32 | #include <linux/cpumask.h> |
30 | #include <linux/io.h> | 33 | #include <linux/io.h> |
34 | #include <linux/of.h> | ||
35 | #include <linux/of_address.h> | ||
36 | #include <linux/of_irq.h> | ||
37 | #include <linux/irqdomain.h> | ||
38 | #include <linux/interrupt.h> | ||
39 | #include <linux/percpu.h> | ||
40 | #include <linux/slab.h> | ||
31 | 41 | ||
32 | #include <asm/irq.h> | 42 | #include <asm/irq.h> |
33 | #include <asm/mach/irq.h> | 43 | #include <asm/mach/irq.h> |
@@ -71,8 +81,7 @@ static inline void __iomem *gic_cpu_base(struct irq_data *d) | |||
71 | 81 | ||
72 | static inline unsigned int gic_irq(struct irq_data *d) | 82 | static inline unsigned int gic_irq(struct irq_data *d) |
73 | { | 83 | { |
74 | struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d); | 84 | return d->hwirq; |
75 | return d->irq - gic_data->irq_offset; | ||
76 | } | 85 | } |
77 | 86 | ||
78 | /* | 87 | /* |
@@ -80,7 +89,7 @@ static inline unsigned int gic_irq(struct irq_data *d) | |||
80 | */ | 89 | */ |
81 | static void gic_mask_irq(struct irq_data *d) | 90 | static void gic_mask_irq(struct irq_data *d) |
82 | { | 91 | { |
83 | u32 mask = 1 << (d->irq % 32); | 92 | u32 mask = 1 << (gic_irq(d) % 32); |
84 | 93 | ||
85 | spin_lock(&irq_controller_lock); | 94 | spin_lock(&irq_controller_lock); |
86 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); | 95 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); |
@@ -91,7 +100,7 @@ static void gic_mask_irq(struct irq_data *d) | |||
91 | 100 | ||
92 | static void gic_unmask_irq(struct irq_data *d) | 101 | static void gic_unmask_irq(struct irq_data *d) |
93 | { | 102 | { |
94 | u32 mask = 1 << (d->irq % 32); | 103 | u32 mask = 1 << (gic_irq(d) % 32); |
95 | 104 | ||
96 | spin_lock(&irq_controller_lock); | 105 | spin_lock(&irq_controller_lock); |
97 | if (gic_arch_extn.irq_unmask) | 106 | if (gic_arch_extn.irq_unmask) |
@@ -172,7 +181,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | |||
172 | bool force) | 181 | bool force) |
173 | { | 182 | { |
174 | void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); | 183 | void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); |
175 | unsigned int shift = (d->irq % 4) * 8; | 184 | unsigned int shift = (gic_irq(d) % 4) * 8; |
176 | unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); | 185 | unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); |
177 | u32 val, mask, bit; | 186 | u32 val, mask, bit; |
178 | 187 | ||
@@ -180,7 +189,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | |||
180 | return -EINVAL; | 189 | return -EINVAL; |
181 | 190 | ||
182 | mask = 0xff << shift; | 191 | mask = 0xff << shift; |
183 | bit = 1 << (cpu + shift); | 192 | bit = 1 << (cpu_logical_map(cpu) + shift); |
184 | 193 | ||
185 | spin_lock(&irq_controller_lock); | 194 | spin_lock(&irq_controller_lock); |
186 | val = readl_relaxed(reg) & ~mask; | 195 | val = readl_relaxed(reg) & ~mask; |
@@ -223,7 +232,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) | |||
223 | if (gic_irq == 1023) | 232 | if (gic_irq == 1023) |
224 | goto out; | 233 | goto out; |
225 | 234 | ||
226 | cascade_irq = gic_irq + chip_data->irq_offset; | 235 | cascade_irq = irq_domain_to_irq(&chip_data->domain, gic_irq); |
227 | if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS)) | 236 | if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS)) |
228 | do_bad_IRQ(cascade_irq, desc); | 237 | do_bad_IRQ(cascade_irq, desc); |
229 | else | 238 | else |
@@ -255,28 +264,26 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) | |||
255 | irq_set_chained_handler(irq, gic_handle_cascade_irq); | 264 | irq_set_chained_handler(irq, gic_handle_cascade_irq); |
256 | } | 265 | } |
257 | 266 | ||
258 | static void __init gic_dist_init(struct gic_chip_data *gic, | 267 | static void __init gic_dist_init(struct gic_chip_data *gic) |
259 | unsigned int irq_start) | ||
260 | { | 268 | { |
261 | unsigned int gic_irqs, irq_limit, i; | 269 | unsigned int i, irq; |
270 | u32 cpumask; | ||
271 | unsigned int gic_irqs = gic->gic_irqs; | ||
272 | struct irq_domain *domain = &gic->domain; | ||
262 | void __iomem *base = gic->dist_base; | 273 | void __iomem *base = gic->dist_base; |
263 | u32 cpumask = 1 << smp_processor_id(); | 274 | u32 cpu = 0; |
264 | 275 | ||
276 | #ifdef CONFIG_SMP | ||
277 | cpu = cpu_logical_map(smp_processor_id()); | ||
278 | #endif | ||
279 | |||
280 | cpumask = 1 << cpu; | ||
265 | cpumask |= cpumask << 8; | 281 | cpumask |= cpumask << 8; |
266 | cpumask |= cpumask << 16; | 282 | cpumask |= cpumask << 16; |
267 | 283 | ||
268 | writel_relaxed(0, base + GIC_DIST_CTRL); | 284 | writel_relaxed(0, base + GIC_DIST_CTRL); |
269 | 285 | ||
270 | /* | 286 | /* |
271 | * Find out how many interrupts are supported. | ||
272 | * The GIC only supports up to 1020 interrupt sources. | ||
273 | */ | ||
274 | gic_irqs = readl_relaxed(base + GIC_DIST_CTR) & 0x1f; | ||
275 | gic_irqs = (gic_irqs + 1) * 32; | ||
276 | if (gic_irqs > 1020) | ||
277 | gic_irqs = 1020; | ||
278 | |||
279 | /* | ||
280 | * Set all global interrupts to be level triggered, active low. | 287 | * Set all global interrupts to be level triggered, active low. |
281 | */ | 288 | */ |
282 | for (i = 32; i < gic_irqs; i += 16) | 289 | for (i = 32; i < gic_irqs; i += 16) |
@@ -302,19 +309,20 @@ static void __init gic_dist_init(struct gic_chip_data *gic, | |||
302 | writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32); | 309 | writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32); |
303 | 310 | ||
304 | /* | 311 | /* |
305 | * Limit number of interrupts registered to the platform maximum | ||
306 | */ | ||
307 | irq_limit = gic->irq_offset + gic_irqs; | ||
308 | if (WARN_ON(irq_limit > NR_IRQS)) | ||
309 | irq_limit = NR_IRQS; | ||
310 | |||
311 | /* | ||
312 | * Setup the Linux IRQ subsystem. | 312 | * Setup the Linux IRQ subsystem. |
313 | */ | 313 | */ |
314 | for (i = irq_start; i < irq_limit; i++) { | 314 | irq_domain_for_each_irq(domain, i, irq) { |
315 | irq_set_chip_and_handler(i, &gic_chip, handle_fasteoi_irq); | 315 | if (i < 32) { |
316 | irq_set_chip_data(i, gic); | 316 | irq_set_percpu_devid(irq); |
317 | set_irq_flags(i, IRQF_VALID | IRQF_PROBE); | 317 | irq_set_chip_and_handler(irq, &gic_chip, |
318 | handle_percpu_devid_irq); | ||
319 | set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); | ||
320 | } else { | ||
321 | irq_set_chip_and_handler(irq, &gic_chip, | ||
322 | handle_fasteoi_irq); | ||
323 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | ||
324 | } | ||
325 | irq_set_chip_data(irq, gic); | ||
318 | } | 326 | } |
319 | 327 | ||
320 | writel_relaxed(1, base + GIC_DIST_CTRL); | 328 | writel_relaxed(1, base + GIC_DIST_CTRL); |
@@ -343,23 +351,270 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic) | |||
343 | writel_relaxed(1, base + GIC_CPU_CTRL); | 351 | writel_relaxed(1, base + GIC_CPU_CTRL); |
344 | } | 352 | } |
345 | 353 | ||
346 | void __init gic_init(unsigned int gic_nr, unsigned int irq_start, | 354 | #ifdef CONFIG_CPU_PM |
355 | /* | ||
356 | * Saves the GIC distributor registers during suspend or idle. Must be called | ||
357 | * with interrupts disabled but before powering down the GIC. After calling | ||
358 | * this function, no interrupts will be delivered by the GIC, and another | ||
359 | * platform-specific wakeup source must be enabled. | ||
360 | */ | ||
361 | static void gic_dist_save(unsigned int gic_nr) | ||
362 | { | ||
363 | unsigned int gic_irqs; | ||
364 | void __iomem *dist_base; | ||
365 | int i; | ||
366 | |||
367 | if (gic_nr >= MAX_GIC_NR) | ||
368 | BUG(); | ||
369 | |||
370 | gic_irqs = gic_data[gic_nr].gic_irqs; | ||
371 | dist_base = gic_data[gic_nr].dist_base; | ||
372 | |||
373 | if (!dist_base) | ||
374 | return; | ||
375 | |||
376 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++) | ||
377 | gic_data[gic_nr].saved_spi_conf[i] = | ||
378 | readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); | ||
379 | |||
380 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) | ||
381 | gic_data[gic_nr].saved_spi_target[i] = | ||
382 | readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4); | ||
383 | |||
384 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) | ||
385 | gic_data[gic_nr].saved_spi_enable[i] = | ||
386 | readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); | ||
387 | } | ||
388 | |||
389 | /* | ||
390 | * Restores the GIC distributor registers during resume or when coming out of | ||
391 | * idle. Must be called before enabling interrupts. If a level interrupt | ||
392 | * that occurred while the GIC was suspended is still present, it will be | ||
393 | * handled normally, but any edge interrupts that occurred will not be seen by | ||
394 | * the GIC and need to be handled by the platform-specific wakeup source. | ||
395 | */ | ||
396 | static void gic_dist_restore(unsigned int gic_nr) | ||
397 | { | ||
398 | unsigned int gic_irqs; | ||
399 | unsigned int i; | ||
400 | void __iomem *dist_base; | ||
401 | |||
402 | if (gic_nr >= MAX_GIC_NR) | ||
403 | BUG(); | ||
404 | |||
405 | gic_irqs = gic_data[gic_nr].gic_irqs; | ||
406 | dist_base = gic_data[gic_nr].dist_base; | ||
407 | |||
408 | if (!dist_base) | ||
409 | return; | ||
410 | |||
411 | writel_relaxed(0, dist_base + GIC_DIST_CTRL); | ||
412 | |||
413 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++) | ||
414 | writel_relaxed(gic_data[gic_nr].saved_spi_conf[i], | ||
415 | dist_base + GIC_DIST_CONFIG + i * 4); | ||
416 | |||
417 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) | ||
418 | writel_relaxed(0xa0a0a0a0, | ||
419 | dist_base + GIC_DIST_PRI + i * 4); | ||
420 | |||
421 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) | ||
422 | writel_relaxed(gic_data[gic_nr].saved_spi_target[i], | ||
423 | dist_base + GIC_DIST_TARGET + i * 4); | ||
424 | |||
425 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) | ||
426 | writel_relaxed(gic_data[gic_nr].saved_spi_enable[i], | ||
427 | dist_base + GIC_DIST_ENABLE_SET + i * 4); | ||
428 | |||
429 | writel_relaxed(1, dist_base + GIC_DIST_CTRL); | ||
430 | } | ||
431 | |||
432 | static void gic_cpu_save(unsigned int gic_nr) | ||
433 | { | ||
434 | int i; | ||
435 | u32 *ptr; | ||
436 | void __iomem *dist_base; | ||
437 | void __iomem *cpu_base; | ||
438 | |||
439 | if (gic_nr >= MAX_GIC_NR) | ||
440 | BUG(); | ||
441 | |||
442 | dist_base = gic_data[gic_nr].dist_base; | ||
443 | cpu_base = gic_data[gic_nr].cpu_base; | ||
444 | |||
445 | if (!dist_base || !cpu_base) | ||
446 | return; | ||
447 | |||
448 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); | ||
449 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) | ||
450 | ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); | ||
451 | |||
452 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); | ||
453 | for (i = 0; i < DIV_ROUND_UP(32, 16); i++) | ||
454 | ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); | ||
455 | |||
456 | } | ||
457 | |||
458 | static void gic_cpu_restore(unsigned int gic_nr) | ||
459 | { | ||
460 | int i; | ||
461 | u32 *ptr; | ||
462 | void __iomem *dist_base; | ||
463 | void __iomem *cpu_base; | ||
464 | |||
465 | if (gic_nr >= MAX_GIC_NR) | ||
466 | BUG(); | ||
467 | |||
468 | dist_base = gic_data[gic_nr].dist_base; | ||
469 | cpu_base = gic_data[gic_nr].cpu_base; | ||
470 | |||
471 | if (!dist_base || !cpu_base) | ||
472 | return; | ||
473 | |||
474 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); | ||
475 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) | ||
476 | writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4); | ||
477 | |||
478 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); | ||
479 | for (i = 0; i < DIV_ROUND_UP(32, 16); i++) | ||
480 | writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4); | ||
481 | |||
482 | for (i = 0; i < DIV_ROUND_UP(32, 4); i++) | ||
483 | writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4); | ||
484 | |||
485 | writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK); | ||
486 | writel_relaxed(1, cpu_base + GIC_CPU_CTRL); | ||
487 | } | ||
488 | |||
489 | static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v) | ||
490 | { | ||
491 | int i; | ||
492 | |||
493 | for (i = 0; i < MAX_GIC_NR; i++) { | ||
494 | switch (cmd) { | ||
495 | case CPU_PM_ENTER: | ||
496 | gic_cpu_save(i); | ||
497 | break; | ||
498 | case CPU_PM_ENTER_FAILED: | ||
499 | case CPU_PM_EXIT: | ||
500 | gic_cpu_restore(i); | ||
501 | break; | ||
502 | case CPU_CLUSTER_PM_ENTER: | ||
503 | gic_dist_save(i); | ||
504 | break; | ||
505 | case CPU_CLUSTER_PM_ENTER_FAILED: | ||
506 | case CPU_CLUSTER_PM_EXIT: | ||
507 | gic_dist_restore(i); | ||
508 | break; | ||
509 | } | ||
510 | } | ||
511 | |||
512 | return NOTIFY_OK; | ||
513 | } | ||
514 | |||
515 | static struct notifier_block gic_notifier_block = { | ||
516 | .notifier_call = gic_notifier, | ||
517 | }; | ||
518 | |||
519 | static void __init gic_pm_init(struct gic_chip_data *gic) | ||
520 | { | ||
521 | gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4, | ||
522 | sizeof(u32)); | ||
523 | BUG_ON(!gic->saved_ppi_enable); | ||
524 | |||
525 | gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4, | ||
526 | sizeof(u32)); | ||
527 | BUG_ON(!gic->saved_ppi_conf); | ||
528 | |||
529 | cpu_pm_register_notifier(&gic_notifier_block); | ||
530 | } | ||
531 | #else | ||
532 | static void __init gic_pm_init(struct gic_chip_data *gic) | ||
533 | { | ||
534 | } | ||
535 | #endif | ||
536 | |||
537 | #ifdef CONFIG_OF | ||
538 | static int gic_irq_domain_dt_translate(struct irq_domain *d, | ||
539 | struct device_node *controller, | ||
540 | const u32 *intspec, unsigned int intsize, | ||
541 | unsigned long *out_hwirq, unsigned int *out_type) | ||
542 | { | ||
543 | if (d->of_node != controller) | ||
544 | return -EINVAL; | ||
545 | if (intsize < 3) | ||
546 | return -EINVAL; | ||
547 | |||
548 | /* Get the interrupt number and add 16 to skip over SGIs */ | ||
549 | *out_hwirq = intspec[1] + 16; | ||
550 | |||
551 | /* For SPIs, we need to add 16 more to get the GIC irq ID number */ | ||
552 | if (!intspec[0]) | ||
553 | *out_hwirq += 16; | ||
554 | |||
555 | *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; | ||
556 | return 0; | ||
557 | } | ||
558 | #endif | ||
559 | |||
560 | const struct irq_domain_ops gic_irq_domain_ops = { | ||
561 | #ifdef CONFIG_OF | ||
562 | .dt_translate = gic_irq_domain_dt_translate, | ||
563 | #endif | ||
564 | }; | ||
565 | |||
566 | void __init gic_init(unsigned int gic_nr, int irq_start, | ||
347 | void __iomem *dist_base, void __iomem *cpu_base) | 567 | void __iomem *dist_base, void __iomem *cpu_base) |
348 | { | 568 | { |
349 | struct gic_chip_data *gic; | 569 | struct gic_chip_data *gic; |
570 | struct irq_domain *domain; | ||
571 | int gic_irqs; | ||
350 | 572 | ||
351 | BUG_ON(gic_nr >= MAX_GIC_NR); | 573 | BUG_ON(gic_nr >= MAX_GIC_NR); |
352 | 574 | ||
353 | gic = &gic_data[gic_nr]; | 575 | gic = &gic_data[gic_nr]; |
576 | domain = &gic->domain; | ||
354 | gic->dist_base = dist_base; | 577 | gic->dist_base = dist_base; |
355 | gic->cpu_base = cpu_base; | 578 | gic->cpu_base = cpu_base; |
356 | gic->irq_offset = (irq_start - 1) & ~31; | ||
357 | 579 | ||
358 | if (gic_nr == 0) | 580 | /* |
581 | * For primary GICs, skip over SGIs. | ||
582 | * For secondary GICs, skip over PPIs, too. | ||
583 | */ | ||
584 | if (gic_nr == 0) { | ||
359 | gic_cpu_base_addr = cpu_base; | 585 | gic_cpu_base_addr = cpu_base; |
586 | domain->hwirq_base = 16; | ||
587 | if (irq_start > 0) | ||
588 | irq_start = (irq_start & ~31) + 16; | ||
589 | } else | ||
590 | domain->hwirq_base = 32; | ||
591 | |||
592 | /* | ||
593 | * Find out how many interrupts are supported. | ||
594 | * The GIC only supports up to 1020 interrupt sources. | ||
595 | */ | ||
596 | gic_irqs = readl_relaxed(dist_base + GIC_DIST_CTR) & 0x1f; | ||
597 | gic_irqs = (gic_irqs + 1) * 32; | ||
598 | if (gic_irqs > 1020) | ||
599 | gic_irqs = 1020; | ||
600 | gic->gic_irqs = gic_irqs; | ||
601 | |||
602 | domain->nr_irq = gic_irqs - domain->hwirq_base; | ||
603 | domain->irq_base = irq_alloc_descs(irq_start, 16, domain->nr_irq, | ||
604 | numa_node_id()); | ||
605 | if (IS_ERR_VALUE(domain->irq_base)) { | ||
606 | WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", | ||
607 | irq_start); | ||
608 | domain->irq_base = irq_start; | ||
609 | } | ||
610 | domain->priv = gic; | ||
611 | domain->ops = &gic_irq_domain_ops; | ||
612 | irq_domain_add(domain); | ||
360 | 613 | ||
361 | gic_dist_init(gic, irq_start); | 614 | gic_chip.flags |= gic_arch_extn.flags; |
615 | gic_dist_init(gic); | ||
362 | gic_cpu_init(gic); | 616 | gic_cpu_init(gic); |
617 | gic_pm_init(gic); | ||
363 | } | 618 | } |
364 | 619 | ||
365 | void __cpuinit gic_secondary_init(unsigned int gic_nr) | 620 | void __cpuinit gic_secondary_init(unsigned int gic_nr) |
@@ -369,20 +624,15 @@ void __cpuinit gic_secondary_init(unsigned int gic_nr) | |||
369 | gic_cpu_init(&gic_data[gic_nr]); | 624 | gic_cpu_init(&gic_data[gic_nr]); |
370 | } | 625 | } |
371 | 626 | ||
372 | void __cpuinit gic_enable_ppi(unsigned int irq) | ||
373 | { | ||
374 | unsigned long flags; | ||
375 | |||
376 | local_irq_save(flags); | ||
377 | irq_set_status_flags(irq, IRQ_NOPROBE); | ||
378 | gic_unmask_irq(irq_get_irq_data(irq)); | ||
379 | local_irq_restore(flags); | ||
380 | } | ||
381 | |||
382 | #ifdef CONFIG_SMP | 627 | #ifdef CONFIG_SMP |
383 | void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) | 628 | void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) |
384 | { | 629 | { |
385 | unsigned long map = *cpus_addr(*mask); | 630 | int cpu; |
631 | unsigned long map = 0; | ||
632 | |||
633 | /* Convert our logical CPU mask into a physical one. */ | ||
634 | for_each_cpu(cpu, mask) | ||
635 | map |= 1 << cpu_logical_map(cpu); | ||
386 | 636 | ||
387 | /* | 637 | /* |
388 | * Ensure that stores to Normal memory are visible to the | 638 | * Ensure that stores to Normal memory are visible to the |
@@ -394,3 +644,35 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) | |||
394 | writel_relaxed(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT); | 644 | writel_relaxed(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT); |
395 | } | 645 | } |
396 | #endif | 646 | #endif |
647 | |||
648 | #ifdef CONFIG_OF | ||
649 | static int gic_cnt __initdata = 0; | ||
650 | |||
651 | int __init gic_of_init(struct device_node *node, struct device_node *parent) | ||
652 | { | ||
653 | void __iomem *cpu_base; | ||
654 | void __iomem *dist_base; | ||
655 | int irq; | ||
656 | struct irq_domain *domain = &gic_data[gic_cnt].domain; | ||
657 | |||
658 | if (WARN_ON(!node)) | ||
659 | return -ENODEV; | ||
660 | |||
661 | dist_base = of_iomap(node, 0); | ||
662 | WARN(!dist_base, "unable to map gic dist registers\n"); | ||
663 | |||
664 | cpu_base = of_iomap(node, 1); | ||
665 | WARN(!cpu_base, "unable to map gic cpu registers\n"); | ||
666 | |||
667 | domain->of_node = of_node_get(node); | ||
668 | |||
669 | gic_init(gic_cnt, -1, dist_base, cpu_base); | ||
670 | |||
671 | if (parent) { | ||
672 | irq = irq_of_parse_and_map(node, 0); | ||
673 | gic_cascade_irq(gic_cnt, irq); | ||
674 | } | ||
675 | gic_cnt++; | ||
676 | return 0; | ||
677 | } | ||
678 | #endif | ||