about summary refs log tree commit diff stats
path: root/drivers/irqchip/irq-gic.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-05-17 13:27:29 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-05-17 13:27:29 -0400
commitede40902cf80714ece199977b308e2ee437cae0b (patch)
treee85e57194e7c9c7575ed0fa27b72495135a7eb23 /drivers/irqchip/irq-gic.c
parent91e8d0cbc94f81f110e508c3105dd93fb146d6b5 (diff)
parent0097852c302aca943a8b76f7f85e133af6e1701a (diff)
Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq updates from Thomas Gleixner: "This update delivers: - Yet another interrupt chip driver (LPC32xx) - Core functions to handle partitioned per-cpu interrupts - Enhancements to the IPI core - Proper handling of irq type configuration - A large set of ARM GIC enhancements - The usual pile of small fixes, cleanups and enhancements" * 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (31 commits) irqchip/bcm2836: Use a more generic memory barrier call irqchip/bcm2836: Fix compiler warning on 64-bit build irqchip/bcm2836: Drop smp_set_ops on arm64 builds irqchip/gic: Add helper functions for GIC setup and teardown irqchip/gic: Store GIC configuration parameters irqchip/gic: Pass GIC pointer to save/restore functions irqchip/gic: Return an error if GIC initialisation fails irqchip/gic: Remove static irq_chip definition for eoimode1 irqchip/gic: Don't initialise chip if mapping IO space fails irqchip/gic: WARN if setting the interrupt type for a PPI fails irqchip/gic: Don't unnecessarily write the IRQ configuration irqchip: Mask the non-type/sense bits when translating an IRQ genirq: Ensure IRQ descriptor is valid when setting-up the IRQ irqchip/gic-v3: Configure all interrupts as non-secure Group-1 irqchip/gic-v2m: Add workaround for Broadcom NS2 GICv2m erratum irqchip/irq-alpine-msi: Don't use <asm-generic/msi.h> irqchip/mbigen: Checking for IS_ERR() instead of NULL irqchip/gic-v3: Remove inexistant register definition irqchip/gicv3-its: Don't allow devices whose ID is outside range irqchip: Add LPC32xx interrupt controller driver ...
Diffstat (limited to 'drivers/irqchip/irq-gic.c')
-rw-r--r--  drivers/irqchip/irq-gic.c  322
1 files changed, 207 insertions, 115 deletions
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 095bb5b5c3f2..1de20e14a721 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -72,6 +72,9 @@ struct gic_chip_data {
72 struct irq_chip chip; 72 struct irq_chip chip;
73 union gic_base dist_base; 73 union gic_base dist_base;
74 union gic_base cpu_base; 74 union gic_base cpu_base;
75 void __iomem *raw_dist_base;
76 void __iomem *raw_cpu_base;
77 u32 percpu_offset;
75#ifdef CONFIG_CPU_PM 78#ifdef CONFIG_CPU_PM
76 u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)]; 79 u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
77 u32 saved_spi_active[DIV_ROUND_UP(1020, 32)]; 80 u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
@@ -344,6 +347,14 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
344 if (static_key_true(&supports_deactivate)) 347 if (static_key_true(&supports_deactivate))
345 writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE); 348 writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
346#ifdef CONFIG_SMP 349#ifdef CONFIG_SMP
350 /*
351 * Ensure any shared data written by the CPU sending
352 * the IPI is read after we've read the ACK register
353 * on the GIC.
354 *
355 * Pairs with the write barrier in gic_raise_softirq
356 */
357 smp_rmb();
347 handle_IPI(irqnr, regs); 358 handle_IPI(irqnr, regs);
348#endif 359#endif
349 continue; 360 continue;
@@ -391,20 +402,6 @@ static struct irq_chip gic_chip = {
391 IRQCHIP_MASK_ON_SUSPEND, 402 IRQCHIP_MASK_ON_SUSPEND,
392}; 403};
393 404
394static struct irq_chip gic_eoimode1_chip = {
395 .name = "GICv2",
396 .irq_mask = gic_eoimode1_mask_irq,
397 .irq_unmask = gic_unmask_irq,
398 .irq_eoi = gic_eoimode1_eoi_irq,
399 .irq_set_type = gic_set_type,
400 .irq_get_irqchip_state = gic_irq_get_irqchip_state,
401 .irq_set_irqchip_state = gic_irq_set_irqchip_state,
402 .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
403 .flags = IRQCHIP_SET_TYPE_MASKED |
404 IRQCHIP_SKIP_SET_WAKE |
405 IRQCHIP_MASK_ON_SUSPEND,
406};
407
408void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) 405void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
409{ 406{
410 BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR); 407 BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
@@ -473,7 +470,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
473 writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL); 470 writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
474} 471}
475 472
476static void gic_cpu_init(struct gic_chip_data *gic) 473static int gic_cpu_init(struct gic_chip_data *gic)
477{ 474{
478 void __iomem *dist_base = gic_data_dist_base(gic); 475 void __iomem *dist_base = gic_data_dist_base(gic);
479 void __iomem *base = gic_data_cpu_base(gic); 476 void __iomem *base = gic_data_cpu_base(gic);
@@ -489,7 +486,9 @@ static void gic_cpu_init(struct gic_chip_data *gic)
489 /* 486 /*
490 * Get what the GIC says our CPU mask is. 487 * Get what the GIC says our CPU mask is.
491 */ 488 */
492 BUG_ON(cpu >= NR_GIC_CPU_IF); 489 if (WARN_ON(cpu >= NR_GIC_CPU_IF))
490 return -EINVAL;
491
493 gic_check_cpu_features(); 492 gic_check_cpu_features();
494 cpu_mask = gic_get_cpumask(gic); 493 cpu_mask = gic_get_cpumask(gic);
495 gic_cpu_map[cpu] = cpu_mask; 494 gic_cpu_map[cpu] = cpu_mask;
@@ -507,6 +506,8 @@ static void gic_cpu_init(struct gic_chip_data *gic)
507 506
508 writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK); 507 writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
509 gic_cpu_if_up(gic); 508 gic_cpu_if_up(gic);
509
510 return 0;
510} 511}
511 512
512int gic_cpu_if_down(unsigned int gic_nr) 513int gic_cpu_if_down(unsigned int gic_nr)
@@ -532,34 +533,35 @@ int gic_cpu_if_down(unsigned int gic_nr)
532 * this function, no interrupts will be delivered by the GIC, and another 533 * this function, no interrupts will be delivered by the GIC, and another
533 * platform-specific wakeup source must be enabled. 534 * platform-specific wakeup source must be enabled.
534 */ 535 */
535static void gic_dist_save(unsigned int gic_nr) 536static void gic_dist_save(struct gic_chip_data *gic)
536{ 537{
537 unsigned int gic_irqs; 538 unsigned int gic_irqs;
538 void __iomem *dist_base; 539 void __iomem *dist_base;
539 int i; 540 int i;
540 541
541 BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR); 542 if (WARN_ON(!gic))
543 return;
542 544
543 gic_irqs = gic_data[gic_nr].gic_irqs; 545 gic_irqs = gic->gic_irqs;
544 dist_base = gic_data_dist_base(&gic_data[gic_nr]); 546 dist_base = gic_data_dist_base(gic);
545 547
546 if (!dist_base) 548 if (!dist_base)
547 return; 549 return;
548 550
549 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++) 551 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
550 gic_data[gic_nr].saved_spi_conf[i] = 552 gic->saved_spi_conf[i] =
551 readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); 553 readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
552 554
553 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) 555 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
554 gic_data[gic_nr].saved_spi_target[i] = 556 gic->saved_spi_target[i] =
555 readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4); 557 readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
556 558
557 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) 559 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
558 gic_data[gic_nr].saved_spi_enable[i] = 560 gic->saved_spi_enable[i] =
559 readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); 561 readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
560 562
561 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) 563 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
562 gic_data[gic_nr].saved_spi_active[i] = 564 gic->saved_spi_active[i] =
563 readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4); 565 readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
564} 566}
565 567
@@ -570,16 +572,17 @@ static void gic_dist_save(unsigned int gic_nr)
570 * handled normally, but any edge interrupts that occured will not be seen by 572 * handled normally, but any edge interrupts that occured will not be seen by
571 * the GIC and need to be handled by the platform-specific wakeup source. 573 * the GIC and need to be handled by the platform-specific wakeup source.
572 */ 574 */
573static void gic_dist_restore(unsigned int gic_nr) 575static void gic_dist_restore(struct gic_chip_data *gic)
574{ 576{
575 unsigned int gic_irqs; 577 unsigned int gic_irqs;
576 unsigned int i; 578 unsigned int i;
577 void __iomem *dist_base; 579 void __iomem *dist_base;
578 580
579 BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR); 581 if (WARN_ON(!gic))
582 return;
580 583
581 gic_irqs = gic_data[gic_nr].gic_irqs; 584 gic_irqs = gic->gic_irqs;
582 dist_base = gic_data_dist_base(&gic_data[gic_nr]); 585 dist_base = gic_data_dist_base(gic);
583 586
584 if (!dist_base) 587 if (!dist_base)
585 return; 588 return;
@@ -587,7 +590,7 @@ static void gic_dist_restore(unsigned int gic_nr)
587 writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL); 590 writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL);
588 591
589 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++) 592 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
590 writel_relaxed(gic_data[gic_nr].saved_spi_conf[i], 593 writel_relaxed(gic->saved_spi_conf[i],
591 dist_base + GIC_DIST_CONFIG + i * 4); 594 dist_base + GIC_DIST_CONFIG + i * 4);
592 595
593 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) 596 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
@@ -595,85 +598,87 @@ static void gic_dist_restore(unsigned int gic_nr)
595 dist_base + GIC_DIST_PRI + i * 4); 598 dist_base + GIC_DIST_PRI + i * 4);
596 599
597 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) 600 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
598 writel_relaxed(gic_data[gic_nr].saved_spi_target[i], 601 writel_relaxed(gic->saved_spi_target[i],
599 dist_base + GIC_DIST_TARGET + i * 4); 602 dist_base + GIC_DIST_TARGET + i * 4);
600 603
601 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) { 604 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
602 writel_relaxed(GICD_INT_EN_CLR_X32, 605 writel_relaxed(GICD_INT_EN_CLR_X32,
603 dist_base + GIC_DIST_ENABLE_CLEAR + i * 4); 606 dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
604 writel_relaxed(gic_data[gic_nr].saved_spi_enable[i], 607 writel_relaxed(gic->saved_spi_enable[i],
605 dist_base + GIC_DIST_ENABLE_SET + i * 4); 608 dist_base + GIC_DIST_ENABLE_SET + i * 4);
606 } 609 }
607 610
608 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) { 611 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
609 writel_relaxed(GICD_INT_EN_CLR_X32, 612 writel_relaxed(GICD_INT_EN_CLR_X32,
610 dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4); 613 dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
611 writel_relaxed(gic_data[gic_nr].saved_spi_active[i], 614 writel_relaxed(gic->saved_spi_active[i],
612 dist_base + GIC_DIST_ACTIVE_SET + i * 4); 615 dist_base + GIC_DIST_ACTIVE_SET + i * 4);
613 } 616 }
614 617
615 writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL); 618 writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
616} 619}
617 620
618static void gic_cpu_save(unsigned int gic_nr) 621static void gic_cpu_save(struct gic_chip_data *gic)
619{ 622{
620 int i; 623 int i;
621 u32 *ptr; 624 u32 *ptr;
622 void __iomem *dist_base; 625 void __iomem *dist_base;
623 void __iomem *cpu_base; 626 void __iomem *cpu_base;
624 627
625 BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR); 628 if (WARN_ON(!gic))
629 return;
626 630
627 dist_base = gic_data_dist_base(&gic_data[gic_nr]); 631 dist_base = gic_data_dist_base(gic);
628 cpu_base = gic_data_cpu_base(&gic_data[gic_nr]); 632 cpu_base = gic_data_cpu_base(gic);
629 633
630 if (!dist_base || !cpu_base) 634 if (!dist_base || !cpu_base)
631 return; 635 return;
632 636
633 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); 637 ptr = raw_cpu_ptr(gic->saved_ppi_enable);
634 for (i = 0; i < DIV_ROUND_UP(32, 32); i++) 638 for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
635 ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); 639 ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
636 640
637 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active); 641 ptr = raw_cpu_ptr(gic->saved_ppi_active);
638 for (i = 0; i < DIV_ROUND_UP(32, 32); i++) 642 for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
639 ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4); 643 ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
640 644
641 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); 645 ptr = raw_cpu_ptr(gic->saved_ppi_conf);
642 for (i = 0; i < DIV_ROUND_UP(32, 16); i++) 646 for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
643 ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); 647 ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
644 648
645} 649}
646 650
647static void gic_cpu_restore(unsigned int gic_nr) 651static void gic_cpu_restore(struct gic_chip_data *gic)
648{ 652{
649 int i; 653 int i;
650 u32 *ptr; 654 u32 *ptr;
651 void __iomem *dist_base; 655 void __iomem *dist_base;
652 void __iomem *cpu_base; 656 void __iomem *cpu_base;
653 657
654 BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR); 658 if (WARN_ON(!gic))
659 return;
655 660
656 dist_base = gic_data_dist_base(&gic_data[gic_nr]); 661 dist_base = gic_data_dist_base(gic);
657 cpu_base = gic_data_cpu_base(&gic_data[gic_nr]); 662 cpu_base = gic_data_cpu_base(gic);
658 663
659 if (!dist_base || !cpu_base) 664 if (!dist_base || !cpu_base)
660 return; 665 return;
661 666
662 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); 667 ptr = raw_cpu_ptr(gic->saved_ppi_enable);
663 for (i = 0; i < DIV_ROUND_UP(32, 32); i++) { 668 for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
664 writel_relaxed(GICD_INT_EN_CLR_X32, 669 writel_relaxed(GICD_INT_EN_CLR_X32,
665 dist_base + GIC_DIST_ENABLE_CLEAR + i * 4); 670 dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
666 writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4); 671 writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
667 } 672 }
668 673
669 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active); 674 ptr = raw_cpu_ptr(gic->saved_ppi_active);
670 for (i = 0; i < DIV_ROUND_UP(32, 32); i++) { 675 for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
671 writel_relaxed(GICD_INT_EN_CLR_X32, 676 writel_relaxed(GICD_INT_EN_CLR_X32,
672 dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4); 677 dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
673 writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4); 678 writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4);
674 } 679 }
675 680
676 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); 681 ptr = raw_cpu_ptr(gic->saved_ppi_conf);
677 for (i = 0; i < DIV_ROUND_UP(32, 16); i++) 682 for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
678 writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4); 683 writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
679 684
@@ -682,7 +687,7 @@ static void gic_cpu_restore(unsigned int gic_nr)
682 dist_base + GIC_DIST_PRI + i * 4); 687 dist_base + GIC_DIST_PRI + i * 4);
683 688
684 writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK); 689 writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
685 gic_cpu_if_up(&gic_data[gic_nr]); 690 gic_cpu_if_up(gic);
686} 691}
687 692
688static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v) 693static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
@@ -697,18 +702,18 @@ static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
697#endif 702#endif
698 switch (cmd) { 703 switch (cmd) {
699 case CPU_PM_ENTER: 704 case CPU_PM_ENTER:
700 gic_cpu_save(i); 705 gic_cpu_save(&gic_data[i]);
701 break; 706 break;
702 case CPU_PM_ENTER_FAILED: 707 case CPU_PM_ENTER_FAILED:
703 case CPU_PM_EXIT: 708 case CPU_PM_EXIT:
704 gic_cpu_restore(i); 709 gic_cpu_restore(&gic_data[i]);
705 break; 710 break;
706 case CPU_CLUSTER_PM_ENTER: 711 case CPU_CLUSTER_PM_ENTER:
707 gic_dist_save(i); 712 gic_dist_save(&gic_data[i]);
708 break; 713 break;
709 case CPU_CLUSTER_PM_ENTER_FAILED: 714 case CPU_CLUSTER_PM_ENTER_FAILED:
710 case CPU_CLUSTER_PM_EXIT: 715 case CPU_CLUSTER_PM_EXIT:
711 gic_dist_restore(i); 716 gic_dist_restore(&gic_data[i]);
712 break; 717 break;
713 } 718 }
714 } 719 }
@@ -720,26 +725,39 @@ static struct notifier_block gic_notifier_block = {
720 .notifier_call = gic_notifier, 725 .notifier_call = gic_notifier,
721}; 726};
722 727
723static void __init gic_pm_init(struct gic_chip_data *gic) 728static int __init gic_pm_init(struct gic_chip_data *gic)
724{ 729{
725 gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4, 730 gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
726 sizeof(u32)); 731 sizeof(u32));
727 BUG_ON(!gic->saved_ppi_enable); 732 if (WARN_ON(!gic->saved_ppi_enable))
733 return -ENOMEM;
728 734
729 gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4, 735 gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
730 sizeof(u32)); 736 sizeof(u32));
731 BUG_ON(!gic->saved_ppi_active); 737 if (WARN_ON(!gic->saved_ppi_active))
738 goto free_ppi_enable;
732 739
733 gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4, 740 gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
734 sizeof(u32)); 741 sizeof(u32));
735 BUG_ON(!gic->saved_ppi_conf); 742 if (WARN_ON(!gic->saved_ppi_conf))
743 goto free_ppi_active;
736 744
737 if (gic == &gic_data[0]) 745 if (gic == &gic_data[0])
738 cpu_pm_register_notifier(&gic_notifier_block); 746 cpu_pm_register_notifier(&gic_notifier_block);
747
748 return 0;
749
750free_ppi_active:
751 free_percpu(gic->saved_ppi_active);
752free_ppi_enable:
753 free_percpu(gic->saved_ppi_enable);
754
755 return -ENOMEM;
739} 756}
740#else 757#else
741static void __init gic_pm_init(struct gic_chip_data *gic) 758static int __init gic_pm_init(struct gic_chip_data *gic)
742{ 759{
760 return 0;
743} 761}
744#endif 762#endif
745 763
@@ -1012,61 +1030,63 @@ static const struct irq_domain_ops gic_irq_domain_ops = {
1012 .unmap = gic_irq_domain_unmap, 1030 .unmap = gic_irq_domain_unmap,
1013}; 1031};
1014 1032
1015static void __init __gic_init_bases(unsigned int gic_nr, int irq_start, 1033static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start,
1016 void __iomem *dist_base, void __iomem *cpu_base, 1034 struct fwnode_handle *handle)
1017 u32 percpu_offset, struct fwnode_handle *handle)
1018{ 1035{
1019 irq_hw_number_t hwirq_base; 1036 irq_hw_number_t hwirq_base;
1020 struct gic_chip_data *gic; 1037 int gic_irqs, irq_base, i, ret;
1021 int gic_irqs, irq_base, i;
1022
1023 BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
1024 1038
1025 gic = &gic_data[gic_nr]; 1039 if (WARN_ON(!gic || gic->domain))
1040 return -EINVAL;
1026 1041
1027 /* Initialize irq_chip */ 1042 /* Initialize irq_chip */
1028 if (static_key_true(&supports_deactivate) && gic_nr == 0) { 1043 gic->chip = gic_chip;
1029 gic->chip = gic_eoimode1_chip; 1044
1045 if (static_key_true(&supports_deactivate) && gic == &gic_data[0]) {
1046 gic->chip.irq_mask = gic_eoimode1_mask_irq;
1047 gic->chip.irq_eoi = gic_eoimode1_eoi_irq;
1048 gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity;
1049 gic->chip.name = kasprintf(GFP_KERNEL, "GICv2");
1030 } else { 1050 } else {
1031 gic->chip = gic_chip; 1051 gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d",
1032 gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d", gic_nr); 1052 (int)(gic - &gic_data[0]));
1033 } 1053 }
1034 1054
1035#ifdef CONFIG_SMP 1055#ifdef CONFIG_SMP
1036 if (gic_nr == 0) 1056 if (gic == &gic_data[0])
1037 gic->chip.irq_set_affinity = gic_set_affinity; 1057 gic->chip.irq_set_affinity = gic_set_affinity;
1038#endif 1058#endif
1039 1059
1040#ifdef CONFIG_GIC_NON_BANKED 1060 if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
1041 if (percpu_offset) { /* Frankein-GIC without banked registers... */ 1061 /* Frankein-GIC without banked registers... */
1042 unsigned int cpu; 1062 unsigned int cpu;
1043 1063
1044 gic->dist_base.percpu_base = alloc_percpu(void __iomem *); 1064 gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
1045 gic->cpu_base.percpu_base = alloc_percpu(void __iomem *); 1065 gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
1046 if (WARN_ON(!gic->dist_base.percpu_base || 1066 if (WARN_ON(!gic->dist_base.percpu_base ||
1047 !gic->cpu_base.percpu_base)) { 1067 !gic->cpu_base.percpu_base)) {
1048 free_percpu(gic->dist_base.percpu_base); 1068 ret = -ENOMEM;
1049 free_percpu(gic->cpu_base.percpu_base); 1069 goto error;
1050 return;
1051 } 1070 }
1052 1071
1053 for_each_possible_cpu(cpu) { 1072 for_each_possible_cpu(cpu) {
1054 u32 mpidr = cpu_logical_map(cpu); 1073 u32 mpidr = cpu_logical_map(cpu);
1055 u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); 1074 u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
1056 unsigned long offset = percpu_offset * core_id; 1075 unsigned long offset = gic->percpu_offset * core_id;
1057 *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset; 1076 *per_cpu_ptr(gic->dist_base.percpu_base, cpu) =
1058 *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset; 1077 gic->raw_dist_base + offset;
1078 *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) =
1079 gic->raw_cpu_base + offset;
1059 } 1080 }
1060 1081
1061 gic_set_base_accessor(gic, gic_get_percpu_base); 1082 gic_set_base_accessor(gic, gic_get_percpu_base);
1062 } else 1083 } else {
1063#endif 1084 /* Normal, sane GIC... */
1064 { /* Normal, sane GIC... */ 1085 WARN(gic->percpu_offset,
1065 WARN(percpu_offset,
1066 "GIC_NON_BANKED not enabled, ignoring %08x offset!", 1086 "GIC_NON_BANKED not enabled, ignoring %08x offset!",
1067 percpu_offset); 1087 gic->percpu_offset);
1068 gic->dist_base.common_base = dist_base; 1088 gic->dist_base.common_base = gic->raw_dist_base;
1069 gic->cpu_base.common_base = cpu_base; 1089 gic->cpu_base.common_base = gic->raw_cpu_base;
1070 gic_set_base_accessor(gic, gic_get_common_base); 1090 gic_set_base_accessor(gic, gic_get_common_base);
1071 } 1091 }
1072 1092
@@ -1089,7 +1109,7 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
1089 * For primary GICs, skip over SGIs. 1109 * For primary GICs, skip over SGIs.
1090 * For secondary GICs, skip over PPIs, too. 1110 * For secondary GICs, skip over PPIs, too.
1091 */ 1111 */
1092 if (gic_nr == 0 && (irq_start & 31) > 0) { 1112 if (gic == &gic_data[0] && (irq_start & 31) > 0) {
1093 hwirq_base = 16; 1113 hwirq_base = 16;
1094 if (irq_start != -1) 1114 if (irq_start != -1)
1095 irq_start = (irq_start & ~31) + 16; 1115 irq_start = (irq_start & ~31) + 16;
@@ -1111,10 +1131,12 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
1111 hwirq_base, &gic_irq_domain_ops, gic); 1131 hwirq_base, &gic_irq_domain_ops, gic);
1112 } 1132 }
1113 1133
1114 if (WARN_ON(!gic->domain)) 1134 if (WARN_ON(!gic->domain)) {
1115 return; 1135 ret = -ENODEV;
1136 goto error;
1137 }
1116 1138
1117 if (gic_nr == 0) { 1139 if (gic == &gic_data[0]) {
1118 /* 1140 /*
1119 * Initialize the CPU interface map to all CPUs. 1141 * Initialize the CPU interface map to all CPUs.
1120 * It will be refined as each CPU probes its ID. 1142 * It will be refined as each CPU probes its ID.
@@ -1132,19 +1154,57 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
1132 } 1154 }
1133 1155
1134 gic_dist_init(gic); 1156 gic_dist_init(gic);
1135 gic_cpu_init(gic); 1157 ret = gic_cpu_init(gic);
1136 gic_pm_init(gic); 1158 if (ret)
1159 goto error;
1160
1161 ret = gic_pm_init(gic);
1162 if (ret)
1163 goto error;
1164
1165 return 0;
1166
1167error:
1168 if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
1169 free_percpu(gic->dist_base.percpu_base);
1170 free_percpu(gic->cpu_base.percpu_base);
1171 }
1172
1173 kfree(gic->chip.name);
1174
1175 return ret;
1137} 1176}
1138 1177
1139void __init gic_init(unsigned int gic_nr, int irq_start, 1178void __init gic_init(unsigned int gic_nr, int irq_start,
1140 void __iomem *dist_base, void __iomem *cpu_base) 1179 void __iomem *dist_base, void __iomem *cpu_base)
1141{ 1180{
1181 struct gic_chip_data *gic;
1182
1183 if (WARN_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR))
1184 return;
1185
1142 /* 1186 /*
1143 * Non-DT/ACPI systems won't run a hypervisor, so let's not 1187 * Non-DT/ACPI systems won't run a hypervisor, so let's not
1144 * bother with these... 1188 * bother with these...
1145 */ 1189 */
1146 static_key_slow_dec(&supports_deactivate); 1190 static_key_slow_dec(&supports_deactivate);
1147 __gic_init_bases(gic_nr, irq_start, dist_base, cpu_base, 0, NULL); 1191
1192 gic = &gic_data[gic_nr];
1193 gic->raw_dist_base = dist_base;
1194 gic->raw_cpu_base = cpu_base;
1195
1196 __gic_init_bases(gic, irq_start, NULL);
1197}
1198
1199static void gic_teardown(struct gic_chip_data *gic)
1200{
1201 if (WARN_ON(!gic))
1202 return;
1203
1204 if (gic->raw_dist_base)
1205 iounmap(gic->raw_dist_base);
1206 if (gic->raw_cpu_base)
1207 iounmap(gic->raw_cpu_base);
1148} 1208}
1149 1209
1150#ifdef CONFIG_OF 1210#ifdef CONFIG_OF
@@ -1188,35 +1248,61 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
1188 return true; 1248 return true;
1189} 1249}
1190 1250
1251static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
1252{
1253 if (!gic || !node)
1254 return -EINVAL;
1255
1256 gic->raw_dist_base = of_iomap(node, 0);
1257 if (WARN(!gic->raw_dist_base, "unable to map gic dist registers\n"))
1258 goto error;
1259
1260 gic->raw_cpu_base = of_iomap(node, 1);
1261 if (WARN(!gic->raw_cpu_base, "unable to map gic cpu registers\n"))
1262 goto error;
1263
1264 if (of_property_read_u32(node, "cpu-offset", &gic->percpu_offset))
1265 gic->percpu_offset = 0;
1266
1267 return 0;
1268
1269error:
1270 gic_teardown(gic);
1271
1272 return -ENOMEM;
1273}
1274
1191int __init 1275int __init
1192gic_of_init(struct device_node *node, struct device_node *parent) 1276gic_of_init(struct device_node *node, struct device_node *parent)
1193{ 1277{
1194 void __iomem *cpu_base; 1278 struct gic_chip_data *gic;
1195 void __iomem *dist_base; 1279 int irq, ret;
1196 u32 percpu_offset;
1197 int irq;
1198 1280
1199 if (WARN_ON(!node)) 1281 if (WARN_ON(!node))
1200 return -ENODEV; 1282 return -ENODEV;
1201 1283
1202 dist_base = of_iomap(node, 0); 1284 if (WARN_ON(gic_cnt >= CONFIG_ARM_GIC_MAX_NR))
1203 WARN(!dist_base, "unable to map gic dist registers\n"); 1285 return -EINVAL;
1286
1287 gic = &gic_data[gic_cnt];
1204 1288
1205 cpu_base = of_iomap(node, 1); 1289 ret = gic_of_setup(gic, node);
1206 WARN(!cpu_base, "unable to map gic cpu registers\n"); 1290 if (ret)
1291 return ret;
1207 1292
1208 /* 1293 /*
1209 * Disable split EOI/Deactivate if either HYP is not available 1294 * Disable split EOI/Deactivate if either HYP is not available
1210 * or the CPU interface is too small. 1295 * or the CPU interface is too small.
1211 */ 1296 */
1212 if (gic_cnt == 0 && !gic_check_eoimode(node, &cpu_base)) 1297 if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base))
1213 static_key_slow_dec(&supports_deactivate); 1298 static_key_slow_dec(&supports_deactivate);
1214 1299
1215 if (of_property_read_u32(node, "cpu-offset", &percpu_offset)) 1300 ret = __gic_init_bases(gic, -1, &node->fwnode);
1216 percpu_offset = 0; 1301 if (ret) {
1302 gic_teardown(gic);
1303 return ret;
1304 }
1217 1305
1218 __gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset,
1219 &node->fwnode);
1220 if (!gic_cnt) 1306 if (!gic_cnt)
1221 gic_init_physaddr(node); 1307 gic_init_physaddr(node);
1222 1308
@@ -1303,9 +1389,9 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
1303 const unsigned long end) 1389 const unsigned long end)
1304{ 1390{
1305 struct acpi_madt_generic_distributor *dist; 1391 struct acpi_madt_generic_distributor *dist;
1306 void __iomem *cpu_base, *dist_base;
1307 struct fwnode_handle *domain_handle; 1392 struct fwnode_handle *domain_handle;
1308 int count; 1393 struct gic_chip_data *gic = &gic_data[0];
1394 int count, ret;
1309 1395
1310 /* Collect CPU base addresses */ 1396 /* Collect CPU base addresses */
1311 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, 1397 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
@@ -1315,17 +1401,18 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
1315 return -EINVAL; 1401 return -EINVAL;
1316 } 1402 }
1317 1403
1318 cpu_base = ioremap(cpu_phy_base, ACPI_GIC_CPU_IF_MEM_SIZE); 1404 gic->raw_cpu_base = ioremap(cpu_phy_base, ACPI_GIC_CPU_IF_MEM_SIZE);
1319 if (!cpu_base) { 1405 if (!gic->raw_cpu_base) {
1320 pr_err("Unable to map GICC registers\n"); 1406 pr_err("Unable to map GICC registers\n");
1321 return -ENOMEM; 1407 return -ENOMEM;
1322 } 1408 }
1323 1409
1324 dist = (struct acpi_madt_generic_distributor *)header; 1410 dist = (struct acpi_madt_generic_distributor *)header;
1325 dist_base = ioremap(dist->base_address, ACPI_GICV2_DIST_MEM_SIZE); 1411 gic->raw_dist_base = ioremap(dist->base_address,
1326 if (!dist_base) { 1412 ACPI_GICV2_DIST_MEM_SIZE);
1413 if (!gic->raw_dist_base) {
1327 pr_err("Unable to map GICD registers\n"); 1414 pr_err("Unable to map GICD registers\n");
1328 iounmap(cpu_base); 1415 gic_teardown(gic);
1329 return -ENOMEM; 1416 return -ENOMEM;
1330 } 1417 }
1331 1418
@@ -1340,15 +1427,20 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
1340 /* 1427 /*
1341 * Initialize GIC instance zero (no multi-GIC support). 1428 * Initialize GIC instance zero (no multi-GIC support).
1342 */ 1429 */
1343 domain_handle = irq_domain_alloc_fwnode(dist_base); 1430 domain_handle = irq_domain_alloc_fwnode(gic->raw_dist_base);
1344 if (!domain_handle) { 1431 if (!domain_handle) {
1345 pr_err("Unable to allocate domain handle\n"); 1432 pr_err("Unable to allocate domain handle\n");
1346 iounmap(cpu_base); 1433 gic_teardown(gic);
1347 iounmap(dist_base);
1348 return -ENOMEM; 1434 return -ENOMEM;
1349 } 1435 }
1350 1436
1351 __gic_init_bases(0, -1, dist_base, cpu_base, 0, domain_handle); 1437 ret = __gic_init_bases(gic, -1, domain_handle);
1438 if (ret) {
1439 pr_err("Failed to initialise GIC\n");
1440 irq_domain_free_fwnode(domain_handle);
1441 gic_teardown(gic);
1442 return ret;
1443 }
1352 1444
1353 acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle); 1445 acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
1354 1446