aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/irqchip
diff options
context:
space:
mode:
authorNicolas Pitre <nicolas.pitre@linaro.org>2012-04-12 01:40:31 -0400
committerNicolas Pitre <nicolas.pitre@linaro.org>2013-07-30 09:02:12 -0400
commit1a6b69b6548cd0dd82549393f30dd982ceeb79d2 (patch)
tree97758dd77421bcacb3191c5347194d3d48940a3b /drivers/irqchip
parent71a8986d7e4845b6fca1298fe6e3233ce6fde0b7 (diff)
ARM: gic: add CPU migration support
This is required by the big.LITTLE switcher code. The gic_migrate_target() changes the CPU interface mapping for the current CPU to redirect SGIs to the specified interface, and it also updates the target CPU for each interrupts to that CPU interface if they were targeting the current interface. Finally, pending SGIs for the current CPU are forwarded to the new interface. Because Linux does not use it, the SGI source information for the forwarded SGIs is not preserved. Neither is the source information for the SGIs sent by the current CPU to other CPUs adjusted to match the new CPU interface mapping. The required registers are banked so only the target CPU could do it. Signed-off-by: Nicolas Pitre <nico@linaro.org>
Diffstat (limited to 'drivers/irqchip')
-rw-r--r--drivers/irqchip/irq-gic.c87
1 file changed, 84 insertions, 3 deletions
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index ee7c50312066..268874ac75e6 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -253,10 +253,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
253 if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) 253 if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
254 return -EINVAL; 254 return -EINVAL;
255 255
256 raw_spin_lock(&irq_controller_lock);
256 mask = 0xff << shift; 257 mask = 0xff << shift;
257 bit = gic_cpu_map[cpu] << shift; 258 bit = gic_cpu_map[cpu] << shift;
258
259 raw_spin_lock(&irq_controller_lock);
260 val = readl_relaxed(reg) & ~mask; 259 val = readl_relaxed(reg) & ~mask;
261 writel_relaxed(val | bit, reg); 260 writel_relaxed(val | bit, reg);
262 raw_spin_unlock(&irq_controller_lock); 261 raw_spin_unlock(&irq_controller_lock);
@@ -646,7 +645,9 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
646void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) 645void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
647{ 646{
648 int cpu; 647 int cpu;
649 unsigned long map = 0; 648 unsigned long flags, map = 0;
649
650 raw_spin_lock_irqsave(&irq_controller_lock, flags);
650 651
651 /* Convert our logical CPU mask into a physical one. */ 652 /* Convert our logical CPU mask into a physical one. */
652 for_each_cpu(cpu, mask) 653 for_each_cpu(cpu, mask)
@@ -660,6 +661,86 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
660 661
661 /* this always happens on GIC0 */ 662 /* this always happens on GIC0 */
662 writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT); 663 writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
664
665 raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
666}
667#endif
668
669#ifdef CONFIG_BL_SWITCHER
670/*
671 * gic_migrate_target - migrate IRQs to another CPU interface
672 *
673 * @new_cpu_id: the CPU target ID to migrate IRQs to
674 *
675 * Migrate all peripheral interrupts with a target matching the current CPU
676 * to the interface corresponding to @new_cpu_id. The CPU interface mapping
677 * is also updated. Targets to other CPU interfaces are unchanged.
678 * This must be called with IRQs locally disabled.
679 */
680void gic_migrate_target(unsigned int new_cpu_id)
681{
682 unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
683 void __iomem *dist_base;
684 int i, ror_val, cpu = smp_processor_id();
685 u32 val, cur_target_mask, active_mask;
686
687 if (gic_nr >= MAX_GIC_NR)
688 BUG();
689
690 dist_base = gic_data_dist_base(&gic_data[gic_nr]);
691 if (!dist_base)
692 return;
693 gic_irqs = gic_data[gic_nr].gic_irqs;
694
695 cur_cpu_id = __ffs(gic_cpu_map[cpu]);
696 cur_target_mask = 0x01010101 << cur_cpu_id;
697 ror_val = (cur_cpu_id - new_cpu_id) & 31;
698
699 raw_spin_lock(&irq_controller_lock);
700
701 /* Update the target interface for this logical CPU */
702 gic_cpu_map[cpu] = 1 << new_cpu_id;
703
704 /*
705 * Find all the peripheral interrupts targetting the current
706 * CPU interface and migrate them to the new CPU interface.
707 * We skip DIST_TARGET 0 to 7 as they are read-only.
708 */
709 for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
710 val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
711 active_mask = val & cur_target_mask;
712 if (active_mask) {
713 val &= ~active_mask;
714 val |= ror32(active_mask, ror_val);
715 writel_relaxed(val, dist_base + GIC_DIST_TARGET + i*4);
716 }
717 }
718
719 raw_spin_unlock(&irq_controller_lock);
720
721 /*
722 * Now let's migrate and clear any potential SGIs that might be
723 * pending for us (cur_cpu_id). Since GIC_DIST_SGI_PENDING_SET
724 * is a banked register, we can only forward the SGI using
725 * GIC_DIST_SOFTINT. The original SGI source is lost but Linux
726 * doesn't use that information anyway.
727 *
728 * For the same reason we do not adjust SGI source information
729 * for previously sent SGIs by us to other CPUs either.
730 */
731 for (i = 0; i < 16; i += 4) {
732 int j;
733 val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
734 if (!val)
735 continue;
736 writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
737 for (j = i; j < i + 4; j++) {
738 if (val & 0xff)
739 writel_relaxed((1 << (new_cpu_id + 16)) | j,
740 dist_base + GIC_DIST_SOFTINT);
741 val >>= 8;
742 }
743 }
663} 744}
664#endif 745#endif
665 746