author     Russell King <rmk+kernel@arm.linux.org.uk>  2013-09-17 10:13:38 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2013-09-17 10:14:07 -0400
commit     7f63037c8fb204f666367c6469f5a9b9d6888877 (patch)
tree       a026c999c7166aa1004e111b2e9afd7b663acdce /drivers/irqchip
parent     272b98c6455f00884f0350f775c5342358ebb73f (diff)
parent     b22537c682671de97c932d5addb6b7d087352aa1 (diff)
Merge branch 'iks_for_rmk' of git://git.linaro.org/people/nico/linux into devel-stable
Nicolas Pitre writes:
This is the first part of the patch series adding IKS (In-Kernel
Switcher) support for big.LITTLE system architectures. This consists of
the core patches only. Extra patches to come later will introduce
various optimizations and tracing support.
Those patches were posted on the list a while ago here:
http://news.gmane.org/group/gmane.linux.ports.arm.kernel/thread=253942
Diffstat (limited to 'drivers/irqchip')
-rw-r--r--  drivers/irqchip/irq-gic.c  108
1 file changed, 105 insertions, 3 deletions
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index d0e948084eaf..6365b59181ee 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -253,10 +253,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
 		return -EINVAL;
 
+	raw_spin_lock(&irq_controller_lock);
 	mask = 0xff << shift;
 	bit = gic_cpu_map[cpu] << shift;
-
-	raw_spin_lock(&irq_controller_lock);
 	val = readl_relaxed(reg) & ~mask;
 	writel_relaxed(val | bit, reg);
 	raw_spin_unlock(&irq_controller_lock);
@@ -652,7 +651,9 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
 void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 {
 	int cpu;
-	unsigned long map = 0;
+	unsigned long flags, map = 0;
+
+	raw_spin_lock_irqsave(&irq_controller_lock, flags);
 
 	/* Convert our logical CPU mask into a physical one. */
 	for_each_cpu(cpu, mask)
@@ -666,6 +667,107 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 
 	/* this always happens on GIC0 */
 	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+
+	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+#endif
+
+#ifdef CONFIG_BL_SWITCHER
+/*
+ * gic_get_cpu_id - get the CPU interface ID for the specified CPU
+ *
+ * @cpu: the logical CPU number to get the GIC ID for.
+ *
+ * Return the CPU interface ID for the given logical CPU number,
+ * or -1 if the CPU number is too large or the interface ID is
+ * unknown (more than one bit set).
+ */
+int gic_get_cpu_id(unsigned int cpu)
+{
+	unsigned int cpu_bit;
+
+	if (cpu >= NR_GIC_CPU_IF)
+		return -1;
+	cpu_bit = gic_cpu_map[cpu];
+	if (cpu_bit & (cpu_bit - 1))
+		return -1;
+	return __ffs(cpu_bit);
+}
+
+/*
+ * gic_migrate_target - migrate IRQs to another CPU interface
+ *
+ * @new_cpu_id: the CPU target ID to migrate IRQs to
+ *
+ * Migrate all peripheral interrupts with a target matching the current CPU
+ * to the interface corresponding to @new_cpu_id.  The CPU interface mapping
+ * is also updated.  Targets to other CPU interfaces are unchanged.
+ * This must be called with IRQs locally disabled.
+ */
+void gic_migrate_target(unsigned int new_cpu_id)
+{
+	unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
+	void __iomem *dist_base;
+	int i, ror_val, cpu = smp_processor_id();
+	u32 val, cur_target_mask, active_mask;
+
+	if (gic_nr >= MAX_GIC_NR)
+		BUG();
+
+	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+	if (!dist_base)
+		return;
+	gic_irqs = gic_data[gic_nr].gic_irqs;
+
+	cur_cpu_id = __ffs(gic_cpu_map[cpu]);
+	cur_target_mask = 0x01010101 << cur_cpu_id;
+	ror_val = (cur_cpu_id - new_cpu_id) & 31;
+
+	raw_spin_lock(&irq_controller_lock);
+
+	/* Update the target interface for this logical CPU */
+	gic_cpu_map[cpu] = 1 << new_cpu_id;
+
+	/*
+	 * Find all the peripheral interrupts targetting the current
+	 * CPU interface and migrate them to the new CPU interface.
+	 * We skip DIST_TARGET 0 to 7 as they are read-only.
+	 */
+	for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
+		val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
+		active_mask = val & cur_target_mask;
+		if (active_mask) {
+			val &= ~active_mask;
+			val |= ror32(active_mask, ror_val);
+			writel_relaxed(val, dist_base + GIC_DIST_TARGET + i * 4);
+		}
+	}
+
+	raw_spin_unlock(&irq_controller_lock);
+
+	/*
+	 * Now let's migrate and clear any potential SGIs that might be
+	 * pending for us (cur_cpu_id).  Since GIC_DIST_SGI_PENDING_SET
+	 * is a banked register, we can only forward the SGI using
+	 * GIC_DIST_SOFTINT.  The original SGI source is lost but Linux
+	 * doesn't use that information anyway.
+	 *
+	 * For the same reason we do not adjust SGI source information
+	 * for previously sent SGIs by us to other CPUs either.
+	 */
+	for (i = 0; i < 16; i += 4) {
+		int j;
+		val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
+		if (!val)
+			continue;
+		writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
+		for (j = i; j < i + 4; j++) {
+			if (val & 0xff)
+				writel_relaxed((1 << (new_cpu_id + 16)) | j,
+					       dist_base + GIC_DIST_SOFTINT);
+			val >>= 8;
+		}
+	}
 }
 #endif
 
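The least obvious part of the gic_migrate_target() hunk above is the byte-rotation arithmetic used to redirect GIC_DIST_TARGET entries. Each 32-bit GIC_DIST_TARGET word holds the target masks of four interrupts, one byte per interrupt, with one bit per CPU interface inside each byte. Masking with 0x01010101 << cur_cpu_id picks out the bytes still aimed at the outgoing interface, and rotating that selection right by (cur_cpu_id - new_cpu_id) & 31 turns every 1 << cur_cpu_id bit into 1 << new_cpu_id in place. The stand-alone sketch below reproduces only that arithmetic; it is illustrative user-space C, not kernel code, with ror32() re-implemented locally and the register value and CPU interface IDs made up for the example.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's ror32() helper (rotate right by 0..31). */
static uint32_t ror32(uint32_t word, unsigned int shift)
{
	return (word >> shift) | (word << ((32 - shift) & 31));
}

int main(void)
{
	/*
	 * Hypothetical GIC_DIST_TARGET word covering four interrupts,
	 * least significant byte first:
	 *   intr 0 -> interface 0 (0x01), intr 1 -> interface 1 (0x02),
	 *   intr 2 -> interface 0 (0x01), intr 3 -> interfaces 0+1 (0x03).
	 */
	uint32_t val = 0x03010201;

	unsigned int cur_cpu_id = 0;	/* interface being vacated */
	unsigned int new_cpu_id = 1;	/* interface we migrate to  */

	uint32_t cur_target_mask = 0x01010101u << cur_cpu_id;
	unsigned int ror_val = (cur_cpu_id - new_cpu_id) & 31;

	/* Same steps as the loop body in gic_migrate_target(). */
	uint32_t active_mask = val & cur_target_mask;	/* bytes still aimed at cur_cpu_id */
	if (active_mask) {
		val &= ~active_mask;			/* clear the old target bit       */
		val |= ror32(active_mask, ror_val);	/* re-insert it as new_cpu_id bit */
	}

	/* Prints 0x02020202: every interrupt now targets interface 1 only. */
	printf("migrated target word: 0x%08" PRIx32 "\n", val);
	return 0;
}

With cur_cpu_id = 0 and new_cpu_id = 1 the rotation amount is 31, i.e. a one-bit left rotation, which moves bit 0 to bit 1 within every selected byte; because the mask only ever contains the cur_cpu_id bit of each byte, the rotation never smears a target across interrupt boundaries.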