author	Nicolas Pitre <nicolas.pitre@linaro.org>	2012-04-11 18:55:48 -0400
committer	Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>	2012-11-19 10:44:34 -0500
commit	384a290283fde63ba8dc671fca5420111cdac19a (patch)
tree	b6c7eef5bc66988a4979edfd2a6440341747a4e5 /arch/arm/common/gic.c
parent	7f124aaf01439d2fa54283f3c375ce3b9fc776d4 (diff)
ARM: gic: use a private mapping for CPU target interfaces
The GIC interface numbering does not necessarily follow the logical
CPU numbering, especially for complex topologies such as multi-cluster
systems.

Fortunately we can easily probe the GIC to create a mapping, as the
Interrupt Processor Targets Registers for the first 32 interrupts are
read-only, and each field returns a value that always corresponds to
the processor reading the register.

Initially all mappings target all CPUs in case an IPI is required to
boot secondary CPUs.  The mapping is refined as those CPUs discover
what their actual mapping is.

Signed-off-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Will Deacon <will.deacon@arm.com>
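For readers coming to this patch cold, below is a small stand-alone sketch
(plain userspace C, not kernel code) of the probing scheme described above:
ITARGETSR0 (GIC_DIST_TARGET + 0) is read-only and each byte field returns the
target mask of the CPU interface doing the read, so every CPU can learn its
own mask and scrub it from the map entries it has not probed yet.  The
fake_itargetsr0() helper and the hard-coded interface numbers are invented
purely for illustration; on real hardware the value comes from the distributor.

#include <stdint.h>
#include <stdio.h>

#define NR_GIC_CPU_IF 8

static uint8_t gic_cpu_map[NR_GIC_CPU_IF];

/*
 * Stand-in for readl_relaxed(dist_base + GIC_DIST_TARGET + 0): the GIC
 * returns the reading CPU interface's own target bit in each byte field
 * of ITARGETSR0.  Here we fabricate that value from a made-up interface
 * number instead of reading hardware.
 */
static uint32_t fake_itargetsr0(unsigned int gic_if)
{
	uint32_t lane = 1u << gic_if;

	return lane | lane << 8 | lane << 16 | lane << 24;
}

/* Mirrors gic_cpu_init(): record our own mask, clear it from the others. */
static void probe_cpu(unsigned int cpu, unsigned int gic_if)
{
	unsigned int cpu_mask = fake_itargetsr0(gic_if) & 0xff;
	int i;

	gic_cpu_map[cpu] = cpu_mask;
	for (i = 0; i < NR_GIC_CPU_IF; i++)
		if (i != cpu)
			gic_cpu_map[i] &= ~cpu_mask;
}

int main(void)
{
	int i;

	/* As in gic_init_bases(): every entry starts out targeting all CPUs. */
	for (i = 0; i < NR_GIC_CPU_IF; i++)
		gic_cpu_map[i] = 0xff;

	/* Pretend logical CPU 0 sits on GIC interface 2, and CPU 1 on 0. */
	probe_cpu(0, 2);
	probe_cpu(1, 0);

	for (i = 0; i < 2; i++)
		printf("logical CPU %d -> GIC target mask 0x%02x\n",
		       i, (unsigned int)gic_cpu_map[i]);
	return 0;
}

Running it also shows why the map is initialized to 0xff: an SGI raised
before a secondary CPU has probed itself still targets all interfaces,
which is exactly what is needed to boot that CPU.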
Diffstat (limited to 'arch/arm/common/gic.c')
-rw-r--r--	arch/arm/common/gic.c	45
1 file changed, 36 insertions, 9 deletions
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index aa5269984187..36ae03a3f5d1 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -70,6 +70,14 @@ struct gic_chip_data {
 static DEFINE_RAW_SPINLOCK(irq_controller_lock);
 
 /*
+ * The GIC mapping of CPU interfaces does not necessarily match
+ * the logical CPU numbering.  Let's use a mapping as returned
+ * by the GIC itself.
+ */
+#define NR_GIC_CPU_IF 8
+static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
+
+/*
  * Supported arch specific GIC irq extension.
  * Default make them NULL.
  */
@@ -238,11 +246,11 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
 	u32 val, mask, bit;
 
-	if (cpu >= 8 || cpu >= nr_cpu_ids)
+	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
 		return -EINVAL;
 
 	mask = 0xff << shift;
-	bit = 1 << (cpu_logical_map(cpu) + shift);
+	bit = gic_cpu_map[cpu] << shift;
 
 	raw_spin_lock(&irq_controller_lock);
 	val = readl_relaxed(reg) & ~mask;
@@ -349,11 +357,6 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
 	u32 cpumask;
 	unsigned int gic_irqs = gic->gic_irqs;
 	void __iomem *base = gic_data_dist_base(gic);
-	u32 cpu = cpu_logical_map(smp_processor_id());
-
-	cpumask = 1 << cpu;
-	cpumask |= cpumask << 8;
-	cpumask |= cpumask << 16;
 
 	writel_relaxed(0, base + GIC_DIST_CTRL);
 
@@ -366,6 +369,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
 	/*
 	 * Set all global interrupts to this CPU only.
 	 */
+	cpumask = readl_relaxed(base + GIC_DIST_TARGET + 0);
 	for (i = 32; i < gic_irqs; i += 4)
 		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
 
@@ -389,9 +393,25 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
 {
 	void __iomem *dist_base = gic_data_dist_base(gic);
 	void __iomem *base = gic_data_cpu_base(gic);
+	unsigned int cpu_mask, cpu = smp_processor_id();
 	int i;
 
 	/*
+	 * Get what the GIC says our CPU mask is.
+	 */
+	BUG_ON(cpu >= NR_GIC_CPU_IF);
+	cpu_mask = readl_relaxed(dist_base + GIC_DIST_TARGET + 0);
+	gic_cpu_map[cpu] = cpu_mask;
+
+	/*
+	 * Clear our mask from the other map entries in case they're
+	 * still undefined.
+	 */
+	for (i = 0; i < NR_GIC_CPU_IF; i++)
+		if (i != cpu)
+			gic_cpu_map[i] &= ~cpu_mask;
+
+	/*
 	 * Deal with the banked PPI and SGI interrupts - disable all
 	 * PPI interrupts, ensure all SGI interrupts are enabled.
 	 */
@@ -646,7 +666,7 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
 {
 	irq_hw_number_t hwirq_base;
 	struct gic_chip_data *gic;
-	int gic_irqs, irq_base;
+	int gic_irqs, irq_base, i;
 
 	BUG_ON(gic_nr >= MAX_GIC_NR);
 
@@ -683,6 +703,13 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
 	}
 
 	/*
+	 * Initialize the CPU interface map to all CPUs.
+	 * It will be refined as each CPU probes its ID.
+	 */
+	for (i = 0; i < NR_GIC_CPU_IF; i++)
+		gic_cpu_map[i] = 0xff;
+
+	/*
 	 * For primary GICs, skip over SGIs.
 	 * For secondary GICs, skip over PPIs, too.
 	 */
@@ -737,7 +764,7 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 
 	/* Convert our logical CPU mask into a physical one. */
 	for_each_cpu(cpu, mask)
-		map |= 1 << cpu_logical_map(cpu);
+		map |= gic_cpu_map[cpu];
 
 	/*
 	 * Ensure that stores to Normal memory are visible to the