author    Marc Zyngier <marc.zyngier@arm.com>    2016-06-27 13:11:43 -0400
committer Marc Zyngier <marc.zyngier@arm.com>    2016-09-12 14:46:19 -0400
commit    04c8b0f82c7d5a9a1c296eef914ae3bb820bcb85
tree      0f0d6f5a723d59c97f0c28674c0b486ead7a544e    /drivers/irqchip/irq-gic.c
parent    9395452b4aab7bc2475ef8935b4a4fb99d778d70
irqchip/gic: Make locking a BL_SWITCHER only feature
The BL switcher code manipulates the logical/physical CPU mapping, forcing a lock to be taken on the IPI path. With an IPI-heavy load, this single lock becomes contended.

But when CONFIG_BL_SWITCHER is not enabled, there is no reason to take this lock at all, since the CPU mapping is immutable. This patch allows the lock to be entirely removed when BL_SWITCHER is not enabled (which is the case in most configurations), leading to a small improvement of "perf bench sched pipe" (measured on an 8-core AMD Seattle system):

Before: 101370 ops/sec
After:  103680 ops/sec

Take this opportunity to remove a useless lock being taken when handling an interrupt on a secondary GIC.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
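[Editor's note: the core trick in this patch is that the lock primitives compile down to empty statements when the switcher is configured out, so the fast path pays nothing. Below is a minimal standalone sketch of the same pattern, using a pthread mutex in place of the kernel's raw spinlock; the NEED_MAP_LOCK option, map_lock, cpu_map and logical_to_physical names are hypothetical illustrations, not part of this patch.]

#include <stdio.h>
#include <pthread.h>

#ifdef NEED_MAP_LOCK

/* The mapping can change at runtime: protect readers with a real lock. */
static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

#define map_lock_acquire()	pthread_mutex_lock(&map_lock)
#define map_lock_release()	pthread_mutex_unlock(&map_lock)

#else

/* Mapping is immutable: the lock compiles away entirely. */
#define map_lock_acquire()	do { } while (0)
#define map_lock_release()	do { } while (0)

#endif

/* Toy logical-to-physical CPU map, fixed at build time in this sketch. */
static int cpu_map[4] = { 3, 2, 1, 0 };

static int logical_to_physical(int cpu)
{
	int phys;

	map_lock_acquire();
	phys = cpu_map[cpu];
	map_lock_release();

	return phys;
}

int main(void)
{
	printf("logical cpu 1 -> physical cpu %d\n", logical_to_physical(1));
	return 0;
}

[Build either way, e.g. "cc -pthread -DNEED_MAP_LOCK demo.c" versus plain "cc demo.c". Note also that the patch's no-op gic_lock_irqsave()/gic_unlock_irqrestore() variants evaluate (void)(f), so the flags variable in callers does not trigger an unused-variable warning when BL_SWITCHER is disabled.]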
Diffstat (limited to 'drivers/irqchip/irq-gic.c')
-rw-r--r--    drivers/irqchip/irq-gic.c    36
1 file changed, 27 insertions(+), 9 deletions(-)
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 390fac59c6bc..d108fe6a32d9 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -91,7 +91,27 @@ struct gic_chip_data {
 #endif
 };
 
-static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+#ifdef CONFIG_BL_SWITCHER
+
+static DEFINE_RAW_SPINLOCK(cpu_map_lock);
+
+#define gic_lock_irqsave(f)		\
+	raw_spin_lock_irqsave(&cpu_map_lock, (f))
+#define gic_unlock_irqrestore(f)	\
+	raw_spin_unlock_irqrestore(&cpu_map_lock, (f))
+
+#define gic_lock()		raw_spin_lock(&cpu_map_lock)
+#define gic_unlock()		raw_spin_unlock(&cpu_map_lock)
+
+#else
+
+#define gic_lock_irqsave(f)		do { (void)(f); } while(0)
+#define gic_unlock_irqrestore(f)	do { (void)(f); } while(0)
+
+#define gic_lock()		do { } while(0)
+#define gic_unlock()		do { } while(0)
+
+#endif
 
 /*
  * The GIC mapping of CPU interfaces does not necessarily match
@@ -317,12 +337,12 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
 		return -EINVAL;
 
-	raw_spin_lock_irqsave(&irq_controller_lock, flags);
+	gic_lock_irqsave(flags);
 	mask = 0xff << shift;
 	bit = gic_cpu_map[cpu] << shift;
 	val = readl_relaxed(reg) & ~mask;
 	writel_relaxed(val | bit, reg);
-	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+	gic_unlock_irqrestore(flags);
 
 	return IRQ_SET_MASK_OK_DONE;
 }
@@ -374,9 +394,7 @@ static void gic_handle_cascade_irq(struct irq_desc *desc)
 
 	chained_irq_enter(chip, desc);
 
-	raw_spin_lock(&irq_controller_lock);
 	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
-	raw_spin_unlock(&irq_controller_lock);
 
 	gic_irq = (status & GICC_IAR_INT_ID_MASK);
 	if (gic_irq == GICC_INT_SPURIOUS)
@@ -776,7 +794,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 		return;
 	}
 
-	raw_spin_lock_irqsave(&irq_controller_lock, flags);
+	gic_lock_irqsave(flags);
 
 	/* Convert our logical CPU mask into a physical one. */
 	for_each_cpu(cpu, mask)
@@ -791,7 +809,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 	/* this always happens on GIC0 */
 	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
 
-	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+	gic_unlock_irqrestore(flags);
 }
 #endif
 
@@ -859,7 +877,7 @@ void gic_migrate_target(unsigned int new_cpu_id)
 	cur_target_mask = 0x01010101 << cur_cpu_id;
 	ror_val = (cur_cpu_id - new_cpu_id) & 31;
 
-	raw_spin_lock(&irq_controller_lock);
+	gic_lock();
 
 	/* Update the target interface for this logical CPU */
 	gic_cpu_map[cpu] = 1 << new_cpu_id;
@@ -879,7 +897,7 @@ void gic_migrate_target(unsigned int new_cpu_id)
 		}
 	}
 
-	raw_spin_unlock(&irq_controller_lock);
+	gic_unlock();
 
 	/*
 	 * Now let's migrate and clear any potential SGIs that might be