author     Will Deacon <will.deacon@arm.com>   2011-02-09 07:01:12 -0500
committer  Will Deacon <will.deacon@arm.com>   2011-05-11 11:04:17 -0400
commit     1a01753ed90a4fb84357b9b592e50564c07737f7 (patch)
tree       57381deaf1267db867d154df2d8a5fb8288b003d /arch/arm/common
parent     4bd66cfde5c3b6eced0da483c6357ae46d3adbb5 (diff)
ARM: gic: use handle_fasteoi_irq for SPIs
Currently, the gic uses handle_level_irq for handling SPIs (Shared
Peripheral Interrupts), requiring active interrupts to be masked at
the distributor level during IRQ handling.
On a virtualised system, only the CPU interfaces are virtualised in
hardware. Accesses to the distributor must be trapped by the
hypervisor, adding latency to the critical interrupt path in Linux.
This patch modifies the GIC code to use handle_fasteoi_irq for handling
interrupts, which only requires us to signal EOI to the CPU interface
when handling is complete. Cascaded IRQ handling is also updated to use
the chained IRQ enter/exit functions to honour the flow control of the
parent chip.
Note that commit 846afbd1 ("GIC: Dont disable INT in ack callback")
broke cascading interrupts by forgetting to add IRQ masking. This is
no longer an issue because the unmask call is now unnecessary.
Tested on Versatile Express and Realview EB (1176 w/ cascaded GICs).
Tested-and-reviewed-by: Abhijeet Dharmapurikar <adharmap@codeaurora.org>
Tested-and-acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
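For context, the chained_irq_enter()/chained_irq_exit() helpers adopted below pick the appropriate flow control for whichever parent chip the cascade hangs off. Roughly, the ARM helpers of this era (inline functions in asm/mach/irq.h, not part of this patch) behave like the following sketch, paraphrased from memory:

static inline void chained_irq_enter(struct irq_chip *chip,
                                     struct irq_desc *desc)
{
        /* Fasteoi parent controllers require no action on entry. */
        if (chip->irq_eoi)
                return;

        /* Otherwise mask and ack the parent, as the level flow would. */
        if (chip->irq_mask_ack) {
                chip->irq_mask_ack(&desc->irq_data);
        } else {
                chip->irq_mask(&desc->irq_data);
                if (chip->irq_ack)
                        chip->irq_ack(&desc->irq_data);
        }
}

static inline void chained_irq_exit(struct irq_chip *chip,
                                    struct irq_desc *desc)
{
        /* EOI for fasteoi parents, unmask for level-flow parents. */
        if (chip->irq_eoi)
                chip->irq_eoi(&desc->irq_data);
        else
                chip->irq_unmask(&desc->irq_data);
}

This is why the hand-rolled irq_ack/irq_unmask calls in gic_handle_cascade_irq() can be dropped: the helpers do the equivalent work for a level-flow parent, and only signal EOI once the GIC itself switches to the fasteoi flow.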
Diffstat (limited to 'arch/arm/common')
-rw-r--r--   arch/arm/common/gic.c   32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index f70ec7dadebb..e9c2ff83909b 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -49,7 +49,7 @@ struct gic_chip_data {
  * Default make them NULL.
  */
 struct irq_chip gic_arch_extn = {
-	.irq_ack	= NULL,
+	.irq_eoi	= NULL,
 	.irq_mask	= NULL,
 	.irq_unmask	= NULL,
 	.irq_retrigger	= NULL,
@@ -84,15 +84,6 @@ static inline unsigned int gic_irq(struct irq_data *d)
 /*
  * Routines to acknowledge, disable and enable interrupts
  */
-static void gic_ack_irq(struct irq_data *d)
-{
-	spin_lock(&irq_controller_lock);
-	if (gic_arch_extn.irq_ack)
-		gic_arch_extn.irq_ack(d);
-	writel(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
-	spin_unlock(&irq_controller_lock);
-}
-
 static void gic_mask_irq(struct irq_data *d)
 {
 	u32 mask = 1 << (d->irq % 32);
@@ -115,6 +106,17 @@ static void gic_unmask_irq(struct irq_data *d)
 	spin_unlock(&irq_controller_lock);
 }
 
+static void gic_eoi_irq(struct irq_data *d)
+{
+	if (gic_arch_extn.irq_eoi) {
+		spin_lock(&irq_controller_lock);
+		gic_arch_extn.irq_eoi(d);
+		spin_unlock(&irq_controller_lock);
+	}
+
+	writel(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
+}
+
 static int gic_set_type(struct irq_data *d, unsigned int type)
 {
 	void __iomem *base = gic_dist_base(d);
@@ -218,8 +220,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
 	unsigned int cascade_irq, gic_irq;
 	unsigned long status;
 
-	/* primary controller ack'ing */
-	chip->irq_ack(&desc->irq_data);
+	chained_irq_enter(chip, desc);
 
 	spin_lock(&irq_controller_lock);
 	status = readl(chip_data->cpu_base + GIC_CPU_INTACK);
@@ -236,15 +237,14 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
 	generic_handle_irq(cascade_irq);
 
  out:
-	/* primary controller unmasking */
-	chip->irq_unmask(&desc->irq_data);
+	chained_irq_exit(chip, desc);
 }
 
 static struct irq_chip gic_chip = {
 	.name			= "GIC",
-	.irq_ack		= gic_ack_irq,
 	.irq_mask		= gic_mask_irq,
 	.irq_unmask		= gic_unmask_irq,
+	.irq_eoi		= gic_eoi_irq,
 	.irq_set_type		= gic_set_type,
 	.irq_retrigger		= gic_retrigger,
 #ifdef CONFIG_SMP
@@ -319,7 +319,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
 	 * Setup the Linux IRQ subsystem.
 	 */
 	for (i = irq_start; i < irq_limit; i++) {
-		irq_set_chip_and_handler(i, &gic_chip, handle_level_irq);
+		irq_set_chip_and_handler(i, &gic_chip, handle_fasteoi_irq);
 		irq_set_chip_data(i, gic);
 		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
 	}
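To see why the switch reduces distributor traffic, compare what the two flow handlers ask of the irq_chip. The functions below are deliberately simplified sketches (locking, disabled/pending handling and threaded-IRQ details are omitted; see kernel/irq/chip.c for the real handle_level_irq() and handle_fasteoi_irq()):

static void sketch_handle_level_irq(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);

        chip->irq_mask(&desc->irq_data);    /* GIC: distributor write (trapped when virtualised) */
        chip->irq_ack(&desc->irq_data);     /* pre-patch gic_ack_irq(): CPU-interface EOI */
        handle_irq_event(desc);
        chip->irq_unmask(&desc->irq_data);  /* GIC: another distributor write */
}

static void sketch_handle_fasteoi_irq(struct irq_desc *desc)
{
        handle_irq_event(desc);
        /* gic_eoi_irq(): a single CPU-interface write, no distributor access */
        irq_desc_get_chip(desc)->irq_eoi(&desc->irq_data);
}

With the fasteoi flow the per-interrupt mask/unmask at the distributor disappears; masking is only needed when an interrupt is actually disabled, which is exactly the property the commit message relies on for virtualised systems.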