aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/irq
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2015-10-09 17:28:58 -0400
committerThomas Gleixner <tglx@linutronix.de>2015-10-11 05:33:42 -0400
commite9849777d0e27cdd2902805be51da73e7c79578c (patch)
tree398bab5d6a91973d2e8a281bae6dff8ebd57ac8b /kernel/irq
parentfcf8ab690e3decf8335c51fdabea773a3f5ea026 (diff)
genirq: Add flag to force mask in disable_irq[_nosync]()
If an irq chip does not implement the irq_disable callback, then we use a lazy approach for disabling the interrupt. That means that the interrupt is marked disabled, but the interrupt line is not immediately masked in the interrupt chip. It only becomes masked if the interrupt is raised while it's marked disabled. We use this to avoid possibly expensive mask/unmask operations for common case operations. Unfortunately there are devices which do not allow the interrupt to be disabled easily at the device level. They are forced to use disable_irq_nosync(). This can result in taking each interrupt twice. Instead of enforcing the non lazy mode on all interrupts of an irq chip, provide a settings flag, which can be set by the driver for that particular interrupt line. Reported-and-tested-by: Duc Dang <dhdang@apm.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: Marc Zyngier <marc.zyngier@arm.com> Cc: Jason Cooper <jason@lakedaemon.net> Link: http://lkml.kernel.org/r/alpine.DEB.2.11.1510092348370.6097@nanos
Diffstat (limited to 'kernel/irq')
-rw-r--r--kernel/irq/chip.c9
-rw-r--r--kernel/irq/manage.c1
-rw-r--r--kernel/irq/settings.h12
3 files changed, 22 insertions, 0 deletions
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 4aa00d325b8c..15206453b12a 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -241,6 +241,13 @@ void irq_enable(struct irq_desc *desc)
241 * disabled. If an interrupt happens, then the interrupt flow 241 * disabled. If an interrupt happens, then the interrupt flow
242 * handler masks the line at the hardware level and marks it 242 * handler masks the line at the hardware level and marks it
243 * pending. 243 * pending.
244 *
245 * If the interrupt chip does not implement the irq_disable callback,
246 * a driver can disable the lazy approach for a particular irq line by
247 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
248 * be used for devices which cannot disable the interrupt at the
249 * device level under certain circumstances and have to use
250 * disable_irq[_nosync] instead.
244 */ 251 */
245void irq_disable(struct irq_desc *desc) 252void irq_disable(struct irq_desc *desc)
246{ 253{
@@ -248,6 +255,8 @@ void irq_disable(struct irq_desc *desc)
248 if (desc->irq_data.chip->irq_disable) { 255 if (desc->irq_data.chip->irq_disable) {
249 desc->irq_data.chip->irq_disable(&desc->irq_data); 256 desc->irq_data.chip->irq_disable(&desc->irq_data);
250 irq_state_set_masked(desc); 257 irq_state_set_masked(desc);
258 } else if (irq_settings_disable_unlazy(desc)) {
259 mask_irq(desc);
251 } 260 }
252} 261}
253 262
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 312f9cb12805..a71175ff98d5 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1463,6 +1463,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1463 1463
1464 /* If this was the last handler, shut down the IRQ line: */ 1464 /* If this was the last handler, shut down the IRQ line: */
1465 if (!desc->action) { 1465 if (!desc->action) {
1466 irq_settings_clr_disable_unlazy(desc);
1466 irq_shutdown(desc); 1467 irq_shutdown(desc);
1467 irq_release_resources(desc); 1468 irq_release_resources(desc);
1468 } 1469 }
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index 3320b84cc60f..320579d89091 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -15,6 +15,7 @@ enum {
15 _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD, 15 _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
16 _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID, 16 _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
17 _IRQ_IS_POLLED = IRQ_IS_POLLED, 17 _IRQ_IS_POLLED = IRQ_IS_POLLED,
18 _IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY,
18 _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, 19 _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
19}; 20};
20 21
@@ -28,6 +29,7 @@ enum {
28#define IRQ_NESTED_THREAD GOT_YOU_MORON 29#define IRQ_NESTED_THREAD GOT_YOU_MORON
29#define IRQ_PER_CPU_DEVID GOT_YOU_MORON 30#define IRQ_PER_CPU_DEVID GOT_YOU_MORON
30#define IRQ_IS_POLLED GOT_YOU_MORON 31#define IRQ_IS_POLLED GOT_YOU_MORON
32#define IRQ_DISABLE_UNLAZY GOT_YOU_MORON
31#undef IRQF_MODIFY_MASK 33#undef IRQF_MODIFY_MASK
32#define IRQF_MODIFY_MASK GOT_YOU_MORON 34#define IRQF_MODIFY_MASK GOT_YOU_MORON
33 35
@@ -154,3 +156,13 @@ static inline bool irq_settings_is_polled(struct irq_desc *desc)
154{ 156{
155 return desc->status_use_accessors & _IRQ_IS_POLLED; 157 return desc->status_use_accessors & _IRQ_IS_POLLED;
156} 158}
159
160static inline bool irq_settings_disable_unlazy(struct irq_desc *desc)
161{
162 return desc->status_use_accessors & _IRQ_DISABLE_UNLAZY;
163}
164
165static inline void irq_settings_clr_disable_unlazy(struct irq_desc *desc)
166{
167 desc->status_use_accessors &= ~_IRQ_DISABLE_UNLAZY;
168}