aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2015-02-05 08:06:23 -0500
committerIngo Molnar <mingo@kernel.org>2015-02-18 09:08:33 -0500
commit02cea3958664723a5d2236f0f0058de97c7e4693 (patch)
tree0bf1e7116f925131c34dc1bc217942a6e6f00688 /kernel
parent4fe7ffb7e17ca6ad9173b8de35f260c9c8fc2f79 (diff)
genirq: Provide disable_hardirq()
For things like netpoll there is a need to disable an interrupt from atomic context. Currently netpoll uses disable_irq() which will sleep-wait on threaded handlers and thus forced_irqthreads breaks things. Provide disable_hardirq(), which uses synchronize_hardirq() to only wait for active hardirq handlers; also change synchronize_hardirq() to return the status of threaded handlers. This will allow one to try-disable an interrupt from atomic context, or in case of request_threaded_irq() to only wait for the hardirq part. Suggested-by: Sabrina Dubroca <sd@queasysnail.net> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Arnd Bergmann <arnd@arndb.de> Cc: David Miller <davem@davemloft.net> Cc: Eyal Perry <eyalpe@mellanox.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Quentin Lambert <lambert.quentin@gmail.com> Cc: Randy Dunlap <rdunlap@infradead.org> Cc: Russell King <linux@arm.linux.org.uk> Link: http://lkml.kernel.org/r/20150205130623.GH5029@twins.programming.kicks-ass.net [ Fixed typos and such. ] Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/irq/manage.c36
1 file changed, 34 insertions, 2 deletions
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 196a06fbc122..03329c2287eb 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -68,14 +68,20 @@ static void __synchronize_hardirq(struct irq_desc *desc)
68 * Do not use this for shutdown scenarios where you must be sure 68 * Do not use this for shutdown scenarios where you must be sure
69 * that all parts (hardirq and threaded handler) have completed. 69 * that all parts (hardirq and threaded handler) have completed.
70 * 70 *
71 * Returns: false if a threaded handler is active.
72 *
71 * This function may be called - with care - from IRQ context. 73 * This function may be called - with care - from IRQ context.
72 */ 74 */
73void synchronize_hardirq(unsigned int irq) 75bool synchronize_hardirq(unsigned int irq)
74{ 76{
75 struct irq_desc *desc = irq_to_desc(irq); 77 struct irq_desc *desc = irq_to_desc(irq);
76 78
77 if (desc) 79 if (desc) {
78 __synchronize_hardirq(desc); 80 __synchronize_hardirq(desc);
81 return !atomic_read(&desc->threads_active);
82 }
83
84 return true;
79} 85}
80EXPORT_SYMBOL(synchronize_hardirq); 86EXPORT_SYMBOL(synchronize_hardirq);
81 87
@@ -440,6 +446,32 @@ void disable_irq(unsigned int irq)
440} 446}
441EXPORT_SYMBOL(disable_irq); 447EXPORT_SYMBOL(disable_irq);
442 448
449/**
450 * disable_hardirq - disables an irq and waits for hardirq completion
451 * @irq: Interrupt to disable
452 *
453 * Disable the selected interrupt line. Enables and Disables are
454 * nested.
455 * This function waits for any pending hard IRQ handlers for this
456 * interrupt to complete before returning. If you use this function while
457 * holding a resource the hard IRQ handler may need you will deadlock.
458 *
459 * When used to optimistically disable an interrupt from atomic context
460 * the return value must be checked.
461 *
462 * Returns: false if a threaded handler is active.
463 *
464 * This function may be called - with care - from IRQ context.
465 */
466bool disable_hardirq(unsigned int irq)
467{
468 if (!__disable_irq_nosync(irq))
469 return synchronize_hardirq(irq);
470
471 return false;
472}
473EXPORT_SYMBOL_GPL(disable_hardirq);
474
443void __enable_irq(struct irq_desc *desc, unsigned int irq) 475void __enable_irq(struct irq_desc *desc, unsigned int irq)
444{ 476{
445 switch (desc->depth) { 477 switch (desc->depth) {