author		Frederic Weisbecker <fweisbec@gmail.com>	2012-10-19 16:43:41 -0400
committer	Frederic Weisbecker <fweisbec@gmail.com>	2012-11-17 19:01:22 -0500
commit		bc6679aef673f9dcb8f718528fc3df49ff661af9 (patch)
tree		870cf7742a3f26c99e2a4a209dbe4db656d6ad6b /kernel/irq_work.c
parent		8aa2accee41f7045dc904fa41d4475b2f6ffae3e (diff)
irq_work: Make self-IPIs optable
On irq work initialization, let the user choose to define it as "lazy" or
not. "Lazy" means that we don't want to send an IPI (provided the arch can
do so anyway) when we enqueue this work, but would rather wait for the next
timer tick to execute the work if possible.

This is going to benefit non-urgent enqueuers (like printk in the future)
that may prefer not to raise an IPI storm in case of frequent enqueuing
over short periods of time.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
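For illustration only (not part of this patch): a minimal sketch of how an
enqueuer could opt in to the lazy behaviour, assuming the IRQ_WORK_LAZY flag
this series exports from <linux/irq_work.h>. The callback and function names
below are hypothetical.

#include <linux/irq_work.h>

/* Hypothetical non-urgent callback, run from the irq work path. */
static void example_lazy_work_func(struct irq_work *work)
{
	/* Deferred, non-urgent processing goes here. */
}

/*
 * Flagging the work IRQ_WORK_LAZY means irq_work_queue() will not raise a
 * self-IPI for it (unless the tick is stopped); the callback simply runs
 * from the next timer tick instead.
 */
static struct irq_work example_lazy_work = {
	.func	= example_lazy_work_func,
	.flags	= IRQ_WORK_LAZY,
};

static void example_enqueue(void)
{
	/* Enqueue as usual; laziness is a property of the work item itself. */
	irq_work_queue(&example_lazy_work);
}

This mirrors the printk use case mentioned above: such an enqueuer trades a
bounded delay (at most one tick, while the tick is running) for fewer
self-IPIs.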
Diffstat (limited to 'kernel/irq_work.c')
-rw-r--r--	kernel/irq_work.c	47
1 file changed, 27 insertions, 20 deletions
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 480f74715ba9..7f3a59bc8e3d 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -12,24 +12,15 @@
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
 #include <linux/irqflags.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 #include <asm/processor.h>
 
-/*
- * An entry can be in one of four states:
- *
- * free      NULL, 0 -> {claimed}       : free to be used
- * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
- * pending   next, 3 -> {busy}          : queued, pending callback
- * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
- */
-
-#define IRQ_WORK_PENDING	1UL
-#define IRQ_WORK_BUSY		2UL
-#define IRQ_WORK_FLAGS		3UL
-
 static DEFINE_PER_CPU(struct llist_head, irq_work_list);
+static DEFINE_PER_CPU(int, irq_work_raised);
 
 /*
  * Claim the entry so that no one else will poke at it.
@@ -69,14 +60,19 @@ void __weak arch_irq_work_raise(void)
  */
 static void __irq_work_queue(struct irq_work *work)
 {
-	bool empty;
-
 	preempt_disable();
 
-	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
-	/* The list was empty, raise self-interrupt to start processing. */
-	if (empty)
-		arch_irq_work_raise();
+	llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
+
+	/*
+	 * If the work is not "lazy" or the tick is stopped, raise the irq
+	 * work interrupt (if supported by the arch), otherwise, just wait
+	 * for the next tick.
+	 */
+	if (!(work->flags & IRQ_WORK_LAZY) || tick_nohz_tick_stopped()) {
+		if (!this_cpu_cmpxchg(irq_work_raised, 0, 1))
+			arch_irq_work_raise();
+	}
 
 	preempt_enable();
 }
@@ -117,10 +113,19 @@ bool irq_work_needs_cpu(void)
 
 static void __irq_work_run(void)
 {
+	unsigned long flags;
 	struct irq_work *work;
 	struct llist_head *this_list;
 	struct llist_node *llnode;
 
+
+	/*
+	 * Reset the "raised" state right before we check the list because
+	 * an NMI may enqueue after we find the list empty from the runner.
+	 */
+	__this_cpu_write(irq_work_raised, 0);
+	barrier();
+
 	this_list = &__get_cpu_var(irq_work_list);
 	if (llist_empty(this_list))
 		return;
@@ -140,13 +145,15 @@ static void __irq_work_run(void)
 		 * to claim that work don't rely on us to handle their data
 		 * while we are in the middle of the func.
 		 */
-		xchg(&work->flags, IRQ_WORK_BUSY);
+		flags = work->flags & ~IRQ_WORK_PENDING;
+		xchg(&work->flags, flags);
+
 		work->func(work);
 		/*
 		 * Clear the BUSY bit and return to the free state if
 		 * no-one else claimed it meanwhile.
 		 */
-		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
+		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
 	}
 }
 