path: root/kernel/irq_work.c
author     Frederic Weisbecker <fweisbec@gmail.com>    2014-05-23 12:10:21 -0400
committer  Frederic Weisbecker <fweisbec@gmail.com>    2014-06-16 10:26:53 -0400
commit     b93e0b8fa819c3d5641794ed9a07e643416aa0fd (patch)
tree       7f69d9083da0b366482eed6d94ce8123762c4994 /kernel/irq_work.c
parent     7171511eaec5bf23fb06078f59784a3a0626b38f (diff)
irq_work: Split raised and lazy lists
An irq work can be handled from two places: from the tick if the work
carries the "lazy" flag and the tick is periodic, or from a self-IPI.

We merge all these works in a single list and we use a per-CPU latch to
avoid raising a self-IPI when one is already pending.

Now we could do away with this ugly latch if the list were only made of
non-lazy works: just enqueueing a work on the empty list would be enough
to know if we need to raise an IPI or not.

Also we are going to implement remote irq work queuing. Then the per-CPU
latch will need to become atomic in the global scope. That's too bad
because, here as well, just enqueueing a work on an empty list of
non-lazy works would be enough to know if we need to raise an IPI or not.

So let's take a way out of this: split the works into two distinct lists,
one for the works that can be handled by the next tick and another one
for those handled by the IPI. Just checking if the latter is empty when
we queue a new work is enough to know if we need to raise an IPI.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Kevin Hilman <khilman@linaro.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
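The change hinges on one property of llist_add(): it returns true only when
it installs the first node on a previously empty list. So the enqueuer that
makes the raised list non-empty is exactly the one that must send the
self-IPI, and the separate irq_work_raised latch becomes unnecessary. Below
is a minimal, self-contained sketch of that pattern; the ll_add() and
queue_raised() names are hypothetical, and it uses C11 atomics rather than
the kernel's llist API (the real code is in the diff that follows).

/*
 * Sketch only, not kernel code: an atomic push onto a lock-free list can
 * report whether the list was empty beforehand, and only that first
 * enqueuer needs to raise the IPI.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct llnode { struct llnode *next; };
struct llhead { _Atomic(struct llnode *) first; };

/* Push a node; return true if the list was empty before the push. */
static bool ll_add(struct llnode *node, struct llhead *head)
{
	struct llnode *old = atomic_load(&head->first);

	do {
		node->next = old;
	} while (!atomic_compare_exchange_weak(&head->first, &old, node));

	return old == NULL;
}

/* Usage: the enqueuer that makes the list non-empty sends the IPI. */
void queue_raised(struct llhead *raised, struct llnode *work,
		  void (*raise_ipi)(void))
{
	if (ll_add(work, raised))
		raise_ipi();	/* at most one pending IPI per burst */
}

Only the caller that observes the empty-to-non-empty transition raises the
IPI; later enqueuers see a non-empty list and know an IPI (or tick) is
already on its way, which is the same guarantee the old cmpxchg latch
provided.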
Diffstat (limited to 'kernel/irq_work.c')
-rw-r--r--  kernel/irq_work.c | 51
1 file changed, 23 insertions(+), 28 deletions(-)
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index a82170e2fa78..126f254614bf 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -19,8 +19,8 @@
 #include <asm/processor.h>
 
 
-static DEFINE_PER_CPU(struct llist_head, irq_work_list);
-static DEFINE_PER_CPU(int, irq_work_raised);
+static DEFINE_PER_CPU(struct llist_head, raised_list);
+static DEFINE_PER_CPU(struct llist_head, lazy_list);
 
 /*
  * Claim the entry so that no one else will poke at it.
@@ -70,15 +70,13 @@ bool irq_work_queue(struct irq_work *work)
 	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
 
-	llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
-
-	/*
-	 * If the work is not "lazy" or the tick is stopped, raise the irq
-	 * work interrupt (if supported by the arch), otherwise, just wait
-	 * for the next tick.
-	 */
-	if (!(work->flags & IRQ_WORK_LAZY) || tick_nohz_tick_stopped()) {
-		if (!this_cpu_cmpxchg(irq_work_raised, 0, 1))
+	/* If the work is "lazy", handle it from next tick if any */
+	if (work->flags & IRQ_WORK_LAZY) {
+		if (llist_add(&work->llnode, &__get_cpu_var(lazy_list)) &&
+		    tick_nohz_tick_stopped())
+			arch_irq_work_raise();
+	} else {
+		if (llist_add(&work->llnode, &__get_cpu_var(raised_list)))
 			arch_irq_work_raise();
 	}
 
@@ -90,10 +88,11 @@ EXPORT_SYMBOL_GPL(irq_work_queue);
 
 bool irq_work_needs_cpu(void)
 {
-	struct llist_head *this_list;
+	struct llist_head *raised, *lazy;
 
-	this_list = &__get_cpu_var(irq_work_list);
-	if (llist_empty(this_list))
+	raised = &__get_cpu_var(raised_list);
+	lazy = &__get_cpu_var(lazy_list);
+	if (llist_empty(raised) && llist_empty(lazy))
 		return false;
 
 	/* All work should have been flushed before going offline */
@@ -102,28 +101,18 @@ bool irq_work_needs_cpu(void)
 	return true;
 }
 
-static void __irq_work_run(void)
+static void irq_work_run_list(struct llist_head *list)
 {
 	unsigned long flags;
 	struct irq_work *work;
-	struct llist_head *this_list;
 	struct llist_node *llnode;
 
+	BUG_ON(!irqs_disabled());
 
-	/*
-	 * Reset the "raised" state right before we check the list because
-	 * an NMI may enqueue after we find the list empty from the runner.
-	 */
-	__this_cpu_write(irq_work_raised, 0);
-	barrier();
-
-	this_list = &__get_cpu_var(irq_work_list);
-	if (llist_empty(this_list))
+	if (llist_empty(list))
 		return;
 
-	BUG_ON(!irqs_disabled());
-
-	llnode = llist_del_all(this_list);
+	llnode = llist_del_all(list);
 	while (llnode != NULL) {
 		work = llist_entry(llnode, struct irq_work, llnode);
 
@@ -148,6 +137,12 @@ static void __irq_work_run(void)
 	}
 }
 
+static void __irq_work_run(void)
+{
+	irq_work_run_list(&__get_cpu_var(raised_list));
+	irq_work_run_list(&__get_cpu_var(lazy_list));
+}
+
 /*
  * Run the irq_work entries on this cpu. Requires to be ran from hardirq
  * context with local IRQs disabled.