author		Christoph Lameter <cl@linux.com>	2010-12-14 11:28:45 -0500
committer	Tejun Heo <tj@kernel.org>		2010-12-18 09:54:48 -0500
commit		20b876918c065818b3574a426d418f68b4f8ad19 (patch)
tree		1e31b61ca0c28f528321ff13ed6315475dbf6992 /kernel
parent		05c2d088d0eb904e50460b04d77324c26cef4637 (diff)
irq_work: Use per cpu atomics instead of regular atomics
The irq work queue is a per cpu object and it is sufficient for
synchronization if per cpu atomics are used. Doing so simplifies
the code and reduces the overhead of the code.

Before:

christoph@linux-2.6$ size kernel/irq_work.o
   text	   data	    bss	    dec	    hex	filename
    451	      8	      1	    460	    1cc	kernel/irq_work.o

After:

christoph@linux-2.6$ size kernel/irq_work.o
   text	   data	    bss	    dec	    hex	filename
    438	      8	      1	    447	    1bf	kernel/irq_work.o

Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Christoph Lameter <cl@linux.com>
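For illustration only (not part of the commit), a minimal sketch of the push
pattern the patch switches to: a per-cpu list head updated with
__this_cpu_read()/this_cpu_cmpxchg() under preempt_disable(), instead of
taking the head's address via get_cpu_var() and using a plain cmpxchg().
The struct node, pcpu_list and pcpu_push names are made up for this sketch;
only the per-cpu accessors are the kernel APIs the patch actually uses.

#include <linux/percpu.h>
#include <linux/preempt.h>

struct node {
	struct node *next;
};

/* Illustrative per-cpu list head, one instance per CPU. */
static DEFINE_PER_CPU(struct node *, pcpu_list);

/* Lock-free push onto the current CPU's list. */
static void pcpu_push(struct node *entry)
{
	struct node *next;

	preempt_disable();		/* keep working on one CPU's list head */
	do {
		next = __this_cpu_read(pcpu_list);
		entry->next = next;
	} while (this_cpu_cmpxchg(pcpu_list, next, entry) != next);
	preempt_enable();
}

Because each CPU only ever pushes onto its own list, the update only has to be
atomic with respect to interrupts on the local CPU rather than across CPUs,
which is where the code simplification and size reduction quoted above come from.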
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/irq_work.c	18
1 files changed, 9 insertions, 9 deletions
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 90f881904bb1..c58fa7da8aef 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -77,21 +77,21 @@ void __weak arch_irq_work_raise(void)
  */
 static void __irq_work_queue(struct irq_work *entry)
 {
-	struct irq_work **head, *next;
+	struct irq_work *next;
 
-	head = &get_cpu_var(irq_work_list);
+	preempt_disable();
 
 	do {
-		next = *head;
+		next = __this_cpu_read(irq_work_list);
 		/* Can assign non-atomic because we keep the flags set. */
 		entry->next = next_flags(next, IRQ_WORK_FLAGS);
-	} while (cmpxchg(head, next, entry) != next);
+	} while (this_cpu_cmpxchg(irq_work_list, next, entry) != next);
 
 	/* The list was empty, raise self-interrupt to start processing. */
 	if (!irq_work_next(entry))
 		arch_irq_work_raise();
 
-	put_cpu_var(irq_work_list);
+	preempt_enable();
 }
 
 /*
@@ -120,16 +120,16 @@ EXPORT_SYMBOL_GPL(irq_work_queue);
  */
 void irq_work_run(void)
 {
-	struct irq_work *list, **head;
+	struct irq_work *list;
 
-	head = &__get_cpu_var(irq_work_list);
-	if (*head == NULL)
+	if (this_cpu_read(irq_work_list) == NULL)
 		return;
 
 	BUG_ON(!in_irq());
 	BUG_ON(!irqs_disabled());
 
-	list = xchg(head, NULL);
+	list = this_cpu_xchg(irq_work_list, NULL);
+
 	while (list != NULL) {
 		struct irq_work *entry = list;
 