aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/irq_work.c
diff options
context:
space:
mode:
authoranish kumar <anish198519851985@gmail.com>2013-02-03 16:08:23 -0500
committerIngo Molnar <mingo@kernel.org>2013-02-04 05:50:59 -0500
commitc02cf5f8ed6137e2b3b2f10e0fca336e06e09ba4 (patch)
tree808036e5a89ace73ea2a92719529df6c6321977b /kernel/irq_work.c
parent786133f6e8ff94aaa78cd6b7844d04c227098327 (diff)
irq_work: Remove return value from the irq_work_queue() function
As no one is using the return value of irq_work_queue(), it is better to just make it void. Signed-off-by: anish kumar <anish198519851985@gmail.com> Acked-by: Steven Rostedt <rostedt@goodmis.org> [ Fix stale comments, remove now unnecessary __irq_work_queue() intermediate function ] Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Link: http://lkml.kernel.org/r/1359925703-24304-1-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/irq_work.c')
-rw-r--r--kernel/irq_work.c31
1 file changed, 10 insertions, 21 deletions
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 64eddd59ed83..c9d7478e4889 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -63,12 +63,20 @@ void __weak arch_irq_work_raise(void)
63} 63}
64 64
65/* 65/*
66 * Queue the entry and raise the IPI if needed. 66 * Enqueue the irq_work @entry unless it's already pending
67 * somewhere.
68 *
69 * Can be re-enqueued while the callback is still in progress.
67 */ 70 */
68static void __irq_work_queue(struct irq_work *work) 71void irq_work_queue(struct irq_work *work)
69{ 72{
70 bool empty; 73 bool empty;
71 74
75 /* Only queue if not already pending */
76 if (!irq_work_claim(work))
77 return;
78
79 /* Queue the entry and raise the IPI if needed. */
72 preempt_disable(); 80 preempt_disable();
73 81
74 empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list)); 82 empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
@@ -78,25 +86,6 @@ static void __irq_work_queue(struct irq_work *work)
78 86
79 preempt_enable(); 87 preempt_enable();
80} 88}
81
82/*
83 * Enqueue the irq_work @entry, returns true on success, failure when the
84 * @entry was already enqueued by someone else.
85 *
86 * Can be re-enqueued while the callback is still in progress.
87 */
88bool irq_work_queue(struct irq_work *work)
89{
90 if (!irq_work_claim(work)) {
91 /*
92 * Already enqueued, can't do!
93 */
94 return false;
95 }
96
97 __irq_work_queue(work);
98 return true;
99}
100EXPORT_SYMBOL_GPL(irq_work_queue); 89EXPORT_SYMBOL_GPL(irq_work_queue);
101 90
102/* 91/*