author	Oleg Nesterov <oleg@tv-sign.ru>	2007-05-09 05:34:22 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-09 15:30:53 -0400
commit	28e53bddf814485699a4142bc056fd37d4e11dd4 (patch)
tree	5182090c4cc2186eedbda3cb90ed82a2836f6ff6 /include/linux/workqueue.h
parent	5830c5902138f80b0a097b797200c739466beedd (diff)
unify flush_work/flush_work_keventd and rename it to cancel_work_sync
flush_work(wq, work) doesn't need the first parameter, we can use cwq->wq
(this was possible from the very beginning, I missed this). So we can unify
flush_work_keventd and flush_work.

Also, rename flush_work() to cancel_work_sync() and fix all callers.

Perhaps this is not the best name, but "flush_work" is really bad.

(akpm: this is why the earlier patches bypassed maintainers)

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Tejun Heo <htejun@gmail.com>
Cc: Auke Kok <auke-jan.h.kok@intel.com>,
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
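For illustration only, a minimal sketch of what the rename means for a caller (the workqueue, work item, and function names below are hypothetical, not taken from this patch): code that used to wait on one specific work item with flush_work(wq, &work) now calls cancel_work_sync(&work), with no workqueue argument, since the workqueue can be found from the work item itself (cwq->wq).

/* Hypothetical driver code sketching the renamed API. */
#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	/* one-shot handler; does not re-queue itself */
}

static DECLARE_WORK(my_work, my_work_fn);

static void my_driver_teardown(struct workqueue_struct *my_wq)
{
	/*
	 * Before this patch a caller wrote: flush_work(my_wq, &my_work);
	 * After it, no workqueue argument is needed; any pending instance
	 * is cancelled and a running one is waited for.
	 */
	cancel_work_sync(&my_work);
	destroy_workqueue(my_wq);
}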
Diffstat (limited to 'include/linux/workqueue.h')
-rw-r--r--	include/linux/workqueue.h	21
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index e1581dce5890..d555f31c0746 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -128,30 +128,33 @@ extern struct workqueue_struct *__create_workqueue(const char *name,
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
 extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
-extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay));
+extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq,
+			struct delayed_work *work, unsigned long delay));
 extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 			struct delayed_work *work, unsigned long delay);
+
 extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
-extern void flush_work(struct workqueue_struct *wq, struct work_struct *work);
-extern void flush_work_keventd(struct work_struct *work);
+extern void flush_scheduled_work(void);
 
 extern int FASTCALL(schedule_work(struct work_struct *work));
-extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
-
-extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
+extern int FASTCALL(schedule_delayed_work(struct delayed_work *work,
+					unsigned long delay));
+extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
+					unsigned long delay);
 extern int schedule_on_each_cpu(work_func_t func);
-extern void flush_scheduled_work(void);
 extern int current_is_keventd(void);
 extern int keventd_up(void);
 
 extern void init_workqueues(void);
 int execute_in_process_context(work_func_t fn, struct execute_work *);
 
+extern void cancel_work_sync(struct work_struct *work);
+
 /*
  * Kill off a pending schedule_delayed_work(). Note that the work callback
  * function may still be running on return from cancel_delayed_work(), unless
- * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
- * flush_work() or cancel_work_sync() to wait on it.
+ * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
+ * cancel_work_sync() to wait on it.
  */
 static inline int cancel_delayed_work(struct delayed_work *work)
 {
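The comment in the hunk above documents a usage pattern; as a hedged sketch (all names below are made up for illustration, and the handler is assumed not to re-arm itself): cancel_delayed_work() only removes a pending timer, so when it returns 0 the callback may already be queued or running, and cancel_work_sync() (or flush_workqueue()) is needed to wait for it.

/* Hypothetical teardown path illustrating the cancel_delayed_work() comment. */
#include <linux/workqueue.h>

static void poll_fn(struct work_struct *work)
{
	/* one-shot handler; assumed not to re-arm itself */
}

static DECLARE_DELAYED_WORK(poll_work, poll_fn);

static void stop_polling(void)
{
	if (!cancel_delayed_work(&poll_work)) {
		/*
		 * The timer was no longer pending: the handler may already
		 * be queued or running, so wait for it to complete.
		 */
		cancel_work_sync(&poll_work.work);
	}
}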