diff options
| author | Lai Jiangshan <laijs@cn.fujitsu.com> | 2015-05-20 02:41:19 -0400 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2015-05-21 17:26:22 -0400 |
| commit | 37b1ef31a568fc02e53587620226e5f3c66454c8 (patch) | |
| tree | a6aba27cbbf72c03ae9f773a46230861fd0c2eac /include/linux/workqueue.h | |
| parent | 899a94fe15a8e928277ff0d0402c086fa67fe16f (diff) | |
workqueue: move flush_scheduled_work() to workqueue.h
flush_scheduled_work() is just a simple call to flush_workqueue().
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'include/linux/workqueue.h')
| -rw-r--r-- | include/linux/workqueue.h | 30 |
1 file changed, 29 insertions, 1 deletion
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 4618dd672d1b..738b30b39b68 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
| @@ -435,7 +435,6 @@ extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, | |||
| 435 | 435 | ||
| 436 | extern void flush_workqueue(struct workqueue_struct *wq); | 436 | extern void flush_workqueue(struct workqueue_struct *wq); |
| 437 | extern void drain_workqueue(struct workqueue_struct *wq); | 437 | extern void drain_workqueue(struct workqueue_struct *wq); |
| 438 | extern void flush_scheduled_work(void); | ||
| 439 | 438 | ||
| 440 | extern int schedule_on_each_cpu(work_func_t func); | 439 | extern int schedule_on_each_cpu(work_func_t func); |
| 441 | 440 | ||
| @@ -532,6 +531,35 @@ static inline bool schedule_work(struct work_struct *work) | |||
| 532 | } | 531 | } |
| 533 | 532 | ||
| 534 | /** | 533 | /** |
| 534 | * flush_scheduled_work - ensure that any scheduled work has run to completion. | ||
| 535 | * | ||
| 536 | * Forces execution of the kernel-global workqueue and blocks until its | ||
| 537 | * completion. | ||
| 538 | * | ||
| 539 | * Think twice before calling this function! It's very easy to get into | ||
| 540 | * trouble if you don't take great care. Either of the following situations | ||
| 541 | * will lead to deadlock: | ||
| 542 | * | ||
| 543 | * One of the work items currently on the workqueue needs to acquire | ||
| 544 | * a lock held by your code or its caller. | ||
| 545 | * | ||
| 546 | * Your code is running in the context of a work routine. | ||
| 547 | * | ||
| 548 | * They will be detected by lockdep when they occur, but the first might not | ||
| 549 | * occur very often. It depends on what work items are on the workqueue and | ||
| 550 | * what locks they need, which you have no control over. | ||
| 551 | * | ||
| 552 | * In most situations flushing the entire workqueue is overkill; you merely | ||
| 553 | * need to know that a particular work item isn't queued and isn't running. | ||
| 554 | * In such cases you should use cancel_delayed_work_sync() or | ||
| 555 | * cancel_work_sync() instead. | ||
| 556 | */ | ||
| 557 | static inline void flush_scheduled_work(void) | ||
| 558 | { | ||
| 559 | flush_workqueue(system_wq); | ||
| 560 | } | ||
| 561 | |||
| 562 | /** | ||
| 535 | * schedule_delayed_work_on - queue work in global workqueue on CPU after delay | 563 | * schedule_delayed_work_on - queue work in global workqueue on CPU after delay |
| 536 | * @cpu: cpu to use | 564 | * @cpu: cpu to use |
| 537 | * @dwork: job to be done | 565 | * @dwork: job to be done |
