Diffstat (limited to 'kernel')
-rw-r--r--	kernel/workqueue.c	36
1 file changed, 17 insertions(+), 19 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 63885abf1ba0..c9ab4293904f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -413,23 +413,23 @@ static void wait_on_work(struct cpu_workqueue_struct *cwq,
 }
 
 /**
- * flush_work - block until a work_struct's callback has terminated
- * @wq: the workqueue on which the work is queued
+ * cancel_work_sync - block until a work_struct's callback has terminated
  * @work: the work which is to be flushed
  *
- * flush_work() will attempt to cancel the work if it is queued. If the work's
- * callback appears to be running, flush_work() will block until it has
- * completed.
+ * cancel_work_sync() will attempt to cancel the work if it is queued. If the
+ * work's callback appears to be running, cancel_work_sync() will block until
+ * it has completed.
  *
- * flush_work() is designed to be used when the caller is tearing down data
- * structures which the callback function operates upon. It is expected that,
- * prior to calling flush_work(), the caller has arranged for the work to not
- * be requeued.
+ * cancel_work_sync() is designed to be used when the caller is tearing down
+ * data structures which the callback function operates upon. It is expected
+ * that, prior to calling cancel_work_sync(), the caller has arranged for the
+ * work to not be requeued.
  */
-void flush_work(struct workqueue_struct *wq, struct work_struct *work)
+void cancel_work_sync(struct work_struct *work)
 {
-	const cpumask_t *cpu_map = wq_cpu_map(wq);
 	struct cpu_workqueue_struct *cwq;
+	struct workqueue_struct *wq;
+	const cpumask_t *cpu_map;
 	int cpu;
 
 	might_sleep();
@@ -448,10 +448,13 @@ void flush_work(struct workqueue_struct *wq, struct work_struct *work)
 	work_clear_pending(work);
 	spin_unlock_irq(&cwq->lock);
 
+	wq = cwq->wq;
+	cpu_map = wq_cpu_map(wq);
+
 	for_each_cpu_mask(cpu, *cpu_map)
 		wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
-EXPORT_SYMBOL_GPL(flush_work);
+EXPORT_SYMBOL_GPL(cancel_work_sync);
 
 
 static struct workqueue_struct *keventd_wq;
@@ -540,18 +543,13 @@ void flush_scheduled_work(void)
 }
 EXPORT_SYMBOL(flush_scheduled_work);
 
-void flush_work_keventd(struct work_struct *work)
-{
-	flush_work(keventd_wq, work);
-}
-EXPORT_SYMBOL(flush_work_keventd);
-
 /**
  * cancel_rearming_delayed_work - kill off a delayed work whose handler rearms the delayed work.
  * @dwork: the delayed work struct
  *
  * Note that the work callback function may still be running on return from
- * cancel_delayed_work(). Run flush_workqueue() or flush_work() to wait on it.
+ * cancel_delayed_work(). Run flush_workqueue() or cancel_work_sync() to wait
+ * on it.
  */
 void cancel_rearming_delayed_work(struct delayed_work *dwork)
 {
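
For readers migrating callers, a minimal teardown sketch against the post-patch API (illustrative only: struct my_dev, my_dev_teardown, and the shutting_down flag are hypothetical names, not part of the patch; former flush_work_keventd(work) users now call cancel_work_sync(work) directly):

#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_dev {				/* hypothetical example structure */
	struct work_struct work;
	bool shutting_down;
};

static void my_dev_teardown(struct my_dev *dev)
{
	/*
	 * Per the kernel-doc above: arrange for the work not to be
	 * requeued before cancelling it (checked by the work callback).
	 */
	dev->shutting_down = true;

	/*
	 * Post-patch API: no workqueue argument. cancel_work_sync()
	 * dequeues the work if it is pending and blocks until a
	 * running callback has completed.
	 */
	cancel_work_sync(&dev->work);

	kfree(dev);	/* safe: the callback can no longer touch *dev */
}

The workqueue parameter could be dropped because cancel_work_sync() now derives wq from cwq->wq after locating the cwq the work last ran on, as the second hunk shows.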