author		Tejun Heo <tj@kernel.org>	2012-08-20 17:51:23 -0400
committer	Tejun Heo <tj@kernel.org>	2012-08-20 17:51:23 -0400
commit		606a5020b9bdceb20b4f43e11db0054afa349028 (patch)
tree		d5f65b7a94cd4c5987979a814178cc92cf4508d9	/kernel/workqueue.c
parent		dbf2576e37da0fcc7aacbfbb9fd5d3de7888a3c1 (diff)
workqueue: gut flush[_delayed]_work_sync()
Now that all workqueues are non-reentrant, flush[_delayed]_work_sync() are
equivalent to flush[_delayed]_work().  Drop the separate implementation and
make them thin wrappers around flush[_delayed]_work().

* start_flush_work() no longer takes @wait_executing as the only left
  user - flush_work() - always sets it to %true.

* __cancel_work_timer() uses flush_work() instead of wait_on_work().

Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	122
1 file changed, 10 insertions, 112 deletions
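
The diffstat is limited to kernel/workqueue.c, so the header-side half of the change is not shown on this page. As a rough illustration of the "thin wrappers" the commit message describes, the retained _sync() entry points would reduce to something like the following sketch, assuming the usual static-inline wrapper pattern in include/linux/workqueue.h; this is illustrative only, not the exact upstream hunk:

/* Illustrative sketch only; the real wrappers live in include/linux/workqueue.h. */
static inline bool flush_work_sync(struct work_struct *work)
{
	/* With all workqueues non-reentrant, a plain flush is sufficient. */
	return flush_work(work);
}

static inline bool flush_delayed_work_sync(struct delayed_work *dwork)
{
	/* Likewise for the delayed-work variant. */
	return flush_delayed_work(dwork);
}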
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c4feef9798ea..5f13a9a2c792 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2801,8 +2801,7 @@ reflush:
 }
 EXPORT_SYMBOL_GPL(drain_workqueue);
 
-static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
-			     bool wait_executing)
+static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
 {
 	struct worker *worker = NULL;
 	struct global_cwq *gcwq;
@@ -2824,13 +2823,12 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 		cwq = get_work_cwq(work);
 		if (unlikely(!cwq || gcwq != cwq->pool->gcwq))
 			goto already_gone;
-	} else if (wait_executing) {
+	} else {
 		worker = find_worker_executing_work(gcwq, work);
 		if (!worker)
 			goto already_gone;
 		cwq = worker->current_cwq;
-	} else
-		goto already_gone;
+	}
 
 	insert_wq_barrier(cwq, barr, work, worker);
 	spin_unlock_irq(&gcwq->lock);
@@ -2857,15 +2855,8 @@ already_gone:
  * flush_work - wait for a work to finish executing the last queueing instance
  * @work: the work to flush
  *
- * Wait until @work has finished execution.  This function considers
- * only the last queueing instance of @work.  If @work has been
- * enqueued across different CPUs on a non-reentrant workqueue or on
- * multiple workqueues, @work might still be executing on return on
- * some of the CPUs from earlier queueing.
- *
- * If @work was queued only on a non-reentrant, ordered or unbound
- * workqueue, @work is guaranteed to be idle on return if it hasn't
- * been requeued since flush started.
+ * Wait until @work has finished execution.  @work is guaranteed to be idle
+ * on return if it hasn't been requeued since flush started.
  *
  * RETURNS:
  * %true if flush_work() waited for the work to finish execution,
@@ -2878,85 +2869,15 @@ bool flush_work(struct work_struct *work)
 	lock_map_acquire(&work->lockdep_map);
 	lock_map_release(&work->lockdep_map);
 
-	if (start_flush_work(work, &barr, true)) {
+	if (start_flush_work(work, &barr)) {
 		wait_for_completion(&barr.done);
 		destroy_work_on_stack(&barr.work);
 		return true;
-	} else
-		return false;
-}
-EXPORT_SYMBOL_GPL(flush_work);
-
-static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
-{
-	struct wq_barrier barr;
-	struct worker *worker;
-
-	spin_lock_irq(&gcwq->lock);
-
-	worker = find_worker_executing_work(gcwq, work);
-	if (unlikely(worker))
-		insert_wq_barrier(worker->current_cwq, &barr, work, worker);
-
-	spin_unlock_irq(&gcwq->lock);
-
-	if (unlikely(worker)) {
-		wait_for_completion(&barr.done);
-		destroy_work_on_stack(&barr.work);
-		return true;
-	} else
+	} else {
 		return false;
-}
-
-static bool wait_on_work(struct work_struct *work)
-{
-	bool ret = false;
-	int cpu;
-
-	might_sleep();
-
-	lock_map_acquire(&work->lockdep_map);
-	lock_map_release(&work->lockdep_map);
-
-	for_each_gcwq_cpu(cpu)
-		ret |= wait_on_cpu_work(get_gcwq(cpu), work);
-	return ret;
-}
-
-/**
- * flush_work_sync - wait until a work has finished execution
- * @work: the work to flush
- *
- * Wait until @work has finished execution.  On return, it's
- * guaranteed that all queueing instances of @work which happened
- * before this function is called are finished.  In other words, if
- * @work hasn't been requeued since this function was called, @work is
- * guaranteed to be idle on return.
- *
- * RETURNS:
- * %true if flush_work_sync() waited for the work to finish execution,
- * %false if it was already idle.
- */
-bool flush_work_sync(struct work_struct *work)
-{
-	struct wq_barrier barr;
-	bool pending, waited;
-
-	/* we'll wait for executions separately, queue barr only if pending */
-	pending = start_flush_work(work, &barr, false);
-
-	/* wait for executions to finish */
-	waited = wait_on_work(work);
-
-	/* wait for the pending one */
-	if (pending) {
-		wait_for_completion(&barr.done);
-		destroy_work_on_stack(&barr.work);
 	}
-
-	return pending || waited;
 }
-EXPORT_SYMBOL_GPL(flush_work_sync);
+EXPORT_SYMBOL_GPL(flush_work);
 
 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 {
@@ -2970,14 +2891,14 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 		 * would be waiting for before retrying.
 		 */
 		if (unlikely(ret == -ENOENT))
-			wait_on_work(work);
+			flush_work(work);
 	} while (unlikely(ret < 0));
 
 	/* tell other tasks trying to grab @work to back off */
 	mark_work_canceling(work);
 	local_irq_restore(flags);
 
-	wait_on_work(work);
+	flush_work(work);
 	clear_work_data(work);
 	return ret;
 }
@@ -3030,29 +2951,6 @@ bool flush_delayed_work(struct delayed_work *dwork)
 EXPORT_SYMBOL(flush_delayed_work);
 
 /**
- * flush_delayed_work_sync - wait for a dwork to finish
- * @dwork: the delayed work to flush
- *
- * Delayed timer is cancelled and the pending work is queued for
- * execution immediately.  Other than timer handling, its behavior
- * is identical to flush_work_sync().
- *
- * RETURNS:
- * %true if flush_work_sync() waited for the work to finish execution,
- * %false if it was already idle.
- */
-bool flush_delayed_work_sync(struct delayed_work *dwork)
-{
-	local_irq_disable();
-	if (del_timer_sync(&dwork->timer))
-		__queue_work(dwork->cpu,
-			     get_work_cwq(&dwork->work)->wq, &dwork->work);
-	local_irq_enable();
-	return flush_work_sync(&dwork->work);
-}
-EXPORT_SYMBOL(flush_delayed_work_sync);
-
-/**
  * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
  * @dwork: the delayed work cancel
  *