about summary refs log tree commit diff stats
path: root/kernel/workqueue.c
diff options
context:
space:
mode:
authorJohannes Berg <johannes.berg@intel.com>2018-08-22 05:49:03 -0400
committerTejun Heo <tj@kernel.org>2018-08-22 11:31:37 -0400
commitd6e89786bed977f37f55ffca11e563f6d2b1e3b5 (patch)
tree099c38c86ac779dfe984e119370032dc16443247 /kernel/workqueue.c
parent66448bc274cadedb71fda7d914e7c29d8dead217 (diff)
workqueue: skip lockdep wq dependency in cancel_work_sync()
In cancel_work_sync(), we can only have one of two cases, even with
an ordered workqueue:

 * the work isn't running, just cancelled before it started
 * the work is running, but then nothing else can be on the
   workqueue before it

Thus, we need to skip the lockdep workqueue dependency handling,
otherwise we get false positive reports from lockdep saying that
we have a potential deadlock when the workqueue also has other
work items with locking, e.g.

  work1_function() { mutex_lock(&mutex); ... }
  work2_function() { /* nothing */ }

  other_function() {
    queue_work(ordered_wq, &work1);
    queue_work(ordered_wq, &work2);
    mutex_lock(&mutex);
    cancel_work_sync(&work2);
  }

As described above, this isn't a problem, but lockdep will currently
flag it as if cancel_work_sync() was flush_work(), which *is* a
problem.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--kernel/workqueue.c37
1 files changed, 22 insertions, 15 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7ea75529eabb..aa520e715bbc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2843,7 +2843,8 @@ reflush:
2843} 2843}
2844EXPORT_SYMBOL_GPL(drain_workqueue); 2844EXPORT_SYMBOL_GPL(drain_workqueue);
2845 2845
2846static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr) 2846static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
2847 bool from_cancel)
2847{ 2848{
2848 struct worker *worker = NULL; 2849 struct worker *worker = NULL;
2849 struct worker_pool *pool; 2850 struct worker_pool *pool;
@@ -2885,7 +2886,8 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
2885 * workqueues the deadlock happens when the rescuer stalls, blocking 2886 * workqueues the deadlock happens when the rescuer stalls, blocking
2886 * forward progress. 2887 * forward progress.
2887 */ 2888 */
2888 if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer) { 2889 if (!from_cancel &&
2890 (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
2889 lock_map_acquire(&pwq->wq->lockdep_map); 2891 lock_map_acquire(&pwq->wq->lockdep_map);
2890 lock_map_release(&pwq->wq->lockdep_map); 2892 lock_map_release(&pwq->wq->lockdep_map);
2891 } 2893 }
@@ -2896,6 +2898,22 @@ already_gone:
2896 return false; 2898 return false;
2897} 2899}
2898 2900
2901static bool __flush_work(struct work_struct *work, bool from_cancel)
2902{
2903 struct wq_barrier barr;
2904
2905 if (WARN_ON(!wq_online))
2906 return false;
2907
2908 if (start_flush_work(work, &barr, from_cancel)) {
2909 wait_for_completion(&barr.done);
2910 destroy_work_on_stack(&barr.work);
2911 return true;
2912 } else {
2913 return false;
2914 }
2915}
2916
2899/** 2917/**
2900 * flush_work - wait for a work to finish executing the last queueing instance 2918 * flush_work - wait for a work to finish executing the last queueing instance
2901 * @work: the work to flush 2919 * @work: the work to flush
@@ -2909,18 +2927,7 @@ already_gone:
2909 */ 2927 */
2910bool flush_work(struct work_struct *work) 2928bool flush_work(struct work_struct *work)
2911{ 2929{
2912 struct wq_barrier barr; 2930 return __flush_work(work, false);
2913
2914 if (WARN_ON(!wq_online))
2915 return false;
2916
2917 if (start_flush_work(work, &barr)) {
2918 wait_for_completion(&barr.done);
2919 destroy_work_on_stack(&barr.work);
2920 return true;
2921 } else {
2922 return false;
2923 }
2924} 2931}
2925EXPORT_SYMBOL_GPL(flush_work); 2932EXPORT_SYMBOL_GPL(flush_work);
2926 2933
@@ -2986,7 +2993,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
2986 * isn't executing. 2993 * isn't executing.
2987 */ 2994 */
2988 if (wq_online) 2995 if (wq_online)
2989 flush_work(work); 2996 __flush_work(work, true);
2990 2997
2991 clear_work_data(work); 2998 clear_work_data(work);
2992 2999