diff options
author | Byungchul Park <byungchul.park@lge.com> | 2017-10-25 04:56:04 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2017-10-25 06:19:03 -0400 |
commit | fd1a5b04dfb899f84ddeb8acdaea6b98283df1e5 (patch) | |
tree | b27e2d02e5bf3c61cd373e503dc5328f07c6ff6f | |
parent | a7967bc31584bd282682981295861e7bcba19e65 (diff) |
workqueue: Remove now redundant lock acquisitions wrt. workqueue flushes
The workqueue code added manual lock acquisition annotations to catch
deadlocks.
After lockdep crossrelease was introduced, some of those became redundant,
since wait_for_completion() already does the acquisition and tracking.
Remove the duplicate annotations.
Signed-off-by: Byungchul Park <byungchul.park@lge.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: amir73il@gmail.com
Cc: axboe@kernel.dk
Cc: darrick.wong@oracle.com
Cc: david@fromorbit.com
Cc: hch@infradead.org
Cc: idryomov@gmail.com
Cc: johan@kernel.org
Cc: johannes.berg@intel.com
Cc: kernel-team@lge.com
Cc: linux-block@vger.kernel.org
Cc: linux-fsdevel@vger.kernel.org
Cc: linux-mm@kvack.org
Cc: linux-xfs@vger.kernel.org
Cc: oleg@redhat.com
Cc: tj@kernel.org
Link: http://lkml.kernel.org/r/1508921765-15396-9-git-send-email-byungchul.park@lge.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- | include/linux/workqueue.h | 4 | ||||
-rw-r--r-- | kernel/workqueue.c | 19 |
2 files changed, 5 insertions, 18 deletions
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 1c49431f3121..c8a572cb49be 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -218,7 +218,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } | |||
218 | \ | 218 | \ |
219 | __init_work((_work), _onstack); \ | 219 | __init_work((_work), _onstack); \ |
220 | (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ | 220 | (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ |
221 | lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \ | 221 | lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \ |
222 | INIT_LIST_HEAD(&(_work)->entry); \ | 222 | INIT_LIST_HEAD(&(_work)->entry); \ |
223 | (_work)->func = (_func); \ | 223 | (_work)->func = (_func); \ |
224 | } while (0) | 224 | } while (0) |
@@ -398,7 +398,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, | |||
398 | static struct lock_class_key __key; \ | 398 | static struct lock_class_key __key; \ |
399 | const char *__lock_name; \ | 399 | const char *__lock_name; \ |
400 | \ | 400 | \ |
401 | __lock_name = #fmt#args; \ | 401 | __lock_name = "(wq_completion)"#fmt#args; \ |
402 | \ | 402 | \ |
403 | __alloc_workqueue_key((fmt), (flags), (max_active), \ | 403 | __alloc_workqueue_key((fmt), (flags), (max_active), \ |
404 | &__key, __lock_name, ##args); \ | 404 | &__key, __lock_name, ##args); \ |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 39831b2f3c5f..160fdc6e839a 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -2497,15 +2497,8 @@ static void insert_wq_barrier(struct pool_workqueue *pwq, | |||
2497 | INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); | 2497 | INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); |
2498 | __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); | 2498 | __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); |
2499 | 2499 | ||
2500 | /* | 2500 | init_completion_map(&barr->done, &target->lockdep_map); |
2501 | * Explicitly init the crosslock for wq_barrier::done, make its lock | 2501 | |
2502 | * key a subkey of the corresponding work. As a result we won't | ||
2503 | * build a dependency between wq_barrier::done and unrelated work. | ||
2504 | */ | ||
2505 | lockdep_init_map_crosslock((struct lockdep_map *)&barr->done.map, | ||
2506 | "(complete)wq_barr::done", | ||
2507 | target->lockdep_map.key, 1); | ||
2508 | __init_completion(&barr->done); | ||
2509 | barr->task = current; | 2502 | barr->task = current; |
2510 | 2503 | ||
2511 | /* | 2504 | /* |
@@ -2611,16 +2604,13 @@ void flush_workqueue(struct workqueue_struct *wq) | |||
2611 | struct wq_flusher this_flusher = { | 2604 | struct wq_flusher this_flusher = { |
2612 | .list = LIST_HEAD_INIT(this_flusher.list), | 2605 | .list = LIST_HEAD_INIT(this_flusher.list), |
2613 | .flush_color = -1, | 2606 | .flush_color = -1, |
2614 | .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done), | 2607 | .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map), |
2615 | }; | 2608 | }; |
2616 | int next_color; | 2609 | int next_color; |
2617 | 2610 | ||
2618 | if (WARN_ON(!wq_online)) | 2611 | if (WARN_ON(!wq_online)) |
2619 | return; | 2612 | return; |
2620 | 2613 | ||
2621 | lock_map_acquire(&wq->lockdep_map); | ||
2622 | lock_map_release(&wq->lockdep_map); | ||
2623 | |||
2624 | mutex_lock(&wq->mutex); | 2614 | mutex_lock(&wq->mutex); |
2625 | 2615 | ||
2626 | /* | 2616 | /* |
@@ -2883,9 +2873,6 @@ bool flush_work(struct work_struct *work) | |||
2883 | if (WARN_ON(!wq_online)) | 2873 | if (WARN_ON(!wq_online)) |
2884 | return false; | 2874 | return false; |
2885 | 2875 | ||
2886 | lock_map_acquire(&work->lockdep_map); | ||
2887 | lock_map_release(&work->lockdep_map); | ||
2888 | |||
2889 | if (start_flush_work(work, &barr)) { | 2876 | if (start_flush_work(work, &barr)) { |
2890 | wait_for_completion(&barr.done); | 2877 | wait_for_completion(&barr.done); |
2891 | destroy_work_on_stack(&barr.work); | 2878 | destroy_work_on_stack(&barr.work); |