author		Lai Jiangshan <laijs@cn.fujitsu.com>	2013-02-06 21:04:53 -0500
committer	Tejun Heo <tj@kernel.org>	2013-02-06 21:04:53 -0500
commit		0b3dae68ac199fac224fea9a31907b44f0d257b3 (patch)
tree		909b0b1d33123c9e8cbd0117e5f42df12e3becde /kernel/workqueue.c
parent		4468a00fd9a274fe1b30c886370d662e4a439efb (diff)
workqueue: simplify is-work-item-queued-here test
Currently, determining whether a work item is queued on a locked pool
involves somewhat convoluted memory barrier dancing. It works as
follows.
* When a work item is queued on a pool, work->data is updated before
work->entry is linked to the pending list, with a wmb() in between.
* When trying to determine whether a work item is currently queued on
the pool pointed to by work->data, we lock the pool and look at
work->entry. If work->entry is linked, we do rmb() and then check
whether work->data points to the current pool.
This works because work->data can point to a pool only if the work
item currently is, or previously was, queued on that pool, and:
* If it currently is on the pool, the test obviously succeeds.
* If it left the pool, its work->entry was cleared under pool->lock,
so if we're seeing a non-empty work->entry, it has to be from the work
item being linked on another pool. Because work->data is updated
before work->entry is linked, with a wmb() in between, the work->data
update from the other pool is guaranteed to be visible if we do rmb()
after seeing a non-empty work->entry. So we either see an empty
work->entry or we see work->data updated to point to the other pool.
The sketch below shows the two sides of this pairing.
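In code, the old pairing looks roughly like this; it is a condensed
view of the hunks this patch removes (see the diff below), not new
code:

	/* queueing side, insert_work(), with pool->lock held */
	set_work_cwq(work, cwq, extra_flags);	/* update work->data first */
	smp_wmb();				/* data visible before the link */
	list_add_tail(&work->entry, head);	/* then link work->entry */

	/* checking side, try_to_grab_pending(), with pool->lock held */
	if (!list_empty(&work->entry)) {
		smp_rmb();			/* pairs with the wmb() above */
		if (pool == get_work_pool(work))
			/* @work is queued on @pool */;
	}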
While this works, it's convoluted, to put it mildly. With recent
updates, it's now guaranteed that work->data points to a cwq only
while the work item is queued, and that updating work->data to point
to the cwq or back to the pool is done under pool->lock. We can
therefore simply test whether work->data points to a cwq associated
with the currently locked pool, instead of doing the memory barrier
dance.
This patch replaces the memory-barrier-based "are you still here,
really?" test with the much simpler "does work->data point to me?"
test: if work->data points to a cwq associated with the currently
locked pool, the work item is guaranteed to be queued on that pool,
since work->data can start and stop pointing to such a cwq only under
pool->lock, and the start and stop coincide with queueing and
dequeueing. Condensed, the new test reduces to the sketch below.
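The following is only an illustrative condensation of the added lines
in the diff below (the actual patch keeps the two checks nested):

	spin_lock(&pool->lock);
	cwq = get_work_cwq(work);	/* non-NULL only while @work is queued */
	if (cwq && cwq->pool == pool) {
		/* @work is guaranteed to be queued on the locked pool */
	}
	spin_unlock(&pool->lock);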
tj: Rewrote the comments and description.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	40
1 file changed, 16 insertions, 24 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1a442c301ddb..251f00914295 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1068,6 +1068,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
 			       unsigned long *flags)
 {
 	struct worker_pool *pool;
+	struct cpu_workqueue_struct *cwq;
 
 	local_irq_save(*flags);
 
@@ -1097,14 +1098,17 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
 		goto fail;
 
 	spin_lock(&pool->lock);
-	if (!list_empty(&work->entry)) {
-		/*
-		 * This work is queued, but perhaps we locked the wrong
-		 * pool. In that case we must see the new value after
-		 * rmb(), see insert_work()->wmb().
-		 */
-		smp_rmb();
-		if (pool == get_work_pool(work)) {
+	/*
+	 * work->data is guaranteed to point to cwq only while the work
+	 * item is queued on cwq->wq, and both updating work->data to point
+	 * to cwq on queueing and to pool on dequeueing are done under
+	 * cwq->pool->lock. This in turn guarantees that, if work->data
+	 * points to cwq which is associated with a locked pool, the work
+	 * item is currently queued on that pool.
+	 */
+	cwq = get_work_cwq(work);
+	if (cwq) {
+		if (cwq->pool == pool) {
 			debug_work_deactivate(work);
 
 			/*
@@ -1159,13 +1163,6 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
 
 	/* we own @work, set data and link */
 	set_work_cwq(work, cwq, extra_flags);
-
-	/*
-	 * Ensure that we get the right work->data if we see the
-	 * result of list_add() below, see try_to_grab_pending().
-	 */
-	smp_wmb();
-
 	list_add_tail(&work->entry, head);
 
 	/*
@@ -2799,15 +2796,10 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
 		return false;
 
 	spin_lock_irq(&pool->lock);
-	if (!list_empty(&work->entry)) {
-		/*
-		 * See the comment near try_to_grab_pending()->smp_rmb().
-		 * If it was re-queued to a different pool under us, we
-		 * are not going to wait.
-		 */
-		smp_rmb();
-		cwq = get_work_cwq(work);
-		if (unlikely(!cwq || pool != cwq->pool))
+	/* see the comment in try_to_grab_pending() with the same code */
+	cwq = get_work_cwq(work);
+	if (cwq) {
+		if (unlikely(cwq->pool != pool))
 			goto already_gone;
 	} else {
 		worker = find_worker_executing_work(pool, work);