| author | Lai Jiangshan <laijs@cn.fujitsu.com> | 2013-02-06 21:04:53 -0500 |
| --- | --- | --- |
| committer | Tejun Heo <tj@kernel.org> | 2013-02-06 21:04:53 -0500 |
| commit | 4468a00fd9a274fe1b30c886370d662e4a439efb (patch) | |
| tree | 6ead9c97eea5cdb16cfd7fca3b80d1b184949e3e /kernel/workqueue.c | |
| parent | 60c057bca22285efefbba033624763a778f243bf (diff) | |
workqueue: make work->data point to pool after try_to_grab_pending()
We plan to use work->data pointing to cwq as the synchronization
invariant when determining whether a given work item is on a locked
pool or not, which requires work->data pointing to cwq only while the
work item is queued on the associated pool.
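To make the invariant concrete: while a work item is queued, work->data carries a cwq pointer plus flag bits; while it is off-queue, it carries a pool ID shifted above the flag bits. The following is a minimal, self-contained sketch of that encoding; the constants, the work_struct layout, and the two accessor names are simplified stand-ins for illustration, not the kernel's actual definitions.

```c
/* Simplified model of the work->data encoding; stand-in values only. */
#include <stddef.h>

#define WORK_STRUCT_PENDING	(1UL << 0)	/* item is pending execution */
#define WORK_STRUCT_CWQ		(1UL << 1)	/* data holds a cwq pointer */
#define WORK_STRUCT_FLAG_MASK	0xfUL		/* low bits reserved for flags */
#define WORK_OFFQ_POOL_SHIFT	4		/* off-queue pool id sits above the flags */

struct cpu_workqueue_struct;			/* per-workqueue, per-pool queue; opaque here */

struct work_struct {
	unsigned long data;			/* cwq pointer or pool id, plus flag bits */
};

/* Queued: data holds a flag-aligned cwq pointer (CWQ bit set). */
static struct cpu_workqueue_struct *get_cwq_sketch(const struct work_struct *work)
{
	if (work->data & WORK_STRUCT_CWQ)
		return (struct cpu_workqueue_struct *)(work->data & ~WORK_STRUCT_FLAG_MASK);
	return NULL;				/* off-queue: no cwq recorded */
}

/* Off-queue: data records the id of the pool the item was last associated with. */
static int get_pool_id_sketch(const struct work_struct *work)
{
	return (int)(work->data >> WORK_OFFQ_POOL_SHIFT);
}
```

Once only queued items point to cwqs, a non-NULL result from the first accessor is enough to tell that the item is on a pool, which is the property the series plans to synchronize on.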
With delayed_work updated not to overload work->data for target
workqueue recording, the only case where we still have off-queue
work->data pointing to cwq is try_to_grab_pending() which doesn't
update work->data after stealing a queued work item. There's no
reason for try_to_grab_pending() not to update work->data to point to
the pool instead of cwq, as the normal execution path does.
This patch adds set_work_pool_and_keep_pending() which makes
work->data point to pool instead of cwq but keeps the pending bit
unlike set_work_pool_and_clear_pending() (surprise!).
After this patch, it's guaranteed that only queued work items point to
cwqs.
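Why the pending bit is kept here: a successful try_to_grab_pending() hands ownership of a still-pending work item to the caller, which may requeue it straight away. Below is a hedged sketch of that caller pattern, modelled on mod_delayed_work_on() of the same era; it is reconstructed for illustration, not quoted from the tree.

```c
/*
 * Illustration of a grab-and-requeue caller; modelled on
 * mod_delayed_work_on(), reconstructed for this sketch.
 */
static bool mod_dwork_sketch(int cpu, struct workqueue_struct *wq,
			     struct delayed_work *dwork, unsigned long delay)
{
	unsigned long flags;
	int ret;

	do {
		/*
		 * On success (ret >= 0) this returns with the PENDING bit
		 * set and owned by us, and with local irqs disabled.
		 */
		ret = try_to_grab_pending(&dwork->work, true, &flags);
	} while (ret == -EAGAIN);

	if (ret >= 0) {
		/*
		 * Because the grab path keeps PENDING, the item can be
		 * requeued directly without racing a concurrent
		 * queue_work() trying to claim PENDING again.
		 */
		__queue_delayed_work(cpu, wq, dwork, delay);
		local_irq_restore(flags);
	}

	return ret > 0;	/* true iff an already-pending item was re-armed */
}
```

After this patch the stolen item's work->data also points to its pool rather than a cwq, so the off-queue window between the grab and the requeue looks the same as the one left behind by normal execution.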
This patch doesn't introduce any visible behavior change.
tj: Renamed the new helper function to match
set_work_pool_and_clear_pending() and rewrote the description.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 10 |
1 files changed, 10 insertions, 0 deletions
```diff
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 41a502ce3802..1a442c301ddb 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -556,6 +556,13 @@ static void set_work_cwq(struct work_struct *work,
 		      WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
 }
 
+static void set_work_pool_and_keep_pending(struct work_struct *work,
+					   int pool_id)
+{
+	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
+		      WORK_STRUCT_PENDING);
+}
+
 static void set_work_pool_and_clear_pending(struct work_struct *work,
 					    int pool_id)
 {
@@ -1115,6 +1122,9 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
 		cwq_dec_nr_in_flight(get_work_cwq(work),
 				     get_work_color(work));
 
+		/* work->data points to cwq iff queued, point to pool */
+		set_work_pool_and_keep_pending(work, pool->id);
+
 		spin_unlock(&pool->lock);
 		return 1;
 	}
```