author		Tejun Heo <tj@kernel.org>	2013-01-24 14:01:33 -0500
committer	Tejun Heo <tj@kernel.org>	2013-01-24 14:01:33 -0500
commit		7c3eed5cd60d0f736516e6ade77d90c6255860bd (patch)
tree		bfc017307b98a4db8c919ba9fb53399189ecf0ad /include/linux/workqueue.h
parent		9daf9e678d18585433a4ad90ec51a448e5fd054c (diff)
workqueue: record pool ID instead of CPU in work->data when off-queue
Currently, when a work item is off-queue, work->data records the CPU it was last on, which is used to locate the last executing instance for non-reentrance, flushing, etc.

We're in the process of removing global_cwq and making worker_pool the top level abstraction. This patch makes work->data point to the pool it was last associated with instead of the CPU.

After the previous WORK_OFFQ_POOL_CPU and worker_pool->id additions, the conversion is fairly straightforward. WORK_OFFQ constants and functions are modified to record and read back the pool ID instead.

worker_pool_by_id() is added to allow looking up a pool from its ID. get_work_pool() replaces get_work_gcwq(), which is reimplemented using get_work_pool(). get_work_pool_id() replaces work_cpu().

This patch shouldn't introduce any observable behavior changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
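The standalone sketch below is a rough illustration of the off-queue encoding this patch switches to: a pool ID packed into the bits above WORK_OFFQ_POOL_SHIFT and read back, with WORK_OFFQ_POOL_NONE meaning "no pool associated". It is not kernel code; the helpers pack_offq_pool_id()/unpack_offq_pool_id() are hypothetical names for illustration, and WORK_OFFQ_FLAG_BASE is given a placeholder value here (in workqueue.h it is derived from the work_struct flag bits). Only the WORK_OFFQ_POOL_* derivations mirror the enum changed in this header.

/*
 * Sketch of the off-queue pool-ID encoding: ID in the high bits,
 * all-ones meaning "no pool associated".  Not kernel code.
 */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG		(sizeof(unsigned long) * CHAR_BIT)

#define WORK_OFFQ_FLAG_BASE	4	/* placeholder value; see workqueue.h */
#define WORK_OFFQ_FLAG_BITS	1

/* same derivation as the enum constants added by this patch */
#define WORK_OFFQ_POOL_SHIFT	(WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS)
#define WORK_OFFQ_LEFT		(BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)
#define WORK_OFFQ_POOL_BITS	(WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31)
#define WORK_OFFQ_POOL_NONE	((1UL << WORK_OFFQ_POOL_BITS) - 1)

/* hypothetical helpers: pack a pool ID into / read it back from the
 * high bits of a work->data-style word */
static unsigned long pack_offq_pool_id(unsigned long flags, unsigned long pool_id)
{
	return (pool_id << WORK_OFFQ_POOL_SHIFT) | flags;
}

static unsigned long unpack_offq_pool_id(unsigned long data)
{
	return data >> WORK_OFFQ_POOL_SHIFT;
}

int main(void)
{
	/* a work item last associated with pool 3, no off-queue flags set */
	unsigned long data = pack_offq_pool_id(0, 3);

	printf("pool id = %lu\n", unpack_offq_pool_id(data));

	/* "no pool associated", the replacement for the old WORK_OFFQ_CPU_NONE */
	data = pack_offq_pool_id(0, WORK_OFFQ_POOL_NONE);
	printf("no pool? %d\n", unpack_offq_pool_id(data) == WORK_OFFQ_POOL_NONE);

	return 0;
}

With a 64-bit unsigned long this leaves the capped 31 bits for the pool ID, matching the WORK_OFFQ_POOL_BITS limit in the enum below, and the all-ones WORK_OFFQ_POOL_NONE takes over the role WORK_OFFQ_CPU_NONE used to play.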
Diffstat (limited to 'include/linux/workqueue.h')
-rw-r--r--	include/linux/workqueue.h	18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index f8b35763e55f..a94e4e84e7b1 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -75,19 +75,19 @@ enum {
 
 	/*
 	 * When a work item is off queue, its high bits point to the last
-	 * cpu it was on. Cap at 31 bits and use the highest number to
-	 * indicate that no cpu is associated.
+	 * pool it was on. Cap at 31 bits and use the highest number to
+	 * indicate that no pool is associated.
 	 */
 	WORK_OFFQ_FLAG_BITS	= 1,
-	WORK_OFFQ_CPU_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
-	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_CPU_SHIFT,
-	WORK_OFFQ_CPU_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
-	WORK_OFFQ_CPU_NONE	= (1LU << WORK_OFFQ_CPU_BITS) - 1,
+	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
+	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
+	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
+	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,
 
 	/* convenience constants */
 	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
 	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
-	WORK_STRUCT_NO_CPU	= (unsigned long)WORK_OFFQ_CPU_NONE << WORK_OFFQ_CPU_SHIFT,
+	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,
 
 	/* bit mask for work_busy() return values */
 	WORK_BUSY_PENDING	= 1 << 0,
@@ -103,9 +103,9 @@ struct work_struct {
 #endif
 };
 
-#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU)
+#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL)
 #define WORK_DATA_STATIC_INIT()	\
-	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC)
+	ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)
 
 struct delayed_work {
 	struct work_struct work;