author		Lai Jiangshan <laijs@cn.fujitsu.com>	2013-02-06 21:04:53 -0500
committer	Tejun Heo <tj@kernel.org>	2013-02-06 21:04:53 -0500
commit		6be195886ac26abe0194ed1bc7a9224f8a97c310 (patch)
tree		a414324b9232efaa2fd8f1dc4a28d308aa5d99f5 /kernel/workqueue.c
parent		706026c2141113886f61e1ad2738c9a7723ec69c (diff)
workqueue: replace WORK_CPU_NONE/LAST with WORK_CPU_END
Now that workqueue has moved away from gcwqs, it no longer needs a
CPU identifier meaning "no cpu associated" - we now use
WORK_OFFQ_POOL_NONE instead - and most uses of WORK_CPU_NONE are gone.

The only remaining usage is as the end marker for the for_each_*wq*()
iterators, where the name WORK_CPU_NONE is confusing without any
actual "none" usage. Similarly, WORK_CPU_LAST, which equals
WORK_CPU_NONE, no longer makes sense.

Replace both WORK_CPU_NONE and WORK_CPU_LAST with WORK_CPU_END. This
patch doesn't introduce any functional difference.
tj: s/WORK_CPU_LAST/WORK_CPU_END/ and rewrote the description.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
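For context, the rename tracks the workqueue CPU-id enums; below is a minimal before/after sketch, assuming the usual include/linux/workqueue.h definitions (that header is outside the diffstat shown here, so treat the exact values as an assumption):

/* before: the "no cpu" value doubled as the iterator end marker */
enum {
	WORK_CPU_UNBOUND	= NR_CPUS,
	WORK_CPU_NONE		= NR_CPUS + 1,
	WORK_CPU_LAST		= WORK_CPU_NONE,
};

/* after: one clearly named, exclusive end marker for the iterators */
enum {
	WORK_CPU_UNBOUND	= NR_CPUS,
	WORK_CPU_END		= NR_CPUS + 1,
};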
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 577de1073f24..7e11334a119f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -258,7 +258,7 @@ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
 		if (sw & 2)
 			return WORK_CPU_UNBOUND;
 	}
-	return WORK_CPU_NONE;
+	return WORK_CPU_END;
 }
 
 static inline int __next_cwq_cpu(int cpu, const struct cpumask *mask,
@@ -282,17 +282,17 @@ static inline int __next_cwq_cpu(int cpu, const struct cpumask *mask,
  */
 #define for_each_wq_cpu(cpu)						\
 	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, 3);		\
-	     (cpu) < WORK_CPU_NONE;					\
+	     (cpu) < WORK_CPU_END;					\
 	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, 3))
 
 #define for_each_online_wq_cpu(cpu)					\
 	for ((cpu) = __next_wq_cpu(-1, cpu_online_mask, 3);		\
-	     (cpu) < WORK_CPU_NONE;					\
+	     (cpu) < WORK_CPU_END;					\
 	     (cpu) = __next_wq_cpu((cpu), cpu_online_mask, 3))
 
 #define for_each_cwq_cpu(cpu, wq)					\
 	for ((cpu) = __next_cwq_cpu(-1, cpu_possible_mask, (wq));	\
-	     (cpu) < WORK_CPU_NONE;					\
+	     (cpu) < WORK_CPU_END;					\
 	     (cpu) = __next_cwq_cpu((cpu), cpu_possible_mask, (wq)))
 
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
@@ -3796,7 +3796,7 @@ static int __init init_workqueues(void)
 
 	/* make sure we have enough bits for OFFQ pool ID */
 	BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <
-		     WORK_CPU_END * NR_STD_WORKER_POOLS);
+		     WORK_CPU_END * NR_STD_WORKER_POOLS);
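As a usage note, the iterators above treat WORK_CPU_END as an exclusive sentinel; a hypothetical in-kernel caller (count_wq_cpus() is illustrative only, not part of the patch) might look like:

/* Illustrative only: walk every possible workqueue CPU slot,
 * including WORK_CPU_UNBOUND; the loop stops once __next_wq_cpu()
 * returns WORK_CPU_END.
 */
static void count_wq_cpus(void)
{
	int cpu, n = 0;

	for_each_wq_cpu(cpu)
		n++;

	pr_info("workqueue: iterated %d CPU slots\n", n);
}

The (cpu) < WORK_CPU_END test in the macros is what makes the exclusive-end name read naturally where WORK_CPU_NONE used to be confusing.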