aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2010-07-02 04:03:51 -0400
committerTejun Heo <tj@kernel.org>2010-07-02 05:00:08 -0400
commitc7fc77f78f16d138ca997ce096a62f46e2e9420a (patch)
tree0478e5dde66f6ff86d4baa0fe541748e1a6f1ed2 /include/linux
parentf34217977d717385a3e9fd7018ac39fade3964c0 (diff)
workqueue: remove WQ_SINGLE_CPU and use WQ_UNBOUND instead
WQ_SINGLE_CPU combined with @max_active of 1 is used to achieve full ordering among works queued to a workqueue. The same can be achieved using WQ_UNBOUND as unbound workqueues always use the gcwq for WORK_CPU_UNBOUND. As @max_active is always one and the benefit of cpu locality isn't accessible anyway, serving them with unbound workqueues should be fine. Drop WQ_SINGLE_CPU support and use WQ_UNBOUND instead. Note that most single thread workqueue users will be converted to use multithread or non-reentrant instead and only the ones which require strict ordering will keep using WQ_UNBOUND + @max_active of 1. Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/workqueue.h7
1 file changed, 3 insertions, 4 deletions
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 67ce734747f6..d74a529ed13e 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -233,12 +233,11 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
233 233
234enum { 234enum {
235 WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ 235 WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */
236 WQ_SINGLE_CPU = 1 << 1, /* only single cpu at a time */ 236 WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
237 WQ_FREEZEABLE = 1 << 2, /* freeze during suspend */ 237 WQ_FREEZEABLE = 1 << 2, /* freeze during suspend */
238 WQ_RESCUER = 1 << 3, /* has an rescue worker */ 238 WQ_RESCUER = 1 << 3, /* has an rescue worker */
239 WQ_HIGHPRI = 1 << 4, /* high priority */ 239 WQ_HIGHPRI = 1 << 4, /* high priority */
240 WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ 240 WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */
241 WQ_UNBOUND = 1 << 6, /* not bound to any cpu */
242 241
243 WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ 242 WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
244 WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ 243 WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
@@ -300,9 +299,9 @@ __alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
300#define create_workqueue(name) \ 299#define create_workqueue(name) \
301 alloc_workqueue((name), WQ_RESCUER, 1) 300 alloc_workqueue((name), WQ_RESCUER, 1)
302#define create_freezeable_workqueue(name) \ 301#define create_freezeable_workqueue(name) \
303 alloc_workqueue((name), WQ_FREEZEABLE | WQ_SINGLE_CPU | WQ_RESCUER, 1) 302 alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_RESCUER, 1)
304#define create_singlethread_workqueue(name) \ 303#define create_singlethread_workqueue(name) \
305 alloc_workqueue((name), WQ_SINGLE_CPU | WQ_RESCUER, 1) 304 alloc_workqueue((name), WQ_UNBOUND | WQ_RESCUER, 1)
306 305
307extern void destroy_workqueue(struct workqueue_struct *wq); 306extern void destroy_workqueue(struct workqueue_struct *wq);
308 307