aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/workqueue.h
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2010-07-02 04:03:51 -0400
committerTejun Heo <tj@kernel.org>2010-07-02 05:00:02 -0400
commitf34217977d717385a3e9fd7018ac39fade3964c0 (patch)
tree7e05645e911eea15b33a368b91ac82ae12884e6d /include/linux/workqueue.h
parentbdbc5dd7de5d07d6c9d3536e598956165a031d4c (diff)
workqueue: implement unbound workqueue
This patch implements unbound workqueue which can be specified with WQ_UNBOUND flag on creation. An unbound workqueue has the following properties. * It uses a dedicated gcwq with a pseudo CPU number WORK_CPU_UNBOUND. This gcwq is always online and disassociated. * Workers are not bound to any CPU and not concurrency managed. Works are dispatched to workers as soon as possible and the only applied limitation is @max_active. IOW, all unbound workqueues are implicitly high priority. Unbound workqueues can be used as simple execution context provider. Contexts unbound to any cpu are served as soon as possible. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Arjan van de Ven <arjan@linux.intel.com> Cc: David Howells <dhowells@redhat.com>
Diffstat (limited to 'include/linux/workqueue.h')
-rw-r--r--include/linux/workqueue.h15
1 file changed, 14 insertions, 1 deletion
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 139069a6286c..67ce734747f6 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -51,7 +51,8 @@ enum {
51 WORK_NO_COLOR = WORK_NR_COLORS, 51 WORK_NO_COLOR = WORK_NR_COLORS,
52 52
53 /* special cpu IDs */ 53 /* special cpu IDs */
54 WORK_CPU_NONE = NR_CPUS, 54 WORK_CPU_UNBOUND = NR_CPUS,
55 WORK_CPU_NONE = NR_CPUS + 1,
55 WORK_CPU_LAST = WORK_CPU_NONE, 56 WORK_CPU_LAST = WORK_CPU_NONE,
56 57
57 /* 58 /*
@@ -237,11 +238,17 @@ enum {
237 WQ_RESCUER = 1 << 3, /* has an rescue worker */ 238 WQ_RESCUER = 1 << 3, /* has an rescue worker */
238 WQ_HIGHPRI = 1 << 4, /* high priority */ 239 WQ_HIGHPRI = 1 << 4, /* high priority */
239 WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ 240 WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */
241 WQ_UNBOUND = 1 << 6, /* not bound to any cpu */
240 242
241 WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ 243 WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
244 WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
242 WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, 245 WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
243}; 246};
244 247
248/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
249#define WQ_UNBOUND_MAX_ACTIVE \
250 max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)
251
245/* 252/*
246 * System-wide workqueues which are always present. 253 * System-wide workqueues which are always present.
247 * 254 *
@@ -256,10 +263,16 @@ enum {
256 * system_nrt_wq is non-reentrant and guarantees that any given work 263 * system_nrt_wq is non-reentrant and guarantees that any given work
257 * item is never executed in parallel by multiple CPUs. Queue 264 * item is never executed in parallel by multiple CPUs. Queue
258 * flushing might take relatively long. 265 * flushing might take relatively long.
266 *
267 * system_unbound_wq is unbound workqueue. Workers are not bound to
268 * any specific CPU, not concurrency managed, and all queued works are
269 * executed immediately as long as max_active limit is not reached and
270 * resources are available.
259 */ 271 */
260extern struct workqueue_struct *system_wq; 272extern struct workqueue_struct *system_wq;
261extern struct workqueue_struct *system_long_wq; 273extern struct workqueue_struct *system_long_wq;
262extern struct workqueue_struct *system_nrt_wq; 274extern struct workqueue_struct *system_nrt_wq;
275extern struct workqueue_struct *system_unbound_wq;
263 276
264extern struct workqueue_struct * 277extern struct workqueue_struct *
265__alloc_workqueue_key(const char *name, unsigned int flags, int max_active, 278__alloc_workqueue_key(const char *name, unsigned int flags, int max_active,