author		Tejun Heo <tj@kernel.org>	2013-04-01 14:23:34 -0400
committer	Tejun Heo <tj@kernel.org>	2013-04-01 14:23:34 -0400
commit		f3f90ad46934202eeefac454fd5d89bf73c6aa34 (patch)
tree		3780e2ca6be5b8576ba1eda4820b3e13f877af07 /kernel/workqueue.c
parent		e3c916a4c7f51722785d34d9f9802b70dac3ce93 (diff)
workqueue: determine NUMA node of workers according to the allowed cpumask
When worker tasks are created using kthread_create_on_node(), currently
only per-cpu ones have the matching NUMA node specified.  All unbound
workers are always created with NUMA_NO_NODE.

Now that an unbound worker pool may have an arbitrary cpumask associated
with it, this isn't optimal.  Add pool->node which is determined by the
pool's cpumask.  If the pool's cpumask is contained inside a NUMA node
proper, the pool is associated with that node, and all workers of the
pool are created on that node.

This currently only makes a difference for unbound worker pools whose
cpumask is contained inside a single NUMA node, but it will serve as the
foundation for making all unbound pools NUMA-affine.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
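(Illustrative note, not part of the original commit.)  The node-selection
rule described above, where a pool belongs to a node if and only if its
cpumask is a subset of that node's cpumask, can be sketched in plain
userspace C.  Everything below is hypothetical scaffolding: 64-bit masks
stand in for cpumask_t, and NR_NODES, node_cpumask and pick_pool_node
are made-up names.  In the kernel the containment check is
cpumask_subset(a, b), which the sketch models as (a & ~b) == 0.

    #include <stdint.h>
    #include <stdio.h>

    #define NR_NODES	2
    #define NUMA_NO_NODE	(-1)

    /* Stand-in for wq_numa_possible_cpumask[]: node 0 owns CPUs 0-3,
     * node 1 owns CPUs 4-7. */
    static const uint64_t node_cpumask[NR_NODES] = { 0x0f, 0xf0 };

    /*
     * If the pool's cpumask is contained inside one node's cpumask,
     * the pool belongs to that node; otherwise it stays on
     * NUMA_NO_NODE, the default set at pool init time.
     */
    static int pick_pool_node(uint64_t pool_cpumask)
    {
    	int node;

    	for (node = 0; node < NR_NODES; node++)
    		if ((pool_cpumask & ~node_cpumask[node]) == 0)
    			return node;
    	return NUMA_NO_NODE;
    }

    int main(void)
    {
    	printf("cpus 0-1 -> node %d\n", pick_pool_node(0x03)); /* 0 */
    	printf("cpus 4-5 -> node %d\n", pick_pool_node(0x30)); /* 1 */
    	printf("cpus 2,5 -> node %d\n", pick_pool_node(0x24)); /* -1 */
    	return 0;
    }

The loop added to get_unbound_pool() below does exactly this with
for_each_node() and cpumask_subset(), falling back to the NUMA_NO_NODE
default set in init_worker_pool().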
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	18
1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 248d18aa2a5d..3e18c7b865eb 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -138,6 +138,7 @@ enum {
 struct worker_pool {
 	spinlock_t		lock;		/* the pool lock */
 	int			cpu;		/* I: the associated cpu */
+	int			node;		/* I: the associated node ID */
 	int			id;		/* I: pool ID */
 	unsigned int		flags;		/* X: flags */
 
@@ -1645,7 +1646,6 @@ static struct worker *alloc_worker(void)
 static struct worker *create_worker(struct worker_pool *pool)
 {
 	struct worker *worker = NULL;
-	int node = pool->cpu >= 0 ? cpu_to_node(pool->cpu) : NUMA_NO_NODE;
 	int id = -1;
 	char id_buf[16];
 
@@ -1678,7 +1678,7 @@ static struct worker *create_worker(struct worker_pool *pool)
 	else
 		snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
 
-	worker->task = kthread_create_on_node(worker_thread, worker, node,
+	worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
 					      "kworker/%s", id_buf);
 	if (IS_ERR(worker->task))
 		goto fail;
@@ -3360,6 +3360,7 @@ static int init_worker_pool(struct worker_pool *pool)
 	spin_lock_init(&pool->lock);
 	pool->id = -1;
 	pool->cpu = -1;
+	pool->node = NUMA_NO_NODE;
 	pool->flags |= POOL_DISASSOCIATED;
 	INIT_LIST_HEAD(&pool->worklist);
 	INIT_LIST_HEAD(&pool->idle_list);
@@ -3465,6 +3466,7 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 {
 	u32 hash = wqattrs_hash(attrs);
 	struct worker_pool *pool;
+	int node;
 
 	lockdep_assert_held(&wq_pool_mutex);
 
@@ -3487,6 +3489,17 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 	lockdep_set_subclass(&pool->lock, 1);	/* see put_pwq() */
 	copy_workqueue_attrs(pool->attrs, attrs);
 
+	/* if cpumask is contained inside a NUMA node, we belong to that node */
+	if (wq_numa_enabled) {
+		for_each_node(node) {
+			if (cpumask_subset(pool->attrs->cpumask,
+					   wq_numa_possible_cpumask[node])) {
+				pool->node = node;
+				break;
+			}
+		}
+	}
+
 	if (worker_pool_assign_id(pool) < 0)
 		goto fail;
 
@@ -4480,6 +4493,7 @@ static int __init init_workqueues(void)
 		pool->cpu = cpu;
 		cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
 		pool->attrs->nice = std_nice[i++];
+		pool->node = cpu_to_node(cpu);
 
 		/* alloc pool ID */
 		mutex_lock(&wq_pool_mutex);