author		Xunlei Pang <pang.xunlei@linaro.org>	2015-10-08 23:53:12 -0400
committer	Tejun Heo <tj@kernel.org>	2015-10-12 12:17:31 -0400
commit		e2273584d3f33f7f2cfe6d7aaade0fa2f1cb3db5 (patch)
tree		0d6a5b90dcf356e4739b34ac8f46cd65a607673d
parent		25cb62b76430a91cc6195f902e61c2cb84ade622 (diff)
workqueue: Allocate the unbound pool using local node memory
Currently, get_unbound_pool() uses kzalloc() to allocate the worker pool, so
the pool may end up on a NUMA node remote from the CPUs it serves. We can
instead do the allocation on the right node and get local memory access.

This patch selects the target node first and uses kzalloc_node() instead of
kzalloc().

Signed-off-by: Xunlei Pang <pang.xunlei@linaro.org>
Signed-off-by: Tejun Heo <tj@kernel.org>
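For illustration only, below is a minimal userspace analog of the same pattern
using libnuma: pick a target NUMA node first, then allocate the structure on
that node and record the node in it. This sketch is not part of the patch;
struct worker_pool_demo and alloc_pool_on_local_node() are made-up names, and
it assumes libnuma is installed (build with: gcc -D_GNU_SOURCE demo.c -lnuma).

/*
 * Illustrative userspace analog of the patch's approach (not kernel code):
 * select a target NUMA node first, then allocate the structure on that node.
 */
#include <numa.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

struct worker_pool_demo {	/* made-up stand-in for struct worker_pool */
	int node;
};

static struct worker_pool_demo *alloc_pool_on_local_node(void)
{
	struct worker_pool_demo *pool;
	int target_node;

	/* Select the target node first (here: the node of the current CPU). */
	target_node = numa_node_of_cpu(sched_getcpu());

	/* Then allocate on that node, mirroring kzalloc() -> kzalloc_node(). */
	if (target_node >= 0)
		pool = numa_alloc_onnode(sizeof(*pool), target_node);
	else
		pool = numa_alloc_local(sizeof(*pool));
	if (!pool)
		return NULL;

	memset(pool, 0, sizeof(*pool));	/* kzalloc() zeroes; do the same here */
	pool->node = target_node;	/* remember the node, like pool->node */
	return pool;
}

int main(void)
{
	struct worker_pool_demo *pool;

	if (numa_available() < 0) {	/* libnuma needs a NUMA-capable system */
		fprintf(stderr, "NUMA not available\n");
		return 1;
	}

	pool = alloc_pool_on_local_node();
	if (!pool)
		return 1;
	printf("pool allocated on node %d\n", pool->node);
	numa_free(pool, sizeof(*pool));
	return 0;
}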
-rw-r--r--	kernel/workqueue.c	26
1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ca71582fcfab..96d374735040 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3199,6 +3199,7 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 	u32 hash = wqattrs_hash(attrs);
 	struct worker_pool *pool;
 	int node;
+	int target_node = NUMA_NO_NODE;
 
 	lockdep_assert_held(&wq_pool_mutex);
 
@@ -3210,13 +3211,25 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 		}
 	}
 
+	/* if cpumask is contained inside a NUMA node, we belong to that node */
+	if (wq_numa_enabled) {
+		for_each_node(node) {
+			if (cpumask_subset(attrs->cpumask,
+					   wq_numa_possible_cpumask[node])) {
+				target_node = node;
+				break;
+			}
+		}
+	}
+
 	/* nope, create a new one */
-	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
 	if (!pool || init_worker_pool(pool) < 0)
 		goto fail;
 
 	lockdep_set_subclass(&pool->lock, 1);	/* see put_pwq() */
 	copy_workqueue_attrs(pool->attrs, attrs);
+	pool->node = target_node;
 
 	/*
 	 * no_numa isn't a worker_pool attribute, always clear it.  See
@@ -3224,17 +3237,6 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 	 */
 	pool->attrs->no_numa = false;
 
-	/* if cpumask is contained inside a NUMA node, we belong to that node */
-	if (wq_numa_enabled) {
-		for_each_node(node) {
-			if (cpumask_subset(pool->attrs->cpumask,
-					   wq_numa_possible_cpumask[node])) {
-				pool->node = node;
-				break;
-			}
-		}
-	}
-
 	if (worker_pool_assign_id(pool) < 0)
 		goto fail;
 