about summary refs log tree commit diff stats
path: root/kernel/workqueue.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-11-05 17:16:27 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2015-11-05 17:16:27 -0500
commite25ac7ddaae0e798f794cdaf9109bc71246110cd (patch)
tree8a9f032054e69ace5413e1cdf3db8a3a991d5440 /kernel/workqueue.c
parent75f5db39ff14ed95056f2cca3ad98c3cae97170c (diff)
parente2273584d3f33f7f2cfe6d7aaade0fa2f1cb3db5 (diff)
Merge branch 'for-4.4' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue update from Tejun Heo: "This pull request contains one patch to make an unbound worker pool allocated from the NUMA node containing it if such node exists. As unbound worker pools are node-affine by default, this makes most pools allocated on the right node" * 'for-4.4' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq: workqueue: Allocate the unbound pool using local node memory
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  26
1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index bcb14cafe007..c579dbab2e36 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3199,6 +3199,7 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
3199 u32 hash = wqattrs_hash(attrs); 3199 u32 hash = wqattrs_hash(attrs);
3200 struct worker_pool *pool; 3200 struct worker_pool *pool;
3201 int node; 3201 int node;
3202 int target_node = NUMA_NO_NODE;
3202 3203
3203 lockdep_assert_held(&wq_pool_mutex); 3204 lockdep_assert_held(&wq_pool_mutex);
3204 3205
@@ -3210,13 +3211,25 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
3210 } 3211 }
3211 } 3212 }
3212 3213
3214 /* if cpumask is contained inside a NUMA node, we belong to that node */
3215 if (wq_numa_enabled) {
3216 for_each_node(node) {
3217 if (cpumask_subset(attrs->cpumask,
3218 wq_numa_possible_cpumask[node])) {
3219 target_node = node;
3220 break;
3221 }
3222 }
3223 }
3224
3213 /* nope, create a new one */ 3225 /* nope, create a new one */
3214 pool = kzalloc(sizeof(*pool), GFP_KERNEL); 3226 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
3215 if (!pool || init_worker_pool(pool) < 0) 3227 if (!pool || init_worker_pool(pool) < 0)
3216 goto fail; 3228 goto fail;
3217 3229
3218 lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */ 3230 lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */
3219 copy_workqueue_attrs(pool->attrs, attrs); 3231 copy_workqueue_attrs(pool->attrs, attrs);
3232 pool->node = target_node;
3220 3233
3221 /* 3234 /*
3222 * no_numa isn't a worker_pool attribute, always clear it. See 3235 * no_numa isn't a worker_pool attribute, always clear it. See
@@ -3224,17 +3237,6 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
3224 */ 3237 */
3225 pool->attrs->no_numa = false; 3238 pool->attrs->no_numa = false;
3226 3239
3227 /* if cpumask is contained inside a NUMA node, we belong to that node */
3228 if (wq_numa_enabled) {
3229 for_each_node(node) {
3230 if (cpumask_subset(pool->attrs->cpumask,
3231 wq_numa_possible_cpumask[node])) {
3232 pool->node = node;
3233 break;
3234 }
3235 }
3236 }
3237
3238 if (worker_pool_assign_id(pool) < 0) 3240 if (worker_pool_assign_id(pool) < 0)
3239 goto fail; 3241 goto fail;
3240 3242