diff options
author | Tejun Heo <tj@kernel.org> | 2013-04-01 14:23:32 -0400 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2013-04-01 14:23:32 -0400 |
commit | bce903809ab3f29eca97e0be5537778c1689c82b (patch) | |
tree | 6015d477e4b7a43693ad8b12a6ce3a84781e5ecd /kernel/workqueue.c | |
parent | a892cacc7f4960a39c0fad7bbdf04c5cbf7c229e (diff) |
workqueue: add wq_numa_tbl_len and wq_numa_possible_cpumask[]
Unbound workqueues are going to be NUMA-affine. Add wq_numa_tbl_len
and wq_numa_possible_cpumask[] in preparation. The former is the
highest NUMA node ID + 1 and the latter is masks of possible CPUs for
each NUMA node.
This patch only introduces these. Future patches will make use of
them.
v2: NUMA initialization moved into wq_numa_init(). Also, the possible
cpumask array is not created if there aren't multiple nodes on the
system. wq_numa_enabled bool added.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 46 |
1 file changed, 46 insertions, 0 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 2bf3d8c6e128..5ca46a2e2616 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <linux/jhash.h> | 44 | #include <linux/jhash.h> |
45 | #include <linux/hashtable.h> | 45 | #include <linux/hashtable.h> |
46 | #include <linux/rculist.h> | 46 | #include <linux/rculist.h> |
47 | #include <linux/nodemask.h> | ||
47 | 48 | ||
48 | #include "workqueue_internal.h" | 49 | #include "workqueue_internal.h" |
49 | 50 | ||
@@ -253,6 +254,12 @@ struct workqueue_struct { | |||
253 | 254 | ||
254 | static struct kmem_cache *pwq_cache; | 255 | static struct kmem_cache *pwq_cache; |
255 | 256 | ||
257 | static int wq_numa_tbl_len; /* highest possible NUMA node id + 1 */ | ||
258 | static cpumask_var_t *wq_numa_possible_cpumask; | ||
259 | /* possible CPUs of each node */ | ||
260 | |||
261 | static bool wq_numa_enabled; /* unbound NUMA affinity enabled */ | ||
262 | |||
256 | static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */ | 263 | static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */ |
257 | static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */ | 264 | static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */ |
258 | 265 | ||
@@ -4407,6 +4414,43 @@ out_unlock: | |||
4407 | } | 4414 | } |
4408 | #endif /* CONFIG_FREEZER */ | 4415 | #endif /* CONFIG_FREEZER */ |
4409 | 4416 | ||
4417 | static void __init wq_numa_init(void) | ||
4418 | { | ||
4419 | cpumask_var_t *tbl; | ||
4420 | int node, cpu; | ||
4421 | |||
4422 | /* determine NUMA pwq table len - highest node id + 1 */ | ||
4423 | for_each_node(node) | ||
4424 | wq_numa_tbl_len = max(wq_numa_tbl_len, node + 1); | ||
4425 | |||
4426 | if (num_possible_nodes() <= 1) | ||
4427 | return; | ||
4428 | |||
4429 | /* | ||
4430 | * We want masks of possible CPUs of each node which isn't readily | ||
4431 | * available. Build one from cpu_to_node() which should have been | ||
4432 | * fully initialized by now. | ||
4433 | */ | ||
4434 | tbl = kzalloc(wq_numa_tbl_len * sizeof(tbl[0]), GFP_KERNEL); | ||
4435 | BUG_ON(!tbl); | ||
4436 | |||
4437 | for_each_node(node) | ||
4438 | BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, node)); | ||
4439 | |||
4440 | for_each_possible_cpu(cpu) { | ||
4441 | node = cpu_to_node(cpu); | ||
4442 | if (WARN_ON(node == NUMA_NO_NODE)) { | ||
4443 | pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu); | ||
4444 | /* happens iff arch is bonkers, let's just proceed */ | ||
4445 | return; | ||
4446 | } | ||
4447 | cpumask_set_cpu(cpu, tbl[node]); | ||
4448 | } | ||
4449 | |||
4450 | wq_numa_possible_cpumask = tbl; | ||
4451 | wq_numa_enabled = true; | ||
4452 | } | ||
4453 | |||
4410 | static int __init init_workqueues(void) | 4454 | static int __init init_workqueues(void) |
4411 | { | 4455 | { |
4412 | int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL }; | 4456 | int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL }; |
@@ -4423,6 +4467,8 @@ static int __init init_workqueues(void) | |||
4423 | cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP); | 4467 | cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP); |
4424 | hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN); | 4468 | hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN); |
4425 | 4469 | ||
4470 | wq_numa_init(); | ||
4471 | |||
4426 | /* initialize CPU pools */ | 4472 | /* initialize CPU pools */ |
4427 | for_each_possible_cpu(cpu) { | 4473 | for_each_possible_cpu(cpu) { |
4428 | struct worker_pool *pool; | 4474 | struct worker_pool *pool; |