author     Jack Steiner <steiner@sgi.com>                  2010-05-26 17:42:49 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-05-27 12:12:44 -0400
commit     6adef3ebe570bcde67fd6c16101451ddde5712b5 (patch)
tree       0f60e2a4d01850ae33aee6cefc7a59845ede89a0
parent     2c488db27b614816024e7994117f599337de0f34 (diff)
cpusets: new round-robin rotor for SLAB allocations
We have observed several workloads running on multi-node systems where
memory is assigned unevenly across the nodes in the system. There are
numerous reasons for this, but one is the round-robin rotor in
cpuset_mem_spread_node().
For example, a simple test that writes a multi-page file will allocate
pages on nodes 0 2 4 6 ... Odd nodes are skipped. (Sometimes it
allocates on odd nodes & skips even nodes).
An example is shown below. The program "lfile" writes a file consisting
of 10 pages. It then mmaps the file & uses get_mempolicy(...,
MPOL_F_NODE) to determine the nodes where the file pages were allocated.
The output is shown below:
# ./lfile
allocated on nodes: 2 4 6 0 1 2 6 0 2
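For reference, a minimal sketch of what such a test program might look
like (a hypothetical reconstruction, not the actual lfile source; the
filename and page count are assumptions, and note that querying a
specific address also requires MPOL_F_ADDR alongside MPOL_F_NODE):

/* build with: gcc lfile.c -o lfile -lnuma */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <numaif.h>             /* get_mempolicy(), MPOL_F_NODE, MPOL_F_ADDR */

#define NPAGES  10

int main(void)
{
        long pagesize = sysconf(_SC_PAGESIZE);
        char *buf = malloc(pagesize);
        char *map;
        int fd, i;

        fd = open("testfile", O_CREAT | O_RDWR | O_TRUNC, 0644);
        if (fd < 0 || !buf) {
                perror("setup");
                return 1;
        }
        memset(buf, 'x', pagesize);
        for (i = 0; i < NPAGES; i++)            /* write a 10-page file */
                if (write(fd, buf, pagesize) != pagesize) {
                        perror("write");
                        return 1;
                }

        map = mmap(NULL, NPAGES * pagesize, PROT_READ, MAP_SHARED, fd, 0);
        if (map == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        printf("allocated on nodes:");
        for (i = 0; i < NPAGES; i++) {
                int node;
                /* MPOL_F_NODE|MPOL_F_ADDR: report the node backing this page */
                if (get_mempolicy(&node, NULL, 0, map + i * pagesize,
                                  MPOL_F_NODE | MPOL_F_ADDR) == 0)
                        printf(" %d", node);
        }
        printf("\n");
        return 0;
}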
There is a single rotor that is used for allocating both file pages & slab
pages. Writing the file allocates both a data page & a slab page
(buffer_head), so the rotor advances two nodes for each file page
written.
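As a toy model of the problem (a userspace illustration only, not kernel
code; the eight-node count and zero starting rotor are made up),
interleaving the two consumers on one shared rotor visits every other
node:

#include <stdio.h>

#define NR_NODES 8      /* made-up node count */

static int rotor;       /* single shared rotor, as before the patch */

static int spread_node(void)
{
        rotor = (rotor + 1) % NR_NODES; /* simplified next_node() wraparound */
        return rotor;
}

int main(void)
{
        int i;

        printf("data pages land on:");
        for (i = 0; i < 10; i++) {
                printf(" %d", spread_node());   /* file page */
                spread_node();                  /* buffer_head slab page */
        }
        printf("\n");   /* -> 1 3 5 7 1 3 5 7 1 3: alternate nodes skipped */
        return 0;
}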
A quick experiment confirms that this is the cause of the uneven
allocation:
# echo 0 >/dev/cpuset/memory_spread_slab
# ./lfile
allocated on nodes: 6 7 8 9 0 1 2 3 4 5
This patch introduces a second rotor that is used for slab allocations.
Signed-off-by: Jack Steiner <steiner@sgi.com>
Acked-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Paul Menage <menage@google.com>
Cc: Jack Steiner <steiner@sgi.com>
Cc: Robin Holt <holt@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/cpuset.h |  6
-rw-r--r--  include/linux/sched.h  |  1
-rw-r--r--  kernel/cpuset.c        | 20
-rw-r--r--  mm/slab.c              |  2
4 files changed, 24 insertions, 5 deletions
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 20b51cab6593..457ed765a116 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -69,6 +69,7 @@ extern void cpuset_task_status_allowed(struct seq_file *m,
                                        struct task_struct *task);
 
 extern int cpuset_mem_spread_node(void);
+extern int cpuset_slab_spread_node(void);
 
 static inline int cpuset_do_page_mem_spread(void)
 {
@@ -194,6 +195,11 @@ static inline int cpuset_mem_spread_node(void)
        return 0;
 }
 
+static inline int cpuset_slab_spread_node(void)
+{
+       return 0;
+}
+
 static inline int cpuset_do_page_mem_spread(void)
 {
        return 0;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c0151ffd3541..4f31a166b1a1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1423,6 +1423,7 @@ struct task_struct {
        nodemask_t mems_allowed;        /* Protected by alloc_lock */
        int mems_allowed_change_disable;
        int cpuset_mem_spread_rotor;
+       int cpuset_slab_spread_rotor;
 #endif
 #ifdef CONFIG_CGROUPS
        /* Control Group info protected by css_set_lock */
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 61d6af7fa676..02b9611eadde 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2469,7 +2469,8 @@ void cpuset_unlock(void)
 }
 
 /**
- * cpuset_mem_spread_node() - On which node to begin search for a page
+ * cpuset_mem_spread_node() - On which node to begin search for a file page
+ * cpuset_slab_spread_node() - On which node to begin search for a slab page
  *
  * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
  * tasks in a cpuset with is_spread_page or is_spread_slab set),
@@ -2494,16 +2495,27 @@ void cpuset_unlock(void)
  * See kmem_cache_alloc_node().
  */
 
-int cpuset_mem_spread_node(void)
+static int cpuset_spread_node(int *rotor)
 {
        int node;
 
-       node = next_node(current->cpuset_mem_spread_rotor, current->mems_allowed);
+       node = next_node(*rotor, current->mems_allowed);
        if (node == MAX_NUMNODES)
                node = first_node(current->mems_allowed);
-       current->cpuset_mem_spread_rotor = node;
+       *rotor = node;
        return node;
 }
+
+int cpuset_mem_spread_node(void)
+{
+       return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
+}
+
+int cpuset_slab_spread_node(void)
+{
+       return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
+}
+
 EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
 
 /**
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3219,7 +3219,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
        nid_alloc = nid_here = numa_node_id();
        get_mems_allowed();
        if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
-               nid_alloc = cpuset_mem_spread_node();
+               nid_alloc = cpuset_slab_spread_node();
        else if (current->mempolicy)
                nid_alloc = slab_node(current->mempolicy);
        put_mems_allowed();
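The effect of the patch can be seen by rerunning the earlier toy model
with the split-rotor scheme; the pointer-to-rotor helper deliberately
mirrors the new cpuset_spread_node() above (still a userspace
illustration with a made-up node count):

#include <stdio.h>

#define NR_NODES 8      /* made-up node count */

static int mem_rotor, slab_rotor;

/* counterpart of cpuset_spread_node(): each consumer owns its rotor */
static int spread_node(int *rotor)
{
        *rotor = (*rotor + 1) % NR_NODES;       /* simplified next_node() wraparound */
        return *rotor;
}

int main(void)
{
        int i;

        printf("data pages land on:");
        for (i = 0; i < 10; i++) {
                printf(" %d", spread_node(&mem_rotor));  /* file page */
                spread_node(&slab_rotor);                /* buffer_head slab page */
        }
        printf("\n");   /* -> 1 2 3 4 5 6 7 0 1 2: no nodes skipped */
        return 0;
}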