author	Jack Steiner <steiner@sgi.com>	2010-05-26 17:42:49 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-27 12:12:44 -0400
commit	6adef3ebe570bcde67fd6c16101451ddde5712b5 (patch)
tree	0f60e2a4d01850ae33aee6cefc7a59845ede89a0 /include/linux
parent	2c488db27b614816024e7994117f599337de0f34 (diff)
cpusets: new round-robin rotor for SLAB allocations
We have observed several workloads running on multi-node systems where memory is assigned unevenly across the nodes in the system. There are numerous reasons for this, but one is the round-robin rotor in cpuset_mem_spread_node().

For example, a simple test that writes a multi-page file will allocate pages on nodes 0 2 4 6 ... Odd nodes are skipped. (Sometimes it allocates on odd nodes & skips even nodes).

An example is shown below. The program "lfile" writes a file consisting of 10 pages. The program then mmaps the file & uses get_mempolicy(..., MPOL_F_NODE) to determine the nodes where the file pages were allocated. The output is shown below:

	# ./lfile
	 allocated on nodes: 2 4 6 0 1 2 6 0 2

There is a single rotor that is used for allocating both file pages & slab pages. Writing the file allocates both a data page & a slab page (buffer_head). This advances the RR rotor 2 nodes for each page allocated.

A quick experiment seems to confirm this is the cause of the uneven allocation:

	# echo 0 >/dev/cpuset/memory_spread_slab
	# ./lfile
	 allocated on nodes: 6 7 8 9 0 1 2 3 4 5

This patch introduces a second rotor that is used for slab allocations.

Signed-off-by: Jack Steiner <steiner@sgi.com>
Acked-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Paul Menage <menage@google.com>
Cc: Jack Steiner <steiner@sgi.com>
Cc: Robin Holt <holt@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
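To make the test concrete, here is a minimal userspace sketch of an "lfile"-style program. The 10-page file, the mmap, and the get_mempolicy(..., MPOL_F_NODE) query come from the description above; the file name, the MPOL_F_ADDR usage, and the error handling are assumptions (build with -lnuma):

	/* lfile.c (hypothetical reconstruction): write a 10-page file, mmap
	 * it, and report the NUMA node backing each page. */
	#include <fcntl.h>
	#include <numaif.h>	/* get_mempolicy() */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	#define NPAGES 10

	int main(void)
	{
		long psz = sysconf(_SC_PAGESIZE);
		char *buf = malloc(psz);
		int fd = open("lfile.dat", O_CREAT | O_TRUNC | O_RDWR, 0644);
		int i, node;

		if (!buf || fd < 0) {
			perror("setup");
			return 1;
		}
		memset(buf, 'x', psz);
		for (i = 0; i < NPAGES; i++)	/* allocate 10 page-cache pages */
			if (write(fd, buf, psz) != psz) {
				perror("write");
				return 1;
			}

		char *map = mmap(NULL, NPAGES * psz, PROT_READ, MAP_SHARED, fd, 0);
		if (map == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		printf(" allocated on nodes:");
		for (i = 0; i < NPAGES; i++) {
			volatile char c = map[i * psz];	/* fault the page in */
			(void)c;
			/* MPOL_F_NODE | MPOL_F_ADDR: return the node backing addr */
			if (get_mempolicy(&node, NULL, 0, map + i * psz,
					  MPOL_F_NODE | MPOL_F_ADDR) == 0)
				printf(" %d", node);
		}
		printf("\n");
		return 0;
	}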
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/cpuset.h	6
-rw-r--r--	include/linux/sched.h	1
2 files changed, 7 insertions, 0 deletions
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 20b51cab6593..457ed765a116 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -69,6 +69,7 @@ extern void cpuset_task_status_allowed(struct seq_file *m,
 					struct task_struct *task);
 
 extern int cpuset_mem_spread_node(void);
+extern int cpuset_slab_spread_node(void);
 
 static inline int cpuset_do_page_mem_spread(void)
 {
@@ -194,6 +195,11 @@ static inline int cpuset_mem_spread_node(void)
 	return 0;
 }
 
+static inline int cpuset_slab_spread_node(void)
+{
+	return 0;
+}
+
 static inline int cpuset_do_page_mem_spread(void)
 {
 	return 0;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c0151ffd3541..4f31a166b1a1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1423,6 +1423,7 @@ struct task_struct {
 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
 	int mems_allowed_change_disable;
 	int cpuset_mem_spread_rotor;
+	int cpuset_slab_spread_rotor;
 #endif
 #ifdef CONFIG_CGROUPS
 	/* Control Group info protected by css_set_lock */
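The header changes above only add the new declaration, the !CONFIG_CPUSETS stub, and the per-task rotor field; the rotor logic itself lives in kernel/cpuset.c, which falls outside this diffstat. A sketch of how the two rotors can share one stepping helper (the helper name and exact form are assumptions, since that file is not shown here):

	/* Assumed shape of the kernel/cpuset.c side: one helper advances a
	 * rotor round-robin through current->mems_allowed, and each caller
	 * passes its own per-task rotor. */
	static int cpuset_spread_node(int *rotor)
	{
		int node;

		node = next_node(*rotor, current->mems_allowed);
		if (node == MAX_NUMNODES)
			node = first_node(current->mems_allowed);
		*rotor = node;
		return node;
	}

	int cpuset_mem_spread_node(void)
	{
		return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
	}

	int cpuset_slab_spread_node(void)
	{
		return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
	}

With separate rotors, a buffer_head (slab) allocation no longer advances the rotor used for page-cache pages, so writing a file walks the data-page rotor one node per page (0 1 2 3 ...) instead of skipping every other node.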