path: root/kernel/cpuset.c
author	Miao Xie <miaox@cn.fujitsu.com>	2009-06-16 18:31:46 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 22:47:31 -0400
commit	f3b39d47ebc51416fc3b690a32dfe030a2035e67 (patch)
tree	98873bacc74c60407ba3e93337d12351f35711e3 /kernel/cpuset.c
parent	dcf975d58565880a134afb13bde511d1b873ce79 (diff)
cpusets: restructure the function cpuset_update_task_memory_state()
The kernel still allocates the page cache on the old nodes after its cpuset's mems is modified while 'memory_spread_page' is set, and it does not spread the page cache evenly over all the nodes the faulting task is allowed to use after memory_spread_page is set. This is caused by the task's stale mems_allowed and flags: the current kernel does not update them until some function invokes cpuset_update_task_memory_state(), which is sometimes too late. We must update the tasks' mems_allowed and flags in time. Slab has the same problem.

The following patches fix this bug by updating tasks' mems_allowed and spread flags after their cpuset's mems or spread flag is changed.

This patch:

Extract a function from cpuset_update_task_memory_state(). It will be used later to update tasks' page/slab spread flags after their cpuset's flag is set.

Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Paul Menage <menage@google.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Yasunori Goto <y-goto@jp.fujitsu.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
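As a rough sketch of how the follow-up patches might apply the extracted helper to every task attached to a cpuset once its spread flag is toggled, the illustration below walks the cpuset's tasks with the existing cgroup_scan_tasks() iterator. The names cpuset_change_flag() and update_tasks_flags() are illustrative assumptions and are not part of this patch.

/* Hypothetical per-task callback: reuse the helper extracted by this patch. */
static void cpuset_change_flag(struct task_struct *tsk,
				struct cgroup_scanner *scan)
{
	cpuset_update_task_spread_flag(cgroup_cs(scan->cg), tsk);
}

/*
 * Hypothetical sketch: push the cpuset's new page/slab spread flags out to
 * every task in the cpuset after its flag bits have been changed.
 */
static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
{
	struct cgroup_scanner scan;

	scan.cg = cs->css.cgroup;
	scan.test_task = NULL;		/* process every task in the cpuset */
	scan.process_task = cpuset_change_flag;
	scan.heap = heap;
	cgroup_scan_tasks(&scan);
}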
Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r--	kernel/cpuset.c	27
1 file changed, 19 insertions(+), 8 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index d5a7e17474ee..66b24d9b6638 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -331,6 +331,24 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
 	BUG_ON(!nodes_intersects(*pmask, node_states[N_HIGH_MEMORY]));
 }
 
+/*
+ * update task's spread flag if cpuset's page/slab spread flag is set
+ *
+ * Called with callback_mutex/cgroup_mutex held
+ */
+static void cpuset_update_task_spread_flag(struct cpuset *cs,
+					struct task_struct *tsk)
+{
+	if (is_spread_page(cs))
+		tsk->flags |= PF_SPREAD_PAGE;
+	else
+		tsk->flags &= ~PF_SPREAD_PAGE;
+	if (is_spread_slab(cs))
+		tsk->flags |= PF_SPREAD_SLAB;
+	else
+		tsk->flags &= ~PF_SPREAD_SLAB;
+}
+
 /**
  * cpuset_update_task_memory_state - update task memory placement
  *
@@ -388,14 +406,7 @@ void cpuset_update_task_memory_state(void)
 		cs = task_cs(tsk);	/* Maybe changed when task not locked */
 		guarantee_online_mems(cs, &tsk->mems_allowed);
 		tsk->cpuset_mems_generation = cs->mems_generation;
-		if (is_spread_page(cs))
-			tsk->flags |= PF_SPREAD_PAGE;
-		else
-			tsk->flags &= ~PF_SPREAD_PAGE;
-		if (is_spread_slab(cs))
-			tsk->flags |= PF_SPREAD_SLAB;
-		else
-			tsk->flags &= ~PF_SPREAD_SLAB;
+		cpuset_update_task_spread_flag(cs, tsk);
 		task_unlock(tsk);
 		mutex_unlock(&callback_mutex);
 		mpol_rebind_task(tsk, &tsk->mems_allowed);