author	Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>	2012-05-24 10:16:41 -0400
committer	Ingo Molnar <mingo@kernel.org>	2012-07-24 07:53:18 -0400
commit	80d1fa6463d934969b7aebf04107fc133463f0f6 (patch)
tree	9e86e50c41016e68df655890c93a3b200c6a3ec0 /kernel/cpuset.c
parent	d35be8bab9b0ce44bed4b9453f86ebf64062721e (diff)
cpusets, hotplug: Implement cpuset tree traversal in a helper function
At present, the functions that deal with cpusets during CPU/Mem hotplug
are quite messy, since a lot of the functionality is mixed up without clear
separation. And this takes a toll on optimization as well.

For example, the function cpuset_update_active_cpus() is called on both
CPU offline and CPU online events; and it invokes scan_for_empty_cpusets(),
which makes sense only for CPU offline events. And hence, the current code
ends up unnecessarily traversing the cpuset tree during CPU online also.

As a first step towards cleaning up those functions, encapsulate the
cpuset tree traversal in a helper function, so as to facilitate upcoming
changes.

Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20120524141635.3692.893.stgit@srivatsabhat.in.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
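As an illustration of the pattern this patch factors out, the sketch below
reimplements the same queue-based, parent-before-children walk in standalone
C. The struct node type, its fixed-size child array, and the next_node()
helper are hypothetical stand-ins for the kernel's cpuset/cgroup list
machinery, not kernel API; only the traversal order that cpuset_next()
encodes is the point.

/*
 * Standalone sketch of the traversal that cpuset_next() implements:
 * pop the head of a FIFO queue, enqueue the popped node's children,
 * and return the node. Repeating this walks the tree from top to
 * bottom, completing one layer before dropping down to the next.
 */
#include <stdio.h>
#include <stddef.h>

#define MAXNODES 16

struct node {
	const char *name;
	struct node *children[4];	/* NULL-terminated child list */
};

struct queue {
	struct node *slot[MAXNODES];
	int head, tail;
};

/* Dequeue one node and enqueue its children (one step of the walk). */
static struct node *next_node(struct queue *q)
{
	struct node *n;
	int i;

	if (q->head == q->tail)		/* queue empty: walk finished */
		return NULL;

	n = q->slot[q->head++];
	for (i = 0; i < 4 && n->children[i]; i++)
		q->slot[q->tail++] = n->children[i];

	return n;
}

int main(void)
{
	struct node d = { .name = "D" };
	struct node b = { .name = "B", .children = { &d } };
	struct node c = { .name = "C" };
	struct node root = { .name = "A", .children = { &b, &c } };
	struct queue q = { .slot = { &root }, .head = 0, .tail = 1 };
	struct node *n;

	/* Prints A B C D: each parent is visited before its children. */
	while ((n = next_node(&q)) != NULL)
		printf("%s ", n->name);
	printf("\n");
	return 0;
}

The while ((n = next_node(&q)) != NULL) loop in main() mirrors how
scan_for_empty_cpusets() consumes cpuset_next() after this patch.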
Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r--	kernel/cpuset.c	36
1 file changed, 27 insertions(+), 9 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 746d1eeb5dbe..ba96349aa522 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1990,6 +1990,32 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
 }
 
 /*
+ * Helper function to traverse cpusets.
+ * It can be used to walk the cpuset tree from top to bottom, completing
+ * one layer before dropping down to the next (thus always processing a
+ * node before any of its children).
+ */
+static struct cpuset *cpuset_next(struct list_head *queue)
+{
+	struct cpuset *cp;
+	struct cpuset *child;	/* scans child cpusets of cp */
+	struct cgroup *cont;
+
+	if (list_empty(queue))
+		return NULL;
+
+	cp = list_first_entry(queue, struct cpuset, stack_list);
+	list_del(queue->next);
+	list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
+		child = cgroup_cs(cont);
+		list_add_tail(&child->stack_list, queue);
+	}
+
+	return cp;
+}
+
+
+/*
  * Walk the specified cpuset subtree and look for empty cpusets.
  * The tasks of such cpuset must be moved to a parent cpuset.
  *
@@ -2008,19 +2034,11 @@ static void scan_for_empty_cpusets(struct cpuset *root)
 {
 	LIST_HEAD(queue);
 	struct cpuset *cp;	/* scans cpusets being updated */
-	struct cpuset *child;	/* scans child cpusets of cp */
-	struct cgroup *cont;
 	static nodemask_t oldmems;	/* protected by cgroup_mutex */
 
 	list_add_tail((struct list_head *)&root->stack_list, &queue);
 
-	while (!list_empty(&queue)) {
-		cp = list_first_entry(&queue, struct cpuset, stack_list);
-		list_del(queue.next);
-		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
-			child = cgroup_cs(cont);
-			list_add_tail(&child->stack_list, &queue);
-		}
+	while ((cp = cpuset_next(&queue)) != NULL) {
 
 		/* Continue past cpusets with all cpus, mems online */
 		if (cpumask_subset(cp->cpus_allowed, cpu_active_mask) &&