author	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-12 19:29:00 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-12 19:29:00 -0500
commit	12847095e9c96cdf1ca6dd980ca733c38f8e9a98 (patch)
tree	98dc35325c1bf3c61e2c4a2dce8e827bf0d99af5 /kernel
parent	1181a2449969c59f0ab6b95374fe6983cc07286d (diff)
parent	fd2ab30b65e961b974ae0bc71e0d47d6b35e0968 (diff)
Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  kernel/sched.c: add missing forward declaration for 'double_rq_lock'
  sched: partly revert "sched debug: remove NULL checking in print_cfs_rt_rq()"
  cpumask: fix CONFIG_NUMA=y sched.c
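
Note on the first fix above: under CONFIG_SMP, sched.c calls double_rq_lock() above the point where the function is defined, so the patch adds a forward declaration near the top of the #ifdef CONFIG_SMP block. The sketch below is a minimal user-space illustration of the same pattern, not the kernel code; struct rq and balance() here are stand-ins:

/*
 * Minimal sketch (stand-in types, not the kernel's): a static
 * function called above its definition needs a forward declaration,
 * or the build breaks once implicit declarations are treated as
 * errors.
 */
#include <stdio.h>

struct rq { int locked; };		/* stand-in for the runqueue */

static void double_rq_lock(struct rq *rq1, struct rq *rq2);	/* the missing declaration */

static void balance(struct rq *a, struct rq *b)
{
	double_rq_lock(a, b);		/* used here, defined below */
}

static void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
	rq1->locked = rq2->locked = 1;
	printf("both runqueues locked\n");
}

int main(void)
{
	struct rq a = { 0 }, b = { 0 };

	balance(&a, &b);
	return 0;
}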
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	13
-rw-r--r--	kernel/sched_debug.c	21
2 files changed, 25 insertions, 9 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index deb5ac8c12f3..8be2c13b50d0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -125,6 +125,9 @@ DEFINE_TRACE(sched_switch);
 DEFINE_TRACE(sched_migrate_task);
 
 #ifdef CONFIG_SMP
+
+static void double_rq_lock(struct rq *rq1, struct rq *rq2);
+
 /*
  * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
  * Since cpu_power is a 'constant', we can use a reciprocal divide.
@@ -7282,10 +7285,10 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
  * groups, so roll our own. Now each node has its own list of groups which
  * gets dynamically allocated.
  */
-static DEFINE_PER_CPU(struct sched_domain, node_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
 static struct sched_group ***sched_group_nodes_bycpu;
 
-static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
 static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
 
 static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
@@ -7560,7 +7563,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 #ifdef CONFIG_NUMA
 		if (cpumask_weight(cpu_map) >
 				SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
-			sd = &per_cpu(allnodes_domains, i);
+			sd = &per_cpu(allnodes_domains, i).sd;
 			SD_INIT(sd, ALLNODES);
 			set_domain_attribute(sd, attr);
 			cpumask_copy(sched_domain_span(sd), cpu_map);
@@ -7570,7 +7573,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		} else
 			p = NULL;
 
-		sd = &per_cpu(node_domains, i);
+		sd = &per_cpu(node_domains, i).sd;
 		SD_INIT(sd, NODE);
 		set_domain_attribute(sd, attr);
 		sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
@@ -7688,7 +7691,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		for_each_cpu(j, nodemask) {
 			struct sched_domain *sd;
 
-			sd = &per_cpu(node_domains, j);
+			sd = &per_cpu(node_domains, j).sd;
 			sd->groups = sg;
 		}
 		sg->__cpu_power = 0;
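
Note on the cpumask fix: node_domains and allnodes_domains change from per-CPU struct sched_domain to per-CPU struct static_sched_domain, so every user must now take the embedded .sd member, as the hunks above do. A minimal user-space sketch of the idea follows; the layouts are assumptions for illustration (the wrapper pairs a domain with static storage for its cpumask span), not the kernel's definitions:

/*
 * Minimal sketch, not the kernel definitions: a wrapper type that
 * bundles a domain with the bitmap storage backing its span, so a
 * bare pointer to the per-CPU slot is no longer a sched_domain *.
 */
#include <stdio.h>

#define NR_CPUS 64
#define BITS_PER_LONG (8 * sizeof(unsigned long))

struct sched_domain {
	int flags;					/* stand-in fields only */
};

struct static_sched_domain {
	struct sched_domain sd;				/* the domain itself */
	unsigned long span[NR_CPUS / BITS_PER_LONG];	/* its cpumask storage */
};

/* stand-in for DEFINE_PER_CPU(struct static_sched_domain, node_domains) */
static struct static_sched_domain node_domains[NR_CPUS];

int main(void)
{
	int i = 3;
	/* mirrors sd = &per_cpu(node_domains, i).sd after the change;
	 * a bare &node_domains[i] would now be the wrong type */
	struct sched_domain *sd = &node_domains[i].sd;

	sd->flags |= 1;
	node_domains[i].span[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
	printf("domain %d: flags=%d cpu-bit=%d\n", i, sd->flags,
	       !!(node_domains[i].span[0] & (1UL << i)));
	return 0;
}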
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 4293cfa9681d..16eeba4e4169 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -145,6 +145,19 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
 	read_unlock_irqrestore(&tasklist_lock, flags);
 }
 
+#if defined(CONFIG_CGROUP_SCHED) && \
+	(defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED))
+static void task_group_path(struct task_group *tg, char *buf, int buflen)
+{
+	/* may be NULL if the underlying cgroup isn't fully-created yet */
+	if (!tg->css.cgroup) {
+		buf[0] = '\0';
+		return;
+	}
+	cgroup_path(tg->css.cgroup, buf, buflen);
+}
+#endif
+
 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 {
 	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
@@ -154,10 +167,10 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	unsigned long flags;
 
 #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
-	char path[128] = "";
+	char path[128];
 	struct task_group *tg = cfs_rq->tg;
 
-	cgroup_path(tg->css.cgroup, path, sizeof(path));
+	task_group_path(tg, path, sizeof(path));
 
 	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
 #elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
@@ -208,10 +221,10 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 {
 #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_RT_GROUP_SCHED)
-	char path[128] = "";
+	char path[128];
 	struct task_group *tg = rt_rq->tg;
 
-	cgroup_path(tg->css.cgroup, path, sizeof(path));
+	task_group_path(tg, path, sizeof(path));
 
 	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, path);
 #else
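
Note on the partial revert: while a task group is still being created, tg->css.cgroup can be NULL, and print_cfs_rq()/print_rt_rq() used to hand it straight to cgroup_path(). The new task_group_path() helper above prints an empty path in that window instead of oopsing. A minimal user-space sketch of the same guard, with stand-in types rather than the kernel's:

/*
 * Minimal sketch of the NULL guard, stand-in types only: emit an
 * empty string while the group's cgroup pointer is still NULL
 * instead of dereferencing it.
 */
#include <stdio.h>

struct cgroup { const char *name; };
struct task_group { struct cgroup *cgroup; };	/* the kernel reaches this via tg->css */

static void cgroup_path(const struct cgroup *cg, char *buf, int buflen)
{
	snprintf(buf, buflen, "/%s", cg->name);	/* would crash on a NULL cg */
}

static void task_group_path(struct task_group *tg, char *buf, int buflen)
{
	/* may be NULL while the group is still being created */
	if (!tg->cgroup) {
		buf[0] = '\0';
		return;
	}
	cgroup_path(tg->cgroup, buf, buflen);
}

int main(void)
{
	char path[128];
	struct task_group half_made = { NULL };

	task_group_path(&half_made, path, sizeof(path));
	printf("path: '%s'\n", path);		/* empty path, no crash */
	return 0;
}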