author		M.Baris Demiray <baris@labristeknoloji.com>	2005-09-10 03:26:09 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-09-10 13:06:22 -0400
commit		da5a5522709a030da91932d4d4c2b179a481a8c0
tree		7e7d528dcc1cccf71215ebb5b8774a849e5a1753 /kernel/sched.c
parent		fc38ed7531eefa332c8c69ee288487860cd6b426
[PATCH] sched: make idlest_group/cpu cpus_allowed-aware
Add the relevant checks to find_idlest_group() and find_idlest_cpu() so that find_idlest_group() returns only groups that contain allowed CPUs, and find_idlest_cpu() returns only allowed CPUs.

Signed-off-by: M.Baris Demiray <baris@labristeknoloji.com>
Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
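For illustration, a minimal userspace sketch of the idea behind the patch follows: intersect the candidate CPU set with the task's cpus_allowed and only consider CPUs in that intersection when picking the least loaded one. Plain unsigned long bitmasks stand in for the kernel's cpumask_t, cpus_and() and cpus_intersects(); the function name, load values and masks below are made up for the example and are not part of the patch.

/*
 * Sketch only: pick the least-loaded CPU, but consider just the CPUs that
 * are both in the group and allowed for the task.  unsigned long bitmasks
 * replace cpumask_t; cpus_and()/cpus_intersects() become & and a zero test.
 */
#include <limits.h>
#include <stdio.h>

static int find_idlest_allowed_cpu(unsigned long group_mask,
				   unsigned long allowed_mask,
				   const unsigned long *load, int ncpus)
{
	unsigned long tmp = group_mask & allowed_mask;	/* cpus_and() */
	unsigned long min_load = ULONG_MAX;
	int idlest = -1;
	int i;

	if (!tmp)		/* no CPUs allowed: like the nextgroup skip */
		return -1;

	for (i = 0; i < ncpus; i++) {
		if (!(tmp & (1UL << i)))	/* traverse only allowed CPUs */
			continue;
		if (load[i] < min_load) {
			min_load = load[i];
			idlest = i;
		}
	}
	return idlest;
}

int main(void)
{
	unsigned long load[4] = { 30, 5, 10, 40 };

	/* group = CPUs 0-3, task allowed only on CPUs 0 and 2: picks CPU 2 */
	printf("%d\n", find_idlest_allowed_cpu(0xf, 0x5, load, 4));
	return 0;
}

In the patch itself this is why find_idlest_cpu() gains a struct task_struct *p argument: it needs p->cpus_allowed to build the intersection, and sched_balance_self() now passes the current task t at the call site.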
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	17
1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index ef748e691608..bac23fb418f6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -966,8 +966,11 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 		int local_group;
 		int i;
 
+		/* Skip over this group if it has no CPUs allowed */
+		if (!cpus_intersects(group->cpumask, p->cpus_allowed))
+			goto nextgroup;
+
 		local_group = cpu_isset(this_cpu, group->cpumask);
-		/* XXX: put a cpus allowed check */
 
 		/* Tally up the load of all CPUs in the group */
 		avg_load = 0;
@@ -992,6 +995,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 			min_load = avg_load;
 			idlest = group;
 		}
+nextgroup:
 		group = group->next;
 	} while (group != sd->groups);
 
@@ -1003,13 +1007,18 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 /*
  * find_idlest_queue - find the idlest runqueue among the cpus in group.
  */
-static int find_idlest_cpu(struct sched_group *group, int this_cpu)
+static int find_idlest_cpu(struct sched_group *group,
+		struct task_struct *p, int this_cpu)
 {
+	cpumask_t tmp;
 	unsigned long load, min_load = ULONG_MAX;
 	int idlest = -1;
 	int i;
 
-	for_each_cpu_mask(i, group->cpumask) {
+	/* Traverse only the allowed CPUs */
+	cpus_and(tmp, group->cpumask, p->cpus_allowed);
+
+	for_each_cpu_mask(i, tmp) {
 		load = source_load(i, 0);
 
 		if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -1052,7 +1061,7 @@ static int sched_balance_self(int cpu, int flag)
 	if (!group)
 		goto nextlevel;
 
-	new_cpu = find_idlest_cpu(group, cpu);
+	new_cpu = find_idlest_cpu(group, t, cpu);
 	if (new_cpu == -1 || new_cpu == cpu)
 		goto nextlevel;
 