author		Gregory Haskins <ghaskins@novell.com>	2009-07-30 10:57:23 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-08-02 08:26:12 -0400
commit		00aec93d10a051ea64f83eff75d4065a19508ea6 (patch)
tree		010148caeaf4d4b36188ba15828343aa6c74efc9 /kernel
parent		3f029d3c6d62068d59301d90c18dbde8ee402107 (diff)
sched: Fully integrate cpu_active_map and root-domain code
Reflect "active" cpus in the rq->rd->online field, instead of
the online_map.
The motivation is that things that use the root-domain code
(such as cpupri) only care about cpus classified as "active"
anyway. By synchronizing the root-domain state with the active
map, we allow several optimizations.
For instance, we can remove an extra cpumask_and from the
scheduler hotpath by utilizing rq->rd->online (since it is now
a cached version of cpu_active_map & rq->rd->span).
Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Max Krasnyansky <maxk@qualcomm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20090730145723.25226.24493.stgit@dev.haskins.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
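
The hotpath optimization described above hinges on one invariant: after this
patch, rq->rd->online is a cached copy of cpu_active_map & rq->rd->span,
maintained as CPUs go active/inactive via set_rq_online()/set_rq_offline().
A minimal kernel-style sanity check for that invariant might look like the
sketch below (rd_online_is_cached() is a hypothetical debug helper, not part
of the patch):

    /*
     * Hypothetical debug helper (not in the patch): verifies that
     * rd->online really is the cached value of cpu_active_mask & rd->span.
     * Assumes kernel context (linux/cpumask.h, linux/slab.h).
     */
    static bool rd_online_is_cached(struct root_domain *rd)
    {
    	cpumask_var_t expect;
    	bool ok;

    	if (!alloc_cpumask_var(&expect, GFP_KERNEL))
    		return false;

    	cpumask_and(expect, cpu_active_mask, rd->span);
    	ok = cpumask_equal(expect, rd->online);
    	free_cpumask_var(expect);

    	return ok;
    }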
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c      |  2 +-
-rw-r--r--	kernel/sched_fair.c | 10 +++++++---
-rw-r--r--	kernel/sched_rt.c   |  7 -------
3 files changed, 8 insertions(+), 11 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 613fee54fc89..475138c42548 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7927,7 +7927,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	rq->rd = rd;
 
 	cpumask_set_cpu(rq->cpu, rd->span);
-	if (cpumask_test_cpu(rq->cpu, cpu_online_mask))
+	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
 		set_rq_online(rq);
 
 	spin_unlock_irqrestore(&rq->lock, flags);
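
Context for the hunk above (editorial note, not from the patch): during CPU
hot-unplug a CPU leaves cpu_active_mask before it leaves cpu_online_mask, so
the active mask is always a subset of the online mask and this change only
tightens the test in rq_attach_root(). A one-line illustration of that
containment (active_subset_of_online() is a hypothetical check):

    /*
     * Hypothetical assertion: an "active" cpu is always "online", so
     * testing cpu_active_mask can never mark an offline cpu rq-online.
     */
    static bool active_subset_of_online(void)
    {
    	return cpumask_subset(cpu_active_mask, cpu_online_mask);
    }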
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 652e8bdef9aa..493472984879 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1046,17 +1046,21 @@ static void yield_task_fair(struct rq *rq)
  * search starts with cpus closest then further out as needed,
  * so we always favor a closer, idle cpu.
  * Domains may include CPUs that are not usable for migration,
- * hence we need to mask them out (cpu_active_mask)
+ * hence we need to mask them out (rq->rd->online)
  *
  * Returns the CPU we should wake onto.
  */
 #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
+
+#define cpu_rd_active(cpu, rq) cpumask_test_cpu(cpu, rq->rd->online)
+
 static int wake_idle(int cpu, struct task_struct *p)
 {
 	struct sched_domain *sd;
 	int i;
 	unsigned int chosen_wakeup_cpu;
 	int this_cpu;
+	struct rq *task_rq = task_rq(p);
 
 	/*
 	 * At POWERSAVINGS_BALANCE_WAKEUP level, if both this_cpu and prev_cpu
@@ -1089,10 +1093,10 @@ static int wake_idle(int cpu, struct task_struct *p)
 	for_each_domain(cpu, sd) {
 		if ((sd->flags & SD_WAKE_IDLE)
 		    || ((sd->flags & SD_WAKE_IDLE_FAR)
-			&& !task_hot(p, task_rq(p)->clock, sd))) {
+			&& !task_hot(p, task_rq->clock, sd))) {
 			for_each_cpu_and(i, sched_domain_span(sd),
 					 &p->cpus_allowed) {
-				if (cpu_active(i) && idle_cpu(i)) {
+				if (cpu_rd_active(i, task_rq) && idle_cpu(i)) {
 					if (i != task_cpu(p)) {
 						schedstat_inc(p,
 						       se.nr_wakeups_idle);
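
A note on the new cpu_rd_active() macro (editorial, hedged): every cpu that
wake_idle() probes lies in sched_domain_span(sd), which is contained in the
task's rd->span, so under the cached-mask invariant testing rd->online gives
the same answer as the old cpu_active(i) test without redoing the AND on each
iteration. An equivalent function form, for illustration only
(cpu_rd_active_fn() is a hypothetical name):

    /*
     * Hypothetical inline equivalent of the cpu_rd_active() macro: given
     * rd->online == (cpu_active_mask & rd->span) and cpu already inside
     * rd->span, this returns the same result as cpu_active(cpu).
     */
    static inline int cpu_rd_active_fn(int cpu, struct rq *rq)
    {
    	return cpumask_test_cpu(cpu, rq->rd->online);
    }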
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index a8f89bc3e5eb..13f728ef5b38 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1173,13 +1173,6 @@ static int find_lowest_rq(struct task_struct *task)
 		return -1; /* No targets found */
 
 	/*
-	 * Only consider CPUs that are usable for migration.
-	 * I guess we might want to change cpupri_find() to ignore those
-	 * in the first place.
-	 */
-	cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
-
-	/*
 	 * At this point we have built a mask of cpus representing the
 	 * lowest priority tasks in the system. Now we want to elect
 	 * the best one based on our affinity and topology.
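
Why dropping the cpumask_and() in find_lowest_rq() is safe (editorial note):
cpupri is maintained through the root-domain online/offline hooks, so once
rd->online tracks the active map, cpupri_find() can no longer report a cpu
that is going down. The relevant existing hook, paraphrased below as a
simplified sketch (rq_offline_rt_sketch() condenses the real rq_offline_rt()
in sched_rt.c; it is not new code from this patch):

    /*
     * Simplified paraphrase of rq_offline_rt(): marking a departing cpu
     * CPUPRI_INVALID removes it from cpupri_find() results, which is what
     * made the explicit cpumask_and() above redundant.
     */
    static void rq_offline_rt_sketch(struct rq *rq)
    {
    	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
    }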