| author | Viresh Kumar <viresh.kumar@linaro.org> | 2013-06-10 06:57:20 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2013-06-19 06:58:45 -0400 |
| commit | 27723a68caf05381b0b0bc6e127da2c9e7bcb775 (patch) | |
| tree | 461a2096520470a40816680ca9a1b32d04acaf7d /kernel/sched/core.c | |
| parent | c75e01288ce9c9a6b7beb6b23c07d2e4d1db8c84 (diff) | |
sched: Create for_each_sd_topology()
The same for loop traversing sched_domain_topology was open-coded in multiple places in
core.c. This patch removes that code redundancy by creating for_each_sd_topology().
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/e0e04542f54e9464bd9da54f5ccfe62ec6c4c0bc.1370861520.git.viresh.kumar@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
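
The following is a minimal standalone sketch (not kernel code) of the pattern this patch introduces: an iterator macro that walks the topology table until it reaches the sentinel entry whose ->init pointer is NULL. The struct layout, the dummy_init() helper, and the level names are simplified stand-ins for illustration only, not the kernel's actual definitions.

```c
#include <stdio.h>

/* Simplified stand-in for the kernel's sched_domain_topology_level. */
struct sched_domain_topology_level {
	void (*init)(void);	/* NULL marks the end of the table */
	const char *name;	/* illustrative field */
};

static void dummy_init(void) { }

/* Hypothetical topology table; the NULL ->init entry terminates it. */
static struct sched_domain_topology_level default_topology[] = {
	{ dummy_init, "SMT" },
	{ dummy_init, "MC"  },
	{ dummy_init, "DIE" },
	{ NULL, NULL },
};

static struct sched_domain_topology_level *sched_domain_topology = default_topology;

/* Same shape as the macro added by this patch. */
#define for_each_sd_topology(tl) \
	for (tl = sched_domain_topology; tl->init; tl++)

int main(void)
{
	struct sched_domain_topology_level *tl;

	/* Every formerly open-coded loop now reads the same way. */
	for_each_sd_topology(tl)
		printf("topology level: %s\n", tl->name);

	return 0;
}
```

Because the table is sentinel-terminated, the macro needs no explicit length and every caller iterates the levels identically.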
Diffstat (limited to 'kernel/sched/core.c')
 -rw-r--r--  kernel/sched/core.c | 9
 1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 88c2c0ee5a52..547b7d3ff893 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5565,6 +5565,9 @@ static struct sched_domain_topology_level default_topology[] = {
 
 static struct sched_domain_topology_level *sched_domain_topology = default_topology;
 
+#define for_each_sd_topology(tl)			\
+	for (tl = sched_domain_topology; tl->init; tl++)
+
 #ifdef CONFIG_NUMA
 
 static int sched_domains_numa_levels;
@@ -5862,7 +5865,7 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 	struct sched_domain_topology_level *tl;
 	int j;
 
-	for (tl = sched_domain_topology; tl->init; tl++) {
+	for_each_sd_topology(tl) {
 		struct sd_data *sdd = &tl->data;
 
 		sdd->sd = alloc_percpu(struct sched_domain *);
@@ -5915,7 +5918,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
 	struct sched_domain_topology_level *tl;
 	int j;
 
-	for (tl = sched_domain_topology; tl->init; tl++) {
+	for_each_sd_topology(tl) {
 		struct sd_data *sdd = &tl->data;
 
 		for_each_cpu(j, cpu_map) {
@@ -5983,7 +5986,7 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 		struct sched_domain_topology_level *tl;
 
 		sd = NULL;
-		for (tl = sched_domain_topology; tl->init; tl++) {
+		for_each_sd_topology(tl) {
 			sd = build_sched_domain(tl, cpu_map, attr, sd, i);
 			if (tl == sched_domain_topology)
 				*per_cpu_ptr(d.sd, i) = sd;