author     Heiko Carstens <heiko.carstens@de.ibm.com>    2010-08-31 04:28:15 -0400
committer  Ingo Molnar <mingo@elte.hu>                   2010-09-09 14:41:18 -0400
commit     f269893c575167447cc9f6d1867e639fb5b6f0c5 (patch)
tree       948004a5365c688573fd5513160a1500f22f5131 /kernel/sched.c
parent     ed2d372c0738386b8a184a6a6bea9c16df6ffb68 (diff)
sched: Merge cpu_to_core_group functions
Merge and simplify the two cpu_to_core_group variants so that the
resulting function follows the same pattern as cpu_to_phys_group.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20100831082843.953617555@de.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
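For context on "the same pattern": cpu_to_phys_group already selects the group
id inside a single function body, switching on the config options with
preprocessor branches instead of duplicating whole function variants. A rough
sketch of that shape, reconstructed from memory of 2.6.36-era kernel/sched.c
rather than quoted from this page:

static int
cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
		  struct sched_group **sg, struct cpumask *mask)
{
	int group;
#ifdef CONFIG_SCHED_MC
	/* Group by physical core when multi-core scheduling is on. */
	cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
	group = cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
	/* Otherwise group by hardware-thread siblings. */
	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
	group = cpumask_first(mask);
#else
	group = cpu;
#endif
	if (sg)
		*sg = &per_cpu(sched_group_phys, group).sg;
	return group;
}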
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	18
1 file changed, 5 insertions(+), 13 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 8eef8e5512d4..1a0c084b1cf9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6552,31 +6552,23 @@ cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
 #ifdef CONFIG_SCHED_MC
 static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
 static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);
-#endif /* CONFIG_SCHED_MC */
 
-#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
 static int
 cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
 		  struct sched_group **sg, struct cpumask *mask)
 {
 	int group;
-
+#ifdef CONFIG_SCHED_SMT
 	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
 	group = cpumask_first(mask);
+#else
+	group = cpu;
+#endif
 	if (sg)
 		*sg = &per_cpu(sched_group_core, group).sg;
 	return group;
 }
-#elif defined(CONFIG_SCHED_MC)
-static int
-cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
-		  struct sched_group **sg, struct cpumask *unused)
-{
-	if (sg)
-		*sg = &per_cpu(sched_group_core, cpu).sg;
-	return cpu;
-}
-#endif
 
 static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
 static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);
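For readability, here is the merged cpu_to_core_group as it stands after this
patch, reconstructed from the added and context lines of the diff above, with
explanatory comments added (the per-CPU declarations come from the surrounding
context):

#ifdef CONFIG_SCHED_MC
static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);

static int
cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
		  struct sched_group **sg, struct cpumask *mask)
{
	int group;
#ifdef CONFIG_SCHED_SMT
	/*
	 * SMT: all hardware threads of a core share one group; use the
	 * first sibling present in cpu_map as the group id.
	 */
	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
	group = cpumask_first(mask);
#else
	/* No SMT: each CPU is a core of its own. */
	group = cpu;
#endif
	if (sg)
		*sg = &per_cpu(sched_group_core, group).sg;
	return group;
}
#endif /* CONFIG_SCHED_MC */

The !CONFIG_SCHED_SMT branch is what replaces the removed #elif variant:
without hardware threads the CPU number serves directly as the group id, and
the old variant's separate prototype with its unused cpumask argument goes
away; the merged function simply ignores mask when SMT is off.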