diff options
| author | Vincent Guittot <vincent.guittot@linaro.org> | 2012-07-10 09:08:40 -0400 |
|---|---|---|
| committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2012-07-12 15:38:09 -0400 |
| commit | 130d9aabf997bd8449ff4e877fe3c42df066805e (patch) | |
| tree | 3d26eb788dc0400fdc4c265a09e443483a0d8ef4 | |
| parent | b9a50f74905ad9126c91b495ece8a5f45434c643 (diff) | |
ARM: 7461/1: topology: Add arch_scale_freq_power function
Add infrastructure to be able to modify the cpu_power of each core
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Reviewed-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
| -rw-r--r-- | arch/arm/kernel/topology.c | 38 |
1 file changed, 37 insertions(+), 1 deletion(-)
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index 8200deaa14f6..51f23b3ed0a6 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c | |||
| @@ -22,6 +22,37 @@ | |||
| 22 | #include <asm/cputype.h> | 22 | #include <asm/cputype.h> |
| 23 | #include <asm/topology.h> | 23 | #include <asm/topology.h> |
| 24 | 24 | ||
| 25 | /* | ||
| 26 | * cpu power scale management | ||
| 27 | */ | ||
| 28 | |||
| 29 | /* | ||
| 30 | * cpu power table | ||
| 31 | * This per cpu data structure describes the relative capacity of each core. | ||
| 32 | * On a heterogeneous system, cores don't have the same computation capacity | ||
| 33 | * and we reflect that difference in the cpu_power field so the scheduler can | ||
| 34 | * take this difference into account during load balance. A per cpu structure | ||
| 35 | * is preferred because each CPU updates its own cpu_power field during the | ||
| 36 | * load balance except for idle cores. One idle core is selected to run the | ||
| 37 | * rebalance_domains for all idle cores and the cpu_power can be updated | ||
| 38 | * during this sequence. | ||
| 39 | */ | ||
| 40 | static DEFINE_PER_CPU(unsigned long, cpu_scale); | ||
| 41 | |||
| 42 | unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu) | ||
| 43 | { | ||
| 44 | return per_cpu(cpu_scale, cpu); | ||
| 45 | } | ||
| 46 | |||
| 47 | static void set_power_scale(unsigned int cpu, unsigned long power) | ||
| 48 | { | ||
| 49 | per_cpu(cpu_scale, cpu) = power; | ||
| 50 | } | ||
| 51 | |||
| 52 | /* | ||
| 53 | * cpu topology management | ||
| 54 | */ | ||
| 55 | |||
| 25 | #define MPIDR_SMP_BITMASK (0x3 << 30) | 56 | #define MPIDR_SMP_BITMASK (0x3 << 30) |
| 26 | #define MPIDR_SMP_VALUE (0x2 << 30) | 57 | #define MPIDR_SMP_VALUE (0x2 << 30) |
| 27 | 58 | ||
| @@ -41,6 +72,9 @@ | |||
| 41 | #define MPIDR_LEVEL2_MASK 0xFF | 72 | #define MPIDR_LEVEL2_MASK 0xFF |
| 42 | #define MPIDR_LEVEL2_SHIFT 16 | 73 | #define MPIDR_LEVEL2_SHIFT 16 |
| 43 | 74 | ||
| 75 | /* | ||
| 76 | * cpu topology table | ||
| 77 | */ | ||
| 44 | struct cputopo_arm cpu_topology[NR_CPUS]; | 78 | struct cputopo_arm cpu_topology[NR_CPUS]; |
| 45 | 79 | ||
| 46 | const struct cpumask *cpu_coregroup_mask(int cpu) | 80 | const struct cpumask *cpu_coregroup_mask(int cpu) |
| @@ -134,7 +168,7 @@ void init_cpu_topology(void) | |||
| 134 | { | 168 | { |
| 135 | unsigned int cpu; | 169 | unsigned int cpu; |
| 136 | 170 | ||
| 137 | /* init core mask */ | 171 | /* init core mask and power*/ |
| 138 | for_each_possible_cpu(cpu) { | 172 | for_each_possible_cpu(cpu) { |
| 139 | struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]); | 173 | struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]); |
| 140 | 174 | ||
| @@ -143,6 +177,8 @@ void init_cpu_topology(void) | |||
| 143 | cpu_topo->socket_id = -1; | 177 | cpu_topo->socket_id = -1; |
| 144 | cpumask_clear(&cpu_topo->core_sibling); | 178 | cpumask_clear(&cpu_topo->core_sibling); |
| 145 | cpumask_clear(&cpu_topo->thread_sibling); | 179 | cpumask_clear(&cpu_topo->thread_sibling); |
| 180 | |||
| 181 | set_power_scale(cpu, SCHED_POWER_SCALE); | ||
| 146 | } | 182 | } |
| 147 | smp_wmb(); | 183 | smp_wmb(); |
| 148 | } | 184 | } |
