| author | Juri Lelli <juri.lelli@arm.com> | 2017-05-31 12:59:31 -0400 |
| --- | --- | --- |
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2017-06-03 06:10:09 -0400 |
| commit | 4ca4f26a9c66103ca158689b7554f07f4968a32c (patch) | |
| tree | 870c9dbb8808a359ece803d09ef5c1cf77476cbc /drivers/base/arch_topology.c | |
| parent | 615ffd63149117aa5693d6672944966b490cdb66 (diff) | |
arm,arm64,drivers: add a prefix to drivers arch_topology interfaces
Now that some functions that deal with arch topology information live
under drivers, there is a clash of naming that might create confusion.
Tidy things up by creating a topology namespace for interfaces used by
arch code; achieve this by prepending a 'topology_' prefix to driver
interfaces.
Signed-off-by: Juri Lelli <juri.lelli@arm.com>
Acked-by: Russell King <rmk+kernel@armlinux.org.uk>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
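For reference, a minimal sketch of the renamed entry points as arch code consumes them after this change. The prototypes are copied from the definitions in the diff below; the assumption that they are declared together in include/linux/arch_topology.h is ours and not part of the patch shown here.

```c
/* Sketch only: prototypes taken from the diff; the grouping/header is assumed. */
struct device_node;
struct sched_domain;

unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu); /* was arch_scale_cpu_capacity() */
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);  /* was set_capacity_scale()      */
void topology_normalize_cpu_scale(void);                                /* was normalize_cpu_capacity()  */
int topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu); /* was parse_cpu_capacity()      */
```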
Diffstat (limited to 'drivers/base/arch_topology.c')
-rw-r--r-- drivers/base/arch_topology.c | 20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 76c19aa0d82f..d1c33a85059e 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -25,12 +25,12 @@
 static DEFINE_MUTEX(cpu_scale_mutex);
 static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
 
-unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
+unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu)
 {
 	return per_cpu(cpu_scale, cpu);
 }
 
-void set_capacity_scale(unsigned int cpu, unsigned long capacity)
+void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
 {
 	per_cpu(cpu_scale, cpu) = capacity;
 }
@@ -42,7 +42,7 @@ static ssize_t cpu_capacity_show(struct device *dev,
 	struct cpu *cpu = container_of(dev, struct cpu, dev);
 
 	return sprintf(buf, "%lu\n",
-		       arch_scale_cpu_capacity(NULL, cpu->dev.id));
+		       topology_get_cpu_scale(NULL, cpu->dev.id));
 }
 
 static ssize_t cpu_capacity_store(struct device *dev,
@@ -67,7 +67,7 @@ static ssize_t cpu_capacity_store(struct device *dev,
 
 	mutex_lock(&cpu_scale_mutex);
 	for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
-		set_capacity_scale(i, new_capacity);
+		topology_set_cpu_scale(i, new_capacity);
 	mutex_unlock(&cpu_scale_mutex);
 
 	return count;
@@ -98,7 +98,7 @@ static u32 capacity_scale;
 static u32 *raw_capacity;
 static bool cap_parsing_failed;
 
-void normalize_cpu_capacity(void)
+void topology_normalize_cpu_scale(void)
 {
 	u64 capacity;
 	int cpu;
@@ -113,14 +113,14 @@ void normalize_cpu_capacity(void)
 			 cpu, raw_capacity[cpu]);
 		capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
 			/ capacity_scale;
-		set_capacity_scale(cpu, capacity);
+		topology_set_cpu_scale(cpu, capacity);
 		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
-			cpu, arch_scale_cpu_capacity(NULL, cpu));
+			cpu, topology_get_cpu_scale(NULL, cpu));
 	}
 	mutex_unlock(&cpu_scale_mutex);
 }
 
-int __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
+int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
 {
 	int ret = 1;
 	u32 cpu_capacity;
@@ -185,12 +185,12 @@ init_cpu_capacity_callback(struct notifier_block *nb,
 			       cpus_to_visit,
 			       policy->related_cpus);
 		for_each_cpu(cpu, policy->related_cpus) {
-			raw_capacity[cpu] = arch_scale_cpu_capacity(NULL, cpu) *
+			raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) *
 				policy->cpuinfo.max_freq / 1000UL;
 			capacity_scale = max(raw_capacity[cpu], capacity_scale);
 		}
 		if (cpumask_empty(cpus_to_visit)) {
-			normalize_cpu_capacity();
+			topology_normalize_cpu_scale();
 			kfree(raw_capacity);
 			pr_debug("cpu_capacity: parsing done\n");
 			cap_parsing_done = true;
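A hypothetical caller sketch, not taken from this patch: arch device-tree parsing code that previously used the unprefixed helpers would simply switch to the topology_-prefixed names. Only the renamed calls come from the diff above; the surrounding loop, function name, and header grouping are illustrative assumptions.

```c
#include <linux/arch_topology.h>
#include <linux/cpumask.h>
#include <linux/of.h>

/* Illustrative only: walk the DT cpu nodes, feed each capacity value to the
 * renamed parser, then normalize once every CPU has been visited. */
static void __init example_parse_dt_cpu_capacity(void)
{
	struct device_node *cn;
	int cpu;

	for_each_possible_cpu(cpu) {
		cn = of_get_cpu_node(cpu, NULL);
		if (!cn)
			continue;
		topology_parse_cpu_capacity(cn, cpu);	/* was parse_cpu_capacity() */
		of_node_put(cn);
	}
	topology_normalize_cpu_scale();			/* was normalize_cpu_capacity() */
}
```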