author		Nicolas Pitre <nicolas.pitre@linaro.org>	2015-03-14 17:58:33 -0400
committer	Olof Johansson <olof@lixom.net>	2015-04-03 15:52:47 -0400
commit		41f26e2d94374f7a31d3dc0b03223df7006a83cd
tree		5857fe658aeb89d897c9d4f273a4391ec9fe4adc
parent		d3a875444ad8d5e64c5a932361ca579312e49801
ARM: vexpress: migrate TC2 to the new MCPM backend abstraction
Signed-off-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Olof Johansson <olof@lixom.net>
 arch/arm/mach-vexpress/tc2_pm.c | 291
 1 file changed, 81 insertions(+), 210 deletions(-)
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index 2fb78b4648cb..b3328cd46c33 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -18,7 +18,6 @@
 #include <linux/kernel.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
-#include <linux/spinlock.h>
 #include <linux/errno.h>
 #include <linux/irqchip/arm-gic.h>
 
@@ -44,101 +43,36 @@
 
 static void __iomem *scc;
 
-/*
- * We can't use regular spinlocks. In the switcher case, it is possible
- * for an outbound CPU to call power_down() after its inbound counterpart
- * is already live using the same logical CPU number which trips lockdep
- * debugging.
- */
-static arch_spinlock_t tc2_pm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
-
 #define TC2_CLUSTERS			2
 #define TC2_MAX_CPUS_PER_CLUSTER	3
 
 static unsigned int tc2_nr_cpus[TC2_CLUSTERS];
 
-/* Keep per-cpu usage count to cope with unordered up/down requests */
-static int tc2_pm_use_count[TC2_MAX_CPUS_PER_CLUSTER][TC2_CLUSTERS];
-
-#define tc2_cluster_unused(cluster) \
-	(!tc2_pm_use_count[0][cluster] && \
-	 !tc2_pm_use_count[1][cluster] && \
-	 !tc2_pm_use_count[2][cluster])
-
-static int tc2_pm_power_up(unsigned int cpu, unsigned int cluster)
+static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster)
 {
 	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
 	if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])
 		return -EINVAL;
-
-	/*
-	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
-	 * variant exists, we need to disable IRQs manually here.
-	 */
-	local_irq_disable();
-	arch_spin_lock(&tc2_pm_lock);
-
-	if (tc2_cluster_unused(cluster))
-		ve_spc_powerdown(cluster, false);
-
-	tc2_pm_use_count[cpu][cluster]++;
-	if (tc2_pm_use_count[cpu][cluster] == 1) {
-		ve_spc_set_resume_addr(cluster, cpu,
-				       virt_to_phys(mcpm_entry_point));
-		ve_spc_cpu_wakeup_irq(cluster, cpu, true);
-	} else if (tc2_pm_use_count[cpu][cluster] != 2) {
-		/*
-		 * The only possible values are:
-		 * 0 = CPU down
-		 * 1 = CPU (still) up
-		 * 2 = CPU requested to be up before it had a chance
-		 *     to actually make itself down.
-		 * Any other value is a bug.
-		 */
-		BUG();
-	}
-
-	arch_spin_unlock(&tc2_pm_lock);
-	local_irq_enable();
-
+	ve_spc_set_resume_addr(cluster, cpu,
+			       virt_to_phys(mcpm_entry_point));
+	ve_spc_cpu_wakeup_irq(cluster, cpu, true);
 	return 0;
 }
 
-static void tc2_pm_down(u64 residency)
+static int tc2_pm_cluster_powerup(unsigned int cluster)
 {
-	unsigned int mpidr, cpu, cluster;
-	bool last_man = false, skip_wfi = false;
-
-	mpidr = read_cpuid_mpidr();
-	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	pr_debug("%s: cluster %u\n", __func__, cluster);
+	if (cluster >= TC2_CLUSTERS)
+		return -EINVAL;
+	ve_spc_powerdown(cluster, false);
+	return 0;
+}
 
+static void tc2_pm_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
+{
 	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
 	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
-
-	__mcpm_cpu_going_down(cpu, cluster);
-
-	arch_spin_lock(&tc2_pm_lock);
-	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
-	tc2_pm_use_count[cpu][cluster]--;
-	if (tc2_pm_use_count[cpu][cluster] == 0) {
-		ve_spc_cpu_wakeup_irq(cluster, cpu, true);
-		if (tc2_cluster_unused(cluster)) {
-			ve_spc_powerdown(cluster, true);
-			ve_spc_global_wakeup_irq(true);
-			last_man = true;
-		}
-	} else if (tc2_pm_use_count[cpu][cluster] == 1) {
-		/*
-		 * A power_up request went ahead of us.
-		 * Even if we do not want to shut this CPU down,
-		 * the caller expects a certain state as if the WFI
-		 * was aborted. So let's continue with cache cleaning.
-		 */
-		skip_wfi = true;
-	} else
-		BUG();
-
+	ve_spc_cpu_wakeup_irq(cluster, cpu, true);
 	/*
 	 * If the CPU is committed to power down, make sure
 	 * the power controller will be in charge of waking it
@@ -146,55 +80,38 @@ static void tc2_pm_down(u64 residency)
 	 * to the CPU by disabling the GIC CPU IF to prevent wfi
 	 * from completing execution behind power controller back
 	 */
-	if (!skip_wfi)
-		gic_cpu_if_down();
-
-	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
-		arch_spin_unlock(&tc2_pm_lock);
-
-		if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
-			/*
-			 * On the Cortex-A15 we need to disable
-			 * L2 prefetching before flushing the cache.
-			 */
-			asm volatile(
-			"mcr p15, 1, %0, c15, c0, 3 \n\t"
-			"isb \n\t"
-			"dsb "
-			: : "r" (0x400) );
-		}
-
-		v7_exit_coherency_flush(all);
-
-		cci_disable_port_by_cpu(mpidr);
-
-		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
-	} else {
-		/*
-		 * If last man then undo any setup done previously.
-		 */
-		if (last_man) {
-			ve_spc_powerdown(cluster, false);
-			ve_spc_global_wakeup_irq(false);
-		}
-
-		arch_spin_unlock(&tc2_pm_lock);
-
-		v7_exit_coherency_flush(louis);
-	}
-
-	__mcpm_cpu_down(cpu, cluster);
+	gic_cpu_if_down();
+}
 
-	/* Now we are prepared for power-down, do it: */
-	if (!skip_wfi)
-		wfi();
+static void tc2_pm_cluster_powerdown_prepare(unsigned int cluster)
+{
+	pr_debug("%s: cluster %u\n", __func__, cluster);
+	BUG_ON(cluster >= TC2_CLUSTERS);
+	ve_spc_powerdown(cluster, true);
+	ve_spc_global_wakeup_irq(true);
+}
 
-	/* Not dead at this point? Let our caller cope. */
+static void tc2_pm_cpu_cache_disable(void)
+{
+	v7_exit_coherency_flush(louis);
 }
 
-static void tc2_pm_power_down(void)
+static void tc2_pm_cluster_cache_disable(void)
 {
-	tc2_pm_down(0);
+	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
+		/*
+		 * On the Cortex-A15 we need to disable
+		 * L2 prefetching before flushing the cache.
+		 */
+		asm volatile(
+		"mcr p15, 1, %0, c15, c0, 3 \n\t"
+		"isb \n\t"
+		"dsb "
+		: : "r" (0x400) );
+	}
+
+	v7_exit_coherency_flush(all);
+	cci_disable_port_by_cpu(read_cpuid_mpidr());
 }
 
 static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster)
@@ -217,27 +134,21 @@ static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
 	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
 
 	for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) {
+		pr_debug("%s(cpu=%u, cluster=%u): RESET_CTRL = 0x%08X\n",
+			 __func__, cpu, cluster,
+			 readl_relaxed(scc + RESET_CTRL));
+
 		/*
-		 * Only examine the hardware state if the target CPU has
-		 * caught up at least as far as tc2_pm_down():
+		 * We need the CPU to reach WFI, but the power
+		 * controller may put the cluster in reset and
+		 * power it off as soon as that happens, before
+		 * we have a chance to see STANDBYWFI.
+		 *
+		 * So we need to check for both conditions:
 		 */
-		if (ACCESS_ONCE(tc2_pm_use_count[cpu][cluster]) == 0) {
-			pr_debug("%s(cpu=%u, cluster=%u): RESET_CTRL = 0x%08X\n",
-				 __func__, cpu, cluster,
-				 readl_relaxed(scc + RESET_CTRL));
-
-			/*
-			 * We need the CPU to reach WFI, but the power
-			 * controller may put the cluster in reset and
-			 * power it off as soon as that happens, before
-			 * we have a chance to see STANDBYWFI.
-			 *
-			 * So we need to check for both conditions:
-			 */
-			if (tc2_core_in_reset(cpu, cluster) ||
-			    ve_spc_cpu_in_wfi(cpu, cluster))
-				return 0; /* success: the CPU is halted */
-		}
+		if (tc2_core_in_reset(cpu, cluster) ||
+		    ve_spc_cpu_in_wfi(cpu, cluster))
+			return 0; /* success: the CPU is halted */
 
 		/* Otherwise, wait and retry: */
 		msleep(POLL_MSEC);
@@ -246,72 +157,40 @@ static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
 	return -ETIMEDOUT; /* timeout */
 }
 
-static void tc2_pm_suspend(u64 residency)
+static void tc2_pm_cpu_suspend_prepare(unsigned int cpu, unsigned int cluster)
 {
-	unsigned int mpidr, cpu, cluster;
-
-	mpidr = read_cpuid_mpidr();
-	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 	ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point));
-	tc2_pm_down(residency);
 }
 
-static void tc2_pm_powered_up(void)
+static void tc2_pm_cpu_is_up(unsigned int cpu, unsigned int cluster)
 {
-	unsigned int mpidr, cpu, cluster;
-	unsigned long flags;
-
-	mpidr = read_cpuid_mpidr();
-	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-
 	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
 	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
-
-	local_irq_save(flags);
-	arch_spin_lock(&tc2_pm_lock);
-
-	if (tc2_cluster_unused(cluster)) {
-		ve_spc_powerdown(cluster, false);
-		ve_spc_global_wakeup_irq(false);
-	}
-
-	if (!tc2_pm_use_count[cpu][cluster])
-		tc2_pm_use_count[cpu][cluster] = 1;
-
 	ve_spc_cpu_wakeup_irq(cluster, cpu, false);
 	ve_spc_set_resume_addr(cluster, cpu, 0);
+}
 
-	arch_spin_unlock(&tc2_pm_lock);
-	local_irq_restore(flags);
+static void tc2_pm_cluster_is_up(unsigned int cluster)
+{
+	pr_debug("%s: cluster %u\n", __func__, cluster);
+	BUG_ON(cluster >= TC2_CLUSTERS);
+	ve_spc_powerdown(cluster, false);
+	ve_spc_global_wakeup_irq(false);
 }
 
 static const struct mcpm_platform_ops tc2_pm_power_ops = {
-	.power_up		= tc2_pm_power_up,
-	.power_down		= tc2_pm_power_down,
+	.cpu_powerup		= tc2_pm_cpu_powerup,
+	.cluster_powerup	= tc2_pm_cluster_powerup,
+	.cpu_suspend_prepare	= tc2_pm_cpu_suspend_prepare,
+	.cpu_powerdown_prepare	= tc2_pm_cpu_powerdown_prepare,
+	.cluster_powerdown_prepare = tc2_pm_cluster_powerdown_prepare,
+	.cpu_cache_disable	= tc2_pm_cpu_cache_disable,
+	.cluster_cache_disable	= tc2_pm_cluster_cache_disable,
 	.wait_for_powerdown	= tc2_pm_wait_for_powerdown,
-	.suspend		= tc2_pm_suspend,
-	.powered_up		= tc2_pm_powered_up,
+	.cpu_is_up		= tc2_pm_cpu_is_up,
+	.cluster_is_up		= tc2_pm_cluster_is_up,
 };
 
-static bool __init tc2_pm_usage_count_init(void)
-{
-	unsigned int mpidr, cpu, cluster;
-
-	mpidr = read_cpuid_mpidr();
-	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-
-	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
-	if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
-		pr_err("%s: boot CPU is out of bound!\n", __func__);
-		return false;
-	}
-	tc2_pm_use_count[cpu][cluster] = 1;
-	return true;
-}
-
 /*
  * Enable cluster-level coherency, in preparation for turning on the MMU.
  */
@@ -323,23 +202,9 @@ static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
 "	b	cci_enable_port_for_self ");
 }
 
-static void __init tc2_cache_off(void)
-{
-	pr_info("TC2: disabling cache during MCPM loopback test\n");
-	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
-		/* disable L2 prefetching on the Cortex-A15 */
-		asm volatile(
-		"mcr p15, 1, %0, c15, c0, 3 \n\t"
-		"isb \n\t"
-		"dsb "
-		: : "r" (0x400) );
-	}
-	v7_exit_coherency_flush(all);
-	cci_disable_port_by_cpu(read_cpuid_mpidr());
-}
-
 static int __init tc2_pm_init(void)
 {
+	unsigned int mpidr, cpu, cluster;
 	int ret, irq;
 	u32 a15_cluster_id, a7_cluster_id, sys_info;
 	struct device_node *np;
@@ -379,14 +244,20 @@ static int __init tc2_pm_init(void)
 	if (!cci_probed())
 		return -ENODEV;
 
-	if (!tc2_pm_usage_count_init())
+	mpidr = read_cpuid_mpidr();
+	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+	if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
+		pr_err("%s: boot CPU is out of bound!\n", __func__);
 		return -EINVAL;
+	}
 
 	ret = mcpm_platform_register(&tc2_pm_power_ops);
 	if (!ret) {
 		mcpm_sync_init(tc2_pm_power_up_setup);
 		/* test if we can (re)enable the CCI on our own */
-		BUG_ON(mcpm_loopback(tc2_cache_off) != 0);
+		BUG_ON(mcpm_loopback(tc2_pm_cluster_cache_disable) != 0);
 		pr_info("TC2 power management initialized\n");
 	}
 	return ret;
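
For readers migrating another platform from the old interface, the sketch below (not part of this patch) shows the general shape of a backend written against the new abstraction: small, stateless per-CPU and per-cluster callbacks, with the use counting, locking and last-man election that the old TC2 code carried by hand now handled in the MCPM core. The myplat_pwrc_*() helpers and myplat_pm_power_up_setup() are hypothetical stand-ins for a platform's power-controller interface (TC2's equivalents are the ve_spc_*() calls and tc2_pm_power_up_setup()), and the header list is approximate; only the mcpm_platform_ops fields, mcpm_platform_register(), mcpm_sync_init(), mcpm_loopback() and mcpm_entry_point are taken from the patch itself. The suspend, is_up and wait_for_powerdown hooks seen above are omitted for brevity.

/* Illustrative sketch only; the myplat_* names are hypothetical (see note above). */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>

#include <asm/mcpm.h>
#include <asm/cacheflush.h>

/* Hypothetical power-controller hooks, provided elsewhere by the platform. */
extern void myplat_pwrc_set_resume_addr(unsigned int cluster, unsigned int cpu,
					unsigned long addr);
extern void myplat_pwrc_cpu_wakeup_irq(unsigned int cluster, unsigned int cpu,
				       bool enable);
extern void myplat_pwrc_cluster_powerdown(unsigned int cluster, bool enable);
extern void myplat_pm_power_up_setup(unsigned int affinity_level);

static int myplat_pm_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
	/* Arm the wake-up path; the MCPM core already holds its own lock. */
	myplat_pwrc_set_resume_addr(cluster, cpu,
				    virt_to_phys(mcpm_entry_point));
	myplat_pwrc_cpu_wakeup_irq(cluster, cpu, true);
	return 0;
}

static int myplat_pm_cluster_powerup(unsigned int cluster)
{
	/* Cancel any pending cluster power-down request. */
	myplat_pwrc_cluster_powerdown(cluster, false);
	return 0;
}

static void myplat_pm_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
{
	/* Make sure the power controller can wake this CPU up again. */
	myplat_pwrc_cpu_wakeup_irq(cluster, cpu, true);
}

static void myplat_pm_cluster_powerdown_prepare(unsigned int cluster)
{
	myplat_pwrc_cluster_powerdown(cluster, true);
}

static void myplat_pm_cpu_cache_disable(void)
{
	/* Flush and disable only this CPU's own level of cache. */
	v7_exit_coherency_flush(louis);
}

static void myplat_pm_cluster_cache_disable(void)
{
	/* Last CPU in the cluster: flush all cache levels it owns. */
	v7_exit_coherency_flush(all);
}

static const struct mcpm_platform_ops myplat_pm_power_ops = {
	.cpu_powerup		= myplat_pm_cpu_powerup,
	.cluster_powerup	= myplat_pm_cluster_powerup,
	.cpu_powerdown_prepare	= myplat_pm_cpu_powerdown_prepare,
	.cluster_powerdown_prepare = myplat_pm_cluster_powerdown_prepare,
	.cpu_cache_disable	= myplat_pm_cpu_cache_disable,
	.cluster_cache_disable	= myplat_pm_cluster_cache_disable,
};

static int __init myplat_pm_init(void)
{
	int ret = mcpm_platform_register(&myplat_pm_power_ops);
	if (!ret) {
		mcpm_sync_init(myplat_pm_power_up_setup);
		/* Optional self-test, mirroring what TC2 does above. */
		ret = mcpm_loopback(myplat_pm_cluster_cache_disable);
	}
	return ret;
}
early_initcall(myplat_pm_init);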