author     Nicolas Pitre <nicolas.pitre@linaro.org>  2015-04-28 14:11:07 -0400
committer  Nicolas Pitre <nicolas.pitre@linaro.org>  2015-05-06 11:45:46 -0400
commit     7cc8b991cdc985aaa73bf9c429c810cd442fb74d (patch)
tree       8b5f9290847562b1cd0000581b64b6febc95a0f8
parent     77404d81cadf192cc1261d6269f622a06b83cdd5 (diff)
ARM: MCPM: make internal helpers private to the core code
This concerns the following helpers:

	__mcpm_cpu_going_down()
	__mcpm_cpu_down()
	__mcpm_outbound_enter_critical()
	__mcpm_outbound_leave_critical()
	__mcpm_cluster_state()

They are and should only be used by the core code now. Therefore their
declarations are removed from mcpm.h and their definitions are made static,
hence the need to move them before their users, which accounts for the bulk
of this patch.

This left the mcpm_sync_struct definition at an odd location, therefore it
is moved as well, with some comment clarifications.

Signed-off-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Dave Martin <Dave.Martin@arm.com>
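For context, here is a minimal sketch (not part of this patch) of the teardown
ordering the core code drives through these now-static helpers, following the
call ordering documented in their comments. The example_cpu_power_down()
wrapper and the platform_*_cache_flush() calls are hypothetical stand-ins for
the real mcpm_cpu_power_down() path and the mcpm_platform_ops callbacks:

/*
 * Hypothetical illustration only: shows the ordering in which the
 * now-private helpers are used during CPU/cluster teardown.  Names
 * prefixed with example_/platform_ are stand-ins, not kernel APIs.
 */
static void example_cpu_power_down(unsigned int cpu, unsigned int cluster,
				   bool last_man)
{
	/* Commit to tearing down this CPU; its cache is still on. */
	__mcpm_cpu_going_down(cpu, cluster);

	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
		/* Last man won the race: safe to tear down the cluster. */
		platform_cluster_cache_flush();	/* stand-in callback */
		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	} else {
		/* Not last man, or an inbound CPU was seen: local teardown only. */
		platform_cpu_cache_flush();	/* stand-in callback */
	}

	/* CPU cache is now off; signal that power-off is safe. */
	__mcpm_cpu_down(cpu, cluster);
}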
-rw-r--r--  arch/arm/common/mcpm_entry.c  | 229
-rw-r--r--  arch/arm/include/asm/mcpm.h   |  52
2 files changed, 138 insertions(+), 143 deletions(-)
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 0908f96278c4..c5fe2e33e6c3 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -20,6 +20,121 @@
 #include <asm/cputype.h>
 #include <asm/suspend.h>
 
+
+struct sync_struct mcpm_sync;
+
+/*
+ * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
+ * This must be called at the point of committing to teardown of a CPU.
+ * The CPU cache (SCTRL.C bit) is expected to still be active.
+ */
+static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
+{
+	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
+	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+}
+
+/*
+ * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
+ *    cluster can be torn down without disrupting this CPU.
+ * To avoid deadlocks, this must be called before a CPU is powered down.
+ * The CPU cache (SCTRL.C bit) is expected to be off.
+ * However L2 cache might or might not be active.
+ */
+static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
+{
+	dmb();
+	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
+	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+	sev();
+}
+
+/*
+ * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
+ * @state: the final state of the cluster:
+ *    CLUSTER_UP: no destructive teardown was done and the cluster has been
+ *        restored to the previous state (CPU cache still active); or
+ *    CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
+ *        (CPU cache disabled, L2 cache either enabled or disabled).
+ */
+static void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
+{
+	dmb();
+	mcpm_sync.clusters[cluster].cluster = state;
+	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
+	sev();
+}
+
+/*
+ * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
+ * This function should be called by the last man, after local CPU teardown
+ * is complete.  CPU cache expected to be active.
+ *
+ * Returns:
+ *     false: the critical section was not entered because an inbound CPU was
+ *         observed, or the cluster is already being set up;
+ *     true: the critical section was entered: it is now safe to tear down the
+ *         cluster.
+ */
+static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
+{
+	unsigned int i;
+	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];
+
+	/* Warn inbound CPUs that the cluster is being torn down: */
+	c->cluster = CLUSTER_GOING_DOWN;
+	sync_cache_w(&c->cluster);
+
+	/* Back out if the inbound cluster is already in the critical region: */
+	sync_cache_r(&c->inbound);
+	if (c->inbound == INBOUND_COMING_UP)
+		goto abort;
+
+	/*
+	 * Wait for all CPUs to get out of the GOING_DOWN state, so that local
+	 * teardown is complete on each CPU before tearing down the cluster.
+	 *
+	 * If any CPU has been woken up again from the DOWN state, then we
+	 * shouldn't be taking the cluster down at all: abort in that case.
+	 */
+	sync_cache_r(&c->cpus);
+	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
+		int cpustate;
+
+		if (i == cpu)
+			continue;
+
+		while (1) {
+			cpustate = c->cpus[i].cpu;
+			if (cpustate != CPU_GOING_DOWN)
+				break;
+
+			wfe();
+			sync_cache_r(&c->cpus[i].cpu);
+		}
+
+		switch (cpustate) {
+		case CPU_DOWN:
+			continue;
+
+		default:
+			goto abort;
+		}
+	}
+
+	return true;
+
+abort:
+	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
+	return false;
+}
+
+static int __mcpm_cluster_state(unsigned int cluster)
+{
+	sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
+	return mcpm_sync.clusters[cluster].cluster;
+}
+
 extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
 
 void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
@@ -299,120 +414,6 @@ int __init mcpm_loopback(void (*cache_disable)(void))
 
 #endif
 
-struct sync_struct mcpm_sync;
-
-/*
- * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
- * This must be called at the point of committing to teardown of a CPU.
- * The CPU cache (SCTRL.C bit) is expected to still be active.
- */
-void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
-{
-	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
-	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
-}
-
-/*
- * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
- *    cluster can be torn down without disrupting this CPU.
- * To avoid deadlocks, this must be called before a CPU is powered down.
- * The CPU cache (SCTRL.C bit) is expected to be off.
- * However L2 cache might or might not be active.
- */
-void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
-{
-	dmb();
-	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
-	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
-	sev();
-}
-
-/*
- * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
- * @state: the final state of the cluster:
- *    CLUSTER_UP: no destructive teardown was done and the cluster has been
- *        restored to the previous state (CPU cache still active); or
- *    CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
- *        (CPU cache disabled, L2 cache either enabled or disabled).
- */
-void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
-{
-	dmb();
-	mcpm_sync.clusters[cluster].cluster = state;
-	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
-	sev();
-}
-
-/*
- * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
- * This function should be called by the last man, after local CPU teardown
- * is complete.  CPU cache expected to be active.
- *
- * Returns:
- *     false: the critical section was not entered because an inbound CPU was
- *         observed, or the cluster is already being set up;
- *     true: the critical section was entered: it is now safe to tear down the
- *         cluster.
- */
-bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
-{
-	unsigned int i;
-	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];
-
-	/* Warn inbound CPUs that the cluster is being torn down: */
-	c->cluster = CLUSTER_GOING_DOWN;
-	sync_cache_w(&c->cluster);
-
-	/* Back out if the inbound cluster is already in the critical region: */
-	sync_cache_r(&c->inbound);
-	if (c->inbound == INBOUND_COMING_UP)
-		goto abort;
-
-	/*
-	 * Wait for all CPUs to get out of the GOING_DOWN state, so that local
-	 * teardown is complete on each CPU before tearing down the cluster.
-	 *
-	 * If any CPU has been woken up again from the DOWN state, then we
-	 * shouldn't be taking the cluster down at all: abort in that case.
-	 */
-	sync_cache_r(&c->cpus);
-	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
-		int cpustate;
-
-		if (i == cpu)
-			continue;
-
-		while (1) {
-			cpustate = c->cpus[i].cpu;
-			if (cpustate != CPU_GOING_DOWN)
-				break;
-
-			wfe();
-			sync_cache_r(&c->cpus[i].cpu);
-		}
-
-		switch (cpustate) {
-		case CPU_DOWN:
-			continue;
-
-		default:
-			goto abort;
-		}
-	}
-
-	return true;
-
-abort:
-	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
-	return false;
-}
-
-int __mcpm_cluster_state(unsigned int cluster)
-{
-	sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
-	return mcpm_sync.clusters[cluster].cluster;
-}
-
 extern unsigned long mcpm_power_up_setup_phys;
 
 int __init mcpm_sync_init(
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index e2118c941dbf..6a40d5f8db60 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -245,35 +245,6 @@ struct mcpm_platform_ops {
  */
 int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);
 
-/* Synchronisation structures for coordinating safe cluster setup/teardown: */
-
-/*
- * When modifying this structure, make sure you update the MCPM_SYNC_ defines
- * to match.
- */
-struct mcpm_sync_struct {
-	/* individual CPU states */
-	struct {
-		s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
-	} cpus[MAX_CPUS_PER_CLUSTER];
-
-	/* cluster state */
-	s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);
-
-	/* inbound-side state */
-	s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
-};
-
-struct sync_struct {
-	struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
-};
-
-void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);
-void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);
-void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
-bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);
-int __mcpm_cluster_state(unsigned int cluster);
-
 /**
  * mcpm_sync_init - Initialize the cluster synchronization support
  *
@@ -312,6 +283,29 @@ int __init mcpm_loopback(void (*cache_disable)(void));
 
 void __init mcpm_smp_set_ops(void);
 
+/*
+ * Synchronisation structures for coordinating safe cluster setup/teardown.
+ * This is private to the MCPM core code and shared between C and assembly.
+ * When modifying this structure, make sure you update the MCPM_SYNC_ defines
+ * to match.
+ */
+struct mcpm_sync_struct {
+	/* individual CPU states */
+	struct {
+		s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
+	} cpus[MAX_CPUS_PER_CLUSTER];
+
+	/* cluster state */
+	s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);
+
+	/* inbound-side state */
+	s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
+};
+
+struct sync_struct {
+	struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
+};
+
 #else
 
 /*