Diffstat (limited to 'arch/arm/common/mcpm_entry.c')
-rw-r--r--   arch/arm/common/mcpm_entry.c | 229
1 file changed, 115 insertions(+), 114 deletions(-)
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 0908f96278c4..c5fe2e33e6c3 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -20,6 +20,121 @@
 #include <asm/cputype.h>
 #include <asm/suspend.h>
 
+
+struct sync_struct mcpm_sync;
+
+/*
+ * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
+ * This must be called at the point of committing to teardown of a CPU.
+ * The CPU cache (SCTRL.C bit) is expected to still be active.
+ */
+static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
+{
+	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
+	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+}
+
+/*
+ * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
+ * cluster can be torn down without disrupting this CPU.
+ * To avoid deadlocks, this must be called before a CPU is powered down.
+ * The CPU cache (SCTRL.C bit) is expected to be off.
+ * However L2 cache might or might not be active.
+ */
+static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
+{
+	dmb();
+	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
+	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+	sev();
+}
+
+/*
+ * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
+ * @state: the final state of the cluster:
+ *     CLUSTER_UP: no destructive teardown was done and the cluster has been
+ *         restored to the previous state (CPU cache still active); or
+ *     CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
+ *         (CPU cache disabled, L2 cache either enabled or disabled).
+ */
+static void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
+{
+	dmb();
+	mcpm_sync.clusters[cluster].cluster = state;
+	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
+	sev();
+}
+
+/*
+ * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
+ * This function should be called by the last man, after local CPU teardown
+ * is complete.  CPU cache expected to be active.
+ *
+ * Returns:
+ *     false: the critical section was not entered because an inbound CPU was
+ *         observed, or the cluster is already being set up;
+ *     true: the critical section was entered: it is now safe to tear down the
+ *         cluster.
+ */
+static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
+{
+	unsigned int i;
+	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];
+
+	/* Warn inbound CPUs that the cluster is being torn down: */
+	c->cluster = CLUSTER_GOING_DOWN;
+	sync_cache_w(&c->cluster);
+
+	/* Back out if the inbound cluster is already in the critical region: */
+	sync_cache_r(&c->inbound);
+	if (c->inbound == INBOUND_COMING_UP)
+		goto abort;
+
+	/*
+	 * Wait for all CPUs to get out of the GOING_DOWN state, so that local
+	 * teardown is complete on each CPU before tearing down the cluster.
+	 *
+	 * If any CPU has been woken up again from the DOWN state, then we
+	 * shouldn't be taking the cluster down at all: abort in that case.
+	 */
+	sync_cache_r(&c->cpus);
+	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
+		int cpustate;
+
+		if (i == cpu)
+			continue;
+
+		while (1) {
+			cpustate = c->cpus[i].cpu;
+			if (cpustate != CPU_GOING_DOWN)
+				break;
+
+			wfe();
+			sync_cache_r(&c->cpus[i].cpu);
+		}
+
+		switch (cpustate) {
+		case CPU_DOWN:
+			continue;
+
+		default:
+			goto abort;
+		}
+	}
+
+	return true;
+
+abort:
+	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
+	return false;
+}
+
+static int __mcpm_cluster_state(unsigned int cluster)
+{
+	sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
+	return mcpm_sync.clusters[cluster].cluster;
+}
+
 extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
 
 void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
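The helpers added in the hunk above are only meaningful in a particular call order on the outbound (power-down) path, as their comments describe. The sketch below illustrates that order; it is not code from this file, and mcpm_last_man(), platform_cpu_off() and platform_cluster_off() are hypothetical placeholders for the platform back-end hooks and the last-man test.

/*
 * Illustrative outbound (power-down) sequence built on the helpers above.
 * mcpm_last_man(), platform_cpu_off() and platform_cluster_off() are
 * hypothetical stand-ins; the platform hooks are assumed to flush and
 * disable the local cache(s) before returning.
 */
static void example_cpu_power_down(unsigned int cpu, unsigned int cluster)
{
	/* Commit to tearing this CPU down; its cache is still on. */
	__mcpm_cpu_going_down(cpu, cluster);

	if (mcpm_last_man(cluster) &&
	    __mcpm_outbound_enter_critical(cpu, cluster)) {
		/* Last man: safe to tear the whole cluster down. */
		platform_cluster_off(cluster);
		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	} else {
		/* Other CPUs are still up, or an inbound CPU was seen:
		 * only this CPU goes down. */
		platform_cpu_off(cpu, cluster);
	}

	/* Local teardown done, cache off: the CPU may now be powered off. */
	__mcpm_cpu_down(cpu, cluster);

	/* The caller would now enter WFI and let the power controller act. */
}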
@@ -299,120 +414,6 @@ int __init mcpm_loopback(void (*cache_disable)(void))
 
 #endif
 
-struct sync_struct mcpm_sync;
-
-/*
- * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
- * This must be called at the point of committing to teardown of a CPU.
- * The CPU cache (SCTRL.C bit) is expected to still be active.
- */
-void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
-{
-	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
-	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
-}
-
-/*
- * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
- * cluster can be torn down without disrupting this CPU.
- * To avoid deadlocks, this must be called before a CPU is powered down.
- * The CPU cache (SCTRL.C bit) is expected to be off.
- * However L2 cache might or might not be active.
- */
-void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
-{
-	dmb();
-	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
-	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
-	sev();
-}
-
-/*
- * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
- * @state: the final state of the cluster:
- *     CLUSTER_UP: no destructive teardown was done and the cluster has been
- *         restored to the previous state (CPU cache still active); or
- *     CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
- *         (CPU cache disabled, L2 cache either enabled or disabled).
- */
-void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
-{
-	dmb();
-	mcpm_sync.clusters[cluster].cluster = state;
-	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
-	sev();
-}
-
-/*
- * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
- * This function should be called by the last man, after local CPU teardown
- * is complete.  CPU cache expected to be active.
- *
- * Returns:
- *     false: the critical section was not entered because an inbound CPU was
- *         observed, or the cluster is already being set up;
- *     true: the critical section was entered: it is now safe to tear down the
- *         cluster.
- */
-bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
-{
-	unsigned int i;
-	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];
-
-	/* Warn inbound CPUs that the cluster is being torn down: */
-	c->cluster = CLUSTER_GOING_DOWN;
-	sync_cache_w(&c->cluster);
-
-	/* Back out if the inbound cluster is already in the critical region: */
-	sync_cache_r(&c->inbound);
-	if (c->inbound == INBOUND_COMING_UP)
-		goto abort;
-
-	/*
-	 * Wait for all CPUs to get out of the GOING_DOWN state, so that local
-	 * teardown is complete on each CPU before tearing down the cluster.
-	 *
-	 * If any CPU has been woken up again from the DOWN state, then we
-	 * shouldn't be taking the cluster down at all: abort in that case.
-	 */
-	sync_cache_r(&c->cpus);
-	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
-		int cpustate;
-
-		if (i == cpu)
-			continue;
-
-		while (1) {
-			cpustate = c->cpus[i].cpu;
-			if (cpustate != CPU_GOING_DOWN)
-				break;
-
-			wfe();
-			sync_cache_r(&c->cpus[i].cpu);
-		}
-
-		switch (cpustate) {
-		case CPU_DOWN:
-			continue;
-
-		default:
-			goto abort;
-		}
-	}
-
-	return true;
-
-abort:
-	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
-	return false;
-}
-
-int __mcpm_cluster_state(unsigned int cluster)
-{
-	sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
-	return mcpm_sync.clusters[cluster].cluster;
-}
-
 extern unsigned long mcpm_power_up_setup_phys;
 
 int __init mcpm_sync_init(
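For completeness, the inbound side of the same handshake (the first CPU coming back up in a cluster) is handled by the low-level entry code rather than by the C functions in this file. A rough C sketch of that handshake follows, under the assumption that first_man_setup() stands in for the real cluster restore work; it is illustrative only, not the kernel's implementation.

/*
 * Illustrative inbound (power-up) handshake, mirroring what the low-level
 * entry path does before CPUs in a waking cluster may run normally.
 * first_man_setup() is a hypothetical placeholder for cluster-level
 * restore work (e.g. re-enabling coherency and the L2).
 */
static void example_inbound_entry(unsigned int cpu, unsigned int cluster)
{
	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];

	/* Tell a concurrent outbound "last man" that we are coming up. */
	c->inbound = INBOUND_COMING_UP;
	sync_cache_w(&c->inbound);

	/* Wait for any in-progress cluster teardown to reach a final state. */
	sync_cache_r(&c->cluster);
	while (c->cluster == CLUSTER_GOING_DOWN) {
		wfe();
		sync_cache_r(&c->cluster);
	}

	/* First man restores the cluster if it was fully torn down. */
	if (c->cluster == CLUSTER_DOWN) {
		first_man_setup(cluster);
		c->cluster = CLUSTER_UP;
		sync_cache_w(&c->cluster);
	}

	/* Inbound critical section is over; wake any waiting outbound CPU. */
	c->inbound = INBOUND_NOT_COMING_UP;
	sync_cache_w(&c->inbound);
	sev();
}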