Diffstat (limited to 'arch/arm/common/mcpm_entry.c')
-rw-r--r--  arch/arm/common/mcpm_entry.c | 202
1 file changed, 170 insertions(+), 32 deletions(-)
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 3c165fc2dce2..5f8a52ac7edf 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -55,22 +55,81 @@ bool mcpm_is_available(void)
 	return (platform_ops) ? true : false;
 }
 
+/*
+ * We can't use regular spinlocks. In the switcher case, it is possible
+ * for an outbound CPU to call power_down() after its inbound counterpart
+ * is already live using the same logical CPU number which trips lockdep
+ * debugging.
+ */
+static arch_spinlock_t mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+
+static int mcpm_cpu_use_count[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
+
+static inline bool mcpm_cluster_unused(unsigned int cluster)
+{
+	int i, cnt;
+	for (i = 0, cnt = 0; i < MAX_CPUS_PER_CLUSTER; i++)
+		cnt |= mcpm_cpu_use_count[cluster][i];
+	return !cnt;
+}
+
 int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
 {
+	bool cpu_is_down, cluster_is_down;
+	int ret = 0;
+
 	if (!platform_ops)
 		return -EUNATCH; /* try not to shadow power_up errors */
 	might_sleep();
-	return platform_ops->power_up(cpu, cluster);
+
+	/* backward compatibility callback */
+	if (platform_ops->power_up)
+		return platform_ops->power_up(cpu, cluster);
+
+	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+
+	/*
+	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
+	 * variant exists, we need to disable IRQs manually here.
+	 */
+	local_irq_disable();
+	arch_spin_lock(&mcpm_lock);
+
+	cpu_is_down = !mcpm_cpu_use_count[cluster][cpu];
+	cluster_is_down = mcpm_cluster_unused(cluster);
+
+	mcpm_cpu_use_count[cluster][cpu]++;
+	/*
+	 * The only possible values are:
+	 * 0 = CPU down
+	 * 1 = CPU (still) up
+	 * 2 = CPU requested to be up before it had a chance
+	 *     to actually make itself down.
+	 * Any other value is a bug.
+	 */
+	BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 1 &&
+	       mcpm_cpu_use_count[cluster][cpu] != 2);
+
+	if (cluster_is_down)
+		ret = platform_ops->cluster_powerup(cluster);
+	if (cpu_is_down && !ret)
+		ret = platform_ops->cpu_powerup(cpu, cluster);
+
+	arch_spin_unlock(&mcpm_lock);
+	local_irq_enable();
+	return ret;
 }
 
 typedef void (*phys_reset_t)(unsigned long);
 
 void mcpm_cpu_power_down(void)
 {
+	unsigned int mpidr, cpu, cluster;
+	bool cpu_going_down, last_man;
 	phys_reset_t phys_reset;
 
-	if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down))
+	if (WARN_ON_ONCE(!platform_ops))
 		return;
 	BUG_ON(!irqs_disabled());
 
 	/*
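For reference, with the finer-grained cpu_powerup()/cluster_powerup() hooks used above, a backend for a hypothetical "foo" platform could be registered roughly as follows. This is a sketch only: the foo_* helpers and their hardware pokes are assumptions, while the mcpm_platform_ops field names and mcpm_platform_register() are the MCPM API this file relies on.

#include <linux/init.h>
#include <asm/mcpm.h>

/* Hypothetical backend helpers; a real one would program the PMU/reset logic. */
static int foo_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
	/* Called under mcpm_lock with IRQs off: power on and deassert reset for one CPU. */
	return 0;
}

static int foo_cluster_powerup(unsigned int cluster)
{
	/* First-man path: bring up cluster-level resources (L2, coherency port, ...). */
	return 0;
}

static const struct mcpm_platform_ops foo_pm_ops = {
	.cpu_powerup		= foo_cpu_powerup,
	.cluster_powerup	= foo_cluster_powerup,
	/* .cpu_powerdown_prepare, .cluster_powerdown_prepare,
	 * .cpu_cache_disable, .cluster_cache_disable, ... as needed
	 */
};

static int __init foo_mcpm_init(void)
{
	return mcpm_platform_register(&foo_pm_ops);
}
early_initcall(foo_mcpm_init);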
@@ -79,28 +138,65 @@ void mcpm_cpu_power_down(void)
 	 */
 	setup_mm_for_reboot();
 
-	platform_ops->power_down();
+	/* backward compatibility callback */
+	if (platform_ops->power_down) {
+		platform_ops->power_down();
+		goto not_dead;
+	}
+
+	mpidr = read_cpuid_mpidr();
+	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+
+	__mcpm_cpu_going_down(cpu, cluster);
 
+	arch_spin_lock(&mcpm_lock);
+	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
+
+	mcpm_cpu_use_count[cluster][cpu]--;
+	BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 0 &&
+	       mcpm_cpu_use_count[cluster][cpu] != 1);
+	cpu_going_down = !mcpm_cpu_use_count[cluster][cpu];
+	last_man = mcpm_cluster_unused(cluster);
+
+	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
+		platform_ops->cpu_powerdown_prepare(cpu, cluster);
+		platform_ops->cluster_powerdown_prepare(cluster);
+		arch_spin_unlock(&mcpm_lock);
+		platform_ops->cluster_cache_disable();
+		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
+	} else {
+		if (cpu_going_down)
+			platform_ops->cpu_powerdown_prepare(cpu, cluster);
+		arch_spin_unlock(&mcpm_lock);
+		/*
+		 * If cpu_going_down is false here, that means a power_up
+		 * request raced ahead of us. Even if we do not want to
+		 * shut this CPU down, the caller still expects execution
+		 * to return through the system resume entry path, like
+		 * when the WFI is aborted due to a new IRQ or the like..
+		 * So let's continue with cache cleaning in all cases.
+		 */
+		platform_ops->cpu_cache_disable();
+	}
+
+	__mcpm_cpu_down(cpu, cluster);
+
+	/* Now we are prepared for power-down, do it: */
+	if (cpu_going_down)
+		wfi();
+
+not_dead:
 	/*
 	 * It is possible for a power_up request to happen concurrently
 	 * with a power_down request for the same CPU. In this case the
-	 * power_down method might not be able to actually enter a
-	 * powered down state with the WFI instruction if the power_up
-	 * method has removed the required reset condition. The
-	 * power_down method is then allowed to return. We must perform
-	 * a re-entry in the kernel as if the power_up method just had
-	 * deasserted reset on the CPU.
-	 *
-	 * To simplify race issues, the platform specific implementation
-	 * must accommodate for the possibility of unordered calls to
-	 * power_down and power_up with a usage count. Therefore, if a
-	 * call to power_up is issued for a CPU that is not down, then
-	 * the next call to power_down must not attempt a full shutdown
-	 * but only do the minimum (normally disabling L1 cache and CPU
-	 * coherency) and return just as if a concurrent power_up request
-	 * had happened as described above.
+	 * CPU might not be able to actually enter a powered down state
+	 * with the WFI instruction if the power_up request has removed
+	 * the required reset condition. We must perform a re-entry in
+	 * the kernel as if the power_up method just had deasserted reset
+	 * on the CPU.
 	 */
-
 	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
 	phys_reset(virt_to_phys(mcpm_entry_point));
 
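The use count exists because power_up and power_down run in contexts that can race: secondary bring-up is requested with IRQs enabled from another CPU, while the dying CPU calls mcpm_cpu_power_down() on itself with IRQs off. A condensed caller-side sketch, loosely modelled on the generic MCPM SMP glue (example_* names are illustrative; wakeup IPIs and error handling are elided):

#include <linux/sched.h>
#include <asm/cputype.h>
#include <asm/mcpm.h>
#include <asm/smp_plat.h>

extern void secondary_startup(void);	/* standard ARM secondary entry stub */

static int example_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned int mpidr = cpu_logical_map(cpu);
	unsigned int pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	unsigned int pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	mcpm_set_entry_vector(pcpu, pcluster, secondary_startup);
	/* May race with an in-flight power_down of the same CPU; the use count absorbs it. */
	return mcpm_cpu_power_up(pcpu, pcluster);
}

static void example_cpu_die(unsigned int cpu)
{
	/* Runs on the dying CPU itself, with IRQs already disabled. */
	mcpm_cpu_power_down();
	/* Not expected to return: even an aborted WFI re-enters via mcpm_entry_point. */
	BUG();
}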
@@ -125,26 +221,66 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
 
 void mcpm_cpu_suspend(u64 expected_residency)
 {
-	phys_reset_t phys_reset;
-
-	if (WARN_ON_ONCE(!platform_ops || !platform_ops->suspend))
+	if (WARN_ON_ONCE(!platform_ops))
 		return;
-	BUG_ON(!irqs_disabled());
 
-	/* Very similar to mcpm_cpu_power_down() */
-	setup_mm_for_reboot();
-	platform_ops->suspend(expected_residency);
-	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
-	phys_reset(virt_to_phys(mcpm_entry_point));
-	BUG();
+	/* backward compatibility callback */
+	if (platform_ops->suspend) {
+		phys_reset_t phys_reset;
+		BUG_ON(!irqs_disabled());
+		setup_mm_for_reboot();
+		platform_ops->suspend(expected_residency);
+		phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+		phys_reset(virt_to_phys(mcpm_entry_point));
+		BUG();
+	}
+
+	/* Some platforms might have to enable special resume modes, etc. */
+	if (platform_ops->cpu_suspend_prepare) {
+		unsigned int mpidr = read_cpuid_mpidr();
+		unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+		unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+		arch_spin_lock(&mcpm_lock);
+		platform_ops->cpu_suspend_prepare(cpu, cluster);
+		arch_spin_unlock(&mcpm_lock);
+	}
+	mcpm_cpu_power_down();
 }
 
 int mcpm_cpu_powered_up(void)
 {
+	unsigned int mpidr, cpu, cluster;
+	bool cpu_was_down, first_man;
+	unsigned long flags;
+
 	if (!platform_ops)
 		return -EUNATCH;
-	if (platform_ops->powered_up)
+
+	/* backward compatibility callback */
+	if (platform_ops->powered_up) {
 		platform_ops->powered_up();
+		return 0;
+	}
+
+	mpidr = read_cpuid_mpidr();
+	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	local_irq_save(flags);
+	arch_spin_lock(&mcpm_lock);
+
+	cpu_was_down = !mcpm_cpu_use_count[cluster][cpu];
+	first_man = mcpm_cluster_unused(cluster);
+
+	if (first_man && platform_ops->cluster_is_up)
+		platform_ops->cluster_is_up(cluster);
+	if (cpu_was_down)
+		mcpm_cpu_use_count[cluster][cpu] = 1;
+	if (platform_ops->cpu_is_up)
+		platform_ops->cpu_is_up(cpu, cluster);
+
+	arch_spin_unlock(&mcpm_lock);
+	local_irq_restore(flags);
+
 	return 0;
 }
 
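With mcpm_cpu_suspend() now funnelling into mcpm_cpu_power_down() after the optional cpu_suspend_prepare() hook, a cpuidle-style caller only needs to park the MCPM entry vector on the resume address first. An illustrative sketch, loosely modelled on the big.LITTLE cpuidle driver (the example_* name is an assumption):

#include <asm/cputype.h>
#include <asm/mcpm.h>
#include <asm/suspend.h>

static int example_powerdown_finisher(unsigned long arg)
{
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	mcpm_set_entry_vector(cpu, cluster, cpu_resume);
	mcpm_cpu_suspend(0);	/* residency hint; placeholder value in this sketch */
	/* Should not be reached; a non-zero return flags failure to cpu_suspend(). */
	return 1;
}

/* Typically invoked from an idle-state handler as: cpu_suspend(0, example_powerdown_finisher); */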
@@ -334,8 +470,10 @@ int __init mcpm_sync_init(
 	}
 	mpidr = read_cpuid_mpidr();
 	this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-	for_each_online_cpu(i)
+	for_each_online_cpu(i) {
+		mcpm_cpu_use_count[this_cluster][i] = 1;
 		mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
+	}
 	mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
 	sync_cache_w(&mcpm_sync);
 