author      Paul Burton <paul.burton@imgtec.com>    2014-04-14 09:13:57 -0400
committer   Paul Burton <paul.burton@imgtec.com>    2014-05-28 11:20:31 -0400
commit      1d8f1f5a780abe51257f7d2e33142f33d983a9ed
tree        6f4be0e00ccd7e7c22c94222ea7da3521500560d
parent      3179d37ee1ed602770a8b8ed975bd30faa85b4a3
MIPS: smp-cps: hotplug support
This patch adds support for offlining CPUs via hotplug when using the
CONFIG_MIPS_CPS SMP implementation. When a CPU is offlined, one of the
following will happen:
- If the CPU is part of a core which implements the MT ASE and there
  is at least one other VPE online within that core, then the VPE will
  be halted by setting its TCHalt bit.
- Otherwise, if supported, the core will be powered down via the CPC.
- Otherwise, the CPU will hang by executing an infinite loop.
Bringing CPUs back online is then a matter of either clearing the
appropriate VPE's TCHalt bit or powering up the appropriate core via the
CPC. Throughout the process, the vpe_mask field of struct core_boot_config
must be kept up to date so that mips_cps_boot_vpes() starts and stops the
correct VPEs.
Signed-off-by: Paul Burton <paul.burton@imgtec.com>
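
[Editor's note] The offline decision described above can be sketched in
isolation. The following is a minimal user-space C model of that choice,
not code from the patch: NCPUS, the online[] and core_of[] arrays, and the
choose_death() helper are invented for illustration, standing in for the
kernel's cpu_online_mask and cpu_data[].core; the explicit self-check stands
in for the fact that cps_cpu_disable() has already cleared the dying CPU
from the online mask.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the decision play_dead() makes in the patch below. */
#define NCPUS 4

static bool online[NCPUS]  = { true, false, true, false }; /* CPU1, CPU3 offline */
static int  core_of[NCPUS] = { 0, 0, 1, 1 };

enum cpu_death { CPU_DEATH_HALT, CPU_DEATH_POWER };

/* Halt the VPE if a sibling VPE in the same core remains online,
 * otherwise power the whole core down (or spin, absent CPC support). */
static enum cpu_death choose_death(int cpu, bool has_mt)
{
	int sibling;

	if (!has_mt)
		return CPU_DEATH_POWER;

	for (sibling = 0; sibling < NCPUS; sibling++) {
		if (!online[sibling] || sibling == cpu)
			continue;
		if (core_of[sibling] == core_of[cpu])
			return CPU_DEATH_HALT;
	}

	return CPU_DEATH_POWER;
}

int main(void)
{
	/* CPU1 shares core 0 with online CPU0: halt its TC. */
	printf("cpu1: %s\n", choose_death(1, true) == CPU_DEATH_HALT ?
	       "halt TC" : "power down core");
	/* CPU2 is the only online VPE in core 1: power the core down. */
	printf("cpu2: %s\n", choose_death(2, true) == CPU_DEATH_HALT ?
	       "halt TC" : "power down core");
	return 0;
}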
-rw-r--r--  arch/mips/Kconfig          |   2
-rw-r--r--  arch/mips/kernel/smp-cps.c | 155
2 files changed, 155 insertions(+), 2 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index c79e6a4a0075..860a1e9e8d92 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2059,9 +2059,11 @@ config MIPS_CPS
 	depends on SYS_SUPPORTS_MIPS_CPS
 	select MIPS_CM
 	select MIPS_CPC
+	select MIPS_CPS_PM if HOTPLUG_CPU
 	select MIPS_GIC_IPI
 	select SMP
 	select SYNC_R4K if (CEVT_R4K || CSRC_R4K)
+	select SYS_SUPPORTS_HOTPLUG_CPU
 	select SYS_SUPPORTS_SMP
 	select WEAK_ORDERING
 	help
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index b519c8510972..3c30891fc789 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -20,6 +20,7 @@
 #include <asm/mips-cpc.h>
 #include <asm/mips_mt.h>
 #include <asm/mipsregs.h>
+#include <asm/pm-cps.h>
 #include <asm/smp-cps.h>
 #include <asm/time.h>
 #include <asm/uasm.h>
@@ -194,10 +195,12 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle)
 
 	atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);
 
+	preempt_disable();
+
 	if (!test_bit(core, core_power)) {
 		/* Boot a VPE on a powered down core */
 		boot_core(core);
-		return;
+		goto out;
 	}
 
 	if (core != current_cpu_data.core) {
@@ -214,13 +217,15 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle)
 					       NULL, 1);
 		if (err)
 			panic("Failed to call remote CPU\n");
-		return;
+		goto out;
 	}
 
 	BUG_ON(!cpu_has_mipsmt);
 
 	/* Boot a VPE on this core */
 	mips_cps_boot_vpes();
+out:
+	preempt_enable();
 }
 
 static void cps_init_secondary(void)
@@ -250,6 +255,148 @@ static void cps_cpus_done(void)
 {
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int cps_cpu_disable(void)
+{
+	unsigned cpu = smp_processor_id();
+	struct core_boot_config *core_cfg;
+
+	if (!cpu)
+		return -EBUSY;
+
+	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
+		return -EINVAL;
+
+	core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
+	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
+	smp_mb__after_atomic_dec();
+	set_cpu_online(cpu, false);
+	cpu_clear(cpu, cpu_callin_map);
+
+	return 0;
+}
+
+static DECLARE_COMPLETION(cpu_death_chosen);
+static unsigned cpu_death_sibling;
+static enum {
+	CPU_DEATH_HALT,
+	CPU_DEATH_POWER,
+} cpu_death;
+
+void play_dead(void)
+{
+	unsigned cpu, core;
+
+	local_irq_disable();
+	idle_task_exit();
+	cpu = smp_processor_id();
+	cpu_death = CPU_DEATH_POWER;
+
+	if (cpu_has_mipsmt) {
+		core = cpu_data[cpu].core;
+
+		/* Look for another online VPE within the core */
+		for_each_online_cpu(cpu_death_sibling) {
+			if (cpu_data[cpu_death_sibling].core != core)
+				continue;
+
+			/*
+			 * There is an online VPE within the core. Just halt
+			 * this TC and leave the core alone.
+			 */
+			cpu_death = CPU_DEATH_HALT;
+			break;
+		}
+	}
+
+	/* This CPU has chosen its way out */
+	complete(&cpu_death_chosen);
+
+	if (cpu_death == CPU_DEATH_HALT) {
+		/* Halt this TC */
+		write_c0_tchalt(TCHALT_H);
+		instruction_hazard();
+	} else {
+		/* Power down the core */
+		cps_pm_enter_state(CPS_PM_POWER_GATED);
+	}
+
+	/* This should never be reached */
+	panic("Failed to offline CPU %u", cpu);
+}
+
+static void wait_for_sibling_halt(void *ptr_cpu)
+{
+	unsigned cpu = (unsigned)ptr_cpu;
+	unsigned vpe_id = cpu_data[cpu].vpe_id;
+	unsigned halted;
+	unsigned long flags;
+
+	do {
+		local_irq_save(flags);
+		settc(vpe_id);
+		halted = read_tc_c0_tchalt();
+		local_irq_restore(flags);
+	} while (!(halted & TCHALT_H));
+}
+
+static void cps_cpu_die(unsigned int cpu)
+{
+	unsigned core = cpu_data[cpu].core;
+	unsigned stat;
+	int err;
+
+	/* Wait for the cpu to choose its way out */
+	if (!wait_for_completion_timeout(&cpu_death_chosen,
+					 msecs_to_jiffies(5000))) {
+		pr_err("CPU%u: didn't offline\n", cpu);
+		return;
+	}
+
+	/*
+	 * Now wait for the CPU to actually offline. Without doing this that
+	 * offlining may race with one or more of:
+	 *
+	 *   - Onlining the CPU again.
+	 *   - Powering down the core if another VPE within it is offlined.
+	 *   - A sibling VPE entering a non-coherent state.
+	 *
+	 * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
+	 * with which we could race, so do nothing.
+	 */
+	if (cpu_death == CPU_DEATH_POWER) {
+		/*
+		 * Wait for the core to enter a powered down or clock gated
+		 * state, the latter happening when a JTAG probe is connected
+		 * in which case the CPC will refuse to power down the core.
+		 */
+		do {
+			mips_cpc_lock_other(core);
+			stat = read_cpc_co_stat_conf();
+			stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
+			mips_cpc_unlock_other();
+		} while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
+			 stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
+			 stat != CPC_Cx_STAT_CONF_SEQSTATE_U2);
+
+		/* Indicate the core is powered off */
+		bitmap_clear(core_power, core, 1);
+	} else if (cpu_has_mipsmt) {
+		/*
+		 * Have a CPU with access to the offlined CPUs registers wait
+		 * for its TC to halt.
+		 */
+		err = smp_call_function_single(cpu_death_sibling,
+					       wait_for_sibling_halt,
+					       (void *)cpu, 1);
+		if (err)
+			panic("Failed to call remote sibling CPU\n");
+	}
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
 static struct plat_smp_ops cps_smp_ops = {
 	.smp_setup = cps_smp_setup,
 	.prepare_cpus = cps_prepare_cpus,
@@ -259,6 +406,10 @@ static struct plat_smp_ops cps_smp_ops = {
 	.send_ipi_single = gic_send_ipi_single,
 	.send_ipi_mask = gic_send_ipi_mask,
 	.cpus_done = cps_cpus_done,
+#ifdef CONFIG_HOTPLUG_CPU
+	.cpu_disable = cps_cpu_disable,
+	.cpu_die = cps_cpu_die,
+#endif
 };
 
 bool mips_cps_smp_in_use(void)
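
[Editor's note] Once a kernel with this patch and CONFIG_HOTPLUG_CPU is
running, the new paths are exercised from user space through the standard
sysfs hotplug interface. The sketch below is not part of the patch; it
assumes CPU 1 exists, is hotpluggable, and that the program runs as root.

#include <stdio.h>

/* Offline or online CPU `cpu` by writing "0" or "1" to
 * /sys/devices/system/cpu/cpuN/online. Assumes CONFIG_HOTPLUG_CPU
 * (and, on MIPS CPS, this patch) and root privileges. */
static int set_cpu_online_state(int cpu, int want_online)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/online", cpu);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	fputs(want_online ? "1" : "0", f);
	fclose(f);
	return 0;
}

int main(void)
{
	if (set_cpu_online_state(1, 0))	/* offline CPU1 */
		return 1;
	if (set_cpu_online_state(1, 1))	/* bring CPU1 back online */
		return 1;
	return 0;
}

Offlining CPU1 on an MT core that still has another VPE online should take
the TCHalt path, while offlining the last VPE of a core on a system with a
CPC should power the core down, which cps_cpu_die() then confirms via the
CPC sequencer state.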