author		Chen-Yu Tsai <wens@csie.org>	2018-01-17 03:46:51 -0500
committer	Chen-Yu Tsai <wens@csie.org>	2018-02-19 22:12:38 -0500
commit		7cbea6327e76f2a7d77d02065a191bb29865a4a9
tree		04163eda35149dd0a917ad1d540746f6f862db57
parent		745373e3d6ee3e398494d3aebe923b57a90ebadf
ARM: sun9i: smp: Support CPU/cluster power down and hotplugging for cpu1~7
This patch adds common code used to power down all cores and clusters.
The code was previously based on the MCPM framework. It has now been
adapted to hook into struct smp_operations directly, but the code
structure still shows signs of prior work.

The primary core (cpu0) requires setting flags to have the BROM bounce
execution to the SMP software entry code. This is done in a subsequent
patch to keep the changes cleanly separated. By default the ARM SMP
code blocks cpu0 from being turned off, so splitting this out is safe.

Signed-off-by: Chen-Yu Tsai <wens@csie.org>
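For quick reference, the new hooks slot into the machine's struct
smp_operations beside the existing boot hook; the wiring, exactly as it
appears in the diff below, is:

	static const struct smp_operations sunxi_mc_smp_smp_ops __initconst = {
		.smp_boot_secondary	= sunxi_mc_smp_boot_secondary,
	#ifdef CONFIG_HOTPLUG_CPU
		.cpu_die		= sunxi_mc_smp_cpu_die,
		.cpu_kill		= sunxi_mc_smp_cpu_kill,
	#endif
	};

cpu_die runs on the CPU going offline and parks it in a WFI loop;
cpu_kill then runs from a surviving CPU, waits for the target to assert
STANDBYWFI, and gates the core's power (and, for the last core in a
cluster, the cluster's power as well).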
-rw-r--r--	arch/arm/mach-sunxi/mc_smp.c	190
1 file changed, 189 insertions(+), 1 deletion(-)
diff --git a/arch/arm/mach-sunxi/mc_smp.c b/arch/arm/mach-sunxi/mc_smp.c
index 92e3d7ba496a..fc0acfa07f74 100644
--- a/arch/arm/mach-sunxi/mc_smp.c
+++ b/arch/arm/mach-sunxi/mc_smp.c
@@ -15,6 +15,8 @@
 #include <linux/cpu_pm.h>
 #include <linux/delay.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/irqchip/arm-gic.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
@@ -30,6 +32,9 @@
 #define SUNXI_CPUS_PER_CLUSTER		4
 #define SUNXI_NR_CLUSTERS		2
 
+#define POLL_USEC			100
+#define TIMEOUT_USEC			100000
+
 #define CPUCFG_CX_CTRL_REG0(c)		(0x10 * (c))
 #define CPUCFG_CX_CTRL_REG0_L1_RST_DISABLE(n)	BIT(n)
 #define CPUCFG_CX_CTRL_REG0_L1_RST_DISABLE_ALL	0xf
@@ -37,6 +42,9 @@
 #define CPUCFG_CX_CTRL_REG0_L2_RST_DISABLE_A15	BIT(0)
 #define CPUCFG_CX_CTRL_REG1(c)		(0x10 * (c) + 0x4)
 #define CPUCFG_CX_CTRL_REG1_ACINACTM	BIT(0)
+#define CPUCFG_CX_STATUS(c)		(0x30 + 0x4 * (c))
+#define CPUCFG_CX_STATUS_STANDBYWFI(n)	BIT(16 + (n))
+#define CPUCFG_CX_STATUS_STANDBYWFIL2	BIT(0)
 #define CPUCFG_CX_RST_CTRL(c)		(0x80 + 0x4 * (c))
 #define CPUCFG_CX_RST_CTRL_DBG_SOC_RST	BIT(24)
 #define CPUCFG_CX_RST_CTRL_ETM_RST(n)	BIT(20 + (n))
@@ -121,7 +129,7 @@ static int sunxi_cpu_powerup(unsigned int cpu, unsigned int cluster)
 {
 	u32 reg;
 
-	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+	pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
 	if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS)
 		return -EINVAL;
 
@@ -390,8 +398,188 @@ out:
 	return 0;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+static void sunxi_cluster_cache_disable(void)
+{
+	unsigned int cluster = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 1);
+	u32 reg;
+
+	pr_debug("%s: cluster %u\n", __func__, cluster);
+
+	sunxi_cluster_cache_disable_without_axi();
+
+	/* last man standing, assert ACINACTM */
+	reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
+	reg |= CPUCFG_CX_CTRL_REG1_ACINACTM;
+	writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
+}
+
+static void sunxi_mc_smp_cpu_die(unsigned int l_cpu)
+{
+	unsigned int mpidr, cpu, cluster;
+	bool last_man;
+
+	mpidr = cpu_logical_map(l_cpu);
+	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
+
+	spin_lock(&boot_lock);
+	sunxi_mc_smp_cpu_table[cluster][cpu]--;
+	if (sunxi_mc_smp_cpu_table[cluster][cpu] == 1) {
+		/* A power_up request went ahead of us. */
+		pr_debug("%s: aborting due to a power up request\n",
+			 __func__);
+		spin_unlock(&boot_lock);
+		return;
+	} else if (sunxi_mc_smp_cpu_table[cluster][cpu] > 1) {
+		pr_err("Cluster %d CPU%d boots multiple times\n",
+		       cluster, cpu);
+		BUG();
+	}
+
+	last_man = sunxi_mc_smp_cluster_is_down(cluster);
+	spin_unlock(&boot_lock);
+
+	gic_cpu_if_down(0);
+	if (last_man)
+		sunxi_cluster_cache_disable();
+	else
+		v7_exit_coherency_flush(louis);
+
+	for (;;)
+		wfi();
+}
+
+static int sunxi_cpu_powerdown(unsigned int cpu, unsigned int cluster)
+{
+	u32 reg;
+
+	pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
+	if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS)
+		return -EINVAL;
+
+	/* gate processor power */
+	reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
+	reg |= PRCM_PWROFF_GATING_REG_CORE(cpu);
+	writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
+	udelay(20);
+
+	/* close power switch */
+	sunxi_cpu_power_switch_set(cpu, cluster, false);
+
+	return 0;
+}
+
+static int sunxi_cluster_powerdown(unsigned int cluster)
+{
+	u32 reg;
+
+	pr_debug("%s: cluster %u\n", __func__, cluster);
+	if (cluster >= SUNXI_NR_CLUSTERS)
+		return -EINVAL;
+
+	/* assert cluster resets or system will hang */
+	pr_debug("%s: assert cluster reset\n", __func__);
+	reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
+	reg &= ~CPUCFG_CX_RST_CTRL_DBG_SOC_RST;
+	reg &= ~CPUCFG_CX_RST_CTRL_H_RST;
+	reg &= ~CPUCFG_CX_RST_CTRL_L2_RST;
+	writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
+
+	/* gate cluster power */
+	pr_debug("%s: gate cluster power\n", __func__);
+	reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
+	reg |= PRCM_PWROFF_GATING_REG_CLUSTER;
+	writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
+	udelay(20);
+
+	return 0;
+}
+
+static int sunxi_mc_smp_cpu_kill(unsigned int l_cpu)
+{
+	unsigned int mpidr, cpu, cluster;
+	unsigned int tries, count;
+	int ret = 0;
+	u32 reg;
+
+	mpidr = cpu_logical_map(l_cpu);
+	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+	/* This should never happen */
+	if (WARN_ON(cluster >= SUNXI_NR_CLUSTERS ||
+		    cpu >= SUNXI_CPUS_PER_CLUSTER))
+		return 0;
+
+	/* wait for CPU core to die and enter WFI */
+	count = TIMEOUT_USEC / POLL_USEC;
+	spin_lock_irq(&boot_lock);
+	for (tries = 0; tries < count; tries++) {
+		spin_unlock_irq(&boot_lock);
+		usleep_range(POLL_USEC / 2, POLL_USEC);
+		spin_lock_irq(&boot_lock);
+
+		/*
+		 * If the user turns off a bunch of cores at the same
+		 * time, the kernel might call cpu_kill before some of
+		 * them are ready. This is because boot_lock serializes
+		 * both cpu_die and cpu_kill callbacks. Either one could
+		 * run first. We should wait for cpu_die to complete.
+		 */
+		if (sunxi_mc_smp_cpu_table[cluster][cpu])
+			continue;
+
+		reg = readl(cpucfg_base + CPUCFG_CX_STATUS(cluster));
+		if (reg & CPUCFG_CX_STATUS_STANDBYWFI(cpu))
+			break;
+	}
+
+	if (tries >= count) {
+		ret = ETIMEDOUT;
+		goto out;
+	}
+
+	/* power down CPU core */
+	sunxi_cpu_powerdown(cpu, cluster);
+
+	if (!sunxi_mc_smp_cluster_is_down(cluster))
+		goto out;
+
+	/* wait for cluster L2 WFI */
+	ret = readl_poll_timeout(cpucfg_base + CPUCFG_CX_STATUS(cluster), reg,
+				 reg & CPUCFG_CX_STATUS_STANDBYWFIL2,
+				 POLL_USEC, TIMEOUT_USEC);
+	if (ret) {
+		/*
+		 * Ignore timeout on the cluster. Leaving the cluster on
+		 * will not affect system execution, just use a bit more
+		 * power. But returning an error here will only confuse
+		 * the user as the CPU has already been shutdown.
+		 */
+		ret = 0;
+		goto out;
+	}
+
+	/* Power down cluster */
+	sunxi_cluster_powerdown(cluster);
+
+out:
+	spin_unlock_irq(&boot_lock);
+	pr_debug("%s: cluster %u cpu %u powerdown: %d\n",
+		 __func__, cluster, cpu, ret);
+	return !ret;
+}
+
+#endif
+
 static const struct smp_operations sunxi_mc_smp_smp_ops __initconst = {
 	.smp_boot_secondary	= sunxi_mc_smp_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+	.cpu_die		= sunxi_mc_smp_cpu_die,
+	.cpu_kill		= sunxi_mc_smp_cpu_kill,
+#endif
 };
 
 static bool __init sunxi_mc_smp_cpu_table_init(void)