-rw-r--r--   arch/arm/mach-vexpress/spc.c    | 40
-rw-r--r--   arch/arm/mach-vexpress/spc.h    |  1
-rw-r--r--   arch/arm/mach-vexpress/tc2_pm.c | 66
3 files changed, 102 insertions, 5 deletions
diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
index 033d34dcbd3f..c26ef5b92ca7 100644
--- a/arch/arm/mach-vexpress/spc.c
+++ b/arch/arm/mach-vexpress/spc.c
@@ -53,6 +53,11 @@
 #define A15_BX_ADDR0		0x68
 #define A7_BX_ADDR0		0x78
 
+/* SPC CPU/cluster reset status */
+#define STANDBYWFI_STAT		0x3c
+#define STANDBYWFI_STAT_A15_CPU_MASK(cpu)	(1 << (cpu))
+#define STANDBYWFI_STAT_A7_CPU_MASK(cpu)	(1 << (3 + (cpu)))
+
 /* SPC system config interface registers */
 #define SYSCFG_WDATA		0x70
 #define SYSCFG_RDATA		0x74
@@ -213,6 +218,41 @@ void ve_spc_powerdown(u32 cluster, bool enable)
 	writel_relaxed(enable, info->baseaddr + pwdrn_reg);
 }
 
+static u32 standbywfi_cpu_mask(u32 cpu, u32 cluster)
+{
+	return cluster_is_a15(cluster) ?
+		  STANDBYWFI_STAT_A15_CPU_MASK(cpu)
+		: STANDBYWFI_STAT_A7_CPU_MASK(cpu);
+}
+
+/**
+ * ve_spc_cpu_in_wfi(u32 cpu, u32 cluster)
+ *
+ * @cpu: mpidr[7:0] bitfield describing CPU affinity level within cluster
+ * @cluster: mpidr[15:8] bitfield describing cluster affinity level
+ *
+ * @return: non-zero if and only if the specified CPU is in WFI
+ *
+ * Take care when interpreting the result of this function: a CPU might
+ * be in WFI temporarily due to idle, and is not necessarily safely
+ * parked.
+ */
+int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster)
+{
+	int ret;
+	u32 mask = standbywfi_cpu_mask(cpu, cluster);
+
+	if (cluster >= MAX_CLUSTERS)
+		return 1;
+
+	ret = readl_relaxed(info->baseaddr + STANDBYWFI_STAT);
+
+	pr_debug("%s: PCFGREG[0x%X] = 0x%08X, mask = 0x%X\n",
+		 __func__, STANDBYWFI_STAT, ret, mask);
+
+	return ret & mask;
+}
+
 static int ve_spc_get_performance(int cluster, u32 *freq)
 {
 	struct ve_spc_opp *opps = info->opps[cluster];
diff --git a/arch/arm/mach-vexpress/spc.h b/arch/arm/mach-vexpress/spc.h
index dbd44c3720f9..793d065243b9 100644
--- a/arch/arm/mach-vexpress/spc.h
+++ b/arch/arm/mach-vexpress/spc.h
@@ -20,5 +20,6 @@ void ve_spc_global_wakeup_irq(bool set);
 void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set);
 void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr);
 void ve_spc_powerdown(u32 cluster, bool enable);
+int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster);
 
 #endif
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index 05a364c5077a..29e7785a54bc 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -12,6 +12,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
@@ -32,11 +33,17 @@
 #include "spc.h"
 
 /* SCC conf registers */
+#define RESET_CTRL		0x018
+#define RESET_A15_NCORERESET(cpu)	(1 << (2 + (cpu)))
+#define RESET_A7_NCORERESET(cpu)	(1 << (16 + (cpu)))
+
 #define A15_CONF		0x400
 #define A7_CONF			0x500
 #define SYS_INFO		0x700
 #define SPC_BASE		0xb00
 
+static void __iomem *scc;
+
 /*
  * We can't use regular spinlocks. In the switcher case, it is possible
  * for an outbound CPU to call power_down() after its inbound counterpart
@@ -190,6 +197,55 @@ static void tc2_pm_power_down(void)
 	tc2_pm_down(0);
 }
 
+static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster)
+{
+	u32 mask = cluster ?
+		  RESET_A7_NCORERESET(cpu)
+		: RESET_A15_NCORERESET(cpu);
+
+	return !(readl_relaxed(scc + RESET_CTRL) & mask);
+}
+
+#define POLL_MSEC 10
+#define TIMEOUT_MSEC 1000
+
+static int tc2_pm_power_down_finish(unsigned int cpu, unsigned int cluster)
+{
+	unsigned tries;
+
+	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
+
+	for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) {
+		/*
+		 * Only examine the hardware state if the target CPU has
+		 * caught up at least as far as tc2_pm_down():
+		 */
+		if (ACCESS_ONCE(tc2_pm_use_count[cpu][cluster]) == 0) {
+			pr_debug("%s(cpu=%u, cluster=%u): RESET_CTRL = 0x%08X\n",
+				 __func__, cpu, cluster,
+				 readl_relaxed(scc + RESET_CTRL));
+
+			/*
+			 * We need the CPU to reach WFI, but the power
+			 * controller may put the cluster in reset and
+			 * power it off as soon as that happens, before
+			 * we have a chance to see STANDBYWFI.
+			 *
+			 * So we need to check for both conditions:
+			 */
+			if (tc2_core_in_reset(cpu, cluster) ||
+			    ve_spc_cpu_in_wfi(cpu, cluster))
+				return 0; /* success: the CPU is halted */
+		}
+
+		/* Otherwise, wait and retry: */
+		msleep(POLL_MSEC);
+	}
+
+	return -ETIMEDOUT; /* timeout */
+}
+
 static void tc2_pm_suspend(u64 residency)
 {
 	unsigned int mpidr, cpu, cluster;
@@ -232,10 +288,11 @@ static void tc2_pm_powered_up(void)
 }
 
 static const struct mcpm_platform_ops tc2_pm_power_ops = {
-	.power_up	= tc2_pm_power_up,
-	.power_down	= tc2_pm_power_down,
-	.suspend	= tc2_pm_suspend,
-	.powered_up	= tc2_pm_powered_up,
+	.power_up		= tc2_pm_power_up,
+	.power_down		= tc2_pm_power_down,
+	.power_down_finish	= tc2_pm_power_down_finish,
+	.suspend		= tc2_pm_suspend,
+	.powered_up		= tc2_pm_powered_up,
 };
 
 static bool __init tc2_pm_usage_count_init(void)
@@ -269,7 +326,6 @@ static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
 static int __init tc2_pm_init(void)
 {
 	int ret, irq;
-	void __iomem *scc;
 	u32 a15_cluster_id, a7_cluster_id, sys_info;
 	struct device_node *np;
 
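For illustration only, and not part of the commit above: the STANDBYWFI_STAT_*_CPU_MASK() macros pack per-CPU wait-for-interrupt status bits into one register (A15 CPUs starting at bit 0, A7 CPUs starting at bit 3), and tc2_pm_power_down_finish() simply polls that state, or the reset state, until a timeout expires. The following minimal standalone C sketch shows the same mask selection and poll-with-timeout pattern; the simulated register read and the usleep()-based delay are assumptions made only so the example can run outside the kernel.

/*
 * Standalone sketch (assumptions: simulated register value, usleep() in
 * place of msleep()) of the STANDBYWFI mask selection and the
 * poll-until-halted-or-timeout pattern used above.
 */
#include <stdio.h>
#include <unistd.h>

#define STANDBYWFI_STAT_A15_CPU_MASK(cpu)  (1u << (cpu))
#define STANDBYWFI_STAT_A7_CPU_MASK(cpu)   (1u << (3 + (cpu)))

#define POLL_MSEC    10
#define TIMEOUT_MSEC 1000

/* Pretend register read: CPU 1 of the A7 cluster reports STANDBYWFI. */
static unsigned int read_standbywfi_stat(void)
{
        return STANDBYWFI_STAT_A7_CPU_MASK(1);
}

static unsigned int standbywfi_cpu_mask(unsigned int cpu, int cluster_is_a15)
{
        return cluster_is_a15 ? STANDBYWFI_STAT_A15_CPU_MASK(cpu)
                              : STANDBYWFI_STAT_A7_CPU_MASK(cpu);
}

int main(void)
{
        unsigned int tries;
        unsigned int mask = standbywfi_cpu_mask(1, 0);  /* CPU 1, A7 cluster */

        for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) {
                if (read_standbywfi_stat() & mask) {
                        printf("CPU halted after %u polls\n", tries);
                        return 0;
                }
                usleep(POLL_MSEC * 1000);       /* stand-in for msleep() */
        }
        printf("timed out waiting for the CPU to halt\n");
        return 1;
}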
