aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorOlof Johansson <olof@lixom.net>2013-08-14 01:07:52 -0400
committerOlof Johansson <olof@lixom.net>2013-08-14 01:07:52 -0400
commite0bb3964c95b6068bd68ee332b20fc24a76ad2aa (patch)
treee78ab25deb5674e236cc44671676cac199590d76
parent3b2f64d00c46e1e4e9bd0bb9bb12619adac27a4b (diff)
parente607b0f985f5277324e3fdce5bb462ef4eac4bc9 (diff)
Merge tag 'tc2-pm' of git://git.linaro.org/people/pawelmoll/linux into next/soc
From Pawel Moll and Nicolas Pitre: - Fixes to the existing Vexpress DCSCB backend. - Lorenzo's minimal SPC driver required by the TC2 MCPM backend. - The MCPM backend enabling SMP secondary boot and CPU hotplug on the VExpress TC2 big.LITTLE platform. - MCPM suspend method to the TC2 backend allowing basic CPU idle/suspend. The cpuidle driver that hooks into this will be submitted separately. * tag 'tc2-pm' of git://git.linaro.org/people/pawelmoll/linux: ARM: vexpress/TC2: implement PM suspend method ARM: vexpress/TC2: basic PM support ARM: vexpress: Add SCC to V2P-CA15_A7's device tree ARM: vexpress/TC2: add Serial Power Controller (SPC) support ARM: vexpress/dcscb: fix cache disabling sequences Signed-off-by: Olof Johansson <olof@lixom.net>
-rw-r--r--Documentation/devicetree/bindings/arm/vexpress-scc.txt33
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts6
-rw-r--r--arch/arm/mach-vexpress/Kconfig8
-rw-r--r--arch/arm/mach-vexpress/Makefile1
-rw-r--r--arch/arm/mach-vexpress/dcscb.c58
-rw-r--r--arch/arm/mach-vexpress/spc.c180
-rw-r--r--arch/arm/mach-vexpress/spc.h24
-rw-r--r--arch/arm/mach-vexpress/tc2_pm.c344
8 files changed, 633 insertions, 21 deletions
diff --git a/Documentation/devicetree/bindings/arm/vexpress-scc.txt b/Documentation/devicetree/bindings/arm/vexpress-scc.txt
new file mode 100644
index 000000000000..ae5043e42e5d
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/vexpress-scc.txt
@@ -0,0 +1,33 @@
1ARM Versatile Express Serial Configuration Controller
2-----------------------------------------------------
3
4Test chips for ARM Versatile Express platform implement SCC (Serial
5Configuration Controller) interface, used to set initial conditions
6for the test chip.
7
8In some cases its registers are also mapped in normal address space
9and can be used to obtain runtime information about the chip internals
10(like silicon temperature sensors) and as interface to other subsystems
11like platform configuration control and power management.
12
13Required properties:
14
15- compatible value: "arm,vexpress-scc,<model>", "arm,vexpress-scc";
16 where <model> is the full tile model name (as used
17 in the tile's Technical Reference Manual),
18 eg. for Coretile Express A15x2 A7x3 (V2P-CA15_A7):
19 compatible = "arm,vexpress-scc,v2p-ca15_a7", "arm,vexpress-scc";
20
21Optional properties:
22
23- reg: when the SCC is memory mapped, physical address and size of the
24 registers window
25- interrupts: when the SCC can generate a system-level interrupt
26
27Example:
28
29 scc@7fff0000 {
30 compatible = "arm,vexpress-scc,v2p-ca15_a7", "arm,vexpress-scc";
31 reg = <0 0x7fff0000 0 0x1000>;
32 interrupts = <0 95 4>;
33 };
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
index d2803be4e1a8..759b0cd20013 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
@@ -125,6 +125,12 @@
125 clock-names = "apb_pclk"; 125 clock-names = "apb_pclk";
126 }; 126 };
127 127
128 scc@7fff0000 {
129 compatible = "arm,vexpress-scc,v2p-ca15_a7", "arm,vexpress-scc";
130 reg = <0 0x7fff0000 0 0x1000>;
131 interrupts = <0 95 4>;
132 };
133
128 timer { 134 timer {
129 compatible = "arm,armv7-timer"; 135 compatible = "arm,armv7-timer";
130 interrupts = <1 13 0xf08>, 136 interrupts = <1 13 0xf08>,
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index b8bbabec6310..c700e623f9d8 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -66,4 +66,12 @@ config ARCH_VEXPRESS_DCSCB
66 This is needed to provide CPU and cluster power management 66 This is needed to provide CPU and cluster power management
67 on RTSM implementing big.LITTLE. 67 on RTSM implementing big.LITTLE.
68 68
69config ARCH_VEXPRESS_TC2_PM
70 bool "Versatile Express TC2 power management"
71 depends on MCPM
72 select ARM_CCI
73 help
74 Support for CPU and cluster power management on Versatile Express
75 with a TC2 (A15x2 A7x3) big.LITTLE core tile.
76
69endmenu 77endmenu
diff --git a/arch/arm/mach-vexpress/Makefile b/arch/arm/mach-vexpress/Makefile
index 48ba89a8149f..36ea8247123a 100644
--- a/arch/arm/mach-vexpress/Makefile
+++ b/arch/arm/mach-vexpress/Makefile
@@ -7,5 +7,6 @@ ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
7obj-y := v2m.o 7obj-y := v2m.o
8obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o 8obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o
9obj-$(CONFIG_ARCH_VEXPRESS_DCSCB) += dcscb.o dcscb_setup.o 9obj-$(CONFIG_ARCH_VEXPRESS_DCSCB) += dcscb.o dcscb_setup.o
10obj-$(CONFIG_ARCH_VEXPRESS_TC2_PM) += tc2_pm.o spc.o
10obj-$(CONFIG_SMP) += platsmp.o 11obj-$(CONFIG_SMP) += platsmp.o
11obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o 12obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c
index 16d57a8a9d5a..85fffa702f5b 100644
--- a/arch/arm/mach-vexpress/dcscb.c
+++ b/arch/arm/mach-vexpress/dcscb.c
@@ -136,14 +136,29 @@ static void dcscb_power_down(void)
136 /* 136 /*
137 * Flush all cache levels for this cluster. 137 * Flush all cache levels for this cluster.
138 * 138 *
139 * A15/A7 can hit in the cache with SCTLR.C=0, so we don't need 139 * To do so we do:
140 * a preliminary flush here for those CPUs. At least, that's 140 * - Clear the SCTLR.C bit to prevent further cache allocations
141 * the theory -- without the extra flush, Linux explodes on 141 * - Flush the whole cache
142 * RTSM (to be investigated). 142 * - Clear the ACTLR "SMP" bit to disable local coherency
143 *
144 * Let's do it in the safest possible way i.e. with
145 * no memory access within the following sequence
146 * including to the stack.
143 */ 147 */
144 flush_cache_all(); 148 asm volatile(
145 set_cr(get_cr() & ~CR_C); 149 "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
146 flush_cache_all(); 150 "bic r0, r0, #"__stringify(CR_C)" \n\t"
151 "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
152 "isb \n\t"
153 "bl v7_flush_dcache_all \n\t"
154 "clrex \n\t"
155 "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
156 "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
157 "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
158 "isb \n\t"
159 "dsb "
160 : : : "r0","r1","r2","r3","r4","r5","r6","r7",
161 "r9","r10","r11","lr","memory");
147 162
148 /* 163 /*
149 * This is a harmless no-op. On platforms with a real 164 * This is a harmless no-op. On platforms with a real
@@ -152,9 +167,6 @@ static void dcscb_power_down(void)
152 */ 167 */
153 outer_flush_all(); 168 outer_flush_all();
154 169
155 /* Disable local coherency by clearing the ACTLR "SMP" bit: */
156 set_auxcr(get_auxcr() & ~(1 << 6));
157
158 /* 170 /*
159 * Disable cluster-level coherency by masking 171 * Disable cluster-level coherency by masking
160 * incoming snoops and DVM messages: 172 * incoming snoops and DVM messages:
@@ -167,18 +179,22 @@ static void dcscb_power_down(void)
167 179
168 /* 180 /*
169 * Flush the local CPU cache. 181 * Flush the local CPU cache.
170 * 182 * Let's do it in the safest possible way as above.
171 * A15/A7 can hit in the cache with SCTLR.C=0, so we don't need
172 * a preliminary flush here for those CPUs. At least, that's
173 * the theory -- without the extra flush, Linux explodes on
174 * RTSM (to be investigated).
175 */ 183 */
176 flush_cache_louis(); 184 asm volatile(
177 set_cr(get_cr() & ~CR_C); 185 "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
178 flush_cache_louis(); 186 "bic r0, r0, #"__stringify(CR_C)" \n\t"
179 187 "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
180 /* Disable local coherency by clearing the ACTLR "SMP" bit: */ 188 "isb \n\t"
181 set_auxcr(get_auxcr() & ~(1 << 6)); 189 "bl v7_flush_dcache_louis \n\t"
190 "clrex \n\t"
191 "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
192 "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
193 "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
194 "isb \n\t"
195 "dsb "
196 : : : "r0","r1","r2","r3","r4","r5","r6","r7",
197 "r9","r10","r11","lr","memory");
182 } 198 }
183 199
184 __mcpm_cpu_down(cpu, cluster); 200 __mcpm_cpu_down(cpu, cluster);
diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
new file mode 100644
index 000000000000..eefb029197ca
--- /dev/null
+++ b/arch/arm/mach-vexpress/spc.c
@@ -0,0 +1,180 @@
1/*
2 * Versatile Express Serial Power Controller (SPC) support
3 *
4 * Copyright (C) 2013 ARM Ltd.
5 *
6 * Authors: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
7 * Achin Gupta <achin.gupta@arm.com>
8 * Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
15 * kind, whether express or implied; without even the implied warranty
16 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
20#include <linux/err.h>
21#include <linux/io.h>
22#include <linux/slab.h>
23
24#include <asm/cacheflush.h>
25
26#define SPCLOG "vexpress-spc: "
27
28/* SPC wake-up IRQs status and mask */
29#define WAKE_INT_MASK 0x24
30#define WAKE_INT_RAW 0x28
31#define WAKE_INT_STAT 0x2c
32/* SPC power down registers */
33#define A15_PWRDN_EN 0x30
34#define A7_PWRDN_EN 0x34
35/* SPC per-CPU mailboxes */
36#define A15_BX_ADDR0 0x68
37#define A7_BX_ADDR0 0x78
38
39/* wake-up interrupt masks */
40#define GBL_WAKEUP_INT_MSK (0x3 << 10)
41
42/* TC2 static dual-cluster configuration */
43#define MAX_CLUSTERS 2
44
/*
 * Driver-private state, allocated once at init time and read-only after.
 */
struct ve_spc_drvdata {
	void __iomem *baseaddr;	/* SPC register window (SCC + SPC_BASE) */
	/*
	 * A15s cluster identifier
	 * It corresponds to A15 processors MPIDR[15:8] bitfield
	 */
	u32 a15_clusid;
};

/* Singleton driver instance, set up by ve_spc_init() */
static struct ve_spc_drvdata *info;
55
56static inline bool cluster_is_a15(u32 cluster)
57{
58 return cluster == info->a15_clusid;
59}
60
61/**
62 * ve_spc_global_wakeup_irq()
63 *
64 * Function to set/clear global wakeup IRQs. Not protected by locking since
65 * it might be used in code paths where normal cacheable locks are not
66 * working. Locking must be provided by the caller to ensure atomicity.
67 *
68 * @set: if true, global wake-up IRQs are set, if false they are cleared
69 */
70void ve_spc_global_wakeup_irq(bool set)
71{
72 u32 reg;
73
74 reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK);
75
76 if (set)
77 reg |= GBL_WAKEUP_INT_MSK;
78 else
79 reg &= ~GBL_WAKEUP_INT_MSK;
80
81 writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK);
82}
83
84/**
85 * ve_spc_cpu_wakeup_irq()
86 *
87 * Function to set/clear per-CPU wake-up IRQs. Not protected by locking since
88 * it might be used in code paths where normal cacheable locks are not
89 * working. Locking must be provided by the caller to ensure atomicity.
90 *
91 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
92 * @cpu: mpidr[7:0] bitfield describing cpu affinity level
93 * @set: if true, wake-up IRQs are set, if false they are cleared
94 */
95void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set)
96{
97 u32 mask, reg;
98
99 if (cluster >= MAX_CLUSTERS)
100 return;
101
102 mask = 1 << cpu;
103
104 if (!cluster_is_a15(cluster))
105 mask <<= 4;
106
107 reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK);
108
109 if (set)
110 reg |= mask;
111 else
112 reg &= ~mask;
113
114 writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK);
115}
116
117/**
118 * ve_spc_set_resume_addr() - set the jump address used for warm boot
119 *
120 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
121 * @cpu: mpidr[7:0] bitfield describing cpu affinity level
122 * @addr: physical resume address
123 */
124void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr)
125{
126 void __iomem *baseaddr;
127
128 if (cluster >= MAX_CLUSTERS)
129 return;
130
131 if (cluster_is_a15(cluster))
132 baseaddr = info->baseaddr + A15_BX_ADDR0 + (cpu << 2);
133 else
134 baseaddr = info->baseaddr + A7_BX_ADDR0 + (cpu << 2);
135
136 writel_relaxed(addr, baseaddr);
137}
138
139/**
140 * ve_spc_powerdown()
141 *
142 * Function to enable/disable cluster powerdown. Not protected by locking
143 * since it might be used in code paths where normal cacheable locks are not
144 * working. Locking must be provided by the caller to ensure atomicity.
145 *
146 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
147 * @enable: if true enables powerdown, if false disables it
148 */
149void ve_spc_powerdown(u32 cluster, bool enable)
150{
151 u32 pwdrn_reg;
152
153 if (cluster >= MAX_CLUSTERS)
154 return;
155
156 pwdrn_reg = cluster_is_a15(cluster) ? A15_PWRDN_EN : A7_PWRDN_EN;
157 writel_relaxed(enable, info->baseaddr + pwdrn_reg);
158}
159
160int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid)
161{
162 info = kzalloc(sizeof(*info), GFP_KERNEL);
163 if (!info) {
164 pr_err(SPCLOG "unable to allocate mem\n");
165 return -ENOMEM;
166 }
167
168 info->baseaddr = baseaddr;
169 info->a15_clusid = a15_clusid;
170
171 /*
172 * Multi-cluster systems may need this data when non-coherent, during
173 * cluster power-up/power-down. Make sure driver info reaches main
174 * memory.
175 */
176 sync_cache_w(info);
177 sync_cache_w(&info);
178
179 return 0;
180}
diff --git a/arch/arm/mach-vexpress/spc.h b/arch/arm/mach-vexpress/spc.h
new file mode 100644
index 000000000000..5f7e4a446a17
--- /dev/null
+++ b/arch/arm/mach-vexpress/spc.h
@@ -0,0 +1,24 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * Copyright (C) 2012 ARM Limited
12 */
13
14
#ifndef __SPC_H_
#define __SPC_H_

/*
 * Early-boot interface to the Versatile Express Serial Power Controller.
 * All calls except ve_spc_init() are lock-free; see spc.c for the
 * atomicity requirements placed on callers.
 */
int __init ve_spc_init(void __iomem *base, u32 a15_clusid);
void ve_spc_global_wakeup_irq(bool set);
void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set);
void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr);
void ve_spc_powerdown(u32 cluster, bool enable);

#endif
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
new file mode 100644
index 000000000000..ddd97dd4e9b7
--- /dev/null
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -0,0 +1,344 @@
1/*
2 * arch/arm/mach-vexpress/tc2_pm.c - TC2 power management support
3 *
4 * Created by: Nicolas Pitre, October 2012
5 * Copyright: (C) 2012-2013 Linaro Limited
6 *
7 * Some portions of this file were originally written by Achin Gupta
8 * Copyright: (C) 2012 ARM Limited
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/init.h>
16#include <linux/io.h>
17#include <linux/kernel.h>
18#include <linux/of_address.h>
19#include <linux/spinlock.h>
20#include <linux/errno.h>
21
22#include <asm/mcpm.h>
23#include <asm/proc-fns.h>
24#include <asm/cacheflush.h>
25#include <asm/cputype.h>
26#include <asm/cp15.h>
27
28#include <linux/arm-cci.h>
29
30#include "spc.h"
31
32/* SCC conf registers */
33#define A15_CONF 0x400
34#define A7_CONF 0x500
35#define SYS_INFO 0x700
36#define SPC_BASE 0xb00
37
/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() after its inbound counterpart
 * is already live using the same logical CPU number which trips lockdep
 * debugging.
 */
static arch_spinlock_t tc2_pm_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* TC2 is a fixed dual-cluster (A15x2 + A7x3) configuration */
#define TC2_CLUSTERS 2
#define TC2_MAX_CPUS_PER_CLUSTER 3

/* CPUs actually present per cluster, filled in from SCC SYS_INFO at init */
static unsigned int tc2_nr_cpus[TC2_CLUSTERS];

/* Keep per-cpu usage count to cope with unordered up/down requests */
static int tc2_pm_use_count[TC2_MAX_CPUS_PER_CLUSTER][TC2_CLUSTERS];

/* True when no CPU in @cluster holds a usage reference */
#define tc2_cluster_unused(cluster) \
	(!tc2_pm_use_count[0][cluster] && \
	 !tc2_pm_use_count[1][cluster] && \
	 !tc2_pm_use_count[2][cluster])
58
/*
 * tc2_pm_power_up - MCPM .power_up method
 *
 * Bumps the per-CPU use count; on the 0 -> 1 transition it programs the
 * SPC mailbox with the MCPM entry point and arms the CPU's wake-up IRQ.
 * Returns 0 on success, -EINVAL when cpu/cluster are out of range.
 */
static int tc2_pm_power_up(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])
		return -EINVAL;

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
	 */
	local_irq_disable();
	arch_spin_lock(&tc2_pm_lock);

	/* First CPU targeting this cluster: cancel any pending cluster powerdown */
	if (tc2_cluster_unused(cluster))
		ve_spc_powerdown(cluster, false);

	tc2_pm_use_count[cpu][cluster]++;
	if (tc2_pm_use_count[cpu][cluster] == 1) {
		ve_spc_set_resume_addr(cluster, cpu,
				       virt_to_phys(mcpm_entry_point));
		ve_spc_cpu_wakeup_irq(cluster, cpu, true);
	} else if (tc2_pm_use_count[cpu][cluster] != 2) {
		/*
		 * The only possible values are:
		 * 0 = CPU down
		 * 1 = CPU (still) up
		 * 2 = CPU requested to be up before it had a chance
		 *     to actually make itself down.
		 * Any other value is a bug.
		 */
		BUG();
	}

	arch_spin_unlock(&tc2_pm_lock);
	local_irq_enable();

	return 0;
}
97
/*
 * tc2_pm_down - shared core of the MCPM power_down and suspend paths
 * @residency: expected residency hint passed by tc2_pm_suspend()
 *             (currently unused by this backend)
 *
 * Drops this CPU's use count. On the 1 -> 0 transition the wake-up IRQ
 * is armed; if the whole cluster is then unused we become "last man",
 * enable cluster powerdown and the global wake-up IRQ, and tear down
 * cluster-level coherency. Ends in WFI unless a concurrent power_up
 * raced us back to a non-zero count (skip_wfi).
 */
static void tc2_pm_down(u64 residency)
{
	unsigned int mpidr, cpu, cluster;
	bool last_man = false, skip_wfi = false;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);

	__mcpm_cpu_going_down(cpu, cluster);

	arch_spin_lock(&tc2_pm_lock);
	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
	tc2_pm_use_count[cpu][cluster]--;
	if (tc2_pm_use_count[cpu][cluster] == 0) {
		/* Arm the wake-up IRQ so the SPC can bring this CPU back */
		ve_spc_cpu_wakeup_irq(cluster, cpu, true);
		if (tc2_cluster_unused(cluster)) {
			ve_spc_powerdown(cluster, true);
			ve_spc_global_wakeup_irq(true);
			last_man = true;
		}
	} else if (tc2_pm_use_count[cpu][cluster] == 1) {
		/*
		 * A power_up request went ahead of us.
		 * Even if we do not want to shut this CPU down,
		 * the caller expects a certain state as if the WFI
		 * was aborted. So let's continue with cache cleaning.
		 */
		skip_wfi = true;
	} else
		BUG();

	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
		arch_spin_unlock(&tc2_pm_lock);

		if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) {
			/*
			 * On the Cortex-A15 we need to disable
			 * L2 prefetching before flushing the cache.
			 */
			asm volatile(
			"mcr	p15, 1, %0, c15, c0, 3 \n\t"
			"isb	\n\t"
			"dsb	"
			: : "r" (0x400) );
		}

		/*
		 * We need to disable and flush the whole (L1 and L2) cache.
		 * Let's do it in the safest possible way i.e. with
		 * no memory access within the following sequence
		 * including the stack.
		 */
		asm volatile(
		"mrc	p15, 0, r0, c1, c0, 0	@ get CR \n\t"
		"bic	r0, r0, #"__stringify(CR_C)" \n\t"
		"mcr	p15, 0, r0, c1, c0, 0	@ set CR \n\t"
		"isb	\n\t"
		"bl	v7_flush_dcache_all \n\t"
		"clrex	\n\t"
		"mrc	p15, 0, r0, c1, c0, 1	@ get AUXCR \n\t"
		"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t"
		"mcr	p15, 0, r0, c1, c0, 1	@ set AUXCR \n\t"
		"isb	\n\t"
		"dsb	"
		: : : "r0","r1","r2","r3","r4","r5","r6","r7",
		      "r9","r10","r11","lr","memory");

		cci_disable_port_by_cpu(mpidr);

		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	} else {
		/*
		 * If last man then undo any setup done previously.
		 */
		if (last_man) {
			ve_spc_powerdown(cluster, false);
			ve_spc_global_wakeup_irq(false);
		}

		arch_spin_unlock(&tc2_pm_lock);

		/*
		 * We need to disable and flush only the L1 cache.
		 * Let's do it in the safest possible way as above.
		 */
		asm volatile(
		"mrc	p15, 0, r0, c1, c0, 0	@ get CR \n\t"
		"bic	r0, r0, #"__stringify(CR_C)" \n\t"
		"mcr	p15, 0, r0, c1, c0, 0	@ set CR \n\t"
		"isb	\n\t"
		"bl	v7_flush_dcache_louis \n\t"
		"clrex	\n\t"
		"mrc	p15, 0, r0, c1, c0, 1	@ get AUXCR \n\t"
		"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t"
		"mcr	p15, 0, r0, c1, c0, 1	@ set AUXCR \n\t"
		"isb	\n\t"
		"dsb	"
		: : : "r0","r1","r2","r3","r4","r5","r6","r7",
		      "r9","r10","r11","lr","memory");
	}

	__mcpm_cpu_down(cpu, cluster);

	/* Now we are prepared for power-down, do it: */
	if (!skip_wfi)
		wfi();

	/* Not dead at this point? Let our caller cope. */
}
211
/* MCPM .power_down method: plain power down, no residency hint. */
static void tc2_pm_power_down(void)
{
	tc2_pm_down(0);
}
216
217static void tc2_pm_suspend(u64 residency)
218{
219 unsigned int mpidr, cpu, cluster;
220
221 mpidr = read_cpuid_mpidr();
222 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
223 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
224 ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point));
225 tc2_pm_down(residency);
226}
227
/*
 * tc2_pm_powered_up - MCPM .powered_up method, run on a freshly woken CPU
 *
 * Undoes the power-down bookkeeping: if this is the first CPU up in its
 * cluster, cancels cluster powerdown and the global wake-up IRQ; restores
 * the use count for a CPU that completed a down/up cycle; and clears this
 * CPU's wake-up IRQ and resume address.
 */
static void tc2_pm_powered_up(void)
{
	unsigned int mpidr, cpu, cluster;
	unsigned long flags;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);

	local_irq_save(flags);
	arch_spin_lock(&tc2_pm_lock);

	if (tc2_cluster_unused(cluster)) {
		ve_spc_powerdown(cluster, false);
		ve_spc_global_wakeup_irq(false);
	}

	/* A count of 0 means this CPU actually went down: mark it up again */
	if (!tc2_pm_use_count[cpu][cluster])
		tc2_pm_use_count[cpu][cluster] = 1;

	ve_spc_cpu_wakeup_irq(cluster, cpu, false);
	ve_spc_set_resume_addr(cluster, cpu, 0);

	arch_spin_unlock(&tc2_pm_lock);
	local_irq_restore(flags);
}
257
/* MCPM backend operations for the TC2 tile. */
static const struct mcpm_platform_ops tc2_pm_power_ops = {
	.power_up = tc2_pm_power_up,
	.power_down = tc2_pm_power_down,
	.suspend = tc2_pm_suspend,
	.powered_up = tc2_pm_powered_up,
};
264
265static bool __init tc2_pm_usage_count_init(void)
266{
267 unsigned int mpidr, cpu, cluster;
268
269 mpidr = read_cpuid_mpidr();
270 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
271 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
272
273 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
274 if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
275 pr_err("%s: boot CPU is out of bound!\n", __func__);
276 return false;
277 }
278 tc2_pm_use_count[cpu][cluster] = 1;
279 return true;
280}
281
/*
 * Enable cluster-level coherency, in preparation for turning on the MMU.
 *
 * Runs with caches off and must not touch the stack (__naked, pure asm).
 * r0 carries the affinity level: only level 1 (cluster) needs work, where
 * the CPU enables its own CCI slave port; other levels return immediately.
 */
static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
{
	asm volatile (" \n"
"	cmp	r0, #1 \n"
"	bxne	lr \n"
"	b	cci_enable_port_for_self ");
}
292
293static int __init tc2_pm_init(void)
294{
295 int ret;
296 void __iomem *scc;
297 u32 a15_cluster_id, a7_cluster_id, sys_info;
298 struct device_node *np;
299
300 /*
301 * The power management-related features are hidden behind
302 * SCC registers. We need to extract runtime information like
303 * cluster ids and number of CPUs really available in clusters.
304 */
305 np = of_find_compatible_node(NULL, NULL,
306 "arm,vexpress-scc,v2p-ca15_a7");
307 scc = of_iomap(np, 0);
308 if (!scc)
309 return -ENODEV;
310
311 a15_cluster_id = readl_relaxed(scc + A15_CONF) & 0xf;
312 a7_cluster_id = readl_relaxed(scc + A7_CONF) & 0xf;
313 if (a15_cluster_id >= TC2_CLUSTERS || a7_cluster_id >= TC2_CLUSTERS)
314 return -EINVAL;
315
316 sys_info = readl_relaxed(scc + SYS_INFO);
317 tc2_nr_cpus[a15_cluster_id] = (sys_info >> 16) & 0xf;
318 tc2_nr_cpus[a7_cluster_id] = (sys_info >> 20) & 0xf;
319
320 /*
321 * A subset of the SCC registers is also used to communicate
322 * with the SPC (power controller). We need to be able to
323 * drive it very early in the boot process to power up
324 * processors, so we initialize the SPC driver here.
325 */
326 ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id);
327 if (ret)
328 return ret;
329
330 if (!cci_probed())
331 return -ENODEV;
332
333 if (!tc2_pm_usage_count_init())
334 return -EINVAL;
335
336 ret = mcpm_platform_register(&tc2_pm_power_ops);
337 if (!ret) {
338 mcpm_sync_init(tc2_pm_power_up_setup);
339 pr_info("TC2 power management initialized\n");
340 }
341 return ret;
342}
343
344early_initcall(tc2_pm_init);