author    Kever Yang <kever.yang@rock-chips.com>    2014-10-15 13:23:03 -0400
committer Heiko Stuebner <heiko@sntech.de>          2014-11-05 16:18:40 -0500
commit    3ee851e212d0bb6be8c462059fba74ce2e3f6064 (patch)
tree      abf38ddb77760e6390ed3982f4067500cfd75a37
parent    6de2d21adaf05b7a456077625b6e311feabd3718 (diff)
ARM: rockchip: add basic smp support for rk3288
This patch adds basic rk3288 SMP support.

Only Cortex-A9 needs to invalidate L1; A7/A12/A15/A17 must not, since on
A7/A12/A15 the invalidation is executed as a clean and invalidate. If the
software manual invalidation is used after reset instead of the hardware
invalidation (asserting l1/l2rstdisable during reset), there is a tiny
chance that some cache lines are in a dirty and valid state after reset
(the cache RAM contents are random at that point), and the unexpected
clean can then crash the system.

It is a known issue of the A12/A17 MPCore multiprocessor that the active
processors might stall when an individual processor is powered down. We
avoid this problem by soft-resetting the processor before powering it down.

Signed-off-by: Kever Yang <kever.yang@rock-chips.com>
Tested-by: Kevin Hilman <khilman@linaro.org>
Signed-off-by: Heiko Stuebner <heiko@sntech.de>
-rw-r--r--  arch/arm/mach-rockchip/headsmp.S |   5
-rw-r--r--  arch/arm/mach-rockchip/platsmp.c | 120
2 files changed, 100 insertions, 25 deletions
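
For context (illustration only, not part of the patch): the runtime distinction
described in the commit message above is made from the primary part number field
of the MIDR, which is 0xC09 for Cortex-A9; headsmp.S checks it in assembly and
platsmp.c uses read_cpuid_part(). A minimal stand-alone sketch of that decode,
with the field layout taken from the MIDR definition (names here are invented
for the sketch):

    /*
     * Sketch only: extract the primary part number (MIDR[15:4]) and test
     * for Cortex-A9 (0xC09), the field these checks are based on.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define MIDR_PART(midr)     (((midr) >> 4) & 0xfff)   /* MIDR[15:4] */
    #define CORTEX_A9_PART      0xc09

    static int is_cortex_a9(uint32_t midr)
    {
            return MIDR_PART(midr) == CORTEX_A9_PART;
    }

    int main(void)
    {
            uint32_t midr = 0x413fc090;     /* e.g. a Cortex-A9 r3p0 MIDR */

            printf("cortex-a9: %s\n", is_cortex_a9(midr) ? "yes" : "no");
            return 0;
    }
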
diff --git a/arch/arm/mach-rockchip/headsmp.S b/arch/arm/mach-rockchip/headsmp.S
index 73206e360e31..46c22dedf632 100644
--- a/arch/arm/mach-rockchip/headsmp.S
+++ b/arch/arm/mach-rockchip/headsmp.S
@@ -16,7 +16,10 @@
 #include <linux/init.h>
 
 ENTRY(rockchip_secondary_startup)
-        bl      v7_invalidate_l1
+        mrc     p15, 0, r0, c0, c0, 0   @ read main ID register
+        ldr     r1, =0x00000c09         @ Cortex-A9 primary part number
+        teq     r0, r1
+        beq     v7_invalidate_l1
         b       secondary_startup
 ENDPROC(rockchip_secondary_startup)
 
diff --git a/arch/arm/mach-rockchip/platsmp.c b/arch/arm/mach-rockchip/platsmp.c
index 57b53b32e103..f26fcdca2445 100644
--- a/arch/arm/mach-rockchip/platsmp.c
+++ b/arch/arm/mach-rockchip/platsmp.c
@@ -22,6 +22,8 @@
 #include <linux/regmap.h>
 #include <linux/mfd/syscon.h>
 
+#include <linux/reset.h>
+#include <linux/cpu.h>
 #include <asm/cacheflush.h>
 #include <asm/cp15.h>
 #include <asm/smp_scu.h>
@@ -53,11 +55,47 @@ static int pmu_power_domain_is_on(int pd)
         return !(val & BIT(pd));
 }
 
+struct reset_control *rockchip_get_core_reset(int cpu)
+{
+        struct device *dev = get_cpu_device(cpu);
+        struct device_node *np;
+
+        /* The cpu device is only available after the initial core bringup */
+        if (dev)
+                np = dev->of_node;
+        else
+                np = of_get_cpu_node(cpu, 0);
+
+        return of_reset_control_get(np, NULL);
+}
+
 static int pmu_set_power_domain(int pd, bool on)
 {
         u32 val = (on) ? 0 : BIT(pd);
         int ret;
 
+        /*
+         * We need to soft reset the cpu when we turn off the cpu power domain,
+         * or else the active processors might be stalled when the individual
+         * processor is powered down.
+         */
+        if (read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
+                struct reset_control *rstc = rockchip_get_core_reset(pd);
+
+                if (IS_ERR(rstc)) {
+                        pr_err("%s: could not get reset control for core %d\n",
+                               __func__, pd);
+                        return PTR_ERR(rstc);
+                }
+
+                if (on)
+                        reset_control_deassert(rstc);
+                else
+                        reset_control_assert(rstc);
+
+                reset_control_put(rstc);
+        }
+
         ret = regmap_update_bits(pmu, PMU_PWRDN_CON, BIT(pd), val);
         if (ret < 0) {
                 pr_err("%s: could not update power domain\n", __func__);
@@ -84,6 +122,8 @@ static int pmu_set_power_domain(int pd, bool on)
 static int __cpuinit rockchip_boot_secondary(unsigned int cpu,
                                              struct task_struct *idle)
 {
+        int ret;
+
         if (!sram_base_addr || !pmu) {
                 pr_err("%s: sram or pmu missing for cpu boot\n", __func__);
                 return -ENXIO;
@@ -96,7 +136,26 @@ static int __cpuinit rockchip_boot_secondary(unsigned int cpu,
         }
 
         /* start the core */
-        return pmu_set_power_domain(0 + cpu, true);
+        ret = pmu_set_power_domain(0 + cpu, true);
+        if (ret < 0)
+                return ret;
+
+        if (read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
+                /* We communicate with the bootrom to active the cpus other
+                 * than cpu0, after a blob of initialize code, they will
+                 * stay at wfe state, once they are actived, they will check
+                 * the mailbox:
+                 * sram_base_addr + 4: 0xdeadbeaf
+                 * sram_base_addr + 8: start address for pc
+                 * */
+                udelay(10);
+                writel(virt_to_phys(rockchip_secondary_startup),
+                       sram_base_addr + 8);
+                writel(0xDEADBEAF, sram_base_addr + 4);
+                dsb_sev();
+        }
+
+        return 0;
 }
 
 /**
@@ -129,8 +188,6 @@ static int __init rockchip_smp_prepare_sram(struct device_node *node)
                 return -EINVAL;
         }
 
-        sram_base_addr = of_iomap(node, 0);
-
         /* set the boot function for the sram code */
         rockchip_boot_fn = virt_to_phys(rockchip_secondary_startup);
 
@@ -204,40 +261,55 @@ static void __init rockchip_smp_prepare_cpus(unsigned int max_cpus)
         struct device_node *node;
         unsigned int i;
 
-        node = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
-        if (!node) {
-                pr_err("%s: missing scu\n", __func__);
-                return;
-        }
-
-        scu_base_addr = of_iomap(node, 0);
-        if (!scu_base_addr) {
-                pr_err("%s: could not map scu registers\n", __func__);
-                return;
-        }
-
         node = of_find_compatible_node(NULL, NULL, "rockchip,rk3066-smp-sram");
         if (!node) {
                 pr_err("%s: could not find sram dt node\n", __func__);
                 return;
         }
 
-        if (rockchip_smp_prepare_sram(node))
+        sram_base_addr = of_iomap(node, 0);
+        if (!sram_base_addr) {
+                pr_err("%s: could not map sram registers\n", __func__);
                 return;
+        }
 
         if (rockchip_smp_prepare_pmu())
                 return;
 
-        /* enable the SCU power domain */
-        pmu_set_power_domain(PMU_PWRDN_SCU, true);
-
-        /*
-         * While the number of cpus is gathered from dt, also get the number
-         * of cores from the scu to verify this value when booting the cores.
-         */
-        ncores = scu_get_core_count(scu_base_addr);
+        if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) {
+                if (rockchip_smp_prepare_sram(node))
+                        return;
+
+                /* enable the SCU power domain */
+                pmu_set_power_domain(PMU_PWRDN_SCU, true);
+
+                node = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
+                if (!node) {
+                        pr_err("%s: missing scu\n", __func__);
+                        return;
+                }
 
-        scu_enable(scu_base_addr);
+                scu_base_addr = of_iomap(node, 0);
+                if (!scu_base_addr) {
+                        pr_err("%s: could not map scu registers\n", __func__);
+                        return;
+                }
+
+                /*
+                 * While the number of cpus is gathered from dt, also get the
+                 * number of cores from the scu to verify this value when
+                 * booting the cores.
+                 */
+                ncores = scu_get_core_count(scu_base_addr);
+                pr_err("%s: ncores %d\n", __func__, ncores);
+
+                scu_enable(scu_base_addr);
+        } else {
+                unsigned int l2ctlr;
+
+                asm ("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
+                ncores = ((l2ctlr >> 24) & 0x3) + 1;
+        }
 
         /* Make sure that all cores except the first are really off */
         for (i = 1; i < ncores; i++)
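
Two rk3288-specific details from the platsmp.c changes above, condensed into a
stand-alone sketch (illustration only, not the kernel code; the function names
are invented, while the constants and the L2CTLR field come from the diff): the
SRAM mailbox used to release secondaries that the bootrom parks in WFE, and the
core-count decode used when there is no Cortex-A9 SCU.

    #include <stdint.h>

    #define SRAM_MAGIC      0xdeadbeafu     /* value polled by the bootrom blob    */
    #define SRAM_MAGIC_OFF  4               /* sram_base_addr + 4                  */
    #define SRAM_ENTRY_OFF  8               /* sram_base_addr + 8: secondary entry */

    /* Wake a secondary core left spinning in WFE by the bootrom. */
    static void sram_mailbox_kick(volatile uint8_t *sram, uint32_t entry_phys)
    {
            /* Entry address first, magic word last (followed by dsb_sev() in
             * the patch), so the magic is never seen with a stale address. */
            *(volatile uint32_t *)(sram + SRAM_ENTRY_OFF) = entry_phys;
            *(volatile uint32_t *)(sram + SRAM_MAGIC_OFF) = SRAM_MAGIC;
    }

    /* Core count from L2CTLR[25:24] ("number of CPUs" minus one). */
    static unsigned int l2ctlr_core_count(uint32_t l2ctlr)
    {
            return ((l2ctlr >> 24) & 0x3) + 1;
    }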