about summary refs log tree commit diff stats
path: root/arch/arm/mach-shmobile/smp-r8a7779.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/arm/mach-shmobile/smp-r8a7779.c')
-rw-r--r--arch/arm/mach-shmobile/smp-r8a7779.c129
1 files changed, 62 insertions, 67 deletions
diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c
index 994906560edd..a853bf182ed5 100644
--- a/arch/arm/mach-shmobile/smp-r8a7779.c
+++ b/arch/arm/mach-shmobile/smp-r8a7779.c
@@ -25,11 +25,13 @@
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <mach/common.h> 26#include <mach/common.h>
27#include <mach/r8a7779.h> 27#include <mach/r8a7779.h>
28#include <asm/cacheflush.h>
28#include <asm/smp_plat.h> 29#include <asm/smp_plat.h>
29#include <asm/smp_scu.h> 30#include <asm/smp_scu.h>
30#include <asm/smp_twd.h> 31#include <asm/smp_twd.h>
31 32
32#define AVECR IOMEM(0xfe700040) 33#define AVECR IOMEM(0xfe700040)
34#define R8A7779_SCU_BASE 0xf0000000
33 35
34static struct r8a7779_pm_ch r8a7779_ch_cpu1 = { 36static struct r8a7779_pm_ch r8a7779_ch_cpu1 = {
35 .chan_offs = 0x40, /* PWRSR0 .. PWRER0 */ 37 .chan_offs = 0x40, /* PWRSR0 .. PWRER0 */
@@ -55,44 +57,14 @@ static struct r8a7779_pm_ch *r8a7779_ch_cpu[4] = {
55 [3] = &r8a7779_ch_cpu3, 57 [3] = &r8a7779_ch_cpu3,
56}; 58};
57 59
58static void __iomem *scu_base_addr(void)
59{
60 return (void __iomem *)0xf0000000;
61}
62
63static DEFINE_SPINLOCK(scu_lock);
64static unsigned long tmp;
65
66#ifdef CONFIG_HAVE_ARM_TWD 60#ifdef CONFIG_HAVE_ARM_TWD
67static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, 0xf0000600, 29); 61static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, R8A7779_SCU_BASE + 0x600, 29);
68
69void __init r8a7779_register_twd(void) 62void __init r8a7779_register_twd(void)
70{ 63{
71 twd_local_timer_register(&twd_local_timer); 64 twd_local_timer_register(&twd_local_timer);
72} 65}
73#endif 66#endif
74 67
75static void modify_scu_cpu_psr(unsigned long set, unsigned long clr)
76{
77 void __iomem *scu_base = scu_base_addr();
78
79 spin_lock(&scu_lock);
80 tmp = __raw_readl(scu_base + 8);
81 tmp &= ~clr;
82 tmp |= set;
83 spin_unlock(&scu_lock);
84
85 /* disable cache coherency after releasing the lock */
86 __raw_writel(tmp, scu_base + 8);
87}
88
89static unsigned int __init r8a7779_get_core_count(void)
90{
91 void __iomem *scu_base = scu_base_addr();
92
93 return scu_get_core_count(scu_base);
94}
95
96static int r8a7779_platform_cpu_kill(unsigned int cpu) 68static int r8a7779_platform_cpu_kill(unsigned int cpu)
97{ 69{
98 struct r8a7779_pm_ch *ch = NULL; 70 struct r8a7779_pm_ch *ch = NULL;
@@ -100,9 +72,6 @@ static int r8a7779_platform_cpu_kill(unsigned int cpu)
100 72
101 cpu = cpu_logical_map(cpu); 73 cpu = cpu_logical_map(cpu);
102 74
103 /* disable cache coherency */
104 modify_scu_cpu_psr(3 << (cpu * 8), 0);
105
106 if (cpu < ARRAY_SIZE(r8a7779_ch_cpu)) 75 if (cpu < ARRAY_SIZE(r8a7779_ch_cpu))
107 ch = r8a7779_ch_cpu[cpu]; 76 ch = r8a7779_ch_cpu[cpu];
108 77
@@ -112,25 +81,6 @@ static int r8a7779_platform_cpu_kill(unsigned int cpu)
112 return ret ? ret : 1; 81 return ret ? ret : 1;
113} 82}
114 83
115static int __maybe_unused r8a7779_cpu_kill(unsigned int cpu)
116{
117 int k;
118
119 /* this function is running on another CPU than the offline target,
120 * here we need wait for shutdown code in platform_cpu_die() to
121 * finish before asking SoC-specific code to power off the CPU core.
122 */
123 for (k = 0; k < 1000; k++) {
124 if (shmobile_cpu_is_dead(cpu))
125 return r8a7779_platform_cpu_kill(cpu);
126
127 mdelay(1);
128 }
129
130 return 0;
131}
132
133
134static int __cpuinit r8a7779_boot_secondary(unsigned int cpu, struct task_struct *idle) 84static int __cpuinit r8a7779_boot_secondary(unsigned int cpu, struct task_struct *idle)
135{ 85{
136 struct r8a7779_pm_ch *ch = NULL; 86 struct r8a7779_pm_ch *ch = NULL;
@@ -138,9 +88,6 @@ static int __cpuinit r8a7779_boot_secondary(unsigned int cpu, struct task_struct
138 88
139 cpu = cpu_logical_map(cpu); 89 cpu = cpu_logical_map(cpu);
140 90
141 /* enable cache coherency */
142 modify_scu_cpu_psr(0, 3 << (cpu * 8));
143
144 if (cpu < ARRAY_SIZE(r8a7779_ch_cpu)) 91 if (cpu < ARRAY_SIZE(r8a7779_ch_cpu))
145 ch = r8a7779_ch_cpu[cpu]; 92 ch = r8a7779_ch_cpu[cpu];
146 93
@@ -152,15 +99,13 @@ static int __cpuinit r8a7779_boot_secondary(unsigned int cpu, struct task_struct
152 99
153static void __init r8a7779_smp_prepare_cpus(unsigned int max_cpus) 100static void __init r8a7779_smp_prepare_cpus(unsigned int max_cpus)
154{ 101{
155 int cpu = cpu_logical_map(0); 102 scu_enable(shmobile_scu_base);
156 103
157 scu_enable(scu_base_addr()); 104 /* Map the reset vector (in headsmp-scu.S) */
105 __raw_writel(__pa(shmobile_secondary_vector_scu), AVECR);
158 106
159 /* Map the reset vector (in headsmp.S) */ 107 /* enable cache coherency on booting CPU */
160 __raw_writel(__pa(shmobile_secondary_vector), AVECR); 108 scu_power_mode(shmobile_scu_base, SCU_PM_NORMAL);
161
162 /* enable cache coherency on CPU0 */
163 modify_scu_cpu_psr(0, 3 << (cpu * 8));
164 109
165 r8a7779_pm_init(); 110 r8a7779_pm_init();
166 111
@@ -172,10 +117,60 @@ static void __init r8a7779_smp_prepare_cpus(unsigned int max_cpus)
172 117
173static void __init r8a7779_smp_init_cpus(void) 118static void __init r8a7779_smp_init_cpus(void)
174{ 119{
175 unsigned int ncores = r8a7779_get_core_count(); 120 /* setup r8a7779 specific SCU base */
121 shmobile_scu_base = IOMEM(R8A7779_SCU_BASE);
122
123 shmobile_smp_init_cpus(scu_get_core_count(shmobile_scu_base));
124}
176 125
177 shmobile_smp_init_cpus(ncores); 126#ifdef CONFIG_HOTPLUG_CPU
127static int r8a7779_scu_psr_core_disabled(int cpu)
128{
129 unsigned long mask = 3 << (cpu * 8);
130
131 if ((__raw_readl(shmobile_scu_base + 8) & mask) == mask)
132 return 1;
133
134 return 0;
135}
136
137static int r8a7779_cpu_kill(unsigned int cpu)
138{
139 int k;
140
141 /* this function is running on another CPU than the offline target,
142 * here we need wait for shutdown code in platform_cpu_die() to
143 * finish before asking SoC-specific code to power off the CPU core.
144 */
145 for (k = 0; k < 1000; k++) {
146 if (r8a7779_scu_psr_core_disabled(cpu))
147 return r8a7779_platform_cpu_kill(cpu);
148
149 mdelay(1);
150 }
151
152 return 0;
153}
154
155static void r8a7779_cpu_die(unsigned int cpu)
156{
157 dsb();
158 flush_cache_all();
159
160 /* disable cache coherency */
161 scu_power_mode(shmobile_scu_base, SCU_PM_POWEROFF);
162
163 /* Endless loop until power off from r8a7779_cpu_kill() */
164 while (1)
165 cpu_do_idle();
166}
167
168static int r8a7779_cpu_disable(unsigned int cpu)
169{
170 /* only CPU1->3 have power domains, do not allow hotplug of CPU0 */
171 return cpu == 0 ? -EPERM : 0;
178} 172}
173#endif /* CONFIG_HOTPLUG_CPU */
179 174
180struct smp_operations r8a7779_smp_ops __initdata = { 175struct smp_operations r8a7779_smp_ops __initdata = {
181 .smp_init_cpus = r8a7779_smp_init_cpus, 176 .smp_init_cpus = r8a7779_smp_init_cpus,
@@ -183,7 +178,7 @@ struct smp_operations r8a7779_smp_ops __initdata = {
183 .smp_boot_secondary = r8a7779_boot_secondary, 178 .smp_boot_secondary = r8a7779_boot_secondary,
184#ifdef CONFIG_HOTPLUG_CPU 179#ifdef CONFIG_HOTPLUG_CPU
185 .cpu_kill = r8a7779_cpu_kill, 180 .cpu_kill = r8a7779_cpu_kill,
186 .cpu_die = shmobile_cpu_die, 181 .cpu_die = r8a7779_cpu_die,
187 .cpu_disable = shmobile_cpu_disable, 182 .cpu_disable = r8a7779_cpu_disable,
188#endif 183#endif
189}; 184};