Diffstat (limited to 'arch/arm/mach-exynos')
-rw-r--r--  arch/arm/mach-exynos/common.h       |   6
-rw-r--r--  arch/arm/mach-exynos/exynos.c       |   1
-rw-r--r--  arch/arm/mach-exynos/firmware.c     |  33
-rw-r--r--  arch/arm/mach-exynos/mcpm-exynos.c  | 247
-rw-r--r--  arch/arm/mach-exynos/platsmp.c      |  23
-rw-r--r--  arch/arm/mach-exynos/pm.c           |  12
-rw-r--r--  arch/arm/mach-exynos/pm_domains.c   |  29
-rw-r--r--  arch/arm/mach-exynos/regs-pmu.h     |   3
-rw-r--r--  arch/arm/mach-exynos/smc.h          |   9
-rw-r--r--  arch/arm/mach-exynos/suspend.c      |  22
10 files changed, 167 insertions(+), 218 deletions(-)
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
index f70eca7ee705..acd5b560b728 100644
--- a/arch/arm/mach-exynos/common.h
+++ b/arch/arm/mach-exynos/common.h
@@ -126,6 +126,12 @@ enum {
 
 void exynos_firmware_init(void);
 
+/* CPU BOOT mode flag for Exynos3250 SoC bootloader */
+#define C2_STATE		(1 << 3)
+
+void exynos_set_boot_flag(unsigned int cpu, unsigned int mode);
+void exynos_clear_boot_flag(unsigned int cpu, unsigned int mode);
+
 extern u32 exynos_get_eint_wake_mask(void);
 
 #ifdef CONFIG_PM_SLEEP
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index 8576a9f734bd..bcde0dd668df 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -214,6 +214,7 @@ static void __init exynos_dt_machine_init(void)
 	    of_machine_is_compatible("samsung,exynos4212") ||
 	    (of_machine_is_compatible("samsung,exynos4412") &&
 	     of_machine_is_compatible("samsung,trats2")) ||
+	    of_machine_is_compatible("samsung,exynos3250") ||
 	    of_machine_is_compatible("samsung,exynos5250"))
 		platform_device_register(&exynos_cpuidle);
 
diff --git a/arch/arm/mach-exynos/firmware.c b/arch/arm/mach-exynos/firmware.c
index 4791a3cc00f9..1bd35763f12e 100644
--- a/arch/arm/mach-exynos/firmware.c
+++ b/arch/arm/mach-exynos/firmware.c
@@ -48,7 +48,13 @@ static int exynos_do_idle(unsigned long mode)
 		__raw_writel(virt_to_phys(exynos_cpu_resume_ns),
 			     sysram_ns_base_addr + 0x24);
 		__raw_writel(EXYNOS_AFTR_MAGIC, sysram_ns_base_addr + 0x20);
-		exynos_smc(SMC_CMD_CPU0AFTR, 0, 0, 0);
+		if (soc_is_exynos3250()) {
+			exynos_smc(SMC_CMD_SAVE, OP_TYPE_CORE,
+				   SMC_POWERSTATE_IDLE, 0);
+			exynos_smc(SMC_CMD_SHUTDOWN, OP_TYPE_CLUSTER,
+				   SMC_POWERSTATE_IDLE, 0);
+		} else
+			exynos_smc(SMC_CMD_CPU0AFTR, 0, 0, 0);
 		break;
 	case FW_DO_IDLE_SLEEP:
 		exynos_smc(SMC_CMD_SLEEP, 0, 0, 0);
@@ -206,3 +212,28 @@ void __init exynos_firmware_init(void)
 		outer_cache.configure = exynos_l2_configure;
 	}
 }
+
+#define REG_CPU_STATE_ADDR	(sysram_ns_base_addr + 0x28)
+#define BOOT_MODE_MASK		0x1f
+
+void exynos_set_boot_flag(unsigned int cpu, unsigned int mode)
+{
+	unsigned int tmp;
+
+	tmp = __raw_readl(REG_CPU_STATE_ADDR + cpu * 4);
+
+	if (mode & BOOT_MODE_MASK)
+		tmp &= ~BOOT_MODE_MASK;
+
+	tmp |= mode;
+	__raw_writel(tmp, REG_CPU_STATE_ADDR + cpu * 4);
+}
+
+void exynos_clear_boot_flag(unsigned int cpu, unsigned int mode)
+{
+	unsigned int tmp;
+
+	tmp = __raw_readl(REG_CPU_STATE_ADDR + cpu * 4);
+	tmp &= ~mode;
+	__raw_writel(tmp, REG_CPU_STATE_ADDR + cpu * 4);
+}
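
Note (not part of the patch): the two helpers above keep one 32-bit state word per CPU at sysram_ns_base_addr + 0x28, with the low five bits (BOOT_MODE_MASK) reserved for the boot mode read back by the Exynos3250 bootloader. A minimal, purely illustrative sketch of a caller, mirroring what the pm.c hunk further below adds for C2/AFTR entry (example_c2_entry is hypothetical):

	static void example_c2_entry(void)
	{
		unsigned int cpu = smp_processor_id();

		exynos_set_boot_flag(cpu, C2_STATE);	/* mark this CPU as idling in C2 */
		cpu_do_idle();				/* enter the low-power state */
		exynos_clear_boot_flag(cpu, C2_STATE);	/* restore the normal boot path */
	}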
diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c
index b0d3c2e876fb..9bdf54795f05 100644
--- a/arch/arm/mach-exynos/mcpm-exynos.c
+++ b/arch/arm/mach-exynos/mcpm-exynos.c
@@ -61,25 +61,7 @@ static void __iomem *ns_sram_base_addr;
61 : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \ 61 : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
62 "r9", "r10", "lr", "memory") 62 "r9", "r10", "lr", "memory")
63 63
64/* 64static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)
65 * We can't use regular spinlocks. In the switcher case, it is possible
66 * for an outbound CPU to call power_down() after its inbound counterpart
67 * is already live using the same logical CPU number which trips lockdep
68 * debugging.
69 */
70static arch_spinlock_t exynos_mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
71static int
72cpu_use_count[EXYNOS5420_CPUS_PER_CLUSTER][EXYNOS5420_NR_CLUSTERS];
73
74#define exynos_cluster_usecnt(cluster) \
75 (cpu_use_count[0][cluster] + \
76 cpu_use_count[1][cluster] + \
77 cpu_use_count[2][cluster] + \
78 cpu_use_count[3][cluster])
79
80#define exynos_cluster_unused(cluster) !exynos_cluster_usecnt(cluster)
81
82static int exynos_power_up(unsigned int cpu, unsigned int cluster)
83{ 65{
84 unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER); 66 unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
85 67
@@ -88,127 +70,65 @@ static int exynos_power_up(unsigned int cpu, unsigned int cluster)
 	    cluster >= EXYNOS5420_NR_CLUSTERS)
 		return -EINVAL;
 
-	/*
-	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
-	 * variant exists, we need to disable IRQs manually here.
-	 */
-	local_irq_disable();
-	arch_spin_lock(&exynos_mcpm_lock);
-
-	cpu_use_count[cpu][cluster]++;
-	if (cpu_use_count[cpu][cluster] == 1) {
-		bool was_cluster_down =
-			(exynos_cluster_usecnt(cluster) == 1);
-
-		/*
-		 * Turn on the cluster (L2/COMMON) and then power on the
-		 * cores.
-		 */
-		if (was_cluster_down)
-			exynos_cluster_power_up(cluster);
-
-		exynos_cpu_power_up(cpunr);
-	} else if (cpu_use_count[cpu][cluster] != 2) {
-		/*
-		 * The only possible values are:
-		 * 0 = CPU down
-		 * 1 = CPU (still) up
-		 * 2 = CPU requested to be up before it had a chance
-		 *     to actually make itself down.
-		 * Any other value is a bug.
-		 */
-		BUG();
-	}
+	exynos_cpu_power_up(cpunr);
+	return 0;
+}
 
-	arch_spin_unlock(&exynos_mcpm_lock);
-	local_irq_enable();
+static int exynos_cluster_powerup(unsigned int cluster)
+{
+	pr_debug("%s: cluster %u\n", __func__, cluster);
+	if (cluster >= EXYNOS5420_NR_CLUSTERS)
+		return -EINVAL;
 
+	exynos_cluster_power_up(cluster);
 	return 0;
 }
 
-/*
- * NOTE: This function requires the stack data to be visible through power down
- * and can only be executed on processors like A15 and A7 that hit the cache
- * with the C bit clear in the SCTLR register.
- */
-static void exynos_power_down(void)
+static void exynos_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
 {
-	unsigned int mpidr, cpu, cluster;
-	bool last_man = false, skip_wfi = false;
-	unsigned int cpunr;
-
-	mpidr = read_cpuid_mpidr();
-	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-	cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
+	unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
 
 	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
 	BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
 		cluster >= EXYNOS5420_NR_CLUSTERS);
+	exynos_cpu_power_down(cpunr);
+}
 
-	__mcpm_cpu_going_down(cpu, cluster);
-
-	arch_spin_lock(&exynos_mcpm_lock);
-	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
-	cpu_use_count[cpu][cluster]--;
-	if (cpu_use_count[cpu][cluster] == 0) {
-		exynos_cpu_power_down(cpunr);
-
-		if (exynos_cluster_unused(cluster)) {
-			exynos_cluster_power_down(cluster);
-			last_man = true;
-		}
-	} else if (cpu_use_count[cpu][cluster] == 1) {
-		/*
-		 * A power_up request went ahead of us.
-		 * Even if we do not want to shut this CPU down,
-		 * the caller expects a certain state as if the WFI
-		 * was aborted. So let's continue with cache cleaning.
-		 */
-		skip_wfi = true;
-	} else {
-		BUG();
-	}
-
-	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
-		arch_spin_unlock(&exynos_mcpm_lock);
-
-		if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
-			/*
-			 * On the Cortex-A15 we need to disable
-			 * L2 prefetching before flushing the cache.
-			 */
-			asm volatile(
-			"mcr	p15, 1, %0, c15, c0, 3\n\t"
-			"isb\n\t"
-			"dsb"
-			: : "r" (0x400));
-		}
+static void exynos_cluster_powerdown_prepare(unsigned int cluster)
+{
+	pr_debug("%s: cluster %u\n", __func__, cluster);
+	BUG_ON(cluster >= EXYNOS5420_NR_CLUSTERS);
+	exynos_cluster_power_down(cluster);
+}
 
-		/* Flush all cache levels for this cluster. */
-		exynos_v7_exit_coherency_flush(all);
+static void exynos_cpu_cache_disable(void)
+{
+	/* Disable and flush the local CPU cache. */
+	exynos_v7_exit_coherency_flush(louis);
+}
 
+static void exynos_cluster_cache_disable(void)
+{
+	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
 		/*
-		 * Disable cluster-level coherency by masking
-		 * incoming snoops and DVM messages:
+		 * On the Cortex-A15 we need to disable
+		 * L2 prefetching before flushing the cache.
 		 */
-		cci_disable_port_by_cpu(mpidr);
-
-		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
-	} else {
-		arch_spin_unlock(&exynos_mcpm_lock);
-
-		/* Disable and flush the local CPU cache. */
-		exynos_v7_exit_coherency_flush(louis);
+		asm volatile(
+		"mcr	p15, 1, %0, c15, c0, 3\n\t"
+		"isb\n\t"
+		"dsb"
+		: : "r" (0x400));
 	}
 
-	__mcpm_cpu_down(cpu, cluster);
+	/* Flush all cache levels for this cluster. */
+	exynos_v7_exit_coherency_flush(all);
 
-	/* Now we are prepared for power-down, do it: */
-	if (!skip_wfi)
-		wfi();
-
-	/* Not dead at this point? Let our caller cope. */
+	/*
+	 * Disable cluster-level coherency by masking
+	 * incoming snoops and DVM messages:
+	 */
+	cci_disable_port_by_cpu(read_cpuid_mpidr());
 }
 
 static int exynos_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
@@ -222,10 +142,8 @@ static int exynos_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
 
 	/* Wait for the core state to be OFF */
 	while (tries--) {
-		if (ACCESS_ONCE(cpu_use_count[cpu][cluster]) == 0) {
-			if ((exynos_cpu_power_state(cpunr) == 0))
-				return 0; /* success: the CPU is halted */
-		}
+		if ((exynos_cpu_power_state(cpunr) == 0))
+			return 0; /* success: the CPU is halted */
 
 		/* Otherwise, wait and retry: */
 		msleep(1);
@@ -234,63 +152,23 @@ static int exynos_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
 	return -ETIMEDOUT; /* timeout */
 }
 
-static void exynos_powered_up(void)
-{
-	unsigned int mpidr, cpu, cluster;
-
-	mpidr = read_cpuid_mpidr();
-	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-
-	arch_spin_lock(&exynos_mcpm_lock);
-	if (cpu_use_count[cpu][cluster] == 0)
-		cpu_use_count[cpu][cluster] = 1;
-	arch_spin_unlock(&exynos_mcpm_lock);
-}
-
-static void exynos_suspend(u64 residency)
+static void exynos_cpu_is_up(unsigned int cpu, unsigned int cluster)
 {
-	unsigned int mpidr, cpunr;
-
-	exynos_power_down();
-
-	/*
-	 * Execution reaches here only if cpu did not power down.
-	 * Hence roll back the changes done in exynos_power_down function.
-	 *
-	 * CAUTION: "This function requires the stack data to be visible through
-	 * power down and can only be executed on processors like A15 and A7
-	 * that hit the cache with the C bit clear in the SCTLR register."
-	 */
-	mpidr = read_cpuid_mpidr();
-	cpunr = exynos_pmu_cpunr(mpidr);
-
-	exynos_cpu_power_up(cpunr);
+	/* especially when resuming: make sure power control is set */
+	exynos_cpu_powerup(cpu, cluster);
 }
 
 static const struct mcpm_platform_ops exynos_power_ops = {
-	.power_up		= exynos_power_up,
-	.power_down		= exynos_power_down,
+	.cpu_powerup		= exynos_cpu_powerup,
+	.cluster_powerup	= exynos_cluster_powerup,
+	.cpu_powerdown_prepare	= exynos_cpu_powerdown_prepare,
+	.cluster_powerdown_prepare = exynos_cluster_powerdown_prepare,
+	.cpu_cache_disable	= exynos_cpu_cache_disable,
+	.cluster_cache_disable	= exynos_cluster_cache_disable,
 	.wait_for_powerdown	= exynos_wait_for_powerdown,
-	.suspend		= exynos_suspend,
-	.powered_up		= exynos_powered_up,
+	.cpu_is_up		= exynos_cpu_is_up,
 };
 
-static void __init exynos_mcpm_usage_count_init(void)
-{
-	unsigned int mpidr, cpu, cluster;
-
-	mpidr = read_cpuid_mpidr();
-	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-
-	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
-	BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
-		cluster >= EXYNOS5420_NR_CLUSTERS);
-
-	cpu_use_count[cpu][cluster] = 1;
-}
-
 /*
  * Enable cluster-level coherency, in preparation for turning on the MMU.
  */
@@ -302,19 +180,6 @@ static void __naked exynos_pm_power_up_setup(unsigned int affinity_level)
302 "b cci_enable_port_for_self"); 180 "b cci_enable_port_for_self");
303} 181}
304 182
305static void __init exynos_cache_off(void)
306{
307 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
308 /* disable L2 prefetching on the Cortex-A15 */
309 asm volatile(
310 "mcr p15, 1, %0, c15, c0, 3\n\t"
311 "isb\n\t"
312 "dsb"
313 : : "r" (0x400));
314 }
315 exynos_v7_exit_coherency_flush(all);
316}
317
318static const struct of_device_id exynos_dt_mcpm_match[] = { 183static const struct of_device_id exynos_dt_mcpm_match[] = {
319 { .compatible = "samsung,exynos5420" }, 184 { .compatible = "samsung,exynos5420" },
320 { .compatible = "samsung,exynos5800" }, 185 { .compatible = "samsung,exynos5800" },
@@ -370,13 +235,11 @@ static int __init exynos_mcpm_init(void)
 	 */
 	pmu_raw_writel(EXYNOS5420_SWRESET_KFC_SEL, S5P_PMU_SPARE3);
 
-	exynos_mcpm_usage_count_init();
-
 	ret = mcpm_platform_register(&exynos_power_ops);
 	if (!ret)
 		ret = mcpm_sync_init(exynos_pm_power_up_setup);
 	if (!ret)
-		ret = mcpm_loopback(exynos_cache_off); /* turn on the CCI */
+		ret = mcpm_loopback(exynos_cluster_cache_disable); /* turn on the CCI */
 	if (ret) {
 		iounmap(ns_sram_base_addr);
 		return ret;
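
Note (not part of the patch): most of the deletions above are possible because the usage counting, locking and "last man" arbitration now live in the generic MCPM core; this backend only supplies the low-level primitives registered in exynos_power_ops. As a rough, assumed sketch of the resulting power-down sequencing (simplified and illustrative only, not the actual MCPM core code):

	static void mcpm_power_down_sketch(unsigned int cpu, unsigned int cluster, bool last_man)
	{
		exynos_cpu_powerdown_prepare(cpu, cluster);		/* clear the core power bit */
		if (last_man) {
			exynos_cluster_powerdown_prepare(cluster);	/* clear L2/COMMON power */
			exynos_cluster_cache_disable();			/* flush all levels, drop the CCI port */
		} else {
			exynos_cpu_cache_disable();			/* flush the local cache only */
		}
		wfi();							/* and actually power down */
	}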
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index d2e9f12d12f1..ebd135bb0995 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -126,6 +126,8 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
  */
 void exynos_cpu_power_down(int cpu)
 {
+	u32 core_conf;
+
 	if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) {
 		/*
 		 * Bypass power down for CPU0 during suspend. Check for
@@ -137,7 +139,10 @@ void exynos_cpu_power_down(int cpu)
 		if (!(val & S5P_CORE_LOCAL_PWR_EN))
 			return;
 	}
-	pmu_raw_writel(0, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
+
+	core_conf = pmu_raw_readl(EXYNOS_ARM_CORE_CONFIGURATION(cpu));
+	core_conf &= ~S5P_CORE_LOCAL_PWR_EN;
+	pmu_raw_writel(core_conf, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
 }
 
 /**
@@ -148,7 +153,12 @@ void exynos_cpu_power_down(int cpu)
  */
 void exynos_cpu_power_up(int cpu)
 {
-	pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
+	u32 core_conf = S5P_CORE_LOCAL_PWR_EN;
+
+	if (soc_is_exynos3250())
+		core_conf |= S5P_CORE_AUTOWAKEUP_EN;
+
+	pmu_raw_writel(core_conf,
 			EXYNOS_ARM_CORE_CONFIGURATION(cpu));
 }
 
@@ -226,6 +236,10 @@ static void exynos_core_restart(u32 core_id)
 	if (!of_machine_is_compatible("samsung,exynos3250"))
 		return;
 
+	while (!pmu_raw_readl(S5P_PMU_SPARE2))
+		udelay(10);
+	udelay(10);
+
 	val = pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(core_id));
 	val |= S5P_CORE_WAKEUP_FROM_LOCAL_CFG;
 	pmu_raw_writel(val, EXYNOS_ARM_CORE_STATUS(core_id));
@@ -346,7 +360,10 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
 
 		call_firmware_op(cpu_boot, core_id);
 
-		arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+		if (soc_is_exynos3250())
+			dsb_sev();
+		else
+			arch_send_wakeup_ipi_mask(cpumask_of(cpu));
 
 		if (pen_release == -1)
 			break;
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index 5685250693fd..cc75ab448be3 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -127,6 +127,8 @@ int exynos_pm_central_resume(void)
 static void exynos_set_wakeupmask(long mask)
 {
 	pmu_raw_writel(mask, S5P_WAKEUP_MASK);
+	if (soc_is_exynos3250())
+		pmu_raw_writel(0x0, S5P_WAKEUP_MASK2);
 }
 
 static void exynos_cpu_set_boot_vector(long flags)
@@ -140,7 +142,7 @@ static int exynos_aftr_finisher(unsigned long flags)
 {
 	int ret;
 
-	exynos_set_wakeupmask(0x0000ff3e);
+	exynos_set_wakeupmask(soc_is_exynos3250() ? 0x40003ffe : 0x0000ff3e);
 	/* Set value of power down register for aftr mode */
 	exynos_sys_powerdown_conf(SYS_AFTR);
 
@@ -157,8 +159,13 @@ static int exynos_aftr_finisher(unsigned long flags)
 
 void exynos_enter_aftr(void)
 {
+	unsigned int cpuid = smp_processor_id();
+
 	cpu_pm_enter();
 
+	if (soc_is_exynos3250())
+		exynos_set_boot_flag(cpuid, C2_STATE);
+
 	exynos_pm_central_suspend();
 
 	if (of_machine_is_compatible("samsung,exynos4212") ||
@@ -178,6 +185,9 @@ void exynos_enter_aftr(void)
 
 	exynos_pm_central_resume();
 
+	if (soc_is_exynos3250())
+		exynos_clear_boot_flag(cpuid, C2_STATE);
+
 	cpu_pm_exit();
 }
 
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 37266a826437..cbe56b35aea0 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -37,6 +37,7 @@ struct exynos_pm_domain {
 	struct clk *oscclk;
 	struct clk *clk[MAX_CLK_PER_DOMAIN];
 	struct clk *pclk[MAX_CLK_PER_DOMAIN];
+	struct clk *asb_clk[MAX_CLK_PER_DOMAIN];
 };
 
 static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
@@ -45,14 +46,19 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
 	void __iomem *base;
 	u32 timeout, pwr;
 	char *op;
+	int i;
 
 	pd = container_of(domain, struct exynos_pm_domain, pd);
 	base = pd->base;
 
+	for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+		if (IS_ERR(pd->asb_clk[i]))
+			break;
+		clk_prepare_enable(pd->asb_clk[i]);
+	}
+
 	/* Set oscclk before powering off a domain*/
 	if (!power_on) {
-		int i;
-
 		for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
 			if (IS_ERR(pd->clk[i]))
 				break;
@@ -81,8 +87,6 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
 
 	/* Restore clocks after powering on a domain*/
 	if (power_on) {
-		int i;
-
 		for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
 			if (IS_ERR(pd->clk[i]))
 				break;
@@ -92,6 +96,12 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
 		}
 	}
 
+	for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+		if (IS_ERR(pd->asb_clk[i]))
+			break;
+		clk_disable_unprepare(pd->asb_clk[i]);
+	}
+
 	return 0;
 }
 
@@ -125,12 +135,21 @@ static __init int exynos4_pm_init_power_domain(void)
 			return -ENOMEM;
 		}
 
-		pd->pd.name = kstrdup(np->name, GFP_KERNEL);
+		pd->pd.name = kstrdup(dev_name(dev), GFP_KERNEL);
 		pd->name = pd->pd.name;
 		pd->base = of_iomap(np, 0);
 		pd->pd.power_off = exynos_pd_power_off;
 		pd->pd.power_on = exynos_pd_power_on;
 
+		for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+			char clk_name[8];
+
+			snprintf(clk_name, sizeof(clk_name), "asb%d", i);
+			pd->asb_clk[i] = clk_get(dev, clk_name);
+			if (IS_ERR(pd->asb_clk[i]))
+				break;
+		}
+
 		pd->oscclk = clk_get(dev, "oscclk");
 		if (IS_ERR(pd->oscclk))
 			goto no_clk;
diff --git a/arch/arm/mach-exynos/regs-pmu.h b/arch/arm/mach-exynos/regs-pmu.h
index eb461e1c325a..b7614333d296 100644
--- a/arch/arm/mach-exynos/regs-pmu.h
+++ b/arch/arm/mach-exynos/regs-pmu.h
@@ -43,12 +43,14 @@
 #define S5P_WAKEUP_STAT				0x0600
 #define S5P_EINT_WAKEUP_MASK			0x0604
 #define S5P_WAKEUP_MASK				0x0608
+#define S5P_WAKEUP_MASK2			0x0614
 
 #define S5P_INFORM0				0x0800
 #define S5P_INFORM1				0x0804
 #define S5P_INFORM5				0x0814
 #define S5P_INFORM6				0x0818
 #define S5P_INFORM7				0x081C
+#define S5P_PMU_SPARE2				0x0908
 #define S5P_PMU_SPARE3				0x090C
 
 #define EXYNOS_IROM_DATA2			0x0988
@@ -182,6 +184,7 @@
 
 #define S5P_CORE_LOCAL_PWR_EN			0x3
 #define S5P_CORE_WAKEUP_FROM_LOCAL_CFG		(0x3 << 8)
+#define S5P_CORE_AUTOWAKEUP_EN			(1 << 31)
 
 /* Only for EXYNOS4210 */
 #define S5P_CMU_CLKSTOP_LCD1_LOWPWR		0x1154
diff --git a/arch/arm/mach-exynos/smc.h b/arch/arm/mach-exynos/smc.h
index f7b82f9c1e21..c2845717bc8f 100644
--- a/arch/arm/mach-exynos/smc.h
+++ b/arch/arm/mach-exynos/smc.h
@@ -17,6 +17,8 @@
 #define SMC_CMD_SLEEP		(-3)
 #define SMC_CMD_CPU1BOOT	(-4)
 #define SMC_CMD_CPU0AFTR	(-5)
+#define SMC_CMD_SAVE		(-6)
+#define SMC_CMD_SHUTDOWN	(-7)
 /* For CP15 Access */
 #define SMC_CMD_C15RESUME	(-11)
 /* For L2 Cache Access */
@@ -32,4 +34,11 @@ extern void exynos_smc(u32 cmd, u32 arg1, u32 arg2, u32 arg3);
 
 #endif /* __ASSEMBLY__ */
 
+/* op type for SMC_CMD_SAVE and SMC_CMD_SHUTDOWN */
+#define OP_TYPE_CORE		0x0
+#define OP_TYPE_CLUSTER		0x1
+
+/* Power State required for SMC_CMD_SAVE and SMC_CMD_SHUTDOWN */
+#define SMC_POWERSTATE_IDLE	0x1
+
 #endif
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index 2146d918aedd..3e6aea7f83af 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -65,8 +65,6 @@ static struct sleep_save exynos_core_save[] = {
 
 struct exynos_pm_data {
 	const struct exynos_wkup_irq *wkup_irq;
-	struct sleep_save *extra_save;
-	int num_extra_save;
 	unsigned int wake_disable_mask;
 	unsigned int *release_ret_regs;
 
@@ -77,7 +75,7 @@ struct exynos_pm_data {
 	int (*cpu_suspend)(unsigned long);
 };
 
-struct exynos_pm_data *pm_data;
+static const struct exynos_pm_data *pm_data;
 
 static int exynos5420_cpu_state;
 static unsigned int exynos_pmu_spare3;
@@ -106,7 +104,7 @@ static const struct exynos_wkup_irq exynos5250_wkup_irq[] = {
 	{ /* sentinel */ },
 };
 
-unsigned int exynos_release_ret_regs[] = {
+static unsigned int exynos_release_ret_regs[] = {
 	S5P_PAD_RET_MAUDIO_OPTION,
 	S5P_PAD_RET_GPIO_OPTION,
 	S5P_PAD_RET_UART_OPTION,
@@ -117,7 +115,7 @@ unsigned int exynos_release_ret_regs[] = {
 	REG_TABLE_END,
 };
 
-unsigned int exynos3250_release_ret_regs[] = {
+static unsigned int exynos3250_release_ret_regs[] = {
 	S5P_PAD_RET_MAUDIO_OPTION,
 	S5P_PAD_RET_GPIO_OPTION,
 	S5P_PAD_RET_UART_OPTION,
@@ -130,7 +128,7 @@ unsigned int exynos3250_release_ret_regs[] = {
 	REG_TABLE_END,
 };
 
-unsigned int exynos5420_release_ret_regs[] = {
+static unsigned int exynos5420_release_ret_regs[] = {
 	EXYNOS_PAD_RET_DRAM_OPTION,
 	EXYNOS_PAD_RET_MAUDIO_OPTION,
 	EXYNOS_PAD_RET_JTAG_OPTION,
@@ -349,10 +347,6 @@ static void exynos_pm_prepare(void)
 
 	s3c_pm_do_save(exynos_core_save, ARRAY_SIZE(exynos_core_save));
 
-	if (pm_data->extra_save)
-		s3c_pm_do_save(pm_data->extra_save,
-				pm_data->num_extra_save);
-
 	exynos_pm_enter_sleep_mode();
 
 	/* ensure at least INFORM0 has the resume address */
@@ -475,10 +469,6 @@ static void exynos_pm_resume(void)
 	/* For release retention */
 	exynos_pm_release_retention();
 
-	if (pm_data->extra_save)
-		s3c_pm_do_restore_core(pm_data->extra_save,
-				pm_data->num_extra_save);
-
 	s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
 
 	if (cpuid == ARM_CPU_PART_CORTEX_A9)
@@ -685,7 +675,7 @@ static const struct exynos_pm_data exynos5250_pm_data = {
 	.cpu_suspend	= exynos_cpu_suspend,
 };
 
-static struct exynos_pm_data exynos5420_pm_data = {
+static const struct exynos_pm_data exynos5420_pm_data = {
 	.wkup_irq	= exynos5250_wkup_irq,
 	.wake_disable_mask = (0x7F << 7) | (0x1F << 1),
 	.release_ret_regs = exynos5420_release_ret_regs,
@@ -736,7 +726,7 @@ void __init exynos_pm_init(void)
 	if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL)))
 		pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
 
-	pm_data = (struct exynos_pm_data *) match->data;
+	pm_data = (const struct exynos_pm_data *) match->data;
 
 	/* All wakeup disable */
 	tmp = pmu_raw_readl(S5P_WAKEUP_MASK);