Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/Kconfig                                    |    2
-rw-r--r--  arch/arm/Kconfig.debug                              |   12
-rw-r--r--  arch/arm/Makefile                                   |    1
-rw-r--r--  arch/arm/boot/dts/r8a7790-lager.dts                 |   18
-rw-r--r--  arch/arm/boot/dts/r8a7791-koelsch.dts               |   18
-rw-r--r--  arch/arm/common/mcpm_entry.c                        |  202
-rw-r--r--  arch/arm/include/asm/mcpm.h                         |   65
-rw-r--r--  arch/arm/mach-alpine/Kconfig                        |   12
-rw-r--r--  arch/arm/mach-alpine/Makefile                       |    2
-rw-r--r--  arch/arm/mach-alpine/alpine_cpu_pm.c                |   70
-rw-r--r--  arch/arm/mach-alpine/alpine_cpu_pm.h                |   26
-rw-r--r--  arch/arm/mach-alpine/alpine_cpu_resume.h            |   38
-rw-r--r--  arch/arm/mach-alpine/alpine_machine.c               |   28
-rw-r--r--  arch/arm/mach-alpine/platsmp.c                      |   49
-rw-r--r--  arch/arm/mach-bcm/bcm_cygnus.c                      |    2
-rw-r--r--  arch/arm/mach-exynos/common.h                       |    6
-rw-r--r--  arch/arm/mach-exynos/exynos.c                       |    1
-rw-r--r--  arch/arm/mach-exynos/firmware.c                     |   33
-rw-r--r--  arch/arm/mach-exynos/mcpm-exynos.c                  |  247
-rw-r--r--  arch/arm/mach-exynos/platsmp.c                      |   23
-rw-r--r--  arch/arm/mach-exynos/pm.c                           |   12
-rw-r--r--  arch/arm/mach-exynos/pm_domains.c                   |   29
-rw-r--r--  arch/arm/mach-exynos/regs-pmu.h                     |    3
-rw-r--r--  arch/arm/mach-exynos/smc.h                          |    9
-rw-r--r--  arch/arm/mach-exynos/suspend.c                      |   22
-rw-r--r--  arch/arm/mach-imx/Kconfig                           |    3
-rw-r--r--  arch/arm/mach-imx/clk-imx6q.c                       |    8
-rw-r--r--  arch/arm/mach-imx/gpc.c                             |  213
-rw-r--r--  arch/arm/mach-meson/Kconfig                         |    3
-rw-r--r--  arch/arm/mach-mvebu/Kconfig                         |   14
-rw-r--r--  arch/arm/mach-mvebu/board-v7.c                      |   14
-rw-r--r--  arch/arm/mach-mvebu/platsmp-a9.c                    |    2
-rw-r--r--  arch/arm/mach-omap2/omap_device.c                   |    3
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_43xx_data.c          |   36
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_7xx_data.c           |  113
-rw-r--r--  arch/arm/mach-omap2/prcm43xx.h                      |    1
-rw-r--r--  arch/arm/mach-rockchip/platsmp.c                    |    4
-rw-r--r--  arch/arm/mach-rockchip/pm.c                         |   14
-rw-r--r--  arch/arm/mach-rockchip/pm.h                         |    6
-rw-r--r--  arch/arm/mach-s3c64xx/crag6410.h                    |    1
-rw-r--r--  arch/arm/mach-s3c64xx/mach-crag6410.c               |    1
-rw-r--r--  arch/arm/mach-shmobile/Kconfig                      |    2
-rw-r--r--  arch/arm/mach-shmobile/Makefile                     |    2
-rw-r--r--  arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c  |  147
-rw-r--r--  arch/arm/mach-shmobile/setup-rcar-gen2.c            |    3
-rw-r--r--  arch/arm/mach-vexpress/dcscb.c                      |  197
-rw-r--r--  arch/arm/mach-vexpress/tc2_pm.c                     |  291
47 files changed, 1390 insertions(+), 618 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c8baa96ed748..d74c9ea6eb06 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -839,6 +839,8 @@ config ARCH_VIRT
 #
 source "arch/arm/mach-mvebu/Kconfig"
 
+source "arch/arm/mach-alpine/Kconfig"
+
 source "arch/arm/mach-asm9260/Kconfig"
 
 source "arch/arm/mach-at91/Kconfig"
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 4cfb4b8fc2dd..771d5185a397 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -93,6 +93,14 @@ choice
 	prompt "Kernel low-level debugging port"
 	depends on DEBUG_LL
 
+	config DEBUG_ALPINE_UART0
+		bool "Kernel low-level debugging messages via Alpine UART0"
+		depends on ARCH_ALPINE
+		select DEBUG_UART_8250
+		help
+		  Say Y here if you want kernel low-level debugging support
+		  on Alpine based platforms.
+
 	config DEBUG_ASM9260_UART
 		bool "Kernel low-level debugging via asm9260 UART"
 		depends on MACH_ASM9260
@@ -1397,6 +1405,7 @@ config DEBUG_UART_PHYS
 	default 0xf8b00000 if DEBUG_HIX5HD2_UART
 	default 0xf991e000 if DEBUG_QCOM_UARTDM
 	default 0xfcb00000 if DEBUG_HI3620_UART
+	default 0xfd883000 if DEBUG_ALPINE_UART0
 	default 0xfe800000 if ARCH_IOP32X
 	default 0xff690000 if DEBUG_RK32_UART2
 	default 0xffc02000 if DEBUG_SOCFPGA_UART
@@ -1462,6 +1471,7 @@ config DEBUG_UART_VIRT
 	default 0xfd000000 if ARCH_SPEAR3XX || ARCH_SPEAR6XX
 	default 0xfd000000 if ARCH_SPEAR13XX
 	default 0xfd012000 if ARCH_MV78XX0
+	default 0xfd883000 if DEBUG_ALPINE_UART0
 	default 0xfde12000 if ARCH_DOVE
 	default 0xfe012000 if ARCH_ORION5X
 	default 0xf31004c0 if DEBUG_MESON_UARTAO
@@ -1522,7 +1532,7 @@ config DEBUG_UART_8250_WORD
 	depends on DEBUG_LL_UART_8250 || DEBUG_UART_8250
 	depends on DEBUG_UART_8250_SHIFT >= 2
 	default y if DEBUG_PICOXCELL_UART || DEBUG_SOCFPGA_UART || \
-		ARCH_KEYSTONE || \
+		ARCH_KEYSTONE || DEBUG_ALPINE_UART0 || \
 		DEBUG_DAVINCI_DMx_UART0 || DEBUG_DAVINCI_DA8XX_UART1 || \
 		DEBUG_DAVINCI_DA8XX_UART2 || \
 		DEBUG_BCM_KONA_UART || DEBUG_RK32_UART2 || \
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index cced41d40ce2..985227cbbd1b 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -142,6 +142,7 @@ textofs-$(CONFIG_ARCH_AXXIA) := 0x00308000
 
 # Machine directory name. This list is sorted alphanumerically
 # by CONFIG_* macro name.
+machine-$(CONFIG_ARCH_ALPINE)	+= alpine
 machine-$(CONFIG_ARCH_AT91)	+= at91
 machine-$(CONFIG_ARCH_AXXIA)	+= axxia
 machine-$(CONFIG_ARCH_BCM)	+= bcm
diff --git a/arch/arm/boot/dts/r8a7790-lager.dts b/arch/arm/boot/dts/r8a7790-lager.dts
index 0c3b6783b72a..2a0f895c48d0 100644
--- a/arch/arm/boot/dts/r8a7790-lager.dts
+++ b/arch/arm/boot/dts/r8a7790-lager.dts
@@ -513,9 +513,27 @@
 	pinctrl-0 = <&iic3_pins>;
 	status = "okay";
 
+	pmic@58 {
+		compatible = "dlg,da9063";
+		reg = <0x58>;
+		interrupt-parent = <&irqc0>;
+		interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+		interrupt-controller;
+
+		rtc {
+			compatible = "dlg,da9063-rtc";
+		};
+
+		wdt {
+			compatible = "dlg,da9063-watchdog";
+		};
+	};
+
 	vdd_dvfs: regulator@68 {
 		compatible = "dlg,da9210";
 		reg = <0x68>;
+		interrupt-parent = <&irqc0>;
+		interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
 
 		regulator-min-microvolt = <1000000>;
 		regulator-max-microvolt = <1000000>;
diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
index a3c27807f6c5..b2dcf640d583 100644
--- a/arch/arm/boot/dts/r8a7791-koelsch.dts
+++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
@@ -517,9 +517,27 @@
 	status = "okay";
 	clock-frequency = <100000>;
 
+	pmic@58 {
+		compatible = "dlg,da9063";
+		reg = <0x58>;
+		interrupt-parent = <&irqc0>;
+		interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+		interrupt-controller;
+
+		rtc {
+			compatible = "dlg,da9063-rtc";
+		};
+
+		wdt {
+			compatible = "dlg,da9063-watchdog";
+		};
+	};
+
 	vdd_dvfs: regulator@68 {
 		compatible = "dlg,da9210";
 		reg = <0x68>;
+		interrupt-parent = <&irqc0>;
+		interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
 
 		regulator-min-microvolt = <1000000>;
 		regulator-max-microvolt = <1000000>;
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 3c165fc2dce2..5f8a52ac7edf 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -55,22 +55,81 @@ bool mcpm_is_available(void)
 	return (platform_ops) ? true : false;
 }
 
+/*
+ * We can't use regular spinlocks. In the switcher case, it is possible
+ * for an outbound CPU to call power_down() after its inbound counterpart
+ * is already live using the same logical CPU number which trips lockdep
+ * debugging.
+ */
+static arch_spinlock_t mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+
+static int mcpm_cpu_use_count[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
+
+static inline bool mcpm_cluster_unused(unsigned int cluster)
+{
+	int i, cnt;
+	for (i = 0, cnt = 0; i < MAX_CPUS_PER_CLUSTER; i++)
+		cnt |= mcpm_cpu_use_count[cluster][i];
+	return !cnt;
+}
+
 int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
 {
+	bool cpu_is_down, cluster_is_down;
+	int ret = 0;
+
 	if (!platform_ops)
 		return -EUNATCH; /* try not to shadow power_up errors */
 	might_sleep();
-	return platform_ops->power_up(cpu, cluster);
+
+	/* backward compatibility callback */
+	if (platform_ops->power_up)
+		return platform_ops->power_up(cpu, cluster);
+
+	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+
+	/*
+	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
+	 * variant exists, we need to disable IRQs manually here.
+	 */
+	local_irq_disable();
+	arch_spin_lock(&mcpm_lock);
+
+	cpu_is_down = !mcpm_cpu_use_count[cluster][cpu];
+	cluster_is_down = mcpm_cluster_unused(cluster);
+
+	mcpm_cpu_use_count[cluster][cpu]++;
+	/*
+	 * The only possible values are:
+	 * 0 = CPU down
+	 * 1 = CPU (still) up
+	 * 2 = CPU requested to be up before it had a chance
+	 *     to actually make itself down.
+	 * Any other value is a bug.
+	 */
+	BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 1 &&
+	       mcpm_cpu_use_count[cluster][cpu] != 2);
+
+	if (cluster_is_down)
+		ret = platform_ops->cluster_powerup(cluster);
+	if (cpu_is_down && !ret)
+		ret = platform_ops->cpu_powerup(cpu, cluster);
+
+	arch_spin_unlock(&mcpm_lock);
+	local_irq_enable();
+	return ret;
 }
 
 typedef void (*phys_reset_t)(unsigned long);
 
 void mcpm_cpu_power_down(void)
 {
+	unsigned int mpidr, cpu, cluster;
+	bool cpu_going_down, last_man;
 	phys_reset_t phys_reset;
 
-	if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down))
+	if (WARN_ON_ONCE(!platform_ops))
 		return;
 	BUG_ON(!irqs_disabled());
 
 	/*
@@ -79,28 +138,65 @@ void mcpm_cpu_power_down(void)
 	 */
 	setup_mm_for_reboot();
 
-	platform_ops->power_down();
+	/* backward compatibility callback */
+	if (platform_ops->power_down) {
+		platform_ops->power_down();
+		goto not_dead;
+	}
+
+	mpidr = read_cpuid_mpidr();
+	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+
+	__mcpm_cpu_going_down(cpu, cluster);
 
+	arch_spin_lock(&mcpm_lock);
+	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
+
+	mcpm_cpu_use_count[cluster][cpu]--;
+	BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 0 &&
+	       mcpm_cpu_use_count[cluster][cpu] != 1);
+	cpu_going_down = !mcpm_cpu_use_count[cluster][cpu];
+	last_man = mcpm_cluster_unused(cluster);
+
+	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
+		platform_ops->cpu_powerdown_prepare(cpu, cluster);
+		platform_ops->cluster_powerdown_prepare(cluster);
+		arch_spin_unlock(&mcpm_lock);
+		platform_ops->cluster_cache_disable();
+		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
+	} else {
+		if (cpu_going_down)
+			platform_ops->cpu_powerdown_prepare(cpu, cluster);
+		arch_spin_unlock(&mcpm_lock);
+		/*
+		 * If cpu_going_down is false here, that means a power_up
+		 * request raced ahead of us. Even if we do not want to
+		 * shut this CPU down, the caller still expects execution
+		 * to return through the system resume entry path, like
+		 * when the WFI is aborted due to a new IRQ or the like..
+		 * So let's continue with cache cleaning in all cases.
+		 */
+		platform_ops->cpu_cache_disable();
+	}
+
+	__mcpm_cpu_down(cpu, cluster);
+
+	/* Now we are prepared for power-down, do it: */
+	if (cpu_going_down)
+		wfi();
+
+not_dead:
 	/*
 	 * It is possible for a power_up request to happen concurrently
 	 * with a power_down request for the same CPU. In this case the
-	 * power_down method might not be able to actually enter a
-	 * powered down state with the WFI instruction if the power_up
-	 * method has removed the required reset condition. The
-	 * power_down method is then allowed to return. We must perform
-	 * a re-entry in the kernel as if the power_up method just had
-	 * deasserted reset on the CPU.
-	 *
-	 * To simplify race issues, the platform specific implementation
-	 * must accommodate for the possibility of unordered calls to
-	 * power_down and power_up with a usage count. Therefore, if a
-	 * call to power_up is issued for a CPU that is not down, then
-	 * the next call to power_down must not attempt a full shutdown
-	 * but only do the minimum (normally disabling L1 cache and CPU
-	 * coherency) and return just as if a concurrent power_up request
-	 * had happened as described above.
+	 * CPU might not be able to actually enter a powered down state
+	 * with the WFI instruction if the power_up request has removed
+	 * the required reset condition. We must perform a re-entry in
+	 * the kernel as if the power_up method just had deasserted reset
+	 * on the CPU.
 	 */
-
 	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
 	phys_reset(virt_to_phys(mcpm_entry_point));
 
@@ -125,26 +221,66 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
 
 void mcpm_cpu_suspend(u64 expected_residency)
 {
-	phys_reset_t phys_reset;
-
-	if (WARN_ON_ONCE(!platform_ops || !platform_ops->suspend))
+	if (WARN_ON_ONCE(!platform_ops))
 		return;
-	BUG_ON(!irqs_disabled());
 
-	/* Very similar to mcpm_cpu_power_down() */
-	setup_mm_for_reboot();
-	platform_ops->suspend(expected_residency);
-	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
-	phys_reset(virt_to_phys(mcpm_entry_point));
-	BUG();
+	/* backward compatibility callback */
+	if (platform_ops->suspend) {
+		phys_reset_t phys_reset;
+		BUG_ON(!irqs_disabled());
+		setup_mm_for_reboot();
+		platform_ops->suspend(expected_residency);
+		phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+		phys_reset(virt_to_phys(mcpm_entry_point));
+		BUG();
+	}
+
+	/* Some platforms might have to enable special resume modes, etc. */
+	if (platform_ops->cpu_suspend_prepare) {
+		unsigned int mpidr = read_cpuid_mpidr();
+		unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+		unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+		arch_spin_lock(&mcpm_lock);
+		platform_ops->cpu_suspend_prepare(cpu, cluster);
+		arch_spin_unlock(&mcpm_lock);
+	}
+	mcpm_cpu_power_down();
 }
 
 int mcpm_cpu_powered_up(void)
 {
+	unsigned int mpidr, cpu, cluster;
+	bool cpu_was_down, first_man;
+	unsigned long flags;
+
 	if (!platform_ops)
 		return -EUNATCH;
-	if (platform_ops->powered_up)
+
+	/* backward compatibility callback */
+	if (platform_ops->powered_up) {
 		platform_ops->powered_up();
+		return 0;
+	}
+
+	mpidr = read_cpuid_mpidr();
+	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	local_irq_save(flags);
+	arch_spin_lock(&mcpm_lock);
+
+	cpu_was_down = !mcpm_cpu_use_count[cluster][cpu];
+	first_man = mcpm_cluster_unused(cluster);
+
+	if (first_man && platform_ops->cluster_is_up)
+		platform_ops->cluster_is_up(cluster);
+	if (cpu_was_down)
+		mcpm_cpu_use_count[cluster][cpu] = 1;
+	if (platform_ops->cpu_is_up)
+		platform_ops->cpu_is_up(cpu, cluster);
+
+	arch_spin_unlock(&mcpm_lock);
+	local_irq_restore(flags);
+
 	return 0;
 }
 
@@ -334,8 +470,10 @@ int __init mcpm_sync_init(
 	}
 	mpidr = read_cpuid_mpidr();
 	this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-	for_each_online_cpu(i)
+	for_each_online_cpu(i) {
+		mcpm_cpu_use_count[this_cluster][i] = 1;
 		mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
+	}
 	mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
 	sync_cache_w(&mcpm_sync);
 
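Note: the mcpm_entry.c hunks above pull the usage-count bookkeeping that every backend used to duplicate into the core. A minimal standalone sketch of that state machine follows; the names are illustrative, not kernel API, and the real code guards the counter with mcpm_lock and IRQs disabled:

/*
 * Model of the per-CPU use count now kept in mcpm_entry.c:
 * 0 = CPU down, 1 = CPU up, 2 = power-up requested before the CPU
 * finished going down. Any other value indicates a bug.
 */
#include <assert.h>

static int use_count[2][4];	/* [cluster][cpu], illustrative sizes */

static void model_power_up(int cluster, int cpu)
{
	use_count[cluster][cpu]++;
	assert(use_count[cluster][cpu] == 1 || use_count[cluster][cpu] == 2);
}

static void model_power_down(int cluster, int cpu, int *do_wfi)
{
	use_count[cluster][cpu]--;
	assert(use_count[cluster][cpu] == 0 || use_count[cluster][cpu] == 1);
	/* a racing power_up leaves the count at 1: skip WFI, resume instead */
	*do_wfi = (use_count[cluster][cpu] == 0);
}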
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index 3446f6a1d9fa..50b378f59e08 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -171,12 +171,73 @@ void mcpm_cpu_suspend(u64 expected_residency);
 int mcpm_cpu_powered_up(void);
 
 /*
- * Platform specific methods used in the implementation of the above API.
+ * Platform specific callbacks used in the implementation of the above API.
+ *
+ * cpu_powerup:
+ * Make given CPU runable. Called with MCPM lock held and IRQs disabled.
+ * The given cluster is assumed to be set up (cluster_powerup would have
+ * been called beforehand). Must return 0 for success or negative error code.
+ *
+ * cluster_powerup:
+ * Set up power for given cluster. Called with MCPM lock held and IRQs
+ * disabled. Called before first cpu_powerup when cluster is down. Must
+ * return 0 for success or negative error code.
+ *
+ * cpu_suspend_prepare:
+ * Special suspend configuration. Called on target CPU with MCPM lock held
+ * and IRQs disabled. This callback is optional. If provided, it is called
+ * before cpu_powerdown_prepare.
+ *
+ * cpu_powerdown_prepare:
+ * Configure given CPU for power down. Called on target CPU with MCPM lock
+ * held and IRQs disabled. Power down must be effective only at the next WFI instruction.
+ *
+ * cluster_powerdown_prepare:
+ * Configure given cluster for power down. Called on one CPU from target
+ * cluster with MCPM lock held and IRQs disabled. A cpu_powerdown_prepare
+ * for each CPU in the cluster has happened when this occurs.
+ *
+ * cpu_cache_disable:
+ * Clean and disable CPU level cache for the calling CPU. Called on with IRQs
+ * disabled only. The CPU is no longer cache coherent with the rest of the
+ * system when this returns.
+ *
+ * cluster_cache_disable:
+ * Clean and disable the cluster wide cache as well as the CPU level cache
+ * for the calling CPU. No call to cpu_cache_disable will happen for this
+ * CPU. Called with IRQs disabled and only when all the other CPUs are done
+ * with their own cpu_cache_disable. The cluster is no longer cache coherent
+ * with the rest of the system when this returns.
+ *
+ * cpu_is_up:
+ * Called on given CPU after it has been powered up or resumed. The MCPM lock
+ * is held and IRQs disabled. This callback is optional.
+ *
+ * cluster_is_up:
+ * Called by the first CPU to be powered up or resumed in given cluster.
+ * The MCPM lock is held and IRQs disabled. This callback is optional. If
+ * provided, it is called before cpu_is_up for that CPU.
+ *
+ * wait_for_powerdown:
+ * Wait until given CPU is powered down. This is called in sleeping context.
+ * Some reasonable timeout must be considered. Must return 0 for success or
+ * negative error code.
  */
 struct mcpm_platform_ops {
+	int (*cpu_powerup)(unsigned int cpu, unsigned int cluster);
+	int (*cluster_powerup)(unsigned int cluster);
+	void (*cpu_suspend_prepare)(unsigned int cpu, unsigned int cluster);
+	void (*cpu_powerdown_prepare)(unsigned int cpu, unsigned int cluster);
+	void (*cluster_powerdown_prepare)(unsigned int cluster);
+	void (*cpu_cache_disable)(void);
+	void (*cluster_cache_disable)(void);
+	void (*cpu_is_up)(unsigned int cpu, unsigned int cluster);
+	void (*cluster_is_up)(unsigned int cluster);
+	int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
+
+	/* deprecated callbacks */
 	int (*power_up)(unsigned int cpu, unsigned int cluster);
 	void (*power_down)(void);
-	int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
 	void (*suspend)(u64);
 	void (*powered_up)(void);
 };
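Note: with the header change above, a new-style backend only fills in the fine-grained callbacks and lets the core handle counting and the first-man/last-man races. A hypothetical skeleton is sketched below; the foo_* functions are placeholders for a made-up platform, and in a real backend the two cache_disable callbacks must actually clean and disable the caches:

#include <asm/mcpm.h>

static int foo_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
	return 0;	/* poke the platform's per-CPU power controller here */
}

static int foo_cluster_powerup(unsigned int cluster)
{
	return 0;	/* enable cluster-level (L2/coherency) power here */
}

static void foo_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster) { }
static void foo_cluster_powerdown_prepare(unsigned int cluster) { }
static void foo_cpu_cache_disable(void) { }
static void foo_cluster_cache_disable(void) { }

static const struct mcpm_platform_ops foo_power_ops = {
	.cpu_powerup		= foo_cpu_powerup,
	.cluster_powerup	= foo_cluster_powerup,
	.cpu_powerdown_prepare	= foo_cpu_powerdown_prepare,
	.cluster_powerdown_prepare = foo_cluster_powerdown_prepare,
	.cpu_cache_disable	= foo_cpu_cache_disable,
	.cluster_cache_disable	= foo_cluster_cache_disable,
};

/* registered once at init time: mcpm_platform_register(&foo_power_ops); */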
diff --git a/arch/arm/mach-alpine/Kconfig b/arch/arm/mach-alpine/Kconfig
new file mode 100644
index 000000000000..2c44b930505a
--- /dev/null
+++ b/arch/arm/mach-alpine/Kconfig
@@ -0,0 +1,12 @@
+config ARCH_ALPINE
+	bool "Annapurna Labs Alpine platform" if ARCH_MULTI_V7
+	select ARM_AMBA
+	select ARM_GIC
+	select GENERIC_IRQ_CHIP
+	select HAVE_ARM_ARCH_TIMER
+	select HAVE_SMP
+	select MFD_SYSCON
+	select PCI
+	select PCI_HOST_GENERIC
+	help
+	  This enables support for the Annapurna Labs Alpine V1 boards.
diff --git a/arch/arm/mach-alpine/Makefile b/arch/arm/mach-alpine/Makefile
new file mode 100644
index 000000000000..b6674890be71
--- /dev/null
+++ b/arch/arm/mach-alpine/Makefile
@@ -0,0 +1,2 @@
+obj-y				+= alpine_machine.o
+obj-$(CONFIG_SMP)		+= platsmp.o alpine_cpu_pm.o
diff --git a/arch/arm/mach-alpine/alpine_cpu_pm.c b/arch/arm/mach-alpine/alpine_cpu_pm.c
new file mode 100644
index 000000000000..121c77c4b53c
--- /dev/null
+++ b/arch/arm/mach-alpine/alpine_cpu_pm.c
@@ -0,0 +1,70 @@
+/*
+ * Low-level power-management support for Alpine platform.
+ *
+ * Copyright (C) 2015 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#include "alpine_cpu_pm.h"
+#include "alpine_cpu_resume.h"
+
+/* NB registers */
+#define AL_SYSFAB_POWER_CONTROL(cpu)	(0x2000 + (cpu)*0x100 + 0x20)
+
+static struct regmap *al_sysfabric;
+static struct al_cpu_resume_regs __iomem *al_cpu_resume_regs;
+static int wakeup_supported;
+
+int alpine_cpu_wakeup(unsigned int phys_cpu, uint32_t phys_resume_addr)
+{
+	if (!wakeup_supported)
+		return -ENOSYS;
+
+	/*
+	 * Set CPU resume address -
+	 * secure firmware running on boot will jump to this address
+	 * after setting proper CPU mode, and initializing e.g. secure
+	 * regs (the same mode all CPUs are booted to - usually HYP)
+	 */
+	writel(phys_resume_addr,
+	       &al_cpu_resume_regs->per_cpu[phys_cpu].resume_addr);
+
+	/* Power-up the CPU */
+	regmap_write(al_sysfabric, AL_SYSFAB_POWER_CONTROL(phys_cpu), 0);
+
+	return 0;
+}
+
+void __init alpine_cpu_pm_init(void)
+{
+	struct device_node *np;
+	uint32_t watermark;
+
+	al_sysfabric = syscon_regmap_lookup_by_compatible("al,alpine-sysfabric-service");
+
+	np = of_find_compatible_node(NULL, NULL, "al,alpine-cpu-resume");
+	al_cpu_resume_regs = of_iomap(np, 0);
+
+	wakeup_supported = !IS_ERR(al_sysfabric) && al_cpu_resume_regs;
+
+	if (wakeup_supported) {
+		watermark = readl(&al_cpu_resume_regs->watermark);
+		wakeup_supported = (watermark & AL_CPU_RESUME_MAGIC_NUM_MASK)
+				   == AL_CPU_RESUME_MAGIC_NUM;
+	}
+}
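Note: the wakeup contract in this new file is two writes — the secondary's physical resume address goes into the "al,alpine-cpu-resume" region, then the fabric power-control register is cleared so the secure boot firmware releases the core. A sketch of the caller side, mirroring what platsmp.c below does (example_wake_secondary is an illustrative name; error handling elided):

#include <linux/types.h>

#include "alpine_cpu_pm.h"

static int example_wake_secondary(unsigned int phys_cpu, phys_addr_t entry)
{
	/* the resume-address register is only 32 bits wide */
	if (entry > (phys_addr_t)(uint32_t)(-1))
		return -EINVAL;

	return alpine_cpu_wakeup(phys_cpu, (uint32_t)entry);
}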
diff --git a/arch/arm/mach-alpine/alpine_cpu_pm.h b/arch/arm/mach-alpine/alpine_cpu_pm.h
new file mode 100644
index 000000000000..5179e697c492
--- /dev/null
+++ b/arch/arm/mach-alpine/alpine_cpu_pm.h
@@ -0,0 +1,26 @@
+/*
+ * Low-level power-management support for Alpine platform.
+ *
+ * Copyright (C) 2015 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ALPINE_CPU_PM_H__
+#define __ALPINE_CPU_PM_H__
+
+/* Alpine CPU Power Management Services Initialization */
+void alpine_cpu_pm_init(void);
+
+/* Wake-up a CPU */
+int alpine_cpu_wakeup(unsigned int phys_cpu, uint32_t phys_resume_addr);
+
+#endif /* __ALPINE_CPU_PM_H__ */
diff --git a/arch/arm/mach-alpine/alpine_cpu_resume.h b/arch/arm/mach-alpine/alpine_cpu_resume.h
new file mode 100644
index 000000000000..c80150c0d2d8
--- /dev/null
+++ b/arch/arm/mach-alpine/alpine_cpu_resume.h
@@ -0,0 +1,38 @@
+/*
+ * Annapurna labs cpu-resume register structure.
+ *
+ * Copyright (C) 2015 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef ALPINE_CPU_RESUME_H_
+#define ALPINE_CPU_RESUME_H_
+
+/* Per-cpu regs */
+struct al_cpu_resume_regs_per_cpu {
+	uint32_t	flags;
+	uint32_t	resume_addr;
+};
+
+/* general regs */
+struct al_cpu_resume_regs {
+	/* Watermark for validating the CPU resume struct */
+	uint32_t watermark;
+	uint32_t flags;
+	struct al_cpu_resume_regs_per_cpu per_cpu[];
+};
+
+/* The expected magic number for validating the resume addresses */
+#define AL_CPU_RESUME_MAGIC_NUM		0xf0e1d200
+#define AL_CPU_RESUME_MAGIC_NUM_MASK	0xffffff00
+
+#endif /* ALPINE_CPU_RESUME_H_ */
diff --git a/arch/arm/mach-alpine/alpine_machine.c b/arch/arm/mach-alpine/alpine_machine.c
new file mode 100644
index 000000000000..b8e2145e962b
--- /dev/null
+++ b/arch/arm/mach-alpine/alpine_machine.c
@@ -0,0 +1,28 @@
+/*
+ * Machine declaration for Alpine platforms.
+ *
+ * Copyright (C) 2015 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of_platform.h>
+
+#include <asm/mach/arch.h>
+
+static const char * const al_match[] __initconst = {
+	"al,alpine",
+	NULL,
+};
+
+DT_MACHINE_START(AL_DT, "Annapurna Labs Alpine")
+	.dt_compat	= al_match,
+MACHINE_END
diff --git a/arch/arm/mach-alpine/platsmp.c b/arch/arm/mach-alpine/platsmp.c
new file mode 100644
index 000000000000..f78429f48bd6
--- /dev/null
+++ b/arch/arm/mach-alpine/platsmp.c
@@ -0,0 +1,49 @@
+/*
+ * SMP operations for Alpine platform.
+ *
+ * Copyright (C) 2015 Annapurna Labs Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#include <asm/smp_plat.h>
+
+#include "alpine_cpu_pm.h"
+
+static int alpine_boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+	phys_addr_t addr;
+
+	addr = virt_to_phys(secondary_startup);
+
+	if (addr > (phys_addr_t)(uint32_t)(-1)) {
+		pr_err("FAIL: resume address over 32bit (%pa)", &addr);
+		return -EINVAL;
+	}
+
+	return alpine_cpu_wakeup(cpu_logical_map(cpu), (uint32_t)addr);
+}
+
+static void __init alpine_smp_prepare_cpus(unsigned int max_cpus)
+{
+	alpine_cpu_pm_init();
+}
+
+static struct smp_operations alpine_smp_ops __initdata = {
+	.smp_prepare_cpus	= alpine_smp_prepare_cpus,
+	.smp_boot_secondary	= alpine_boot_secondary,
+};
+CPU_METHOD_OF_DECLARE(alpine_smp, "al,alpine-smp", &alpine_smp_ops);
diff --git a/arch/arm/mach-bcm/bcm_cygnus.c b/arch/arm/mach-bcm/bcm_cygnus.c
index 30dc58be51b8..7ae894c7849b 100644
--- a/arch/arm/mach-bcm/bcm_cygnus.c
+++ b/arch/arm/mach-bcm/bcm_cygnus.c
@@ -13,7 +13,7 @@
 
 #include <asm/mach/arch.h>
 
-static const char const *bcm_cygnus_dt_compat[] = {
+static const char * const bcm_cygnus_dt_compat[] __initconst = {
 	"brcm,cygnus",
 	NULL,
 };
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
index f70eca7ee705..acd5b560b728 100644
--- a/arch/arm/mach-exynos/common.h
+++ b/arch/arm/mach-exynos/common.h
@@ -126,6 +126,12 @@ enum {
 
 void exynos_firmware_init(void);
 
+/* CPU BOOT mode flag for Exynos3250 SoC bootloader */
+#define C2_STATE	(1 << 3)
+
+void exynos_set_boot_flag(unsigned int cpu, unsigned int mode);
+void exynos_clear_boot_flag(unsigned int cpu, unsigned int mode);
+
 extern u32 exynos_get_eint_wake_mask(void);
 
 #ifdef CONFIG_PM_SLEEP
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index 8576a9f734bd..bcde0dd668df 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -214,6 +214,7 @@ static void __init exynos_dt_machine_init(void)
 	    of_machine_is_compatible("samsung,exynos4212") ||
 	    (of_machine_is_compatible("samsung,exynos4412") &&
 	     of_machine_is_compatible("samsung,trats2")) ||
+	    of_machine_is_compatible("samsung,exynos3250") ||
 	    of_machine_is_compatible("samsung,exynos5250"))
 		platform_device_register(&exynos_cpuidle);
 
diff --git a/arch/arm/mach-exynos/firmware.c b/arch/arm/mach-exynos/firmware.c
index 4791a3cc00f9..1bd35763f12e 100644
--- a/arch/arm/mach-exynos/firmware.c
+++ b/arch/arm/mach-exynos/firmware.c
@@ -48,7 +48,13 @@ static int exynos_do_idle(unsigned long mode)
 		__raw_writel(virt_to_phys(exynos_cpu_resume_ns),
 			     sysram_ns_base_addr + 0x24);
 		__raw_writel(EXYNOS_AFTR_MAGIC, sysram_ns_base_addr + 0x20);
-		exynos_smc(SMC_CMD_CPU0AFTR, 0, 0, 0);
+		if (soc_is_exynos3250()) {
+			exynos_smc(SMC_CMD_SAVE, OP_TYPE_CORE,
+				   SMC_POWERSTATE_IDLE, 0);
+			exynos_smc(SMC_CMD_SHUTDOWN, OP_TYPE_CLUSTER,
+				   SMC_POWERSTATE_IDLE, 0);
+		} else
+			exynos_smc(SMC_CMD_CPU0AFTR, 0, 0, 0);
 		break;
 	case FW_DO_IDLE_SLEEP:
 		exynos_smc(SMC_CMD_SLEEP, 0, 0, 0);
@@ -206,3 +212,28 @@ void __init exynos_firmware_init(void)
 		outer_cache.configure = exynos_l2_configure;
 	}
 }
+
+#define REG_CPU_STATE_ADDR	(sysram_ns_base_addr + 0x28)
+#define BOOT_MODE_MASK		0x1f
+
+void exynos_set_boot_flag(unsigned int cpu, unsigned int mode)
+{
+	unsigned int tmp;
+
+	tmp = __raw_readl(REG_CPU_STATE_ADDR + cpu * 4);
+
+	if (mode & BOOT_MODE_MASK)
+		tmp &= ~BOOT_MODE_MASK;
+
+	tmp |= mode;
+	__raw_writel(tmp, REG_CPU_STATE_ADDR + cpu * 4);
+}
+
+void exynos_clear_boot_flag(unsigned int cpu, unsigned int mode)
+{
+	unsigned int tmp;
+
+	tmp = __raw_readl(REG_CPU_STATE_ADDR + cpu * 4);
+	tmp &= ~mode;
+	__raw_writel(tmp, REG_CPU_STATE_ADDR + cpu * 4);
+}
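Note: in the helpers added above, a mode value that overlaps the low five bits replaces the previous boot mode, while anything outside the mask is simply OR'ed in; C2_STATE = (1 << 3) falls inside the masked field, so setting it first clears any stale mode. A standalone model of that register update (set_boot_flag/clear_boot_flag are illustrative names):

#include <stdint.h>

#define BOOT_MODE_MASK	0x1f

static uint32_t set_boot_flag(uint32_t reg, uint32_t mode)
{
	if (mode & BOOT_MODE_MASK)	/* a new mode replaces the old one */
		reg &= ~BOOT_MODE_MASK;
	return reg | mode;
}

static uint32_t clear_boot_flag(uint32_t reg, uint32_t mode)
{
	return reg & ~mode;
}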
diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c
index b0d3c2e876fb..9bdf54795f05 100644
--- a/arch/arm/mach-exynos/mcpm-exynos.c
+++ b/arch/arm/mach-exynos/mcpm-exynos.c
@@ -61,25 +61,7 @@ static void __iomem *ns_sram_base_addr;
61 : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \ 61 : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
62 "r9", "r10", "lr", "memory") 62 "r9", "r10", "lr", "memory")
63 63
64/* 64static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)
65 * We can't use regular spinlocks. In the switcher case, it is possible
66 * for an outbound CPU to call power_down() after its inbound counterpart
67 * is already live using the same logical CPU number which trips lockdep
68 * debugging.
69 */
70static arch_spinlock_t exynos_mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
71static int
72cpu_use_count[EXYNOS5420_CPUS_PER_CLUSTER][EXYNOS5420_NR_CLUSTERS];
73
74#define exynos_cluster_usecnt(cluster) \
75 (cpu_use_count[0][cluster] + \
76 cpu_use_count[1][cluster] + \
77 cpu_use_count[2][cluster] + \
78 cpu_use_count[3][cluster])
79
80#define exynos_cluster_unused(cluster) !exynos_cluster_usecnt(cluster)
81
82static int exynos_power_up(unsigned int cpu, unsigned int cluster)
83{ 65{
84 unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER); 66 unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
85 67
@@ -88,127 +70,65 @@ static int exynos_power_up(unsigned int cpu, unsigned int cluster)
 	    cluster >= EXYNOS5420_NR_CLUSTERS)
 		return -EINVAL;
 
-	/*
-	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
-	 * variant exists, we need to disable IRQs manually here.
-	 */
-	local_irq_disable();
-	arch_spin_lock(&exynos_mcpm_lock);
-
-	cpu_use_count[cpu][cluster]++;
-	if (cpu_use_count[cpu][cluster] == 1) {
-		bool was_cluster_down =
-			(exynos_cluster_usecnt(cluster) == 1);
-
-		/*
-		 * Turn on the cluster (L2/COMMON) and then power on the
-		 * cores.
-		 */
-		if (was_cluster_down)
-			exynos_cluster_power_up(cluster);
-
-		exynos_cpu_power_up(cpunr);
-	} else if (cpu_use_count[cpu][cluster] != 2) {
-		/*
-		 * The only possible values are:
-		 * 0 = CPU down
-		 * 1 = CPU (still) up
-		 * 2 = CPU requested to be up before it had a chance
-		 *     to actually make itself down.
-		 * Any other value is a bug.
-		 */
-		BUG();
-	}
+	exynos_cpu_power_up(cpunr);
+	return 0;
+}
 
-	arch_spin_unlock(&exynos_mcpm_lock);
-	local_irq_enable();
+static int exynos_cluster_powerup(unsigned int cluster)
+{
+	pr_debug("%s: cluster %u\n", __func__, cluster);
+	if (cluster >= EXYNOS5420_NR_CLUSTERS)
+		return -EINVAL;
 
+	exynos_cluster_power_up(cluster);
 	return 0;
 }
 
-/*
- * NOTE: This function requires the stack data to be visible through power down
- * and can only be executed on processors like A15 and A7 that hit the cache
- * with the C bit clear in the SCTLR register.
- */
-static void exynos_power_down(void)
+static void exynos_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
 {
-	unsigned int mpidr, cpu, cluster;
-	bool last_man = false, skip_wfi = false;
-	unsigned int cpunr;
-
-	mpidr = read_cpuid_mpidr();
-	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-	cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
+	unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
 
 	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
 	BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
 	       cluster >= EXYNOS5420_NR_CLUSTERS);
+	exynos_cpu_power_down(cpunr);
+}
 
-	__mcpm_cpu_going_down(cpu, cluster);
-
-	arch_spin_lock(&exynos_mcpm_lock);
-	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
-	cpu_use_count[cpu][cluster]--;
-	if (cpu_use_count[cpu][cluster] == 0) {
-		exynos_cpu_power_down(cpunr);
-
-		if (exynos_cluster_unused(cluster)) {
-			exynos_cluster_power_down(cluster);
-			last_man = true;
-		}
-	} else if (cpu_use_count[cpu][cluster] == 1) {
-		/*
-		 * A power_up request went ahead of us.
-		 * Even if we do not want to shut this CPU down,
-		 * the caller expects a certain state as if the WFI
-		 * was aborted. So let's continue with cache cleaning.
-		 */
-		skip_wfi = true;
-	} else {
-		BUG();
-	}
-
-	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
-		arch_spin_unlock(&exynos_mcpm_lock);
-
-		if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
-			/*
-			 * On the Cortex-A15 we need to disable
-			 * L2 prefetching before flushing the cache.
-			 */
-			asm volatile(
-			"mcr	p15, 1, %0, c15, c0, 3\n\t"
-			"isb\n\t"
-			"dsb"
-			: : "r" (0x400));
-		}
+static void exynos_cluster_powerdown_prepare(unsigned int cluster)
+{
+	pr_debug("%s: cluster %u\n", __func__, cluster);
+	BUG_ON(cluster >= EXYNOS5420_NR_CLUSTERS);
+	exynos_cluster_power_down(cluster);
+}
 
-		/* Flush all cache levels for this cluster. */
-		exynos_v7_exit_coherency_flush(all);
+static void exynos_cpu_cache_disable(void)
+{
+	/* Disable and flush the local CPU cache. */
+	exynos_v7_exit_coherency_flush(louis);
+}
 
+static void exynos_cluster_cache_disable(void)
+{
+	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
 		/*
-		 * Disable cluster-level coherency by masking
-		 * incoming snoops and DVM messages:
+		 * On the Cortex-A15 we need to disable
+		 * L2 prefetching before flushing the cache.
 		 */
-		cci_disable_port_by_cpu(mpidr);
-
-		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
-	} else {
-		arch_spin_unlock(&exynos_mcpm_lock);
-
-		/* Disable and flush the local CPU cache. */
-		exynos_v7_exit_coherency_flush(louis);
+		asm volatile(
+		"mcr	p15, 1, %0, c15, c0, 3\n\t"
+		"isb\n\t"
+		"dsb"
+		: : "r" (0x400));
 	}
 
-	__mcpm_cpu_down(cpu, cluster);
-
-	/* Now we are prepared for power-down, do it: */
-	if (!skip_wfi)
-		wfi();
+	/* Flush all cache levels for this cluster. */
+	exynos_v7_exit_coherency_flush(all);
 
-	/* Not dead at this point? Let our caller cope. */
+	/*
+	 * Disable cluster-level coherency by masking
+	 * incoming snoops and DVM messages:
+	 */
+	cci_disable_port_by_cpu(read_cpuid_mpidr());
 }
 
 static int exynos_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
@@ -222,10 +142,8 @@ static int exynos_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
 
 	/* Wait for the core state to be OFF */
 	while (tries--) {
-		if (ACCESS_ONCE(cpu_use_count[cpu][cluster]) == 0) {
-			if ((exynos_cpu_power_state(cpunr) == 0))
-				return 0; /* success: the CPU is halted */
-		}
+		if ((exynos_cpu_power_state(cpunr) == 0))
+			return 0; /* success: the CPU is halted */
 
 		/* Otherwise, wait and retry: */
 		msleep(1);
@@ -234,63 +152,23 @@ static int exynos_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
 	return -ETIMEDOUT; /* timeout */
 }
 
-static void exynos_powered_up(void)
-{
-	unsigned int mpidr, cpu, cluster;
-
-	mpidr = read_cpuid_mpidr();
-	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-
-	arch_spin_lock(&exynos_mcpm_lock);
-	if (cpu_use_count[cpu][cluster] == 0)
-		cpu_use_count[cpu][cluster] = 1;
-	arch_spin_unlock(&exynos_mcpm_lock);
-}
-
-static void exynos_suspend(u64 residency)
+static void exynos_cpu_is_up(unsigned int cpu, unsigned int cluster)
 {
-	unsigned int mpidr, cpunr;
-
-	exynos_power_down();
-
-	/*
-	 * Execution reaches here only if cpu did not power down.
-	 * Hence roll back the changes done in exynos_power_down function.
-	 *
-	 * CAUTION: "This function requires the stack data to be visible through
-	 * power down and can only be executed on processors like A15 and A7
-	 * that hit the cache with the C bit clear in the SCTLR register."
-	 */
-	mpidr = read_cpuid_mpidr();
-	cpunr = exynos_pmu_cpunr(mpidr);
-
-	exynos_cpu_power_up(cpunr);
+	/* especially when resuming: make sure power control is set */
+	exynos_cpu_powerup(cpu, cluster);
 }
 
 static const struct mcpm_platform_ops exynos_power_ops = {
-	.power_up		= exynos_power_up,
-	.power_down		= exynos_power_down,
+	.cpu_powerup		= exynos_cpu_powerup,
+	.cluster_powerup	= exynos_cluster_powerup,
+	.cpu_powerdown_prepare	= exynos_cpu_powerdown_prepare,
+	.cluster_powerdown_prepare = exynos_cluster_powerdown_prepare,
+	.cpu_cache_disable	= exynos_cpu_cache_disable,
+	.cluster_cache_disable	= exynos_cluster_cache_disable,
 	.wait_for_powerdown	= exynos_wait_for_powerdown,
-	.suspend		= exynos_suspend,
-	.powered_up		= exynos_powered_up,
+	.cpu_is_up		= exynos_cpu_is_up,
 };
 
-static void __init exynos_mcpm_usage_count_init(void)
-{
-	unsigned int mpidr, cpu, cluster;
-
-	mpidr = read_cpuid_mpidr();
-	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-
-	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
-	BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
-	       cluster >= EXYNOS5420_NR_CLUSTERS);
-
-	cpu_use_count[cpu][cluster] = 1;
-}
-
 /*
  * Enable cluster-level coherency, in preparation for turning on the MMU.
  */
@@ -302,19 +180,6 @@ static void __naked exynos_pm_power_up_setup(unsigned int affinity_level)
302 "b cci_enable_port_for_self"); 180 "b cci_enable_port_for_self");
303} 181}
304 182
305static void __init exynos_cache_off(void)
306{
307 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
308 /* disable L2 prefetching on the Cortex-A15 */
309 asm volatile(
310 "mcr p15, 1, %0, c15, c0, 3\n\t"
311 "isb\n\t"
312 "dsb"
313 : : "r" (0x400));
314 }
315 exynos_v7_exit_coherency_flush(all);
316}
317
318static const struct of_device_id exynos_dt_mcpm_match[] = { 183static const struct of_device_id exynos_dt_mcpm_match[] = {
319 { .compatible = "samsung,exynos5420" }, 184 { .compatible = "samsung,exynos5420" },
320 { .compatible = "samsung,exynos5800" }, 185 { .compatible = "samsung,exynos5800" },
@@ -370,13 +235,11 @@ static int __init exynos_mcpm_init(void)
 	 */
 	pmu_raw_writel(EXYNOS5420_SWRESET_KFC_SEL, S5P_PMU_SPARE3);
 
-	exynos_mcpm_usage_count_init();
-
 	ret = mcpm_platform_register(&exynos_power_ops);
 	if (!ret)
 		ret = mcpm_sync_init(exynos_pm_power_up_setup);
 	if (!ret)
-		ret = mcpm_loopback(exynos_cache_off); /* turn on the CCI */
+		ret = mcpm_loopback(exynos_cluster_cache_disable); /* turn on the CCI */
 	if (ret) {
 		iounmap(ns_sram_base_addr);
 		return ret;
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index d2e9f12d12f1..ebd135bb0995 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -126,6 +126,8 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
  */
 void exynos_cpu_power_down(int cpu)
 {
+	u32 core_conf;
+
 	if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) {
 		/*
 		 * Bypass power down for CPU0 during suspend. Check for
@@ -137,7 +139,10 @@ void exynos_cpu_power_down(int cpu)
 		if (!(val & S5P_CORE_LOCAL_PWR_EN))
 			return;
 	}
-	pmu_raw_writel(0, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
+
+	core_conf = pmu_raw_readl(EXYNOS_ARM_CORE_CONFIGURATION(cpu));
+	core_conf &= ~S5P_CORE_LOCAL_PWR_EN;
+	pmu_raw_writel(core_conf, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
 }
 
 /**
@@ -148,7 +153,12 @@ void exynos_cpu_power_down(int cpu)
  */
 void exynos_cpu_power_up(int cpu)
 {
-	pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
+	u32 core_conf = S5P_CORE_LOCAL_PWR_EN;
+
+	if (soc_is_exynos3250())
+		core_conf |= S5P_CORE_AUTOWAKEUP_EN;
+
+	pmu_raw_writel(core_conf,
 		       EXYNOS_ARM_CORE_CONFIGURATION(cpu));
 }
 
@@ -226,6 +236,10 @@ static void exynos_core_restart(u32 core_id)
 	if (!of_machine_is_compatible("samsung,exynos3250"))
 		return;
 
+	while (!pmu_raw_readl(S5P_PMU_SPARE2))
+		udelay(10);
+	udelay(10);
+
 	val = pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(core_id));
 	val |= S5P_CORE_WAKEUP_FROM_LOCAL_CFG;
 	pmu_raw_writel(val, EXYNOS_ARM_CORE_STATUS(core_id));
@@ -346,7 +360,10 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
 
 		call_firmware_op(cpu_boot, core_id);
 
-		arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+		if (soc_is_exynos3250())
+			dsb_sev();
+		else
+			arch_send_wakeup_ipi_mask(cpumask_of(cpu));
 
 		if (pen_release == -1)
 			break;
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index 5685250693fd..cc75ab448be3 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -127,6 +127,8 @@ int exynos_pm_central_resume(void)
 static void exynos_set_wakeupmask(long mask)
 {
 	pmu_raw_writel(mask, S5P_WAKEUP_MASK);
+	if (soc_is_exynos3250())
+		pmu_raw_writel(0x0, S5P_WAKEUP_MASK2);
 }
 
 static void exynos_cpu_set_boot_vector(long flags)
@@ -140,7 +142,7 @@ static int exynos_aftr_finisher(unsigned long flags)
 {
 	int ret;
 
-	exynos_set_wakeupmask(0x0000ff3e);
+	exynos_set_wakeupmask(soc_is_exynos3250() ? 0x40003ffe : 0x0000ff3e);
 	/* Set value of power down register for aftr mode */
 	exynos_sys_powerdown_conf(SYS_AFTR);
 
@@ -157,8 +159,13 @@ static int exynos_aftr_finisher(unsigned long flags)
 
 void exynos_enter_aftr(void)
 {
+	unsigned int cpuid = smp_processor_id();
+
 	cpu_pm_enter();
 
+	if (soc_is_exynos3250())
+		exynos_set_boot_flag(cpuid, C2_STATE);
+
 	exynos_pm_central_suspend();
 
 	if (of_machine_is_compatible("samsung,exynos4212") ||
@@ -178,6 +185,9 @@ void exynos_enter_aftr(void)
 
 	exynos_pm_central_resume();
 
+	if (soc_is_exynos3250())
+		exynos_clear_boot_flag(cpuid, C2_STATE);
+
 	cpu_pm_exit();
 }
 
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 37266a826437..cbe56b35aea0 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -37,6 +37,7 @@ struct exynos_pm_domain {
 	struct clk *oscclk;
 	struct clk *clk[MAX_CLK_PER_DOMAIN];
 	struct clk *pclk[MAX_CLK_PER_DOMAIN];
+	struct clk *asb_clk[MAX_CLK_PER_DOMAIN];
 };
 
 static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
@@ -45,14 +46,19 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
 	void __iomem *base;
 	u32 timeout, pwr;
 	char *op;
+	int i;
 
 	pd = container_of(domain, struct exynos_pm_domain, pd);
 	base = pd->base;
 
+	for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+		if (IS_ERR(pd->asb_clk[i]))
+			break;
+		clk_prepare_enable(pd->asb_clk[i]);
+	}
+
 	/* Set oscclk before powering off a domain*/
 	if (!power_on) {
-		int i;
-
 		for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
 			if (IS_ERR(pd->clk[i]))
 				break;
@@ -81,8 +87,6 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
 
 	/* Restore clocks after powering on a domain*/
 	if (power_on) {
-		int i;
-
 		for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
 			if (IS_ERR(pd->clk[i]))
 				break;
@@ -92,6 +96,12 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
 		}
 	}
 
+	for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+		if (IS_ERR(pd->asb_clk[i]))
+			break;
+		clk_disable_unprepare(pd->asb_clk[i]);
+	}
+
 	return 0;
 }
 
@@ -125,12 +135,21 @@ static __init int exynos4_pm_init_power_domain(void)
 			return -ENOMEM;
 		}
 
-		pd->pd.name = kstrdup(np->name, GFP_KERNEL);
+		pd->pd.name = kstrdup(dev_name(dev), GFP_KERNEL);
 		pd->name = pd->pd.name;
 		pd->base = of_iomap(np, 0);
 		pd->pd.power_off = exynos_pd_power_off;
 		pd->pd.power_on = exynos_pd_power_on;
 
+		for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+			char clk_name[8];
+
+			snprintf(clk_name, sizeof(clk_name), "asb%d", i);
+			pd->asb_clk[i] = clk_get(dev, clk_name);
+			if (IS_ERR(pd->asb_clk[i]))
+				break;
+		}
+
 		pd->oscclk = clk_get(dev, "oscclk");
 		if (IS_ERR(pd->oscclk))
 			goto no_clk;
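Note: the pm_domains.c change above brackets every domain power transition with the domain's "asbN" (asynchronous bridge) clocks enabled; the array is filled until the first failing "asb%d" lookup, and the enable and disable loops stop at the same IS_ERR sentinel. The pattern in isolation, as a sketch compiled in the context of pm_domains.c (pd_asb_enable/pd_asb_disable are illustrative helper names, not part of the patch):

static void pd_asb_enable(struct exynos_pm_domain *pd)
{
	int i;

	for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
		if (IS_ERR(pd->asb_clk[i]))
			break;		/* array ends at first missing "asbN" */
		clk_prepare_enable(pd->asb_clk[i]);
	}
}

static void pd_asb_disable(struct exynos_pm_domain *pd)
{
	int i;

	for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
		if (IS_ERR(pd->asb_clk[i]))
			break;
		clk_disable_unprepare(pd->asb_clk[i]);
	}
}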
diff --git a/arch/arm/mach-exynos/regs-pmu.h b/arch/arm/mach-exynos/regs-pmu.h
index eb461e1c325a..b7614333d296 100644
--- a/arch/arm/mach-exynos/regs-pmu.h
+++ b/arch/arm/mach-exynos/regs-pmu.h
@@ -43,12 +43,14 @@
 #define S5P_WAKEUP_STAT				0x0600
 #define S5P_EINT_WAKEUP_MASK			0x0604
 #define S5P_WAKEUP_MASK				0x0608
+#define S5P_WAKEUP_MASK2			0x0614
 
 #define S5P_INFORM0				0x0800
 #define S5P_INFORM1				0x0804
 #define S5P_INFORM5				0x0814
 #define S5P_INFORM6				0x0818
 #define S5P_INFORM7				0x081C
+#define S5P_PMU_SPARE2				0x0908
 #define S5P_PMU_SPARE3				0x090C
 
 #define EXYNOS_IROM_DATA2			0x0988
@@ -182,6 +184,7 @@
 
 #define S5P_CORE_LOCAL_PWR_EN			0x3
 #define S5P_CORE_WAKEUP_FROM_LOCAL_CFG		(0x3 << 8)
+#define S5P_CORE_AUTOWAKEUP_EN			(1 << 31)
 
 /* Only for EXYNOS4210 */
 #define S5P_CMU_CLKSTOP_LCD1_LOWPWR	0x1154
diff --git a/arch/arm/mach-exynos/smc.h b/arch/arm/mach-exynos/smc.h
index f7b82f9c1e21..c2845717bc8f 100644
--- a/arch/arm/mach-exynos/smc.h
+++ b/arch/arm/mach-exynos/smc.h
@@ -17,6 +17,8 @@
 #define SMC_CMD_SLEEP		(-3)
 #define SMC_CMD_CPU1BOOT	(-4)
 #define SMC_CMD_CPU0AFTR	(-5)
+#define SMC_CMD_SAVE		(-6)
+#define SMC_CMD_SHUTDOWN	(-7)
 /* For CP15 Access */
 #define SMC_CMD_C15RESUME	(-11)
 /* For L2 Cache Access */
@@ -32,4 +34,11 @@ extern void exynos_smc(u32 cmd, u32 arg1, u32 arg2, u32 arg3);
 
 #endif /* __ASSEMBLY__ */
 
+/* op type for SMC_CMD_SAVE and SMC_CMD_SHUTDOWN */
+#define OP_TYPE_CORE		0x0
+#define OP_TYPE_CLUSTER		0x1
+
+/* Power State required for SMC_CMD_SAVE and SMC_CMD_SHUTDOWN */
+#define SMC_POWERSTATE_IDLE	0x1
+
 #endif
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index 2146d918aedd..3e6aea7f83af 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -65,8 +65,6 @@ static struct sleep_save exynos_core_save[] = {
 
 struct exynos_pm_data {
 	const struct exynos_wkup_irq *wkup_irq;
-	struct sleep_save *extra_save;
-	int num_extra_save;
 	unsigned int wake_disable_mask;
 	unsigned int *release_ret_regs;
 
@@ -77,7 +75,7 @@ struct exynos_pm_data {
 	int (*cpu_suspend)(unsigned long);
 };
 
-struct exynos_pm_data *pm_data;
+static const struct exynos_pm_data *pm_data;
 
 static int exynos5420_cpu_state;
 static unsigned int exynos_pmu_spare3;
@@ -106,7 +104,7 @@ static const struct exynos_wkup_irq exynos5250_wkup_irq[] = {
106 { /* sentinel */ }, 104 { /* sentinel */ },
107}; 105};
108 106
109unsigned int exynos_release_ret_regs[] = { 107static unsigned int exynos_release_ret_regs[] = {
110 S5P_PAD_RET_MAUDIO_OPTION, 108 S5P_PAD_RET_MAUDIO_OPTION,
111 S5P_PAD_RET_GPIO_OPTION, 109 S5P_PAD_RET_GPIO_OPTION,
112 S5P_PAD_RET_UART_OPTION, 110 S5P_PAD_RET_UART_OPTION,
@@ -117,7 +115,7 @@ unsigned int exynos_release_ret_regs[] = {
117 REG_TABLE_END, 115 REG_TABLE_END,
118}; 116};
119 117
120unsigned int exynos3250_release_ret_regs[] = { 118static unsigned int exynos3250_release_ret_regs[] = {
121 S5P_PAD_RET_MAUDIO_OPTION, 119 S5P_PAD_RET_MAUDIO_OPTION,
122 S5P_PAD_RET_GPIO_OPTION, 120 S5P_PAD_RET_GPIO_OPTION,
123 S5P_PAD_RET_UART_OPTION, 121 S5P_PAD_RET_UART_OPTION,
@@ -130,7 +128,7 @@ unsigned int exynos3250_release_ret_regs[] = {
130 REG_TABLE_END, 128 REG_TABLE_END,
131}; 129};
132 130
133unsigned int exynos5420_release_ret_regs[] = { 131static unsigned int exynos5420_release_ret_regs[] = {
134 EXYNOS_PAD_RET_DRAM_OPTION, 132 EXYNOS_PAD_RET_DRAM_OPTION,
135 EXYNOS_PAD_RET_MAUDIO_OPTION, 133 EXYNOS_PAD_RET_MAUDIO_OPTION,
136 EXYNOS_PAD_RET_JTAG_OPTION, 134 EXYNOS_PAD_RET_JTAG_OPTION,
@@ -349,10 +347,6 @@ static void exynos_pm_prepare(void)
349 347
350 s3c_pm_do_save(exynos_core_save, ARRAY_SIZE(exynos_core_save)); 348 s3c_pm_do_save(exynos_core_save, ARRAY_SIZE(exynos_core_save));
351 349
352 if (pm_data->extra_save)
353 s3c_pm_do_save(pm_data->extra_save,
354 pm_data->num_extra_save);
355
356 exynos_pm_enter_sleep_mode(); 350 exynos_pm_enter_sleep_mode();
357 351
358 /* ensure at least INFORM0 has the resume address */ 352 /* ensure at least INFORM0 has the resume address */
@@ -475,10 +469,6 @@ static void exynos_pm_resume(void)
475 /* For release retention */ 469 /* For release retention */
476 exynos_pm_release_retention(); 470 exynos_pm_release_retention();
477 471
478 if (pm_data->extra_save)
479 s3c_pm_do_restore_core(pm_data->extra_save,
480 pm_data->num_extra_save);
481
482 s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save)); 472 s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
483 473
484 if (cpuid == ARM_CPU_PART_CORTEX_A9) 474 if (cpuid == ARM_CPU_PART_CORTEX_A9)
@@ -685,7 +675,7 @@ static const struct exynos_pm_data exynos5250_pm_data = {
685 .cpu_suspend = exynos_cpu_suspend, 675 .cpu_suspend = exynos_cpu_suspend,
686}; 676};
687 677
688static struct exynos_pm_data exynos5420_pm_data = { 678static const struct exynos_pm_data exynos5420_pm_data = {
689 .wkup_irq = exynos5250_wkup_irq, 679 .wkup_irq = exynos5250_wkup_irq,
690 .wake_disable_mask = (0x7F << 7) | (0x1F << 1), 680 .wake_disable_mask = (0x7F << 7) | (0x1F << 1),
691 .release_ret_regs = exynos5420_release_ret_regs, 681 .release_ret_regs = exynos5420_release_ret_regs,
@@ -736,7 +726,7 @@ void __init exynos_pm_init(void)
736 if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) 726 if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL)))
737 pr_warn("Outdated DT detected, suspend/resume will NOT work\n"); 727 pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
738 728
739 pm_data = (struct exynos_pm_data *) match->data; 729 pm_data = (const struct exynos_pm_data *) match->data;
740 730
741 /* All wakeup disable */ 731 /* All wakeup disable */
742 tmp = pmu_raw_readl(S5P_WAKEUP_MASK); 732 tmp = pmu_raw_readl(S5P_WAKEUP_MASK);
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 0748747b2bc6..3a3d3e9d7bfd 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -21,6 +21,7 @@ config MXC_AVIC
21 21
22config MXC_DEBUG_BOARD 22config MXC_DEBUG_BOARD
23 bool "Enable MXC debug board(for 3-stack)" 23 bool "Enable MXC debug board(for 3-stack)"
24 depends on MACH_MX27_3DS || MACH_MX31_3DS || MACH_MX35_3DS
24 help 25 help
25 The debug board is an integral part of the MXC 3-stack(PDK) 26 The debug board is an integral part of the MXC 3-stack(PDK)
26 platforms, it can be attached or removed from the peripheral 27 platforms, it can be attached or removed from the peripheral
@@ -50,6 +51,7 @@ config HAVE_IMX_ANATOP
50 51
51config HAVE_IMX_GPC 52config HAVE_IMX_GPC
52 bool 53 bool
54 select PM_GENERIC_DOMAINS if PM
53 55
54config HAVE_IMX_MMDC 56config HAVE_IMX_MMDC
55 bool 57 bool
@@ -586,6 +588,7 @@ config SOC_VF610
586 select ARM_GIC 588 select ARM_GIC
587 select PINCTRL_VF610 589 select PINCTRL_VF610
588 select PL310_ERRATA_769419 if CACHE_L2X0 590 select PL310_ERRATA_769419 if CACHE_L2X0
591 select SMP_ON_UP if SMP
589 592
590 help 593 help
591 This enables support for Freescale Vybrid VF610 processor. 594 This enables support for Freescale Vybrid VF610 processor.
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index d04a430607b8..469a150bf98f 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -119,6 +119,7 @@ static unsigned int share_count_asrc;
119static unsigned int share_count_ssi1; 119static unsigned int share_count_ssi1;
120static unsigned int share_count_ssi2; 120static unsigned int share_count_ssi2;
121static unsigned int share_count_ssi3; 121static unsigned int share_count_ssi3;
122static unsigned int share_count_mipi_core_cfg;
122 123
123static void __init imx6q_clocks_init(struct device_node *ccm_node) 124static void __init imx6q_clocks_init(struct device_node *ccm_node)
124{ 125{
@@ -246,6 +247,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
246 clk[IMX6QDL_CLK_PLL3_60M] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8); 247 clk[IMX6QDL_CLK_PLL3_60M] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8);
247 clk[IMX6QDL_CLK_TWD] = imx_clk_fixed_factor("twd", "arm", 1, 2); 248 clk[IMX6QDL_CLK_TWD] = imx_clk_fixed_factor("twd", "arm", 1, 2);
248 clk[IMX6QDL_CLK_GPT_3M] = imx_clk_fixed_factor("gpt_3m", "osc", 1, 8); 249 clk[IMX6QDL_CLK_GPT_3M] = imx_clk_fixed_factor("gpt_3m", "osc", 1, 8);
250 clk[IMX6QDL_CLK_VIDEO_27M] = imx_clk_fixed_factor("video_27m", "pll3_pfd1_540m", 1, 20);
249 if (cpu_is_imx6dl()) { 251 if (cpu_is_imx6dl()) {
250 clk[IMX6QDL_CLK_GPU2D_AXI] = imx_clk_fixed_factor("gpu2d_axi", "mmdc_ch0_axi_podf", 1, 1); 252 clk[IMX6QDL_CLK_GPU2D_AXI] = imx_clk_fixed_factor("gpu2d_axi", "mmdc_ch0_axi_podf", 1, 1);
251 clk[IMX6QDL_CLK_GPU3D_AXI] = imx_clk_fixed_factor("gpu3d_axi", "mmdc_ch0_axi_podf", 1, 1); 253 clk[IMX6QDL_CLK_GPU3D_AXI] = imx_clk_fixed_factor("gpu3d_axi", "mmdc_ch0_axi_podf", 1, 1);
@@ -400,7 +402,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
400 clk[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24); 402 clk[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24);
401 clk[IMX6QDL_CLK_GPU3D_CORE] = imx_clk_gate2("gpu3d_core", "gpu3d_core_podf", base + 0x6c, 26); 403 clk[IMX6QDL_CLK_GPU3D_CORE] = imx_clk_gate2("gpu3d_core", "gpu3d_core_podf", base + 0x6c, 26);
402 clk[IMX6QDL_CLK_HDMI_IAHB] = imx_clk_gate2("hdmi_iahb", "ahb", base + 0x70, 0); 404 clk[IMX6QDL_CLK_HDMI_IAHB] = imx_clk_gate2("hdmi_iahb", "ahb", base + 0x70, 0);
403 clk[IMX6QDL_CLK_HDMI_ISFR] = imx_clk_gate2("hdmi_isfr", "pll3_pfd1_540m", base + 0x70, 4); 405 clk[IMX6QDL_CLK_HDMI_ISFR] = imx_clk_gate2("hdmi_isfr", "video_27m", base + 0x70, 4);
404 clk[IMX6QDL_CLK_I2C1] = imx_clk_gate2("i2c1", "ipg_per", base + 0x70, 6); 406 clk[IMX6QDL_CLK_I2C1] = imx_clk_gate2("i2c1", "ipg_per", base + 0x70, 6);
405 clk[IMX6QDL_CLK_I2C2] = imx_clk_gate2("i2c2", "ipg_per", base + 0x70, 8); 407 clk[IMX6QDL_CLK_I2C2] = imx_clk_gate2("i2c2", "ipg_per", base + 0x70, 8);
406 clk[IMX6QDL_CLK_I2C3] = imx_clk_gate2("i2c3", "ipg_per", base + 0x70, 10); 408 clk[IMX6QDL_CLK_I2C3] = imx_clk_gate2("i2c3", "ipg_per", base + 0x70, 10);
@@ -415,7 +417,9 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
415 clk[IMX6QDL_CLK_LDB_DI0] = imx_clk_gate2("ldb_di0", "ldb_di0_podf", base + 0x74, 12); 417 clk[IMX6QDL_CLK_LDB_DI0] = imx_clk_gate2("ldb_di0", "ldb_di0_podf", base + 0x74, 12);
416 clk[IMX6QDL_CLK_LDB_DI1] = imx_clk_gate2("ldb_di1", "ldb_di1_podf", base + 0x74, 14); 418 clk[IMX6QDL_CLK_LDB_DI1] = imx_clk_gate2("ldb_di1", "ldb_di1_podf", base + 0x74, 14);
417 clk[IMX6QDL_CLK_IPU2_DI1] = imx_clk_gate2("ipu2_di1", "ipu2_di1_sel", base + 0x74, 10); 419 clk[IMX6QDL_CLK_IPU2_DI1] = imx_clk_gate2("ipu2_di1", "ipu2_di1_sel", base + 0x74, 10);
418 clk[IMX6QDL_CLK_HSI_TX] = imx_clk_gate2("hsi_tx", "hsi_tx_podf", base + 0x74, 16); 420 clk[IMX6QDL_CLK_HSI_TX] = imx_clk_gate2_shared("hsi_tx", "hsi_tx_podf", base + 0x74, 16, &share_count_mipi_core_cfg);
421 clk[IMX6QDL_CLK_MIPI_CORE_CFG] = imx_clk_gate2_shared("mipi_core_cfg", "video_27m", base + 0x74, 16, &share_count_mipi_core_cfg);
422 clk[IMX6QDL_CLK_MIPI_IPG] = imx_clk_gate2_shared("mipi_ipg", "ipg", base + 0x74, 16, &share_count_mipi_core_cfg);
419 if (cpu_is_imx6dl()) 423 if (cpu_is_imx6dl())
420 /* 424 /*
421 * The multiplexer and divider of the imx6q clock gpu2d get 425 * The multiplexer and divider of the imx6q clock gpu2d get
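
Note that hsi_tx, mipi_core_cfg and mipi_ipg above all gate on the same bit (CCGR offset 0x74, shift 16) through imx_clk_gate2_shared() and the new share_count_mipi_core_cfg counter. A conceptual sketch of the shared-count idea, not the kernel's clk-gate2 implementation (gate2 enable fields are 2 bits wide, hence the 3 << bit mask):

	/* The hardware bit pair is only touched when the shared use
	 * count crosses the 0 <-> 1 boundary, so several logical
	 * clocks can ride on one gate independently.
	 */
	static void shared_gate2_enable(void __iomem *reg, unsigned int bit,
					unsigned int *share_count)
	{
		if ((*share_count)++ == 0)
			writel_relaxed(readl_relaxed(reg) | (3 << bit), reg);
	}

	static void shared_gate2_disable(void __iomem *reg, unsigned int bit,
					 unsigned int *share_count)
	{
		if (--(*share_count) == 0)
			writel_relaxed(readl_relaxed(reg) & ~(3 << bit), reg);
	}
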
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index 745caa18ab2c..029f59ce2712 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -10,15 +10,25 @@
10 * http://www.gnu.org/copyleft/gpl.html 10 * http://www.gnu.org/copyleft/gpl.html
11 */ 11 */
12 12
13#include <linux/clk.h>
14#include <linux/delay.h>
13#include <linux/io.h> 15#include <linux/io.h>
14#include <linux/irq.h> 16#include <linux/irq.h>
15#include <linux/of.h> 17#include <linux/of.h>
16#include <linux/of_address.h> 18#include <linux/of_address.h>
17#include <linux/of_irq.h> 19#include <linux/of_irq.h>
20#include <linux/platform_device.h>
21#include <linux/pm_domain.h>
22#include <linux/regulator/consumer.h>
18#include <linux/irqchip/arm-gic.h> 23#include <linux/irqchip/arm-gic.h>
19#include "common.h" 24#include "common.h"
25#include "hardware.h"
20 26
27#define GPC_CNTR 0x000
21#define GPC_IMR1 0x008 28#define GPC_IMR1 0x008
29#define GPC_PGC_GPU_PDN 0x260
30#define GPC_PGC_GPU_PUPSCR 0x264
31#define GPC_PGC_GPU_PDNSCR 0x268
22#define GPC_PGC_CPU_PDN 0x2a0 32#define GPC_PGC_CPU_PDN 0x2a0
23#define GPC_PGC_CPU_PUPSCR 0x2a4 33#define GPC_PGC_CPU_PUPSCR 0x2a4
24#define GPC_PGC_CPU_PDNSCR 0x2a8 34#define GPC_PGC_CPU_PDNSCR 0x2a8
@@ -27,6 +37,18 @@
27 37
28#define IMR_NUM 4 38#define IMR_NUM 4
29 39
40#define GPU_VPU_PUP_REQ BIT(1)
41#define GPU_VPU_PDN_REQ BIT(0)
42
43#define GPC_CLK_MAX 6
44
45struct pu_domain {
46 struct generic_pm_domain base;
47 struct regulator *reg;
48 struct clk *clk[GPC_CLK_MAX];
49 int num_clks;
50};
51
30static void __iomem *gpc_base; 52static void __iomem *gpc_base;
31static u32 gpc_wake_irqs[IMR_NUM]; 53static u32 gpc_wake_irqs[IMR_NUM];
32static u32 gpc_saved_imrs[IMR_NUM]; 54static u32 gpc_saved_imrs[IMR_NUM];
@@ -170,3 +192,194 @@ void __init imx_gpc_init(void)
170 gic_arch_extn.irq_unmask = imx_gpc_irq_unmask; 192 gic_arch_extn.irq_unmask = imx_gpc_irq_unmask;
171 gic_arch_extn.irq_set_wake = imx_gpc_irq_set_wake; 193 gic_arch_extn.irq_set_wake = imx_gpc_irq_set_wake;
172} 194}
195
196#ifdef CONFIG_PM_GENERIC_DOMAINS
197
198static void _imx6q_pm_pu_power_off(struct generic_pm_domain *genpd)
199{
200 int iso, iso2sw;
201 u32 val;
202
203 /* Read ISO and ISO2SW power down delays */
204 val = readl_relaxed(gpc_base + GPC_PGC_GPU_PDNSCR);
205 iso = val & 0x3f;
206 iso2sw = (val >> 8) & 0x3f;
207
208 /* Gate off PU domain when GPU/VPU is powered down */
209 writel_relaxed(0x1, gpc_base + GPC_PGC_GPU_PDN);
210
211 /* Request GPC to power down GPU/VPU */
212 val = readl_relaxed(gpc_base + GPC_CNTR);
213 val |= GPU_VPU_PDN_REQ;
214 writel_relaxed(val, gpc_base + GPC_CNTR);
215
216 /* Wait ISO + ISO2SW IPG clock cycles */
217 ndelay((iso + iso2sw) * 1000 / 66);
218}
219
220static int imx6q_pm_pu_power_off(struct generic_pm_domain *genpd)
221{
222 struct pu_domain *pu = container_of(genpd, struct pu_domain, base);
223
224 _imx6q_pm_pu_power_off(genpd);
225
226 if (pu->reg)
227 regulator_disable(pu->reg);
228
229 return 0;
230}
231
232static int imx6q_pm_pu_power_on(struct generic_pm_domain *genpd)
233{
234 struct pu_domain *pu = container_of(genpd, struct pu_domain, base);
235 int i, ret, sw, sw2iso;
236 u32 val;
237
238 if (pu->reg)
239 ret = regulator_enable(pu->reg);
240 if (pu->reg && ret) {
241 pr_err("%s: failed to enable regulator: %d\n", __func__, ret);
242 return ret;
243 }
244
245 /* Enable reset clocks for all devices in the PU domain */
246 for (i = 0; i < pu->num_clks; i++)
247 clk_prepare_enable(pu->clk[i]);
248
249 /* Gate off PU domain when GPU/VPU is powered down */
250 writel_relaxed(0x1, gpc_base + GPC_PGC_GPU_PDN);
251
252 /* Read SW and SW2ISO power up delays */
253 val = readl_relaxed(gpc_base + GPC_PGC_GPU_PUPSCR);
254 sw = val & 0x3f;
255 sw2iso = (val >> 8) & 0x3f;
256
257 /* Request GPC to power up GPU/VPU */
258 val = readl_relaxed(gpc_base + GPC_CNTR);
259 val |= GPU_VPU_PUP_REQ;
260 writel_relaxed(val, gpc_base + GPC_CNTR);
261
262 /* Wait SW + SW2ISO IPG clock cycles */
263 ndelay((sw + sw2iso) * 1000 / 66);
264
265 /* Disable reset clocks for all devices in the PU domain */
266 for (i = 0; i < pu->num_clks; i++)
267 clk_disable_unprepare(pu->clk[i]);
268
269 return 0;
270}
271
272static struct generic_pm_domain imx6q_arm_domain = {
273 .name = "ARM",
274};
275
276static struct pu_domain imx6q_pu_domain = {
277 .base = {
278 .name = "PU",
279 .power_off = imx6q_pm_pu_power_off,
280 .power_on = imx6q_pm_pu_power_on,
281 .power_off_latency_ns = 25000,
282 .power_on_latency_ns = 2000000,
283 },
284};
285
286static struct generic_pm_domain imx6sl_display_domain = {
287 .name = "DISPLAY",
288};
289
290static struct generic_pm_domain *imx_gpc_domains[] = {
291 &imx6q_arm_domain,
292 &imx6q_pu_domain.base,
293 &imx6sl_display_domain,
294};
295
296static struct genpd_onecell_data imx_gpc_onecell_data = {
297 .domains = imx_gpc_domains,
298 .num_domains = ARRAY_SIZE(imx_gpc_domains),
299};
300
301static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
302{
303 struct clk *clk;
304 bool is_off;
305 int i;
306
307 imx6q_pu_domain.reg = pu_reg;
308
309 for (i = 0; ; i++) {
310 clk = of_clk_get(dev->of_node, i);
311 if (IS_ERR(clk))
312 break;
313 if (i >= GPC_CLK_MAX) {
314 dev_err(dev, "more than %d clocks\n", GPC_CLK_MAX);
315 goto clk_err;
316 }
317 imx6q_pu_domain.clk[i] = clk;
318 }
319 imx6q_pu_domain.num_clks = i;
320
321 is_off = IS_ENABLED(CONFIG_PM);
322 if (is_off) {
323 _imx6q_pm_pu_power_off(&imx6q_pu_domain.base);
324 } else {
325 /*
326 * Enable power if compiled without CONFIG_PM in case the
327 * bootloader disabled it.
328 */
329 imx6q_pm_pu_power_on(&imx6q_pu_domain.base);
330 }
331
332 pm_genpd_init(&imx6q_pu_domain.base, NULL, is_off);
333 return of_genpd_add_provider_onecell(dev->of_node,
334 &imx_gpc_onecell_data);
335
336clk_err:
337 while (i--)
338 clk_put(imx6q_pu_domain.clk[i]);
339 return -EINVAL;
340}
341
342#else
343static inline int imx_gpc_genpd_init(struct device *dev, struct regulator *reg)
344{
345 return 0;
346}
347#endif /* CONFIG_PM_GENERIC_DOMAINS */
348
349static int imx_gpc_probe(struct platform_device *pdev)
350{
351 struct regulator *pu_reg;
352 int ret;
353
354 pu_reg = devm_regulator_get_optional(&pdev->dev, "pu");
355 if (PTR_ERR(pu_reg) == -ENODEV)
356 pu_reg = NULL;
357 if (IS_ERR(pu_reg)) {
358 ret = PTR_ERR(pu_reg);
359 dev_err(&pdev->dev, "failed to get pu regulator: %d\n", ret);
360 return ret;
361 }
362
363 return imx_gpc_genpd_init(&pdev->dev, pu_reg);
364}
365
366static const struct of_device_id imx_gpc_dt_ids[] = {
367 { .compatible = "fsl,imx6q-gpc" },
368 { .compatible = "fsl,imx6sl-gpc" },
369 { }
370};
371
372static struct platform_driver imx_gpc_driver = {
373 .driver = {
374 .name = "imx-gpc",
375 .owner = THIS_MODULE,
376 .of_match_table = imx_gpc_dt_ids,
377 },
378 .probe = imx_gpc_probe,
379};
380
381static int __init imx_pgc_init(void)
382{
383 return platform_driver_register(&imx_gpc_driver);
384}
385subsys_initcall(imx_pgc_init);
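
Two notes on the new GPC domain code. The ndelay() math in the power handlers converts PGC counter values, which are in IPG clock cycles, to nanoseconds: the IPG clock on i.MX6 runs at 66 MHz, so one cycle is 1000/66 ns. And once the provider is registered via of_genpd_add_provider_onecell(), a device in the PU domain is powered purely through runtime PM. A hedged consumer-side sketch with a hypothetical driver (the DT node would carry power-domains = <&gpc 1>, index 1 being imx6q_pu_domain above):

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int foo_gpu_probe(struct platform_device *pdev)
	{
		int ret;

		pm_runtime_enable(&pdev->dev);

		/* genpd powers the PU domain up around this call */
		ret = pm_runtime_get_sync(&pdev->dev);
		if (ret < 0) {
			pm_runtime_disable(&pdev->dev);
			return ret;
		}

		/* ... initialize the device while the domain is on ... */

		pm_runtime_put(&pdev->dev);	/* domain may power down again */
		return 0;
	}
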
diff --git a/arch/arm/mach-meson/Kconfig b/arch/arm/mach-meson/Kconfig
index 18301dc9d2e7..0743e2059645 100644
--- a/arch/arm/mach-meson/Kconfig
+++ b/arch/arm/mach-meson/Kconfig
@@ -1,8 +1,11 @@
1menuconfig ARCH_MESON 1menuconfig ARCH_MESON
2 bool "Amlogic Meson SoCs" if ARCH_MULTI_V7 2 bool "Amlogic Meson SoCs" if ARCH_MULTI_V7
3 select ARCH_REQUIRE_GPIOLIB
3 select GENERIC_IRQ_CHIP 4 select GENERIC_IRQ_CHIP
4 select ARM_GIC 5 select ARM_GIC
5 select CACHE_L2X0 6 select CACHE_L2X0
7 select PINCTRL
8 select PINCTRL_MESON
6 9
7if ARCH_MESON 10if ARCH_MESON
8 11
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index c1e4567a5ab3..97473168d6b6 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -64,6 +64,20 @@ config MACH_ARMADA_38X
64 Say 'Y' here if you want your kernel to support boards based 64 Say 'Y' here if you want your kernel to support boards based
65 on the Marvell Armada 380/385 SoC with device tree. 65 on the Marvell Armada 380/385 SoC with device tree.
66 66
67config MACH_ARMADA_39X
68 bool "Marvell Armada 39x boards" if ARCH_MULTI_V7
69 select ARM_GIC
70 select ARMADA_39X_CLK
71 select CACHE_L2X0
72 select HAVE_ARM_SCU
73 select HAVE_ARM_TWD if SMP
74 select HAVE_SMP
75 select MACH_MVEBU_V7
76 select PINCTRL_ARMADA_39X
77 help
78 Say 'Y' here if you want your kernel to support boards based
79 on the Marvell Armada 39x SoC with device tree.
80
67config MACH_ARMADA_XP 81config MACH_ARMADA_XP
68 bool "Marvell Armada XP boards" if ARCH_MULTI_V7 82 bool "Marvell Armada XP boards" if ARCH_MULTI_V7
69 select ARMADA_XP_CLK 83 select ARMADA_XP_CLK
diff --git a/arch/arm/mach-mvebu/board-v7.c b/arch/arm/mach-mvebu/board-v7.c
index 31b66f26e029..afee9083ad92 100644
--- a/arch/arm/mach-mvebu/board-v7.c
+++ b/arch/arm/mach-mvebu/board-v7.c
@@ -232,3 +232,17 @@ DT_MACHINE_START(ARMADA_38X_DT, "Marvell Armada 380/385 (Device Tree)")
232 .restart = mvebu_restart, 232 .restart = mvebu_restart,
233 .dt_compat = armada_38x_dt_compat, 233 .dt_compat = armada_38x_dt_compat,
234MACHINE_END 234MACHINE_END
235
236static const char * const armada_39x_dt_compat[] __initconst = {
237 "marvell,armada390",
238 "marvell,armada398",
239 NULL,
240};
241
242DT_MACHINE_START(ARMADA_39X_DT, "Marvell Armada 39x (Device Tree)")
243 .l2c_aux_val = 0,
244 .l2c_aux_mask = ~0,
245 .init_irq = mvebu_init_irq,
246 .restart = mvebu_restart,
247 .dt_compat = armada_39x_dt_compat,
248MACHINE_END
diff --git a/arch/arm/mach-mvebu/platsmp-a9.c b/arch/arm/mach-mvebu/platsmp-a9.c
index 2ec1a42b4321..df0a9cc5da59 100644
--- a/arch/arm/mach-mvebu/platsmp-a9.c
+++ b/arch/arm/mach-mvebu/platsmp-a9.c
@@ -110,3 +110,5 @@ CPU_METHOD_OF_DECLARE(mvebu_armada_375_smp, "marvell,armada-375-smp",
110 &mvebu_cortex_a9_smp_ops); 110 &mvebu_cortex_a9_smp_ops);
111CPU_METHOD_OF_DECLARE(mvebu_armada_380_smp, "marvell,armada-380-smp", 111CPU_METHOD_OF_DECLARE(mvebu_armada_380_smp, "marvell,armada-380-smp",
112 &armada_38x_smp_ops); 112 &armada_38x_smp_ops);
113CPU_METHOD_OF_DECLARE(mvebu_armada_390_smp, "marvell,armada-390-smp",
114 &armada_38x_smp_ops);
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index be9541e18650..166b18f515a2 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -690,6 +690,9 @@ struct dev_pm_domain omap_device_pm_domain = {
690 USE_PLATFORM_PM_SLEEP_OPS 690 USE_PLATFORM_PM_SLEEP_OPS
691 .suspend_noirq = _od_suspend_noirq, 691 .suspend_noirq = _od_suspend_noirq,
692 .resume_noirq = _od_resume_noirq, 692 .resume_noirq = _od_resume_noirq,
693 .freeze_noirq = _od_suspend_noirq,
694 .thaw_noirq = _od_resume_noirq,
695 .restore_noirq = _od_resume_noirq,
693 } 696 }
694}; 697};
695 698
diff --git a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
index 8eb85925e444..e2223148ba4d 100644
--- a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
@@ -20,6 +20,7 @@
20#include "omap_hwmod_33xx_43xx_common_data.h" 20#include "omap_hwmod_33xx_43xx_common_data.h"
21#include "prcm43xx.h" 21#include "prcm43xx.h"
22#include "omap_hwmod_common_data.h" 22#include "omap_hwmod_common_data.h"
23#include "hdq1w.h"
23 24
24 25
25/* IP blocks */ 26/* IP blocks */
@@ -516,6 +517,33 @@ static struct omap_hwmod am43xx_dss_rfbi_hwmod = {
516 .parent_hwmod = &am43xx_dss_core_hwmod, 517 .parent_hwmod = &am43xx_dss_core_hwmod,
517}; 518};
518 519
520/* HDQ1W */
521static struct omap_hwmod_class_sysconfig am43xx_hdq1w_sysc = {
522 .rev_offs = 0x0000,
523 .sysc_offs = 0x0014,
524 .syss_offs = 0x0018,
525 .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
526 .sysc_fields = &omap_hwmod_sysc_type1,
527};
528
529static struct omap_hwmod_class am43xx_hdq1w_hwmod_class = {
530 .name = "hdq1w",
531 .sysc = &am43xx_hdq1w_sysc,
532 .reset = &omap_hdq1w_reset,
533};
534
535static struct omap_hwmod am43xx_hdq1w_hwmod = {
536 .name = "hdq1w",
537 .class = &am43xx_hdq1w_hwmod_class,
538 .clkdm_name = "l4ls_clkdm",
539 .prcm = {
540 .omap4 = {
541 .clkctrl_offs = AM43XX_CM_PER_HDQ1W_CLKCTRL_OFFSET,
542 .modulemode = MODULEMODE_SWCTRL,
543 },
544 },
545};
546
519/* Interfaces */ 547/* Interfaces */
520static struct omap_hwmod_ocp_if am43xx_l3_main__l4_hs = { 548static struct omap_hwmod_ocp_if am43xx_l3_main__l4_hs = {
521 .master = &am33xx_l3_main_hwmod, 549 .master = &am33xx_l3_main_hwmod,
@@ -790,6 +818,13 @@ static struct omap_hwmod_ocp_if am43xx_l4_ls__dss_rfbi = {
790 .user = OCP_USER_MPU | OCP_USER_SDMA, 818 .user = OCP_USER_MPU | OCP_USER_SDMA,
791}; 819};
792 820
821static struct omap_hwmod_ocp_if am43xx_l4_ls__hdq1w = {
822 .master = &am33xx_l4_ls_hwmod,
823 .slave = &am43xx_hdq1w_hwmod,
824 .clk = "l4ls_gclk",
825 .user = OCP_USER_MPU | OCP_USER_SDMA,
826};
827
793static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = { 828static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
794 &am33xx_l4_wkup__synctimer, 829 &am33xx_l4_wkup__synctimer,
795 &am43xx_l4_ls__timer8, 830 &am43xx_l4_ls__timer8,
@@ -889,6 +924,7 @@ static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
889 &am43xx_l4_ls__dss, 924 &am43xx_l4_ls__dss,
890 &am43xx_l4_ls__dss_dispc, 925 &am43xx_l4_ls__dss_dispc,
891 &am43xx_l4_ls__dss_rfbi, 926 &am43xx_l4_ls__dss_rfbi,
927 &am43xx_l4_ls__hdq1w,
892 NULL, 928 NULL,
893}; 929};
894 930
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 16fe7a1b7a35..0e64c2fac0b5 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1726,21 +1726,6 @@ static struct omap_hwmod_class dra7xx_timer_1ms_hwmod_class = {
1726 .sysc = &dra7xx_timer_1ms_sysc, 1726 .sysc = &dra7xx_timer_1ms_sysc,
1727}; 1727};
1728 1728
1729static struct omap_hwmod_class_sysconfig dra7xx_timer_secure_sysc = {
1730 .rev_offs = 0x0000,
1731 .sysc_offs = 0x0010,
1732 .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS |
1733 SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
1734 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
1735 SIDLE_SMART_WKUP),
1736 .sysc_fields = &omap_hwmod_sysc_type2,
1737};
1738
1739static struct omap_hwmod_class dra7xx_timer_secure_hwmod_class = {
1740 .name = "timer",
1741 .sysc = &dra7xx_timer_secure_sysc,
1742};
1743
1744static struct omap_hwmod_class_sysconfig dra7xx_timer_sysc = { 1729static struct omap_hwmod_class_sysconfig dra7xx_timer_sysc = {
1745 .rev_offs = 0x0000, 1730 .rev_offs = 0x0000,
1746 .sysc_offs = 0x0010, 1731 .sysc_offs = 0x0010,
@@ -1804,7 +1789,7 @@ static struct omap_hwmod dra7xx_timer3_hwmod = {
1804/* timer4 */ 1789/* timer4 */
1805static struct omap_hwmod dra7xx_timer4_hwmod = { 1790static struct omap_hwmod dra7xx_timer4_hwmod = {
1806 .name = "timer4", 1791 .name = "timer4",
1807 .class = &dra7xx_timer_secure_hwmod_class, 1792 .class = &dra7xx_timer_hwmod_class,
1808 .clkdm_name = "l4per_clkdm", 1793 .clkdm_name = "l4per_clkdm",
1809 .main_clk = "timer4_gfclk_mux", 1794 .main_clk = "timer4_gfclk_mux",
1810 .prcm = { 1795 .prcm = {
@@ -1921,6 +1906,66 @@ static struct omap_hwmod dra7xx_timer11_hwmod = {
1921 }, 1906 },
1922}; 1907};
1923 1908
1909/* timer13 */
1910static struct omap_hwmod dra7xx_timer13_hwmod = {
1911 .name = "timer13",
1912 .class = &dra7xx_timer_hwmod_class,
1913 .clkdm_name = "l4per3_clkdm",
1914 .main_clk = "timer13_gfclk_mux",
1915 .prcm = {
1916 .omap4 = {
1917 .clkctrl_offs = DRA7XX_CM_L4PER3_TIMER13_CLKCTRL_OFFSET,
1918 .context_offs = DRA7XX_RM_L4PER3_TIMER13_CONTEXT_OFFSET,
1919 .modulemode = MODULEMODE_SWCTRL,
1920 },
1921 },
1922};
1923
1924/* timer14 */
1925static struct omap_hwmod dra7xx_timer14_hwmod = {
1926 .name = "timer14",
1927 .class = &dra7xx_timer_hwmod_class,
1928 .clkdm_name = "l4per3_clkdm",
1929 .main_clk = "timer14_gfclk_mux",
1930 .prcm = {
1931 .omap4 = {
1932 .clkctrl_offs = DRA7XX_CM_L4PER3_TIMER14_CLKCTRL_OFFSET,
1933 .context_offs = DRA7XX_RM_L4PER3_TIMER14_CONTEXT_OFFSET,
1934 .modulemode = MODULEMODE_SWCTRL,
1935 },
1936 },
1937};
1938
1939/* timer15 */
1940static struct omap_hwmod dra7xx_timer15_hwmod = {
1941 .name = "timer15",
1942 .class = &dra7xx_timer_hwmod_class,
1943 .clkdm_name = "l4per3_clkdm",
1944 .main_clk = "timer15_gfclk_mux",
1945 .prcm = {
1946 .omap4 = {
1947 .clkctrl_offs = DRA7XX_CM_L4PER3_TIMER15_CLKCTRL_OFFSET,
1948 .context_offs = DRA7XX_RM_L4PER3_TIMER15_CONTEXT_OFFSET,
1949 .modulemode = MODULEMODE_SWCTRL,
1950 },
1951 },
1952};
1953
1954/* timer16 */
1955static struct omap_hwmod dra7xx_timer16_hwmod = {
1956 .name = "timer16",
1957 .class = &dra7xx_timer_hwmod_class,
1958 .clkdm_name = "l4per3_clkdm",
1959 .main_clk = "timer16_gfclk_mux",
1960 .prcm = {
1961 .omap4 = {
1962 .clkctrl_offs = DRA7XX_CM_L4PER3_TIMER16_CLKCTRL_OFFSET,
1963 .context_offs = DRA7XX_RM_L4PER3_TIMER16_CONTEXT_OFFSET,
1964 .modulemode = MODULEMODE_SWCTRL,
1965 },
1966 },
1967};
1968
1924/* 1969/*
1925 * 'uart' class 1970 * 'uart' class
1926 * 1971 *
@@ -3059,6 +3104,38 @@ static struct omap_hwmod_ocp_if dra7xx_l4_per1__timer11 = {
3059 .user = OCP_USER_MPU | OCP_USER_SDMA, 3104 .user = OCP_USER_MPU | OCP_USER_SDMA,
3060}; 3105};
3061 3106
3107/* l4_per3 -> timer13 */
3108static struct omap_hwmod_ocp_if dra7xx_l4_per3__timer13 = {
3109 .master = &dra7xx_l4_per3_hwmod,
3110 .slave = &dra7xx_timer13_hwmod,
3111 .clk = "l3_iclk_div",
3112 .user = OCP_USER_MPU | OCP_USER_SDMA,
3113};
3114
3115/* l4_per3 -> timer14 */
3116static struct omap_hwmod_ocp_if dra7xx_l4_per3__timer14 = {
3117 .master = &dra7xx_l4_per3_hwmod,
3118 .slave = &dra7xx_timer14_hwmod,
3119 .clk = "l3_iclk_div",
3120 .user = OCP_USER_MPU | OCP_USER_SDMA,
3121};
3122
3123/* l4_per3 -> timer15 */
3124static struct omap_hwmod_ocp_if dra7xx_l4_per3__timer15 = {
3125 .master = &dra7xx_l4_per3_hwmod,
3126 .slave = &dra7xx_timer15_hwmod,
3127 .clk = "l3_iclk_div",
3128 .user = OCP_USER_MPU | OCP_USER_SDMA,
3129};
3130
3131/* l4_per3 -> timer16 */
3132static struct omap_hwmod_ocp_if dra7xx_l4_per3__timer16 = {
3133 .master = &dra7xx_l4_per3_hwmod,
3134 .slave = &dra7xx_timer16_hwmod,
3135 .clk = "l3_iclk_div",
3136 .user = OCP_USER_MPU | OCP_USER_SDMA,
3137};
3138
3062/* l4_per1 -> uart1 */ 3139/* l4_per1 -> uart1 */
3063static struct omap_hwmod_ocp_if dra7xx_l4_per1__uart1 = { 3140static struct omap_hwmod_ocp_if dra7xx_l4_per1__uart1 = {
3064 .master = &dra7xx_l4_per1_hwmod, 3141 .master = &dra7xx_l4_per1_hwmod,
@@ -3295,6 +3372,10 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
3295 &dra7xx_l4_per1__timer9, 3372 &dra7xx_l4_per1__timer9,
3296 &dra7xx_l4_per1__timer10, 3373 &dra7xx_l4_per1__timer10,
3297 &dra7xx_l4_per1__timer11, 3374 &dra7xx_l4_per1__timer11,
3375 &dra7xx_l4_per3__timer13,
3376 &dra7xx_l4_per3__timer14,
3377 &dra7xx_l4_per3__timer15,
3378 &dra7xx_l4_per3__timer16,
3298 &dra7xx_l4_per1__uart1, 3379 &dra7xx_l4_per1__uart1,
3299 &dra7xx_l4_per1__uart2, 3380 &dra7xx_l4_per1__uart2,
3300 &dra7xx_l4_per1__uart3, 3381 &dra7xx_l4_per1__uart3,
diff --git a/arch/arm/mach-omap2/prcm43xx.h b/arch/arm/mach-omap2/prcm43xx.h
index ad7b3e9977f8..48df3b55057e 100644
--- a/arch/arm/mach-omap2/prcm43xx.h
+++ b/arch/arm/mach-omap2/prcm43xx.h
@@ -143,5 +143,6 @@
143#define AM43XX_CM_PER_USB_OTG_SS1_CLKCTRL_OFFSET 0x0268 143#define AM43XX_CM_PER_USB_OTG_SS1_CLKCTRL_OFFSET 0x0268
144#define AM43XX_CM_PER_USBPHYOCP2SCP1_CLKCTRL_OFFSET 0x05C0 144#define AM43XX_CM_PER_USBPHYOCP2SCP1_CLKCTRL_OFFSET 0x05C0
145#define AM43XX_CM_PER_DSS_CLKCTRL_OFFSET 0x0a20 145#define AM43XX_CM_PER_DSS_CLKCTRL_OFFSET 0x0a20
146#define AM43XX_CM_PER_HDQ1W_CLKCTRL_OFFSET 0x04a0
146 147
147#endif 148#endif
diff --git a/arch/arm/mach-rockchip/platsmp.c b/arch/arm/mach-rockchip/platsmp.c
index f26fcdca2445..5b4ca3c3c879 100644
--- a/arch/arm/mach-rockchip/platsmp.c
+++ b/arch/arm/mach-rockchip/platsmp.c
@@ -55,7 +55,7 @@ static int pmu_power_domain_is_on(int pd)
55 return !(val & BIT(pd)); 55 return !(val & BIT(pd));
56} 56}
57 57
58struct reset_control *rockchip_get_core_reset(int cpu) 58static struct reset_control *rockchip_get_core_reset(int cpu)
59{ 59{
60 struct device *dev = get_cpu_device(cpu); 60 struct device *dev = get_cpu_device(cpu);
61 struct device_node *np; 61 struct device_node *np;
@@ -201,7 +201,7 @@ static int __init rockchip_smp_prepare_sram(struct device_node *node)
201 return 0; 201 return 0;
202} 202}
203 203
204static struct regmap_config rockchip_pmu_regmap_config = { 204static const struct regmap_config rockchip_pmu_regmap_config = {
205 .reg_bits = 32, 205 .reg_bits = 32,
206 .val_bits = 32, 206 .val_bits = 32,
207 .reg_stride = 4, 207 .reg_stride = 4,
diff --git a/arch/arm/mach-rockchip/pm.c b/arch/arm/mach-rockchip/pm.c
index 50cb781aaa36..b07d88602073 100644
--- a/arch/arm/mach-rockchip/pm.c
+++ b/arch/arm/mach-rockchip/pm.c
@@ -75,9 +75,13 @@ static void rk3288_slp_mode_set(int level)
75 regmap_read(pmu_regmap, RK3288_PMU_PWRMODE_CON, 75 regmap_read(pmu_regmap, RK3288_PMU_PWRMODE_CON,
76 &rk3288_pmu_pwr_mode_con); 76 &rk3288_pmu_pwr_mode_con);
77 77
78 /* set bit 8 so that system will resume to FAST_BOOT_ADDR */ 78 /*
 79 * SGRF_FAST_BOOT_EN - have the system resume from FAST_BOOT_ADDR
80 * PCLK_WDT_GATE - disable WDT during suspend.
81 */
79 regmap_write(sgrf_regmap, RK3288_SGRF_SOC_CON0, 82 regmap_write(sgrf_regmap, RK3288_SGRF_SOC_CON0,
80 SGRF_FAST_BOOT_EN | SGRF_FAST_BOOT_EN_WRITE); 83 SGRF_PCLK_WDT_GATE | SGRF_FAST_BOOT_EN
84 | SGRF_PCLK_WDT_GATE_WRITE | SGRF_FAST_BOOT_EN_WRITE);
81 85
82 /* booting address of resuming system is from this register value */ 86 /* booting address of resuming system is from this register value */
83 regmap_write(sgrf_regmap, RK3288_SGRF_FAST_BOOT_ADDR, 87 regmap_write(sgrf_regmap, RK3288_SGRF_FAST_BOOT_ADDR,
@@ -122,7 +126,8 @@ static void rk3288_slp_mode_set_resume(void)
122 rk3288_pmu_pwr_mode_con); 126 rk3288_pmu_pwr_mode_con);
123 127
124 regmap_write(sgrf_regmap, RK3288_SGRF_SOC_CON0, 128 regmap_write(sgrf_regmap, RK3288_SGRF_SOC_CON0,
125 rk3288_sgrf_soc_con0 | SGRF_FAST_BOOT_EN_WRITE); 129 rk3288_sgrf_soc_con0 | SGRF_PCLK_WDT_GATE_WRITE
130 | SGRF_FAST_BOOT_EN_WRITE);
126} 131}
127 132
128static int rockchip_lpmode_enter(unsigned long arg) 133static int rockchip_lpmode_enter(unsigned long arg)
@@ -209,6 +214,9 @@ static int rk3288_suspend_init(struct device_node *np)
209 memcpy(rk3288_bootram_base, rockchip_slp_cpu_resume, 214 memcpy(rk3288_bootram_base, rockchip_slp_cpu_resume,
210 rk3288_bootram_sz); 215 rk3288_bootram_sz);
211 216
217 regmap_write(pmu_regmap, RK3288_PMU_OSC_CNT, OSC_STABL_CNT_THRESH);
218 regmap_write(pmu_regmap, RK3288_PMU_STABL_CNT, PMU_STABL_CNT_THRESH);
219
212 return 0; 220 return 0;
213} 221}
214 222
diff --git a/arch/arm/mach-rockchip/pm.h b/arch/arm/mach-rockchip/pm.h
index 7c889c04604b..03ff31d8282d 100644
--- a/arch/arm/mach-rockchip/pm.h
+++ b/arch/arm/mach-rockchip/pm.h
@@ -50,6 +50,8 @@ static inline void rockchip_suspend_init(void)
50 50
51#define RK3288_SGRF_SOC_CON0 (0x0000) 51#define RK3288_SGRF_SOC_CON0 (0x0000)
52#define RK3288_SGRF_FAST_BOOT_ADDR (0x0120) 52#define RK3288_SGRF_FAST_BOOT_ADDR (0x0120)
53#define SGRF_PCLK_WDT_GATE BIT(6)
54#define SGRF_PCLK_WDT_GATE_WRITE BIT(22)
53#define SGRF_FAST_BOOT_EN BIT(8) 55#define SGRF_FAST_BOOT_EN BIT(8)
54#define SGRF_FAST_BOOT_EN_WRITE BIT(24) 56#define SGRF_FAST_BOOT_EN_WRITE BIT(24)
55 57
@@ -63,6 +65,10 @@ static inline void rockchip_suspend_init(void)
63/* PMU_WAKEUP_CFG1 bits */ 65/* PMU_WAKEUP_CFG1 bits */
64#define PMU_ARMINT_WAKEUP_EN BIT(0) 66#define PMU_ARMINT_WAKEUP_EN BIT(0)
65 67
 68/* wait 30 ms for OSC stable and 30 ms for PMIC stable (32 ticks/ms at the 32 kHz PMU counter clock) */
69#define OSC_STABL_CNT_THRESH (32 * 30)
70#define PMU_STABL_CNT_THRESH (32 * 30)
71
66enum rk3288_pwr_mode_con { 72enum rk3288_pwr_mode_con {
67 PMU_PWR_MODE_EN = 0, 73 PMU_PWR_MODE_EN = 0,
68 PMU_CLK_CORE_SRC_GATE_EN, 74 PMU_CLK_CORE_SRC_GATE_EN,
diff --git a/arch/arm/mach-s3c64xx/crag6410.h b/arch/arm/mach-s3c64xx/crag6410.h
index 7bc66682687e..dcbe17f5e5f8 100644
--- a/arch/arm/mach-s3c64xx/crag6410.h
+++ b/arch/arm/mach-s3c64xx/crag6410.h
@@ -14,6 +14,7 @@
14#include <mach/gpio-samsung.h> 14#include <mach/gpio-samsung.h>
15 15
16#define GLENFARCLAS_PMIC_IRQ_BASE IRQ_BOARD_START 16#define GLENFARCLAS_PMIC_IRQ_BASE IRQ_BOARD_START
17#define BANFF_PMIC_IRQ_BASE (IRQ_BOARD_START + 64)
17 18
18#define PCA935X_GPIO_BASE GPIO_BOARD_START 19#define PCA935X_GPIO_BASE GPIO_BOARD_START
19#define CODEC_GPIO_BASE (GPIO_BOARD_START + 8) 20#define CODEC_GPIO_BASE (GPIO_BOARD_START + 8)
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
index 10b913baab28..65c426bc45f7 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
@@ -554,6 +554,7 @@ static struct wm831x_touch_pdata touch_pdata = {
554 554
555static struct wm831x_pdata crag_pmic_pdata = { 555static struct wm831x_pdata crag_pmic_pdata = {
556 .wm831x_num = 1, 556 .wm831x_num = 1,
557 .irq_base = BANFF_PMIC_IRQ_BASE,
557 .gpio_base = BANFF_PMIC_GPIO_BASE, 558 .gpio_base = BANFF_PMIC_GPIO_BASE,
558 .soft_shutdown = true, 559 .soft_shutdown = true,
559 560
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 2f36c85eec4b..347b6a58fc3e 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -69,10 +69,12 @@ config ARCH_R8A7779
69config ARCH_R8A7790 69config ARCH_R8A7790
70 bool "R-Car H2 (R8A77900)" 70 bool "R-Car H2 (R8A77900)"
71 select ARCH_RCAR_GEN2 71 select ARCH_RCAR_GEN2
72 select I2C
72 73
73config ARCH_R8A7791 74config ARCH_R8A7791
74 bool "R-Car M2-W (R8A77910)" 75 bool "R-Car M2-W (R8A77910)"
75 select ARCH_RCAR_GEN2 76 select ARCH_RCAR_GEN2
77 select I2C
76 78
77config ARCH_R8A7794 79config ARCH_R8A7794
78 bool "R-Car E2 (R8A77940)" 80 bool "R-Car E2 (R8A77940)"
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index 03794b0de64b..3631d8b6fc5e 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -35,6 +35,8 @@ cpu-y := platsmp.o headsmp.o
35# Shared SoC family objects 35# Shared SoC family objects
36obj-$(CONFIG_ARCH_RCAR_GEN2) += setup-rcar-gen2.o platsmp-apmu.o $(cpu-y) 36obj-$(CONFIG_ARCH_RCAR_GEN2) += setup-rcar-gen2.o platsmp-apmu.o $(cpu-y)
37CFLAGS_setup-rcar-gen2.o += -march=armv7-a 37CFLAGS_setup-rcar-gen2.o += -march=armv7-a
38obj-$(CONFIG_ARCH_R8A7790) += regulator-quirk-rcar-gen2.o
39obj-$(CONFIG_ARCH_R8A7791) += regulator-quirk-rcar-gen2.o
38 40
39# SMP objects 41# SMP objects
40smp-y := $(cpu-y) 42smp-y := $(cpu-y)
diff --git a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
new file mode 100644
index 000000000000..384e6e934b87
--- /dev/null
+++ b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
@@ -0,0 +1,147 @@
1/*
2 * R-Car Generation 2 da9063/da9210 regulator quirk
3 *
4 * The r8a7790/lager and r8a7791/koelsch development boards have da9063 and
5 * da9210 regulators. Both regulators have their interrupt request lines tied
6 * to the same interrupt pin (IRQ2) on the SoC.
7 *
8 * After cold boot or da9063-induced restart, both the da9063 and da9210 seem
9 * to assert their interrupt request lines. Hence as soon as one driver
10 * requests this irq, it gets stuck in an interrupt storm, as it only manages
11 * to deassert its own interrupt request line, and the other driver hasn't
12 * installed an interrupt handler yet.
13 *
14 * To handle this, install a quirk that masks the interrupts in both the
15 * da9063 and da9210. This quirk has to run after the i2c master driver has
16 * been initialized, but before the i2c slave drivers are initialized.
17 *
18 * Copyright (C) 2015 Glider bvba
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; version 2 of the License.
23 *
24 * This program is distributed in the hope that it will be useful,
25 * but WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27 * GNU General Public License for more details.
28 */
29
30#include <linux/device.h>
31#include <linux/i2c.h>
32#include <linux/init.h>
33#include <linux/io.h>
34#include <linux/notifier.h>
35#include <linux/of.h>
36#include <linux/mfd/da9063/registers.h>
37
38
39#define IRQC_BASE 0xe61c0000
40#define IRQC_MONITOR 0x104 /* IRQn Signal Level Monitor Register */
41
42#define REGULATOR_IRQ_MASK BIT(2) /* IRQ2, active low */
43
44static void __iomem *irqc;
45
46static const u8 da9063_mask_regs[] = {
47 DA9063_REG_IRQ_MASK_A,
48 DA9063_REG_IRQ_MASK_B,
49 DA9063_REG_IRQ_MASK_C,
50 DA9063_REG_IRQ_MASK_D,
51};
52
53/* DA9210 System Control and Event Registers */
54#define DA9210_REG_MASK_A 0x54
55#define DA9210_REG_MASK_B 0x55
56
57static const u8 da9210_mask_regs[] = {
58 DA9210_REG_MASK_A,
59 DA9210_REG_MASK_B,
60};
61
62static void da9xxx_mask_irqs(struct i2c_client *client, const u8 regs[],
63 unsigned int nregs)
64{
65 unsigned int i;
66
67 dev_info(&client->dev, "Masking %s interrupt sources\n", client->name);
68
69 for (i = 0; i < nregs; i++) {
70 int error = i2c_smbus_write_byte_data(client, regs[i], ~0);
71 if (error) {
72 dev_err(&client->dev, "i2c error %d\n", error);
73 return;
74 }
75 }
76}
77
78static int regulator_quirk_notify(struct notifier_block *nb,
79 unsigned long action, void *data)
80{
81 struct device *dev = data;
82 struct i2c_client *client;
83 u32 mon;
84
85 mon = ioread32(irqc + IRQC_MONITOR);
86 dev_dbg(dev, "%s: %ld, IRQC_MONITOR = 0x%x\n", __func__, action, mon);
87 if (mon & REGULATOR_IRQ_MASK)
88 goto remove;
89
90 if (action != BUS_NOTIFY_ADD_DEVICE || dev->type == &i2c_adapter_type)
91 return 0;
92
93 client = to_i2c_client(dev);
94 dev_dbg(dev, "Detected %s\n", client->name);
95
 96 if (client->addr == 0x58 && !strcmp(client->name, "da9063"))
97 da9xxx_mask_irqs(client, da9063_mask_regs,
98 ARRAY_SIZE(da9063_mask_regs));
99 else if (client->addr == 0x68 && !strcmp(client->name, "da9210"))
100 da9xxx_mask_irqs(client, da9210_mask_regs,
101 ARRAY_SIZE(da9210_mask_regs));
102
103 mon = ioread32(irqc + IRQC_MONITOR);
104 if (mon & REGULATOR_IRQ_MASK)
105 goto remove;
106
107 return 0;
108
109remove:
110 dev_info(dev, "IRQ2 is not asserted, removing quirk\n");
111
112 bus_unregister_notifier(&i2c_bus_type, nb);
113 iounmap(irqc);
114 return 0;
115}
116
117static struct notifier_block regulator_quirk_nb = {
118 .notifier_call = regulator_quirk_notify
119};
120
121static int __init rcar_gen2_regulator_quirk(void)
122{
123 u32 mon;
124
125 if (!of_machine_is_compatible("renesas,koelsch") &&
126 !of_machine_is_compatible("renesas,lager"))
127 return -ENODEV;
128
129 irqc = ioremap(IRQC_BASE, PAGE_SIZE);
130 if (!irqc)
131 return -ENOMEM;
132
133 mon = ioread32(irqc + IRQC_MONITOR);
134 if (mon & REGULATOR_IRQ_MASK) {
135 pr_debug("%s: IRQ2 is not asserted, not installing quirk\n",
136 __func__);
137 iounmap(irqc);
138 return 0;
139 }
140
141 pr_info("IRQ2 is asserted, installing da9063/da9210 regulator quirk\n");
142
143 bus_register_notifier(&i2c_bus_type, &regulator_quirk_nb);
144 return 0;
145}
146
147arch_initcall(rcar_gen2_regulator_quirk);
diff --git a/arch/arm/mach-shmobile/setup-rcar-gen2.c b/arch/arm/mach-shmobile/setup-rcar-gen2.c
index 51464cc6d65b..5d13595aa027 100644
--- a/arch/arm/mach-shmobile/setup-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/setup-rcar-gen2.c
@@ -21,6 +21,7 @@
21#include <linux/dma-contiguous.h> 21#include <linux/dma-contiguous.h>
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/memblock.h>
24#include <linux/of.h> 25#include <linux/of.h>
25#include <linux/of_fdt.h> 26#include <linux/of_fdt.h>
26#include <asm/mach/arch.h> 27#include <asm/mach/arch.h>
@@ -195,7 +196,7 @@ void __init rcar_gen2_reserve(void)
195 196
196 of_scan_flat_dt(rcar_gen2_scan_mem, &mrc); 197 of_scan_flat_dt(rcar_gen2_scan_mem, &mrc);
197#ifdef CONFIG_DMA_CMA 198#ifdef CONFIG_DMA_CMA
198 if (mrc.size) 199 if (mrc.size && memblock_is_region_memory(mrc.base, mrc.size))
199 dma_contiguous_reserve_area(mrc.size, mrc.base, 0, 200 dma_contiguous_reserve_area(mrc.size, mrc.base, 0,
200 &rcar_gen2_dma_contiguous, true); 201 &rcar_gen2_dma_contiguous, true);
201#endif 202#endif
diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c
index 30b993399ed7..5cedcf572104 100644
--- a/arch/arm/mach-vexpress/dcscb.c
+++ b/arch/arm/mach-vexpress/dcscb.c
@@ -12,7 +12,6 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/io.h> 14#include <linux/io.h>
15#include <linux/spinlock.h>
16#include <linux/errno.h> 15#include <linux/errno.h>
17#include <linux/of_address.h> 16#include <linux/of_address.h>
18#include <linux/vexpress.h> 17#include <linux/vexpress.h>
@@ -36,163 +35,102 @@
36#define KFC_CFG_W 0x2c 35#define KFC_CFG_W 0x2c
37#define DCS_CFG_R 0x30 36#define DCS_CFG_R 0x30
38 37
39/*
40 * We can't use regular spinlocks. In the switcher case, it is possible
41 * for an outbound CPU to call power_down() while its inbound counterpart
42 * is already live using the same logical CPU number which trips lockdep
43 * debugging.
44 */
45static arch_spinlock_t dcscb_lock = __ARCH_SPIN_LOCK_UNLOCKED;
46
47static void __iomem *dcscb_base; 38static void __iomem *dcscb_base;
48static int dcscb_use_count[4][2];
49static int dcscb_allcpus_mask[2]; 39static int dcscb_allcpus_mask[2];
50 40
51static int dcscb_power_up(unsigned int cpu, unsigned int cluster) 41static int dcscb_cpu_powerup(unsigned int cpu, unsigned int cluster)
52{ 42{
53 unsigned int rst_hold, cpumask = (1 << cpu); 43 unsigned int rst_hold, cpumask = (1 << cpu);
54 unsigned int all_mask;
55 44
56 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); 45 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
57 if (cpu >= 4 || cluster >= 2) 46 if (cluster >= 2 || !(cpumask & dcscb_allcpus_mask[cluster]))
58 return -EINVAL; 47 return -EINVAL;
59 48
60 all_mask = dcscb_allcpus_mask[cluster]; 49 rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
50 rst_hold &= ~(cpumask | (cpumask << 4));
51 writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
52 return 0;
53}
61 54
62 /* 55static int dcscb_cluster_powerup(unsigned int cluster)
63 * Since this is called with IRQs enabled, and no arch_spin_lock_irq 56{
64 * variant exists, we need to disable IRQs manually here. 57 unsigned int rst_hold;
65 */
66 local_irq_disable();
67 arch_spin_lock(&dcscb_lock);
68
69 dcscb_use_count[cpu][cluster]++;
70 if (dcscb_use_count[cpu][cluster] == 1) {
71 rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
72 if (rst_hold & (1 << 8)) {
73 /* remove cluster reset and add individual CPU's reset */
74 rst_hold &= ~(1 << 8);
75 rst_hold |= all_mask;
76 }
77 rst_hold &= ~(cpumask | (cpumask << 4));
78 writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
79 } else if (dcscb_use_count[cpu][cluster] != 2) {
80 /*
81 * The only possible values are:
82 * 0 = CPU down
83 * 1 = CPU (still) up
84 * 2 = CPU requested to be up before it had a chance
85 * to actually make itself down.
86 * Any other value is a bug.
87 */
88 BUG();
89 }
90 58
91 arch_spin_unlock(&dcscb_lock); 59 pr_debug("%s: cluster %u\n", __func__, cluster);
92 local_irq_enable(); 60 if (cluster >= 2)
61 return -EINVAL;
93 62
63 /* remove cluster reset and add individual CPU's reset */
64 rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
65 rst_hold &= ~(1 << 8);
66 rst_hold |= dcscb_allcpus_mask[cluster];
67 writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
94 return 0; 68 return 0;
95} 69}
96 70
97static void dcscb_power_down(void) 71static void dcscb_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
98{ 72{
99 unsigned int mpidr, cpu, cluster, rst_hold, cpumask, all_mask; 73 unsigned int rst_hold;
100 bool last_man = false, skip_wfi = false;
101
102 mpidr = read_cpuid_mpidr();
103 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
104 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
105 cpumask = (1 << cpu);
106 74
107 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); 75 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
108 BUG_ON(cpu >= 4 || cluster >= 2); 76 BUG_ON(cluster >= 2 || !((1 << cpu) & dcscb_allcpus_mask[cluster]));
109
110 all_mask = dcscb_allcpus_mask[cluster];
111
112 __mcpm_cpu_going_down(cpu, cluster);
113
114 arch_spin_lock(&dcscb_lock);
115 BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
116 dcscb_use_count[cpu][cluster]--;
117 if (dcscb_use_count[cpu][cluster] == 0) {
118 rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
119 rst_hold |= cpumask;
120 if (((rst_hold | (rst_hold >> 4)) & all_mask) == all_mask) {
121 rst_hold |= (1 << 8);
122 last_man = true;
123 }
124 writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
125 } else if (dcscb_use_count[cpu][cluster] == 1) {
126 /*
127 * A power_up request went ahead of us.
128 * Even if we do not want to shut this CPU down,
129 * the caller expects a certain state as if the WFI
130 * was aborted. So let's continue with cache cleaning.
131 */
132 skip_wfi = true;
133 } else
134 BUG();
135
136 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
137 arch_spin_unlock(&dcscb_lock);
138
139 /* Flush all cache levels for this cluster. */
140 v7_exit_coherency_flush(all);
141
142 /*
143 * A full outer cache flush could be needed at this point
144 * on platforms with such a cache, depending on where the
145 * outer cache sits. In some cases the notion of a "last
146 * cluster standing" would need to be implemented if the
147 * outer cache is shared across clusters. In any case, when
148 * the outer cache needs flushing, there is no concurrent
149 * access to the cache controller to worry about and no
150 * special locking besides what is already provided by the
151 * MCPM state machinery is needed.
152 */
153
154 /*
155 * Disable cluster-level coherency by masking
156 * incoming snoops and DVM messages:
157 */
158 cci_disable_port_by_cpu(mpidr);
159
160 __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
161 } else {
162 arch_spin_unlock(&dcscb_lock);
163
164 /* Disable and flush the local CPU cache. */
165 v7_exit_coherency_flush(louis);
166 }
167 77
168 __mcpm_cpu_down(cpu, cluster); 78 rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
79 rst_hold |= (1 << cpu);
80 writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
81}
169 82
170 /* Now we are prepared for power-down, do it: */ 83static void dcscb_cluster_powerdown_prepare(unsigned int cluster)
171 dsb(); 84{
172 if (!skip_wfi) 85 unsigned int rst_hold;
173 wfi();
174 86
175 /* Not dead at this point? Let our caller cope. */ 87 pr_debug("%s: cluster %u\n", __func__, cluster);
88 BUG_ON(cluster >= 2);
89
90 rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
91 rst_hold |= (1 << 8);
92 writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
176} 93}
177 94
178static const struct mcpm_platform_ops dcscb_power_ops = { 95static void dcscb_cpu_cache_disable(void)
179 .power_up = dcscb_power_up, 96{
180 .power_down = dcscb_power_down, 97 /* Disable and flush the local CPU cache. */
181}; 98 v7_exit_coherency_flush(louis);
99}
182 100
183static void __init dcscb_usage_count_init(void) 101static void dcscb_cluster_cache_disable(void)
184{ 102{
185 unsigned int mpidr, cpu, cluster; 103 /* Flush all cache levels for this cluster. */
104 v7_exit_coherency_flush(all);
186 105
187 mpidr = read_cpuid_mpidr(); 106 /*
188 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); 107 * A full outer cache flush could be needed at this point
189 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); 108 * on platforms with such a cache, depending on where the
109 * outer cache sits. In some cases the notion of a "last
110 * cluster standing" would need to be implemented if the
111 * outer cache is shared across clusters. In any case, when
112 * the outer cache needs flushing, there is no concurrent
113 * access to the cache controller to worry about and no
114 * special locking besides what is already provided by the
115 * MCPM state machinery is needed.
116 */
190 117
191 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); 118 /*
192 BUG_ON(cpu >= 4 || cluster >= 2); 119 * Disable cluster-level coherency by masking
193 dcscb_use_count[cpu][cluster] = 1; 120 * incoming snoops and DVM messages:
121 */
122 cci_disable_port_by_cpu(read_cpuid_mpidr());
194} 123}
195 124
125static const struct mcpm_platform_ops dcscb_power_ops = {
126 .cpu_powerup = dcscb_cpu_powerup,
127 .cluster_powerup = dcscb_cluster_powerup,
128 .cpu_powerdown_prepare = dcscb_cpu_powerdown_prepare,
129 .cluster_powerdown_prepare = dcscb_cluster_powerdown_prepare,
130 .cpu_cache_disable = dcscb_cpu_cache_disable,
131 .cluster_cache_disable = dcscb_cluster_cache_disable,
132};
133
196extern void dcscb_power_up_setup(unsigned int affinity_level); 134extern void dcscb_power_up_setup(unsigned int affinity_level);
197 135
198static int __init dcscb_init(void) 136static int __init dcscb_init(void)
@@ -213,7 +151,6 @@ static int __init dcscb_init(void)
213 cfg = readl_relaxed(dcscb_base + DCS_CFG_R); 151 cfg = readl_relaxed(dcscb_base + DCS_CFG_R);
214 dcscb_allcpus_mask[0] = (1 << (((cfg >> 16) >> (0 << 2)) & 0xf)) - 1; 152 dcscb_allcpus_mask[0] = (1 << (((cfg >> 16) >> (0 << 2)) & 0xf)) - 1;
215 dcscb_allcpus_mask[1] = (1 << (((cfg >> 16) >> (1 << 2)) & 0xf)) - 1; 153 dcscb_allcpus_mask[1] = (1 << (((cfg >> 16) >> (1 << 2)) & 0xf)) - 1;
216 dcscb_usage_count_init();
217 154
218 ret = mcpm_platform_register(&dcscb_power_ops); 155 ret = mcpm_platform_register(&dcscb_power_ops);
219 if (!ret) 156 if (!ret)
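
The conversion pattern is the same for every MCPM backend in this series: the arch spinlock, the per-CPU use counts and the last-man logic move into the MCPM core, leaving the backend with stateless primitives that only touch the power controller and the caches. A skeleton of the resulting shape, using the ops names introduced above (not a complete backend):

	static int foo_cpu_powerup(unsigned int cpu, unsigned int cluster)
	{
		/* deassert reset / apply power for a single CPU */
		return 0;
	}

	static int foo_cluster_powerup(unsigned int cluster)
	{
		/* remove cluster-level reset, enable cluster power */
		return 0;
	}

	static void foo_cpu_cache_disable(void)
	{
		v7_exit_coherency_flush(louis);		/* local CPU only */
	}

	static void foo_cluster_cache_disable(void)
	{
		v7_exit_coherency_flush(all);		/* whole cluster */
		cci_disable_port_by_cpu(read_cpuid_mpidr());
	}

	static const struct mcpm_platform_ops foo_power_ops = {
		.cpu_powerup		= foo_cpu_powerup,
		.cluster_powerup	= foo_cluster_powerup,
		.cpu_cache_disable	= foo_cpu_cache_disable,
		.cluster_cache_disable	= foo_cluster_cache_disable,
	};
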
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index 2fb78b4648cb..b3328cd46c33 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -18,7 +18,6 @@
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/of_address.h> 19#include <linux/of_address.h>
20#include <linux/of_irq.h> 20#include <linux/of_irq.h>
21#include <linux/spinlock.h>
22#include <linux/errno.h> 21#include <linux/errno.h>
23#include <linux/irqchip/arm-gic.h> 22#include <linux/irqchip/arm-gic.h>
24 23
@@ -44,101 +43,36 @@
44 43
45static void __iomem *scc; 44static void __iomem *scc;
46 45
47/*
48 * We can't use regular spinlocks. In the switcher case, it is possible
49 * for an outbound CPU to call power_down() after its inbound counterpart
50 * is already live using the same logical CPU number which trips lockdep
51 * debugging.
52 */
53static arch_spinlock_t tc2_pm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
54
55#define TC2_CLUSTERS 2 46#define TC2_CLUSTERS 2
56#define TC2_MAX_CPUS_PER_CLUSTER 3 47#define TC2_MAX_CPUS_PER_CLUSTER 3
57 48
58static unsigned int tc2_nr_cpus[TC2_CLUSTERS]; 49static unsigned int tc2_nr_cpus[TC2_CLUSTERS];
59 50
60/* Keep per-cpu usage count to cope with unordered up/down requests */ 51static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster)
61static int tc2_pm_use_count[TC2_MAX_CPUS_PER_CLUSTER][TC2_CLUSTERS];
62
63#define tc2_cluster_unused(cluster) \
64 (!tc2_pm_use_count[0][cluster] && \
65 !tc2_pm_use_count[1][cluster] && \
66 !tc2_pm_use_count[2][cluster])
67
68static int tc2_pm_power_up(unsigned int cpu, unsigned int cluster)
69{ 52{
70 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); 53 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
71 if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) 54 if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])
72 return -EINVAL; 55 return -EINVAL;
73 56 ve_spc_set_resume_addr(cluster, cpu,
74 /* 57 virt_to_phys(mcpm_entry_point));
75 * Since this is called with IRQs enabled, and no arch_spin_lock_irq 58 ve_spc_cpu_wakeup_irq(cluster, cpu, true);
76 * variant exists, we need to disable IRQs manually here.
77 */
78 local_irq_disable();
79 arch_spin_lock(&tc2_pm_lock);
80
81 if (tc2_cluster_unused(cluster))
82 ve_spc_powerdown(cluster, false);
83
84 tc2_pm_use_count[cpu][cluster]++;
85 if (tc2_pm_use_count[cpu][cluster] == 1) {
86 ve_spc_set_resume_addr(cluster, cpu,
87 virt_to_phys(mcpm_entry_point));
88 ve_spc_cpu_wakeup_irq(cluster, cpu, true);
89 } else if (tc2_pm_use_count[cpu][cluster] != 2) {
90 /*
91 * The only possible values are:
92 * 0 = CPU down
93 * 1 = CPU (still) up
94 * 2 = CPU requested to be up before it had a chance
95 * to actually make itself down.
96 * Any other value is a bug.
97 */
98 BUG();
99 }
100
101 arch_spin_unlock(&tc2_pm_lock);
102 local_irq_enable();
103
104 return 0; 59 return 0;
105} 60}
106 61
107static void tc2_pm_down(u64 residency) 62static int tc2_pm_cluster_powerup(unsigned int cluster)
108{ 63{
109 unsigned int mpidr, cpu, cluster; 64 pr_debug("%s: cluster %u\n", __func__, cluster);
110 bool last_man = false, skip_wfi = false; 65 if (cluster >= TC2_CLUSTERS)
111 66 return -EINVAL;
112 mpidr = read_cpuid_mpidr(); 67 ve_spc_powerdown(cluster, false);
113 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); 68 return 0;
114 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); 69}
115 70
71static void tc2_pm_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
72{
116 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); 73 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
117 BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER); 74 BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
118 75 ve_spc_cpu_wakeup_irq(cluster, cpu, true);
119 __mcpm_cpu_going_down(cpu, cluster);
120
121 arch_spin_lock(&tc2_pm_lock);
122 BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
123 tc2_pm_use_count[cpu][cluster]--;
124 if (tc2_pm_use_count[cpu][cluster] == 0) {
125 ve_spc_cpu_wakeup_irq(cluster, cpu, true);
126 if (tc2_cluster_unused(cluster)) {
127 ve_spc_powerdown(cluster, true);
128 ve_spc_global_wakeup_irq(true);
129 last_man = true;
130 }
131 } else if (tc2_pm_use_count[cpu][cluster] == 1) {
132 /*
133 * A power_up request went ahead of us.
134 * Even if we do not want to shut this CPU down,
135 * the caller expects a certain state as if the WFI
136 * was aborted. So let's continue with cache cleaning.
137 */
138 skip_wfi = true;
139 } else
140 BUG();
141
142 /* 76 /*
143 * If the CPU is committed to power down, make sure 77 * If the CPU is committed to power down, make sure
144 * the power controller will be in charge of waking it 78 * the power controller will be in charge of waking it
@@ -146,55 +80,38 @@ static void tc2_pm_down(u64 residency)
146 * to the CPU by disabling the GIC CPU IF to prevent wfi 80 * to the CPU by disabling the GIC CPU IF to prevent wfi
147 * from completing execution behind the power controller's back 81 * from completing execution behind the power controller's back
148 */ 82 */
149 if (!skip_wfi) 83 gic_cpu_if_down();
150 gic_cpu_if_down(); 84}
151
152 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
153 arch_spin_unlock(&tc2_pm_lock);
154
155 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
156 /*
157 * On the Cortex-A15 we need to disable
158 * L2 prefetching before flushing the cache.
159 */
160 asm volatile(
161 "mcr p15, 1, %0, c15, c0, 3 \n\t"
162 "isb \n\t"
163 "dsb "
164 : : "r" (0x400) );
165 }
166
167 v7_exit_coherency_flush(all);
168
169 cci_disable_port_by_cpu(mpidr);
170
171 __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
172 } else {
173 /*
174 * If last man then undo any setup done previously.
175 */
176 if (last_man) {
177 ve_spc_powerdown(cluster, false);
178 ve_spc_global_wakeup_irq(false);
179 }
180
181 arch_spin_unlock(&tc2_pm_lock);
182
183 v7_exit_coherency_flush(louis);
184 }
185
186 __mcpm_cpu_down(cpu, cluster);
187 85
188 /* Now we are prepared for power-down, do it: */ 86static void tc2_pm_cluster_powerdown_prepare(unsigned int cluster)
189 if (!skip_wfi) 87{
190 wfi(); 88 pr_debug("%s: cluster %u\n", __func__, cluster);
89 BUG_ON(cluster >= TC2_CLUSTERS);
90 ve_spc_powerdown(cluster, true);
91 ve_spc_global_wakeup_irq(true);
92}
191 93
192 /* Not dead at this point? Let our caller cope. */ 94static void tc2_pm_cpu_cache_disable(void)
95{
96 v7_exit_coherency_flush(louis);
193} 97}
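The two cache-disable methods differ only in flush depth: a single departing CPU flushes to the Level of Unification Inner Shareable ("louis", in effect its own L1), while the last CPU in a cluster, handled by tc2_pm_cluster_cache_disable() below, must flush the entire hierarchy ("all") before the L2 loses power. Illustrative selection logic only (the real choice is made inside the MCPM core, and last_cpu_in_cluster is a hypothetical flag):

if (last_cpu_in_cluster)
        tc2_pm_cluster_cache_disable(); /* v7_exit_coherency_flush(all) */
else
        tc2_pm_cpu_cache_disable();     /* v7_exit_coherency_flush(louis) */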
194 98
195static void tc2_pm_power_down(void) 99static void tc2_pm_cluster_cache_disable(void)
196{ 100{
197 tc2_pm_down(0); 101 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
102 /*
103 * On the Cortex-A15 we need to disable
104 * L2 prefetching before flushing the cache.
105 */
106 asm volatile(
107 "mcr p15, 1, %0, c15, c0, 3 \n\t"
108 "isb \n\t"
109 "dsb "
110 : : "r" (0x400) );
111 }
112
113 v7_exit_coherency_flush(all);
114 cci_disable_port_by_cpu(read_cpuid_mpidr());
198} 115}
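The inline assembly deserves a gloss: per the driver's comment, mcr p15, 1, %0, c15, c0, 3 writes the Cortex-A15 L2 prefetch control register, 0x400 being the value used here to stop prefetching before the flush, and the isb/dsb pair forces the write to take effect before any further memory traffic. A commented restatement under the same A15-only assumption:

static inline void a15_l2_prefetch_disable(void)
{
        asm volatile(
        "mcr    p15, 1, %0, c15, c0, 3  \n\t"   /* L2 prefetch control */
        "isb                            \n\t"   /* synchronize the CP15 write */
        "dsb    "                               /* drain outstanding accesses */
        : : "r" (0x400));
}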
199 116
200static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster) 117static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster)
@@ -217,27 +134,21 @@ static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
217 BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER); 134 BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
218 135
219 for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) { 136 for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) {
137 pr_debug("%s(cpu=%u, cluster=%u): RESET_CTRL = 0x%08X\n",
138 __func__, cpu, cluster,
139 readl_relaxed(scc + RESET_CTRL));
140
220 /* 141 /*
221 * Only examine the hardware state if the target CPU has 142 * We need the CPU to reach WFI, but the power
222 * caught up at least as far as tc2_pm_down(): 143 * controller may put the cluster in reset and
144 * power it off as soon as that happens, before
145 * we have a chance to see STANDBYWFI.
146 *
147 * So we need to check for both conditions:
223 */ 148 */
224 if (ACCESS_ONCE(tc2_pm_use_count[cpu][cluster]) == 0) { 149 if (tc2_core_in_reset(cpu, cluster) ||
225 pr_debug("%s(cpu=%u, cluster=%u): RESET_CTRL = 0x%08X\n", 150 ve_spc_cpu_in_wfi(cpu, cluster))
226 __func__, cpu, cluster, 151 return 0; /* success: the CPU is halted */
227 readl_relaxed(scc + RESET_CTRL));
228
229 /*
230 * We need the CPU to reach WFI, but the power
231 * controller may put the cluster in reset and
232 * power it off as soon as that happens, before
233 * we have a chance to see STANDBYWFI.
234 *
235 * So we need to check for both conditions:
236 */
237 if (tc2_core_in_reset(cpu, cluster) ||
238 ve_spc_cpu_in_wfi(cpu, cluster))
239 return 0; /* success: the CPU is halted */
240 }
241 152
242 /* Otherwise, wait and retry: */ 153 /* Otherwise, wait and retry: */
243 msleep(POLL_MSEC); 154 msleep(POLL_MSEC);
@@ -246,72 +157,40 @@ static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
246 return -ETIMEDOUT; /* timeout */ 157 return -ETIMEDOUT; /* timeout */
247} 158}
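With the use-count gate gone (the MCPM core now maintains that state), every iteration can poll the hardware directly. The loop is the standard bounded-poll shape; a self-contained userspace model follows, with a hypothetical wait_for() helper, nanosleep() standing in for msleep(), and the constants assumed rather than taken from the driver:

#include <errno.h>
#include <stdbool.h>
#include <time.h>

#define POLL_MSEC    10
#define TIMEOUT_MSEC 1000

static int wait_for(bool (*halted)(void))
{
        struct timespec ts = { 0, POLL_MSEC * 1000000L };
        int tries;

        for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) {
                if (halted())
                        return 0;       /* success: the target is halted */
                nanosleep(&ts, NULL);   /* otherwise, wait and retry */
        }
        return -ETIMEDOUT;              /* timeout */
}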
248 159
249static void tc2_pm_suspend(u64 residency) 160static void tc2_pm_cpu_suspend_prepare(unsigned int cpu, unsigned int cluster)
250{ 161{
251 unsigned int mpidr, cpu, cluster;
252
253 mpidr = read_cpuid_mpidr();
254 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
255 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
256 ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point)); 162 ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point));
257 tc2_pm_down(residency);
258} 163}
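The suspend hook shrinks to one line because the rest of the old tc2_pm_suspend() is now generic MCPM work. What stays platform-specific is handing the SPC a resume address: the woken CPU starts with the MMU off, so the address must be physical, hence virt_to_phys(). Its counterpart in tc2_pm_cpu_is_up() clears the latch again; sketched as a pair (names from this file):

/* going down: the SPC latches the physical MCPM entry point */
ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point));

/* back up: drop the stale address so it cannot be reused by accident */
ve_spc_set_resume_addr(cluster, cpu, 0);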
259 164
260static void tc2_pm_powered_up(void) 165static void tc2_pm_cpu_is_up(unsigned int cpu, unsigned int cluster)
261{ 166{
262 unsigned int mpidr, cpu, cluster;
263 unsigned long flags;
264
265 mpidr = read_cpuid_mpidr();
266 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
267 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
268
269 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); 167 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
270 BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER); 168 BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
271
272 local_irq_save(flags);
273 arch_spin_lock(&tc2_pm_lock);
274
275 if (tc2_cluster_unused(cluster)) {
276 ve_spc_powerdown(cluster, false);
277 ve_spc_global_wakeup_irq(false);
278 }
279
280 if (!tc2_pm_use_count[cpu][cluster])
281 tc2_pm_use_count[cpu][cluster] = 1;
282
283 ve_spc_cpu_wakeup_irq(cluster, cpu, false); 169 ve_spc_cpu_wakeup_irq(cluster, cpu, false);
284 ve_spc_set_resume_addr(cluster, cpu, 0); 170 ve_spc_set_resume_addr(cluster, cpu, 0);
171}
285 172
286 arch_spin_unlock(&tc2_pm_lock); 173static void tc2_pm_cluster_is_up(unsigned int cluster)
287 local_irq_restore(flags); 174{
175 pr_debug("%s: cluster %u\n", __func__, cluster);
176 BUG_ON(cluster >= TC2_CLUSTERS);
177 ve_spc_powerdown(cluster, false);
178 ve_spc_global_wakeup_irq(false);
288} 179}
289 180
290static const struct mcpm_platform_ops tc2_pm_power_ops = { 181static const struct mcpm_platform_ops tc2_pm_power_ops = {
291 .power_up = tc2_pm_power_up, 182 .cpu_powerup = tc2_pm_cpu_powerup,
292 .power_down = tc2_pm_power_down, 183 .cluster_powerup = tc2_pm_cluster_powerup,
184 .cpu_suspend_prepare = tc2_pm_cpu_suspend_prepare,
185 .cpu_powerdown_prepare = tc2_pm_cpu_powerdown_prepare,
186 .cluster_powerdown_prepare = tc2_pm_cluster_powerdown_prepare,
187 .cpu_cache_disable = tc2_pm_cpu_cache_disable,
188 .cluster_cache_disable = tc2_pm_cluster_cache_disable,
293 .wait_for_powerdown = tc2_pm_wait_for_powerdown, 189 .wait_for_powerdown = tc2_pm_wait_for_powerdown,
294 .suspend = tc2_pm_suspend, 190 .cpu_is_up = tc2_pm_cpu_is_up,
295 .powered_up = tc2_pm_powered_up, 191 .cluster_is_up = tc2_pm_cluster_is_up,
296}; 192};
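This ops table is the heart of the patch: the monolithic power_up/power_down/suspend methods give way to fine-grained callbacks, while locking, use counting, and the last-man election move into the MCPM core. A rough sketch of how the core can be expected to drive the power-down side (simplified; the real core also runs the cluster state machine and its sync barriers):

static void mcpm_power_down_sketch(const struct mcpm_platform_ops *ops,
                                   unsigned int cpu, unsigned int cluster,
                                   bool last_man)
{
        ops->cpu_powerdown_prepare(cpu, cluster);
        if (last_man) {
                ops->cluster_powerdown_prepare(cluster);
                ops->cluster_cache_disable();   /* flush all cache levels */
        } else {
                ops->cpu_cache_disable();       /* flush own L1 only */
        }
        wfi();                                  /* controller takes over */
}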
297 193
298static bool __init tc2_pm_usage_count_init(void)
299{
300 unsigned int mpidr, cpu, cluster;
301
302 mpidr = read_cpuid_mpidr();
303 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
304 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
305
306 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
307 if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
308		pr_err("%s: boot CPU is out of bounds!\n", __func__);
309 return false;
310 }
311 tc2_pm_use_count[cpu][cluster] = 1;
312 return true;
313}
314
315/* 194/*
316 * Enable cluster-level coherency, in preparation for turning on the MMU. 195 * Enable cluster-level coherency, in preparation for turning on the MMU.
317 */ 196 */
@@ -323,23 +202,9 @@ static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
323" b cci_enable_port_for_self "); 202" b cci_enable_port_for_self ");
324} 203}
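The helper above is __naked and written in asm because it runs on an inbound CPU before a stack or the MMU exists; its argument selects the affinity level being brought up. What the three instructions amount to, restated in C purely for readability (this must not be compiled as-is, since the compiler would assume a usable stack):

static void tc2_pm_power_up_setup_sketch(unsigned int affinity_level)
{
        /* affinity level 0 (individual CPU): nothing to do on TC2 */
        if (affinity_level != 1)
                return;

        /* affinity level 1 (cluster): rejoin hardware coherency via the CCI */
        cci_enable_port_for_self();
}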
325 204
326static void __init tc2_cache_off(void)
327{
328 pr_info("TC2: disabling cache during MCPM loopback test\n");
329 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
330 /* disable L2 prefetching on the Cortex-A15 */
331 asm volatile(
332 "mcr p15, 1, %0, c15, c0, 3 \n\t"
333 "isb \n\t"
334 "dsb "
335 : : "r" (0x400) );
336 }
337 v7_exit_coherency_flush(all);
338 cci_disable_port_by_cpu(read_cpuid_mpidr());
339}
340
341static int __init tc2_pm_init(void) 205static int __init tc2_pm_init(void)
342{ 206{
207 unsigned int mpidr, cpu, cluster;
343 int ret, irq; 208 int ret, irq;
344 u32 a15_cluster_id, a7_cluster_id, sys_info; 209 u32 a15_cluster_id, a7_cluster_id, sys_info;
345 struct device_node *np; 210 struct device_node *np;
@@ -379,14 +244,20 @@ static int __init tc2_pm_init(void)
379 if (!cci_probed()) 244 if (!cci_probed())
380 return -ENODEV; 245 return -ENODEV;
381 246
382 if (!tc2_pm_usage_count_init()) 247 mpidr = read_cpuid_mpidr();
248 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
249 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
250 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
251 if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
	252		pr_err("%s: boot CPU is out of bounds!\n", __func__);
383 return -EINVAL; 253 return -EINVAL;
254 }
384 255
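The bounds check folded into tc2_pm_init() leans on MPIDR decoding: affinity level 0 is the core number within its cluster, level 1 the cluster id. Expanded for this two-level topology (the kernel's MPIDR_AFFINITY_LEVEL() macro generalizes the shift-and-mask):

unsigned int mpidr   = read_cpuid_mpidr();
unsigned int cpu     = mpidr & 0xff;            /* Aff0: core within cluster */
unsigned int cluster = (mpidr >> 8) & 0xff;     /* Aff1: cluster id */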
385 ret = mcpm_platform_register(&tc2_pm_power_ops); 256 ret = mcpm_platform_register(&tc2_pm_power_ops);
386 if (!ret) { 257 if (!ret) {
387 mcpm_sync_init(tc2_pm_power_up_setup); 258 mcpm_sync_init(tc2_pm_power_up_setup);
388 /* test if we can (re)enable the CCI on our own */ 259 /* test if we can (re)enable the CCI on our own */
389 BUG_ON(mcpm_loopback(tc2_cache_off) != 0); 260 BUG_ON(mcpm_loopback(tc2_pm_cluster_cache_disable) != 0);
390 pr_info("TC2 power management initialized\n"); 261 pr_info("TC2 power management initialized\n");
391 } 262 }
392 return ret; 263 return ret;