diff options
author | Olof Johansson <olof@lixom.net> | 2013-08-28 14:29:18 -0400 |
---|---|---|
committer | Olof Johansson <olof@lixom.net> | 2013-08-28 14:29:18 -0400 |
commit | aaf75e454cc5e16e7f24bd87590b2d882ddb1671 (patch) | |
tree | 0260f84da58f68a55c467fdd9cfde6b43acc9e22 | |
parent | b36f4be3de1b123d8601de062e7dbfc904f305fb (diff) | |
parent | 14d2c34cfa0026ba3916f5d5b2f1ad433beeef5a (diff) |
Merge branch 'cpuidle/biglittle' into next/drivers
From Lorenzo Pieralisi:
This patch series contains:
- GIC driver update to add a method to disable the GIC CPU IF
- TC2 MCPM update to add GIC CPU disabling to suspend method
- TC2 CPU idle big.LITTLE driver
* cpuidle/biglittle:
cpuidle: big.LITTLE: vexpress-TC2 CPU idle driver
ARM: vexpress: tc2: disable GIC CPU IF in tc2_pm_suspend
drivers: irq-chip: irq-gic: introduce gic_cpu_if_down()
ARM: vexpress/TC2: implement PM suspend method
ARM: vexpress/TC2: basic PM support
ARM: vexpress: Add SCC to V2P-CA15_A7's device tree
ARM: vexpress/TC2: add Serial Power Controller (SPC) support
ARM: vexpress/dcscb: fix cache disabling sequences
Signed-off-by: Olof Johansson <olof@lixom.net>
-rw-r--r-- | Documentation/devicetree/bindings/arm/vexpress-scc.txt | 33 | ||||
-rw-r--r-- | MAINTAINERS | 9 | ||||
-rw-r--r-- | arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts | 6 | ||||
-rw-r--r-- | arch/arm/mach-vexpress/Kconfig | 8 | ||||
-rw-r--r-- | arch/arm/mach-vexpress/Makefile | 1 | ||||
-rw-r--r-- | arch/arm/mach-vexpress/dcscb.c | 58 | ||||
-rw-r--r-- | arch/arm/mach-vexpress/spc.c | 180 | ||||
-rw-r--r-- | arch/arm/mach-vexpress/spc.h | 24 | ||||
-rw-r--r-- | arch/arm/mach-vexpress/tc2_pm.c | 346 | ||||
-rw-r--r-- | drivers/cpuidle/Kconfig | 10 | ||||
-rw-r--r-- | drivers/cpuidle/Makefile | 1 | ||||
-rw-r--r-- | drivers/cpuidle/cpuidle-big_little.c | 209 | ||||
-rw-r--r-- | drivers/irqchip/irq-gic.c | 6 | ||||
-rw-r--r-- | include/linux/irqchip/arm-gic.h | 1 |
14 files changed, 871 insertions, 21 deletions
diff --git a/Documentation/devicetree/bindings/arm/vexpress-scc.txt b/Documentation/devicetree/bindings/arm/vexpress-scc.txt new file mode 100644 index 000000000000..ae5043e42e5d --- /dev/null +++ b/Documentation/devicetree/bindings/arm/vexpress-scc.txt | |||
@@ -0,0 +1,33 @@ | |||
1 | ARM Versatile Express Serial Configuration Controller | ||
2 | ----------------------------------------------------- | ||
3 | |||
4 | Test chips for ARM Versatile Express platform implement SCC (Serial | ||
5 | Configuration Controller) interface, used to set initial conditions | ||
6 | for the test chip. | ||
7 | |||
8 | In some cases its registers are also mapped in normal address space | ||
9 | and can be used to obtain runtime information about the chip internals | ||
10 | (like silicon temperature sensors) and as interface to other subsystems | ||
11 | like platform configuration control and power management. | ||
12 | |||
13 | Required properties: | ||
14 | |||
15 | - compatible value: "arm,vexpress-scc,<model>", "arm,vexpress-scc"; | ||
16 | where <model> is the full tile model name (as used | ||
17 | in the tile's Technical Reference Manual), | ||
18 | eg. for Coretile Express A15x2 A7x3 (V2P-CA15_A7): | ||
19 | compatible = "arm,vexpress-scc,v2p-ca15_a7", "arm,vexpress-scc"; | ||
20 | |||
21 | Optional properties: | ||
22 | |||
23 | - reg: when the SCC is memory mapped, physical address and size of the | ||
24 | registers window | ||
25 | - interrupts: when the SCC can generate a system-level interrupt | ||
26 | |||
27 | Example: | ||
28 | |||
29 | scc@7fff0000 { | ||
30 | compatible = "arm,vexpress-scc,v2p-ca15_a7", "arm,vexpress-scc"; | ||
31 | reg = <0 0x7fff0000 0 0x1000>; | ||
32 | interrupts = <0 95 4>; | ||
33 | }; | ||
diff --git a/MAINTAINERS b/MAINTAINERS index 229c66b12cc2..a0001ef1b071 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -2268,6 +2268,15 @@ F: drivers/cpufreq/arm_big_little.h | |||
2268 | F: drivers/cpufreq/arm_big_little.c | 2268 | F: drivers/cpufreq/arm_big_little.c |
2269 | F: drivers/cpufreq/arm_big_little_dt.c | 2269 | F: drivers/cpufreq/arm_big_little_dt.c |
2270 | 2270 | ||
2271 | CPUIDLE DRIVER - ARM BIG LITTLE | ||
2272 | M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> | ||
2273 | M: Daniel Lezcano <daniel.lezcano@linaro.org> | ||
2274 | L: linux-pm@vger.kernel.org | ||
2275 | L: linux-arm-kernel@lists.infradead.org | ||
2276 | T: git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git | ||
2277 | S: Maintained | ||
2278 | F: drivers/cpuidle/cpuidle-big_little.c | ||
2279 | |||
2271 | CPUIDLE DRIVERS | 2280 | CPUIDLE DRIVERS |
2272 | M: Rafael J. Wysocki <rjw@sisk.pl> | 2281 | M: Rafael J. Wysocki <rjw@sisk.pl> |
2273 | M: Daniel Lezcano <daniel.lezcano@linaro.org> | 2282 | M: Daniel Lezcano <daniel.lezcano@linaro.org> |
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts index d2803be4e1a8..759b0cd20013 100644 --- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts +++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts | |||
@@ -125,6 +125,12 @@ | |||
125 | clock-names = "apb_pclk"; | 125 | clock-names = "apb_pclk"; |
126 | }; | 126 | }; |
127 | 127 | ||
128 | scc@7fff0000 { | ||
129 | compatible = "arm,vexpress-scc,v2p-ca15_a7", "arm,vexpress-scc"; | ||
130 | reg = <0 0x7fff0000 0 0x1000>; | ||
131 | interrupts = <0 95 4>; | ||
132 | }; | ||
133 | |||
128 | timer { | 134 | timer { |
129 | compatible = "arm,armv7-timer"; | 135 | compatible = "arm,armv7-timer"; |
130 | interrupts = <1 13 0xf08>, | 136 | interrupts = <1 13 0xf08>, |
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig index b8bbabec6310..c700e623f9d8 100644 --- a/arch/arm/mach-vexpress/Kconfig +++ b/arch/arm/mach-vexpress/Kconfig | |||
@@ -66,4 +66,12 @@ config ARCH_VEXPRESS_DCSCB | |||
66 | This is needed to provide CPU and cluster power management | 66 | This is needed to provide CPU and cluster power management |
67 | on RTSM implementing big.LITTLE. | 67 | on RTSM implementing big.LITTLE. |
68 | 68 | ||
69 | config ARCH_VEXPRESS_TC2_PM | ||
70 | bool "Versatile Express TC2 power management" | ||
71 | depends on MCPM | ||
72 | select ARM_CCI | ||
73 | help | ||
74 | Support for CPU and cluster power management on Versatile Express | ||
75 | with a TC2 (A15x2 A7x3) big.LITTLE core tile. | ||
76 | |||
69 | endmenu | 77 | endmenu |
diff --git a/arch/arm/mach-vexpress/Makefile b/arch/arm/mach-vexpress/Makefile index 48ba89a8149f..36ea8247123a 100644 --- a/arch/arm/mach-vexpress/Makefile +++ b/arch/arm/mach-vexpress/Makefile | |||
@@ -7,5 +7,6 @@ ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \ | |||
7 | obj-y := v2m.o | 7 | obj-y := v2m.o |
8 | obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o | 8 | obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o |
9 | obj-$(CONFIG_ARCH_VEXPRESS_DCSCB) += dcscb.o dcscb_setup.o | 9 | obj-$(CONFIG_ARCH_VEXPRESS_DCSCB) += dcscb.o dcscb_setup.o |
10 | obj-$(CONFIG_ARCH_VEXPRESS_TC2_PM) += tc2_pm.o spc.o | ||
10 | obj-$(CONFIG_SMP) += platsmp.o | 11 | obj-$(CONFIG_SMP) += platsmp.o |
11 | obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o | 12 | obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o |
diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c index 16d57a8a9d5a..85fffa702f5b 100644 --- a/arch/arm/mach-vexpress/dcscb.c +++ b/arch/arm/mach-vexpress/dcscb.c | |||
@@ -136,14 +136,29 @@ static void dcscb_power_down(void) | |||
136 | /* | 136 | /* |
137 | * Flush all cache levels for this cluster. | 137 | * Flush all cache levels for this cluster. |
138 | * | 138 | * |
139 | * A15/A7 can hit in the cache with SCTLR.C=0, so we don't need | 139 | * To do so we do: |
140 | * a preliminary flush here for those CPUs. At least, that's | 140 | * - Clear the SCTLR.C bit to prevent further cache allocations |
141 | * the theory -- without the extra flush, Linux explodes on | 141 | * - Flush the whole cache |
142 | * RTSM (to be investigated). | 142 | * - Clear the ACTLR "SMP" bit to disable local coherency |
143 | * | ||
144 | * Let's do it in the safest possible way i.e. with | ||
145 | * no memory access within the following sequence | ||
146 | * including to the stack. | ||
143 | */ | 147 | */ |
144 | flush_cache_all(); | 148 | asm volatile( |
145 | set_cr(get_cr() & ~CR_C); | 149 | "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t" |
146 | flush_cache_all(); | 150 | "bic r0, r0, #"__stringify(CR_C)" \n\t" |
151 | "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t" | ||
152 | "isb \n\t" | ||
153 | "bl v7_flush_dcache_all \n\t" | ||
154 | "clrex \n\t" | ||
155 | "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t" | ||
156 | "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" | ||
157 | "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t" | ||
158 | "isb \n\t" | ||
159 | "dsb " | ||
160 | : : : "r0","r1","r2","r3","r4","r5","r6","r7", | ||
161 | "r9","r10","r11","lr","memory"); | ||
147 | 162 | ||
148 | /* | 163 | /* |
149 | * This is a harmless no-op. On platforms with a real | 164 | * This is a harmless no-op. On platforms with a real |
@@ -152,9 +167,6 @@ static void dcscb_power_down(void) | |||
152 | */ | 167 | */ |
153 | outer_flush_all(); | 168 | outer_flush_all(); |
154 | 169 | ||
155 | /* Disable local coherency by clearing the ACTLR "SMP" bit: */ | ||
156 | set_auxcr(get_auxcr() & ~(1 << 6)); | ||
157 | |||
158 | /* | 170 | /* |
159 | * Disable cluster-level coherency by masking | 171 | * Disable cluster-level coherency by masking |
160 | * incoming snoops and DVM messages: | 172 | * incoming snoops and DVM messages: |
@@ -167,18 +179,22 @@ static void dcscb_power_down(void) | |||
167 | 179 | ||
168 | /* | 180 | /* |
169 | * Flush the local CPU cache. | 181 | * Flush the local CPU cache. |
170 | * | 182 | * Let's do it in the safest possible way as above. |
171 | * A15/A7 can hit in the cache with SCTLR.C=0, so we don't need | ||
172 | * a preliminary flush here for those CPUs. At least, that's | ||
173 | * the theory -- without the extra flush, Linux explodes on | ||
174 | * RTSM (to be investigated). | ||
175 | */ | 183 | */ |
176 | flush_cache_louis(); | 184 | asm volatile( |
177 | set_cr(get_cr() & ~CR_C); | 185 | "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t" |
178 | flush_cache_louis(); | 186 | "bic r0, r0, #"__stringify(CR_C)" \n\t" |
179 | 187 | "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t" | |
180 | /* Disable local coherency by clearing the ACTLR "SMP" bit: */ | 188 | "isb \n\t" |
181 | set_auxcr(get_auxcr() & ~(1 << 6)); | 189 | "bl v7_flush_dcache_louis \n\t" |
190 | "clrex \n\t" | ||
191 | "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t" | ||
192 | "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" | ||
193 | "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t" | ||
194 | "isb \n\t" | ||
195 | "dsb " | ||
196 | : : : "r0","r1","r2","r3","r4","r5","r6","r7", | ||
197 | "r9","r10","r11","lr","memory"); | ||
182 | } | 198 | } |
183 | 199 | ||
184 | __mcpm_cpu_down(cpu, cluster); | 200 | __mcpm_cpu_down(cpu, cluster); |
diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c new file mode 100644 index 000000000000..eefb029197ca --- /dev/null +++ b/arch/arm/mach-vexpress/spc.c | |||
@@ -0,0 +1,180 @@ | |||
1 | /* | ||
2 | * Versatile Express Serial Power Controller (SPC) support | ||
3 | * | ||
4 | * Copyright (C) 2013 ARM Ltd. | ||
5 | * | ||
6 | * Authors: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com> | ||
7 | * Achin Gupta <achin.gupta@arm.com> | ||
8 | * Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
15 | * kind, whether express or implied; without even the implied warranty | ||
16 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | */ | ||
19 | |||
20 | #include <linux/err.h> | ||
21 | #include <linux/io.h> | ||
22 | #include <linux/slab.h> | ||
23 | |||
24 | #include <asm/cacheflush.h> | ||
25 | |||
26 | #define SPCLOG "vexpress-spc: " | ||
27 | |||
28 | /* SPC wake-up IRQs status and mask */ | ||
29 | #define WAKE_INT_MASK 0x24 | ||
30 | #define WAKE_INT_RAW 0x28 | ||
31 | #define WAKE_INT_STAT 0x2c | ||
32 | /* SPC power down registers */ | ||
33 | #define A15_PWRDN_EN 0x30 | ||
34 | #define A7_PWRDN_EN 0x34 | ||
35 | /* SPC per-CPU mailboxes */ | ||
36 | #define A15_BX_ADDR0 0x68 | ||
37 | #define A7_BX_ADDR0 0x78 | ||
38 | |||
39 | /* wake-up interrupt masks */ | ||
40 | #define GBL_WAKEUP_INT_MSK (0x3 << 10) | ||
41 | |||
42 | /* TC2 static dual-cluster configuration */ | ||
43 | #define MAX_CLUSTERS 2 | ||
44 | |||
45 | struct ve_spc_drvdata { | ||
46 | void __iomem *baseaddr; | ||
47 | /* | ||
48 | * A15s cluster identifier | ||
49 | * It corresponds to A15 processors MPIDR[15:8] bitfield | ||
50 | */ | ||
51 | u32 a15_clusid; | ||
52 | }; | ||
53 | |||
54 | static struct ve_spc_drvdata *info; | ||
55 | |||
56 | static inline bool cluster_is_a15(u32 cluster) | ||
57 | { | ||
58 | return cluster == info->a15_clusid; | ||
59 | } | ||
60 | |||
61 | /** | ||
62 | * ve_spc_global_wakeup_irq() | ||
63 | * | ||
64 | * Function to set/clear global wakeup IRQs. Not protected by locking since | ||
65 | * it might be used in code paths where normal cacheable locks are not | ||
66 | * working. Locking must be provided by the caller to ensure atomicity. | ||
67 | * | ||
68 | * @set: if true, global wake-up IRQs are set, if false they are cleared | ||
69 | */ | ||
70 | void ve_spc_global_wakeup_irq(bool set) | ||
71 | { | ||
72 | u32 reg; | ||
73 | |||
74 | reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK); | ||
75 | |||
76 | if (set) | ||
77 | reg |= GBL_WAKEUP_INT_MSK; | ||
78 | else | ||
79 | reg &= ~GBL_WAKEUP_INT_MSK; | ||
80 | |||
81 | writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK); | ||
82 | } | ||
83 | |||
84 | /** | ||
85 | * ve_spc_cpu_wakeup_irq() | ||
86 | * | ||
87 | * Function to set/clear per-CPU wake-up IRQs. Not protected by locking since | ||
88 | * it might be used in code paths where normal cacheable locks are not | ||
89 | * working. Locking must be provided by the caller to ensure atomicity. | ||
90 | * | ||
91 | * @cluster: mpidr[15:8] bitfield describing cluster affinity level | ||
92 | * @cpu: mpidr[7:0] bitfield describing cpu affinity level | ||
93 | * @set: if true, wake-up IRQs are set, if false they are cleared | ||
94 | */ | ||
95 | void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set) | ||
96 | { | ||
97 | u32 mask, reg; | ||
98 | |||
99 | if (cluster >= MAX_CLUSTERS) | ||
100 | return; | ||
101 | |||
102 | mask = 1 << cpu; | ||
103 | |||
104 | if (!cluster_is_a15(cluster)) | ||
105 | mask <<= 4; | ||
106 | |||
107 | reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK); | ||
108 | |||
109 | if (set) | ||
110 | reg |= mask; | ||
111 | else | ||
112 | reg &= ~mask; | ||
113 | |||
114 | writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK); | ||
115 | } | ||
116 | |||
117 | /** | ||
118 | * ve_spc_set_resume_addr() - set the jump address used for warm boot | ||
119 | * | ||
120 | * @cluster: mpidr[15:8] bitfield describing cluster affinity level | ||
121 | * @cpu: mpidr[7:0] bitfield describing cpu affinity level | ||
122 | * @addr: physical resume address | ||
123 | */ | ||
124 | void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr) | ||
125 | { | ||
126 | void __iomem *baseaddr; | ||
127 | |||
128 | if (cluster >= MAX_CLUSTERS) | ||
129 | return; | ||
130 | |||
131 | if (cluster_is_a15(cluster)) | ||
132 | baseaddr = info->baseaddr + A15_BX_ADDR0 + (cpu << 2); | ||
133 | else | ||
134 | baseaddr = info->baseaddr + A7_BX_ADDR0 + (cpu << 2); | ||
135 | |||
136 | writel_relaxed(addr, baseaddr); | ||
137 | } | ||
138 | |||
139 | /** | ||
140 | * ve_spc_powerdown() | ||
141 | * | ||
142 | * Function to enable/disable cluster powerdown. Not protected by locking | ||
143 | * since it might be used in code paths where normal cacheable locks are not | ||
144 | * working. Locking must be provided by the caller to ensure atomicity. | ||
145 | * | ||
146 | * @cluster: mpidr[15:8] bitfield describing cluster affinity level | ||
147 | * @enable: if true enables powerdown, if false disables it | ||
148 | */ | ||
149 | void ve_spc_powerdown(u32 cluster, bool enable) | ||
150 | { | ||
151 | u32 pwdrn_reg; | ||
152 | |||
153 | if (cluster >= MAX_CLUSTERS) | ||
154 | return; | ||
155 | |||
156 | pwdrn_reg = cluster_is_a15(cluster) ? A15_PWRDN_EN : A7_PWRDN_EN; | ||
157 | writel_relaxed(enable, info->baseaddr + pwdrn_reg); | ||
158 | } | ||
159 | |||
160 | int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid) | ||
161 | { | ||
162 | info = kzalloc(sizeof(*info), GFP_KERNEL); | ||
163 | if (!info) { | ||
164 | pr_err(SPCLOG "unable to allocate mem\n"); | ||
165 | return -ENOMEM; | ||
166 | } | ||
167 | |||
168 | info->baseaddr = baseaddr; | ||
169 | info->a15_clusid = a15_clusid; | ||
170 | |||
171 | /* | ||
172 | * Multi-cluster systems may need this data when non-coherent, during | ||
173 | * cluster power-up/power-down. Make sure driver info reaches main | ||
174 | * memory. | ||
175 | */ | ||
176 | sync_cache_w(info); | ||
177 | sync_cache_w(&info); | ||
178 | |||
179 | return 0; | ||
180 | } | ||
diff --git a/arch/arm/mach-vexpress/spc.h b/arch/arm/mach-vexpress/spc.h new file mode 100644 index 000000000000..5f7e4a446a17 --- /dev/null +++ b/arch/arm/mach-vexpress/spc.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License version 2 as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * Copyright (C) 2012 ARM Limited | ||
12 | */ | ||
13 | |||
14 | |||
15 | #ifndef __SPC_H_ | ||
16 | #define __SPC_H_ | ||
17 | |||
18 | int __init ve_spc_init(void __iomem *base, u32 a15_clusid); | ||
19 | void ve_spc_global_wakeup_irq(bool set); | ||
20 | void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set); | ||
21 | void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr); | ||
22 | void ve_spc_powerdown(u32 cluster, bool enable); | ||
23 | |||
24 | #endif | ||
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c new file mode 100644 index 000000000000..68adb40d2c71 --- /dev/null +++ b/arch/arm/mach-vexpress/tc2_pm.c | |||
@@ -0,0 +1,346 @@ | |||
1 | /* | ||
2 | * arch/arm/mach-vexpress/tc2_pm.c - TC2 power management support | ||
3 | * | ||
4 | * Created by: Nicolas Pitre, October 2012 | ||
5 | * Copyright: (C) 2012-2013 Linaro Limited | ||
6 | * | ||
7 | * Some portions of this file were originally written by Achin Gupta | ||
8 | * Copyright: (C) 2012 ARM Limited | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | #include <linux/init.h> | ||
16 | #include <linux/io.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/of_address.h> | ||
19 | #include <linux/spinlock.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/irqchip/arm-gic.h> | ||
22 | |||
23 | #include <asm/mcpm.h> | ||
24 | #include <asm/proc-fns.h> | ||
25 | #include <asm/cacheflush.h> | ||
26 | #include <asm/cputype.h> | ||
27 | #include <asm/cp15.h> | ||
28 | |||
29 | #include <linux/arm-cci.h> | ||
30 | |||
31 | #include "spc.h" | ||
32 | |||
33 | /* SCC conf registers */ | ||
34 | #define A15_CONF 0x400 | ||
35 | #define A7_CONF 0x500 | ||
36 | #define SYS_INFO 0x700 | ||
37 | #define SPC_BASE 0xb00 | ||
38 | |||
39 | /* | ||
40 | * We can't use regular spinlocks. In the switcher case, it is possible | ||
41 | * for an outbound CPU to call power_down() after its inbound counterpart | ||
42 | * is already live using the same logical CPU number which trips lockdep | ||
43 | * debugging. | ||
44 | */ | ||
45 | static arch_spinlock_t tc2_pm_lock = __ARCH_SPIN_LOCK_UNLOCKED; | ||
46 | |||
47 | #define TC2_CLUSTERS 2 | ||
48 | #define TC2_MAX_CPUS_PER_CLUSTER 3 | ||
49 | |||
50 | static unsigned int tc2_nr_cpus[TC2_CLUSTERS]; | ||
51 | |||
52 | /* Keep per-cpu usage count to cope with unordered up/down requests */ | ||
53 | static int tc2_pm_use_count[TC2_MAX_CPUS_PER_CLUSTER][TC2_CLUSTERS]; | ||
54 | |||
55 | #define tc2_cluster_unused(cluster) \ | ||
56 | (!tc2_pm_use_count[0][cluster] && \ | ||
57 | !tc2_pm_use_count[1][cluster] && \ | ||
58 | !tc2_pm_use_count[2][cluster]) | ||
59 | |||
60 | static int tc2_pm_power_up(unsigned int cpu, unsigned int cluster) | ||
61 | { | ||
62 | pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); | ||
63 | if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) | ||
64 | return -EINVAL; | ||
65 | |||
66 | /* | ||
67 | * Since this is called with IRQs enabled, and no arch_spin_lock_irq | ||
68 | * variant exists, we need to disable IRQs manually here. | ||
69 | */ | ||
70 | local_irq_disable(); | ||
71 | arch_spin_lock(&tc2_pm_lock); | ||
72 | |||
73 | if (tc2_cluster_unused(cluster)) | ||
74 | ve_spc_powerdown(cluster, false); | ||
75 | |||
76 | tc2_pm_use_count[cpu][cluster]++; | ||
77 | if (tc2_pm_use_count[cpu][cluster] == 1) { | ||
78 | ve_spc_set_resume_addr(cluster, cpu, | ||
79 | virt_to_phys(mcpm_entry_point)); | ||
80 | ve_spc_cpu_wakeup_irq(cluster, cpu, true); | ||
81 | } else if (tc2_pm_use_count[cpu][cluster] != 2) { | ||
82 | /* | ||
83 | * The only possible values are: | ||
84 | * 0 = CPU down | ||
85 | * 1 = CPU (still) up | ||
86 | * 2 = CPU requested to be up before it had a chance | ||
87 | * to actually make itself down. | ||
88 | * Any other value is a bug. | ||
89 | */ | ||
90 | BUG(); | ||
91 | } | ||
92 | |||
93 | arch_spin_unlock(&tc2_pm_lock); | ||
94 | local_irq_enable(); | ||
95 | |||
96 | return 0; | ||
97 | } | ||
98 | |||
99 | static void tc2_pm_down(u64 residency) | ||
100 | { | ||
101 | unsigned int mpidr, cpu, cluster; | ||
102 | bool last_man = false, skip_wfi = false; | ||
103 | |||
104 | mpidr = read_cpuid_mpidr(); | ||
105 | cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); | ||
106 | cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); | ||
107 | |||
108 | pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); | ||
109 | BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER); | ||
110 | |||
111 | __mcpm_cpu_going_down(cpu, cluster); | ||
112 | |||
113 | arch_spin_lock(&tc2_pm_lock); | ||
114 | BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP); | ||
115 | tc2_pm_use_count[cpu][cluster]--; | ||
116 | if (tc2_pm_use_count[cpu][cluster] == 0) { | ||
117 | ve_spc_cpu_wakeup_irq(cluster, cpu, true); | ||
118 | if (tc2_cluster_unused(cluster)) { | ||
119 | ve_spc_powerdown(cluster, true); | ||
120 | ve_spc_global_wakeup_irq(true); | ||
121 | last_man = true; | ||
122 | } | ||
123 | } else if (tc2_pm_use_count[cpu][cluster] == 1) { | ||
124 | /* | ||
125 | * A power_up request went ahead of us. | ||
126 | * Even if we do not want to shut this CPU down, | ||
127 | * the caller expects a certain state as if the WFI | ||
128 | * was aborted. So let's continue with cache cleaning. | ||
129 | */ | ||
130 | skip_wfi = true; | ||
131 | } else | ||
132 | BUG(); | ||
133 | |||
134 | if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) { | ||
135 | arch_spin_unlock(&tc2_pm_lock); | ||
136 | |||
137 | if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) { | ||
138 | /* | ||
139 | * On the Cortex-A15 we need to disable | ||
140 | * L2 prefetching before flushing the cache. | ||
141 | */ | ||
142 | asm volatile( | ||
143 | "mcr p15, 1, %0, c15, c0, 3 \n\t" | ||
144 | "isb \n\t" | ||
145 | "dsb " | ||
146 | : : "r" (0x400) ); | ||
147 | } | ||
148 | |||
149 | /* | ||
150 | * We need to disable and flush the whole (L1 and L2) cache. | ||
151 | * Let's do it in the safest possible way i.e. with | ||
152 | * no memory access within the following sequence | ||
153 | * including the stack. | ||
154 | */ | ||
155 | asm volatile( | ||
156 | "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t" | ||
157 | "bic r0, r0, #"__stringify(CR_C)" \n\t" | ||
158 | "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t" | ||
159 | "isb \n\t" | ||
160 | "bl v7_flush_dcache_all \n\t" | ||
161 | "clrex \n\t" | ||
162 | "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t" | ||
163 | "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" | ||
164 | "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t" | ||
165 | "isb \n\t" | ||
166 | "dsb " | ||
167 | : : : "r0","r1","r2","r3","r4","r5","r6","r7", | ||
168 | "r9","r10","r11","lr","memory"); | ||
169 | |||
170 | cci_disable_port_by_cpu(mpidr); | ||
171 | |||
172 | __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN); | ||
173 | } else { | ||
174 | /* | ||
175 | * If last man then undo any setup done previously. | ||
176 | */ | ||
177 | if (last_man) { | ||
178 | ve_spc_powerdown(cluster, false); | ||
179 | ve_spc_global_wakeup_irq(false); | ||
180 | } | ||
181 | |||
182 | arch_spin_unlock(&tc2_pm_lock); | ||
183 | |||
184 | /* | ||
185 | * We need to disable and flush only the L1 cache. | ||
186 | * Let's do it in the safest possible way as above. | ||
187 | */ | ||
188 | asm volatile( | ||
189 | "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t" | ||
190 | "bic r0, r0, #"__stringify(CR_C)" \n\t" | ||
191 | "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t" | ||
192 | "isb \n\t" | ||
193 | "bl v7_flush_dcache_louis \n\t" | ||
194 | "clrex \n\t" | ||
195 | "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t" | ||
196 | "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" | ||
197 | "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t" | ||
198 | "isb \n\t" | ||
199 | "dsb " | ||
200 | : : : "r0","r1","r2","r3","r4","r5","r6","r7", | ||
201 | "r9","r10","r11","lr","memory"); | ||
202 | } | ||
203 | |||
204 | __mcpm_cpu_down(cpu, cluster); | ||
205 | |||
206 | /* Now we are prepared for power-down, do it: */ | ||
207 | if (!skip_wfi) | ||
208 | wfi(); | ||
209 | |||
210 | /* Not dead at this point? Let our caller cope. */ | ||
211 | } | ||
212 | |||
213 | static void tc2_pm_power_down(void) | ||
214 | { | ||
215 | tc2_pm_down(0); | ||
216 | } | ||
217 | |||
218 | static void tc2_pm_suspend(u64 residency) | ||
219 | { | ||
220 | unsigned int mpidr, cpu, cluster; | ||
221 | |||
222 | mpidr = read_cpuid_mpidr(); | ||
223 | cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); | ||
224 | cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); | ||
225 | ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point)); | ||
226 | gic_cpu_if_down(); | ||
227 | tc2_pm_down(residency); | ||
228 | } | ||
229 | |||
230 | static void tc2_pm_powered_up(void) | ||
231 | { | ||
232 | unsigned int mpidr, cpu, cluster; | ||
233 | unsigned long flags; | ||
234 | |||
235 | mpidr = read_cpuid_mpidr(); | ||
236 | cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); | ||
237 | cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); | ||
238 | |||
239 | pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); | ||
240 | BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER); | ||
241 | |||
242 | local_irq_save(flags); | ||
243 | arch_spin_lock(&tc2_pm_lock); | ||
244 | |||
245 | if (tc2_cluster_unused(cluster)) { | ||
246 | ve_spc_powerdown(cluster, false); | ||
247 | ve_spc_global_wakeup_irq(false); | ||
248 | } | ||
249 | |||
250 | if (!tc2_pm_use_count[cpu][cluster]) | ||
251 | tc2_pm_use_count[cpu][cluster] = 1; | ||
252 | |||
253 | ve_spc_cpu_wakeup_irq(cluster, cpu, false); | ||
254 | ve_spc_set_resume_addr(cluster, cpu, 0); | ||
255 | |||
256 | arch_spin_unlock(&tc2_pm_lock); | ||
257 | local_irq_restore(flags); | ||
258 | } | ||
259 | |||
260 | static const struct mcpm_platform_ops tc2_pm_power_ops = { | ||
261 | .power_up = tc2_pm_power_up, | ||
262 | .power_down = tc2_pm_power_down, | ||
263 | .suspend = tc2_pm_suspend, | ||
264 | .powered_up = tc2_pm_powered_up, | ||
265 | }; | ||
266 | |||
267 | static bool __init tc2_pm_usage_count_init(void) | ||
268 | { | ||
269 | unsigned int mpidr, cpu, cluster; | ||
270 | |||
271 | mpidr = read_cpuid_mpidr(); | ||
272 | cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); | ||
273 | cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); | ||
274 | |||
275 | pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); | ||
276 | if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) { | ||
277 | pr_err("%s: boot CPU is out of bound!\n", __func__); | ||
278 | return false; | ||
279 | } | ||
280 | tc2_pm_use_count[cpu][cluster] = 1; | ||
281 | return true; | ||
282 | } | ||
283 | |||
284 | /* | ||
285 | * Enable cluster-level coherency, in preparation for turning on the MMU. | ||
286 | */ | ||
287 | static void __naked tc2_pm_power_up_setup(unsigned int affinity_level) | ||
288 | { | ||
289 | asm volatile (" \n" | ||
290 | " cmp r0, #1 \n" | ||
291 | " bxne lr \n" | ||
292 | " b cci_enable_port_for_self "); | ||
293 | } | ||
294 | |||
295 | static int __init tc2_pm_init(void) | ||
296 | { | ||
297 | int ret; | ||
298 | void __iomem *scc; | ||
299 | u32 a15_cluster_id, a7_cluster_id, sys_info; | ||
300 | struct device_node *np; | ||
301 | |||
302 | /* | ||
303 | * The power management-related features are hidden behind | ||
304 | * SCC registers. We need to extract runtime information like | ||
305 | * cluster ids and number of CPUs really available in clusters. | ||
306 | */ | ||
307 | np = of_find_compatible_node(NULL, NULL, | ||
308 | "arm,vexpress-scc,v2p-ca15_a7"); | ||
309 | scc = of_iomap(np, 0); | ||
310 | if (!scc) | ||
311 | return -ENODEV; | ||
312 | |||
313 | a15_cluster_id = readl_relaxed(scc + A15_CONF) & 0xf; | ||
314 | a7_cluster_id = readl_relaxed(scc + A7_CONF) & 0xf; | ||
315 | if (a15_cluster_id >= TC2_CLUSTERS || a7_cluster_id >= TC2_CLUSTERS) | ||
316 | return -EINVAL; | ||
317 | |||
318 | sys_info = readl_relaxed(scc + SYS_INFO); | ||
319 | tc2_nr_cpus[a15_cluster_id] = (sys_info >> 16) & 0xf; | ||
320 | tc2_nr_cpus[a7_cluster_id] = (sys_info >> 20) & 0xf; | ||
321 | |||
322 | /* | ||
323 | * A subset of the SCC registers is also used to communicate | ||
324 | * with the SPC (power controller). We need to be able to | ||
325 | * drive it very early in the boot process to power up | ||
326 | * processors, so we initialize the SPC driver here. | ||
327 | */ | ||
328 | ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id); | ||
329 | if (ret) | ||
330 | return ret; | ||
331 | |||
332 | if (!cci_probed()) | ||
333 | return -ENODEV; | ||
334 | |||
335 | if (!tc2_pm_usage_count_init()) | ||
336 | return -EINVAL; | ||
337 | |||
338 | ret = mcpm_platform_register(&tc2_pm_power_ops); | ||
339 | if (!ret) { | ||
340 | mcpm_sync_init(tc2_pm_power_up_setup); | ||
341 | pr_info("TC2 power management initialized\n"); | ||
342 | } | ||
343 | return ret; | ||
344 | } | ||
345 | |||
346 | early_initcall(tc2_pm_init); | ||
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig index 0e2cd5cab4d0..0f8658773be3 100644 --- a/drivers/cpuidle/Kconfig +++ b/drivers/cpuidle/Kconfig | |||
@@ -42,6 +42,16 @@ config CPU_IDLE_ZYNQ | |||
42 | help | 42 | help |
43 | Select this to enable cpuidle on Xilinx Zynq processors. | 43 | Select this to enable cpuidle on Xilinx Zynq processors. |
44 | 44 | ||
45 | config CPU_IDLE_BIG_LITTLE | ||
46 | bool "Support for ARM big.LITTLE processors" | ||
47 | depends on ARCH_VEXPRESS_TC2_PM | ||
48 | select ARM_CPU_SUSPEND | ||
49 | select CPU_IDLE_MULTIPLE_DRIVERS | ||
50 | help | ||
51 | Select this option to enable CPU idle driver for big.LITTLE based | ||
52 | ARM systems. Driver manages CPUs coordination through MCPM and | ||
53 | define different C-states for little and big cores through the | ||
54 | multiple CPU idle drivers infrastructure. | ||
45 | endif | 55 | endif |
46 | 56 | ||
47 | config ARCH_NEEDS_CPU_IDLE_COUPLED | 57 | config ARCH_NEEDS_CPU_IDLE_COUPLED |
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile index 8767a7b3eb91..3b6445c106df 100644 --- a/drivers/cpuidle/Makefile +++ b/drivers/cpuidle/Makefile | |||
@@ -8,3 +8,4 @@ obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o | |||
8 | obj-$(CONFIG_CPU_IDLE_CALXEDA) += cpuidle-calxeda.o | 8 | obj-$(CONFIG_CPU_IDLE_CALXEDA) += cpuidle-calxeda.o |
9 | obj-$(CONFIG_ARCH_KIRKWOOD) += cpuidle-kirkwood.o | 9 | obj-$(CONFIG_ARCH_KIRKWOOD) += cpuidle-kirkwood.o |
10 | obj-$(CONFIG_CPU_IDLE_ZYNQ) += cpuidle-zynq.o | 10 | obj-$(CONFIG_CPU_IDLE_ZYNQ) += cpuidle-zynq.o |
11 | obj-$(CONFIG_CPU_IDLE_BIG_LITTLE) += cpuidle-big_little.o | ||
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c new file mode 100644 index 000000000000..b45fc6249041 --- /dev/null +++ b/drivers/cpuidle/cpuidle-big_little.c | |||
@@ -0,0 +1,209 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2013 ARM/Linaro | ||
3 | * | ||
4 | * Authors: Daniel Lezcano <daniel.lezcano@linaro.org> | ||
5 | * Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> | ||
6 | * Nicolas Pitre <nicolas.pitre@linaro.org> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * Maintainer: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> | ||
13 | * Maintainer: Daniel Lezcano <daniel.lezcano@linaro.org> | ||
14 | */ | ||
15 | #include <linux/cpuidle.h> | ||
16 | #include <linux/cpu_pm.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/of.h> | ||
19 | |||
20 | #include <asm/cpu.h> | ||
21 | #include <asm/cputype.h> | ||
22 | #include <asm/cpuidle.h> | ||
23 | #include <asm/mcpm.h> | ||
24 | #include <asm/smp_plat.h> | ||
25 | #include <asm/suspend.h> | ||
26 | |||
27 | static int bl_enter_powerdown(struct cpuidle_device *dev, | ||
28 | struct cpuidle_driver *drv, int idx); | ||
29 | |||
30 | /* | ||
31 | * NB: Owing to current menu governor behaviour big and LITTLE | ||
32 | * index 1 states have to define exit_latency and target_residency for | ||
33 | * cluster state since, when all CPUs in a cluster hit it, the cluster | ||
34 | * can be shutdown. This means that when a single CPU enters this state | ||
35 | * the exit_latency and target_residency values are somewhat overkill. | ||
36 | * There is no notion of cluster states in the menu governor, so CPUs | ||
37 | * have to define CPU states where possibly the cluster will be shutdown | ||
38 | * depending on the state of other CPUs. idle states entry and exit happen | ||
39 | * at random times; however the cluster state provides target_residency | ||
40 | * values as if all CPUs in a cluster enter the state at once; this is | ||
41 | * somewhat optimistic and behaviour should be fixed either in the governor | ||
42 | * or in the MCPM back-ends. | ||
43 | * To make this driver 100% generic the number of states and the exit_latency | ||
44 | * target_residency values must be obtained from device tree bindings. | ||
45 | * | ||
46 | * exit_latency: refers to the TC2 vexpress test chip and depends on the | ||
47 | * current cluster operating point. It is the time it takes to get the CPU | ||
48 | * up and running when the CPU is powered up on cluster wake-up from shutdown. | ||
49 | * Current values for big and LITTLE clusters are provided for clusters | ||
50 | * running at default operating points. | ||
51 | * | ||
52 | * target_residency: it is the minimum amount of time the cluster has | ||
53 | * to be down to break even in terms of power consumption. cluster | ||
54 | * shutdown has inherent dynamic power costs (L2 writebacks to DRAM | ||
55 | * being the main factor) that depend on the current operating points. | ||
56 | * The current values for both clusters are provided for a CPU whose half | ||
57 | * of L2 lines are dirty and require cleaning to DRAM, and takes into | ||
58 | * account leakage static power values related to the vexpress TC2 testchip. | ||
59 | */ | ||
/*
 * cpuidle driver for the LITTLE (Cortex-A7) cluster.
 *
 * State 0 is plain WFI; state 1 powers the CPU down and, when all CPUs
 * in the cluster enter it, allows cluster shutdown (see the NB comment
 * above for the rationale behind the exit_latency/target_residency
 * values, which refer to the TC2 test chip at default operating points).
 */
static struct cpuidle_driver bl_idle_little_driver = {
	.name = "little_idle",
	.owner = THIS_MODULE,
	.states[0] = ARM_CPUIDLE_WFI_STATE,
	.states[1] = {
		.enter = bl_enter_powerdown,
		.exit_latency = 700,
		.target_residency = 2500,
		/* TIMER_STOP: the local timer is lost across power down */
		.flags = CPUIDLE_FLAG_TIME_VALID |
			CPUIDLE_FLAG_TIMER_STOP,
		.name = "C1",
		.desc = "ARM little-cluster power down",
	},
	.state_count = 2,
};
75 | |||
/*
 * cpuidle driver for the big (Cortex-A15) cluster.
 *
 * Mirrors bl_idle_little_driver; only the latency/residency figures
 * differ (TC2 values for the big cluster at default operating points —
 * see the NB comment above).
 */
static struct cpuidle_driver bl_idle_big_driver = {
	.name = "big_idle",
	.owner = THIS_MODULE,
	.states[0] = ARM_CPUIDLE_WFI_STATE,
	.states[1] = {
		.enter = bl_enter_powerdown,
		.exit_latency = 500,
		.target_residency = 2000,
		/* TIMER_STOP: the local timer is lost across power down */
		.flags = CPUIDLE_FLAG_TIME_VALID |
			CPUIDLE_FLAG_TIMER_STOP,
		.name = "C1",
		.desc = "ARM big-cluster power down",
	},
	.state_count = 2,
};
91 | |||
92 | /* | ||
93 | * notrace prevents trace shims from getting inserted where they | ||
94 | * should not. Global jumps and ldrex/strex must not be inserted | ||
95 | * in power down sequences where caches and MMU may be turned off. | ||
96 | */ | ||
97 | static int notrace bl_powerdown_finisher(unsigned long arg) | ||
98 | { | ||
99 | /* MCPM works with HW CPU identifiers */ | ||
100 | unsigned int mpidr = read_cpuid_mpidr(); | ||
101 | unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); | ||
102 | unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); | ||
103 | |||
104 | mcpm_set_entry_vector(cpu, cluster, cpu_resume); | ||
105 | |||
106 | /* | ||
107 | * Residency value passed to mcpm_cpu_suspend back-end | ||
108 | * has to be given clear semantics. Set to 0 as a | ||
109 | * temporary value. | ||
110 | */ | ||
111 | mcpm_cpu_suspend(0); | ||
112 | |||
113 | /* return value != 0 means failure */ | ||
114 | return 1; | ||
115 | } | ||
116 | |||
117 | /** | ||
118 | * bl_enter_powerdown - Programs CPU to enter the specified state | ||
119 | * @dev: cpuidle device | ||
120 | * @drv: The target state to be programmed | ||
121 | * @idx: state index | ||
122 | * | ||
123 | * Called from the CPUidle framework to program the device to the | ||
124 | * specified target state selected by the governor. | ||
125 | */ | ||
126 | static int bl_enter_powerdown(struct cpuidle_device *dev, | ||
127 | struct cpuidle_driver *drv, int idx) | ||
128 | { | ||
129 | cpu_pm_enter(); | ||
130 | |||
131 | cpu_suspend(0, bl_powerdown_finisher); | ||
132 | |||
133 | /* signals the MCPM core that CPU is out of low power state */ | ||
134 | mcpm_cpu_powered_up(); | ||
135 | |||
136 | cpu_pm_exit(); | ||
137 | |||
138 | return idx; | ||
139 | } | ||
140 | |||
141 | static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int cpu_id) | ||
142 | { | ||
143 | struct cpuinfo_arm *cpu_info; | ||
144 | struct cpumask *cpumask; | ||
145 | unsigned long cpuid; | ||
146 | int cpu; | ||
147 | |||
148 | cpumask = kzalloc(cpumask_size(), GFP_KERNEL); | ||
149 | if (!cpumask) | ||
150 | return -ENOMEM; | ||
151 | |||
152 | for_each_possible_cpu(cpu) { | ||
153 | cpu_info = &per_cpu(cpu_data, cpu); | ||
154 | cpuid = is_smp() ? cpu_info->cpuid : read_cpuid_id(); | ||
155 | |||
156 | /* read cpu id part number */ | ||
157 | if ((cpuid & 0xFFF0) == cpu_id) | ||
158 | cpumask_set_cpu(cpu, cpumask); | ||
159 | } | ||
160 | |||
161 | drv->cpumask = cpumask; | ||
162 | |||
163 | return 0; | ||
164 | } | ||
165 | |||
166 | static int __init bl_idle_init(void) | ||
167 | { | ||
168 | int ret; | ||
169 | |||
170 | /* | ||
171 | * Initialize the driver just for a compliant set of machines | ||
172 | */ | ||
173 | if (!of_machine_is_compatible("arm,vexpress,v2p-ca15_a7")) | ||
174 | return -ENODEV; | ||
175 | /* | ||
176 | * For now the differentiation between little and big cores | ||
177 | * is based on the part number. A7 cores are considered little | ||
178 | * cores, A15 are considered big cores. This distinction may | ||
179 | * evolve in the future with a more generic matching approach. | ||
180 | */ | ||
181 | ret = bl_idle_driver_init(&bl_idle_little_driver, | ||
182 | ARM_CPU_PART_CORTEX_A7); | ||
183 | if (ret) | ||
184 | return ret; | ||
185 | |||
186 | ret = bl_idle_driver_init(&bl_idle_big_driver, ARM_CPU_PART_CORTEX_A15); | ||
187 | if (ret) | ||
188 | goto out_uninit_little; | ||
189 | |||
190 | ret = cpuidle_register(&bl_idle_little_driver, NULL); | ||
191 | if (ret) | ||
192 | goto out_uninit_big; | ||
193 | |||
194 | ret = cpuidle_register(&bl_idle_big_driver, NULL); | ||
195 | if (ret) | ||
196 | goto out_unregister_little; | ||
197 | |||
198 | return 0; | ||
199 | |||
200 | out_unregister_little: | ||
201 | cpuidle_unregister(&bl_idle_little_driver); | ||
202 | out_uninit_big: | ||
203 | kfree(bl_idle_big_driver.cpumask); | ||
204 | out_uninit_little: | ||
205 | kfree(bl_idle_little_driver.cpumask); | ||
206 | |||
207 | return ret; | ||
208 | } | ||
209 | device_initcall(bl_idle_init); | ||
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index ee7c50312066..d0e948084eaf 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
@@ -453,6 +453,12 @@ static void gic_cpu_init(struct gic_chip_data *gic) | |||
453 | writel_relaxed(1, base + GIC_CPU_CTRL); | 453 | writel_relaxed(1, base + GIC_CPU_CTRL); |
454 | } | 454 | } |
455 | 455 | ||
/*
 * Disable the CPU interface of GIC #0 by clearing GIC_CPU_CTRL —
 * the mirror of gic_cpu_init(), which writes 1 to the same register.
 * Intended for power-down paths where the CPU IF must stop forwarding
 * interrupts to this CPU.
 */
void gic_cpu_if_down(void)
{
	void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]);
	writel_relaxed(0, cpu_base + GIC_CPU_CTRL);
}
461 | |||
456 | #ifdef CONFIG_CPU_PM | 462 | #ifdef CONFIG_CPU_PM |
457 | /* | 463 | /* |
458 | * Saves the GIC distributor registers during suspend or idle. Must be called | 464 | * Saves the GIC distributor registers during suspend or idle. Must be called |
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index 3e203eb23cc7..0e5d9ecdb2b6 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h | |||
@@ -66,6 +66,7 @@ extern struct irq_chip gic_arch_extn; | |||
66 | void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *, | 66 | void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *, |
67 | u32 offset, struct device_node *); | 67 | u32 offset, struct device_node *); |
68 | void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); | 68 | void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); |
69 | void gic_cpu_if_down(void); | ||
69 | 70 | ||
70 | static inline void gic_init(unsigned int nr, int start, | 71 | static inline void gic_init(unsigned int nr, int start, |
71 | void __iomem *dist , void __iomem *cpu) | 72 | void __iomem *dist , void __iomem *cpu) |