author     Nicolas Pitre <nicolas.pitre@linaro.org>   2015-04-28 13:44:00 -0400
committer  Nicolas Pitre <nicolas.pitre@linaro.org>   2015-05-06 11:43:12 -0400
commit     77404d81cadf192cc1261d6269f622a06b83cdd5 (patch)
tree       0f8a81495efe94a38149d593c0219ee4488af550
parent     905cdf9dda5d89d843667b2f11da2308d1fd1c34 (diff)
ARM: MCPM: remove backward compatibility code
Now that no one uses the old callbacks anymore, let's remove them and associated support code.

Signed-off-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Dave Martin <Dave.Martin@arm.com>
-rw-r--r--  arch/arm/common/mcpm_entry.c    45
-rw-r--r--  arch/arm/include/asm/mcpm.h      6
2 files changed, 5 insertions, 46 deletions
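
For readers unfamiliar with what is left after the removal, below is a minimal sketch of a platform backend wired up against the retained mcpm_platform_ops interface. Only cpu_is_up, cluster_is_up, wait_for_powerdown and cpu_suspend_prepare appear in the hunks of this patch; mcpm_platform_register() and the cpu_powerup member name are assumptions based on the MCPM core of this period, not confirmed by the diff itself.

/* Hypothetical platform backend sketch against the retained MCPM API.
 * Only cpu_is_up/cluster_is_up/wait_for_powerdown/cpu_suspend_prepare
 * are confirmed by this patch; mcpm_platform_register() and the
 * cpu_powerup field name are assumed from the MCPM core of this era. */
#include <linux/init.h>
#include <asm/mcpm.h>

static int myplat_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
        /* assert power/clamps for the target CPU, return 0 on success */
        return 0;
}

static void myplat_cpu_is_up(unsigned int cpu, unsigned int cluster)
{
        /* per-CPU fixups once the CPU is coherent again */
}

static const struct mcpm_platform_ops myplat_pm_ops = {
        .cpu_powerup = myplat_cpu_powerup,      /* assumed member name */
        .cpu_is_up   = myplat_cpu_is_up,        /* confirmed by this patch */
};

static int __init myplat_mcpm_init(void)
{
        return mcpm_platform_register(&myplat_pm_ops);  /* assumed API */
}
early_initcall(myplat_mcpm_init);
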
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 5f8a52ac7edf..0908f96278c4 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -78,16 +78,11 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
         bool cpu_is_down, cluster_is_down;
         int ret = 0;
 
+        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
         if (!platform_ops)
                 return -EUNATCH; /* try not to shadow power_up errors */
         might_sleep();
 
-        /* backward compatibility callback */
-        if (platform_ops->power_up)
-                return platform_ops->power_up(cpu, cluster);
-
-        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
-
         /*
          * Since this is called with IRQs enabled, and no arch_spin_lock_irq
          * variant exists, we need to disable IRQs manually here.
@@ -128,29 +123,17 @@ void mcpm_cpu_power_down(void)
         bool cpu_going_down, last_man;
         phys_reset_t phys_reset;
 
+        mpidr = read_cpuid_mpidr();
+        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
         if (WARN_ON_ONCE(!platform_ops))
                 return;
         BUG_ON(!irqs_disabled());
 
-        /*
-         * Do this before calling into the power_down method,
-         * as it might not always be safe to do afterwards.
-         */
         setup_mm_for_reboot();
 
-        /* backward compatibility callback */
-        if (platform_ops->power_down) {
-                platform_ops->power_down();
-                goto not_dead;
-        }
-
-        mpidr = read_cpuid_mpidr();
-        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
-
         __mcpm_cpu_going_down(cpu, cluster);
-
         arch_spin_lock(&mcpm_lock);
         BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
 
@@ -187,7 +170,6 @@ void mcpm_cpu_power_down(void)
         if (cpu_going_down)
                 wfi();
 
-not_dead:
         /*
          * It is possible for a power_up request to happen concurrently
          * with a power_down request for the same CPU. In this case the
@@ -224,17 +206,6 @@ void mcpm_cpu_suspend(u64 expected_residency)
         if (WARN_ON_ONCE(!platform_ops))
                 return;
 
-        /* backward compatibility callback */
-        if (platform_ops->suspend) {
-                phys_reset_t phys_reset;
-                BUG_ON(!irqs_disabled());
-                setup_mm_for_reboot();
-                platform_ops->suspend(expected_residency);
-                phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
-                phys_reset(virt_to_phys(mcpm_entry_point));
-                BUG();
-        }
-
         /* Some platforms might have to enable special resume modes, etc. */
         if (platform_ops->cpu_suspend_prepare) {
                 unsigned int mpidr = read_cpuid_mpidr();
@@ -256,12 +227,6 @@ int mcpm_cpu_powered_up(void)
         if (!platform_ops)
                 return -EUNATCH;
 
-        /* backward compatibility callback */
-        if (platform_ops->powered_up) {
-                platform_ops->powered_up();
-                return 0;
-        }
-
         mpidr = read_cpuid_mpidr();
         cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
         cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index 50b378f59e08..e2118c941dbf 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -234,12 +234,6 @@ struct mcpm_platform_ops {
         void (*cpu_is_up)(unsigned int cpu, unsigned int cluster);
         void (*cluster_is_up)(unsigned int cluster);
         int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
-
-        /* deprecated callbacks */
-        int (*power_up)(unsigned int cpu, unsigned int cluster);
-        void (*power_down)(void);
-        void (*suspend)(u64);
-        void (*powered_up)(void);
 };
 
 /**
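
For reference, this is roughly what the ops structure looks like once the deprecated members are gone. Only the three callbacks visible in the hunk's context are confirmed by this diff; the earlier members of the structure are not shown here and are deliberately elided rather than guessed.

/* struct mcpm_platform_ops after this patch (abbreviated sketch;
 * members not visible in this diff are elided, not reconstructed). */
struct mcpm_platform_ops {
        /* ... power-up/power-down/suspend callbacks not shown in this hunk ... */
        void (*cpu_is_up)(unsigned int cpu, unsigned int cluster);
        void (*cluster_is_up)(unsigned int cluster);
        int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
};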