Diffstat (limited to 'arch/mips/kernel/smp-cps.c')

-rw-r--r--    arch/mips/kernel/smp-cps.c    432

1 file changed, 284 insertions(+), 148 deletions(-)
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index bb36b4e6b55f..df0598d9bfdd 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -20,104 +20,43 @@
 #include <asm/mips-cpc.h>
 #include <asm/mips_mt.h>
 #include <asm/mipsregs.h>
+#include <asm/pm-cps.h>
 #include <asm/smp-cps.h>
 #include <asm/time.h>
 #include <asm/uasm.h>
 
 static DECLARE_BITMAP(core_power, NR_CPUS);
 
-struct boot_config mips_cps_bootcfg;
+struct core_boot_config *mips_cps_core_bootcfg;
 
-static void init_core(void)
+static unsigned core_vpe_count(unsigned core)
 {
-        unsigned int nvpes, t;
-        u32 mvpconf0, vpeconf0, vpecontrol, tcstatus, tcbind, status;
+        unsigned cfg;
 
-        if (!cpu_has_mipsmt)
-                return;
+        if (!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
+                return 1;
-
-        /* Enter VPE configuration state */
-        dvpe();
-        set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-        /* Retrieve the count of VPEs in this core */
-        mvpconf0 = read_c0_mvpconf0();
-        nvpes = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
-        smp_num_siblings = nvpes;
-
-        for (t = 1; t < nvpes; t++) {
-                /* Use a 1:1 mapping of TC index to VPE index */
-                settc(t);
-
-                /* Bind 1 TC to this VPE */
-                tcbind = read_tc_c0_tcbind();
-                tcbind &= ~TCBIND_CURVPE;
-                tcbind |= t << TCBIND_CURVPE_SHIFT;
-                write_tc_c0_tcbind(tcbind);
-
-                /* Set exclusive TC, non-active, master */
-                vpeconf0 = read_vpe_c0_vpeconf0();
-                vpeconf0 &= ~(VPECONF0_XTC | VPECONF0_VPA);
-                vpeconf0 |= t << VPECONF0_XTC_SHIFT;
-                vpeconf0 |= VPECONF0_MVP;
-                write_vpe_c0_vpeconf0(vpeconf0);
-
-                /* Declare TC non-active, non-allocatable & interrupt exempt */
-                tcstatus = read_tc_c0_tcstatus();
-                tcstatus &= ~(TCSTATUS_A | TCSTATUS_DA);
-                tcstatus |= TCSTATUS_IXMT;
-                write_tc_c0_tcstatus(tcstatus);
-
-                /* Halt the TC */
-                write_tc_c0_tchalt(TCHALT_H);
-
-                /* Allow only 1 TC to execute */
-                vpecontrol = read_vpe_c0_vpecontrol();
-                vpecontrol &= ~VPECONTROL_TE;
-                write_vpe_c0_vpecontrol(vpecontrol);
-
-                /* Copy (most of) Status from VPE 0 */
-                status = read_c0_status();
-                status &= ~(ST0_IM | ST0_IE | ST0_KSU);
-                status |= ST0_CU0;
-                write_vpe_c0_status(status);
-
-                /* Copy Config from VPE 0 */
-                write_vpe_c0_config(read_c0_config());
-                write_vpe_c0_config7(read_c0_config7());
-
-                /* Ensure no software interrupts are pending */
-                write_vpe_c0_cause(0);
-
-                /* Sync Count */
-                write_vpe_c0_count(read_c0_count());
-        }
 
-        /* Leave VPE configuration state */
-        clear_c0_mvpcontrol(MVPCONTROL_VPC);
+        write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);
+        cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
+        return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
 }
 
 static void __init cps_smp_setup(void)
 {
         unsigned int ncores, nvpes, core_vpes;
         int c, v;
-        u32 core_cfg, *entry_code;
 
         /* Detect & record VPE topology */
         ncores = mips_cm_numcores();
         pr_info("VPE topology ");
         for (c = nvpes = 0; c < ncores; c++) {
-                if (cpu_has_mipsmt && config_enabled(CONFIG_MIPS_MT_SMP)) {
-                        write_gcr_cl_other(c << CM_GCR_Cx_OTHER_CORENUM_SHF);
-                        core_cfg = read_gcr_co_config();
-                        core_vpes = ((core_cfg & CM_GCR_Cx_CONFIG_PVPE_MSK) >>
-                                     CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
-                } else {
-                        core_vpes = 1;
-                }
-
+                core_vpes = core_vpe_count(c);
                 pr_cont("%c%u", c ? ',' : '{', core_vpes);
 
+                /* Use the number of VPEs in core 0 for smp_num_siblings */
+                if (!c)
+                        smp_num_siblings = core_vpes;
+
                 for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
                         cpu_data[nvpes + v].core = c;
 #ifdef CONFIG_MIPS_MT_SMP
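The new core_vpe_count() works by steering the CM's core-other register region at the chosen core (the write to GCR_CL_OTHER) and then reading the PVPE field of that core's Config GCR, which encodes the number of VPEs minus one. A minimal standalone sketch of the field arithmetic follows; the shift and mask values are assumptions mirroring the kernel's CM_GCR_Cx_CONFIG_PVPE_{SHF,MSK} definitions, and the "+ 1" semantics are the point:

    /*
     * Standalone model of the PVPE extraction in core_vpe_count().
     * PVPE_SHF/PVPE_MSK are assumed values; PVPE stores "VPEs - 1",
     * hence the "+ 1".
     */
    #include <stdint.h>
    #include <stdio.h>

    #define PVPE_SHF 0
    #define PVPE_MSK (0x3ffu << PVPE_SHF)

    static unsigned vpes_from_gcr_config(uint32_t cfg)
    {
        return ((cfg & PVPE_MSK) >> PVPE_SHF) + 1;
    }

    int main(void)
    {
        printf("%u\n", vpes_from_gcr_config(0x1)); /* PVPE == 1 -> 2 VPEs */
        return 0;
    }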
@@ -137,19 +76,14 @@ static void __init cps_smp_setup(void)
                 __cpu_logical_map[v] = v;
         }
 
+        /* Set a coherent default CCA (CWB) */
+        change_c0_config(CONF_CM_CMASK, 0x5);
+
         /* Core 0 is powered up (we're running on it) */
         bitmap_set(core_power, 0, 1);
 
-        /* Disable MT - we only want to run 1 TC per VPE */
-        if (cpu_has_mipsmt)
-                dmt();
-
         /* Initialise core 0 */
-        init_core();
-
-        /* Patch the start of mips_cps_core_entry to provide the CM base */
-        entry_code = (u32 *)&mips_cps_core_entry;
-        UASM_i_LA(&entry_code, 3, (long)mips_cm_base);
+        mips_cps_core_init();
 
         /* Make core 0 coherent with everything */
         write_gcr_cl_coherence(0xff);
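The added change_c0_config(CONF_CM_CMASK, 0x5) call gives kseg0 a coherent default cacheability & coherency attribute (CCA 5, cacheable coherent write-back), one of the two CCAs the new check in cps_prepare_cpus() below accepts. The change_c0_*() helpers are read-modify-write accessors; a sketch of the equivalent pure computation, assuming only that the K0 field occupies Config[2:0]:

    #include <stdint.h>

    #define CONF_CM_CMASK 0x7    /* K0 field: Config[2:0], an assumption here */

    /*
     * Model of change_c0_config(mask, val): clear `mask`, then set
     * `val & mask`. Returns the updated value instead of writing the
     * privileged CP0 Config register.
     */
    static uint32_t model_change_config(uint32_t config, uint32_t mask,
                                        uint32_t val)
    {
        return (config & ~mask) | (val & mask);
    }

    /* model_change_config(cfg, CONF_CM_CMASK, 0x5) yields CCA 5 (CWB) */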
@@ -157,15 +91,99 @@ static void __init cps_smp_setup(void)
 
 static void __init cps_prepare_cpus(unsigned int max_cpus)
 {
+        unsigned ncores, core_vpes, c, cca;
+        bool cca_unsuitable;
+        u32 *entry_code;
+
         mips_mt_set_cpuoptions();
+
+        /* Detect whether the CCA is unsuited to multi-core SMP */
+        cca = read_c0_config() & CONF_CM_CMASK;
+        switch (cca) {
+        case 0x4: /* CWBE */
+        case 0x5: /* CWB */
+                /* The CCA is coherent, multi-core is fine */
+                cca_unsuitable = false;
+                break;
+
+        default:
+                /* CCA is not coherent, multi-core is not usable */
+                cca_unsuitable = true;
+        }
+
+        /* Warn the user if the CCA prevents multi-core */
+        ncores = mips_cm_numcores();
+        if (cca_unsuitable && ncores > 1) {
+                pr_warn("Using only one core due to unsuitable CCA 0x%x\n",
+                        cca);
+
+                for_each_present_cpu(c) {
+                        if (cpu_data[c].core)
+                                set_cpu_present(c, false);
+                }
+        }
+
+        /*
+         * Patch the start of mips_cps_core_entry to provide:
+         *
+         * v0 = CM base address
+         * s0 = kseg0 CCA
+         */
+        entry_code = (u32 *)&mips_cps_core_entry;
+        UASM_i_LA(&entry_code, 3, (long)mips_cm_base);
+        uasm_i_addiu(&entry_code, 16, 0, cca);
+        dma_cache_wback_inv((unsigned long)&mips_cps_core_entry,
+                            (void *)entry_code - (void *)&mips_cps_core_entry);
+
+        /* Allocate core boot configuration structs */
+        mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
+                                        GFP_KERNEL);
+        if (!mips_cps_core_bootcfg) {
+                pr_err("Failed to allocate boot config for %u cores\n", ncores);
+                goto err_out;
+        }
+
+        /* Allocate VPE boot configuration structs */
+        for (c = 0; c < ncores; c++) {
+                core_vpes = core_vpe_count(c);
+                mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
+                                sizeof(*mips_cps_core_bootcfg[c].vpe_config),
+                                GFP_KERNEL);
+                if (!mips_cps_core_bootcfg[c].vpe_config) {
+                        pr_err("Failed to allocate %u VPE boot configs\n",
+                               core_vpes);
+                        goto err_out;
+                }
+        }
+
+        /* Mark this CPU as booted */
+        atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask,
+                   1 << cpu_vpe_id(&current_cpu_data));
+
+        return;
+err_out:
+        /* Clean up allocations */
+        if (mips_cps_core_bootcfg) {
+                for (c = 0; c < ncores; c++)
+                        kfree(mips_cps_core_bootcfg[c].vpe_config);
+                kfree(mips_cps_core_bootcfg);
+                mips_cps_core_bootcfg = NULL;
+        }
+
+        /* Effectively disable SMP by declaring CPUs not present */
+        for_each_possible_cpu(c) {
+                if (c == 0)
+                        continue;
+                set_cpu_present(c, false);
+        }
 }
 
-static void boot_core(struct boot_config *cfg)
+static void boot_core(unsigned core)
 {
         u32 access;
 
         /* Select the appropriate core */
-        write_gcr_cl_other(cfg->core << CM_GCR_Cx_OTHER_CORENUM_SHF);
+        write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);
 
         /* Set its reset vector */
         write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));
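The entry-code patching above now materialises two values for the secondary-core entry stub: UASM_i_LA(&entry_code, 3, ...) loads the CM base into register 3 (v0) and uasm_i_addiu(&entry_code, 16, 0, cca) loads the chosen CCA into register 16 (s0). The dma_cache_wback_inv() call then writes the patched lines back to memory so a freshly reset, not-yet-coherent core fetching through its uncached reset vector sees them. A sketch of the one instruction the addiu call is expected to emit, using the architectural MIPS32 I-type encoding (that this matches uasm's output byte-for-byte is an assumption):

    #include <stdint.h>

    /*
     * "addiu rt, rs, imm" in the MIPS32 I-type encoding:
     * opcode 0b001001 (9), then rs, rt, and a 16-bit immediate.
     */
    static uint32_t encode_addiu(unsigned rt, unsigned rs, uint16_t imm)
    {
        return (9u << 26) | (rs << 21) | (rt << 16) | imm;
    }

    /* encode_addiu(16, 0, 5) == 0x24100005, i.e. "addiu s0, zero, 5" */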
@@ -175,104 +193,74 @@ static void boot_core(struct boot_config *cfg)
 
         /* Ensure the core can access the GCRs */
         access = read_gcr_access();
-        access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + cfg->core);
+        access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core);
         write_gcr_access(access);
 
-        /* Copy cfg */
-        mips_cps_bootcfg = *cfg;
-
         if (mips_cpc_present()) {
-                /* Select the appropriate core */
-                write_cpc_cl_other(cfg->core << CPC_Cx_OTHER_CORENUM_SHF);
-
                 /* Reset the core */
+                mips_cpc_lock_other(core);
                 write_cpc_co_cmd(CPC_Cx_CMD_RESET);
+                mips_cpc_unlock_other();
         } else {
                 /* Take the core out of reset */
                 write_gcr_co_reset_release(0);
         }
 
         /* The core is now powered up */
-        bitmap_set(core_power, cfg->core, 1);
+        bitmap_set(core_power, core, 1);
 }
 
-static void boot_vpe(void *info)
+static void remote_vpe_boot(void *dummy)
 {
-        struct boot_config *cfg = info;
-        u32 tcstatus, vpeconf0;
-
-        /* Enter VPE configuration state */
-        dvpe();
-        set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-        settc(cfg->vpe);
-
-        /* Set the TC restart PC */
-        write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
-
-        /* Activate the TC, allow interrupts */
-        tcstatus = read_tc_c0_tcstatus();
-        tcstatus &= ~TCSTATUS_IXMT;
-        tcstatus |= TCSTATUS_A;
-        write_tc_c0_tcstatus(tcstatus);
-
-        /* Clear the TC halt bit */
-        write_tc_c0_tchalt(0);
-
-        /* Activate the VPE */
-        vpeconf0 = read_vpe_c0_vpeconf0();
-        vpeconf0 |= VPECONF0_VPA;
-        write_vpe_c0_vpeconf0(vpeconf0);
-
-        /* Set the stack & global pointer registers */
-        write_tc_gpr_sp(cfg->sp);
-        write_tc_gpr_gp(cfg->gp);
-
-        /* Leave VPE configuration state */
-        clear_c0_mvpcontrol(MVPCONTROL_VPC);
-
-        /* Enable other VPEs to execute */
-        evpe(EVPE_ENABLE);
+        mips_cps_boot_vpes();
 }
 
 static void cps_boot_secondary(int cpu, struct task_struct *idle)
 {
-        struct boot_config cfg;
+        unsigned core = cpu_data[cpu].core;
+        unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+        struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
+        struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
         unsigned int remote;
         int err;
 
-        cfg.core = cpu_data[cpu].core;
-        cfg.vpe = cpu_vpe_id(&cpu_data[cpu]);
-        cfg.pc = (unsigned long)&smp_bootstrap;
-        cfg.sp = __KSTK_TOS(idle);
-        cfg.gp = (unsigned long)task_thread_info(idle);
+        vpe_cfg->pc = (unsigned long)&smp_bootstrap;
+        vpe_cfg->sp = __KSTK_TOS(idle);
+        vpe_cfg->gp = (unsigned long)task_thread_info(idle);
+
+        atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);
+
+        preempt_disable();
 
-        if (!test_bit(cfg.core, core_power)) {
+        if (!test_bit(core, core_power)) {
                 /* Boot a VPE on a powered down core */
-                boot_core(&cfg);
-                return;
+                boot_core(core);
+                goto out;
         }
 
-        if (cfg.core != current_cpu_data.core) {
+        if (core != current_cpu_data.core) {
                 /* Boot a VPE on another powered up core */
                 for (remote = 0; remote < NR_CPUS; remote++) {
-                        if (cpu_data[remote].core != cfg.core)
+                        if (cpu_data[remote].core != core)
                                 continue;
                         if (cpu_online(remote))
                                 break;
                 }
                 BUG_ON(remote >= NR_CPUS);
 
-                err = smp_call_function_single(remote, boot_vpe, &cfg, 1);
+                err = smp_call_function_single(remote, remote_vpe_boot,
+                                               NULL, 1);
                 if (err)
                         panic("Failed to call remote CPU\n");
-                return;
+                goto out;
         }
 
         BUG_ON(!cpu_has_mipsmt);
 
         /* Boot a VPE on this core */
-        boot_vpe(&cfg);
+        mips_cps_boot_vpes();
+out:
+        preempt_enable();
 }
 
 static void cps_init_secondary(void)
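cps_boot_secondary() now records per-VPE boot state in mips_cps_core_bootcfg and ORs the target VPE's bit into the core's vpe_mask before triggering a boot; the hotplug path added below subtracts the bit again on offline, and mips_cps_boot_vpes() presumably consumes the mask to decide which VPEs to start. A userspace model of just that bookkeeping, using C11 atomics in place of the kernel's atomic_t (struct and function names here are hypothetical):

    #include <stdatomic.h>

    struct core_boot_cfg_model {
        atomic_uint vpe_mask;    /* one bit per VPE expected to run */
    };

    static void mark_vpe_booting(struct core_boot_cfg_model *cfg, unsigned vpe)
    {
        atomic_fetch_or(&cfg->vpe_mask, 1u << vpe);   /* like atomic_or() */
    }

    static void mark_vpe_offlined(struct core_boot_cfg_model *cfg, unsigned vpe)
    {
        atomic_fetch_sub(&cfg->vpe_mask, 1u << vpe);  /* like atomic_sub() */
    }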
@@ -281,10 +269,6 @@ static void cps_init_secondary(void)
         if (cpu_has_mipsmt)
                 dmt();
 
-        /* TODO: revisit this assumption once hotplug is implemented */
-        if (cpu_vpe_id(&current_cpu_data) == 0)
-                init_core();
-
         change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
                          STATUSF_IP6 | STATUSF_IP7);
 }
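With per-VPE initialisation moved into mips_cps_core_init() and mips_cps_boot_vpes(), cps_init_secondary() is left unmasking the interrupt lines the CPS code uses via Status.IM. A sketch of the mask arithmetic, assuming the usual MIPS layout where STATUSF_IPn is bit 8 + n of CP0 Status:

    #include <stdint.h>

    /* Status.IM occupies bits 15:8; STATUSF_IP(n) modelled as an assumption */
    #define STATUSF_IP(n) (1u << (8 + (n)))

    static const uint32_t cps_irq_mask =
        STATUSF_IP(3) | STATUSF_IP(4) | STATUSF_IP(6) | STATUSF_IP(7);
    /* == 0x0000d800 */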
@@ -302,6 +286,148 @@ static void cps_smp_finish(void)
         local_irq_enable();
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int cps_cpu_disable(void)
+{
+        unsigned cpu = smp_processor_id();
+        struct core_boot_config *core_cfg;
+
+        if (!cpu)
+                return -EBUSY;
+
+        if (!cps_pm_support_state(CPS_PM_POWER_GATED))
+                return -EINVAL;
+
+        core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
+        atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
+        smp_mb__after_atomic_dec();
+        set_cpu_online(cpu, false);
+        cpu_clear(cpu, cpu_callin_map);
+
+        return 0;
+}
+
+static DECLARE_COMPLETION(cpu_death_chosen);
+static unsigned cpu_death_sibling;
+static enum {
+        CPU_DEATH_HALT,
+        CPU_DEATH_POWER,
+} cpu_death;
+
+void play_dead(void)
+{
+        unsigned cpu, core;
+
+        local_irq_disable();
+        idle_task_exit();
+        cpu = smp_processor_id();
+        cpu_death = CPU_DEATH_POWER;
+
+        if (cpu_has_mipsmt) {
+                core = cpu_data[cpu].core;
+
+                /* Look for another online VPE within the core */
+                for_each_online_cpu(cpu_death_sibling) {
+                        if (cpu_data[cpu_death_sibling].core != core)
+                                continue;
+
+                        /*
+                         * There is an online VPE within the core. Just halt
+                         * this TC and leave the core alone.
+                         */
+                        cpu_death = CPU_DEATH_HALT;
+                        break;
+                }
+        }
+
+        /* This CPU has chosen its way out */
+        complete(&cpu_death_chosen);
+
+        if (cpu_death == CPU_DEATH_HALT) {
+                /* Halt this TC */
+                write_c0_tchalt(TCHALT_H);
+                instruction_hazard();
+        } else {
+                /* Power down the core */
+                cps_pm_enter_state(CPS_PM_POWER_GATED);
+        }
+
+        /* This should never be reached */
+        panic("Failed to offline CPU %u", cpu);
+}
+
+static void wait_for_sibling_halt(void *ptr_cpu)
+{
+        unsigned cpu = (unsigned)ptr_cpu;
+        unsigned vpe_id = cpu_data[cpu].vpe_id;
+        unsigned halted;
+        unsigned long flags;
+
+        do {
+                local_irq_save(flags);
+                settc(vpe_id);
+                halted = read_tc_c0_tchalt();
+                local_irq_restore(flags);
+        } while (!(halted & TCHALT_H));
+}
+
+static void cps_cpu_die(unsigned int cpu)
+{
+        unsigned core = cpu_data[cpu].core;
+        unsigned stat;
+        int err;
+
+        /* Wait for the cpu to choose its way out */
+        if (!wait_for_completion_timeout(&cpu_death_chosen,
+                                         msecs_to_jiffies(5000))) {
+                pr_err("CPU%u: didn't offline\n", cpu);
+                return;
+        }
+
+        /*
+         * Now wait for the CPU to actually offline. Without doing this that
+         * offlining may race with one or more of:
+         *
+         * - Onlining the CPU again.
+         * - Powering down the core if another VPE within it is offlined.
+         * - A sibling VPE entering a non-coherent state.
+         *
+         * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
+         * with which we could race, so do nothing.
+         */
+        if (cpu_death == CPU_DEATH_POWER) {
+                /*
+                 * Wait for the core to enter a powered down or clock gated
+                 * state, the latter happening when a JTAG probe is connected
+                 * in which case the CPC will refuse to power down the core.
+                 */
+                do {
+                        mips_cpc_lock_other(core);
+                        stat = read_cpc_co_stat_conf();
+                        stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
+                        mips_cpc_unlock_other();
+                } while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
+                         stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
+                         stat != CPC_Cx_STAT_CONF_SEQSTATE_U2);
+
+                /* Indicate the core is powered off */
+                bitmap_clear(core_power, core, 1);
+        } else if (cpu_has_mipsmt) {
+                /*
+                 * Have a CPU with access to the offlined CPUs registers wait
+                 * for its TC to halt.
+                 */
+                err = smp_call_function_single(cpu_death_sibling,
+                                               wait_for_sibling_halt,
+                                               (void *)cpu, 1);
+                if (err)
+                        panic("Failed to call remote sibling CPU\n");
+        }
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
 static struct plat_smp_ops cps_smp_ops = {
         .smp_setup = cps_smp_setup,
         .prepare_cpus = cps_prepare_cpus,
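The hotplug pair added above splits the work: the dying CPU picks CPU_DEATH_HALT or CPU_DEATH_POWER in play_dead() and signals the cpu_death_chosen completion, while cps_cpu_die() on a surviving CPU waits (with a 5s timeout) before polling the CPC sequencer state or a sibling's TCHalt. A minimal pthread model of that completion handshake, with hypothetical names, showing only the control flow:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t death_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t death_cond = PTHREAD_COND_INITIALIZER;
    static bool death_chosen;

    static void model_play_dead_signal(void)     /* play_dead() side */
    {
        pthread_mutex_lock(&death_lock);
        death_chosen = true;                     /* way out has been chosen */
        pthread_cond_signal(&death_cond);
        pthread_mutex_unlock(&death_lock);
    }

    static void model_cpu_die_wait(void)         /* cps_cpu_die() side */
    {
        pthread_mutex_lock(&death_lock);
        while (!death_chosen)                    /* kernel adds a 5s timeout */
            pthread_cond_wait(&death_cond, &death_lock);
        pthread_mutex_unlock(&death_lock);
    }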
@@ -310,8 +436,18 @@ static struct plat_smp_ops cps_smp_ops = {
         .smp_finish = cps_smp_finish,
         .send_ipi_single = gic_send_ipi_single,
         .send_ipi_mask = gic_send_ipi_mask,
+#ifdef CONFIG_HOTPLUG_CPU
+        .cpu_disable = cps_cpu_disable,
+        .cpu_die = cps_cpu_die,
+#endif
 };
 
+bool mips_cps_smp_in_use(void)
+{
+        extern struct plat_smp_ops *mp_ops;
+        return mp_ops == &cps_smp_ops;
+}
+
 int register_cps_smp_ops(void)
 {
         if (!mips_cm_present()) {
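The new mips_cps_smp_in_use() lets related code (presumably the CPS power-management layer, given the new asm/pm-cps.h include) ask whether these ops are the registered ones by simple pointer identity. A standalone model of the pattern, with hypothetical names:

    struct smp_ops_model { void (*boot_secondary)(int cpu); };

    static struct smp_ops_model cps_ops_model;
    static struct smp_ops_model *registered_ops;   /* set at registration */

    static _Bool cps_ops_in_use(void)
    {
        return registered_ops == &cps_ops_model;
    }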