author     Paul Burton <paul.burton@imgtec.com>   2014-04-14 07:04:27 -0400
committer  Paul Burton <paul.burton@imgtec.com>   2014-05-28 11:20:28 -0400
commit     245a7868d2f2e54a9a9b084de00d003a9badb2a5 (patch)
tree       1b00bb19dfdc29b1ba27930db698c2d3fd21f16f
parent     d674dd14e85c49ca0e422de53a4c2b5bf44a339a (diff)
MIPS: smp-cps: rework core/VPE initialisation
When hotplug and/or a powered down idle state are supported, cases will arise
where a non-zero VPE must be brought online without VPE 0, and where multiple
VPEs must be onlined simultaneously. This patch prepares for that by:

  - Splitting struct boot_config into core & VPE boot config structures,
    allocated one per core or VPE respectively. This allows multiple VPEs to
    be onlined simultaneously without clobbering each other's configuration.

  - Indicating which VPEs should be online within a core at any given time
    using a bitmap. This allows multiple VPEs to be brought online
    simultaneously and also indicates to VPE 0 whether it should halt after
    starting any non-zero VPEs that should be online within the core. For
    example, if all VPEs within a core are offlined via hotplug and the user
    then onlines the second VPE within that core:

      1) The core will be powered up.

      2) VPE 0 will run from the BEV (i.e. mips_cps_core_entry) to initialise
         the core.

      3) VPE 0 will start VPE 1, since its bit is set in the core's bitmap.

      4) VPE 0 will halt itself, since its own bit is clear in the core's
         bitmap.

  - Moving the core & VPE initialisation to assembly code which makes no use
    of the stack. This is because if a non-zero VPE is to be brought online
    in a powered down core then, when VPE 0 of that core runs, it may not
    have a valid stack, and even if it did it would be messy to run through
    parts of generic kernel code on VPE 0 before starting the correct VPE.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
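As a quick illustration of the bitmap handshake described above, here is a
minimal, self-contained C model of the decision VPE 0 makes on core entry.
This is illustration only: the "_model" names are invented for this sketch,
and the real logic lives in mips_cps_boot_vpes in the diff below.

    #include <stdio.h>

    /* Toy stand-in for the per-core vpe_mask bitmap added by this patch. */
    struct core_boot_config_model {
            unsigned int vpe_mask;  /* bit n set => VPE n should be online */
    };

    /* What VPE 0 does after core-level init, per the commit message. */
    static void model_core_entry(const struct core_boot_config_model *cfg,
                                 unsigned int nvpes)
    {
            unsigned int vpe;

            /* Start every other VPE whose bit is set in the core's bitmap. */
            for (vpe = 1; vpe < nvpes; vpe++)
                    if (cfg->vpe_mask & (1u << vpe))
                            printf("VPE 0: starting VPE %u\n", vpe);

            /* Halt if VPE 0's own bit is clear, otherwise keep booting. */
            if (!(cfg->vpe_mask & 1u))
                    printf("VPE 0: halting itself\n");
            else
                    printf("VPE 0: continuing into the kernel\n");
    }

    int main(void)
    {
            /* The example above: only the second VPE (VPE 1) is onlined. */
            struct core_boot_config_model cfg = { .vpe_mask = 1u << 1 };

            model_core_entry(&cfg, 2);
            return 0;
    }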
-rw-r--r--  arch/mips/include/asm/smp-cps.h |  14
-rw-r--r--  arch/mips/kernel/asm-offsets.c  |  14
-rw-r--r--  arch/mips/kernel/cps-vec.S      | 282
-rw-r--r--  arch/mips/kernel/mips-cpc.c     |   2
-rw-r--r--  arch/mips/kernel/smp-cps.c      | 223
5 files changed, 374 insertions(+), 161 deletions(-)
diff --git a/arch/mips/include/asm/smp-cps.h b/arch/mips/include/asm/smp-cps.h
index d60d1a2180d1..d49279e92eb5 100644
--- a/arch/mips/include/asm/smp-cps.h
+++ b/arch/mips/include/asm/smp-cps.h
@@ -13,17 +13,23 @@
 
 #ifndef __ASSEMBLY__
 
-struct boot_config {
-	unsigned int core;
-	unsigned int vpe;
+struct vpe_boot_config {
 	unsigned long pc;
 	unsigned long sp;
 	unsigned long gp;
 };
 
-extern struct boot_config mips_cps_bootcfg;
+struct core_boot_config {
+	atomic_t vpe_mask;
+	struct vpe_boot_config *vpe_config;
+};
+
+extern struct core_boot_config *mips_cps_core_bootcfg;
 
 extern void mips_cps_core_entry(void);
+extern void mips_cps_core_init(void);
+
+extern struct vpe_boot_config *mips_cps_boot_vpes(void);
 
 #else /* __ASSEMBLY__ */
 
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index e085cde13dba..d63490d8c26e 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -487,10 +487,14 @@ void output_kvm_defines(void)
 void output_cps_defines(void)
 {
 	COMMENT(" MIPS CPS offsets. ");
-	OFFSET(BOOTCFG_CORE, boot_config, core);
-	OFFSET(BOOTCFG_VPE, boot_config, vpe);
-	OFFSET(BOOTCFG_PC, boot_config, pc);
-	OFFSET(BOOTCFG_SP, boot_config, sp);
-	OFFSET(BOOTCFG_GP, boot_config, gp);
+
+	OFFSET(COREBOOTCFG_VPEMASK, core_boot_config, vpe_mask);
+	OFFSET(COREBOOTCFG_VPECONFIG, core_boot_config, vpe_config);
+	DEFINE(COREBOOTCFG_SIZE, sizeof(struct core_boot_config));
+
+	OFFSET(VPEBOOTCFG_PC, vpe_boot_config, pc);
+	OFFSET(VPEBOOTCFG_SP, vpe_boot_config, sp);
+	OFFSET(VPEBOOTCFG_GP, vpe_boot_config, gp);
+	DEFINE(VPEBOOTCFG_SIZE, sizeof(struct vpe_boot_config));
 }
 #endif
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index f7a46db4b161..57ec18c7d17f 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -14,12 +14,33 @@
 #include <asm/asmmacro.h>
 #include <asm/cacheops.h>
 #include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
 
 #define GCR_CL_COHERENCE_OFS	0x2008
+#define GCR_CL_ID_OFS		0x2028
+
+.extern mips_cm_base
+
+.set noreorder
+
+	/*
+	 * Set dest to non-zero if the core supports the MT ASE, else zero. If
+	 * MT is not supported then branch to nomt.
+	 */
+	.macro	has_mt	dest, nomt
+	mfc0	\dest, CP0_CONFIG
+	bgez	\dest, \nomt
+	mfc0	\dest, CP0_CONFIG, 1
+	bgez	\dest, \nomt
+	mfc0	\dest, CP0_CONFIG, 2
+	bgez	\dest, \nomt
+	mfc0	\dest, CP0_CONFIG, 3
+	andi	\dest, \dest, MIPS_CONF3_MT
+	beqz	\dest, \nomt
+	.endm
 
 .section .text.cps-vec
 .balign 0x1000
-.set noreorder
 
 LEAF(mips_cps_core_entry)
 	/*
@@ -134,21 +155,24 @@ dcache_done:
 	jr	t0
 	nop
 
-1:	/* We're up, cached & coherent */
+	/*
+	 * We're up, cached & coherent. Perform any further required core-level
+	 * initialisation.
+	 */
+1:	jal	mips_cps_core_init
+	nop
 
 	/*
-	 * TODO: We should check the VPE number we intended to boot here, and
-	 *	 if non-zero we should start that VPE and stop this one. For
-	 *	 the moment this doesn't matter since CPUs are brought up
-	 *	 sequentially and in order, but once hotplug is implemented
-	 *	 this will need revisiting.
+	 * Boot any other VPEs within this core that should be online, and
+	 * deactivate this VPE if it should be offline.
 	 */
+	jal	mips_cps_boot_vpes
+	nop
 
 	/* Off we go! */
-	la	t0, mips_cps_bootcfg
-	lw	t1, BOOTCFG_PC(t0)
-	lw	gp, BOOTCFG_GP(t0)
-	lw	sp, BOOTCFG_SP(t0)
+	lw	t1, VPEBOOTCFG_PC(v0)
+	lw	gp, VPEBOOTCFG_GP(v0)
+	lw	sp, VPEBOOTCFG_SP(v0)
 	jr	t1
 	nop
 	END(mips_cps_core_entry)
@@ -189,3 +213,237 @@ LEAF(excep_ejtag)
 	jr	k0
 	nop
 	END(excep_ejtag)
+
+LEAF(mips_cps_core_init)
+#ifdef CONFIG_MIPS_MT
+	/* Check that the core implements the MT ASE */
+	has_mt	t0, 3f
+	nop
+
+	.set	push
+	.set	mt
+
+	/* Only allow 1 TC per VPE to execute... */
+	dmt
+
+	/* ...and for the moment only 1 VPE */
+	dvpe
+	la	t1, 1f
+	jr.hb	t1
+	nop
+
+	/* Enter VPE configuration state */
+1:	mfc0	t0, CP0_MVPCONTROL
+	ori	t0, t0, MVPCONTROL_VPC
+	mtc0	t0, CP0_MVPCONTROL
+
+	/* Retrieve the number of VPEs within the core */
+	mfc0	t0, CP0_MVPCONF0
+	srl	t0, t0, MVPCONF0_PVPE_SHIFT
+	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
+	addi	t7, t0, 1
+
+	/* If there's only 1, we're done */
+	beqz	t0, 2f
+	nop
+
+	/* Loop through each VPE within this core */
+	li	t5, 1
+
+1:	/* Operate on the appropriate TC */
+	mtc0	t5, CP0_VPECONTROL
+	ehb
+
+	/* Bind TC to VPE (1:1 TC:VPE mapping) */
+	mttc0	t5, CP0_TCBIND
+
+	/* Set exclusive TC, non-active, master */
+	li	t0, VPECONF0_MVP
+	sll	t1, t5, VPECONF0_XTC_SHIFT
+	or	t0, t0, t1
+	mttc0	t0, CP0_VPECONF0
+
+	/* Set TC non-active, non-allocatable */
+	mttc0	zero, CP0_TCSTATUS
+
+	/* Set TC halted */
+	li	t0, TCHALT_H
+	mttc0	t0, CP0_TCHALT
+
+	/* Next VPE */
+	addi	t5, t5, 1
+	slt	t0, t5, t7
+	bnez	t0, 1b
+	nop
+
+	/* Leave VPE configuration state */
+2:	mfc0	t0, CP0_MVPCONTROL
+	xori	t0, t0, MVPCONTROL_VPC
+	mtc0	t0, CP0_MVPCONTROL
+
+3:	.set	pop
+#endif
+	jr	ra
+	nop
+	END(mips_cps_core_init)
+
+LEAF(mips_cps_boot_vpes)
+	/* Retrieve CM base address */
+	la	t0, mips_cm_base
+	lw	t0, 0(t0)
+
+	/* Calculate a pointer to this cores struct core_boot_config */
+	lw	t0, GCR_CL_ID_OFS(t0)
+	li	t1, COREBOOTCFG_SIZE
+	mul	t0, t0, t1
+	la	t1, mips_cps_core_bootcfg
+	lw	t1, 0(t1)
+	addu	t0, t0, t1
+
+	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
+	has_mt	t6, 1f
+	li	t9, 0
+
+	/* Find the number of VPEs present in the core */
+	mfc0	t1, CP0_MVPCONF0
+	srl	t1, t1, MVPCONF0_PVPE_SHIFT
+	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
+	addi	t1, t1, 1
+
+	/* Calculate a mask for the VPE ID from EBase.CPUNum */
+	clz	t1, t1
+	li	t2, 31
+	subu	t1, t2, t1
+	li	t2, 1
+	sll	t1, t2, t1
+	addiu	t1, t1, -1
+
+	/* Retrieve the VPE ID from EBase.CPUNum */
+	mfc0	t9, $15, 1
+	and	t9, t9, t1
+
+1:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
+	li	t1, VPEBOOTCFG_SIZE
+	mul	v0, t9, t1
+	lw	t7, COREBOOTCFG_VPECONFIG(t0)
+	addu	v0, v0, t7
+
+#ifdef CONFIG_MIPS_MT
+
+	/* If the core doesn't support MT then return */
+	bnez	t6, 1f
+	nop
+	jr	ra
+	nop
+
+	.set	push
+	.set	mt
+
+1:	/* Enter VPE configuration state */
+	dvpe
+	la	t1, 1f
+	jr.hb	t1
+	nop
+1:	mfc0	t1, CP0_MVPCONTROL
+	ori	t1, t1, MVPCONTROL_VPC
+	mtc0	t1, CP0_MVPCONTROL
+	ehb
+
+	/* Loop through each VPE */
+	lw	t6, COREBOOTCFG_VPEMASK(t0)
+	move	t8, t6
+	li	t5, 0
+
+	/* Check whether the VPE should be running. If not, skip it */
+1:	andi	t0, t6, 1
+	beqz	t0, 2f
+	nop
+
+	/* Operate on the appropriate TC */
+	mfc0	t0, CP0_VPECONTROL
+	ori	t0, t0, VPECONTROL_TARGTC
+	xori	t0, t0, VPECONTROL_TARGTC
+	or	t0, t0, t5
+	mtc0	t0, CP0_VPECONTROL
+	ehb
+
+	/* Skip the VPE if its TC is not halted */
+	mftc0	t0, CP0_TCHALT
+	beqz	t0, 2f
+	nop
+
+	/* Calculate a pointer to the VPEs struct vpe_boot_config */
+	li	t0, VPEBOOTCFG_SIZE
+	mul	t0, t0, t5
+	addu	t0, t0, t7
+
+	/* Set the TC restart PC */
+	lw	t1, VPEBOOTCFG_PC(t0)
+	mttc0	t1, CP0_TCRESTART
+
+	/* Set the TC stack pointer */
+	lw	t1, VPEBOOTCFG_SP(t0)
+	mttgpr	t1, sp
+
+	/* Set the TC global pointer */
+	lw	t1, VPEBOOTCFG_GP(t0)
+	mttgpr	t1, gp
+
+	/* Copy config from this VPE */
+	mfc0	t0, CP0_CONFIG
+	mttc0	t0, CP0_CONFIG
+
+	/* Ensure no software interrupts are pending */
+	mttc0	zero, CP0_CAUSE
+	mttc0	zero, CP0_STATUS
+
+	/* Set TC active, not interrupt exempt */
+	mftc0	t0, CP0_TCSTATUS
+	li	t1, ~TCSTATUS_IXMT
+	and	t0, t0, t1
+	ori	t0, t0, TCSTATUS_A
+	mttc0	t0, CP0_TCSTATUS
+
+	/* Clear the TC halt bit */
+	mttc0	zero, CP0_TCHALT
+
+	/* Set VPE active */
+	mftc0	t0, CP0_VPECONF0
+	ori	t0, t0, VPECONF0_VPA
+	mttc0	t0, CP0_VPECONF0
+
+	/* Next VPE */
+2:	srl	t6, t6, 1
+	addi	t5, t5, 1
+	bnez	t6, 1b
+	nop
+
+	/* Leave VPE configuration state */
+	mfc0	t1, CP0_MVPCONTROL
+	xori	t1, t1, MVPCONTROL_VPC
+	mtc0	t1, CP0_MVPCONTROL
+	ehb
+	evpe
+
+	/* Check whether this VPE is meant to be running */
+	li	t0, 1
+	sll	t0, t0, t9
+	and	t0, t0, t8
+	bnez	t0, 2f
+	nop
+
+	/* This VPE should be offline, halt the TC */
+	li	t0, TCHALT_H
+	mtc0	t0, CP0_TCHALT
+	la	t0, 1f
+1:	jr.hb	t0
+	nop
+
+2:	.set	pop
+
+#endif /* CONFIG_MIPS_MT */
+
+	/* Return */
+	jr	ra
+	nop
+	END(mips_cps_boot_vpes)
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index 2368fc5ccf1e..ba473608a347 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -9,6 +9,8 @@
  */
 
 #include <linux/errno.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
 
 #include <asm/mips-cm.h>
 #include <asm/mips-cpc.h>
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 536eec0d21b6..af90e82f94bf 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -26,98 +26,37 @@
 
 static DECLARE_BITMAP(core_power, NR_CPUS);
 
-struct boot_config mips_cps_bootcfg;
+struct core_boot_config *mips_cps_core_bootcfg;
 
-static void init_core(void)
+static unsigned core_vpe_count(unsigned core)
 {
-	unsigned int nvpes, t;
-	u32 mvpconf0, vpeconf0, vpecontrol, tcstatus, tcbind, status;
+	unsigned cfg;
 
-	if (!cpu_has_mipsmt)
-		return;
-
-	/* Enter VPE configuration state */
-	dvpe();
-	set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-	/* Retrieve the count of VPEs in this core */
-	mvpconf0 = read_c0_mvpconf0();
-	nvpes = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
-	smp_num_siblings = nvpes;
-
-	for (t = 1; t < nvpes; t++) {
-		/* Use a 1:1 mapping of TC index to VPE index */
-		settc(t);
-
-		/* Bind 1 TC to this VPE */
-		tcbind = read_tc_c0_tcbind();
-		tcbind &= ~TCBIND_CURVPE;
-		tcbind |= t << TCBIND_CURVPE_SHIFT;
-		write_tc_c0_tcbind(tcbind);
-
-		/* Set exclusive TC, non-active, master */
-		vpeconf0 = read_vpe_c0_vpeconf0();
-		vpeconf0 &= ~(VPECONF0_XTC | VPECONF0_VPA);
-		vpeconf0 |= t << VPECONF0_XTC_SHIFT;
-		vpeconf0 |= VPECONF0_MVP;
-		write_vpe_c0_vpeconf0(vpeconf0);
-
-		/* Declare TC non-active, non-allocatable & interrupt exempt */
-		tcstatus = read_tc_c0_tcstatus();
-		tcstatus &= ~(TCSTATUS_A | TCSTATUS_DA);
-		tcstatus |= TCSTATUS_IXMT;
-		write_tc_c0_tcstatus(tcstatus);
-
-		/* Halt the TC */
-		write_tc_c0_tchalt(TCHALT_H);
-
-		/* Allow only 1 TC to execute */
-		vpecontrol = read_vpe_c0_vpecontrol();
-		vpecontrol &= ~VPECONTROL_TE;
-		write_vpe_c0_vpecontrol(vpecontrol);
-
-		/* Copy (most of) Status from VPE 0 */
-		status = read_c0_status();
-		status &= ~(ST0_IM | ST0_IE | ST0_KSU);
-		status |= ST0_CU0;
-		write_vpe_c0_status(status);
-
-		/* Copy Config from VPE 0 */
-		write_vpe_c0_config(read_c0_config());
-		write_vpe_c0_config7(read_c0_config7());
-
-		/* Ensure no software interrupts are pending */
-		write_vpe_c0_cause(0);
-
-		/* Sync Count */
-		write_vpe_c0_count(read_c0_count());
-	}
+	if (!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
+		return 1;
 
-	/* Leave VPE configuration state */
-	clear_c0_mvpcontrol(MVPCONTROL_VPC);
+	write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);
+	cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
+	return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
 }
 
 static void __init cps_smp_setup(void)
 {
 	unsigned int ncores, nvpes, core_vpes;
 	int c, v;
-	u32 core_cfg, *entry_code;
+	u32 *entry_code;
 
 	/* Detect & record VPE topology */
 	ncores = mips_cm_numcores();
 	pr_info("VPE topology ");
 	for (c = nvpes = 0; c < ncores; c++) {
-		if (cpu_has_mipsmt && config_enabled(CONFIG_MIPS_MT_SMP)) {
-			write_gcr_cl_other(c << CM_GCR_Cx_OTHER_CORENUM_SHF);
-			core_cfg = read_gcr_co_config();
-			core_vpes = ((core_cfg & CM_GCR_Cx_CONFIG_PVPE_MSK) >>
-				     CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
-		} else {
-			core_vpes = 1;
-		}
-
+		core_vpes = core_vpe_count(c);
 		pr_cont("%c%u", c ? ',' : '{', core_vpes);
 
+		/* Use the number of VPEs in core 0 for smp_num_siblings */
+		if (!c)
+			smp_num_siblings = core_vpes;
+
 		for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
 			cpu_data[nvpes + v].core = c;
 #ifdef CONFIG_MIPS_MT_SMP
@@ -140,12 +79,8 @@ static void __init cps_smp_setup(void)
 	/* Core 0 is powered up (we're running on it) */
 	bitmap_set(core_power, 0, 1);
 
-	/* Disable MT - we only want to run 1 TC per VPE */
-	if (cpu_has_mipsmt)
-		dmt();
-
 	/* Initialise core 0 */
-	init_core();
+	mips_cps_core_init();
 
 	/* Patch the start of mips_cps_core_entry to provide the CM base */
 	entry_code = (u32 *)&mips_cps_core_entry;
@@ -157,15 +92,60 @@ static void __init cps_smp_setup(void)
 
 static void __init cps_prepare_cpus(unsigned int max_cpus)
 {
+	unsigned ncores, core_vpes, c;
+
 	mips_mt_set_cpuoptions();
+
+	/* Allocate core boot configuration structs */
+	ncores = mips_cm_numcores();
+	mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
+					GFP_KERNEL);
+	if (!mips_cps_core_bootcfg) {
+		pr_err("Failed to allocate boot config for %u cores\n", ncores);
+		goto err_out;
+	}
+
+	/* Allocate VPE boot configuration structs */
+	for (c = 0; c < ncores; c++) {
+		core_vpes = core_vpe_count(c);
+		mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
+				sizeof(*mips_cps_core_bootcfg[c].vpe_config),
+				GFP_KERNEL);
+		if (!mips_cps_core_bootcfg[c].vpe_config) {
+			pr_err("Failed to allocate %u VPE boot configs\n",
+			       core_vpes);
+			goto err_out;
+		}
+	}
+
+	/* Mark this CPU as booted */
+	atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask,
+		   1 << cpu_vpe_id(&current_cpu_data));
+
+	return;
+err_out:
+	/* Clean up allocations */
+	if (mips_cps_core_bootcfg) {
+		for (c = 0; c < ncores; c++)
+			kfree(mips_cps_core_bootcfg[c].vpe_config);
+		kfree(mips_cps_core_bootcfg);
+		mips_cps_core_bootcfg = NULL;
+	}
+
+	/* Effectively disable SMP by declaring CPUs not present */
+	for_each_possible_cpu(c) {
+		if (c == 0)
+			continue;
+		set_cpu_present(c, false);
+	}
 }
 
-static void boot_core(struct boot_config *cfg)
+static void boot_core(unsigned core)
 {
 	u32 access;
 
 	/* Select the appropriate core */
-	write_gcr_cl_other(cfg->core << CM_GCR_Cx_OTHER_CORENUM_SHF);
+	write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);
 
 	/* Set its reset vector */
 	write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));
@@ -175,15 +155,12 @@ static void boot_core(struct boot_config *cfg)
 
 	/* Ensure the core can access the GCRs */
 	access = read_gcr_access();
-	access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + cfg->core);
+	access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core);
 	write_gcr_access(access);
 
-	/* Copy cfg */
-	mips_cps_bootcfg = *cfg;
-
 	if (mips_cpc_present()) {
 		/* Select the appropriate core */
-		write_cpc_cl_other(cfg->core << CPC_Cx_OTHER_CORENUM_SHF);
+		write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF);
 
 		/* Reset the core */
 		write_cpc_co_cmd(CPC_Cx_CMD_RESET);
@@ -193,77 +170,47 @@ static void boot_core(struct boot_config *cfg)
 	}
 
 	/* The core is now powered up */
-	bitmap_set(core_power, cfg->core, 1);
+	bitmap_set(core_power, core, 1);
 }
 
-static void boot_vpe(void *info)
+static void remote_vpe_boot(void *dummy)
 {
-	struct boot_config *cfg = info;
-	u32 tcstatus, vpeconf0;
-
-	/* Enter VPE configuration state */
-	dvpe();
-	set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-	settc(cfg->vpe);
-
-	/* Set the TC restart PC */
-	write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
-
-	/* Activate the TC, allow interrupts */
-	tcstatus = read_tc_c0_tcstatus();
-	tcstatus &= ~TCSTATUS_IXMT;
-	tcstatus |= TCSTATUS_A;
-	write_tc_c0_tcstatus(tcstatus);
-
-	/* Clear the TC halt bit */
-	write_tc_c0_tchalt(0);
-
-	/* Activate the VPE */
-	vpeconf0 = read_vpe_c0_vpeconf0();
-	vpeconf0 |= VPECONF0_VPA;
-	write_vpe_c0_vpeconf0(vpeconf0);
-
-	/* Set the stack & global pointer registers */
-	write_tc_gpr_sp(cfg->sp);
-	write_tc_gpr_gp(cfg->gp);
-
-	/* Leave VPE configuration state */
-	clear_c0_mvpcontrol(MVPCONTROL_VPC);
-
-	/* Enable other VPEs to execute */
-	evpe(EVPE_ENABLE);
+	mips_cps_boot_vpes();
 }
 
 static void cps_boot_secondary(int cpu, struct task_struct *idle)
 {
-	struct boot_config cfg;
+	unsigned core = cpu_data[cpu].core;
+	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
+	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
 	unsigned int remote;
 	int err;
 
-	cfg.core = cpu_data[cpu].core;
-	cfg.vpe = cpu_vpe_id(&cpu_data[cpu]);
-	cfg.pc = (unsigned long)&smp_bootstrap;
-	cfg.sp = __KSTK_TOS(idle);
-	cfg.gp = (unsigned long)task_thread_info(idle);
+	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
+	vpe_cfg->sp = __KSTK_TOS(idle);
+	vpe_cfg->gp = (unsigned long)task_thread_info(idle);
 
-	if (!test_bit(cfg.core, core_power)) {
+	atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);
+
+	if (!test_bit(core, core_power)) {
 		/* Boot a VPE on a powered down core */
-		boot_core(&cfg);
+		boot_core(core);
 		return;
 	}
 
-	if (cfg.core != current_cpu_data.core) {
+	if (core != current_cpu_data.core) {
 		/* Boot a VPE on another powered up core */
 		for (remote = 0; remote < NR_CPUS; remote++) {
-			if (cpu_data[remote].core != cfg.core)
+			if (cpu_data[remote].core != core)
 				continue;
 			if (cpu_online(remote))
 				break;
 		}
 		BUG_ON(remote >= NR_CPUS);
 
-		err = smp_call_function_single(remote, boot_vpe, &cfg, 1);
+		err = smp_call_function_single(remote, remote_vpe_boot,
+					       NULL, 1);
 		if (err)
 			panic("Failed to call remote CPU\n");
 		return;
@@ -272,7 +219,7 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle)
 	BUG_ON(!cpu_has_mipsmt);
 
 	/* Boot a VPE on this core */
-	boot_vpe(&cfg);
+	mips_cps_boot_vpes();
 }
 
 static void cps_init_secondary(void)
@@ -281,10 +228,6 @@ static void cps_init_secondary(void)
 	if (cpu_has_mipsmt)
 		dmt();
 
-	/* TODO: revisit this assumption once hotplug is implemented */
-	if (cpu_vpe_id(&current_cpu_data) == 0)
-		init_core();
-
 	change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
 			 STATUSF_IP6 | STATUSF_IP7);
 }