Diffstat (limited to 'arch/mips/cavium-octeon/smp.c')
-rw-r--r--  arch/mips/cavium-octeon/smp.c  170
1 file changed, 85 insertions(+), 85 deletions(-)
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 6d99b9d8887d..391cefe556b3 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -3,7 +3,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2004-2008 Cavium Networks
+ * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
  */
 #include <linux/cpu.h>
 #include <linux/init.h>
@@ -27,7 +27,8 @@ volatile unsigned long octeon_processor_sp;
 volatile unsigned long octeon_processor_gp;
 
 #ifdef CONFIG_HOTPLUG_CPU
-static unsigned int InitTLBStart_addr;
+uint64_t octeon_bootloader_entry_addr;
+EXPORT_SYMBOL(octeon_bootloader_entry_addr);
 #endif
 
 static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
@@ -80,20 +81,13 @@ static inline void octeon_send_ipi_mask(const struct cpumask *mask,
 static void octeon_smp_hotplug_setup(void)
 {
 #ifdef CONFIG_HOTPLUG_CPU
-        uint32_t labi_signature;
+        struct linux_app_boot_info *labi;
 
-        labi_signature =
-                cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
-                                        LABI_ADDR_IN_BOOTLOADER +
-                                        offsetof(struct linux_app_boot_info,
-                                                 labi_signature)));
-        if (labi_signature != LABI_SIGNATURE)
-                pr_err("The bootloader version on this board is incorrect\n");
-        InitTLBStart_addr =
-                cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
-                                        LABI_ADDR_IN_BOOTLOADER +
-                                        offsetof(struct linux_app_boot_info,
-                                                 InitTLBStart_addr)));
+        labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
+        if (labi->labi_signature != LABI_SIGNATURE)
+                panic("The bootloader version on this board is incorrect.");
+
+        octeon_bootloader_entry_addr = labi->InitTLBStart_addr;
 #endif
 }
 
@@ -102,24 +96,47 @@ static void octeon_smp_setup(void)
         const int coreid = cvmx_get_core_num();
         int cpus;
         int id;
-
         int core_mask = octeon_get_boot_coremask();
+#ifdef CONFIG_HOTPLUG_CPU
+        unsigned int num_cores = cvmx_octeon_num_cores();
+#endif
+
+        /* The present CPUs are initially just the boot cpu (CPU 0). */
+        for (id = 0; id < NR_CPUS; id++) {
+                set_cpu_possible(id, id == 0);
+                set_cpu_present(id, id == 0);
+        }
 
-        cpus_clear(cpu_possible_map);
         __cpu_number_map[coreid] = 0;
         __cpu_logical_map[0] = coreid;
-        cpu_set(0, cpu_possible_map);
 
+        /* The present CPUs get the lowest CPU numbers. */
         cpus = 1;
-        for (id = 0; id < 16; id++) {
+        for (id = 0; id < NR_CPUS; id++) {
                 if ((id != coreid) && (core_mask & (1 << id))) {
-                        cpu_set(cpus, cpu_possible_map);
+                        set_cpu_possible(cpus, true);
+                        set_cpu_present(cpus, true);
                         __cpu_number_map[id] = cpus;
                         __cpu_logical_map[cpus] = id;
                         cpus++;
                 }
         }
-        cpu_present_map = cpu_possible_map;
+
+#ifdef CONFIG_HOTPLUG_CPU
+        /*
+         * The possible CPUs are all those present on the chip.  We
+         * will assign CPU numbers for possible cores as well.  Cores
+         * are always consecutively numberd from 0.
+         */
+        for (id = 0; id < num_cores && id < NR_CPUS; id++) {
+                if (!(core_mask & (1 << id))) {
+                        set_cpu_possible(cpus, true);
+                        __cpu_number_map[id] = cpus;
+                        __cpu_logical_map[cpus] = id;
+                        cpus++;
+                }
+        }
+#endif
 
         octeon_smp_hotplug_setup();
 }
@@ -158,18 +175,21 @@ static void octeon_init_secondary(void)
 {
         const int coreid = cvmx_get_core_num();
         union cvmx_ciu_intx_sum0 interrupt_enable;
+        unsigned int sr;
 
 #ifdef CONFIG_HOTPLUG_CPU
-        unsigned int cur_exception_base;
+        struct linux_app_boot_info *labi;
 
-        cur_exception_base = cvmx_read64_uint32(
-                CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
-                             LABI_ADDR_IN_BOOTLOADER +
-                             offsetof(struct linux_app_boot_info,
-                                      cur_exception_base)));
-        /* cur_exception_base is incremented in bootloader after setting */
-        write_c0_ebase((unsigned int)(cur_exception_base - EXCEPTION_BASE_INCR));
+        labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
+
+        if (labi->labi_signature != LABI_SIGNATURE)
+                panic("The bootloader version on this board is incorrect.");
 #endif
+
+        sr = set_c0_status(ST0_BEV);
+        write_c0_ebase((u32)ebase);
+        write_c0_status(sr);
+
         octeon_check_cpu_bist();
         octeon_init_cvmcount();
         /*
@@ -276,8 +296,8 @@ static int octeon_cpu_disable(void)
 static void octeon_cpu_die(unsigned int cpu)
 {
         int coreid = cpu_logical_map(cpu);
-        uint32_t avail_coremask;
-        struct cvmx_bootmem_named_block_desc *block_desc;
+        uint32_t mask, new_mask;
+        const struct cvmx_bootmem_named_block_desc *block_desc;
 
         while (per_cpu(cpu_state, cpu) != CPU_DEAD)
                 cpu_relax();
@@ -286,52 +306,40 @@ static void octeon_cpu_die(unsigned int cpu)
          * This is a bit complicated strategics of getting/settig available
          * cores mask, copied from bootloader
          */
+
+        mask = 1 << coreid;
         /* LINUX_APP_BOOT_BLOCK is initialized in bootoct binary */
         block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);
 
         if (!block_desc) {
-                avail_coremask =
-                    cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
-                                           LABI_ADDR_IN_BOOTLOADER +
-                                           offsetof
-                                           (struct linux_app_boot_info,
-                                            avail_coremask)));
-        } else {                /* alternative, already initialized */
-                avail_coremask =
-                    cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
-                                           block_desc->base_addr +
-                                           AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK));
-        }
-
-        avail_coremask |= 1 << coreid;
-
-        /* Setting avail_coremask for bootoct binary */
-        if (!block_desc) {
-                cvmx_write64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
-                                        LABI_ADDR_IN_BOOTLOADER +
-                                        offsetof(struct linux_app_boot_info,
-                                                 avail_coremask)),
-                                    avail_coremask);
-        } else {
-                cvmx_write64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
-                                        block_desc->base_addr +
-                                        AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK),
-                                    avail_coremask);
+                struct linux_app_boot_info *labi;
+
+                labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
+
+                labi->avail_coremask |= mask;
+                new_mask = labi->avail_coremask;
+        } else {                /* alternative, already initialized */
+                uint32_t *p = (uint32_t *)PHYS_TO_XKSEG_CACHED(block_desc->base_addr +
+                                AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
+                *p |= mask;
+                new_mask = *p;
         }
 
-        pr_info("Reset core %d. Available Coremask = %x\n", coreid,
-                avail_coremask);
+        pr_info("Reset core %d. Available Coremask = 0x%x \n", coreid, new_mask);
+        mb();
         cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
         cvmx_write_csr(CVMX_CIU_PP_RST, 0);
 }
 
 void play_dead(void)
 {
-        int coreid = cvmx_get_core_num();
+        int cpu = cpu_number_map(cvmx_get_core_num());
 
         idle_task_exit();
         octeon_processor_boot = 0xff;
-        per_cpu(cpu_state, coreid) = CPU_DEAD;
+        per_cpu(cpu_state, cpu) = CPU_DEAD;
+
+        mb();
 
         while (1)       /* core will be reset here */
                 ;
@@ -344,29 +352,27 @@ static void start_after_reset(void)
         kernel_entry(0, 0, 0);  /* set a2 = 0 for secondary core */
 }
 
-int octeon_update_boot_vector(unsigned int cpu)
+static int octeon_update_boot_vector(unsigned int cpu)
 {
 
         int coreid = cpu_logical_map(cpu);
-        unsigned int avail_coremask;
-        struct cvmx_bootmem_named_block_desc *block_desc;
+        uint32_t avail_coremask;
+        const struct cvmx_bootmem_named_block_desc *block_desc;
         struct boot_init_vector *boot_vect =
-                (struct boot_init_vector *) cvmx_phys_to_ptr(0x0 +
-                                                  BOOTLOADER_BOOT_VECTOR);
+                (struct boot_init_vector *)PHYS_TO_XKSEG_CACHED(BOOTLOADER_BOOT_VECTOR);
 
         block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);
 
         if (!block_desc) {
-                avail_coremask =
-                    cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
-                                           LABI_ADDR_IN_BOOTLOADER +
-                                           offsetof(struct linux_app_boot_info,
-                                                    avail_coremask)));
+                struct linux_app_boot_info *labi;
+
+                labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
+
+                avail_coremask = labi->avail_coremask;
+                labi->avail_coremask &= ~(1 << coreid);
         } else {                /* alternative, already initialized */
-                avail_coremask =
-                    cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
-                                           block_desc->base_addr +
-                                           AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK));
+                avail_coremask = *(uint32_t *)PHYS_TO_XKSEG_CACHED(
+                        block_desc->base_addr + AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
         }
 
         if (!(avail_coremask & (1 << coreid))) {
@@ -377,9 +383,9 @@ int octeon_update_boot_vector(unsigned int cpu)
 
         boot_vect[coreid].app_start_func_addr =
             (uint32_t) (unsigned long) start_after_reset;
-        boot_vect[coreid].code_addr = InitTLBStart_addr;
+        boot_vect[coreid].code_addr = octeon_bootloader_entry_addr;
 
-        CVMX_SYNC;
+        mb();
 
         cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask);
 
@@ -405,17 +411,11 @@ static int __cpuinit octeon_cpu_callback(struct notifier_block *nfb,
         return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata octeon_cpu_notifier = {
-        .notifier_call = octeon_cpu_callback,
-};
-
 static int __cpuinit register_cavium_notifier(void)
 {
-        register_hotcpu_notifier(&octeon_cpu_notifier);
-
+        hotcpu_notifier(octeon_cpu_callback, 0);
         return 0;
 }
-
 late_initcall(register_cavium_notifier);
 
 #endif  /* CONFIG_HOTPLUG_CPU */