path: root/arch/mn10300/kernel/smp.c
author     Akira Takeuchi <takeuchi.akr@jp.panasonic.com>   2010-10-27 12:28:55 -0400
committer  David Howells <dhowells@redhat.com>              2010-10-27 12:28:55 -0400
commit     368dd5acd154b09c043cc4392a74da01599b37d5 (patch)
tree       dd94ae3d044f6e774dec2437613515bd6b46dacb /arch/mn10300/kernel/smp.c
parent     04157a6e7df99fd5ed64955233d6e00ab6613614 (diff)
MN10300: Add Panasonic AM34 subarch and implement SMP
Implement the Panasonic MN10300 AM34 CPU subarch and implement SMP support
for MN10300. Also implement support for the MN2WS0060 processor and the
ASB2364 evaluation board which are AM34 based.

Signed-off-by: Akira Takeuchi <takeuchi.akr@jp.panasonic.com>
Signed-off-by: Kiyoshi Owada <owada.kiyoshi@jp.panasonic.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Diffstat (limited to 'arch/mn10300/kernel/smp.c')
-rw-r--r--   arch/mn10300/kernel/smp.c   1141
1 file changed, 1141 insertions, 0 deletions
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c
new file mode 100644
index 000000000000..b80234c28e0d
--- /dev/null
+++ b/arch/mn10300/kernel/smp.c
@@ -0,0 +1,1141 @@
1/* SMP support routines.
2 *
3 * Copyright (C) 2006-2008 Panasonic Corporation
4 * All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/interrupt.h>
17#include <linux/spinlock.h>
18#include <linux/init.h>
19#include <linux/jiffies.h>
20#include <linux/cpumask.h>
21#include <linux/err.h>
22#include <linux/kernel.h>
23#include <linux/delay.h>
24#include <linux/sched.h>
25#include <linux/profile.h>
26#include <linux/smp.h>
27#include <asm/tlbflush.h>
28#include <asm/system.h>
29#include <asm/bitops.h>
30#include <asm/processor.h>
31#include <asm/bug.h>
32#include <asm/exceptions.h>
33#include <asm/hardirq.h>
34#include <asm/fpu.h>
35#include <asm/mmu_context.h>
36#include <asm/thread_info.h>
37#include <asm/cpu-regs.h>
38#include <asm/intctl-regs.h>
39#include "internal.h"
40
41#ifdef CONFIG_HOTPLUG_CPU
42#include <linux/cpu.h>
43#include <asm/cacheflush.h>
44
45static unsigned long sleep_mode[NR_CPUS];
46
47static void run_sleep_cpu(unsigned int cpu);
48static void run_wakeup_cpu(unsigned int cpu);
49#endif /* CONFIG_HOTPLUG_CPU */
50
51/*
52 * Debug Message function
53 */
54
55#undef DEBUG_SMP
56#ifdef DEBUG_SMP
57#define Dprintk(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
58#else
59#define Dprintk(fmt, ...) no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
60#endif
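/*
 * Editorial note (illustrative, not part of this patch): when DEBUG_SMP is
 * left undefined, the no_printk() form above generates no output but keeps
 * every Dprintk() call site compiling against its arguments, so statements
 * such as the one below do not bit-rot while debugging is switched off:
 *
 *	Dprintk("Booting CPU#%d\n", cpu_id);
 */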
61
62/* Timeout value in msec for smp_nmi_call_function(); zero means no timeout. */
63#define CALL_FUNCTION_NMI_IPI_TIMEOUT 0
64
65/*
66 * Structure and data for smp_nmi_call_function().
67 */
68struct nmi_call_data_struct {
69 smp_call_func_t func;
70 void *info;
71 cpumask_t started;
72 cpumask_t finished;
73 int wait;
74 char size_alignment[0]
75 __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
76} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
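/*
 * Editorial note (illustrative, not part of this patch): the zero-length,
 * cacheline-aligned member plus the structure-level alignment round the
 * structure up to whole cache lines, so the explicit flush/invalidate done in
 * hotplug_cpu_nmi_call_function() below never touches neighbouring data.  A
 * minimal sketch of the effect, assuming 32-byte cache lines:
 *
 *	struct padded {
 *		int x;
 *		char pad[0] __attribute__((__aligned__(32)));
 *	} __attribute__((__aligned__(32)));
 *
 *	BUILD_BUG_ON(sizeof(struct padded) != 32);	// never triggers
 */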
77
78static DEFINE_SPINLOCK(smp_nmi_call_lock);
79static struct nmi_call_data_struct *nmi_call_data;
80
81/*
82 * Data structures and variables
83 */
84static cpumask_t cpu_callin_map; /* Bitmask of callin CPUs */
85static cpumask_t cpu_callout_map; /* Bitmask of callout CPUs */
86cpumask_t cpu_boot_map; /* Bitmask of boot APs */
87unsigned long start_stack[NR_CPUS - 1];
88
89/*
90 * Per CPU parameters
91 */
92struct mn10300_cpuinfo cpu_data[NR_CPUS] __cacheline_aligned;
93
94static int cpucount; /* The count of boot CPUs */
95static cpumask_t smp_commenced_mask;
96cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
97
98/*
99 * Function Prototypes
100 */
101static int do_boot_cpu(int);
102static void smp_show_cpu_info(int cpu_id);
103static void smp_callin(void);
104static void smp_online(void);
105static void smp_store_cpu_info(int);
106static void smp_cpu_init(void);
107static void smp_tune_scheduling(void);
108static void send_IPI_mask(const cpumask_t *cpumask, int irq);
109static void init_ipi(void);
110
111/*
112 * IPI Initialization interrupt definitions
113 */
114static void mn10300_ipi_disable(unsigned int irq);
115static void mn10300_ipi_enable(unsigned int irq);
116static void mn10300_ipi_ack(unsigned int irq);
117static void mn10300_ipi_nop(unsigned int irq);
118
119static struct irq_chip mn10300_ipi_type = {
120 .name = "cpu_ipi",
121 .disable = mn10300_ipi_disable,
122 .enable = mn10300_ipi_enable,
123 .ack = mn10300_ipi_ack,
124 .eoi = mn10300_ipi_nop
125};
126
127static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id);
128static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id);
129static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id);
130
131static struct irqaction reschedule_ipi = {
132 .handler = smp_reschedule_interrupt,
133 .name = "smp reschedule IPI"
134};
135static struct irqaction call_function_ipi = {
136 .handler = smp_call_function_interrupt,
137 .name = "smp call function IPI"
138};
139static struct irqaction local_timer_ipi = {
140 .handler = smp_ipi_timer_interrupt,
141 .flags = IRQF_DISABLED,
142 .name = "smp local timer IPI"
143};
144
145/**
146 * init_ipi - Initialise the IPI mechanism
147 */
148static void init_ipi(void)
149{
150 unsigned long flags;
151 u16 tmp16;
152
153 /* set up the reschedule IPI */
154 set_irq_chip_and_handler(RESCHEDULE_IPI,
155 &mn10300_ipi_type, handle_percpu_irq);
156 setup_irq(RESCHEDULE_IPI, &reschedule_ipi);
157 set_intr_level(RESCHEDULE_IPI, RESCHEDULE_GxICR_LV);
158 mn10300_ipi_enable(RESCHEDULE_IPI);
159
160 /* set up the call function IPI */
161 set_irq_chip_and_handler(CALL_FUNC_SINGLE_IPI,
162 &mn10300_ipi_type, handle_percpu_irq);
163 setup_irq(CALL_FUNC_SINGLE_IPI, &call_function_ipi);
164 set_intr_level(CALL_FUNC_SINGLE_IPI, CALL_FUNCTION_GxICR_LV);
165 mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);
166
167 /* set up the local timer IPI */
168 set_irq_chip_and_handler(LOCAL_TIMER_IPI,
169 &mn10300_ipi_type, handle_percpu_irq);
170 setup_irq(LOCAL_TIMER_IPI, &local_timer_ipi);
171 set_intr_level(LOCAL_TIMER_IPI, LOCAL_TIMER_GxICR_LV);
172 mn10300_ipi_enable(LOCAL_TIMER_IPI);
173
174#ifdef CONFIG_MN10300_CACHE_ENABLED
175 /* set up the cache flush IPI */
176 flags = arch_local_cli_save();
177 __set_intr_stub(NUM2EXCEP_IRQ_LEVEL(FLUSH_CACHE_GxICR_LV),
178 mn10300_low_ipi_handler);
179 GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
180 mn10300_ipi_enable(FLUSH_CACHE_IPI);
181 arch_local_irq_restore(flags);
182#endif
183
184 /* set up the NMI call function IPI */
185 flags = arch_local_cli_save();
186 GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
187 tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
188 arch_local_irq_restore(flags);
189
190 /* set up the SMP boot IPI */
191 flags = arch_local_cli_save();
192 __set_intr_stub(NUM2EXCEP_IRQ_LEVEL(SMP_BOOT_GxICR_LV),
193 mn10300_low_ipi_handler);
194 arch_local_irq_restore(flags);
195}
196
197/**
198 * mn10300_ipi_shutdown - Shut down handling of an IPI
199 * @irq: The IPI to be shut down.
200 */
201static void mn10300_ipi_shutdown(unsigned int irq)
202{
203 unsigned long flags;
204 u16 tmp;
205
206 flags = arch_local_cli_save();
207
208 tmp = GxICR(irq);
209 GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
210 tmp = GxICR(irq);
211
212 arch_local_irq_restore(flags);
213}
214
215/**
216 * mn10300_ipi_enable - Enable an IPI
217 * @irq: The IPI to be enabled.
218 */
219static void mn10300_ipi_enable(unsigned int irq)
220{
221 unsigned long flags;
222 u16 tmp;
223
224 flags = arch_local_cli_save();
225
226 tmp = GxICR(irq);
227 GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
228 tmp = GxICR(irq);
229
230 arch_local_irq_restore(flags);
231}
232
233/**
234 * mn10300_ipi_disable - Disable an IPI
235 * @irq: The IPI to be disabled.
236 */
237static void mn10300_ipi_disable(unsigned int irq)
238{
239 unsigned long flags;
240 u16 tmp;
241
242 flags = arch_local_cli_save();
243
244 tmp = GxICR(irq);
245 GxICR(irq) = tmp & GxICR_LEVEL;
246 tmp = GxICR(irq);
247
248 arch_local_irq_restore(flags);
249}
250
251/**
252 * mn10300_ipi_ack - Acknowledge an IPI interrupt in the PIC
253 * @irq: The IPI to be acknowledged.
254 *
255 * Clear the interrupt detection flag for the IPI on the appropriate interrupt
256 * channel in the PIC.
257 */
258static void mn10300_ipi_ack(unsigned int irq)
259{
260 unsigned long flags;
261 u16 tmp;
262
263 flags = arch_local_cli_save();
264 GxICR_u8(irq) = GxICR_DETECT;
265 tmp = GxICR(irq);
266 arch_local_irq_restore(flags);
267}
268
269/**
270 * mn10300_ipi_nop - Dummy IPI action
271 * @irq: The IPI to be acted upon.
272 */
273static void mn10300_ipi_nop(unsigned int irq)
274{
275}
276
277/**
278 * send_IPI_mask - Send IPIs to all CPUs in list
279 * @cpumask: The list of CPUs to target.
280 * @irq: The IPI request to be sent.
281 *
282 * Send the specified IPI to all the CPUs in the list, not waiting for them to
283 * finish before returning. The caller is responsible for synchronisation if
284 * that is needed.
285 */
286static void send_IPI_mask(const cpumask_t *cpumask, int irq)
287{
288 int i;
289 u16 tmp;
290
291 for (i = 0; i < NR_CPUS; i++) {
292 if (cpu_isset(i, *cpumask)) {
293 /* send IPI */
294 tmp = CROSS_GxICR(irq, i);
295 CROSS_GxICR(irq, i) =
296 tmp | GxICR_REQUEST | GxICR_DETECT;
297 tmp = CROSS_GxICR(irq, i); /* flush write buffer */
298 }
299 }
300}
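/*
 * Editorial note (illustrative, not part of this patch): the loop above does
 * a read/modify/write of the target CPU's interrupt control register and then
 * reads it back; the dummy read pushes the posted write out of this CPU's
 * write buffer so the IPI request has really reached the ICR before the
 * function returns.  Per target CPU the sequence is effectively:
 *
 *	tmp = CROSS_GxICR(irq, i);
 *	CROSS_GxICR(irq, i) = tmp | GxICR_REQUEST | GxICR_DETECT;
 *	tmp = CROSS_GxICR(irq, i);	// read back to flush the write buffer
 */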
301
302/**
303 * send_IPI_self - Send an IPI to this CPU.
304 * @irq: The IPI request to be sent.
305 *
306 * Send the specified IPI to the current CPU.
307 */
308void send_IPI_self(int irq)
309{
310 send_IPI_mask(cpumask_of(smp_processor_id()), irq);
311}
312
313/**
314 * send_IPI_allbutself - Send IPIs to all the other CPUs.
315 * @irq: The IPI request to be sent.
316 *
317 * Send the specified IPI to all CPUs in the system barring the current one,
318 * not waiting for them to finish before returning. The caller is responsible
319 * for synchronisation if that is needed.
320 */
321void send_IPI_allbutself(int irq)
322{
323 cpumask_t cpumask;
324
325 cpumask = cpu_online_map;
326 cpu_clear(smp_processor_id(), cpumask);
327 send_IPI_mask(&cpumask, irq);
328}
329
330void arch_send_call_function_ipi_mask(const struct cpumask *mask)
331{
332 BUG();
333 /*send_IPI_mask(mask, CALL_FUNCTION_IPI);*/
334}
335
336void arch_send_call_function_single_ipi(int cpu)
337{
338 send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI);
339}
340
341/**
342 * smp_send_reschedule - Send reschedule IPI to a CPU
343 * @cpu: The CPU to target.
344 */
345void smp_send_reschedule(int cpu)
346{
347 send_IPI_mask(cpumask_of(cpu), RESCHEDULE_IPI);
348}
349
350/**
351 * smp_nmi_call_function - Send a call function NMI IPI to all CPUs
352 * @func: The function to ask to be run.
353 * @info: The context data to pass to that function.
354 * @wait: If true, wait (atomically) until function is run on all CPUs.
355 *
356 * Send a non-maskable request to all CPUs in the system, requesting them to
357 * run the specified function with the given context data, and, potentially, to
358 * wait for completion of that function on all CPUs.
359 *
360 * Returns 0 if successful, -ETIMEDOUT if we were asked to wait, but hit the
361 * timeout.
362 */
363int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
364{
365 struct nmi_call_data_struct data;
366 unsigned long flags;
367 unsigned int cnt;
368 int cpus, ret = 0;
369
370 cpus = num_online_cpus() - 1;
371 if (cpus < 1)
372 return 0;
373
374 data.func = func;
375 data.info = info;
376 data.started = cpu_online_map;
377 cpu_clear(smp_processor_id(), data.started);
378 data.wait = wait;
379 if (wait)
380 data.finished = data.started;
381
382 spin_lock_irqsave(&smp_nmi_call_lock, flags);
383 nmi_call_data = &data;
384 smp_mb();
385
386 /* Send a message to all other CPUs and wait for them to respond */
387 send_IPI_allbutself(CALL_FUNCTION_NMI_IPI);
388
389 /* Wait for response */
390 if (CALL_FUNCTION_NMI_IPI_TIMEOUT > 0) {
391 for (cnt = 0;
392 cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
393 !cpus_empty(data.started);
394 cnt++)
395 mdelay(1);
396
397 if (wait && cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT) {
398 for (cnt = 0;
399 cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
400 !cpus_empty(data.finished);
401 cnt++)
402 mdelay(1);
403 }
404
405 if (cnt >= CALL_FUNCTION_NMI_IPI_TIMEOUT)
406 ret = -ETIMEDOUT;
407
408 } else {
409 /* If timeout value is zero, wait until cpumask has been
410 * cleared */
411 while (!cpus_empty(data.started))
412 barrier();
413 if (wait)
414 while (!cpus_empty(data.finished))
415 barrier();
416 }
417
418 spin_unlock_irqrestore(&smp_nmi_call_lock, flags);
419 return ret;
420}
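/*
 * Editorial sketch (illustrative, not part of this patch): a caller that
 * needs the other CPUs to run a routine even while they have maskable
 * interrupts disabled could use the NMI variant like this; dump_my_state()
 * is a hypothetical helper, not something defined in this file:
 *
 *	static void dump_my_state(void *info)
 *	{
 *		printk(KERN_INFO "CPU#%d alive\n", smp_processor_id());
 *	}
 *
 *	void dump_all_cpus(void)
 *	{
 *		dump_my_state(NULL);				// this CPU
 *		smp_nmi_call_function(dump_my_state, NULL, 1);	// the rest, wait
 *	}
 */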
421
422/**
423 * stop_this_cpu - Callback to stop a CPU.
424 * @unused: Callback context (ignored).
425 */
426void stop_this_cpu(void *unused)
427{
428 static volatile int stopflag;
429 unsigned long flags;
430
431#ifdef CONFIG_GDBSTUB
432 /* If another CPU invoked smp_send_stop() while we were being
433  * single-stepped, clear procindebug to avoid a deadlock.
434 */
435 atomic_set(&procindebug[smp_processor_id()], 0);
436#endif /* CONFIG_GDBSTUB */
437
438 flags = arch_local_cli_save();
439 cpu_clear(smp_processor_id(), cpu_online_map);
440
441 while (!stopflag)
442 cpu_relax();
443
444 cpu_set(smp_processor_id(), cpu_online_map);
445 arch_local_irq_restore(flags);
446}
447
448/**
449 * smp_send_stop - Send a stop request to all CPUs.
450 */
451void smp_send_stop(void)
452{
453 smp_nmi_call_function(stop_this_cpu, NULL, 0);
454}
455
456/**
457 * smp_reschedule_interrupt - Reschedule IPI handler
458 * @irq: The interrupt number.
459 * @dev_id: The device ID.
460 *
461 * We need do nothing here, since the scheduling will be effected on our way
462 * back through entry.S.
463 *
464 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
465 */
466static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
467{
468 /* do nothing */
469 return IRQ_HANDLED;
470}
471
472/**
473 * smp_call_function_interrupt - Call function IPI handler
474 * @irq: The interrupt number.
475 * @dev_id: The device ID.
476 *
477 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
478 */
479static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id)
480{
481 /* generic_smp_call_function_interrupt(); */
482 generic_smp_call_function_single_interrupt();
483 return IRQ_HANDLED;
484}
485
486/**
487 * smp_nmi_call_function_interrupt - Non-maskable call function IPI handler
488 */
489void smp_nmi_call_function_interrupt(void)
490{
491 smp_call_func_t func = nmi_call_data->func;
492 void *info = nmi_call_data->info;
493 int wait = nmi_call_data->wait;
494
495 /* Notify the initiating CPU that I've grabbed the data and am about to
496 * execute the function
497 */
498 smp_mb();
499 cpu_clear(smp_processor_id(), nmi_call_data->started);
500 (*func)(info);
501
502 if (wait) {
503 smp_mb();
504 cpu_clear(smp_processor_id(), nmi_call_data->finished);
505 }
506}
507
508/**
509 * smp_ipi_timer_interrupt - Local timer IPI handler
510 * @irq: The interrupt number.
511 * @dev_id: The device ID.
512 *
513 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
514 */
515static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id)
516{
517 return local_timer_interrupt();
518}
519
520void __init smp_init_cpus(void)
521{
522 int i;
523 for (i = 0; i < NR_CPUS; i++) {
524 set_cpu_possible(i, true);
525 set_cpu_present(i, true);
526 }
527}
528
529/**
530 * smp_cpu_init - Initialise AP in start_secondary.
531 *
532 * For this Application Processor, set up init_mm, initialise the FPU and
533 * set up the interrupt level 0-6 settings.
534 */
535static void __init smp_cpu_init(void)
536{
537 unsigned long flags;
538 int cpu_id = smp_processor_id();
539 u16 tmp16;
540
541 if (test_and_set_bit(cpu_id, &cpu_initialized)) {
542 printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id);
543 for (;;)
544 local_irq_enable();
545 }
546 printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);
547
548 atomic_inc(&init_mm.mm_count);
549 current->active_mm = &init_mm;
550 BUG_ON(current->mm);
551
552 enter_lazy_tlb(&init_mm, current);
553
554 /* Force FPU initialization */
555 clear_using_fpu(current);
556
557 GxICR(CALL_FUNC_SINGLE_IPI) = CALL_FUNCTION_GxICR_LV | GxICR_DETECT;
558 mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);
559
560 GxICR(LOCAL_TIMER_IPI) = LOCAL_TIMER_GxICR_LV | GxICR_DETECT;
561 mn10300_ipi_enable(LOCAL_TIMER_IPI);
562
563 GxICR(RESCHEDULE_IPI) = RESCHEDULE_GxICR_LV | GxICR_DETECT;
564 mn10300_ipi_enable(RESCHEDULE_IPI);
565
566#ifdef CONFIG_MN10300_CACHE_ENABLED
567 GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
568 mn10300_ipi_enable(FLUSH_CACHE_IPI);
569#endif
570
571 mn10300_ipi_shutdown(SMP_BOOT_IRQ);
572
573 /* Set up the non-maskable call function IPI */
574 flags = arch_local_cli_save();
575 GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
576 tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
577 arch_local_irq_restore(flags);
578}
579
580/**
581 * smp_prepare_cpu_init - Initialise CPU in startup_secondary
582 *
583 * Set up the interrupt level 0-6 settings and initialise the gdbstub ICR.
584 */
585void smp_prepare_cpu_init(void)
586{
587 int loop;
588
589 /* Set the interrupt vector registers */
590 IVAR0 = EXCEP_IRQ_LEVEL0;
591 IVAR1 = EXCEP_IRQ_LEVEL1;
592 IVAR2 = EXCEP_IRQ_LEVEL2;
593 IVAR3 = EXCEP_IRQ_LEVEL3;
594 IVAR4 = EXCEP_IRQ_LEVEL4;
595 IVAR5 = EXCEP_IRQ_LEVEL5;
596 IVAR6 = EXCEP_IRQ_LEVEL6;
597
598 /* Disable all interrupts and set to priority 6 (lowest) */
599 for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
600 GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;
601
602#ifdef CONFIG_GDBSTUB
603 /* initialise GDB-stub */
604 do {
605 unsigned long flags;
606 u16 tmp16;
607
608 flags = arch_local_cli_save();
609 GxICR(GDB_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
610 tmp16 = GxICR(GDB_NMI_IPI);
611 arch_local_irq_restore(flags);
612 } while (0);
613#endif
614}
615
616/**
617 * start_secondary - Activate a secondary CPU (AP)
618 * @unused: Thread parameter (ignored).
619 */
620int __init start_secondary(void *unused)
621{
622 smp_cpu_init();
623
624 smp_callin();
625 while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
626 cpu_relax();
627
628 local_flush_tlb();
629 preempt_disable();
630 smp_online();
631
632 cpu_idle();
633 return 0;
634}
635
636/**
637 * smp_prepare_cpus - Boot up secondary CPUs (APs)
638 * @max_cpus: Maximum number of CPUs to boot.
639 *
640 * Call do_boot_cpu, and boot up APs.
641 */
642void __init smp_prepare_cpus(unsigned int max_cpus)
643{
644 int phy_id;
645
646 /* Setup boot CPU information */
647 smp_store_cpu_info(0);
648 smp_tune_scheduling();
649
650 init_ipi();
651
652 /* If SMP should be disabled, then finish */
653 if (max_cpus == 0) {
654 printk(KERN_INFO "SMP mode deactivated.\n");
655 goto smp_done;
656 }
657
658 /* Boot secondary CPUs (for which phy_id > 0) */
659 for (phy_id = 0; phy_id < NR_CPUS; phy_id++) {
660 /* Don't boot primary CPU */
661 if (max_cpus <= cpucount + 1)
662 continue;
663 if (phy_id != 0)
664 do_boot_cpu(phy_id);
665 set_cpu_possible(phy_id, true);
666 smp_show_cpu_info(phy_id);
667 }
668
669smp_done:
670 Dprintk("Boot done.\n");
671}
672
673/**
674 * smp_store_cpu_info - Save a CPU's information
675 * @cpu: The CPU to save for.
676 *
677 * Save boot_cpu_data and jiffy for the specified CPU.
678 */
679static void __init smp_store_cpu_info(int cpu)
680{
681 struct mn10300_cpuinfo *ci = &cpu_data[cpu];
682
683 *ci = boot_cpu_data;
684 ci->loops_per_jiffy = loops_per_jiffy;
685 ci->type = CPUREV;
686}
687
688/**
689 * smp_tune_scheduling - Set time slice value
690 *
691 * Nothing to do here.
692 */
693static void __init smp_tune_scheduling(void)
694{
695}
696
697/**
698 * do_boot_cpu: Boot up one CPU
699 * @phy_id: Physical ID of CPU to boot.
700 *
701 * Send an IPI to a secondary CPU to boot it. Returns 0 on success, 1
702 * otherwise.
703 */
704static int __init do_boot_cpu(int phy_id)
705{
706 struct task_struct *idle;
707 unsigned long send_status, callin_status;
708 int timeout, cpu_id;
709
710 send_status = GxICR_REQUEST;
711 callin_status = 0;
712 timeout = 0;
713 cpu_id = phy_id;
714
715 cpucount++;
716
717 /* Create idle thread for this CPU */
718 idle = fork_idle(cpu_id);
719 if (IS_ERR(idle))
720 panic("Failed fork for CPU#%d.", cpu_id);
721
722 idle->thread.pc = (unsigned long)start_secondary;
723
724 printk(KERN_NOTICE "Booting CPU#%d\n", cpu_id);
725 start_stack[cpu_id - 1] = idle->thread.sp;
726
727 task_thread_info(idle)->cpu = cpu_id;
728
729 /* Send boot IPI to AP */
730 send_IPI_mask(cpumask_of(phy_id), SMP_BOOT_IRQ);
731
732 Dprintk("Waiting for send to finish...\n");
733
734 /* Wait up to 100ms for the AP to receive the boot IPI */
735 do {
736 udelay(1000);
737 send_status =
738 CROSS_GxICR(SMP_BOOT_IRQ, phy_id) & GxICR_REQUEST;
739 } while (send_status == GxICR_REQUEST && timeout++ < 100);
740
741 Dprintk("Waiting for cpu_callin_map.\n");
742
743 if (send_status == 0) {
744 /* Allow AP to start initializing */
745 cpu_set(cpu_id, cpu_callout_map);
746
747 /* Wait for setting cpu_callin_map */
748 timeout = 0;
749 do {
750 udelay(1000);
751 callin_status = cpu_isset(cpu_id, cpu_callin_map);
752 } while (callin_status == 0 && timeout++ < 5000);
753
754 if (callin_status == 0)
755 Dprintk("Not responding.\n");
756 } else {
757 printk(KERN_WARNING "IPI not delivered.\n");
758 }
759
760 if (send_status == GxICR_REQUEST || callin_status == 0) {
761 cpu_clear(cpu_id, cpu_callout_map);
762 cpu_clear(cpu_id, cpu_callin_map);
763 cpu_clear(cpu_id, cpu_initialized);
764 cpucount--;
765 return 1;
766 }
767 return 0;
768}
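/*
 * Editorial note (summary of the bring-up handshake implemented by
 * do_boot_cpu() above and by smp_callin()/smp_online() below; not part of
 * this patch):
 *
 *	boot CPU (do_boot_cpu)                 AP (start_secondary)
 *	----------------------                 --------------------
 *	send SMP_BOOT_IRQ
 *	poll GxICR_REQUEST clear (<=100ms)     low-level boot code starts the AP
 *	set AP's bit in cpu_callout_map   -->  smp_callin() sees the callout,
 *	                                       calibrates delay, stores cpuinfo
 *	poll cpu_callin_map (<=5s)        <--  sets its bit in cpu_callin_map
 *	return 0                               waits for smp_commenced_mask
 *	                                       (set by __cpu_up()), then
 *	                                       smp_online() -> cpu_online_map
 */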
769
770/**
771 * smp_show_cpu_info - Show SMP CPU information
772 * @cpu: The CPU of interest.
773 */
774static void __init smp_show_cpu_info(int cpu)
775{
776 struct mn10300_cpuinfo *ci = &cpu_data[cpu];
777
778 printk(KERN_INFO
779 "CPU#%d : ioclk speed: %lu.%02luMHz : bogomips : %lu.%02lu\n",
780 cpu,
781 MN10300_IOCLK / 1000000,
782 (MN10300_IOCLK / 10000) % 100,
783 ci->loops_per_jiffy / (500000 / HZ),
784 (ci->loops_per_jiffy / (5000 / HZ)) % 100);
785}
786
787/**
788 * smp_callin - Set cpu_callin_map of the current CPU ID
789 */
790static void __init smp_callin(void)
791{
792 unsigned long timeout;
793 int cpu;
794
795 cpu = smp_processor_id();
796 timeout = jiffies + (2 * HZ);
797
798 if (cpu_isset(cpu, cpu_callin_map)) {
799 printk(KERN_ERR "CPU#%d already present.\n", cpu);
800 BUG();
801 }
802 Dprintk("CPU#%d waiting for CALLOUT\n", cpu);
803
804 /* Wait for AP startup 2s total */
805 while (time_before(jiffies, timeout)) {
806 if (cpu_isset(cpu, cpu_callout_map))
807 break;
808 cpu_relax();
809 }
810
811 if (!time_before(jiffies, timeout)) {
812 printk(KERN_ERR
813 "BUG: CPU#%d started up but did not get a callout!\n",
814 cpu);
815 BUG();
816 }
817
818#ifdef CONFIG_CALIBRATE_DELAY
819 calibrate_delay(); /* Get our bogomips */
820#endif
821
822 /* Save our processor parameters */
823 smp_store_cpu_info(cpu);
824
825 /* Allow the boot processor to continue */
826 cpu_set(cpu, cpu_callin_map);
827}
828
829/**
830 * smp_online - Set cpu_online_map
831 */
832static void __init smp_online(void)
833{
834 int cpu;
835
836 cpu = smp_processor_id();
837
838 local_irq_enable();
839
840 cpu_set(cpu, cpu_online_map);
841 smp_wmb();
842}
843
844/**
845 * smp_cpus_done - Called once all CPUs have been booted
846 * @max_cpus: Maximum CPU count.
847 *
848 * Do nothing.
849 */
850void __init smp_cpus_done(unsigned int max_cpus)
851{
852}
853
854/*
855 * smp_prepare_boot_cpu - Set up stuff for the boot processor.
856 *
857 * Set up the cpu_online_map, cpu_callout_map and cpu_callin_map of the boot
858 * processor (CPU 0).
859 */
860void __devinit smp_prepare_boot_cpu(void)
861{
862 cpu_set(0, cpu_callout_map);
863 cpu_set(0, cpu_callin_map);
864 current_thread_info()->cpu = 0;
865}
866
867/*
868 * initialize_secondary - Initialise a secondary CPU (Application Processor).
869 *
870 * Set SP register and jump to thread's PC address.
871 */
872void initialize_secondary(void)
873{
874 asm volatile (
875 "mov %0,sp \n"
876 "jmp (%1) \n"
877 :
878 : "a"(current->thread.sp), "a"(current->thread.pc));
879}
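/*
 * Editorial note (illustrative, not part of this patch): the asm above loads
 * the stack pointer saved in the idle thread by do_boot_cpu() and jumps to
 * the saved PC, which do_boot_cpu() pointed at start_secondary().  Roughly:
 *
 *	sp = current->thread.sp;
 *	goto current->thread.pc;	// == start_secondary
 */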
880
881/**
882 * __cpu_up - Set smp_commenced_mask for the nominated CPU
883 * @cpu: The target CPU.
884 */
885int __devinit __cpu_up(unsigned int cpu)
886{
887 int timeout;
888
889#ifdef CONFIG_HOTPLUG_CPU
890 if (num_online_cpus() == 1)
891 disable_hlt();
892 if (sleep_mode[cpu])
893 run_wakeup_cpu(cpu);
894#endif /* CONFIG_HOTPLUG_CPU */
895
896 cpu_set(cpu, smp_commenced_mask);
897
898 /* Wait 5s total for a response */
899 for (timeout = 0 ; timeout < 5000 ; timeout++) {
900 if (cpu_isset(cpu, cpu_online_map))
901 break;
902 udelay(1000);
903 }
904
905 BUG_ON(!cpu_isset(cpu, cpu_online_map));
906 return 0;
907}
908
909/**
910 * setup_profiling_timer - Set up the profiling timer
911 * @multiplier - The frequency multiplier to use
912 *
913 * The frequency of the profiling timer can be changed by writing a multiplier
914 * value into /proc/profile.
915 */
916int setup_profiling_timer(unsigned int multiplier)
917{
918 return -EINVAL;
919}
920
921/*
922 * CPU hotplug routines
923 */
924#ifdef CONFIG_HOTPLUG_CPU
925
926static DEFINE_PER_CPU(struct cpu, cpu_devices);
927
928static int __init topology_init(void)
929{
930 int cpu, ret;
931
932 for_each_present_cpu(cpu) {
933 ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
934 if (ret)
935 printk(KERN_WARNING
936 "topology_init: register_cpu %d failed (%d)\n",
937 cpu, ret);
938 }
939 return 0;
940}
941
942subsys_initcall(topology_init);
943
944int __cpu_disable(void)
945{
946 int cpu = smp_processor_id();
947 if (cpu == 0)
948 return -EBUSY;
949
950 migrate_irqs();
951 cpu_clear(cpu, current->active_mm->cpu_vm_mask);
952 return 0;
953}
954
955void __cpu_die(unsigned int cpu)
956{
957 run_sleep_cpu(cpu);
958
959 if (num_online_cpus() == 1)
960 enable_hlt();
961}
962
963#ifdef CONFIG_MN10300_CACHE_ENABLED
964static inline void hotplug_cpu_disable_cache(void)
965{
966 int tmp;
967 asm volatile(
968 " movhu (%1),%0 \n"
969 " and %2,%0 \n"
970 " movhu %0,(%1) \n"
971 "1: movhu (%1),%0 \n"
972 " btst %3,%0 \n"
973 " bne 1b \n"
974 : "=&r"(tmp)
975 : "a"(&CHCTR),
976 "i"(~(CHCTR_ICEN | CHCTR_DCEN)),
977 "i"(CHCTR_ICBUSY | CHCTR_DCBUSY)
978 : "memory", "cc");
979}
980
981static inline void hotplug_cpu_enable_cache(void)
982{
983 int tmp;
984 asm volatile(
985 "movhu (%1),%0 \n"
986 "or %2,%0 \n"
987 "movhu %0,(%1) \n"
988 : "=&r"(tmp)
989 : "a"(&CHCTR),
990 "i"(CHCTR_ICEN | CHCTR_DCEN)
991 : "memory", "cc");
992}
993
994static inline void hotplug_cpu_invalidate_cache(void)
995{
996 int tmp;
997 asm volatile (
998 "movhu (%1),%0 \n"
999 "or %2,%0 \n"
1000 "movhu %0,(%1) \n"
1001 : "=&r"(tmp)
1002 : "a"(&CHCTR),
1003 "i"(CHCTR_ICINV | CHCTR_DCINV)
1004 : "cc");
1005}
1006
1007#else /* CONFIG_MN10300_CACHE_ENABLED */
1008#define hotplug_cpu_disable_cache() do {} while (0)
1009#define hotplug_cpu_enable_cache() do {} while (0)
1010#define hotplug_cpu_invalidate_cache() do {} while (0)
1011#endif /* CONFIG_MN10300_CACHE_ENABLED */
1012
1013/**
1014 * hotplug_cpu_nmi_call_function - Call a function on other CPUs for hotplug
1015 * @cpumask: List of target CPUs.
1016 * @func: The function to call on those CPUs.
1017 * @info: The context data for the function to be called.
1018 * @wait: Whether to wait for the calls to complete.
1019 *
1020 * Non-maskably call a function on another CPU for hotplug purposes.
1021 *
1022 * This function must be called with maskable interrupts disabled.
1023 */
1024static int hotplug_cpu_nmi_call_function(cpumask_t cpumask,
1025 smp_call_func_t func, void *info,
1026 int wait)
1027{
1028 /*
1029 * The address and the size of nmi_call_func_mask_data
1030 * need to be aligned on L1_CACHE_BYTES.
1031 */
1032 static struct nmi_call_data_struct nmi_call_func_mask_data
1033 __cacheline_aligned;
1034 unsigned long start, end;
1035
1036 start = (unsigned long)&nmi_call_func_mask_data;
1037 end = start + sizeof(struct nmi_call_data_struct);
1038
1039 nmi_call_func_mask_data.func = func;
1040 nmi_call_func_mask_data.info = info;
1041 nmi_call_func_mask_data.started = cpumask;
1042 nmi_call_func_mask_data.wait = wait;
1043 if (wait)
1044 nmi_call_func_mask_data.finished = cpumask;
1045
1046 spin_lock(&smp_nmi_call_lock);
1047 nmi_call_data = &nmi_call_func_mask_data;
1048 mn10300_local_dcache_flush_range(start, end);
1049 smp_wmb();
1050
1051 send_IPI_mask(&cpumask, CALL_FUNCTION_NMI_IPI);
1052
1053 do {
1054 mn10300_local_dcache_inv_range(start, end);
1055 barrier();
1056 } while (!cpus_empty(nmi_call_func_mask_data.started));
1057
1058 if (wait) {
1059 do {
1060 mn10300_local_dcache_inv_range(start, end);
1061 barrier();
1062 } while (!cpus_empty(nmi_call_func_mask_data.finished));
1063 }
1064
1065 spin_unlock(&smp_nmi_call_lock);
1066 return 0;
1067}
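/*
 * Editorial note (illustrative, not part of this patch): unlike
 * smp_nmi_call_function() above, this variant flushes the call data to RAM
 * before sending the IPI and invalidates it while polling, because a CPU
 * being put to sleep runs with its caches disabled (see prepare_sleep_cpu())
 * and can only communicate through memory.  Its users are the paired calls
 * in run_sleep_cpu() and run_wakeup_cpu() below, e.g.:
 *
 *	hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1);
 *	hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0);
 */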
1068
1069static void restart_wakeup_cpu(void)
1070{
1071 unsigned int cpu = smp_processor_id();
1072
1073 cpu_set(cpu, cpu_callin_map);
1074 local_flush_tlb();
1075 cpu_set(cpu, cpu_online_map);
1076 smp_wmb();
1077}
1078
1079static void prepare_sleep_cpu(void *unused)
1080{
1081 sleep_mode[smp_processor_id()] = 1;
1082 smp_mb();
1083 mn10300_local_dcache_flush_inv();
1084 hotplug_cpu_disable_cache();
1085 hotplug_cpu_invalidate_cache();
1086}
1087
1088/* When this function is called, IE=0 and NMID=0. */
1089static void sleep_cpu(void *unused)
1090{
1091 unsigned int cpu_id = smp_processor_id();
1092 /*
1093 * The CALL_FUNCTION_NMI_IPI for wakeup_cpu() must not be requested
1094 * before this CPU goes into SLEEP mode.
1095 */
1096 do {
1097 smp_mb();
1098 __sleep_cpu();
1099 } while (sleep_mode[cpu_id]);
1100 restart_wakeup_cpu();
1101}
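/*
 * Editorial note (illustrative, not part of this patch): __sleep_cpu()
 * presumably halts this CPU until an interrupt (in practice the
 * CALL_FUNCTION_NMI_IPI sent by run_wakeup_cpu()) arrives, so the loop above
 * tolerates spurious wakeups: the CPU goes straight back to sleep unless
 * wakeup_cpu() has cleared its sleep_mode[] slot, and only then falls
 * through to restart_wakeup_cpu().  In outline:
 *
 *	do {
 *		__sleep_cpu();			// wait for a wakeup NMI
 *	} while (sleep_mode[cpu_id]);		// cleared only by wakeup_cpu()
 *	restart_wakeup_cpu();
 */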
1102
1103static void run_sleep_cpu(unsigned int cpu)
1104{
1105 unsigned long flags;
1106 cpumask_t cpumask = *cpumask_of(cpu);
1107
1108 flags = arch_local_cli_save();
1109 hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1);
1110 hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0);
1111 udelay(1); /* delay for the cpu to sleep. */
1112 arch_local_irq_restore(flags);
1113}
1114
1115static void wakeup_cpu(void)
1116{
1117 hotplug_cpu_invalidate_cache();
1118 hotplug_cpu_enable_cache();
1119 smp_mb();
1120 sleep_mode[smp_processor_id()] = 0;
1121}
1122
1123static void run_wakeup_cpu(unsigned int cpu)
1124{
1125 unsigned long flags;
1126
1127 flags = arch_local_cli_save();
1128#if NR_CPUS == 2
1129 mn10300_local_dcache_flush_inv();
1130#else
1131 /*
1132 * Before waking up the cpu,
1133 * all online cpus should stop and flush D-Cache for global data.
1134 */
1135#error NR_CPUS > 2 is not supported when CONFIG_HOTPLUG_CPU=y.
1136#endif
1137 hotplug_cpu_nmi_call_function(*cpumask_of(cpu), wakeup_cpu, NULL, 1);
1138 arch_local_irq_restore(flags);
1139}
1140
1141#endif /* CONFIG_HOTPLUG_CPU */