aboutsummaryrefslogtreecommitdiffstats
path: root/arch/mn10300/kernel
diff options
context:
space:
mode:
authorAkira Takeuchi <takeuchi.akr@jp.panasonic.com>2010-10-27 12:28:55 -0400
committerDavid Howells <dhowells@redhat.com>2010-10-27 12:28:55 -0400
commit368dd5acd154b09c043cc4392a74da01599b37d5 (patch)
treedd94ae3d044f6e774dec2437613515bd6b46dacb /arch/mn10300/kernel
parent04157a6e7df99fd5ed64955233d6e00ab6613614 (diff)
MN10300: Add Panasonic AM34 subarch and implement SMP
Implement the Panasonic MN10300 AM34 CPU subarch and implement SMP support for MN10300. Also implement support for the MN2WS0060 processor and the ASB2364 evaluation board, which are AM34 based. Signed-off-by: Akira Takeuchi <takeuchi.akr@jp.panasonic.com> Signed-off-by: Kiyoshi Owada <owada.kiyoshi@jp.panasonic.com> Signed-off-by: David Howells <dhowells@redhat.com>
Diffstat (limited to 'arch/mn10300/kernel')
-rw-r--r--arch/mn10300/kernel/Makefile3
-rw-r--r--arch/mn10300/kernel/asm-offsets.c2
-rw-r--r--arch/mn10300/kernel/entry.S113
-rw-r--r--arch/mn10300/kernel/gdb-io-serial-low.S5
-rw-r--r--arch/mn10300/kernel/gdb-io-serial.c37
-rw-r--r--arch/mn10300/kernel/gdb-io-ttysm.c24
-rw-r--r--arch/mn10300/kernel/gdb-stub.c17
-rw-r--r--arch/mn10300/kernel/head.S196
-rw-r--r--arch/mn10300/kernel/internal.h12
-rw-r--r--arch/mn10300/kernel/irq.c266
-rw-r--r--arch/mn10300/kernel/mn10300-serial-low.S6
-rw-r--r--arch/mn10300/kernel/mn10300-serial.c210
-rw-r--r--arch/mn10300/kernel/mn10300-watchdog-low.S9
-rw-r--r--arch/mn10300/kernel/mn10300-watchdog.c100
-rw-r--r--arch/mn10300/kernel/process.c41
-rw-r--r--arch/mn10300/kernel/profile.c2
-rw-r--r--arch/mn10300/kernel/rtc.c41
-rw-r--r--arch/mn10300/kernel/setup.c75
-rw-r--r--arch/mn10300/kernel/smp-low.S97
-rw-r--r--arch/mn10300/kernel/smp.c1141
-rw-r--r--arch/mn10300/kernel/switch_to.S17
-rw-r--r--arch/mn10300/kernel/time.c32
-rw-r--r--arch/mn10300/kernel/traps.c18
23 files changed, 2224 insertions, 240 deletions
diff --git a/arch/mn10300/kernel/Makefile b/arch/mn10300/kernel/Makefile
index 99022351717a..5b41192f496b 100644
--- a/arch/mn10300/kernel/Makefile
+++ b/arch/mn10300/kernel/Makefile
@@ -10,8 +10,9 @@ obj-y := process.o signal.o entry.o traps.o irq.o \
10 ptrace.o setup.o time.o sys_mn10300.o io.o kthread.o \ 10 ptrace.o setup.o time.o sys_mn10300.o io.o kthread.o \
11 switch_to.o mn10300_ksyms.o kernel_execve.o $(fpu-obj-y) 11 switch_to.o mn10300_ksyms.o kernel_execve.o $(fpu-obj-y)
12 12
13obj-$(CONFIG_MN10300_WD_TIMER) += mn10300-watchdog.o mn10300-watchdog-low.o 13obj-$(CONFIG_SMP) += smp.o smp-low.o
14 14
15obj-$(CONFIG_MN10300_WD_TIMER) += mn10300-watchdog.o mn10300-watchdog-low.o
15 16
16obj-$(CONFIG_MN10300_TTYSM) += mn10300-serial.o mn10300-serial-low.o \ 17obj-$(CONFIG_MN10300_TTYSM) += mn10300-serial.o mn10300-serial-low.o \
17 mn10300-debug.o 18 mn10300-debug.o
diff --git a/arch/mn10300/kernel/asm-offsets.c b/arch/mn10300/kernel/asm-offsets.c
index 78e290e342fc..54cc5b6b13f2 100644
--- a/arch/mn10300/kernel/asm-offsets.c
+++ b/arch/mn10300/kernel/asm-offsets.c
@@ -66,7 +66,7 @@ void foo(void)
66 OFFSET(THREAD_SP, thread_struct, sp); 66 OFFSET(THREAD_SP, thread_struct, sp);
67 OFFSET(THREAD_A3, thread_struct, a3); 67 OFFSET(THREAD_A3, thread_struct, a3);
68 OFFSET(THREAD_USP, thread_struct, usp); 68 OFFSET(THREAD_USP, thread_struct, usp);
69 OFFSET(THREAD_FRAME, thread_struct, __frame); 69 OFFSET(THREAD_FRAME, thread_struct, frame);
70#ifdef CONFIG_FPU 70#ifdef CONFIG_FPU
71 OFFSET(THREAD_FPU_FLAGS, thread_struct, fpu_flags); 71 OFFSET(THREAD_FPU_FLAGS, thread_struct, fpu_flags);
72 OFFSET(THREAD_FPU_STATE, thread_struct, fpu_state); 72 OFFSET(THREAD_FPU_STATE, thread_struct, fpu_state);
diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S
index 355f68176771..f00b9bafcd3e 100644
--- a/arch/mn10300/kernel/entry.S
+++ b/arch/mn10300/kernel/entry.S
@@ -28,25 +28,17 @@
28#include <asm/asm-offsets.h> 28#include <asm/asm-offsets.h>
29#include <asm/frame.inc> 29#include <asm/frame.inc>
30 30
31#if defined(CONFIG_SMP) && defined(CONFIG_GDBSTUB)
32#include <asm/gdb-stub.h>
33#endif /* CONFIG_SMP && CONFIG_GDBSTUB */
34
31#ifdef CONFIG_PREEMPT 35#ifdef CONFIG_PREEMPT
32#define preempt_stop __cli 36#define preempt_stop LOCAL_IRQ_DISABLE
33#else 37#else
34#define preempt_stop 38#define preempt_stop
35#define resume_kernel restore_all 39#define resume_kernel restore_all
36#endif 40#endif
37 41
38 .macro __cli
39 and ~EPSW_IM,epsw
40 or EPSW_IE|MN10300_CLI_LEVEL,epsw
41 nop
42 nop
43 nop
44 .endm
45 .macro __sti
46 or EPSW_IE|EPSW_IM_7,epsw
47 .endm
48
49
50 .am33_2 42 .am33_2
51 43
52############################################################################### 44###############################################################################
@@ -88,7 +80,7 @@ syscall_call:
88syscall_exit: 80syscall_exit:
89 # make sure we don't miss an interrupt setting need_resched or 81 # make sure we don't miss an interrupt setting need_resched or
90 # sigpending between sampling and the rti 82 # sigpending between sampling and the rti
91 __cli 83 LOCAL_IRQ_DISABLE
92 mov (TI_flags,a2),d2 84 mov (TI_flags,a2),d2
93 btst _TIF_ALLWORK_MASK,d2 85 btst _TIF_ALLWORK_MASK,d2
94 bne syscall_exit_work 86 bne syscall_exit_work
@@ -105,7 +97,7 @@ restore_all:
105syscall_exit_work: 97syscall_exit_work:
106 btst _TIF_SYSCALL_TRACE,d2 98 btst _TIF_SYSCALL_TRACE,d2
107 beq work_pending 99 beq work_pending
108 __sti # could let syscall_trace_exit() call 100 LOCAL_IRQ_ENABLE # could let syscall_trace_exit() call
109 # schedule() instead 101 # schedule() instead
110 mov fp,d0 102 mov fp,d0
111 call syscall_trace_exit[],0 # do_syscall_trace(regs) 103 call syscall_trace_exit[],0 # do_syscall_trace(regs)
@@ -121,7 +113,7 @@ work_resched:
121 113
122 # make sure we don't miss an interrupt setting need_resched or 114 # make sure we don't miss an interrupt setting need_resched or
123 # sigpending between sampling and the rti 115 # sigpending between sampling and the rti
124 __cli 116 LOCAL_IRQ_DISABLE
125 117
126 # is there any work to be done other than syscall tracing? 118 # is there any work to be done other than syscall tracing?
127 mov (TI_flags,a2),d2 119 mov (TI_flags,a2),d2
@@ -168,7 +160,7 @@ ret_from_intr:
168ENTRY(resume_userspace) 160ENTRY(resume_userspace)
169 # make sure we don't miss an interrupt setting need_resched or 161 # make sure we don't miss an interrupt setting need_resched or
170 # sigpending between sampling and the rti 162 # sigpending between sampling and the rti
171 __cli 163 LOCAL_IRQ_DISABLE
172 164
173 # is there any work to be done on int/exception return? 165 # is there any work to be done on int/exception return?
174 mov (TI_flags,a2),d2 166 mov (TI_flags,a2),d2
@@ -178,7 +170,7 @@ ENTRY(resume_userspace)
178 170
179#ifdef CONFIG_PREEMPT 171#ifdef CONFIG_PREEMPT
180ENTRY(resume_kernel) 172ENTRY(resume_kernel)
181 __cli 173 LOCAL_IRQ_DISABLE
182 mov (TI_preempt_count,a2),d0 # non-zero preempt_count ? 174 mov (TI_preempt_count,a2),d0 # non-zero preempt_count ?
183 cmp 0,d0 175 cmp 0,d0
184 bne restore_all 176 bne restore_all
@@ -281,6 +273,79 @@ ENTRY(nmi_handler)
281 add -4,sp 273 add -4,sp
282 mov d0,(sp) 274 mov d0,(sp)
283 mov (TBR),d0 275 mov (TBR),d0
276
277#ifdef CONFIG_SMP
278 add -4,sp
279 mov d0,(sp) # save d0(TBR)
280 movhu (NMIAGR),d0
281 and NMIAGR_GN,d0
282 lsr 0x2,d0
283 cmp CALL_FUNCTION_NMI_IPI,d0
284 bne 5f # if not call function, jump
285
286 # function call nmi ipi
287 add 4,sp # no need to store TBR
288 mov GxICR_DETECT,d0 # clear NMI request
289 movbu d0,(GxICR(CALL_FUNCTION_NMI_IPI))
290 movhu (GxICR(CALL_FUNCTION_NMI_IPI)),d0
291 and ~EPSW_NMID,epsw # enable NMI
292
293 mov (sp),d0 # restore d0
294 SAVE_ALL
295 call smp_nmi_call_function_interrupt[],0
296 RESTORE_ALL
297
2985:
299#ifdef CONFIG_GDBSTUB
300 cmp GDB_NMI_IPI,d0
301 bne 3f # if not gdb nmi ipi, jump
302
303 # gdb nmi ipi
304 add 4,sp # no need to store TBR
305 mov GxICR_DETECT,d0 # clear NMI
306 movbu d0,(GxICR(GDB_NMI_IPI))
307 movhu (GxICR(GDB_NMI_IPI)),d0
308 and ~EPSW_NMID,epsw # enable NMI
309#ifdef CONFIG_MN10300_CACHE_ENABLED
310 mov (gdbstub_nmi_opr_type),d0
311 cmp GDBSTUB_NMI_CACHE_PURGE,d0
312 bne 4f # if not gdb cache purge, jump
313
314 # gdb cache purge nmi ipi
315 add -20,sp
316 mov d1,(4,sp)
317 mov a0,(8,sp)
318 mov a1,(12,sp)
319 mov mdr,d0
320 mov d0,(16,sp)
321 call gdbstub_local_purge_cache[],0
322 mov 0x1,d0
323 mov (CPUID),d1
324 asl d1,d0
325 mov gdbstub_nmi_cpumask,a0
326 bclr d0,(a0)
327 mov (4,sp),d1
328 mov (8,sp),a0
329 mov (12,sp),a1
330 mov (16,sp),d0
331 mov d0,mdr
332 add 20,sp
333 mov (sp),d0
334 add 4,sp
335 rti
3364:
337#endif /* CONFIG_MN10300_CACHE_ENABLED */
338 # gdb wait nmi ipi
339 mov (sp),d0
340 SAVE_ALL
341 call gdbstub_nmi_wait[],0
342 RESTORE_ALL
3433:
344#endif /* CONFIG_GDBSTUB */
345 mov (sp),d0 # restore TBR to d0
346 add 4,sp
347#endif /* CONFIG_SMP */
348
284 bra __common_exception_nonmi 349 bra __common_exception_nonmi
285 350
286ENTRY(__common_exception) 351ENTRY(__common_exception)
@@ -314,15 +379,21 @@ __common_exception_nonmi:
314 mov d0,(REG_ORIG_D0,fp) 379 mov d0,(REG_ORIG_D0,fp)
315 380
316#ifdef CONFIG_GDBSTUB 381#ifdef CONFIG_GDBSTUB
382#ifdef CONFIG_SMP
383 call gdbstub_busy_check[],0
384 and d0,d0 # check return value
385 beq 2f
386#else /* CONFIG_SMP */
317 btst 0x01,(gdbstub_busy) 387 btst 0x01,(gdbstub_busy)
318 beq 2f 388 beq 2f
389#endif /* CONFIG_SMP */
319 and ~EPSW_IE,epsw 390 and ~EPSW_IE,epsw
320 mov fp,d0 391 mov fp,d0
321 mov a2,d1 392 mov a2,d1
322 call gdbstub_exception[],0 # gdbstub itself caused an exception 393 call gdbstub_exception[],0 # gdbstub itself caused an exception
323 bra restore_all 394 bra restore_all
3242: 3952:
325#endif 396#endif /* CONFIG_GDBSTUB */
326 397
327 mov fp,d0 # arg 0: stacked register file 398 mov fp,d0 # arg 0: stacked register file
328 mov a2,d1 # arg 1: exception number 399 mov a2,d1 # arg 1: exception number
@@ -357,11 +428,7 @@ ENTRY(set_excp_vector)
357 add exception_table,d0 428 add exception_table,d0
358 mov d1,(d0) 429 mov d1,(d0)
359 mov 4,d1 430 mov 4,d1
360#if defined(CONFIG_MN10300_CACHE_WBACK)
361 jmp mn10300_dcache_flush_inv_range2
362#else
363 ret [],0 431 ret [],0
364#endif
365 432
366############################################################################### 433###############################################################################
367# 434#
diff --git a/arch/mn10300/kernel/gdb-io-serial-low.S b/arch/mn10300/kernel/gdb-io-serial-low.S
index 4998b24f5d3a..b1d0152e96cb 100644
--- a/arch/mn10300/kernel/gdb-io-serial-low.S
+++ b/arch/mn10300/kernel/gdb-io-serial-low.S
@@ -18,6 +18,7 @@
18#include <asm/thread_info.h> 18#include <asm/thread_info.h>
19#include <asm/frame.inc> 19#include <asm/frame.inc>
20#include <asm/intctl-regs.h> 20#include <asm/intctl-regs.h>
21#include <asm/irqflags.h>
21#include <unit/serial.h> 22#include <unit/serial.h>
22 23
23 .text 24 .text
@@ -69,7 +70,7 @@ gdbstub_io_rx_overflow:
69 bra gdbstub_io_rx_done 70 bra gdbstub_io_rx_done
70 71
71gdbstub_io_rx_enter: 72gdbstub_io_rx_enter:
72 or EPSW_IE|EPSW_IM_1,epsw 73 LOCAL_CHANGE_INTR_MASK_LEVEL(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL+1))
73 add -4,sp 74 add -4,sp
74 SAVE_ALL 75 SAVE_ALL
75 76
@@ -80,7 +81,7 @@ gdbstub_io_rx_enter:
80 mov fp,d0 81 mov fp,d0
81 call gdbstub_rx_irq[],0 # gdbstub_rx_irq(regs,excep) 82 call gdbstub_rx_irq[],0 # gdbstub_rx_irq(regs,excep)
82 83
83 and ~EPSW_IE,epsw 84 LOCAL_CLI
84 bclr 0x01,(gdbstub_busy) 85 bclr 0x01,(gdbstub_busy)
85 86
86 .globl gdbstub_return 87 .globl gdbstub_return
diff --git a/arch/mn10300/kernel/gdb-io-serial.c b/arch/mn10300/kernel/gdb-io-serial.c
index ae663dc717e9..0d5d63c91dc3 100644
--- a/arch/mn10300/kernel/gdb-io-serial.c
+++ b/arch/mn10300/kernel/gdb-io-serial.c
@@ -23,6 +23,7 @@
23#include <asm/exceptions.h> 23#include <asm/exceptions.h>
24#include <asm/serial-regs.h> 24#include <asm/serial-regs.h>
25#include <unit/serial.h> 25#include <unit/serial.h>
26#include <asm/smp.h>
26 27
27/* 28/*
28 * initialise the GDB stub 29 * initialise the GDB stub
@@ -45,22 +46,34 @@ void gdbstub_io_init(void)
45 XIRQxICR(GDBPORT_SERIAL_IRQ) = 0; 46 XIRQxICR(GDBPORT_SERIAL_IRQ) = 0;
46 tmp = XIRQxICR(GDBPORT_SERIAL_IRQ); 47 tmp = XIRQxICR(GDBPORT_SERIAL_IRQ);
47 48
49#if CONFIG_GDBSTUB_IRQ_LEVEL == 0
48 IVAR0 = EXCEP_IRQ_LEVEL0; 50 IVAR0 = EXCEP_IRQ_LEVEL0;
49 set_intr_stub(EXCEP_IRQ_LEVEL0, gdbstub_io_rx_handler); 51#elif CONFIG_GDBSTUB_IRQ_LEVEL == 1
52 IVAR1 = EXCEP_IRQ_LEVEL1;
53#elif CONFIG_GDBSTUB_IRQ_LEVEL == 2
54 IVAR2 = EXCEP_IRQ_LEVEL2;
55#elif CONFIG_GDBSTUB_IRQ_LEVEL == 3
56 IVAR3 = EXCEP_IRQ_LEVEL3;
57#elif CONFIG_GDBSTUB_IRQ_LEVEL == 4
58 IVAR4 = EXCEP_IRQ_LEVEL4;
59#elif CONFIG_GDBSTUB_IRQ_LEVEL == 5
60 IVAR5 = EXCEP_IRQ_LEVEL5;
61#else
62#error "Unknown irq level for gdbstub."
63#endif
64
65 set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL),
66 gdbstub_io_rx_handler);
50 67
51 XIRQxICR(GDBPORT_SERIAL_IRQ) &= ~GxICR_REQUEST; 68 XIRQxICR(GDBPORT_SERIAL_IRQ) &= ~GxICR_REQUEST;
52 XIRQxICR(GDBPORT_SERIAL_IRQ) = GxICR_ENABLE | GxICR_LEVEL_0; 69 XIRQxICR(GDBPORT_SERIAL_IRQ) =
70 GxICR_ENABLE | NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL);
53 tmp = XIRQxICR(GDBPORT_SERIAL_IRQ); 71 tmp = XIRQxICR(GDBPORT_SERIAL_IRQ);
54 72
55 GDBPORT_SERIAL_IER = UART_IER_RDI | UART_IER_RLSI; 73 GDBPORT_SERIAL_IER = UART_IER_RDI | UART_IER_RLSI;
56 74
57 /* permit level 0 IRQs to take place */ 75 /* permit level 0 IRQs to take place */
58 asm volatile( 76 local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
59 " and %0,epsw \n"
60 " or %1,epsw \n"
61 :
62 : "i"(~EPSW_IM), "i"(EPSW_IE | EPSW_IM_1)
63 );
64} 77}
65 78
66/* 79/*
@@ -87,6 +100,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
87{ 100{
88 unsigned ix; 101 unsigned ix;
89 u8 ch, st; 102 u8 ch, st;
103#if defined(CONFIG_MN10300_WD_TIMER)
104 int cpu;
105#endif
90 106
91 *_ch = 0xff; 107 *_ch = 0xff;
92 108
@@ -104,8 +120,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
104 if (nonblock) 120 if (nonblock)
105 return -EAGAIN; 121 return -EAGAIN;
106#ifdef CONFIG_MN10300_WD_TIMER 122#ifdef CONFIG_MN10300_WD_TIMER
107 watchdog_alert_counter = 0; 123 for (cpu = 0; cpu < NR_CPUS; cpu++)
108#endif /* CONFIG_MN10300_WD_TIMER */ 124 watchdog_alert_counter[cpu] = 0;
125#endif
109 goto try_again; 126 goto try_again;
110 } 127 }
111 128
diff --git a/arch/mn10300/kernel/gdb-io-ttysm.c b/arch/mn10300/kernel/gdb-io-ttysm.c
index a560bbc3137d..97dfda23342c 100644
--- a/arch/mn10300/kernel/gdb-io-ttysm.c
+++ b/arch/mn10300/kernel/gdb-io-ttysm.c
@@ -58,9 +58,12 @@ void __init gdbstub_io_init(void)
58 gdbstub_io_set_baud(115200); 58 gdbstub_io_set_baud(115200);
59 59
60 /* we want to get serial receive interrupts */ 60 /* we want to get serial receive interrupts */
61 set_intr_level(gdbstub_port->rx_irq, GxICR_LEVEL_0); 61 set_intr_level(gdbstub_port->rx_irq,
62 set_intr_level(gdbstub_port->tx_irq, GxICR_LEVEL_0); 62 NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL));
63 set_intr_stub(EXCEP_IRQ_LEVEL0, gdbstub_io_rx_handler); 63 set_intr_level(gdbstub_port->tx_irq,
64 NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL));
65 set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL),
66 gdbstub_io_rx_handler);
64 67
65 *gdbstub_port->rx_icr |= GxICR_ENABLE; 68 *gdbstub_port->rx_icr |= GxICR_ENABLE;
66 tmp = *gdbstub_port->rx_icr; 69 tmp = *gdbstub_port->rx_icr;
@@ -84,12 +87,7 @@ void __init gdbstub_io_init(void)
84 tmp = *gdbstub_port->_control; 87 tmp = *gdbstub_port->_control;
85 88
86 /* permit level 0 IRQs only */ 89 /* permit level 0 IRQs only */
87 asm volatile( 90 local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
88 " and %0,epsw \n"
89 " or %1,epsw \n"
90 :
91 : "i"(~EPSW_IM), "i"(EPSW_IE|EPSW_IM_1)
92 );
93} 91}
94 92
95/* 93/*
@@ -184,6 +182,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
184{ 182{
185 unsigned ix; 183 unsigned ix;
186 u8 ch, st; 184 u8 ch, st;
185#if defined(CONFIG_MN10300_WD_TIMER)
186 int cpu;
187#endif
187 188
188 *_ch = 0xff; 189 *_ch = 0xff;
189 190
@@ -201,8 +202,9 @@ try_again:
201 if (nonblock) 202 if (nonblock)
202 return -EAGAIN; 203 return -EAGAIN;
203#ifdef CONFIG_MN10300_WD_TIMER 204#ifdef CONFIG_MN10300_WD_TIMER
204 watchdog_alert_counter = 0; 205 for (cpu = 0; cpu < NR_CPUS; cpu++)
205#endif /* CONFIG_MN10300_WD_TIMER */ 206 watchdog_alert_counter[cpu] = 0;
207#endif
206 goto try_again; 208 goto try_again;
207 } 209 }
208 210
diff --git a/arch/mn10300/kernel/gdb-stub.c b/arch/mn10300/kernel/gdb-stub.c
index 41b11706c8ed..a5fc3f05309b 100644
--- a/arch/mn10300/kernel/gdb-stub.c
+++ b/arch/mn10300/kernel/gdb-stub.c
@@ -440,15 +440,11 @@ static const unsigned char gdbstub_insn_sizes[256] =
440 440
441static int __gdbstub_mark_bp(u8 *addr, int ix) 441static int __gdbstub_mark_bp(u8 *addr, int ix)
442{ 442{
443 if (addr < (u8 *) 0x70000000UL) 443 /* vmalloc area */
444 return 0; 444 if (((u8 *) VMALLOC_START <= addr) && (addr < (u8 *) VMALLOC_END))
445 /* 70000000-7fffffff: vmalloc area */
446 if (addr < (u8 *) 0x80000000UL)
447 goto okay; 445 goto okay;
448 if (addr < (u8 *) 0x8c000000UL) 446 /* SRAM, SDRAM */
449 return 0; 447 if (((u8 *) 0x80000000UL <= addr) && (addr < (u8 *) 0xa0000000UL))
450 /* 8c000000-93ffffff: SRAM, SDRAM */
451 if (addr < (u8 *) 0x94000000UL)
452 goto okay; 448 goto okay;
453 return 0; 449 return 0;
454 450
@@ -1197,9 +1193,8 @@ static int gdbstub(struct pt_regs *regs, enum exception_code excep)
1197 mn10300_set_gdbleds(1); 1193 mn10300_set_gdbleds(1);
1198 1194
1199 asm volatile("mov mdr,%0" : "=d"(mdr)); 1195 asm volatile("mov mdr,%0" : "=d"(mdr));
1200 asm volatile("mov epsw,%0" : "=d"(epsw)); 1196 local_save_flags(epsw);
1201 asm volatile("mov %0,epsw" 1197 local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
1202 :: "d"((epsw & ~EPSW_IM) | EPSW_IE | EPSW_IM_1));
1203 1198
1204 gdbstub_store_fpu(); 1199 gdbstub_store_fpu();
1205 1200
diff --git a/arch/mn10300/kernel/head.S b/arch/mn10300/kernel/head.S
index a81e34fba651..73e00fc78072 100644
--- a/arch/mn10300/kernel/head.S
+++ b/arch/mn10300/kernel/head.S
@@ -19,6 +19,12 @@
19#include <asm/frame.inc> 19#include <asm/frame.inc>
20#include <asm/param.h> 20#include <asm/param.h>
21#include <unit/serial.h> 21#include <unit/serial.h>
22#ifdef CONFIG_SMP
23#include <asm/smp.h>
24#include <asm/intctl-regs.h>
25#include <asm/cpu-regs.h>
26#include <proc/smp-regs.h>
27#endif /* CONFIG_SMP */
22 28
23 __HEAD 29 __HEAD
24 30
@@ -30,17 +36,51 @@
30 .globl _start 36 .globl _start
31 .type _start,@function 37 .type _start,@function
32_start: 38_start:
39#ifdef CONFIG_SMP
40 #
41 # If this is a secondary CPU (AP), then deal with that elsewhere
42 #
43 mov (CPUID),d3
44 and CPUID_MASK,d3
45 bne startup_secondary
46
47 #
48 # We're dealing with the primary CPU (BP) here, then.
49 # Keep BP's D0,D1,D2 register for boot check.
50 #
51
52 # Set up the Boot IPI for each secondary CPU
53 mov 0x1,a0
54loop_set_secondary_icr:
55 mov a0,a1
56 asl CROSS_ICR_CPU_SHIFT,a1
57 add CROSS_GxICR(SMP_BOOT_IRQ,0),a1
58 movhu (a1),d3
59 or GxICR_ENABLE|GxICR_LEVEL_0,d3
60 movhu d3,(a1)
61 movhu (a1),d3 # flush
62 inc a0
63 cmp NR_CPUS,a0
64 bne loop_set_secondary_icr
65#endif /* CONFIG_SMP */
66
33 # save commandline pointer 67 # save commandline pointer
34 mov d0,a3 68 mov d0,a3
35 69
36 # preload the PGD pointer register 70 # preload the PGD pointer register
37 mov swapper_pg_dir,d0 71 mov swapper_pg_dir,d0
38 mov d0,(PTBR) 72 mov d0,(PTBR)
73 clr d0
74 movbu d0,(PIDR)
39 75
40 # turn on the TLBs 76 # turn on the TLBs
41 mov MMUCTR_IIV|MMUCTR_DIV,d0 77 mov MMUCTR_IIV|MMUCTR_DIV,d0
42 mov d0,(MMUCTR) 78 mov d0,(MMUCTR)
79#ifdef CONFIG_AM34_2
80 mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE|MMUCTR_WTE,d0
81#else
43 mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE,d0 82 mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE,d0
83#endif
44 mov d0,(MMUCTR) 84 mov d0,(MMUCTR)
45 85
46 # turn on AM33v2 exception handling mode and set the trap table base 86 # turn on AM33v2 exception handling mode and set the trap table base
@@ -51,6 +91,11 @@ _start:
51 mov d0,(TBR) 91 mov d0,(TBR)
52 92
53 # invalidate and enable both of the caches 93 # invalidate and enable both of the caches
94#ifdef CONFIG_SMP
95 mov ECHCTR,a0
96 clr d0
97 mov d0,(a0)
98#endif
54 mov CHCTR,a0 99 mov CHCTR,a0
55 clr d0 100 clr d0
56 movhu d0,(a0) # turn off first 101 movhu d0,(a0) # turn off first
@@ -206,6 +251,44 @@ __no_parameters:
206 call processor_init[],0 251 call processor_init[],0
207 call unit_init[],0 252 call unit_init[],0
208 253
254#ifdef CONFIG_SMP
255 # mark the primary CPU in cpu_boot_map
256 mov cpu_boot_map,a0
257 mov 0x1,d0
258 mov d0,(a0)
259
260 # signal each secondary CPU to begin booting
261 mov 0x1,d2 # CPU ID
262
263loop_request_boot_secondary:
264 mov d2,a0
265 # send SMP_BOOT_IPI to secondary CPU
266 asl CROSS_ICR_CPU_SHIFT,a0
267 add CROSS_GxICR(SMP_BOOT_IRQ,0),a0
268 movhu (a0),d0
269 or GxICR_REQUEST|GxICR_DETECT,d0
270 movhu d0,(a0)
271 movhu (a0),d0 # flush
272
273 # wait up to 100ms for AP's IPI to be received
274 clr d3
275wait_on_secondary_boot:
276 mov DELAY_TIME_BOOT_IPI,d0
277 call __delay[],0
278 inc d3
279 mov cpu_boot_map,a0
280 mov (a0),d0
281 lsr d2,d0
282 btst 0x1,d0
283 bne 1f
284 cmp TIME_OUT_COUNT_BOOT_IPI,d3
285 bne wait_on_secondary_boot
2861:
287 inc d2
288 cmp NR_CPUS,d2
289 bne loop_request_boot_secondary
290#endif /* CONFIG_SMP */
291
209#ifdef CONFIG_GDBSTUB 292#ifdef CONFIG_GDBSTUB
210 call gdbstub_init[],0 293 call gdbstub_init[],0
211 294
@@ -217,7 +300,118 @@ __gdbstub_pause:
217#endif 300#endif
218 301
219 jmp start_kernel 302 jmp start_kernel
220 .size _start, _start-. 303 .size _start,.-_start
304
305###############################################################################
306#
307# Secondary CPU boot point
308#
309###############################################################################
310#ifdef CONFIG_SMP
311startup_secondary:
312 # preload the PGD pointer register
313 mov swapper_pg_dir,d0
314 mov d0,(PTBR)
315 clr d0
316 movbu d0,(PIDR)
317
318 # turn on the TLBs
319 mov MMUCTR_IIV|MMUCTR_DIV,d0
320 mov d0,(MMUCTR)
321#ifdef CONFIG_AM34_2
322 mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE|MMUCTR_WTE,d0
323#else
324 mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE,d0
325#endif
326 mov d0,(MMUCTR)
327
328 # turn on AM33v2 exception handling mode and set the trap table base
329 movhu (CPUP),d0
330 or CPUP_EXM_AM33V2,d0
331 movhu d0,(CPUP)
332
333 # set the interrupt vector table
334 mov CONFIG_INTERRUPT_VECTOR_BASE,d0
335 mov d0,(TBR)
336
337 # invalidate and enable both of the caches
338 mov ECHCTR,a0
339 clr d0
340 mov d0,(a0)
341 mov CHCTR,a0
342 clr d0
343 movhu d0,(a0) # turn off first
344 mov CHCTR_ICINV|CHCTR_DCINV,d0
345 movhu d0,(a0)
346 setlb
347 mov (a0),d0
348 btst CHCTR_ICBUSY|CHCTR_DCBUSY,d0 # wait till not busy (use CPU loop buffer)
349 lne
350
351#ifdef CONFIG_MN10300_CACHE_ENABLED
352#ifdef CONFIG_MN10300_CACHE_WBACK
353#ifndef CONFIG_MN10300_CACHE_WBACK_NOWRALLOC
354 mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0
355#else
356 mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK|CHCTR_DCALMD,d0
357#endif /* !NOWRALLOC */
358#else
359 mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0
360#endif /* WBACK */
361 movhu d0,(a0) # enable
362#endif /* ENABLED */
363
364 # Clear the boot IPI interrupt for this CPU
365 movhu (GxICR(SMP_BOOT_IRQ)),d0
366 and ~GxICR_REQUEST,d0
367 movhu d0,(GxICR(SMP_BOOT_IRQ))
368 movhu (GxICR(SMP_BOOT_IRQ)),d0 # flush
369
370 /* get stack */
371 mov CONFIG_INTERRUPT_VECTOR_BASE + CONFIG_BOOT_STACK_OFFSET,a0
372 mov (CPUID),d0
373 and CPUID_MASK,d0
374 mulu CONFIG_BOOT_STACK_SIZE,d0
375 sub d0,a0
376 mov a0,sp
377
378 # init interrupt for AP
379 call smp_prepare_cpu_init[],0
380
381 # mark this secondary CPU in cpu_boot_map
382 mov (CPUID),d0
383 mov 0x1,d1
384 asl d0,d1
385 mov cpu_boot_map,a0
386 bset d1,(a0)
387
388 or EPSW_IE|EPSW_IM_1,epsw # permit level 0 interrupts
389 nop
390 nop
391#ifdef CONFIG_MN10300_CACHE_WBACK
392 # flush the local cache if it's in writeback mode
393 call mn10300_local_dcache_flush_inv[],0
394 setlb
395 mov (CHCTR),d0
396 btst CHCTR_DCBUSY,d0 # wait till not busy (use CPU loop buffer)
397 lne
398#endif
399
400 # now sleep waiting for further instructions
401secondary_sleep:
402 mov CPUM_SLEEP,d0
403 movhu d0,(CPUM)
404 nop
405 nop
406 bra secondary_sleep
407 .size startup_secondary,.-startup_secondary
408#endif /* CONFIG_SMP */
409
410###############################################################################
411#
412#
413#
414###############################################################################
221ENTRY(__head_end) 415ENTRY(__head_end)
222 416
223/* 417/*
diff --git a/arch/mn10300/kernel/internal.h b/arch/mn10300/kernel/internal.h
index eee2eee86267..3b1f48b7e7f4 100644
--- a/arch/mn10300/kernel/internal.h
+++ b/arch/mn10300/kernel/internal.h
@@ -18,3 +18,15 @@ extern int kernel_thread_helper(int);
18 * entry.S 18 * entry.S
19 */ 19 */
20extern void ret_from_fork(struct task_struct *) __attribute__((noreturn)); 20extern void ret_from_fork(struct task_struct *) __attribute__((noreturn));
21
22/*
23 * smp-low.S
24 */
25#ifdef CONFIG_SMP
26extern void mn10300_low_ipi_handler(void);
27#endif
28
29/*
30 * time.c
31 */
32extern irqreturn_t local_timer_interrupt(void);
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c
index b5b970d2954a..80f15725ecad 100644
--- a/arch/mn10300/kernel/irq.c
+++ b/arch/mn10300/kernel/irq.c
@@ -12,11 +12,34 @@
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/kernel_stat.h> 13#include <linux/kernel_stat.h>
14#include <linux/seq_file.h> 14#include <linux/seq_file.h>
15#include <linux/cpumask.h>
15#include <asm/setup.h> 16#include <asm/setup.h>
17#include <asm/serial-regs.h>
16 18
17unsigned long __mn10300_irq_enabled_epsw = EPSW_IE | EPSW_IM_7; 19#ifdef CONFIG_SMP
20#undef GxICR
21#define GxICR(X) CROSS_GxICR(X, irq_affinity_online[X])
22
23#undef GxICR_u8
24#define GxICR_u8(X) CROSS_GxICR_u8(X, irq_affinity_online[X])
25#endif /* CONFIG_SMP */
26
27unsigned long __mn10300_irq_enabled_epsw[NR_CPUS] __cacheline_aligned_in_smp = {
28 [0 ... NR_CPUS - 1] = EPSW_IE | EPSW_IM_7
29};
18EXPORT_SYMBOL(__mn10300_irq_enabled_epsw); 30EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);
19 31
32#ifdef CONFIG_SMP
33static char irq_affinity_online[NR_IRQS] = {
34 [0 ... NR_IRQS - 1] = 0
35};
36
37#define NR_IRQ_WORDS ((NR_IRQS + 31) / 32)
38static unsigned long irq_affinity_request[NR_IRQ_WORDS] = {
39 [0 ... NR_IRQ_WORDS - 1] = 0
40};
41#endif /* CONFIG_SMP */
42
20atomic_t irq_err_count; 43atomic_t irq_err_count;
21 44
22/* 45/*
@@ -24,30 +47,65 @@ atomic_t irq_err_count;
24 */ 47 */
25static void mn10300_cpupic_ack(unsigned int irq) 48static void mn10300_cpupic_ack(unsigned int irq)
26{ 49{
50 unsigned long flags;
51 u16 tmp;
52
53 flags = arch_local_cli_save();
54 GxICR_u8(irq) = GxICR_DETECT;
55 tmp = GxICR(irq);
56 arch_local_irq_restore(flags);
57}
58
59static void __mask_and_set_icr(unsigned int irq,
60 unsigned int mask, unsigned int set)
61{
62 unsigned long flags;
27 u16 tmp; 63 u16 tmp;
28 *(volatile u8 *) &GxICR(irq) = GxICR_DETECT; 64
65 flags = arch_local_cli_save();
29 tmp = GxICR(irq); 66 tmp = GxICR(irq);
67 GxICR(irq) = (tmp & mask) | set;
68 tmp = GxICR(irq);
69 arch_local_irq_restore(flags);
30} 70}
31 71
32static void mn10300_cpupic_mask(unsigned int irq) 72static void mn10300_cpupic_mask(unsigned int irq)
33{ 73{
34 u16 tmp = GxICR(irq); 74 __mask_and_set_icr(irq, GxICR_LEVEL, 0);
35 GxICR(irq) = (tmp & GxICR_LEVEL);
36 tmp = GxICR(irq);
37} 75}
38 76
39static void mn10300_cpupic_mask_ack(unsigned int irq) 77static void mn10300_cpupic_mask_ack(unsigned int irq)
40{ 78{
41 u16 tmp = GxICR(irq); 79#ifdef CONFIG_SMP
42 GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT; 80 unsigned long flags;
43 tmp = GxICR(irq); 81 u16 tmp;
82
83 flags = arch_local_cli_save();
84
85 if (!test_and_clear_bit(irq, irq_affinity_request)) {
86 tmp = GxICR(irq);
87 GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
88 tmp = GxICR(irq);
89 } else {
90 u16 tmp2;
91 tmp = GxICR(irq);
92 GxICR(irq) = (tmp & GxICR_LEVEL);
93 tmp2 = GxICR(irq);
94
95 irq_affinity_online[irq] = any_online_cpu(*irq_desc[irq].affinity);
96 GxICR(irq) = (tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
97 tmp = GxICR(irq);
98 }
99
100 arch_local_irq_restore(flags);
101#else /* CONFIG_SMP */
102 __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_DETECT);
103#endif /* CONFIG_SMP */
44} 104}
45 105
46static void mn10300_cpupic_unmask(unsigned int irq) 106static void mn10300_cpupic_unmask(unsigned int irq)
47{ 107{
48 u16 tmp = GxICR(irq); 108 __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE);
49 GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
50 tmp = GxICR(irq);
51} 109}
52 110
53static void mn10300_cpupic_unmask_clear(unsigned int irq) 111static void mn10300_cpupic_unmask_clear(unsigned int irq)
@@ -56,11 +114,89 @@ static void mn10300_cpupic_unmask_clear(unsigned int irq)
56 * device has ceased to assert its interrupt line and the interrupt 114 * device has ceased to assert its interrupt line and the interrupt
57 * channel has been disabled in the PIC, so for level-triggered 115 * channel has been disabled in the PIC, so for level-triggered
58 * interrupts we need to clear the request bit when we re-enable */ 116 * interrupts we need to clear the request bit when we re-enable */
59 u16 tmp = GxICR(irq); 117#ifdef CONFIG_SMP
60 GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT; 118 unsigned long flags;
61 tmp = GxICR(irq); 119 u16 tmp;
120
121 flags = arch_local_cli_save();
122
123 if (!test_and_clear_bit(irq, irq_affinity_request)) {
124 tmp = GxICR(irq);
125 GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
126 tmp = GxICR(irq);
127 } else {
128 tmp = GxICR(irq);
129
130 irq_affinity_online[irq] = any_online_cpu(*irq_desc[irq].affinity);
131 GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
132 tmp = GxICR(irq);
133 }
134
135 arch_local_irq_restore(flags);
136#else /* CONFIG_SMP */
137 __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE | GxICR_DETECT);
138#endif /* CONFIG_SMP */
62} 139}
63 140
141#ifdef CONFIG_SMP
142static int
143mn10300_cpupic_setaffinity(unsigned int irq, const struct cpumask *mask)
144{
145 unsigned long flags;
146 int err;
147
148 flags = arch_local_cli_save();
149
150 /* check irq no */
151 switch (irq) {
152 case TMJCIRQ:
153 case RESCHEDULE_IPI:
154 case CALL_FUNC_SINGLE_IPI:
155 case LOCAL_TIMER_IPI:
156 case FLUSH_CACHE_IPI:
157 case CALL_FUNCTION_NMI_IPI:
158 case GDB_NMI_IPI:
159#ifdef CONFIG_MN10300_TTYSM0
160 case SC0RXIRQ:
161 case SC0TXIRQ:
162#ifdef CONFIG_MN10300_TTYSM0_TIMER8
163 case TM8IRQ:
164#elif CONFIG_MN10300_TTYSM0_TIMER2
165 case TM2IRQ:
166#endif /* CONFIG_MN10300_TTYSM0_TIMER8 */
167#endif /* CONFIG_MN10300_TTYSM0 */
168
169#ifdef CONFIG_MN10300_TTYSM1
170 case SC1RXIRQ:
171 case SC1TXIRQ:
172#ifdef CONFIG_MN10300_TTYSM1_TIMER12
173 case TM12IRQ:
174#elif CONFIG_MN10300_TTYSM1_TIMER9
175 case TM9IRQ:
176#elif CONFIG_MN10300_TTYSM1_TIMER3
177 case TM3IRQ:
178#endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
179#endif /* CONFIG_MN10300_TTYSM1 */
180
181#ifdef CONFIG_MN10300_TTYSM2
182 case SC2RXIRQ:
183 case SC2TXIRQ:
184 case TM10IRQ:
185#endif /* CONFIG_MN10300_TTYSM2 */
186 err = -1;
187 break;
188
189 default:
190 set_bit(irq, irq_affinity_request);
191 err = 0;
192 break;
193 }
194
195 arch_local_irq_restore(flags);
196 return err;
197}
198#endif /* CONFIG_SMP */
199
64/* 200/*
65 * MN10300 PIC level-triggered IRQ handling. 201 * MN10300 PIC level-triggered IRQ handling.
66 * 202 *
@@ -79,6 +215,9 @@ static struct irq_chip mn10300_cpu_pic_level = {
79 .mask = mn10300_cpupic_mask, 215 .mask = mn10300_cpupic_mask,
80 .mask_ack = mn10300_cpupic_mask, 216 .mask_ack = mn10300_cpupic_mask,
81 .unmask = mn10300_cpupic_unmask_clear, 217 .unmask = mn10300_cpupic_unmask_clear,
218#ifdef CONFIG_SMP
219 .set_affinity = mn10300_cpupic_setaffinity,
220#endif /* CONFIG_SMP */
82}; 221};
83 222
84/* 223/*
@@ -94,6 +233,9 @@ static struct irq_chip mn10300_cpu_pic_edge = {
94 .mask = mn10300_cpupic_mask, 233 .mask = mn10300_cpupic_mask,
95 .mask_ack = mn10300_cpupic_mask_ack, 234 .mask_ack = mn10300_cpupic_mask_ack,
96 .unmask = mn10300_cpupic_unmask, 235 .unmask = mn10300_cpupic_unmask,
236#ifdef CONFIG_SMP
237 .set_affinity = mn10300_cpupic_setaffinity,
238#endif /* CONFIG_SMP */
97}; 239};
98 240
99/* 241/*
@@ -111,14 +253,34 @@ void ack_bad_irq(int irq)
111 */ 253 */
112void set_intr_level(int irq, u16 level) 254void set_intr_level(int irq, u16 level)
113{ 255{
114 u16 tmp; 256 BUG_ON(in_interrupt());
115 257
116 if (in_interrupt()) 258 __mask_and_set_icr(irq, GxICR_ENABLE, level);
117 BUG(); 259}
118 260
119 tmp = GxICR(irq); 261void mn10300_intc_set_level(unsigned int irq, unsigned int level)
120 GxICR(irq) = (tmp & GxICR_ENABLE) | level; 262{
121 tmp = GxICR(irq); 263 set_intr_level(irq, NUM2GxICR_LEVEL(level) & GxICR_LEVEL);
264}
265
266void mn10300_intc_clear(unsigned int irq)
267{
268 __mask_and_set_icr(irq, GxICR_LEVEL | GxICR_ENABLE, GxICR_DETECT);
269}
270
271void mn10300_intc_set(unsigned int irq)
272{
273 __mask_and_set_icr(irq, 0, GxICR_REQUEST | GxICR_DETECT);
274}
275
276void mn10300_intc_enable(unsigned int irq)
277{
278 mn10300_cpupic_unmask(irq);
279}
280
281void mn10300_intc_disable(unsigned int irq)
282{
283 mn10300_cpupic_mask(irq);
122} 284}
123 285
124/* 286/*
@@ -126,7 +288,7 @@ void set_intr_level(int irq, u16 level)
126 * than before 288 * than before
127 * - see Documentation/mn10300/features.txt 289 * - see Documentation/mn10300/features.txt
128 */ 290 */
129void set_intr_postackable(int irq) 291void mn10300_set_lateack_irq_type(int irq)
130{ 292{
131 set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level, 293 set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level,
132 handle_level_irq); 294 handle_level_irq);
@@ -147,6 +309,7 @@ void __init init_IRQ(void)
147 * interrupts */ 309 * interrupts */
148 set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge, 310 set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge,
149 handle_level_irq); 311 handle_level_irq);
312
150 unit_init_IRQ(); 313 unit_init_IRQ();
151} 314}
152 315
@@ -156,6 +319,7 @@ void __init init_IRQ(void)
156asmlinkage void do_IRQ(void) 319asmlinkage void do_IRQ(void)
157{ 320{
158 unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw; 321 unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw;
322 unsigned int cpu_id = smp_processor_id();
159 int irq; 323 int irq;
160 324
161 sp = current_stack_pointer(); 325 sp = current_stack_pointer();
@@ -163,12 +327,14 @@ asmlinkage void do_IRQ(void)
163 327
164 /* make sure local_irq_enable() doesn't muck up the interrupt priority 328 /* make sure local_irq_enable() doesn't muck up the interrupt priority
165 * setting in EPSW */ 329 * setting in EPSW */
166 old_irq_enabled_epsw = __mn10300_irq_enabled_epsw; 330 old_irq_enabled_epsw = __mn10300_irq_enabled_epsw[cpu_id];
167 local_save_flags(epsw); 331 local_save_flags(epsw);
168 __mn10300_irq_enabled_epsw = EPSW_IE | (EPSW_IM & epsw); 332 __mn10300_irq_enabled_epsw[cpu_id] = EPSW_IE | (EPSW_IM & epsw);
169 irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL; 333 irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL;
170 334
171 __IRQ_STAT(smp_processor_id(), __irq_count)++; 335#ifdef CONFIG_MN10300_WD_TIMER
336 __IRQ_STAT(cpu_id, __irq_count)++;
337#endif
172 338
173 irq_enter(); 339 irq_enter();
174 340
@@ -188,7 +354,7 @@ asmlinkage void do_IRQ(void)
188 local_irq_restore(epsw); 354 local_irq_restore(epsw);
189 } 355 }
190 356
191 __mn10300_irq_enabled_epsw = old_irq_enabled_epsw; 357 __mn10300_irq_enabled_epsw[cpu_id] = old_irq_enabled_epsw;
192 358
193 irq_exit(); 359 irq_exit();
194} 360}
@@ -239,11 +405,13 @@ int show_interrupts(struct seq_file *p, void *v)
239 405
240 /* polish off with NMI and error counters */ 406 /* polish off with NMI and error counters */
241 case NR_IRQS: 407 case NR_IRQS:
408#ifdef CONFIG_MN10300_WD_TIMER
242 seq_printf(p, "NMI: "); 409 seq_printf(p, "NMI: ");
243 for (j = 0; j < NR_CPUS; j++) 410 for (j = 0; j < NR_CPUS; j++)
244 if (cpu_online(j)) 411 if (cpu_online(j))
245 seq_printf(p, "%10u ", nmi_count(j)); 412 seq_printf(p, "%10u ", nmi_count(j));
246 seq_putc(p, '\n'); 413 seq_putc(p, '\n');
414#endif
247 415
248 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); 416 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
249 break; 417 break;
@@ -251,3 +419,51 @@ int show_interrupts(struct seq_file *p, void *v)
251 419
252 return 0; 420 return 0;
253} 421}
422
423#ifdef CONFIG_HOTPLUG_CPU
424void migrate_irqs(void)
425{
426 irq_desc_t *desc;
427 int irq;
428 unsigned int self, new;
429 unsigned long flags;
430
431 self = smp_processor_id();
432 for (irq = 0; irq < NR_IRQS; irq++) {
433 desc = irq_desc + irq;
434
435 if (desc->status == IRQ_PER_CPU)
436 continue;
437
438 if (cpu_isset(self, irq_desc[irq].affinity) &&
439 !cpus_intersects(irq_affinity[irq], cpu_online_map)) {
440 int cpu_id;
441 cpu_id = first_cpu(cpu_online_map);
442 cpu_set(cpu_id, irq_desc[irq].affinity);
443 }
444 /* We need to operate irq_affinity_online atomically. */
445 arch_local_cli_save(flags);
446 if (irq_affinity_online[irq] == self) {
447 u16 x, tmp;
448
449 x = CROSS_GxICR(irq, self);
450 CROSS_GxICR(irq, self) = x & GxICR_LEVEL;
451 tmp = CROSS_GxICR(irq, self);
452
453 new = any_online_cpu(irq_desc[irq].affinity);
454 irq_affinity_online[irq] = new;
455
456 CROSS_GxICR(irq, new) =
457 (x & GxICR_LEVEL) | GxICR_DETECT;
458 tmp = CROSS_GxICR(irq, new);
459
460 x &= GxICR_LEVEL | GxICR_ENABLE;
461 if (CROSS_GxICR(irq, self) & GxICR_REQUEST)
462 x |= GxICR_REQUEST | GxICR_DETECT;
463 CROSS_GxICR(irq, new) = x;
464 tmp = CROSS_GxICR(irq, new);
465 }
466 arch_local_irq_restore(flags);
467 }
468}
469#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/mn10300/kernel/mn10300-serial-low.S b/arch/mn10300/kernel/mn10300-serial-low.S
index 66702d256610..dfc1b6f2fa9a 100644
--- a/arch/mn10300/kernel/mn10300-serial-low.S
+++ b/arch/mn10300/kernel/mn10300-serial-low.S
@@ -39,7 +39,7 @@
39############################################################################### 39###############################################################################
40 .balign L1_CACHE_BYTES 40 .balign L1_CACHE_BYTES
41ENTRY(mn10300_serial_vdma_interrupt) 41ENTRY(mn10300_serial_vdma_interrupt)
42 or EPSW_IE,psw # permit overriding by 42# or EPSW_IE,psw # permit overriding by
43 # debugging interrupts 43 # debugging interrupts
44 movm [d2,d3,a2,a3,exreg0],(sp) 44 movm [d2,d3,a2,a3,exreg0],(sp)
45 45
@@ -164,7 +164,7 @@ mnsc_vdma_tx_noint:
164 rti 164 rti
165 165
166mnsc_vdma_tx_empty: 166mnsc_vdma_tx_empty:
167 mov +(GxICR_LEVEL_1|GxICR_DETECT),d2 167 mov +(NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)|GxICR_DETECT),d2
168 movhu d2,(e3) # disable the interrupt 168 movhu d2,(e3) # disable the interrupt
169 movhu (e3),d2 # flush 169 movhu (e3),d2 # flush
170 170
@@ -175,7 +175,7 @@ mnsc_vdma_tx_break:
175 movhu (SCxCTR,e2),d2 # turn on break mode 175 movhu (SCxCTR,e2),d2 # turn on break mode
176 or SC01CTR_BKE,d2 176 or SC01CTR_BKE,d2
177 movhu d2,(SCxCTR,e2) 177 movhu d2,(SCxCTR,e2)
178 mov +(GxICR_LEVEL_1|GxICR_DETECT),d2 178 mov +(NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)|GxICR_DETECT),d2
179 movhu d2,(e3) # disable transmit interrupts on this 179 movhu d2,(e3) # disable transmit interrupts on this
180 # channel 180 # channel
181 movhu (e3),d2 # flush 181 movhu (e3),d2 # flush
diff --git a/arch/mn10300/kernel/mn10300-serial.c b/arch/mn10300/kernel/mn10300-serial.c
index db509dd80565..996384dba45d 100644
--- a/arch/mn10300/kernel/mn10300-serial.c
+++ b/arch/mn10300/kernel/mn10300-serial.c
@@ -44,6 +44,11 @@ static const char serial_revdate[] = "2007-11-06";
44#include <unit/timex.h> 44#include <unit/timex.h>
45#include "mn10300-serial.h" 45#include "mn10300-serial.h"
46 46
47#ifdef CONFIG_SMP
48#undef GxICR
49#define GxICR(X) CROSS_GxICR(X, 0)
50#endif /* CONFIG_SMP */
51
47#define kenter(FMT, ...) \ 52#define kenter(FMT, ...) \
48 printk(KERN_DEBUG "-->%s(" FMT ")\n", __func__, ##__VA_ARGS__) 53 printk(KERN_DEBUG "-->%s(" FMT ")\n", __func__, ##__VA_ARGS__)
49#define _enter(FMT, ...) \ 54#define _enter(FMT, ...) \
@@ -57,6 +62,11 @@ static const char serial_revdate[] = "2007-11-06";
57#define _proto(FMT, ...) \ 62#define _proto(FMT, ...) \
58 no_printk(KERN_DEBUG "### MNSERIAL " FMT " ###\n", ##__VA_ARGS__) 63 no_printk(KERN_DEBUG "### MNSERIAL " FMT " ###\n", ##__VA_ARGS__)
59 64
65#ifndef CODMSB
66/* c_cflag bit meaning */
67#define CODMSB 004000000000 /* change Transfer bit-order */
68#endif
69
60#define NR_UARTS 3 70#define NR_UARTS 3
61 71
62#ifdef CONFIG_MN10300_TTYSM_CONSOLE 72#ifdef CONFIG_MN10300_TTYSM_CONSOLE
@@ -152,26 +162,35 @@ struct mn10300_serial_port mn10300_serial_port_sif0 = {
152 .name = "ttySM0", 162 .name = "ttySM0",
153 ._iobase = &SC0CTR, 163 ._iobase = &SC0CTR,
154 ._control = &SC0CTR, 164 ._control = &SC0CTR,
155 ._status = (volatile u8 *) &SC0STR, 165 ._status = (volatile u8 *)&SC0STR,
156 ._intr = &SC0ICR, 166 ._intr = &SC0ICR,
157 ._rxb = &SC0RXB, 167 ._rxb = &SC0RXB,
158 ._txb = &SC0TXB, 168 ._txb = &SC0TXB,
159 .rx_name = "ttySM0:Rx", 169 .rx_name = "ttySM0:Rx",
160 .tx_name = "ttySM0:Tx", 170 .tx_name = "ttySM0:Tx",
161#ifdef CONFIG_MN10300_TTYSM0_TIMER8 171#if defined(CONFIG_MN10300_TTYSM0_TIMER8)
162 .tm_name = "ttySM0:Timer8", 172 .tm_name = "ttySM0:Timer8",
163 ._tmxmd = &TM8MD, 173 ._tmxmd = &TM8MD,
164 ._tmxbr = &TM8BR, 174 ._tmxbr = &TM8BR,
165 ._tmicr = &TM8ICR, 175 ._tmicr = &TM8ICR,
166 .tm_irq = TM8IRQ, 176 .tm_irq = TM8IRQ,
167 .div_timer = MNSCx_DIV_TIMER_16BIT, 177 .div_timer = MNSCx_DIV_TIMER_16BIT,
168#else /* CONFIG_MN10300_TTYSM0_TIMER2 */ 178#elif defined(CONFIG_MN10300_TTYSM0_TIMER0)
179 .tm_name = "ttySM0:Timer0",
180 ._tmxmd = &TM0MD,
181 ._tmxbr = (volatile u16 *)&TM0BR,
182 ._tmicr = &TM0ICR,
183 .tm_irq = TM0IRQ,
184 .div_timer = MNSCx_DIV_TIMER_8BIT,
185#elif defined(CONFIG_MN10300_TTYSM0_TIMER2)
169 .tm_name = "ttySM0:Timer2", 186 .tm_name = "ttySM0:Timer2",
170 ._tmxmd = &TM2MD, 187 ._tmxmd = &TM2MD,
171 ._tmxbr = (volatile u16 *) &TM2BR, 188 ._tmxbr = (volatile u16 *)&TM2BR,
172 ._tmicr = &TM2ICR, 189 ._tmicr = &TM2ICR,
173 .tm_irq = TM2IRQ, 190 .tm_irq = TM2IRQ,
174 .div_timer = MNSCx_DIV_TIMER_8BIT, 191 .div_timer = MNSCx_DIV_TIMER_8BIT,
192#else
193#error "Unknown config for ttySM0"
175#endif 194#endif
176 .rx_irq = SC0RXIRQ, 195 .rx_irq = SC0RXIRQ,
177 .tx_irq = SC0TXIRQ, 196 .tx_irq = SC0TXIRQ,
@@ -205,26 +224,35 @@ struct mn10300_serial_port mn10300_serial_port_sif1 = {
205 .name = "ttySM1", 224 .name = "ttySM1",
206 ._iobase = &SC1CTR, 225 ._iobase = &SC1CTR,
207 ._control = &SC1CTR, 226 ._control = &SC1CTR,
208 ._status = (volatile u8 *) &SC1STR, 227 ._status = (volatile u8 *)&SC1STR,
209 ._intr = &SC1ICR, 228 ._intr = &SC1ICR,
210 ._rxb = &SC1RXB, 229 ._rxb = &SC1RXB,
211 ._txb = &SC1TXB, 230 ._txb = &SC1TXB,
212 .rx_name = "ttySM1:Rx", 231 .rx_name = "ttySM1:Rx",
213 .tx_name = "ttySM1:Tx", 232 .tx_name = "ttySM1:Tx",
214#ifdef CONFIG_MN10300_TTYSM1_TIMER9 233#if defined(CONFIG_MN10300_TTYSM1_TIMER9)
215 .tm_name = "ttySM1:Timer9", 234 .tm_name = "ttySM1:Timer9",
216 ._tmxmd = &TM9MD, 235 ._tmxmd = &TM9MD,
217 ._tmxbr = &TM9BR, 236 ._tmxbr = &TM9BR,
218 ._tmicr = &TM9ICR, 237 ._tmicr = &TM9ICR,
219 .tm_irq = TM9IRQ, 238 .tm_irq = TM9IRQ,
220 .div_timer = MNSCx_DIV_TIMER_16BIT, 239 .div_timer = MNSCx_DIV_TIMER_16BIT,
221#else /* CONFIG_MN10300_TTYSM1_TIMER3 */ 240#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
222 .tm_name = "ttySM1:Timer3", 241 .tm_name = "ttySM1:Timer3",
223 ._tmxmd = &TM3MD, 242 ._tmxmd = &TM3MD,
224 ._tmxbr = (volatile u16 *) &TM3BR, 243 ._tmxbr = (volatile u16 *)&TM3BR,
225 ._tmicr = &TM3ICR, 244 ._tmicr = &TM3ICR,
226 .tm_irq = TM3IRQ, 245 .tm_irq = TM3IRQ,
227 .div_timer = MNSCx_DIV_TIMER_8BIT, 246 .div_timer = MNSCx_DIV_TIMER_8BIT,
247#elif defined(CONFIG_MN10300_TTYSM1_TIMER12)
248 .tm_name = "ttySM1/Timer12",
249 ._tmxmd = &TM12MD,
250 ._tmxbr = &TM12BR,
251 ._tmicr = &TM12ICR,
252 .tm_irq = TM12IRQ,
253 .div_timer = MNSCx_DIV_TIMER_16BIT,
254#else
255#error "Unknown config for ttySM1"
228#endif 256#endif
229 .rx_irq = SC1RXIRQ, 257 .rx_irq = SC1RXIRQ,
230 .tx_irq = SC1TXIRQ, 258 .tx_irq = SC1TXIRQ,
@@ -260,20 +288,45 @@ struct mn10300_serial_port mn10300_serial_port_sif2 = {
260 .uart.lock = 288 .uart.lock =
261 __SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock), 289 __SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock),
262 .name = "ttySM2", 290 .name = "ttySM2",
263 .rx_name = "ttySM2:Rx",
264 .tx_name = "ttySM2:Tx",
265 .tm_name = "ttySM2:Timer10",
266 ._iobase = &SC2CTR, 291 ._iobase = &SC2CTR,
267 ._control = &SC2CTR, 292 ._control = &SC2CTR,
268 ._status = &SC2STR, 293 ._status = (volatile u8 *)&SC2STR,
269 ._intr = &SC2ICR, 294 ._intr = &SC2ICR,
270 ._rxb = &SC2RXB, 295 ._rxb = &SC2RXB,
271 ._txb = &SC2TXB, 296 ._txb = &SC2TXB,
297 .rx_name = "ttySM2:Rx",
298 .tx_name = "ttySM2:Tx",
299#if defined(CONFIG_MN10300_TTYSM2_TIMER10)
300 .tm_name = "ttySM2/Timer10",
272 ._tmxmd = &TM10MD, 301 ._tmxmd = &TM10MD,
273 ._tmxbr = &TM10BR, 302 ._tmxbr = &TM10BR,
274 ._tmicr = &TM10ICR, 303 ._tmicr = &TM10ICR,
275 .tm_irq = TM10IRQ, 304 .tm_irq = TM10IRQ,
276 .div_timer = MNSCx_DIV_TIMER_16BIT, 305 .div_timer = MNSCx_DIV_TIMER_16BIT,
306#elif defined(CONFIG_MN10300_TTYSM2_TIMER9)
307 .tm_name = "ttySM2/Timer9",
308 ._tmxmd = &TM9MD,
309 ._tmxbr = &TM9BR,
310 ._tmicr = &TM9ICR,
311 .tm_irq = TM9IRQ,
312 .div_timer = MNSCx_DIV_TIMER_16BIT,
313#elif defined(CONFIG_MN10300_TTYSM2_TIMER1)
314 .tm_name = "ttySM2/Timer1",
315 ._tmxmd = &TM1MD,
316 ._tmxbr = (volatile u16 *)&TM1BR,
317 ._tmicr = &TM1ICR,
318 .tm_irq = TM1IRQ,
319 .div_timer = MNSCx_DIV_TIMER_8BIT,
320#elif defined(CONFIG_MN10300_TTYSM2_TIMER3)
321 .tm_name = "ttySM2/Timer3",
322 ._tmxmd = &TM3MD,
323 ._tmxbr = (volatile u16 *)&TM3BR,
324 ._tmicr = &TM3ICR,
325 .tm_irq = TM3IRQ,
326 .div_timer = MNSCx_DIV_TIMER_8BIT,
327#else
328#error "Unknown config for ttySM2"
329#endif
277 .rx_irq = SC2RXIRQ, 330 .rx_irq = SC2RXIRQ,
278 .tx_irq = SC2TXIRQ, 331 .tx_irq = SC2TXIRQ,
279 .rx_icr = &GxICR(SC2RXIRQ), 332 .rx_icr = &GxICR(SC2RXIRQ),
@@ -322,9 +375,13 @@ struct mn10300_serial_port *mn10300_serial_ports[NR_UARTS + 1] = {
322 */ 375 */
323static void mn10300_serial_mask_ack(unsigned int irq) 376static void mn10300_serial_mask_ack(unsigned int irq)
324{ 377{
378 unsigned long flags;
325 u16 tmp; 379 u16 tmp;
380
381 flags = arch_local_cli_save();
326 GxICR(irq) = GxICR_LEVEL_6; 382 GxICR(irq) = GxICR_LEVEL_6;
327 tmp = GxICR(irq); /* flush write buffer */ 383 tmp = GxICR(irq); /* flush write buffer */
384 arch_local_irq_restore(flags);
328} 385}
329 386
330static void mn10300_serial_nop(unsigned int irq) 387static void mn10300_serial_nop(unsigned int irq)
@@ -348,23 +405,36 @@ struct mn10300_serial_int mn10300_serial_int_tbl[NR_IRQS];
348 405
349static void mn10300_serial_dis_tx_intr(struct mn10300_serial_port *port) 406static void mn10300_serial_dis_tx_intr(struct mn10300_serial_port *port)
350{ 407{
408 unsigned long flags;
351 u16 x; 409 u16 x;
352 *port->tx_icr = GxICR_LEVEL_1 | GxICR_DETECT; 410
411 flags = arch_local_cli_save();
412 *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
353 x = *port->tx_icr; 413 x = *port->tx_icr;
414 arch_local_irq_restore(flags);
354} 415}
355 416
356static void mn10300_serial_en_tx_intr(struct mn10300_serial_port *port) 417static void mn10300_serial_en_tx_intr(struct mn10300_serial_port *port)
357{ 418{
419 unsigned long flags;
358 u16 x; 420 u16 x;
359 *port->tx_icr = GxICR_LEVEL_1 | GxICR_ENABLE; 421
422 flags = arch_local_cli_save();
423 *port->tx_icr =
424 NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL) | GxICR_ENABLE;
360 x = *port->tx_icr; 425 x = *port->tx_icr;
426 arch_local_irq_restore(flags);
361} 427}
362 428
363static void mn10300_serial_dis_rx_intr(struct mn10300_serial_port *port) 429static void mn10300_serial_dis_rx_intr(struct mn10300_serial_port *port)
364{ 430{
431 unsigned long flags;
365 u16 x; 432 u16 x;
366 *port->rx_icr = GxICR_LEVEL_1 | GxICR_DETECT; 433
434 flags = arch_local_cli_save();
435 *port->rx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
367 x = *port->rx_icr; 436 x = *port->rx_icr;
437 arch_local_irq_restore(flags);
368} 438}
369 439
370/* 440/*
@@ -650,7 +720,7 @@ static unsigned int mn10300_serial_tx_empty(struct uart_port *_port)
650static void mn10300_serial_set_mctrl(struct uart_port *_port, 720static void mn10300_serial_set_mctrl(struct uart_port *_port,
651 unsigned int mctrl) 721 unsigned int mctrl)
652{ 722{
653 struct mn10300_serial_port *port = 723 struct mn10300_serial_port *port __attribute__ ((unused)) =
654 container_of(_port, struct mn10300_serial_port, uart); 724 container_of(_port, struct mn10300_serial_port, uart);
655 725
656 _enter("%s,%x", port->name, mctrl); 726 _enter("%s,%x", port->name, mctrl);
@@ -706,6 +776,7 @@ static void mn10300_serial_start_tx(struct uart_port *_port)
706 UART_XMIT_SIZE)); 776 UART_XMIT_SIZE));
707 777
708 /* kick the virtual DMA controller */ 778 /* kick the virtual DMA controller */
779 arch_local_cli();
709 x = *port->tx_icr; 780 x = *port->tx_icr;
710 x |= GxICR_ENABLE; 781 x |= GxICR_ENABLE;
711 782
@@ -716,10 +787,14 @@ static void mn10300_serial_start_tx(struct uart_port *_port)
716 787
717 _debug("CTR=%04hx ICR=%02hx STR=%04x TMD=%02hx TBR=%04hx ICR=%04hx", 788 _debug("CTR=%04hx ICR=%02hx STR=%04x TMD=%02hx TBR=%04hx ICR=%04hx",
718 *port->_control, *port->_intr, *port->_status, 789 *port->_control, *port->_intr, *port->_status,
719 *port->_tmxmd, *port->_tmxbr, *port->tx_icr); 790 *port->_tmxmd,
791 (port->div_timer == MNSCx_DIV_TIMER_8BIT) ?
792 *(volatile u8 *)port->_tmxbr : *port->_tmxbr,
793 *port->tx_icr);
720 794
721 *port->tx_icr = x; 795 *port->tx_icr = x;
722 x = *port->tx_icr; 796 x = *port->tx_icr;
797 arch_local_sti();
723} 798}
724 799
725/* 800/*
@@ -842,8 +917,10 @@ static int mn10300_serial_startup(struct uart_port *_port)
842 pint->port = port; 917 pint->port = port;
843 pint->vdma = mn10300_serial_vdma_tx_handler; 918 pint->vdma = mn10300_serial_vdma_tx_handler;
844 919
845 set_intr_level(port->rx_irq, GxICR_LEVEL_1); 920 set_intr_level(port->rx_irq,
846 set_intr_level(port->tx_irq, GxICR_LEVEL_1); 921 NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL));
922 set_intr_level(port->tx_irq,
923 NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL));
847 set_irq_chip(port->tm_irq, &mn10300_serial_pic); 924 set_irq_chip(port->tm_irq, &mn10300_serial_pic);
848 925
849 if (request_irq(port->rx_irq, mn10300_serial_interrupt, 926 if (request_irq(port->rx_irq, mn10300_serial_interrupt,
@@ -876,6 +953,7 @@ error:
876 */ 953 */
877static void mn10300_serial_shutdown(struct uart_port *_port) 954static void mn10300_serial_shutdown(struct uart_port *_port)
878{ 955{
956 u16 x;
879 struct mn10300_serial_port *port = 957 struct mn10300_serial_port *port =
880 container_of(_port, struct mn10300_serial_port, uart); 958 container_of(_port, struct mn10300_serial_port, uart);
881 959
@@ -897,8 +975,12 @@ static void mn10300_serial_shutdown(struct uart_port *_port)
897 free_irq(port->rx_irq, port); 975 free_irq(port->rx_irq, port);
898 free_irq(port->tx_irq, port); 976 free_irq(port->tx_irq, port);
899 977
900 *port->rx_icr = GxICR_LEVEL_1; 978 arch_local_cli();
901 *port->tx_icr = GxICR_LEVEL_1; 979 *port->rx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
980 x = *port->rx_icr;
981 *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
982 x = *port->tx_icr;
983 arch_local_sti();
902} 984}
903 985
904/* 986/*
@@ -947,11 +1029,66 @@ static void mn10300_serial_change_speed(struct mn10300_serial_port *port,
947 /* Determine divisor based on baud rate */ 1029 /* Determine divisor based on baud rate */
948 battempt = 0; 1030 battempt = 0;
949 1031
950 if (div_timer == MNSCx_DIV_TIMER_16BIT) 1032 switch (port->uart.line) {
951 scxctr |= SC0CTR_CK_TM8UFLOW_8; /* ( == SC1CTR_CK_TM9UFLOW_8 1033#ifdef CONFIG_MN10300_TTYSM0
952 * == SC2CTR_CK_TM10UFLOW) */ 1034 case 0: /* ttySM0 */
953 else if (div_timer == MNSCx_DIV_TIMER_8BIT) 1035#if defined(CONFIG_MN10300_TTYSM0_TIMER8)
1036 scxctr |= SC0CTR_CK_TM8UFLOW_8;
1037#elif defined(CONFIG_MN10300_TTYSM0_TIMER0)
1038 scxctr |= SC0CTR_CK_TM0UFLOW_8;
1039#elif defined(CONFIG_MN10300_TTYSM0_TIMER2)
954 scxctr |= SC0CTR_CK_TM2UFLOW_8; 1040 scxctr |= SC0CTR_CK_TM2UFLOW_8;
1041#else
1042#error "Unknown config for ttySM0"
1043#endif
1044 break;
1045#endif /* CONFIG_MN10300_TTYSM0 */
1046
1047#ifdef CONFIG_MN10300_TTYSM1
1048 case 1: /* ttySM1 */
1049#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
1050#if defined(CONFIG_MN10300_TTYSM1_TIMER9)
1051 scxctr |= SC1CTR_CK_TM9UFLOW_8;
1052#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
1053 scxctr |= SC1CTR_CK_TM3UFLOW_8;
1054#else
1055#error "Unknown config for ttySM1"
1056#endif
1057#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
1058#if defined(CONFIG_MN10300_TTYSM1_TIMER12)
1059 scxctr |= SC1CTR_CK_TM12UFLOW_8;
1060#else
1061#error "Unknown config for ttySM1"
1062#endif
1063#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
1064 break;
1065#endif /* CONFIG_MN10300_TTYSM1 */
1066
1067#ifdef CONFIG_MN10300_TTYSM2
1068 case 2: /* ttySM2 */
1069#if defined(CONFIG_AM33_2)
1070#if defined(CONFIG_MN10300_TTYSM2_TIMER10)
1071 scxctr |= SC2CTR_CK_TM10UFLOW;
1072#else
1073#error "Unknown config for ttySM2"
1074#endif
1075#else /* CONFIG_AM33_2 */
1076#if defined(CONFIG_MN10300_TTYSM2_TIMER9)
1077 scxctr |= SC2CTR_CK_TM9UFLOW_8;
1078#elif defined(CONFIG_MN10300_TTYSM2_TIMER1)
1079 scxctr |= SC2CTR_CK_TM1UFLOW_8;
1080#elif defined(CONFIG_MN10300_TTYSM2_TIMER3)
1081 scxctr |= SC2CTR_CK_TM3UFLOW_8;
1082#else
1083#error "Unknown config for ttySM2"
1084#endif
1085#endif /* CONFIG_AM33_2 */
1086 break;
1087#endif /* CONFIG_MN10300_TTYSM2 */
1088
1089 default:
1090 break;
1091 }
955 1092
956try_alternative: 1093try_alternative:
957 baud = uart_get_baud_rate(&port->uart, new, old, 0, 1094 baud = uart_get_baud_rate(&port->uart, new, old, 0,
@@ -1195,6 +1332,12 @@ static void mn10300_serial_set_termios(struct uart_port *_port,
1195 ctr &= ~SC2CTR_TWE; 1332 ctr &= ~SC2CTR_TWE;
1196 *port->_control = ctr; 1333 *port->_control = ctr;
1197 } 1334 }
1335
1336 /* change Transfer bit-order (LSB/MSB) */
1337 if (new->c_cflag & CODMSB)
1338 *port->_control |= SC01CTR_OD_MSBFIRST; /* MSB MODE */
1339 else
1340 *port->_control &= ~SC01CTR_OD_MSBFIRST; /* LSB MODE */
1198} 1341}
1199 1342
1200/* 1343/*
@@ -1302,11 +1445,16 @@ static int __init mn10300_serial_init(void)
1302 printk(KERN_INFO "%s version %s (%s)\n", 1445 printk(KERN_INFO "%s version %s (%s)\n",
1303 serial_name, serial_version, serial_revdate); 1446 serial_name, serial_version, serial_revdate);
1304 1447
1305#ifdef CONFIG_MN10300_TTYSM2 1448#if defined(CONFIG_MN10300_TTYSM2) && defined(CONFIG_AM33_2)
1306 SC2TIM = 8; /* make the baud base of timer 2 IOCLK/8 */ 1449 {
1450 int tmp;
1451 SC2TIM = 8; /* make the baud base of timer 2 IOCLK/8 */
1452 tmp = SC2TIM;
1453 }
1307#endif 1454#endif
1308 1455
1309 set_intr_stub(EXCEP_IRQ_LEVEL1, mn10300_serial_vdma_interrupt); 1456 set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL),
1457 mn10300_serial_vdma_interrupt);
1310 1458
1311 ret = uart_register_driver(&mn10300_serial_driver); 1459 ret = uart_register_driver(&mn10300_serial_driver);
1312 if (!ret) { 1460 if (!ret) {
@@ -1366,9 +1514,11 @@ static void mn10300_serial_console_write(struct console *co,
1366 port = mn10300_serial_ports[co->index]; 1514 port = mn10300_serial_ports[co->index];
1367 1515
1368 /* firstly hijack the serial port from the "virtual DMA" controller */ 1516 /* firstly hijack the serial port from the "virtual DMA" controller */
1517 arch_local_cli();
1369 txicr = *port->tx_icr; 1518 txicr = *port->tx_icr;
1370 *port->tx_icr = GxICR_LEVEL_1; 1519 *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
1371 tmp = *port->tx_icr; 1520 tmp = *port->tx_icr;
1521 arch_local_sti();
1372 1522
1373 /* the transmitter may be disabled */ 1523 /* the transmitter may be disabled */
1374 scxctr = *port->_control; 1524 scxctr = *port->_control;
@@ -1422,8 +1572,10 @@ static void mn10300_serial_console_write(struct console *co,
1422 if (!(scxctr & SC01CTR_TXE)) 1572 if (!(scxctr & SC01CTR_TXE))
1423 *port->_control = scxctr; 1573 *port->_control = scxctr;
1424 1574
1575 arch_local_cli();
1425 *port->tx_icr = txicr; 1576 *port->tx_icr = txicr;
1426 tmp = *port->tx_icr; 1577 tmp = *port->tx_icr;
1578 arch_local_sti();
1427} 1579}
1428 1580
1429/* 1581/*
diff --git a/arch/mn10300/kernel/mn10300-watchdog-low.S b/arch/mn10300/kernel/mn10300-watchdog-low.S
index 996244745cca..f2f5c9cfaabd 100644
--- a/arch/mn10300/kernel/mn10300-watchdog-low.S
+++ b/arch/mn10300/kernel/mn10300-watchdog-low.S
@@ -16,6 +16,7 @@
16#include <asm/intctl-regs.h> 16#include <asm/intctl-regs.h>
17#include <asm/timer-regs.h> 17#include <asm/timer-regs.h>
18#include <asm/frame.inc> 18#include <asm/frame.inc>
19#include <linux/threads.h>
19 20
20 .text 21 .text
21 22
@@ -53,7 +54,13 @@ watchdog_handler:
53 .type touch_nmi_watchdog,@function 54 .type touch_nmi_watchdog,@function
54touch_nmi_watchdog: 55touch_nmi_watchdog:
55 clr d0 56 clr d0
56 mov d0,(watchdog_alert_counter) 57 clr d1
58 mov watchdog_alert_counter, a0
59 setlb
60 mov d0, (a0+)
61 inc d1
62 cmp NR_CPUS, d1
63 lne
57 ret [],0 64 ret [],0
58 65
59 .size touch_nmi_watchdog,.-touch_nmi_watchdog 66 .size touch_nmi_watchdog,.-touch_nmi_watchdog
diff --git a/arch/mn10300/kernel/mn10300-watchdog.c b/arch/mn10300/kernel/mn10300-watchdog.c
index f362d9d138f1..965dd61656c3 100644
--- a/arch/mn10300/kernel/mn10300-watchdog.c
+++ b/arch/mn10300/kernel/mn10300-watchdog.c
@@ -30,7 +30,7 @@
30static DEFINE_SPINLOCK(watchdog_print_lock); 30static DEFINE_SPINLOCK(watchdog_print_lock);
31static unsigned int watchdog; 31static unsigned int watchdog;
32static unsigned int watchdog_hz = 1; 32static unsigned int watchdog_hz = 1;
33unsigned int watchdog_alert_counter; 33unsigned int watchdog_alert_counter[NR_CPUS];
34 34
35EXPORT_SYMBOL(touch_nmi_watchdog); 35EXPORT_SYMBOL(touch_nmi_watchdog);
36 36
@@ -39,9 +39,6 @@ EXPORT_SYMBOL(touch_nmi_watchdog);
39 * is to check its timer makes IRQ counts. If they are not 39 * is to check its timer makes IRQ counts. If they are not
40 * changing then that CPU has some problem. 40 * changing then that CPU has some problem.
41 * 41 *
42 * as these watchdog NMI IRQs are generated on every CPU, we only
43 * have to check the current processor.
44 *
45 * since NMIs dont listen to _any_ locks, we have to be extremely 42 * since NMIs dont listen to _any_ locks, we have to be extremely
46 * careful not to rely on unsafe variables. The printk might lock 43 * careful not to rely on unsafe variables. The printk might lock
47 * up though, so we have to break up any console locks first ... 44 * up though, so we have to break up any console locks first ...
@@ -69,8 +66,8 @@ int __init check_watchdog(void)
69 66
70 printk(KERN_INFO "OK.\n"); 67 printk(KERN_INFO "OK.\n");
71 68
72 /* now that we know it works we can reduce NMI frequency to 69 /* now that we know it works we can reduce NMI frequency to something
73 * something more reasonable; makes a difference in some configs 70 * more reasonable; makes a difference in some configs
74 */ 71 */
75 watchdog_hz = 1; 72 watchdog_hz = 1;
76 73
@@ -121,15 +118,22 @@ void __init watchdog_go(void)
121 } 118 }
122} 119}
123 120
121#ifdef CONFIG_SMP
122static void watchdog_dump_register(void *dummy)
123{
124 printk(KERN_ERR "--- Register Dump (CPU%d) ---\n", CPUID);
125 show_registers(__frame);
126}
127#endif
128
124asmlinkage 129asmlinkage
125void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep) 130void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
126{ 131{
127
128 /* 132 /*
129 * Since current-> is always on the stack, and we always switch 133 * Since current-> is always on the stack, and we always switch
130 * the stack NMI-atomically, it's safe to use smp_processor_id(). 134 * the stack NMI-atomically, it's safe to use smp_processor_id().
131 */ 135 */
132 int sum, cpu = smp_processor_id(); 136 int sum, cpu;
133 int irq = NMIIRQ; 137 int irq = NMIIRQ;
134 u8 wdt, tmp; 138 u8 wdt, tmp;
135 139
@@ -138,43 +142,61 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
138 tmp = WDCTR; 142 tmp = WDCTR;
139 NMICR = NMICR_WDIF; 143 NMICR = NMICR_WDIF;
140 144
141 nmi_count(cpu)++; 145 nmi_count(smp_processor_id())++;
142 kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); 146 kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
143 sum = irq_stat[cpu].__irq_count; 147
144 148 for_each_online_cpu(cpu) {
145 if (last_irq_sums[cpu] == sum) { 149
146 /* 150 sum = irq_stat[cpu].__irq_count;
147 * Ayiee, looks like this CPU is stuck ... 151
148 * wait a few IRQs (5 seconds) before doing the oops ... 152 if ((last_irq_sums[cpu] == sum)
149 */ 153#if defined(CONFIG_GDBSTUB) && defined(CONFIG_SMP)
150 watchdog_alert_counter++; 154 && !(CHK_GDBSTUB_BUSY()
151 if (watchdog_alert_counter == 5 * watchdog_hz) { 155 || atomic_read(&cpu_doing_single_step))
152 spin_lock(&watchdog_print_lock); 156#endif
157 ) {
153 /* 158 /*
154 * We are in trouble anyway, lets at least try 159 * Ayiee, looks like this CPU is stuck ...
155 * to get a message out. 160 * wait a few IRQs (5 seconds) before doing the oops ...
156 */ 161 */
157 bust_spinlocks(1); 162 watchdog_alert_counter[cpu]++;
158 printk(KERN_ERR 163 if (watchdog_alert_counter[cpu] == 5 * watchdog_hz) {
159 "NMI Watchdog detected LOCKUP on CPU%d," 164 spin_lock(&watchdog_print_lock);
160 " pc %08lx, registers:\n", 165 /*
161 cpu, regs->pc); 166 * We are in trouble anyway, lets at least try
162 show_registers(regs); 167 * to get a message out.
163 printk("console shuts up ...\n"); 168 */
164 console_silent(); 169 bust_spinlocks(1);
165 spin_unlock(&watchdog_print_lock); 170 printk(KERN_ERR
166 bust_spinlocks(0); 171 "NMI Watchdog detected LOCKUP on CPU%d,"
172 " pc %08lx, registers:\n",
173 cpu, regs->pc);
174#ifdef CONFIG_SMP
175 printk(KERN_ERR
176 "--- Register Dump (CPU%d) ---\n",
177 CPUID);
178#endif
179 show_registers(regs);
180#ifdef CONFIG_SMP
181 smp_nmi_call_function(watchdog_dump_register,
182 NULL, 1);
183#endif
184 printk(KERN_NOTICE "console shuts up ...\n");
185 console_silent();
186 spin_unlock(&watchdog_print_lock);
187 bust_spinlocks(0);
167#ifdef CONFIG_GDBSTUB 188#ifdef CONFIG_GDBSTUB
168 if (gdbstub_busy) 189 if (CHK_GDBSTUB_BUSY_AND_ACTIVE())
169 gdbstub_exception(regs, excep); 190 gdbstub_exception(regs, excep);
170 else 191 else
171 gdbstub_intercept(regs, excep); 192 gdbstub_intercept(regs, excep);
172#endif 193#endif
173 do_exit(SIGSEGV); 194 do_exit(SIGSEGV);
195 }
196 } else {
197 last_irq_sums[cpu] = sum;
198 watchdog_alert_counter[cpu] = 0;
174 } 199 }
175 } else {
176 last_irq_sums[cpu] = sum;
177 watchdog_alert_counter = 0;
178 } 200 }
179 201
180 WDCTR = wdt | WDCTR_WDRST; 202 WDCTR = wdt | WDCTR_WDRST;
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c
index 243e33cd874b..b2e85ed73a54 100644
--- a/arch/mn10300/kernel/process.c
+++ b/arch/mn10300/kernel/process.c
@@ -57,6 +57,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
57void (*pm_power_off)(void); 57void (*pm_power_off)(void);
58EXPORT_SYMBOL(pm_power_off); 58EXPORT_SYMBOL(pm_power_off);
59 59
60#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
60/* 61/*
61 * we use this if we don't have any better idle routine 62 * we use this if we don't have any better idle routine
62 */ 63 */
@@ -69,6 +70,35 @@ static void default_idle(void)
69 local_irq_enable(); 70 local_irq_enable();
70} 71}
71 72
73#else /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
74/*
75 * On SMP it's slightly faster (but much more power-consuming!)
76 * to poll the ->work.need_resched flag instead of waiting for the
77 * cross-CPU IPI to arrive. Use this option with caution.
78 */
79static inline void poll_idle(void)
80{
81 int oldval;
82
83 local_irq_enable();
84
85 /*
86 * Deal with another CPU just having chosen a thread to
87 * run here:
88 */
89 oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
90
91 if (!oldval) {
92 set_thread_flag(TIF_POLLING_NRFLAG);
93 while (!need_resched())
94 cpu_relax();
95 clear_thread_flag(TIF_POLLING_NRFLAG);
96 } else {
97 set_need_resched();
98 }
99}
100#endif /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
101
72/* 102/*
73 * the idle thread 103 * the idle thread
74 * - there's no useful work to be done, so just try to conserve power and have 104 * - there's no useful work to be done, so just try to conserve power and have
@@ -77,8 +107,6 @@ static void default_idle(void)
77 */ 107 */
78void cpu_idle(void) 108void cpu_idle(void)
79{ 109{
80 int cpu = smp_processor_id();
81
82 /* endless idle loop with no priority at all */ 110 /* endless idle loop with no priority at all */
83 for (;;) { 111 for (;;) {
84 while (!need_resched()) { 112 while (!need_resched()) {
@@ -86,8 +114,13 @@ void cpu_idle(void)
86 114
87 smp_rmb(); 115 smp_rmb();
88 idle = pm_idle; 116 idle = pm_idle;
89 if (!idle) 117 if (!idle) {
118#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
119 idle = poll_idle;
120#else /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
90 idle = default_idle; 121 idle = default_idle;
122#endif /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
123 }
91 idle(); 124 idle();
92 } 125 }
93 126
@@ -233,7 +266,7 @@ int copy_thread(unsigned long clone_flags,
233 } 266 }
234 267
235 /* set up things up so the scheduler can start the new task */ 268 /* set up things up so the scheduler can start the new task */
236 p->thread.__frame = c_kregs; 269 p->thread.frame = c_kregs;
237 p->thread.a3 = (unsigned long) c_kregs; 270 p->thread.a3 = (unsigned long) c_kregs;
238 p->thread.sp = c_ksp; 271 p->thread.sp = c_ksp;
239 p->thread.pc = (unsigned long) ret_from_fork; 272 p->thread.pc = (unsigned long) ret_from_fork;
diff --git a/arch/mn10300/kernel/profile.c b/arch/mn10300/kernel/profile.c
index 20d7d0306b16..4f342f75d00c 100644
--- a/arch/mn10300/kernel/profile.c
+++ b/arch/mn10300/kernel/profile.c
@@ -41,7 +41,7 @@ static __init int profile_init(void)
41 tmp = TM11ICR; 41 tmp = TM11ICR;
42 42
43 printk(KERN_INFO "Profiling initiated on timer 11, priority 0, %uHz\n", 43 printk(KERN_INFO "Profiling initiated on timer 11, priority 0, %uHz\n",
44 mn10300_ioclk / 8 / (TM11BR + 1)); 44 MN10300_IOCLK / 8 / (TM11BR + 1));
45 printk(KERN_INFO "Profile histogram stored %p-%p\n", 45 printk(KERN_INFO "Profile histogram stored %p-%p\n",
46 prof_buffer, (u8 *)(prof_buffer + prof_len) - 1); 46 prof_buffer, (u8 *)(prof_buffer + prof_len) - 1);
47 47
diff --git a/arch/mn10300/kernel/rtc.c b/arch/mn10300/kernel/rtc.c
index 4eef0e7224f6..e9e20f9a4dd3 100644
--- a/arch/mn10300/kernel/rtc.c
+++ b/arch/mn10300/kernel/rtc.c
@@ -20,18 +20,22 @@
20DEFINE_SPINLOCK(rtc_lock); 20DEFINE_SPINLOCK(rtc_lock);
21EXPORT_SYMBOL(rtc_lock); 21EXPORT_SYMBOL(rtc_lock);
22 22
23/* time for RTC to update itself in ioclks */ 23/*
24static unsigned long mn10300_rtc_update_period; 24 * Read the current RTC time
25 25 */
26void read_persistent_clock(struct timespec *ts) 26void read_persistent_clock(struct timespec *ts)
27{ 27{
28 struct rtc_time tm; 28 struct rtc_time tm;
29 29
30 get_rtc_time(&tm); 30 get_rtc_time(&tm);
31 31
32 ts->tv_sec = mktime(tm.tm_year, tm.tm_mon, tm.tm_mday,
33 tm.tm_hour, tm.tm_min, tm.tm_sec);
34 ts->tv_nsec = 0; 32 ts->tv_nsec = 0;
33 ts->tv_sec = mktime(tm.tm_year, tm.tm_mon, tm.tm_mday,
34 tm.tm_hour, tm.tm_min, tm.tm_sec);
35
36 /* if rtc is way off in the past, set something reasonable */
37 if (ts->tv_sec < 0)
38 ts->tv_sec = mktime(2009, 1, 1, 12, 0, 0);
35} 39}
36 40
37/* 41/*
@@ -115,39 +119,14 @@ int update_persistent_clock(struct timespec now)
115 */ 119 */
116void __init calibrate_clock(void) 120void __init calibrate_clock(void)
117{ 121{
118 unsigned long count0, counth, count1;
119 unsigned char status; 122 unsigned char status;
120 123
121 /* make sure the RTC is running and is set to operate in 24hr mode */ 124 /* make sure the RTC is running and is set to operate in 24hr mode */
122 status = RTSRC; 125 status = RTSRC;
123 RTCRB |= RTCRB_SET; 126 RTCRB |= RTCRB_SET;
124 RTCRB |= RTCRB_TM_24HR; 127 RTCRB |= RTCRB_TM_24HR;
128 RTCRB &= ~RTCRB_DM_BINARY;
125 RTCRA |= RTCRA_DVR; 129 RTCRA |= RTCRA_DVR;
126 RTCRA &= ~RTCRA_DVR; 130 RTCRA &= ~RTCRA_DVR;
127 RTCRB &= ~RTCRB_SET; 131 RTCRB &= ~RTCRB_SET;
128
129 /* work out the clock speed by counting clock cycles between ends of
130 * the RTC update cycle - track the RTC through one complete update
131 * cycle (1 second)
132 */
133 startup_timestamp_counter();
134
135 while (!(RTCRA & RTCRA_UIP)) {}
136 while ((RTCRA & RTCRA_UIP)) {}
137
138 count0 = TMTSCBC;
139
140 while (!(RTCRA & RTCRA_UIP)) {}
141
142 counth = TMTSCBC;
143
144 while ((RTCRA & RTCRA_UIP)) {}
145
146 count1 = TMTSCBC;
147
148 shutdown_timestamp_counter();
149
150 MN10300_TSCCLK = count0 - count1; /* the timers count down */
151 mn10300_rtc_update_period = counth - count1;
152 MN10300_TSC_PER_HZ = MN10300_TSCCLK / HZ;
153} 132}
diff --git a/arch/mn10300/kernel/setup.c b/arch/mn10300/kernel/setup.c
index d464affcba0e..12514570ed5d 100644
--- a/arch/mn10300/kernel/setup.c
+++ b/arch/mn10300/kernel/setup.c
@@ -22,6 +22,7 @@
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/bootmem.h> 23#include <linux/bootmem.h>
24#include <linux/seq_file.h> 24#include <linux/seq_file.h>
25#include <linux/cpu.h>
25#include <asm/processor.h> 26#include <asm/processor.h>
26#include <linux/console.h> 27#include <linux/console.h>
27#include <asm/uaccess.h> 28#include <asm/uaccess.h>
@@ -30,7 +31,6 @@
30#include <asm/io.h> 31#include <asm/io.h>
31#include <asm/smp.h> 32#include <asm/smp.h>
32#include <proc/proc.h> 33#include <proc/proc.h>
33#include <asm/busctl-regs.h>
34#include <asm/fpu.h> 34#include <asm/fpu.h>
35#include <asm/sections.h> 35#include <asm/sections.h>
36 36
@@ -64,11 +64,13 @@ unsigned long memory_size;
64struct thread_info *__current_ti = &init_thread_union.thread_info; 64struct thread_info *__current_ti = &init_thread_union.thread_info;
65struct task_struct *__current = &init_task; 65struct task_struct *__current = &init_task;
66 66
67#define mn10300_known_cpus 3 67#define mn10300_known_cpus 5
68static const char *const mn10300_cputypes[] = { 68static const char *const mn10300_cputypes[] = {
69 "am33v1", 69 "am33-1",
70 "am33v2", 70 "am33-2",
71 "am34v1", 71 "am34-1",
72 "am33-3",
73 "am34-2",
72 "unknown" 74 "unknown"
73}; 75};
74 76
@@ -123,6 +125,7 @@ void __init setup_arch(char **cmdline_p)
123 125
124 cpu_init(); 126 cpu_init();
125 unit_setup(); 127 unit_setup();
128 smp_init_cpus();
126 parse_mem_cmdline(cmdline_p); 129 parse_mem_cmdline(cmdline_p);
127 130
128 init_mm.start_code = (unsigned long)&_text; 131 init_mm.start_code = (unsigned long)&_text;
@@ -179,7 +182,6 @@ void __init setup_arch(char **cmdline_p)
179void __init cpu_init(void) 182void __init cpu_init(void)
180{ 183{
181 unsigned long cpurev = CPUREV, type; 184 unsigned long cpurev = CPUREV, type;
182 unsigned long base, size;
183 185
184 type = (CPUREV & CPUREV_TYPE) >> CPUREV_TYPE_S; 186 type = (CPUREV & CPUREV_TYPE) >> CPUREV_TYPE_S;
185 if (type > mn10300_known_cpus) 187 if (type > mn10300_known_cpus)
@@ -189,47 +191,46 @@ void __init cpu_init(void)
189 mn10300_cputypes[type], 191 mn10300_cputypes[type],
190 (cpurev & CPUREV_REVISION) >> CPUREV_REVISION_S); 192 (cpurev & CPUREV_REVISION) >> CPUREV_REVISION_S);
191 193
192 /* determine the memory size and base from the memory controller regs */ 194 get_mem_info(&phys_memory_base, &memory_size);
193 memory_size = 0; 195 phys_memory_end = phys_memory_base + memory_size;
194
195 base = SDBASE(0);
196 if (base & SDBASE_CE) {
197 size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
198 size = ~size + 1;
199 base &= SDBASE_CBA;
200 196
201 printk(KERN_INFO "SDRAM[0]: %luMb @%08lx\n", size >> 20, base); 197 fpu_init_state();
202 memory_size += size; 198}
203 phys_memory_base = base;
204 }
205 199
206 base = SDBASE(1); 200static struct cpu cpu_devices[NR_CPUS];
207 if (base & SDBASE_CE) {
208 size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
209 size = ~size + 1;
210 base &= SDBASE_CBA;
211 201
212 printk(KERN_INFO "SDRAM[1]: %luMb @%08lx\n", size >> 20, base); 202static int __init topology_init(void)
213 memory_size += size; 203{
214 if (phys_memory_base == 0) 204 int i;
215 phys_memory_base = base;
216 }
217 205
218 phys_memory_end = phys_memory_base + memory_size; 206 for_each_present_cpu(i)
207 register_cpu(&cpu_devices[i], i);
219 208
220#ifdef CONFIG_FPU 209 return 0;
221 fpu_init_state();
222#endif
223} 210}
224 211
212subsys_initcall(topology_init);
213
225/* 214/*
226 * Get CPU information for use by the procfs. 215 * Get CPU information for use by the procfs.
227 */ 216 */
228static int show_cpuinfo(struct seq_file *m, void *v) 217static int show_cpuinfo(struct seq_file *m, void *v)
229{ 218{
219#ifdef CONFIG_SMP
220 struct mn10300_cpuinfo *c = v;
221 unsigned long cpu_id = c - cpu_data;
222 unsigned long cpurev = c->type, type, icachesz, dcachesz;
223#else /* CONFIG_SMP */
224 unsigned long cpu_id = 0;
230 unsigned long cpurev = CPUREV, type, icachesz, dcachesz; 225 unsigned long cpurev = CPUREV, type, icachesz, dcachesz;
226#endif /* CONFIG_SMP */
231 227
232 type = (CPUREV & CPUREV_TYPE) >> CPUREV_TYPE_S; 228#ifdef CONFIG_SMP
229 if (!cpu_online(cpu_id))
230 return 0;
231#endif
232
233 type = (cpurev & CPUREV_TYPE) >> CPUREV_TYPE_S;
233 if (type > mn10300_known_cpus) 234 if (type > mn10300_known_cpus)
234 type = mn10300_known_cpus; 235 type = mn10300_known_cpus;
235 236
@@ -244,13 +245,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
244 1024; 245 1024;
245 246
246 seq_printf(m, 247 seq_printf(m,
247 "processor : 0\n" 248 "processor : %ld\n"
248 "vendor_id : Matsushita\n" 249 "vendor_id : Matsushita\n"
249 "cpu core : %s\n" 250 "cpu core : %s\n"
250 "cpu rev : %lu\n" 251 "cpu rev : %lu\n"
251 "model name : " PROCESSOR_MODEL_NAME "\n" 252 "model name : " PROCESSOR_MODEL_NAME "\n"
252 "icache size: %lu\n" 253 "icache size: %lu\n"
253 "dcache size: %lu\n", 254 "dcache size: %lu\n",
255 cpu_id,
254 mn10300_cputypes[type], 256 mn10300_cputypes[type],
255 (cpurev & CPUREV_REVISION) >> CPUREV_REVISION_S, 257 (cpurev & CPUREV_REVISION) >> CPUREV_REVISION_S,
256 icachesz, 258 icachesz,
@@ -262,8 +264,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
262 "bogomips : %lu.%02lu\n\n", 264 "bogomips : %lu.%02lu\n\n",
263 MN10300_IOCLK / 1000000, 265 MN10300_IOCLK / 1000000,
264 (MN10300_IOCLK / 10000) % 100, 266 (MN10300_IOCLK / 10000) % 100,
267#ifdef CONFIG_SMP
268 c->loops_per_jiffy / (500000 / HZ),
269 (c->loops_per_jiffy / (5000 / HZ)) % 100
270#else /* CONFIG_SMP */
265 loops_per_jiffy / (500000 / HZ), 271 loops_per_jiffy / (500000 / HZ),
266 (loops_per_jiffy / (5000 / HZ)) % 100 272 (loops_per_jiffy / (5000 / HZ)) % 100
273#endif /* CONFIG_SMP */
267 ); 274 );
268 275
269 return 0; 276 return 0;
diff --git a/arch/mn10300/kernel/smp-low.S b/arch/mn10300/kernel/smp-low.S
new file mode 100644
index 000000000000..72938cefc05e
--- /dev/null
+++ b/arch/mn10300/kernel/smp-low.S
@@ -0,0 +1,97 @@
1/* SMP IPI low-level handler
2 *
3 * Copyright (C) 2006-2007 Matsushita Electric Industrial Co., Ltd.
4 * All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <linux/sys.h>
14#include <linux/linkage.h>
15#include <asm/smp.h>
16#include <asm/system.h>
17#include <asm/thread_info.h>
18#include <asm/cpu-regs.h>
19#include <proc/smp-regs.h>
20#include <asm/asm-offsets.h>
21#include <asm/frame.inc>
22
23 .am33_2
24
25###############################################################################
26#
27# IPI interrupt handler
28#
29###############################################################################
30 .globl mn10300_low_ipi_handler
31mn10300_low_ipi_handler:
32 add -4,sp
33 mov d0,(sp)
34 movhu (IAGR),d0
35 and IAGR_GN,d0
36 lsr 0x2,d0
37#ifdef CONFIG_MN10300_CACHE_ENABLED
38 cmp FLUSH_CACHE_IPI,d0
39 beq mn10300_flush_cache_ipi
40#endif
41 cmp SMP_BOOT_IRQ,d0
42 beq mn10300_smp_boot_ipi
43 /* OTHERS */
44 mov (sp),d0
45 add 4,sp
46#ifdef CONFIG_GDBSTUB
47 jmp gdbstub_io_rx_handler
48#else
49 jmp end
50#endif
51
52###############################################################################
53#
54# Cache flush IPI interrupt handler
55#
56###############################################################################
57#ifdef CONFIG_MN10300_CACHE_ENABLED
58mn10300_flush_cache_ipi:
59 mov (sp),d0
60 add 4,sp
61
62 /* FLUSH_CACHE_IPI */
63 add -4,sp
64 SAVE_ALL
65 mov GxICR_DETECT,d2
66 movbu d2,(GxICR(FLUSH_CACHE_IPI)) # ACK the interrupt
67 movhu (GxICR(FLUSH_CACHE_IPI)),d2
68 call smp_cache_interrupt[],0
69 RESTORE_ALL
70 jmp end
71#endif
72
73###############################################################################
74#
75# SMP boot CPU IPI interrupt handler
76#
77###############################################################################
78mn10300_smp_boot_ipi:
79 /* clear interrupt */
80 movhu (GxICR(SMP_BOOT_IRQ)),d0
81 and ~GxICR_REQUEST,d0
82 movhu d0,(GxICR(SMP_BOOT_IRQ))
83 mov (sp),d0
84 add 4,sp
85
86 # get stack
87 mov (CPUID),a0
88 add -1,a0
89 add a0,a0
90 add a0,a0
91 mov (start_stack,a0),a0
92 mov a0,sp
93 jmp initialize_secondary
94
95
96# Jump here after RTI to suppress the icache lookahead
97end:
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c
new file mode 100644
index 000000000000..b80234c28e0d
--- /dev/null
+++ b/arch/mn10300/kernel/smp.c
@@ -0,0 +1,1141 @@
1/* SMP support routines.
2 *
3 * Copyright (C) 2006-2008 Panasonic Corporation
4 * All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/interrupt.h>
17#include <linux/spinlock.h>
18#include <linux/init.h>
19#include <linux/jiffies.h>
20#include <linux/cpumask.h>
21#include <linux/err.h>
22#include <linux/kernel.h>
23#include <linux/delay.h>
24#include <linux/sched.h>
25#include <linux/profile.h>
26#include <linux/smp.h>
27#include <asm/tlbflush.h>
28#include <asm/system.h>
29#include <asm/bitops.h>
30#include <asm/processor.h>
31#include <asm/bug.h>
32#include <asm/exceptions.h>
33#include <asm/hardirq.h>
34#include <asm/fpu.h>
35#include <asm/mmu_context.h>
36#include <asm/thread_info.h>
37#include <asm/cpu-regs.h>
38#include <asm/intctl-regs.h>
39#include "internal.h"
40
41#ifdef CONFIG_HOTPLUG_CPU
42#include <linux/cpu.h>
43#include <asm/cacheflush.h>
44
45static unsigned long sleep_mode[NR_CPUS];
46
47static void run_sleep_cpu(unsigned int cpu);
48static void run_wakeup_cpu(unsigned int cpu);
49#endif /* CONFIG_HOTPLUG_CPU */
50
51/*
52 * Debug Message function
53 */
54
55#undef DEBUG_SMP
56#ifdef DEBUG_SMP
57#define Dprintk(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
58#else
59#define Dprintk(fmt, ...) no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
60#endif
61
62/* timeout value in msec for smp_nmi_call_function. zero is no timeout. */
63#define CALL_FUNCTION_NMI_IPI_TIMEOUT 0
64
65/*
66 * Structure and data for smp_nmi_call_function().
67 */
68struct nmi_call_data_struct {
69 smp_call_func_t func;
70 void *info;
71 cpumask_t started;
72 cpumask_t finished;
73 int wait;
74 char size_alignment[0]
75 __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
76} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
77
78static DEFINE_SPINLOCK(smp_nmi_call_lock);
79static struct nmi_call_data_struct *nmi_call_data;
80
81/*
82 * Data structures and variables
83 */
84static cpumask_t cpu_callin_map; /* Bitmask of callin CPUs */
85static cpumask_t cpu_callout_map; /* Bitmask of callout CPUs */
86cpumask_t cpu_boot_map; /* Bitmask of boot APs */
87unsigned long start_stack[NR_CPUS - 1];
88
89/*
90 * Per CPU parameters
91 */
92struct mn10300_cpuinfo cpu_data[NR_CPUS] __cacheline_aligned;
93
94static int cpucount; /* The count of boot CPUs */
95static cpumask_t smp_commenced_mask;
96cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
97
98/*
99 * Function Prototypes
100 */
101static int do_boot_cpu(int);
102static void smp_show_cpu_info(int cpu_id);
103static void smp_callin(void);
104static void smp_online(void);
105static void smp_store_cpu_info(int);
106static void smp_cpu_init(void);
107static void smp_tune_scheduling(void);
108static void send_IPI_mask(const cpumask_t *cpumask, int irq);
109static void init_ipi(void);
110
111/*
112 * IPI Initialization interrupt definitions
113 */
114static void mn10300_ipi_disable(unsigned int irq);
115static void mn10300_ipi_enable(unsigned int irq);
116static void mn10300_ipi_ack(unsigned int irq);
117static void mn10300_ipi_nop(unsigned int irq);
118
119static struct irq_chip mn10300_ipi_type = {
120 .name = "cpu_ipi",
121 .disable = mn10300_ipi_disable,
122 .enable = mn10300_ipi_enable,
123 .ack = mn10300_ipi_ack,
124 .eoi = mn10300_ipi_nop
125};
126
127static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id);
128static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id);
129static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id);
130
131static struct irqaction reschedule_ipi = {
132 .handler = smp_reschedule_interrupt,
133 .name = "smp reschedule IPI"
134};
135static struct irqaction call_function_ipi = {
136 .handler = smp_call_function_interrupt,
137 .name = "smp call function IPI"
138};
139static struct irqaction local_timer_ipi = {
140 .handler = smp_ipi_timer_interrupt,
141 .flags = IRQF_DISABLED,
142 .name = "smp local timer IPI"
143};
144
145/**
146 * init_ipi - Initialise the IPI mechanism
147 */
148static void init_ipi(void)
149{
150 unsigned long flags;
151 u16 tmp16;
152
153 /* set up the reschedule IPI */
154 set_irq_chip_and_handler(RESCHEDULE_IPI,
155 &mn10300_ipi_type, handle_percpu_irq);
156 setup_irq(RESCHEDULE_IPI, &reschedule_ipi);
157 set_intr_level(RESCHEDULE_IPI, RESCHEDULE_GxICR_LV);
158 mn10300_ipi_enable(RESCHEDULE_IPI);
159
160 /* set up the call function IPI */
161 set_irq_chip_and_handler(CALL_FUNC_SINGLE_IPI,
162 &mn10300_ipi_type, handle_percpu_irq);
163 setup_irq(CALL_FUNC_SINGLE_IPI, &call_function_ipi);
164 set_intr_level(CALL_FUNC_SINGLE_IPI, CALL_FUNCTION_GxICR_LV);
165 mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);
166
167 /* set up the local timer IPI */
168 set_irq_chip_and_handler(LOCAL_TIMER_IPI,
169 &mn10300_ipi_type, handle_percpu_irq);
170 setup_irq(LOCAL_TIMER_IPI, &local_timer_ipi);
171 set_intr_level(LOCAL_TIMER_IPI, LOCAL_TIMER_GxICR_LV);
172 mn10300_ipi_enable(LOCAL_TIMER_IPI);
173
174#ifdef CONFIG_MN10300_CACHE_ENABLED
175 /* set up the cache flush IPI */
176 flags = arch_local_cli_save();
177 __set_intr_stub(NUM2EXCEP_IRQ_LEVEL(FLUSH_CACHE_GxICR_LV),
178 mn10300_low_ipi_handler);
179 GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
180 mn10300_ipi_enable(FLUSH_CACHE_IPI);
181 arch_local_irq_restore(flags);
182#endif
183
184 /* set up the NMI call function IPI */
185 flags = arch_local_cli_save();
186 GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
187 tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
188 arch_local_irq_restore(flags);
189
190 /* set up the SMP boot IPI */
191 flags = arch_local_cli_save();
192 __set_intr_stub(NUM2EXCEP_IRQ_LEVEL(SMP_BOOT_GxICR_LV),
193 mn10300_low_ipi_handler);
194 arch_local_irq_restore(flags);
195}
196
197/**
198 * mn10300_ipi_shutdown - Shut down handling of an IPI
199 * @irq: The IPI to be shut down.
200 */
201static void mn10300_ipi_shutdown(unsigned int irq)
202{
203 unsigned long flags;
204 u16 tmp;
205
206 flags = arch_local_cli_save();
207
208 tmp = GxICR(irq);
209 GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
210 tmp = GxICR(irq);
211
212 arch_local_irq_restore(flags);
213}
214
215/**
216 * mn10300_ipi_enable - Enable an IPI
217 * @irq: The IPI to be enabled.
218 */
219static void mn10300_ipi_enable(unsigned int irq)
220{
221 unsigned long flags;
222 u16 tmp;
223
224 flags = arch_local_cli_save();
225
226 tmp = GxICR(irq);
227 GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
228 tmp = GxICR(irq);
229
230 arch_local_irq_restore(flags);
231}
232
233/**
234 * mn10300_ipi_disable - Disable an IPI
235 * @irq: The IPI to be disabled.
236 */
237static void mn10300_ipi_disable(unsigned int irq)
238{
239 unsigned long flags;
240 u16 tmp;
241
242 flags = arch_local_cli_save();
243
244 tmp = GxICR(irq);
245 GxICR(irq) = tmp & GxICR_LEVEL;
246 tmp = GxICR(irq);
247
248 arch_local_irq_restore(flags);
249}
250
251/**
252 * mn10300_ipi_ack - Acknowledge an IPI interrupt in the PIC
253 * @irq: The IPI to be acknowledged.
254 *
255 * Clear the interrupt detection flag for the IPI on the appropriate interrupt
256 * channel in the PIC.
257 */
258static void mn10300_ipi_ack(unsigned int irq)
259{
260 unsigned long flags;
261 u16 tmp;
262
263 flags = arch_local_cli_save();
264 GxICR_u8(irq) = GxICR_DETECT;
265 tmp = GxICR(irq);
266 arch_local_irq_restore(flags);
267}
268
269/**
270 * mn10300_ipi_nop - Dummy IPI action
271 * @irq: The IPI to be acted upon.
272 */
273static void mn10300_ipi_nop(unsigned int irq)
274{
275}
276
277/**
278 * send_IPI_mask - Send IPIs to all CPUs in list
279 * @cpumask: The list of CPUs to target.
280 * @irq: The IPI request to be sent.
281 *
282 * Send the specified IPI to all the CPUs in the list, not waiting for them to
283 * finish before returning. The caller is responsible for synchronisation if
284 * that is needed.
285 */
286static void send_IPI_mask(const cpumask_t *cpumask, int irq)
287{
288 int i;
289 u16 tmp;
290
291 for (i = 0; i < NR_CPUS; i++) {
292 if (cpu_isset(i, *cpumask)) {
293 /* send IPI */
294 tmp = CROSS_GxICR(irq, i);
295 CROSS_GxICR(irq, i) =
296 tmp | GxICR_REQUEST | GxICR_DETECT;
297 tmp = CROSS_GxICR(irq, i); /* flush write buffer */
298 }
299 }
300}
301
302/**
303 * send_IPI_self - Send an IPI to this CPU.
304 * @irq: The IPI request to be sent.
305 *
306 * Send the specified IPI to the current CPU.
307 */
308void send_IPI_self(int irq)
309{
310 send_IPI_mask(cpumask_of(smp_processor_id()), irq);
311}
312
313/**
314 * send_IPI_allbutself - Send IPIs to all the other CPUs.
315 * @irq: The IPI request to be sent.
316 *
317 * Send the specified IPI to all CPUs in the system barring the current one,
318 * not waiting for them to finish before returning. The caller is responsible
319 * for synchronisation if that is needed.
320 */
321void send_IPI_allbutself(int irq)
322{
323 cpumask_t cpumask;
324
325 cpumask = cpu_online_map;
326 cpu_clear(smp_processor_id(), cpumask);
327 send_IPI_mask(&cpumask, irq);
328}
329
330void arch_send_call_function_ipi_mask(const struct cpumask *mask)
331{
332 BUG();
333 /*send_IPI_mask(mask, CALL_FUNCTION_IPI);*/
334}
335
336void arch_send_call_function_single_ipi(int cpu)
337{
338 send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI);
339}
340
341/**
342 * smp_send_reschedule - Send reschedule IPI to a CPU
343 * @cpu: The CPU to target.
344 */
345void smp_send_reschedule(int cpu)
346{
347 send_IPI_mask(cpumask_of(cpu), RESCHEDULE_IPI);
348}
349
350/**
351 * smp_nmi_call_function - Send a call function NMI IPI to all CPUs
352 * @func: The function to ask to be run.
353 * @info: The context data to pass to that function.
354 * @wait: If true, wait (atomically) until function is run on all CPUs.
355 *
356 * Send a non-maskable request to all CPUs in the system, requesting them to
357 * run the specified function with the given context data, and, potentially, to
358 * wait for completion of that function on all CPUs.
359 *
360 * Returns 0 if successful, -ETIMEDOUT if we were asked to wait, but hit the
361 * timeout.
362 */
363int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
364{
365 struct nmi_call_data_struct data;
366 unsigned long flags;
367 unsigned int cnt;
368 int cpus, ret = 0;
369
370 cpus = num_online_cpus() - 1;
371 if (cpus < 1)
372 return 0;
373
374 data.func = func;
375 data.info = info;
376 data.started = cpu_online_map;
377 cpu_clear(smp_processor_id(), data.started);
378 data.wait = wait;
379 if (wait)
380 data.finished = data.started;
381
382 spin_lock_irqsave(&smp_nmi_call_lock, flags);
383 nmi_call_data = &data;
384 smp_mb();
385
386 /* Send a message to all other CPUs and wait for them to respond */
387 send_IPI_allbutself(CALL_FUNCTION_NMI_IPI);
388
389 /* Wait for response */
390 if (CALL_FUNCTION_NMI_IPI_TIMEOUT > 0) {
391 for (cnt = 0;
392 cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
393 !cpus_empty(data.started);
394 cnt++)
395 mdelay(1);
396
397 if (wait && cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT) {
398 for (cnt = 0;
399 cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
400 !cpus_empty(data.finished);
401 cnt++)
402 mdelay(1);
403 }
404
405 if (cnt >= CALL_FUNCTION_NMI_IPI_TIMEOUT)
406 ret = -ETIMEDOUT;
407
408 } else {
409 /* If timeout value is zero, wait until cpumask has been
410 * cleared */
411 while (!cpus_empty(data.started))
412 barrier();
413 if (wait)
414 while (!cpus_empty(data.finished))
415 barrier();
416 }
417
418 spin_unlock_irqrestore(&smp_nmi_call_lock, flags);
419 return ret;
420}
421
422/**
423 * stop_this_cpu - Callback to stop a CPU.
424 * @unused: Callback context (ignored).
425 */
426void stop_this_cpu(void *unused)
427{
428 static volatile int stopflag;
429 unsigned long flags;
430
431#ifdef CONFIG_GDBSTUB
432 /* In case of single stepping smp_send_stop by other CPU,
433 * clear procindebug to avoid deadlock.
434 */
435 atomic_set(&procindebug[smp_processor_id()], 0);
436#endif /* CONFIG_GDBSTUB */
437
438 flags = arch_local_cli_save();
439 cpu_clear(smp_processor_id(), cpu_online_map);
440
441 while (!stopflag)
442 cpu_relax();
443
444 cpu_set(smp_processor_id(), cpu_online_map);
445 arch_local_irq_restore(flags);
446}
447
448/**
449 * smp_send_stop - Send a stop request to all CPUs.
450 */
451void smp_send_stop(void)
452{
453 smp_nmi_call_function(stop_this_cpu, NULL, 0);
454}
455
456/**
457 * smp_reschedule_interrupt - Reschedule IPI handler
458 * @irq: The interrupt number.
459 * @dev_id: The device ID.
460 *
461 * We need do nothing here, since the scheduling will be effected on our way
462 * back through entry.S.
463 *
464 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
465 */
466static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
467{
468 /* do nothing */
469 return IRQ_HANDLED;
470}
471
472/**
473 * smp_call_function_interrupt - Call function IPI handler
474 * @irq: The interrupt number.
475 * @dev_id: The device ID.
476 *
477 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
478 */
479static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id)
480{
481 /* generic_smp_call_function_interrupt(); */
482 generic_smp_call_function_single_interrupt();
483 return IRQ_HANDLED;
484}
485
486/**
487 * smp_nmi_call_function_interrupt - Non-maskable call function IPI handler
488 */
489void smp_nmi_call_function_interrupt(void)
490{
491 smp_call_func_t func = nmi_call_data->func;
492 void *info = nmi_call_data->info;
493 int wait = nmi_call_data->wait;
494
495 /* Notify the initiating CPU that I've grabbed the data and am about to
496 * execute the function
497 */
498 smp_mb();
499 cpu_clear(smp_processor_id(), nmi_call_data->started);
500 (*func)(info);
501
502 if (wait) {
503 smp_mb();
504 cpu_clear(smp_processor_id(), nmi_call_data->finished);
505 }
506}
507
508/**
509 * smp_ipi_timer_interrupt - Local timer IPI handler
510 * @irq: The interrupt number.
511 * @dev_id: The device ID.
512 *
513 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
514 */
515static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id)
516{
517 return local_timer_interrupt();
518}
519
520void __init smp_init_cpus(void)
521{
522 int i;
523 for (i = 0; i < NR_CPUS; i++) {
524 set_cpu_possible(i, true);
525 set_cpu_present(i, true);
526 }
527}
528
529/**
530 * smp_cpu_init - Initialise AP in start_secondary.
531 *
532 * For this Application Processor, set up init_mm, initialise FPU and set
533 * interrupt level 0-6 setting.
534 */
535static void __init smp_cpu_init(void)
536{
537 unsigned long flags;
538 int cpu_id = smp_processor_id();
539 u16 tmp16;
540
541 if (test_and_set_bit(cpu_id, &cpu_initialized)) {
542 printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id);
543 for (;;)
544 local_irq_enable();
545 }
546 printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);
547
548 atomic_inc(&init_mm.mm_count);
549 current->active_mm = &init_mm;
550 BUG_ON(current->mm);
551
552 enter_lazy_tlb(&init_mm, current);
553
554 /* Force FPU initialization */
555 clear_using_fpu(current);
556
557 GxICR(CALL_FUNC_SINGLE_IPI) = CALL_FUNCTION_GxICR_LV | GxICR_DETECT;
558 mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);
559
560 GxICR(LOCAL_TIMER_IPI) = LOCAL_TIMER_GxICR_LV | GxICR_DETECT;
561 mn10300_ipi_enable(LOCAL_TIMER_IPI);
562
563 GxICR(RESCHEDULE_IPI) = RESCHEDULE_GxICR_LV | GxICR_DETECT;
564 mn10300_ipi_enable(RESCHEDULE_IPI);
565
566#ifdef CONFIG_MN10300_CACHE_ENABLED
567 GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
568 mn10300_ipi_enable(FLUSH_CACHE_IPI);
569#endif
570
571 mn10300_ipi_shutdown(SMP_BOOT_IRQ);
572
573 /* Set up the non-maskable call function IPI */
574 flags = arch_local_cli_save();
575 GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
576 tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
577 arch_local_irq_restore(flags);
578}
579
580/**
581 * smp_prepare_cpu_init - Initialise CPU in startup_secondary
582 *
583 * Set interrupt level 0-6 setting and init ICR of gdbstub.
584 */
585void smp_prepare_cpu_init(void)
586{
587 int loop;
588
589 /* Set the interrupt vector registers */
590 IVAR0 = EXCEP_IRQ_LEVEL0;
591 IVAR1 = EXCEP_IRQ_LEVEL1;
592 IVAR2 = EXCEP_IRQ_LEVEL2;
593 IVAR3 = EXCEP_IRQ_LEVEL3;
594 IVAR4 = EXCEP_IRQ_LEVEL4;
595 IVAR5 = EXCEP_IRQ_LEVEL5;
596 IVAR6 = EXCEP_IRQ_LEVEL6;
597
598 /* Disable all interrupts and set to priority 6 (lowest) */
599 for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
600 GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;
601
602#ifdef CONFIG_GDBSTUB
603 /* initialise GDB-stub */
604 do {
605 unsigned long flags;
606 u16 tmp16;
607
608 flags = arch_local_cli_save();
609 GxICR(GDB_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
610 tmp16 = GxICR(GDB_NMI_IPI);
611 arch_local_irq_restore(flags);
612 } while (0);
613#endif
614}
615
616/**
617 * start_secondary - Activate a secondary CPU (AP)
618 * @unused: Thread parameter (ignored).
619 */
620int __init start_secondary(void *unused)
621{
622 smp_cpu_init();
623
624 smp_callin();
625 while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
626 cpu_relax();
627
628 local_flush_tlb();
629 preempt_disable();
630 smp_online();
631
632 cpu_idle();
633 return 0;
634}
635
/**
 * smp_prepare_cpus - Boot up secondary CPUs (APs)
 * @max_cpus: Maximum number of CPUs to boot.
 *
 * Call do_boot_cpu, and boot up APs.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int phy_id;

	/* Setup boot CPU information */
	smp_store_cpu_info(0);
	smp_tune_scheduling();

	init_ipi();

	/* If SMP should be disabled, then finish */
	if (max_cpus == 0) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		goto smp_done;
	}

	/* Boot secondary CPUs (for which phy_id > 0) */
	for (phy_id = 0; phy_id < NR_CPUS; phy_id++) {
		/* Don't boot primary CPU */
		/* NOTE(review): once the max_cpus limit is hit, 'continue'
		 * also skips set_cpu_possible() and smp_show_cpu_info() for
		 * every remaining ID — confirm 'break' was not intended. */
		if (max_cpus <= cpucount + 1)
			continue;
		if (phy_id != 0)
			do_boot_cpu(phy_id);
		set_cpu_possible(phy_id, true);
		smp_show_cpu_info(phy_id);
	}

smp_done:
	Dprintk("Boot done.\n");
}
672
673/**
674 * smp_store_cpu_info - Save a CPU's information
675 * @cpu: The CPU to save for.
676 *
677 * Save boot_cpu_data and jiffy for the specified CPU.
678 */
679static void __init smp_store_cpu_info(int cpu)
680{
681 struct mn10300_cpuinfo *ci = &cpu_data[cpu];
682
683 *ci = boot_cpu_data;
684 ci->loops_per_jiffy = loops_per_jiffy;
685 ci->type = CPUREV;
686}
687
/**
 * smp_tune_scheduling - Set time slice value
 *
 * Nothing to do here; kept as a hook called from smp_prepare_cpus().
 */
static void __init smp_tune_scheduling(void)
{
}
696
/**
 * do_boot_cpu: Boot up one CPU
 * @phy_id: Physical ID of CPU to boot.
 *
 * Send an IPI to a secondary CPU to boot it.  Returns 0 on success, 1
 * otherwise.
 *
 * Fork an idle thread for the AP, record its entry point and stack, kick
 * it with SMP_BOOT_IRQ, then wait first for the IPI to be accepted and
 * then for the AP to check in via cpu_callin_map.
 */
static int __init do_boot_cpu(int phy_id)
{
	struct task_struct *idle;
	unsigned long send_status, callin_status;
	int timeout, cpu_id;

	send_status = GxICR_REQUEST;
	callin_status = 0;
	timeout = 0;
	cpu_id = phy_id;	/* logical CPU number == physical ID here */

	cpucount++;

	/* Create idle thread for this CPU */
	idle = fork_idle(cpu_id);
	if (IS_ERR(idle))
		panic("Failed fork for CPU#%d.", cpu_id);

	/* the AP enters here once released (see initialize_secondary()) */
	idle->thread.pc = (unsigned long)start_secondary;

	printk(KERN_NOTICE "Booting CPU#%d\n", cpu_id);
	/* slot 0 of start_stack[] belongs to CPU#1, hence the -1 */
	start_stack[cpu_id - 1] = idle->thread.sp;

	task_thread_info(idle)->cpu = cpu_id;

	/* Send boot IPI to AP */
	send_IPI_mask(cpumask_of(phy_id), SMP_BOOT_IRQ);

	Dprintk("Waiting for send to finish...\n");

	/* Wait for AP's IPI receive in 100[ms]; the REQUEST bit clears
	 * once the interrupt is taken */
	do {
		udelay(1000);
		send_status =
			CROSS_GxICR(SMP_BOOT_IRQ, phy_id) & GxICR_REQUEST;
	} while (send_status == GxICR_REQUEST && timeout++ < 100);

	Dprintk("Waiting for cpu_callin_map.\n");

	if (send_status == 0) {
		/* Allow AP to start initializing */
		cpu_set(cpu_id, cpu_callout_map);

		/* Wait for setting cpu_callin_map (up to 5s) */
		timeout = 0;
		do {
			udelay(1000);
			callin_status = cpu_isset(cpu_id, cpu_callin_map);
		} while (callin_status == 0 && timeout++ < 5000);

		if (callin_status == 0)
			Dprintk("Not responding.\n");
	} else {
		printk(KERN_WARNING "IPI not delivered.\n");
	}

	/* on any failure, undo the bookkeeping so the CPU stays offline */
	if (send_status == GxICR_REQUEST || callin_status == 0) {
		cpu_clear(cpu_id, cpu_callout_map);
		cpu_clear(cpu_id, cpu_callin_map);
		cpu_clear(cpu_id, cpu_initialized);
		cpucount--;
		return 1;
	}
	return 0;
}
769
770/**
771 * smp_show_cpu_info - Show SMP CPU information
772 * @cpu: The CPU of interest.
773 */
774static void __init smp_show_cpu_info(int cpu)
775{
776 struct mn10300_cpuinfo *ci = &cpu_data[cpu];
777
778 printk(KERN_INFO
779 "CPU#%d : ioclk speed: %lu.%02luMHz : bogomips : %lu.%02lu\n",
780 cpu,
781 MN10300_IOCLK / 1000000,
782 (MN10300_IOCLK / 10000) % 100,
783 ci->loops_per_jiffy / (500000 / HZ),
784 (ci->loops_per_jiffy / (5000 / HZ)) % 100);
785}
786
/**
 * smp_callin - Set cpu_callin_map of the current CPU ID
 *
 * Runs on a newly booted AP: wait (2s max) for the boot CPU to put us in
 * cpu_callout_map, calibrate our delay loop, save our CPU information and
 * finally announce ourselves in cpu_callin_map so the BP may continue.
 */
static void __init smp_callin(void)
{
	unsigned long timeout;
	int cpu;

	cpu = smp_processor_id();
	timeout = jiffies + (2 * HZ);

	/* a second call-in for the same CPU indicates a boot protocol bug */
	if (cpu_isset(cpu, cpu_callin_map)) {
		printk(KERN_ERR "CPU#%d already present.\n", cpu);
		BUG();
	}
	Dprintk("CPU#%d waiting for CALLOUT\n", cpu);

	/* Wait for AP startup 2s total */
	while (time_before(jiffies, timeout)) {
		if (cpu_isset(cpu, cpu_callout_map))
			break;
		cpu_relax();
	}

	/* falling out of the loop without a callout means the BP gave up */
	if (!time_before(jiffies, timeout)) {
		printk(KERN_ERR
		       "BUG: CPU#%d started up but did not get a callout!\n",
		       cpu);
		BUG();
	}

#ifdef CONFIG_CALIBRATE_DELAY
	calibrate_delay();		/* Get our bogomips */
#endif

	/* Save our processor parameters */
	smp_store_cpu_info(cpu);

	/* Allow the boot processor to continue */
	cpu_set(cpu, cpu_callin_map);
}
828
829/**
830 * smp_online - Set cpu_online_map
831 */
832static void __init smp_online(void)
833{
834 int cpu;
835
836 cpu = smp_processor_id();
837
838 local_irq_enable();
839
840 cpu_set(cpu, cpu_online_map);
841 smp_wmb();
842}
843
/**
 * smp_cpus_done - SMP bringup complete
 * @max_cpus: Maximum CPU count (unused).
 *
 * Do nothing; bringup work happens in smp_prepare_cpus() and __cpu_up().
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
}
853
854/*
855 * smp_prepare_boot_cpu - Set up stuff for the boot processor.
856 *
857 * Set up the cpu_online_map, cpu_callout_map and cpu_callin_map of the boot
858 * processor (CPU 0).
859 */
860void __devinit smp_prepare_boot_cpu(void)
861{
862 cpu_set(0, cpu_callout_map);
863 cpu_set(0, cpu_callin_map);
864 current_thread_info()->cpu = 0;
865}
866
/*
 * initialize_secondary - Initialise a secondary CPU (Application Processor).
 *
 * Set SP register and jump to thread's PC address.  Both were set up by
 * do_boot_cpu() in current->thread; the jmp does not return.
 */
void initialize_secondary(void)
{
	asm volatile (
	"mov	%0,sp	\n"		/* switch to the idle thread's stack */
	"jmp	(%1)	\n"		/* enter at thread.pc (start_secondary) */
	:
	: "a"(current->thread.sp), "a"(current->thread.pc));
}
880
881/**
882 * __cpu_up - Set smp_commenced_mask for the nominated CPU
883 * @cpu: The target CPU.
884 */
885int __devinit __cpu_up(unsigned int cpu)
886{
887 int timeout;
888
889#ifdef CONFIG_HOTPLUG_CPU
890 if (num_online_cpus() == 1)
891 disable_hlt();
892 if (sleep_mode[cpu])
893 run_wakeup_cpu(cpu);
894#endif /* CONFIG_HOTPLUG_CPU */
895
896 cpu_set(cpu, smp_commenced_mask);
897
898 /* Wait 5s total for a response */
899 for (timeout = 0 ; timeout < 5000 ; timeout++) {
900 if (cpu_isset(cpu, cpu_online_map))
901 break;
902 udelay(1000);
903 }
904
905 BUG_ON(!cpu_isset(cpu, cpu_online_map));
906 return 0;
907}
908
/**
 * setup_profiling_timer - Set up the profiling timer
 * @multiplier - The frequency multiplier to use
 *
 * The frequency of the profiling timer can be changed by writing a multiplier
 * value into /proc/profile.
 *
 * Always returns -EINVAL: changing the multiplier is not supported on
 * this architecture.
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
920
921/*
922 * CPU hotplug routines
923 */
924#ifdef CONFIG_HOTPLUG_CPU
925
926static DEFINE_PER_CPU(struct cpu, cpu_devices);
927
928static int __init topology_init(void)
929{
930 int cpu, ret;
931
932 for_each_cpu(cpu) {
933 ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
934 if (ret)
935 printk(KERN_WARNING
936 "topology_init: register_cpu %d failed (%d)\n",
937 cpu, ret);
938 }
939 return 0;
940}
941
942subsys_initcall(topology_init);
943
/**
 * __cpu_disable - Prepare the current CPU for offlining
 *
 * Refuse to offline the boot CPU; otherwise migrate our IRQs to other
 * CPUs and drop this CPU from the current mm's VM mask.
 */
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	if (cpu == 0)
		return -EBUSY;	/* CPU#0 must stay online */

	migrate_irqs();
	cpu_clear(cpu, current->active_mm->cpu_vm_mask);
	return 0;
}
954
/**
 * __cpu_die - Take a CPU down
 * @cpu: The dying CPU.
 *
 * Put @cpu into SLEEP mode.  Once only one CPU remains online, hlt may
 * be used again (pairs with disable_hlt() in __cpu_up()).
 */
void __cpu_die(unsigned int cpu)
{
	run_sleep_cpu(cpu);

	if (num_online_cpus() == 1)
		enable_hlt();
}
962
963#ifdef CONFIG_MN10300_CACHE_ENABLED
/*
 * hotplug_cpu_disable_cache - Turn off this CPU's caches
 *
 * Clear the icache/dcache enable bits in CHCTR, then poll the busy bits
 * until the hardware reports the caches have actually stopped.
 */
static inline void hotplug_cpu_disable_cache(void)
{
	int tmp;
	asm volatile(
		/* CHCTR &= ~(ICEN | DCEN) */
		"	movhu	(%1),%0	\n"
		"	and	%2,%0	\n"
		"	movhu	%0,(%1)	\n"
		/* spin while ICBUSY | DCBUSY is still set */
		"1:	movhu	(%1),%0	\n"
		"	btst	%3,%0	\n"
		"	bne	1b	\n"
		: "=&r"(tmp)
		: "a"(&CHCTR),
		  "i"(~(CHCTR_ICEN | CHCTR_DCEN)),
		  "i"(CHCTR_ICBUSY | CHCTR_DCBUSY)
		: "memory", "cc");
}
980
/*
 * hotplug_cpu_enable_cache - Turn this CPU's caches back on
 *
 * Set the icache/dcache enable bits in CHCTR via a read-modify-write.
 */
static inline void hotplug_cpu_enable_cache(void)
{
	int tmp;
	asm volatile(
		/* CHCTR |= ICEN | DCEN */
		"movhu	(%1),%0	\n"
		"or	%2,%0	\n"
		"movhu	%0,(%1)	\n"
		: "=&r"(tmp)
		: "a"(&CHCTR),
		  "i"(CHCTR_ICEN | CHCTR_DCEN)
		: "memory", "cc");
}
993
/*
 * hotplug_cpu_invalidate_cache - Invalidate this CPU's caches
 *
 * Set the icache/dcache invalidate bits in CHCTR.
 *
 * NOTE(review): unlike the two helpers above, this asm clobbers only
 * "cc", not "memory" — confirm that omission is intentional.
 */
static inline void hotplug_cpu_invalidate_cache(void)
{
	int tmp;
	asm volatile (
		/* CHCTR |= ICINV | DCINV */
		"movhu	(%1),%0	\n"
		"or	%2,%0	\n"
		"movhu	%0,(%1)	\n"
		: "=&r"(tmp)
		: "a"(&CHCTR),
		  "i"(CHCTR_ICINV | CHCTR_DCINV)
		: "cc");
}
1006
1007#else /* CONFIG_MN10300_CACHE_ENABLED */
1008#define hotplug_cpu_disable_cache() do {} while (0)
1009#define hotplug_cpu_enable_cache() do {} while (0)
1010#define hotplug_cpu_invalidate_cache() do {} while (0)
1011#endif /* CONFIG_MN10300_CACHE_ENABLED */
1012
/**
 * hotplug_cpu_nmi_call_function - Call a function on other CPUs for hotplug
 * @cpumask: List of target CPUs.
 * @func: The function to call on those CPUs.
 * @info: The context data for the function to be called.
 * @wait: Whether to wait for the calls to complete.
 *
 * Non-maskably call a function on another CPU for hotplug purposes.
 *
 * This function must be called with maskable interrupts disabled.
 *
 * The call descriptor is explicitly flushed/invalidated around the IPI
 * because a target CPU may have its caches disabled (see
 * prepare_sleep_cpu()), so plain cache-coherent shared memory cannot be
 * relied on.  Always returns 0.
 */
static int hotplug_cpu_nmi_call_function(cpumask_t cpumask,
					 smp_call_func_t func, void *info,
					 int wait)
{
	/*
	 * The address and the size of nmi_call_func_mask_data
	 * need to be aligned on L1_CACHE_BYTES.
	 */
	static struct nmi_call_data_struct nmi_call_func_mask_data
		__cacheline_aligned;
	unsigned long start, end;

	start = (unsigned long)&nmi_call_func_mask_data;
	end = start + sizeof(struct nmi_call_data_struct);

	/* fill in the call descriptor for the target CPUs */
	nmi_call_func_mask_data.func = func;
	nmi_call_func_mask_data.info = info;
	nmi_call_func_mask_data.started = cpumask;
	nmi_call_func_mask_data.wait = wait;
	if (wait)
		nmi_call_func_mask_data.finished = cpumask;

	spin_lock(&smp_nmi_call_lock);
	nmi_call_data = &nmi_call_func_mask_data;
	/* push the descriptor to memory before sending the NMI */
	mn10300_local_dcache_flush_range(start, end);
	smp_wmb();

	send_IPI_mask(cpumask, CALL_FUNCTION_NMI_IPI);

	/* wait for .started to empty — presumably each target's NMI handler
	 * clears its own bit once it has picked up the call */
	do {
		mn10300_local_dcache_inv_range(start, end);
		barrier();
	} while (!cpus_empty(nmi_call_func_mask_data.started));

	if (wait) {
		/* likewise wait for every target to finish running @func */
		do {
			mn10300_local_dcache_inv_range(start, end);
			barrier();
		} while (!cpus_empty(nmi_call_func_mask_data.finished));
	}

	spin_unlock(&smp_nmi_call_lock);
	return 0;
}
1068
/*
 * restart_wakeup_cpu - Finish waking the calling CPU back up
 *
 * Re-register in cpu_callin_map, flush stale TLB entries and mark
 * ourselves online again, publishing with a write barrier.
 */
static void restart_wakeup_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	cpu_set(cpu, cpu_callin_map);
	local_flush_tlb();
	cpu_set(cpu, cpu_online_map);
	smp_wmb();
}
1078
/*
 * prepare_sleep_cpu - Get the calling CPU ready to enter SLEEP mode
 *
 * Runs on the CPU that is about to sleep (via NMI call).
 */
static void prepare_sleep_cpu(void *unused)
{
	/* mark ourselves as sleeping before touching the caches */
	sleep_mode[smp_processor_id()] = 1;
	smp_mb();
	/* push out all dirty data, then switch the caches off and discard
	 * their contents so nothing stale survives the sleep */
	mn10300_local_dcache_flush_inv();
	hotplug_cpu_disable_cache();
	hotplug_cpu_invalidate_cache();
}
1087
/* when this function called, IE=0, NMID=0. */
static void sleep_cpu(void *unused)
{
	unsigned int cpu_id = smp_processor_id();
	/*
	 * CALL_FUNCTION_NMI_IPI for wakeup_cpu() shall not be requested,
	 * before this cpu goes in SLEEP mode.
	 */
	do {
		smp_mb();
		__sleep_cpu();	/* low-level SLEEP entry (defined elsewhere) */
	} while (sleep_mode[cpu_id]);	/* flag cleared by wakeup_cpu() */
	restart_wakeup_cpu();
}
1102
1103static void run_sleep_cpu(unsigned int cpu)
1104{
1105 unsigned long flags;
1106 cpumask_t cpumask = cpumask_of(cpu);
1107
1108 flags = arch_local_cli_save();
1109 hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1);
1110 hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0);
1111 udelay(1); /* delay for the cpu to sleep. */
1112 arch_local_irq_restore(flags);
1113}
1114
/*
 * wakeup_cpu - Runs on the woken CPU (via NMI) to restore its caches
 *
 * Invalidate and re-enable the local caches, then clear our sleep_mode
 * flag so the loop in sleep_cpu() terminates.
 */
static void wakeup_cpu(void)
{
	hotplug_cpu_invalidate_cache();
	hotplug_cpu_enable_cache();
	smp_mb();
	sleep_mode[smp_processor_id()] = 0;
}
1122
1123static void run_wakeup_cpu(unsigned int cpu)
1124{
1125 unsigned long flags;
1126
1127 flags = arch_local_cli_save();
1128#if NR_CPUS == 2
1129 mn10300_local_dcache_flush_inv();
1130#else
1131 /*
1132 * Before waking up the cpu,
1133 * all online cpus should stop and flush D-Cache for global data.
1134 */
1135#error not support NR_CPUS > 2, when CONFIG_HOTPLUG_CPU=y.
1136#endif
1137 hotplug_cpu_nmi_call_function(cpumask_of(cpu), wakeup_cpu, NULL, 1);
1138 arch_local_irq_restore(flags);
1139}
1140
1141#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/mn10300/kernel/switch_to.S b/arch/mn10300/kernel/switch_to.S
index 630aad71b946..b08cb2e3aebd 100644
--- a/arch/mn10300/kernel/switch_to.S
+++ b/arch/mn10300/kernel/switch_to.S
@@ -15,6 +15,9 @@
15#include <linux/linkage.h> 15#include <linux/linkage.h>
16#include <asm/thread_info.h> 16#include <asm/thread_info.h>
17#include <asm/cpu-regs.h> 17#include <asm/cpu-regs.h>
18#ifdef CONFIG_SMP
19#include <proc/smp-regs.h>
20#endif /* CONFIG_SMP */
18 21
19 .text 22 .text
20 23
@@ -35,7 +38,14 @@ ENTRY(__switch_to)
35 mov d1,a1 38 mov d1,a1
36 39
37 # save prev context 40 # save prev context
41#ifdef CONFIG_SMP
42 mov (CPUID),a2
43 add a2,a2
44 add a2,a2
45 mov (___frame,a2),d0
46#else /* CONFIG_SMP */
38 mov (__frame),d0 47 mov (__frame),d0
48#endif /* CONFIG_SMP */
39 mov d0,(THREAD_FRAME,a0) 49 mov d0,(THREAD_FRAME,a0)
40 mov __switch_back,d0 50 mov __switch_back,d0
41 mov d0,(THREAD_PC,a0) 51 mov d0,(THREAD_PC,a0)
@@ -59,7 +69,14 @@ ENTRY(__switch_to)
59#endif 69#endif
60 70
61 mov (THREAD_FRAME,a1),a2 71 mov (THREAD_FRAME,a1),a2
72#ifdef CONFIG_SMP
73 mov (CPUID),a0
74 add a0,a0
75 add a0,a0
76 mov a2,(___frame,a0)
77#else /* CONFIG_SMP */
62 mov a2,(__frame) 78 mov a2,(__frame)
79#endif /* CONFIG_SMP */
63 mov (THREAD_PC,a1),a2 80 mov (THREAD_PC,a1),a2
64 mov d2,d0 # for ret_from_fork 81 mov d2,d0 # for ret_from_fork
65 mov d0,a0 # for __switch_to 82 mov d0,a0 # for __switch_to
diff --git a/arch/mn10300/kernel/time.c b/arch/mn10300/kernel/time.c
index 0b5c856b4266..0cb9bdb3b6bd 100644
--- a/arch/mn10300/kernel/time.c
+++ b/arch/mn10300/kernel/time.c
@@ -22,12 +22,7 @@
22#include <asm/processor.h> 22#include <asm/processor.h>
23#include <asm/intctl-regs.h> 23#include <asm/intctl-regs.h>
24#include <asm/rtc.h> 24#include <asm/rtc.h>
25 25#include "internal.h"
26#ifdef CONFIG_MN10300_RTC
27unsigned long mn10300_ioclk; /* system I/O clock frequency */
28unsigned long mn10300_iobclk; /* system I/O clock frequency */
29unsigned long mn10300_tsc_per_HZ; /* number of ioclks per jiffy */
30#endif /* CONFIG_MN10300_RTC */
31 26
32static unsigned long mn10300_last_tsc; /* time-stamp counter at last time 27static unsigned long mn10300_last_tsc; /* time-stamp counter at last time
33 * interrupt occurred */ 28 * interrupt occurred */
@@ -95,6 +90,19 @@ static void __init mn10300_sched_clock_init(void)
95 __muldiv64u(NSEC_PER_SEC, 1 << 16, MN10300_TSCCLK); 90 __muldiv64u(NSEC_PER_SEC, 1 << 16, MN10300_TSCCLK);
96} 91}
97 92
93/**
94 * local_timer_interrupt - Local timer interrupt handler
95 *
96 * Handle local timer interrupts for this CPU. They may have been propagated
97 * to this CPU from the CPU that actually gets them by way of an IPI.
98 */
99irqreturn_t local_timer_interrupt(void)
100{
101 profile_tick(CPU_PROFILING);
102 update_process_times(user_mode(get_irq_regs()));
103 return IRQ_HANDLED;
104}
105
98/* 106/*
99 * advance the kernel's time keeping clocks (xtime and jiffies) 107 * advance the kernel's time keeping clocks (xtime and jiffies)
100 * - we use Timer 0 & 1 cascaded as a clock to nudge us the next time 108 * - we use Timer 0 & 1 cascaded as a clock to nudge us the next time
@@ -103,6 +111,7 @@ static void __init mn10300_sched_clock_init(void)
103static irqreturn_t timer_interrupt(int irq, void *dev_id) 111static irqreturn_t timer_interrupt(int irq, void *dev_id)
104{ 112{
105 unsigned tsc, elapse; 113 unsigned tsc, elapse;
114 irqreturn_t ret;
106 115
107 write_seqlock(&xtime_lock); 116 write_seqlock(&xtime_lock);
108 117
@@ -114,15 +123,16 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
114 mn10300_last_tsc -= MN10300_TSC_PER_HZ; 123 mn10300_last_tsc -= MN10300_TSC_PER_HZ;
115 124
116 /* advance the kernel's time tracking system */ 125 /* advance the kernel's time tracking system */
117 profile_tick(CPU_PROFILING);
118 do_timer(1); 126 do_timer(1);
119 } 127 }
120 128
121 write_sequnlock(&xtime_lock); 129 write_sequnlock(&xtime_lock);
122 130
123 update_process_times(user_mode(get_irq_regs())); 131 ret = local_timer_interrupt();
124 132#ifdef CONFIG_SMP
125 return IRQ_HANDLED; 133 send_IPI_allbutself(LOCAL_TIMER_IPI);
134#endif
135 return ret;
126} 136}
127 137
128/* 138/*
@@ -148,7 +158,7 @@ void __init time_init(void)
148 /* use timer 0 & 1 cascaded to tick at as close to HZ as possible */ 158 /* use timer 0 & 1 cascaded to tick at as close to HZ as possible */
149 setup_irq(TMJCIRQ, &timer_irq); 159 setup_irq(TMJCIRQ, &timer_irq);
150 160
151 set_intr_level(TMJCIRQ, TMJCICR_LEVEL); 161 set_intr_level(TMJCIRQ, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
152 162
153 startup_jiffies_counter(); 163 startup_jiffies_counter();
154 164
diff --git a/arch/mn10300/kernel/traps.c b/arch/mn10300/kernel/traps.c
index 716a221df2f9..c924a1dd3323 100644
--- a/arch/mn10300/kernel/traps.c
+++ b/arch/mn10300/kernel/traps.c
@@ -45,8 +45,13 @@
45#error "INTERRUPT_VECTOR_BASE not aligned to 16MiB boundary!" 45#error "INTERRUPT_VECTOR_BASE not aligned to 16MiB boundary!"
46#endif 46#endif
47 47
48#ifdef CONFIG_SMP
49struct pt_regs *___frame[NR_CPUS]; /* current frame pointer */
50EXPORT_SYMBOL(___frame);
51#else /* CONFIG_SMP */
48struct pt_regs *__frame; /* current frame pointer */ 52struct pt_regs *__frame; /* current frame pointer */
49EXPORT_SYMBOL(__frame); 53EXPORT_SYMBOL(__frame);
54#endif /* CONFIG_SMP */
50 55
51int kstack_depth_to_print = 24; 56int kstack_depth_to_print = 24;
52 57
@@ -221,11 +226,14 @@ void show_registers_only(struct pt_regs *regs)
221 printk(KERN_EMERG "threadinfo=%p task=%p)\n", 226 printk(KERN_EMERG "threadinfo=%p task=%p)\n",
222 current_thread_info(), current); 227 current_thread_info(), current);
223 228
224 if ((unsigned long) current >= 0x90000000UL && 229 if ((unsigned long) current >= PAGE_OFFSET &&
225 (unsigned long) current < 0x94000000UL) 230 (unsigned long) current < (unsigned long)high_memory)
226 printk(KERN_EMERG "Process %s (pid: %d)\n", 231 printk(KERN_EMERG "Process %s (pid: %d)\n",
227 current->comm, current->pid); 232 current->comm, current->pid);
228 233
234#ifdef CONFIG_SMP
235 printk(KERN_EMERG "CPUID: %08x\n", CPUID);
236#endif
229 printk(KERN_EMERG "CPUP: %04hx\n", CPUP); 237 printk(KERN_EMERG "CPUP: %04hx\n", CPUP);
230 printk(KERN_EMERG "TBR: %08x\n", TBR); 238 printk(KERN_EMERG "TBR: %08x\n", TBR);
231 printk(KERN_EMERG "DEAR: %08x\n", DEAR); 239 printk(KERN_EMERG "DEAR: %08x\n", DEAR);
@@ -521,8 +529,12 @@ void __init set_intr_stub(enum exception_code code, void *handler)
521{ 529{
522 unsigned long addr; 530 unsigned long addr;
523 u8 *vector = (u8 *)(CONFIG_INTERRUPT_VECTOR_BASE + code); 531 u8 *vector = (u8 *)(CONFIG_INTERRUPT_VECTOR_BASE + code);
532 unsigned long flags;
524 533
525 addr = (unsigned long) handler - (unsigned long) vector; 534 addr = (unsigned long) handler - (unsigned long) vector;
535
536 flags = arch_local_cli_save();
537
526 vector[0] = 0xdc; /* JMP handler */ 538 vector[0] = 0xdc; /* JMP handler */
527 vector[1] = addr; 539 vector[1] = addr;
528 vector[2] = addr >> 8; 540 vector[2] = addr >> 8;
@@ -532,6 +544,8 @@ void __init set_intr_stub(enum exception_code code, void *handler)
532 vector[6] = 0xcb; 544 vector[6] = 0xcb;
533 vector[7] = 0xcb; 545 vector[7] = 0xcb;
534 546
547 arch_local_irq_restore(flags);
548
535#ifndef CONFIG_MN10300_CACHE_SNOOP 549#ifndef CONFIG_MN10300_CACHE_SNOOP
536 mn10300_dcache_flush_inv(); 550 mn10300_dcache_flush_inv();
537 mn10300_icache_inv(); 551 mn10300_icache_inv();