diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-27 21:53:26 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-27 21:53:26 -0400 |
commit | bdab225015fbbb45ccd8913f5d7c01b2bf67d8b2 (patch) | |
tree | 5ef62301face958977a084bf2b6c5300296a25f2 /arch/mn10300/kernel | |
parent | 7c5814c7199851c5fe9395d08fc1ab3c8c1531ea (diff) | |
parent | 7c7fcf762e405eb040ee10d22d656a791f616122 (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-mn10300
* git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-mn10300: (44 commits)
MN10300: Save frame pointer in thread_info struct rather than global var
MN10300: Change "Matsushita" to "Panasonic".
MN10300: Create a defconfig for the ASB2364 board
MN10300: Update the ASB2303 defconfig
MN10300: ASB2364: Add support for SMSC911X and SMC911X
MN10300: ASB2364: Handle the IRQ multiplexer in the FPGA
MN10300: Generic time support
MN10300: Specify an ELF HWCAP flag for MN10300 Atomic Operations Unit support
MN10300: Map userspace atomic op regs as a vmalloc page
MN10300: Add Panasonic AM34 subarch and implement SMP
MN10300: Delete idle_timestamp from irq_cpustat_t
MN10300: Make various interrupt priority settings configurable
MN10300: Optimise do_csum()
MN10300: Implement atomic ops using atomic ops unit
MN10300: Make the FPU operate in non-lazy mode under SMP
MN10300: SMP TLB flushing
MN10300: Use the [ID]PTEL2 registers rather than [ID]PTEL for TLB control
MN10300: Make the use of PIDR to mark TLB entries controllable
MN10300: Rename __flush_tlb*() to local_flush_tlb*()
MN10300: AM34 erratum requires MMUCTR read and write on exception entry
...
Diffstat (limited to 'arch/mn10300/kernel')
31 files changed, 2809 insertions, 532 deletions
diff --git a/arch/mn10300/kernel/Makefile b/arch/mn10300/kernel/Makefile index 23f2ab67574c..8f5f1e81baf5 100644 --- a/arch/mn10300/kernel/Makefile +++ b/arch/mn10300/kernel/Makefile | |||
@@ -3,13 +3,16 @@ | |||
3 | # | 3 | # |
4 | extra-y := head.o init_task.o vmlinux.lds | 4 | extra-y := head.o init_task.o vmlinux.lds |
5 | 5 | ||
6 | obj-y := process.o signal.o entry.o fpu.o traps.o irq.o \ | 6 | fpu-obj-y := fpu-nofpu.o fpu-nofpu-low.o |
7 | fpu-obj-$(CONFIG_FPU) := fpu.o fpu-low.o | ||
8 | |||
9 | obj-y := process.o signal.o entry.o traps.o irq.o \ | ||
7 | ptrace.o setup.o time.o sys_mn10300.o io.o kthread.o \ | 10 | ptrace.o setup.o time.o sys_mn10300.o io.o kthread.o \ |
8 | switch_to.o mn10300_ksyms.o kernel_execve.o | 11 | switch_to.o mn10300_ksyms.o kernel_execve.o $(fpu-obj-y) |
9 | 12 | ||
10 | obj-$(CONFIG_MN10300_WD_TIMER) += mn10300-watchdog.o mn10300-watchdog-low.o | 13 | obj-$(CONFIG_SMP) += smp.o smp-low.o |
11 | 14 | ||
12 | obj-$(CONFIG_FPU) += fpu-low.o | 15 | obj-$(CONFIG_MN10300_WD_TIMER) += mn10300-watchdog.o mn10300-watchdog-low.o |
13 | 16 | ||
14 | obj-$(CONFIG_MN10300_TTYSM) += mn10300-serial.o mn10300-serial-low.o \ | 17 | obj-$(CONFIG_MN10300_TTYSM) += mn10300-serial.o mn10300-serial-low.o \ |
15 | mn10300-debug.o | 18 | mn10300-debug.o |
@@ -17,7 +20,7 @@ obj-$(CONFIG_GDBSTUB) += gdb-stub.o gdb-low.o | |||
17 | obj-$(CONFIG_GDBSTUB_ON_TTYSx) += gdb-io-serial.o gdb-io-serial-low.o | 20 | obj-$(CONFIG_GDBSTUB_ON_TTYSx) += gdb-io-serial.o gdb-io-serial-low.o |
18 | obj-$(CONFIG_GDBSTUB_ON_TTYSMx) += gdb-io-ttysm.o gdb-io-ttysm-low.o | 21 | obj-$(CONFIG_GDBSTUB_ON_TTYSMx) += gdb-io-ttysm.o gdb-io-ttysm-low.o |
19 | 22 | ||
20 | ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y) | 23 | ifeq ($(CONFIG_MN10300_CACHE_ENABLED),y) |
21 | obj-$(CONFIG_GDBSTUB) += gdb-cache.o | 24 | obj-$(CONFIG_GDBSTUB) += gdb-cache.o |
22 | endif | 25 | endif |
23 | 26 | ||
@@ -25,3 +28,5 @@ obj-$(CONFIG_MN10300_RTC) += rtc.o | |||
25 | obj-$(CONFIG_PROFILE) += profile.o profile-low.o | 28 | obj-$(CONFIG_PROFILE) += profile.o profile-low.o |
26 | obj-$(CONFIG_MODULES) += module.o | 29 | obj-$(CONFIG_MODULES) += module.o |
27 | obj-$(CONFIG_KPROBES) += kprobes.o | 30 | obj-$(CONFIG_KPROBES) += kprobes.o |
31 | obj-$(CONFIG_CSRC_MN10300) += csrc-mn10300.o | ||
32 | obj-$(CONFIG_CEVT_MN10300) += cevt-mn10300.o | ||
diff --git a/arch/mn10300/kernel/asm-offsets.c b/arch/mn10300/kernel/asm-offsets.c index 02dc7e461fef..96f24fab7de6 100644 --- a/arch/mn10300/kernel/asm-offsets.c +++ b/arch/mn10300/kernel/asm-offsets.c | |||
@@ -23,6 +23,7 @@ void foo(void) | |||
23 | 23 | ||
24 | OFFSET(TI_task, thread_info, task); | 24 | OFFSET(TI_task, thread_info, task); |
25 | OFFSET(TI_exec_domain, thread_info, exec_domain); | 25 | OFFSET(TI_exec_domain, thread_info, exec_domain); |
26 | OFFSET(TI_frame, thread_info, frame); | ||
26 | OFFSET(TI_flags, thread_info, flags); | 27 | OFFSET(TI_flags, thread_info, flags); |
27 | OFFSET(TI_cpu, thread_info, cpu); | 28 | OFFSET(TI_cpu, thread_info, cpu); |
28 | OFFSET(TI_preempt_count, thread_info, preempt_count); | 29 | OFFSET(TI_preempt_count, thread_info, preempt_count); |
@@ -66,7 +67,15 @@ void foo(void) | |||
66 | OFFSET(THREAD_SP, thread_struct, sp); | 67 | OFFSET(THREAD_SP, thread_struct, sp); |
67 | OFFSET(THREAD_A3, thread_struct, a3); | 68 | OFFSET(THREAD_A3, thread_struct, a3); |
68 | OFFSET(THREAD_USP, thread_struct, usp); | 69 | OFFSET(THREAD_USP, thread_struct, usp); |
69 | OFFSET(THREAD_FRAME, thread_struct, __frame); | 70 | #ifdef CONFIG_FPU |
71 | OFFSET(THREAD_FPU_FLAGS, thread_struct, fpu_flags); | ||
72 | OFFSET(THREAD_FPU_STATE, thread_struct, fpu_state); | ||
73 | DEFINE(__THREAD_USING_FPU, THREAD_USING_FPU); | ||
74 | DEFINE(__THREAD_HAS_FPU, THREAD_HAS_FPU); | ||
75 | #endif /* CONFIG_FPU */ | ||
76 | BLANK(); | ||
77 | |||
78 | OFFSET(TASK_THREAD, task_struct, thread); | ||
70 | BLANK(); | 79 | BLANK(); |
71 | 80 | ||
72 | DEFINE(CLONE_VM_asm, CLONE_VM); | 81 | DEFINE(CLONE_VM_asm, CLONE_VM); |
diff --git a/arch/mn10300/kernel/cevt-mn10300.c b/arch/mn10300/kernel/cevt-mn10300.c new file mode 100644 index 000000000000..d4cb535bf786 --- /dev/null +++ b/arch/mn10300/kernel/cevt-mn10300.c | |||
@@ -0,0 +1,131 @@ | |||
1 | /* MN10300 clockevents | ||
2 | * | ||
3 | * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by Mark Salter (msalter@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/clockchips.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/percpu.h> | ||
14 | #include <linux/smp.h> | ||
15 | #include <asm/timex.h> | ||
16 | #include "internal.h" | ||
17 | |||
18 | #ifdef CONFIG_SMP | ||
19 | #if (CONFIG_NR_CPUS > 2) && !defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) | ||
20 | #error "This doesn't scale well! Need per-core local timers." | ||
21 | #endif | ||
22 | #else /* CONFIG_SMP */ | ||
23 | #define stop_jiffies_counter1() | ||
24 | #define reload_jiffies_counter1(x) | ||
25 | #define TMJC1IRQ TMJCIRQ | ||
26 | #endif | ||
27 | |||
28 | |||
29 | static int next_event(unsigned long delta, | ||
30 | struct clock_event_device *evt) | ||
31 | { | ||
32 | unsigned int cpu = smp_processor_id(); | ||
33 | |||
34 | if (cpu == 0) { | ||
35 | stop_jiffies_counter(); | ||
36 | reload_jiffies_counter(delta - 1); | ||
37 | } else { | ||
38 | stop_jiffies_counter1(); | ||
39 | reload_jiffies_counter1(delta - 1); | ||
40 | } | ||
41 | return 0; | ||
42 | } | ||
43 | |||
44 | static void set_clock_mode(enum clock_event_mode mode, | ||
45 | struct clock_event_device *evt) | ||
46 | { | ||
47 | /* Nothing to do ... */ | ||
48 | } | ||
49 | |||
50 | static DEFINE_PER_CPU(struct clock_event_device, mn10300_clockevent_device); | ||
51 | static DEFINE_PER_CPU(struct irqaction, timer_irq); | ||
52 | |||
53 | static irqreturn_t timer_interrupt(int irq, void *dev_id) | ||
54 | { | ||
55 | struct clock_event_device *cd; | ||
56 | unsigned int cpu = smp_processor_id(); | ||
57 | |||
58 | if (cpu == 0) | ||
59 | stop_jiffies_counter(); | ||
60 | else | ||
61 | stop_jiffies_counter1(); | ||
62 | |||
63 | cd = &per_cpu(mn10300_clockevent_device, cpu); | ||
64 | cd->event_handler(cd); | ||
65 | |||
66 | return IRQ_HANDLED; | ||
67 | } | ||
68 | |||
69 | static void event_handler(struct clock_event_device *dev) | ||
70 | { | ||
71 | } | ||
72 | |||
73 | int __init init_clockevents(void) | ||
74 | { | ||
75 | struct clock_event_device *cd; | ||
76 | struct irqaction *iact; | ||
77 | unsigned int cpu = smp_processor_id(); | ||
78 | |||
79 | cd = &per_cpu(mn10300_clockevent_device, cpu); | ||
80 | |||
81 | if (cpu == 0) { | ||
82 | stop_jiffies_counter(); | ||
83 | cd->irq = TMJCIRQ; | ||
84 | } else { | ||
85 | stop_jiffies_counter1(); | ||
86 | cd->irq = TMJC1IRQ; | ||
87 | } | ||
88 | |||
89 | cd->name = "Timestamp"; | ||
90 | cd->features = CLOCK_EVT_FEAT_ONESHOT; | ||
91 | |||
92 | /* Calculate the min / max delta */ | ||
93 | clockevent_set_clock(cd, MN10300_JCCLK); | ||
94 | |||
95 | cd->max_delta_ns = clockevent_delta2ns(TMJCBR_MAX, cd); | ||
96 | cd->min_delta_ns = clockevent_delta2ns(100, cd); | ||
97 | |||
98 | cd->rating = 200; | ||
99 | cd->cpumask = cpumask_of(smp_processor_id()); | ||
100 | cd->set_mode = set_clock_mode; | ||
101 | cd->event_handler = event_handler; | ||
102 | cd->set_next_event = next_event; | ||
103 | |||
104 | iact = &per_cpu(timer_irq, cpu); | ||
105 | iact->flags = IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER; | ||
106 | iact->handler = timer_interrupt; | ||
107 | |||
108 | clockevents_register_device(cd); | ||
109 | |||
110 | #if defined(CONFIG_SMP) && !defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) | ||
111 | /* setup timer irq affinity so it only runs on this cpu */ | ||
112 | { | ||
113 | struct irq_desc *desc; | ||
114 | desc = irq_to_desc(cd->irq); | ||
115 | cpumask_copy(desc->affinity, cpumask_of(cpu)); | ||
116 | iact->flags |= IRQF_NOBALANCING; | ||
117 | } | ||
118 | #endif | ||
119 | |||
120 | if (cpu == 0) { | ||
121 | reload_jiffies_counter(MN10300_JC_PER_HZ - 1); | ||
122 | iact->name = "CPU0 Timer"; | ||
123 | } else { | ||
124 | reload_jiffies_counter1(MN10300_JC_PER_HZ - 1); | ||
125 | iact->name = "CPU1 Timer"; | ||
126 | } | ||
127 | |||
128 | setup_jiffies_interrupt(cd->irq, iact); | ||
129 | |||
130 | return 0; | ||
131 | } | ||
diff --git a/arch/mn10300/kernel/csrc-mn10300.c b/arch/mn10300/kernel/csrc-mn10300.c new file mode 100644 index 000000000000..ba2f0c4d6e01 --- /dev/null +++ b/arch/mn10300/kernel/csrc-mn10300.c | |||
@@ -0,0 +1,35 @@ | |||
1 | /* MN10300 clocksource | ||
2 | * | ||
3 | * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by Mark Salter (msalter@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/clocksource.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <asm/timex.h> | ||
14 | #include "internal.h" | ||
15 | |||
16 | static cycle_t mn10300_read(struct clocksource *cs) | ||
17 | { | ||
18 | return read_timestamp_counter(); | ||
19 | } | ||
20 | |||
21 | static struct clocksource clocksource_mn10300 = { | ||
22 | .name = "TSC", | ||
23 | .rating = 200, | ||
24 | .read = mn10300_read, | ||
25 | .mask = CLOCKSOURCE_MASK(32), | ||
26 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
27 | }; | ||
28 | |||
29 | int __init init_clocksource(void) | ||
30 | { | ||
31 | startup_timestamp_counter(); | ||
32 | clocksource_set_clock(&clocksource_mn10300, MN10300_TSCCLK); | ||
33 | clocksource_register(&clocksource_mn10300); | ||
34 | return 0; | ||
35 | } | ||
diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S index 3d394b4eefba..f00b9bafcd3e 100644 --- a/arch/mn10300/kernel/entry.S +++ b/arch/mn10300/kernel/entry.S | |||
@@ -28,25 +28,17 @@ | |||
28 | #include <asm/asm-offsets.h> | 28 | #include <asm/asm-offsets.h> |
29 | #include <asm/frame.inc> | 29 | #include <asm/frame.inc> |
30 | 30 | ||
31 | #if defined(CONFIG_SMP) && defined(CONFIG_GDBSTUB) | ||
32 | #include <asm/gdb-stub.h> | ||
33 | #endif /* CONFIG_SMP && CONFIG_GDBSTUB */ | ||
34 | |||
31 | #ifdef CONFIG_PREEMPT | 35 | #ifdef CONFIG_PREEMPT |
32 | #define preempt_stop __cli | 36 | #define preempt_stop LOCAL_IRQ_DISABLE |
33 | #else | 37 | #else |
34 | #define preempt_stop | 38 | #define preempt_stop |
35 | #define resume_kernel restore_all | 39 | #define resume_kernel restore_all |
36 | #endif | 40 | #endif |
37 | 41 | ||
38 | .macro __cli | ||
39 | and ~EPSW_IM,epsw | ||
40 | or EPSW_IE|MN10300_CLI_LEVEL,epsw | ||
41 | nop | ||
42 | nop | ||
43 | nop | ||
44 | .endm | ||
45 | .macro __sti | ||
46 | or EPSW_IE|EPSW_IM_7,epsw | ||
47 | .endm | ||
48 | |||
49 | |||
50 | .am33_2 | 42 | .am33_2 |
51 | 43 | ||
52 | ############################################################################### | 44 | ############################################################################### |
@@ -88,7 +80,7 @@ syscall_call: | |||
88 | syscall_exit: | 80 | syscall_exit: |
89 | # make sure we don't miss an interrupt setting need_resched or | 81 | # make sure we don't miss an interrupt setting need_resched or |
90 | # sigpending between sampling and the rti | 82 | # sigpending between sampling and the rti |
91 | __cli | 83 | LOCAL_IRQ_DISABLE |
92 | mov (TI_flags,a2),d2 | 84 | mov (TI_flags,a2),d2 |
93 | btst _TIF_ALLWORK_MASK,d2 | 85 | btst _TIF_ALLWORK_MASK,d2 |
94 | bne syscall_exit_work | 86 | bne syscall_exit_work |
@@ -105,7 +97,7 @@ restore_all: | |||
105 | syscall_exit_work: | 97 | syscall_exit_work: |
106 | btst _TIF_SYSCALL_TRACE,d2 | 98 | btst _TIF_SYSCALL_TRACE,d2 |
107 | beq work_pending | 99 | beq work_pending |
108 | __sti # could let syscall_trace_exit() call | 100 | LOCAL_IRQ_ENABLE # could let syscall_trace_exit() call |
109 | # schedule() instead | 101 | # schedule() instead |
110 | mov fp,d0 | 102 | mov fp,d0 |
111 | call syscall_trace_exit[],0 # do_syscall_trace(regs) | 103 | call syscall_trace_exit[],0 # do_syscall_trace(regs) |
@@ -121,7 +113,7 @@ work_resched: | |||
121 | 113 | ||
122 | # make sure we don't miss an interrupt setting need_resched or | 114 | # make sure we don't miss an interrupt setting need_resched or |
123 | # sigpending between sampling and the rti | 115 | # sigpending between sampling and the rti |
124 | __cli | 116 | LOCAL_IRQ_DISABLE |
125 | 117 | ||
126 | # is there any work to be done other than syscall tracing? | 118 | # is there any work to be done other than syscall tracing? |
127 | mov (TI_flags,a2),d2 | 119 | mov (TI_flags,a2),d2 |
@@ -168,7 +160,7 @@ ret_from_intr: | |||
168 | ENTRY(resume_userspace) | 160 | ENTRY(resume_userspace) |
169 | # make sure we don't miss an interrupt setting need_resched or | 161 | # make sure we don't miss an interrupt setting need_resched or |
170 | # sigpending between sampling and the rti | 162 | # sigpending between sampling and the rti |
171 | __cli | 163 | LOCAL_IRQ_DISABLE |
172 | 164 | ||
173 | # is there any work to be done on int/exception return? | 165 | # is there any work to be done on int/exception return? |
174 | mov (TI_flags,a2),d2 | 166 | mov (TI_flags,a2),d2 |
@@ -178,7 +170,7 @@ ENTRY(resume_userspace) | |||
178 | 170 | ||
179 | #ifdef CONFIG_PREEMPT | 171 | #ifdef CONFIG_PREEMPT |
180 | ENTRY(resume_kernel) | 172 | ENTRY(resume_kernel) |
181 | __cli | 173 | LOCAL_IRQ_DISABLE |
182 | mov (TI_preempt_count,a2),d0 # non-zero preempt_count ? | 174 | mov (TI_preempt_count,a2),d0 # non-zero preempt_count ? |
183 | cmp 0,d0 | 175 | cmp 0,d0 |
184 | bne restore_all | 176 | bne restore_all |
@@ -216,31 +208,6 @@ ENTRY(irq_handler) | |||
216 | 208 | ||
217 | ############################################################################### | 209 | ############################################################################### |
218 | # | 210 | # |
219 | # Monitor Signal handler entry point | ||
220 | # | ||
221 | ############################################################################### | ||
222 | ENTRY(monitor_signal) | ||
223 | movbu (0xae000001),d1 | ||
224 | cmp 1,d1 | ||
225 | beq monsignal | ||
226 | ret [],0 | ||
227 | |||
228 | monsignal: | ||
229 | or EPSW_NMID,epsw | ||
230 | mov d0,a0 | ||
231 | mov a0,sp | ||
232 | mov (REG_EPSW,fp),d1 | ||
233 | and ~EPSW_nSL,d1 | ||
234 | mov d1,(REG_EPSW,fp) | ||
235 | movm (sp),[d2,d3,a2,a3,exreg0,exreg1,exother] | ||
236 | mov (sp),a1 | ||
237 | mov a1,usp | ||
238 | movm (sp),[other] | ||
239 | add 4,sp | ||
240 | here: jmp 0x8e000008-here+0x8e000008 | ||
241 | |||
242 | ############################################################################### | ||
243 | # | ||
244 | # Double Fault handler entry point | 211 | # Double Fault handler entry point |
245 | # - note that there will not be a stack, D0/A0 will hold EPSW/PC as were | 212 | # - note that there will not be a stack, D0/A0 will hold EPSW/PC as were |
246 | # | 213 | # |
@@ -276,6 +243,10 @@ double_fault_loop: | |||
276 | ENTRY(raw_bus_error) | 243 | ENTRY(raw_bus_error) |
277 | add -4,sp | 244 | add -4,sp |
278 | mov d0,(sp) | 245 | mov d0,(sp) |
246 | #if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR) | ||
247 | mov (MMUCTR),d0 | ||
248 | mov d0,(MMUCTR) | ||
249 | #endif | ||
279 | mov (BCBERR),d0 # what | 250 | mov (BCBERR),d0 # what |
280 | btst BCBERR_BEMR_DMA,d0 # see if it was an external bus error | 251 | btst BCBERR_BEMR_DMA,d0 # see if it was an external bus error |
281 | beq __common_exception_aux # it wasn't | 252 | beq __common_exception_aux # it wasn't |
@@ -302,11 +273,88 @@ ENTRY(nmi_handler) | |||
302 | add -4,sp | 273 | add -4,sp |
303 | mov d0,(sp) | 274 | mov d0,(sp) |
304 | mov (TBR),d0 | 275 | mov (TBR),d0 |
276 | |||
277 | #ifdef CONFIG_SMP | ||
278 | add -4,sp | ||
279 | mov d0,(sp) # save d0(TBR) | ||
280 | movhu (NMIAGR),d0 | ||
281 | and NMIAGR_GN,d0 | ||
282 | lsr 0x2,d0 | ||
283 | cmp CALL_FUNCTION_NMI_IPI,d0 | ||
284 | bne 5f # if not call function, jump | ||
285 | |||
286 | # function call nmi ipi | ||
287 | add 4,sp # no need to store TBR | ||
288 | mov GxICR_DETECT,d0 # clear NMI request | ||
289 | movbu d0,(GxICR(CALL_FUNCTION_NMI_IPI)) | ||
290 | movhu (GxICR(CALL_FUNCTION_NMI_IPI)),d0 | ||
291 | and ~EPSW_NMID,epsw # enable NMI | ||
292 | |||
293 | mov (sp),d0 # restore d0 | ||
294 | SAVE_ALL | ||
295 | call smp_nmi_call_function_interrupt[],0 | ||
296 | RESTORE_ALL | ||
297 | |||
298 | 5: | ||
299 | #ifdef CONFIG_GDBSTUB | ||
300 | cmp GDB_NMI_IPI,d0 | ||
301 | bne 3f # if not gdb nmi ipi, jump | ||
302 | |||
303 | # gdb nmi ipi | ||
304 | add 4,sp # no need to store TBR | ||
305 | mov GxICR_DETECT,d0 # clear NMI | ||
306 | movbu d0,(GxICR(GDB_NMI_IPI)) | ||
307 | movhu (GxICR(GDB_NMI_IPI)),d0 | ||
308 | and ~EPSW_NMID,epsw # enable NMI | ||
309 | #ifdef CONFIG_MN10300_CACHE_ENABLED | ||
310 | mov (gdbstub_nmi_opr_type),d0 | ||
311 | cmp GDBSTUB_NMI_CACHE_PURGE,d0 | ||
312 | bne 4f # if not gdb cache purge, jump | ||
313 | |||
314 | # gdb cache purge nmi ipi | ||
315 | add -20,sp | ||
316 | mov d1,(4,sp) | ||
317 | mov a0,(8,sp) | ||
318 | mov a1,(12,sp) | ||
319 | mov mdr,d0 | ||
320 | mov d0,(16,sp) | ||
321 | call gdbstub_local_purge_cache[],0 | ||
322 | mov 0x1,d0 | ||
323 | mov (CPUID),d1 | ||
324 | asl d1,d0 | ||
325 | mov gdbstub_nmi_cpumask,a0 | ||
326 | bclr d0,(a0) | ||
327 | mov (4,sp),d1 | ||
328 | mov (8,sp),a0 | ||
329 | mov (12,sp),a1 | ||
330 | mov (16,sp),d0 | ||
331 | mov d0,mdr | ||
332 | add 20,sp | ||
333 | mov (sp),d0 | ||
334 | add 4,sp | ||
335 | rti | ||
336 | 4: | ||
337 | #endif /* CONFIG_MN10300_CACHE_ENABLED */ | ||
338 | # gdb wait nmi ipi | ||
339 | mov (sp),d0 | ||
340 | SAVE_ALL | ||
341 | call gdbstub_nmi_wait[],0 | ||
342 | RESTORE_ALL | ||
343 | 3: | ||
344 | #endif /* CONFIG_GDBSTUB */ | ||
345 | mov (sp),d0 # restore TBR to d0 | ||
346 | add 4,sp | ||
347 | #endif /* CONFIG_SMP */ | ||
348 | |||
305 | bra __common_exception_nonmi | 349 | bra __common_exception_nonmi |
306 | 350 | ||
307 | ENTRY(__common_exception) | 351 | ENTRY(__common_exception) |
308 | add -4,sp | 352 | add -4,sp |
309 | mov d0,(sp) | 353 | mov d0,(sp) |
354 | #if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR) | ||
355 | mov (MMUCTR),d0 | ||
356 | mov d0,(MMUCTR) | ||
357 | #endif | ||
310 | 358 | ||
311 | __common_exception_aux: | 359 | __common_exception_aux: |
312 | mov (TBR),d0 | 360 | mov (TBR),d0 |
@@ -331,15 +379,21 @@ __common_exception_nonmi: | |||
331 | mov d0,(REG_ORIG_D0,fp) | 379 | mov d0,(REG_ORIG_D0,fp) |
332 | 380 | ||
333 | #ifdef CONFIG_GDBSTUB | 381 | #ifdef CONFIG_GDBSTUB |
382 | #ifdef CONFIG_SMP | ||
383 | call gdbstub_busy_check[],0 | ||
384 | and d0,d0 # check return value | ||
385 | beq 2f | ||
386 | #else /* CONFIG_SMP */ | ||
334 | btst 0x01,(gdbstub_busy) | 387 | btst 0x01,(gdbstub_busy) |
335 | beq 2f | 388 | beq 2f |
389 | #endif /* CONFIG_SMP */ | ||
336 | and ~EPSW_IE,epsw | 390 | and ~EPSW_IE,epsw |
337 | mov fp,d0 | 391 | mov fp,d0 |
338 | mov a2,d1 | 392 | mov a2,d1 |
339 | call gdbstub_exception[],0 # gdbstub itself caused an exception | 393 | call gdbstub_exception[],0 # gdbstub itself caused an exception |
340 | bra restore_all | 394 | bra restore_all |
341 | 2: | 395 | 2: |
342 | #endif | 396 | #endif /* CONFIG_GDBSTUB */ |
343 | 397 | ||
344 | mov fp,d0 # arg 0: stacked register file | 398 | mov fp,d0 # arg 0: stacked register file |
345 | mov a2,d1 # arg 1: exception number | 399 | mov a2,d1 # arg 1: exception number |
@@ -374,11 +428,7 @@ ENTRY(set_excp_vector) | |||
374 | add exception_table,d0 | 428 | add exception_table,d0 |
375 | mov d1,(d0) | 429 | mov d1,(d0) |
376 | mov 4,d1 | 430 | mov 4,d1 |
377 | #if defined(CONFIG_MN10300_CACHE_WBACK) | ||
378 | jmp mn10300_dcache_flush_inv_range2 | ||
379 | #else | ||
380 | ret [],0 | 431 | ret [],0 |
381 | #endif | ||
382 | 432 | ||
383 | ############################################################################### | 433 | ############################################################################### |
384 | # | 434 | # |
diff --git a/arch/mn10300/kernel/fpu-low.S b/arch/mn10300/kernel/fpu-low.S index 96cfd47e68d5..78df25cfae29 100644 --- a/arch/mn10300/kernel/fpu-low.S +++ b/arch/mn10300/kernel/fpu-low.S | |||
@@ -8,25 +8,14 @@ | |||
8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
9 | * 2 of the Licence, or (at your option) any later version. | 9 | * 2 of the Licence, or (at your option) any later version. |
10 | */ | 10 | */ |
11 | #include <linux/linkage.h> | ||
11 | #include <asm/cpu-regs.h> | 12 | #include <asm/cpu-regs.h> |
13 | #include <asm/smp.h> | ||
14 | #include <asm/thread_info.h> | ||
15 | #include <asm/asm-offsets.h> | ||
16 | #include <asm/frame.inc> | ||
12 | 17 | ||
13 | ############################################################################### | 18 | .macro FPU_INIT_STATE_ALL |
14 | # | ||
15 | # void fpu_init_state(void) | ||
16 | # - initialise the FPU | ||
17 | # | ||
18 | ############################################################################### | ||
19 | .globl fpu_init_state | ||
20 | .type fpu_init_state,@function | ||
21 | fpu_init_state: | ||
22 | mov epsw,d0 | ||
23 | or EPSW_FE,epsw | ||
24 | |||
25 | #ifdef CONFIG_MN10300_PROC_MN103E010 | ||
26 | nop | ||
27 | nop | ||
28 | nop | ||
29 | #endif | ||
30 | fmov 0,fs0 | 19 | fmov 0,fs0 |
31 | fmov fs0,fs1 | 20 | fmov fs0,fs1 |
32 | fmov fs0,fs2 | 21 | fmov fs0,fs2 |
@@ -60,7 +49,100 @@ fpu_init_state: | |||
60 | fmov fs0,fs30 | 49 | fmov fs0,fs30 |
61 | fmov fs0,fs31 | 50 | fmov fs0,fs31 |
62 | fmov FPCR_INIT,fpcr | 51 | fmov FPCR_INIT,fpcr |
52 | .endm | ||
53 | |||
54 | .macro FPU_SAVE_ALL areg,dreg | ||
55 | fmov fs0,(\areg+) | ||
56 | fmov fs1,(\areg+) | ||
57 | fmov fs2,(\areg+) | ||
58 | fmov fs3,(\areg+) | ||
59 | fmov fs4,(\areg+) | ||
60 | fmov fs5,(\areg+) | ||
61 | fmov fs6,(\areg+) | ||
62 | fmov fs7,(\areg+) | ||
63 | fmov fs8,(\areg+) | ||
64 | fmov fs9,(\areg+) | ||
65 | fmov fs10,(\areg+) | ||
66 | fmov fs11,(\areg+) | ||
67 | fmov fs12,(\areg+) | ||
68 | fmov fs13,(\areg+) | ||
69 | fmov fs14,(\areg+) | ||
70 | fmov fs15,(\areg+) | ||
71 | fmov fs16,(\areg+) | ||
72 | fmov fs17,(\areg+) | ||
73 | fmov fs18,(\areg+) | ||
74 | fmov fs19,(\areg+) | ||
75 | fmov fs20,(\areg+) | ||
76 | fmov fs21,(\areg+) | ||
77 | fmov fs22,(\areg+) | ||
78 | fmov fs23,(\areg+) | ||
79 | fmov fs24,(\areg+) | ||
80 | fmov fs25,(\areg+) | ||
81 | fmov fs26,(\areg+) | ||
82 | fmov fs27,(\areg+) | ||
83 | fmov fs28,(\areg+) | ||
84 | fmov fs29,(\areg+) | ||
85 | fmov fs30,(\areg+) | ||
86 | fmov fs31,(\areg+) | ||
87 | fmov fpcr,\dreg | ||
88 | mov \dreg,(\areg) | ||
89 | .endm | ||
90 | |||
91 | .macro FPU_RESTORE_ALL areg,dreg | ||
92 | fmov (\areg+),fs0 | ||
93 | fmov (\areg+),fs1 | ||
94 | fmov (\areg+),fs2 | ||
95 | fmov (\areg+),fs3 | ||
96 | fmov (\areg+),fs4 | ||
97 | fmov (\areg+),fs5 | ||
98 | fmov (\areg+),fs6 | ||
99 | fmov (\areg+),fs7 | ||
100 | fmov (\areg+),fs8 | ||
101 | fmov (\areg+),fs9 | ||
102 | fmov (\areg+),fs10 | ||
103 | fmov (\areg+),fs11 | ||
104 | fmov (\areg+),fs12 | ||
105 | fmov (\areg+),fs13 | ||
106 | fmov (\areg+),fs14 | ||
107 | fmov (\areg+),fs15 | ||
108 | fmov (\areg+),fs16 | ||
109 | fmov (\areg+),fs17 | ||
110 | fmov (\areg+),fs18 | ||
111 | fmov (\areg+),fs19 | ||
112 | fmov (\areg+),fs20 | ||
113 | fmov (\areg+),fs21 | ||
114 | fmov (\areg+),fs22 | ||
115 | fmov (\areg+),fs23 | ||
116 | fmov (\areg+),fs24 | ||
117 | fmov (\areg+),fs25 | ||
118 | fmov (\areg+),fs26 | ||
119 | fmov (\areg+),fs27 | ||
120 | fmov (\areg+),fs28 | ||
121 | fmov (\areg+),fs29 | ||
122 | fmov (\areg+),fs30 | ||
123 | fmov (\areg+),fs31 | ||
124 | mov (\areg),\dreg | ||
125 | fmov \dreg,fpcr | ||
126 | .endm | ||
63 | 127 | ||
128 | ############################################################################### | ||
129 | # | ||
130 | # void fpu_init_state(void) | ||
131 | # - initialise the FPU | ||
132 | # | ||
133 | ############################################################################### | ||
134 | .globl fpu_init_state | ||
135 | .type fpu_init_state,@function | ||
136 | fpu_init_state: | ||
137 | mov epsw,d0 | ||
138 | or EPSW_FE,epsw | ||
139 | |||
140 | #ifdef CONFIG_MN10300_PROC_MN103E010 | ||
141 | nop | ||
142 | nop | ||
143 | nop | ||
144 | #endif | ||
145 | FPU_INIT_STATE_ALL | ||
64 | #ifdef CONFIG_MN10300_PROC_MN103E010 | 146 | #ifdef CONFIG_MN10300_PROC_MN103E010 |
65 | nop | 147 | nop |
66 | nop | 148 | nop |
@@ -89,40 +171,7 @@ fpu_save: | |||
89 | nop | 171 | nop |
90 | #endif | 172 | #endif |
91 | mov d0,a0 | 173 | mov d0,a0 |
92 | fmov fs0,(a0+) | 174 | FPU_SAVE_ALL a0,d0 |
93 | fmov fs1,(a0+) | ||
94 | fmov fs2,(a0+) | ||
95 | fmov fs3,(a0+) | ||
96 | fmov fs4,(a0+) | ||
97 | fmov fs5,(a0+) | ||
98 | fmov fs6,(a0+) | ||
99 | fmov fs7,(a0+) | ||
100 | fmov fs8,(a0+) | ||
101 | fmov fs9,(a0+) | ||
102 | fmov fs10,(a0+) | ||
103 | fmov fs11,(a0+) | ||
104 | fmov fs12,(a0+) | ||
105 | fmov fs13,(a0+) | ||
106 | fmov fs14,(a0+) | ||
107 | fmov fs15,(a0+) | ||
108 | fmov fs16,(a0+) | ||
109 | fmov fs17,(a0+) | ||
110 | fmov fs18,(a0+) | ||
111 | fmov fs19,(a0+) | ||
112 | fmov fs20,(a0+) | ||
113 | fmov fs21,(a0+) | ||
114 | fmov fs22,(a0+) | ||
115 | fmov fs23,(a0+) | ||
116 | fmov fs24,(a0+) | ||
117 | fmov fs25,(a0+) | ||
118 | fmov fs26,(a0+) | ||
119 | fmov fs27,(a0+) | ||
120 | fmov fs28,(a0+) | ||
121 | fmov fs29,(a0+) | ||
122 | fmov fs30,(a0+) | ||
123 | fmov fs31,(a0+) | ||
124 | fmov fpcr,d0 | ||
125 | mov d0,(a0) | ||
126 | #ifdef CONFIG_MN10300_PROC_MN103E010 | 175 | #ifdef CONFIG_MN10300_PROC_MN103E010 |
127 | nop | 176 | nop |
128 | nop | 177 | nop |
@@ -135,63 +184,75 @@ fpu_save: | |||
135 | 184 | ||
136 | ############################################################################### | 185 | ############################################################################### |
137 | # | 186 | # |
138 | # void fpu_restore(struct fpu_state_struct *) | 187 | # void fpu_disabled(void) |
139 | # - restore the fpu state | 188 | # - handle an exception due to the FPU being disabled |
140 | # - note that an FPU Operational exception might occur during this process | 189 | # when CONFIG_FPU is enabled |
141 | # | 190 | # |
142 | ############################################################################### | 191 | ############################################################################### |
143 | .globl fpu_restore | 192 | .type fpu_disabled,@function |
144 | .type fpu_restore,@function | 193 | .globl fpu_disabled |
145 | fpu_restore: | 194 | fpu_disabled: |
146 | mov epsw,d1 | 195 | or EPSW_nAR|EPSW_FE,epsw |
147 | or EPSW_FE,epsw /* enable the FPU so we can access it */ | ||
148 | |||
149 | #ifdef CONFIG_MN10300_PROC_MN103E010 | ||
150 | nop | 196 | nop |
151 | nop | 197 | nop |
152 | #endif | ||
153 | mov d0,a0 | ||
154 | fmov (a0+),fs0 | ||
155 | fmov (a0+),fs1 | ||
156 | fmov (a0+),fs2 | ||
157 | fmov (a0+),fs3 | ||
158 | fmov (a0+),fs4 | ||
159 | fmov (a0+),fs5 | ||
160 | fmov (a0+),fs6 | ||
161 | fmov (a0+),fs7 | ||
162 | fmov (a0+),fs8 | ||
163 | fmov (a0+),fs9 | ||
164 | fmov (a0+),fs10 | ||
165 | fmov (a0+),fs11 | ||
166 | fmov (a0+),fs12 | ||
167 | fmov (a0+),fs13 | ||
168 | fmov (a0+),fs14 | ||
169 | fmov (a0+),fs15 | ||
170 | fmov (a0+),fs16 | ||
171 | fmov (a0+),fs17 | ||
172 | fmov (a0+),fs18 | ||
173 | fmov (a0+),fs19 | ||
174 | fmov (a0+),fs20 | ||
175 | fmov (a0+),fs21 | ||
176 | fmov (a0+),fs22 | ||
177 | fmov (a0+),fs23 | ||
178 | fmov (a0+),fs24 | ||
179 | fmov (a0+),fs25 | ||
180 | fmov (a0+),fs26 | ||
181 | fmov (a0+),fs27 | ||
182 | fmov (a0+),fs28 | ||
183 | fmov (a0+),fs29 | ||
184 | fmov (a0+),fs30 | ||
185 | fmov (a0+),fs31 | ||
186 | mov (a0),d0 | ||
187 | fmov d0,fpcr | ||
188 | #ifdef CONFIG_MN10300_PROC_MN103E010 | ||
189 | nop | 198 | nop |
199 | |||
200 | mov sp,a1 | ||
201 | mov (a1),d1 /* get epsw of user context */ | ||
202 | and ~(THREAD_SIZE-1),a1 /* a1: (thread_info *ti) */ | ||
203 | mov (TI_task,a1),a2 /* a2: (task_struct *tsk) */ | ||
204 | btst EPSW_nSL,d1 | ||
205 | beq fpu_used_in_kernel | ||
206 | |||
207 | or EPSW_FE,d1 | ||
208 | mov d1,(sp) | ||
209 | mov (TASK_THREAD+THREAD_FPU_FLAGS,a2),d1 | ||
210 | #ifndef CONFIG_LAZY_SAVE_FPU | ||
211 | or __THREAD_HAS_FPU,d1 | ||
212 | mov d1,(TASK_THREAD+THREAD_FPU_FLAGS,a2) | ||
213 | #else /* !CONFIG_LAZY_SAVE_FPU */ | ||
214 | mov (fpu_state_owner),a0 | ||
215 | cmp 0,a0 | ||
216 | beq fpu_regs_save_end | ||
217 | |||
218 | mov (TASK_THREAD+THREAD_UREGS,a0),a1 | ||
219 | add TASK_THREAD+THREAD_FPU_STATE,a0 | ||
220 | FPU_SAVE_ALL a0,d0 | ||
221 | |||
222 | mov (REG_EPSW,a1),d0 | ||
223 | and ~EPSW_FE,d0 | ||
224 | mov d0,(REG_EPSW,a1) | ||
225 | |||
226 | fpu_regs_save_end: | ||
227 | mov a2,(fpu_state_owner) | ||
228 | #endif /* !CONFIG_LAZY_SAVE_FPU */ | ||
229 | |||
230 | btst __THREAD_USING_FPU,d1 | ||
231 | beq fpu_regs_init | ||
232 | add TASK_THREAD+THREAD_FPU_STATE,a2 | ||
233 | FPU_RESTORE_ALL a2,d0 | ||
234 | rti | ||
235 | |||
236 | fpu_regs_init: | ||
237 | FPU_INIT_STATE_ALL | ||
238 | add TASK_THREAD+THREAD_FPU_FLAGS,a2 | ||
239 | bset __THREAD_USING_FPU,(0,a2) | ||
240 | rti | ||
241 | |||
242 | fpu_used_in_kernel: | ||
243 | and ~(EPSW_nAR|EPSW_FE),epsw | ||
190 | nop | 244 | nop |
191 | nop | 245 | nop |
192 | #endif | ||
193 | 246 | ||
194 | mov d1,epsw | 247 | add -4,sp |
195 | ret [],0 | 248 | SAVE_ALL |
249 | mov -1,d0 | ||
250 | mov d0,(REG_ORIG_D0,fp) | ||
251 | |||
252 | and ~EPSW_NMID,epsw | ||
253 | |||
254 | mov fp,d0 | ||
255 | call fpu_disabled_in_kernel[],0 | ||
256 | jmp ret_from_exception | ||
196 | 257 | ||
197 | .size fpu_restore,.-fpu_restore | 258 | .size fpu_disabled,.-fpu_disabled |
diff --git a/arch/mn10300/kernel/fpu-nofpu-low.S b/arch/mn10300/kernel/fpu-nofpu-low.S new file mode 100644 index 000000000000..7ea087a549f4 --- /dev/null +++ b/arch/mn10300/kernel/fpu-nofpu-low.S | |||
@@ -0,0 +1,39 @@ | |||
1 | /* MN10300 Low level FPU management operations | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/linkage.h> | ||
12 | #include <asm/cpu-regs.h> | ||
13 | #include <asm/smp.h> | ||
14 | #include <asm/thread_info.h> | ||
15 | #include <asm/asm-offsets.h> | ||
16 | #include <asm/frame.inc> | ||
17 | |||
18 | ############################################################################### | ||
19 | # | ||
20 | # void fpu_disabled(void) | ||
21 | # - handle an exception due to the FPU being disabled | ||
22 | # when CONFIG_FPU is disabled | ||
23 | # | ||
24 | ############################################################################### | ||
25 | .type fpu_disabled,@function | ||
26 | .globl fpu_disabled | ||
27 | fpu_disabled: | ||
28 | add -4,sp | ||
29 | SAVE_ALL | ||
30 | mov -1,d0 | ||
31 | mov d0,(REG_ORIG_D0,fp) | ||
32 | |||
33 | and ~EPSW_NMID,epsw | ||
34 | |||
35 | mov fp,d0 | ||
36 | call unexpected_fpu_exception[],0 | ||
37 | jmp ret_from_exception | ||
38 | |||
39 | .size fpu_disabled,.-fpu_disabled | ||
diff --git a/arch/mn10300/kernel/fpu-nofpu.c b/arch/mn10300/kernel/fpu-nofpu.c new file mode 100644 index 000000000000..31c765b92c5d --- /dev/null +++ b/arch/mn10300/kernel/fpu-nofpu.c | |||
@@ -0,0 +1,30 @@ | |||
1 | /* MN10300 FPU management | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <asm/fpu.h> | ||
12 | |||
13 | /* | ||
14 | * handle an FPU operational exception | ||
15 | * - there's a possibility that if the FPU is asynchronous, the signal might | ||
16 | * be meant for a process other than the current one | ||
17 | */ | ||
18 | asmlinkage | ||
19 | void unexpected_fpu_exception(struct pt_regs *regs, enum exception_code code) | ||
20 | { | ||
21 | panic("An FPU exception was received, but there's no FPU enabled."); | ||
22 | } | ||
23 | |||
24 | /* | ||
25 | * fill in the FPU structure for a core dump | ||
26 | */ | ||
27 | int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpreg) | ||
28 | { | ||
29 | return 0; /* not valid */ | ||
30 | } | ||
diff --git a/arch/mn10300/kernel/fpu.c b/arch/mn10300/kernel/fpu.c index e705f25ad5ff..5f9c3fa19a85 100644 --- a/arch/mn10300/kernel/fpu.c +++ b/arch/mn10300/kernel/fpu.c | |||
@@ -12,56 +12,19 @@ | |||
12 | #include <asm/fpu.h> | 12 | #include <asm/fpu.h> |
13 | #include <asm/elf.h> | 13 | #include <asm/elf.h> |
14 | #include <asm/exceptions.h> | 14 | #include <asm/exceptions.h> |
15 | #include <asm/system.h> | ||
15 | 16 | ||
17 | #ifdef CONFIG_LAZY_SAVE_FPU | ||
16 | struct task_struct *fpu_state_owner; | 18 | struct task_struct *fpu_state_owner; |
19 | #endif | ||
17 | 20 | ||
18 | /* | 21 | /* |
19 | * handle an exception due to the FPU being disabled | 22 | * error functions in FPU disabled exception |
20 | */ | 23 | */ |
21 | asmlinkage void fpu_disabled(struct pt_regs *regs, enum exception_code code) | 24 | asmlinkage void fpu_disabled_in_kernel(struct pt_regs *regs) |
22 | { | 25 | { |
23 | struct task_struct *tsk = current; | 26 | die_if_no_fixup("An FPU Disabled exception happened in kernel space\n", |
24 | 27 | regs, EXCEP_FPU_DISABLED); | |
25 | if (!user_mode(regs)) | ||
26 | die_if_no_fixup("An FPU Disabled exception happened in" | ||
27 | " kernel space\n", | ||
28 | regs, code); | ||
29 | |||
30 | #ifdef CONFIG_FPU | ||
31 | preempt_disable(); | ||
32 | |||
33 | /* transfer the last process's FPU state to memory */ | ||
34 | if (fpu_state_owner) { | ||
35 | fpu_save(&fpu_state_owner->thread.fpu_state); | ||
36 | fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE; | ||
37 | } | ||
38 | |||
39 | /* the current process now owns the FPU state */ | ||
40 | fpu_state_owner = tsk; | ||
41 | regs->epsw |= EPSW_FE; | ||
42 | |||
43 | /* load the FPU with the current process's FPU state or invent a new | ||
44 | * clean one if the process doesn't have one */ | ||
45 | if (is_using_fpu(tsk)) { | ||
46 | fpu_restore(&tsk->thread.fpu_state); | ||
47 | } else { | ||
48 | fpu_init_state(); | ||
49 | set_using_fpu(tsk); | ||
50 | } | ||
51 | |||
52 | preempt_enable(); | ||
53 | #else | ||
54 | { | ||
55 | siginfo_t info; | ||
56 | |||
57 | info.si_signo = SIGFPE; | ||
58 | info.si_errno = 0; | ||
59 | info.si_addr = (void *) tsk->thread.uregs->pc; | ||
60 | info.si_code = FPE_FLTINV; | ||
61 | |||
62 | force_sig_info(SIGFPE, &info, tsk); | ||
63 | } | ||
64 | #endif /* CONFIG_FPU */ | ||
65 | } | 28 | } |
66 | 29 | ||
67 | /* | 30 | /* |
@@ -71,15 +34,16 @@ asmlinkage void fpu_disabled(struct pt_regs *regs, enum exception_code code) | |||
71 | */ | 34 | */ |
72 | asmlinkage void fpu_exception(struct pt_regs *regs, enum exception_code code) | 35 | asmlinkage void fpu_exception(struct pt_regs *regs, enum exception_code code) |
73 | { | 36 | { |
74 | struct task_struct *tsk = fpu_state_owner; | 37 | struct task_struct *tsk = current; |
75 | siginfo_t info; | 38 | siginfo_t info; |
39 | u32 fpcr; | ||
76 | 40 | ||
77 | if (!user_mode(regs)) | 41 | if (!user_mode(regs)) |
78 | die_if_no_fixup("An FPU Operation exception happened in" | 42 | die_if_no_fixup("An FPU Operation exception happened in" |
79 | " kernel space\n", | 43 | " kernel space\n", |
80 | regs, code); | 44 | regs, code); |
81 | 45 | ||
82 | if (!tsk) | 46 | if (!is_using_fpu(tsk)) |
83 | die_if_no_fixup("An FPU Operation exception happened," | 47 | die_if_no_fixup("An FPU Operation exception happened," |
84 | " but the FPU is not in use", | 48 | " but the FPU is not in use", |
85 | regs, code); | 49 | regs, code); |
@@ -89,48 +53,45 @@ asmlinkage void fpu_exception(struct pt_regs *regs, enum exception_code code) | |||
89 | info.si_addr = (void *) tsk->thread.uregs->pc; | 53 | info.si_addr = (void *) tsk->thread.uregs->pc; |
90 | info.si_code = FPE_FLTINV; | 54 | info.si_code = FPE_FLTINV; |
91 | 55 | ||
92 | #ifdef CONFIG_FPU | 56 | unlazy_fpu(tsk); |
93 | { | ||
94 | u32 fpcr; | ||
95 | 57 | ||
96 | /* get FPCR (we need to enable the FPU whilst we do this) */ | 58 | fpcr = tsk->thread.fpu_state.fpcr; |
97 | asm volatile(" or %1,epsw \n" | 59 | |
98 | #ifdef CONFIG_MN10300_PROC_MN103E010 | 60 | if (fpcr & FPCR_EC_Z) |
99 | " nop \n" | 61 | info.si_code = FPE_FLTDIV; |
100 | " nop \n" | 62 | else if (fpcr & FPCR_EC_O) |
101 | " nop \n" | 63 | info.si_code = FPE_FLTOVF; |
102 | #endif | 64 | else if (fpcr & FPCR_EC_U) |
103 | " fmov fpcr,%0 \n" | 65 | info.si_code = FPE_FLTUND; |
104 | #ifdef CONFIG_MN10300_PROC_MN103E010 | 66 | else if (fpcr & FPCR_EC_I) |
105 | " nop \n" | 67 | info.si_code = FPE_FLTRES; |
106 | " nop \n" | ||
107 | " nop \n" | ||
108 | #endif | ||
109 | " and %2,epsw \n" | ||
110 | : "=&d"(fpcr) | ||
111 | : "i"(EPSW_FE), "i"(~EPSW_FE) | ||
112 | ); | ||
113 | |||
114 | if (fpcr & FPCR_EC_Z) | ||
115 | info.si_code = FPE_FLTDIV; | ||
116 | else if (fpcr & FPCR_EC_O) | ||
117 | info.si_code = FPE_FLTOVF; | ||
118 | else if (fpcr & FPCR_EC_U) | ||
119 | info.si_code = FPE_FLTUND; | ||
120 | else if (fpcr & FPCR_EC_I) | ||
121 | info.si_code = FPE_FLTRES; | ||
122 | } | ||
123 | #endif | ||
124 | 68 | ||
125 | force_sig_info(SIGFPE, &info, tsk); | 69 | force_sig_info(SIGFPE, &info, tsk); |
126 | } | 70 | } |
127 | 71 | ||
128 | /* | 72 | /* |
73 | * handle an FPU invalid_op exception | ||
74 | * - Derived from DO_EINFO() macro in arch/mn10300/kernel/traps.c | ||
75 | */ | ||
76 | asmlinkage void fpu_invalid_op(struct pt_regs *regs, enum exception_code code) | ||
77 | { | ||
78 | siginfo_t info; | ||
79 | |||
80 | if (!user_mode(regs)) | ||
81 | die_if_no_fixup("FPU invalid opcode", regs, code); | ||
82 | |||
83 | info.si_signo = SIGILL; | ||
84 | info.si_errno = 0; | ||
85 | info.si_code = ILL_COPROC; | ||
86 | info.si_addr = (void *) regs->pc; | ||
87 | force_sig_info(info.si_signo, &info, current); | ||
88 | } | ||
89 | |||
90 | /* | ||
129 | * save the FPU state to a signal context | 91 | * save the FPU state to a signal context |
130 | */ | 92 | */ |
131 | int fpu_setup_sigcontext(struct fpucontext *fpucontext) | 93 | int fpu_setup_sigcontext(struct fpucontext *fpucontext) |
132 | { | 94 | { |
133 | #ifdef CONFIG_FPU | ||
134 | struct task_struct *tsk = current; | 95 | struct task_struct *tsk = current; |
135 | 96 | ||
136 | if (!is_using_fpu(tsk)) | 97 | if (!is_using_fpu(tsk)) |
@@ -142,11 +103,19 @@ int fpu_setup_sigcontext(struct fpucontext *fpucontext) | |||
142 | */ | 103 | */ |
143 | preempt_disable(); | 104 | preempt_disable(); |
144 | 105 | ||
106 | #ifndef CONFIG_LAZY_SAVE_FPU | ||
107 | if (tsk->thread.fpu_flags & THREAD_HAS_FPU) { | ||
108 | fpu_save(&tsk->thread.fpu_state); | ||
109 | tsk->thread.uregs->epsw &= ~EPSW_FE; | ||
110 | tsk->thread.fpu_flags &= ~THREAD_HAS_FPU; | ||
111 | } | ||
112 | #else /* !CONFIG_LAZY_SAVE_FPU */ | ||
145 | if (fpu_state_owner == tsk) { | 113 | if (fpu_state_owner == tsk) { |
146 | fpu_save(&tsk->thread.fpu_state); | 114 | fpu_save(&tsk->thread.fpu_state); |
147 | fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE; | 115 | fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE; |
148 | fpu_state_owner = NULL; | 116 | fpu_state_owner = NULL; |
149 | } | 117 | } |
118 | #endif /* !CONFIG_LAZY_SAVE_FPU */ | ||
150 | 119 | ||
151 | preempt_enable(); | 120 | preempt_enable(); |
152 | 121 | ||
@@ -161,9 +130,6 @@ int fpu_setup_sigcontext(struct fpucontext *fpucontext) | |||
161 | return -1; | 130 | return -1; |
162 | 131 | ||
163 | return 1; | 132 | return 1; |
164 | #else | ||
165 | return 0; | ||
166 | #endif | ||
167 | } | 133 | } |
168 | 134 | ||
169 | /* | 135 | /* |
@@ -171,17 +137,23 @@ int fpu_setup_sigcontext(struct fpucontext *fpucontext) | |||
171 | */ | 137 | */ |
172 | void fpu_kill_state(struct task_struct *tsk) | 138 | void fpu_kill_state(struct task_struct *tsk) |
173 | { | 139 | { |
174 | #ifdef CONFIG_FPU | ||
175 | /* disown anything left in the FPU */ | 140 | /* disown anything left in the FPU */ |
176 | preempt_disable(); | 141 | preempt_disable(); |
177 | 142 | ||
143 | #ifndef CONFIG_LAZY_SAVE_FPU | ||
144 | if (tsk->thread.fpu_flags & THREAD_HAS_FPU) { | ||
145 | tsk->thread.uregs->epsw &= ~EPSW_FE; | ||
146 | tsk->thread.fpu_flags &= ~THREAD_HAS_FPU; | ||
147 | } | ||
148 | #else /* !CONFIG_LAZY_SAVE_FPU */ | ||
178 | if (fpu_state_owner == tsk) { | 149 | if (fpu_state_owner == tsk) { |
179 | fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE; | 150 | fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE; |
180 | fpu_state_owner = NULL; | 151 | fpu_state_owner = NULL; |
181 | } | 152 | } |
153 | #endif /* !CONFIG_LAZY_SAVE_FPU */ | ||
182 | 154 | ||
183 | preempt_enable(); | 155 | preempt_enable(); |
184 | #endif | 156 | |
185 | /* we no longer have a valid current FPU state */ | 157 | /* we no longer have a valid current FPU state */ |
186 | clear_using_fpu(tsk); | 158 | clear_using_fpu(tsk); |
187 | } | 159 | } |
@@ -195,8 +167,7 @@ int fpu_restore_sigcontext(struct fpucontext *fpucontext) | |||
195 | int ret; | 167 | int ret; |
196 | 168 | ||
197 | /* load up the old FPU state */ | 169 | /* load up the old FPU state */ |
198 | ret = copy_from_user(&tsk->thread.fpu_state, | 170 | ret = copy_from_user(&tsk->thread.fpu_state, fpucontext, |
199 | fpucontext, | ||
200 | min(sizeof(struct fpu_state_struct), | 171 | min(sizeof(struct fpu_state_struct), |
201 | sizeof(struct fpucontext))); | 172 | sizeof(struct fpucontext))); |
202 | if (!ret) | 173 | if (!ret) |
diff --git a/arch/mn10300/kernel/gdb-io-serial-low.S b/arch/mn10300/kernel/gdb-io-serial-low.S index 4998b24f5d3a..b1d0152e96cb 100644 --- a/arch/mn10300/kernel/gdb-io-serial-low.S +++ b/arch/mn10300/kernel/gdb-io-serial-low.S | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <asm/thread_info.h> | 18 | #include <asm/thread_info.h> |
19 | #include <asm/frame.inc> | 19 | #include <asm/frame.inc> |
20 | #include <asm/intctl-regs.h> | 20 | #include <asm/intctl-regs.h> |
21 | #include <asm/irqflags.h> | ||
21 | #include <unit/serial.h> | 22 | #include <unit/serial.h> |
22 | 23 | ||
23 | .text | 24 | .text |
@@ -69,7 +70,7 @@ gdbstub_io_rx_overflow: | |||
69 | bra gdbstub_io_rx_done | 70 | bra gdbstub_io_rx_done |
70 | 71 | ||
71 | gdbstub_io_rx_enter: | 72 | gdbstub_io_rx_enter: |
72 | or EPSW_IE|EPSW_IM_1,epsw | 73 | LOCAL_CHANGE_INTR_MASK_LEVEL(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL+1)) |
73 | add -4,sp | 74 | add -4,sp |
74 | SAVE_ALL | 75 | SAVE_ALL |
75 | 76 | ||
@@ -80,7 +81,7 @@ gdbstub_io_rx_enter: | |||
80 | mov fp,d0 | 81 | mov fp,d0 |
81 | call gdbstub_rx_irq[],0 # gdbstub_rx_irq(regs,excep) | 82 | call gdbstub_rx_irq[],0 # gdbstub_rx_irq(regs,excep) |
82 | 83 | ||
83 | and ~EPSW_IE,epsw | 84 | LOCAL_CLI |
84 | bclr 0x01,(gdbstub_busy) | 85 | bclr 0x01,(gdbstub_busy) |
85 | 86 | ||
86 | .globl gdbstub_return | 87 | .globl gdbstub_return |
diff --git a/arch/mn10300/kernel/gdb-io-serial.c b/arch/mn10300/kernel/gdb-io-serial.c index ae663dc717e9..0d5d63c91dc3 100644 --- a/arch/mn10300/kernel/gdb-io-serial.c +++ b/arch/mn10300/kernel/gdb-io-serial.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <asm/exceptions.h> | 23 | #include <asm/exceptions.h> |
24 | #include <asm/serial-regs.h> | 24 | #include <asm/serial-regs.h> |
25 | #include <unit/serial.h> | 25 | #include <unit/serial.h> |
26 | #include <asm/smp.h> | ||
26 | 27 | ||
27 | /* | 28 | /* |
28 | * initialise the GDB stub | 29 | * initialise the GDB stub |
@@ -45,22 +46,34 @@ void gdbstub_io_init(void) | |||
45 | XIRQxICR(GDBPORT_SERIAL_IRQ) = 0; | 46 | XIRQxICR(GDBPORT_SERIAL_IRQ) = 0; |
46 | tmp = XIRQxICR(GDBPORT_SERIAL_IRQ); | 47 | tmp = XIRQxICR(GDBPORT_SERIAL_IRQ); |
47 | 48 | ||
49 | #if CONFIG_GDBSTUB_IRQ_LEVEL == 0 | ||
48 | IVAR0 = EXCEP_IRQ_LEVEL0; | 50 | IVAR0 = EXCEP_IRQ_LEVEL0; |
49 | set_intr_stub(EXCEP_IRQ_LEVEL0, gdbstub_io_rx_handler); | 51 | #elif CONFIG_GDBSTUB_IRQ_LEVEL == 1 |
52 | IVAR1 = EXCEP_IRQ_LEVEL1; | ||
53 | #elif CONFIG_GDBSTUB_IRQ_LEVEL == 2 | ||
54 | IVAR2 = EXCEP_IRQ_LEVEL2; | ||
55 | #elif CONFIG_GDBSTUB_IRQ_LEVEL == 3 | ||
56 | IVAR3 = EXCEP_IRQ_LEVEL3; | ||
57 | #elif CONFIG_GDBSTUB_IRQ_LEVEL == 4 | ||
58 | IVAR4 = EXCEP_IRQ_LEVEL4; | ||
59 | #elif CONFIG_GDBSTUB_IRQ_LEVEL == 5 | ||
60 | IVAR5 = EXCEP_IRQ_LEVEL5; | ||
61 | #else | ||
62 | #error "Unknown irq level for gdbstub." | ||
63 | #endif | ||
64 | |||
65 | set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL), | ||
66 | gdbstub_io_rx_handler); | ||
50 | 67 | ||
51 | XIRQxICR(GDBPORT_SERIAL_IRQ) &= ~GxICR_REQUEST; | 68 | XIRQxICR(GDBPORT_SERIAL_IRQ) &= ~GxICR_REQUEST; |
52 | XIRQxICR(GDBPORT_SERIAL_IRQ) = GxICR_ENABLE | GxICR_LEVEL_0; | 69 | XIRQxICR(GDBPORT_SERIAL_IRQ) = |
70 | GxICR_ENABLE | NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL); | ||
53 | tmp = XIRQxICR(GDBPORT_SERIAL_IRQ); | 71 | tmp = XIRQxICR(GDBPORT_SERIAL_IRQ); |
54 | 72 | ||
55 | GDBPORT_SERIAL_IER = UART_IER_RDI | UART_IER_RLSI; | 73 | GDBPORT_SERIAL_IER = UART_IER_RDI | UART_IER_RLSI; |
56 | 74 | ||
57 | /* permit level 0 IRQs to take place */ | 75 | /* permit level 0 IRQs to take place */ |
58 | asm volatile( | 76 | local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1)); |
59 | " and %0,epsw \n" | ||
60 | " or %1,epsw \n" | ||
61 | : | ||
62 | : "i"(~EPSW_IM), "i"(EPSW_IE | EPSW_IM_1) | ||
63 | ); | ||
64 | } | 77 | } |
65 | 78 | ||
66 | /* | 79 | /* |
@@ -87,6 +100,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock) | |||
87 | { | 100 | { |
88 | unsigned ix; | 101 | unsigned ix; |
89 | u8 ch, st; | 102 | u8 ch, st; |
103 | #if defined(CONFIG_MN10300_WD_TIMER) | ||
104 | int cpu; | ||
105 | #endif | ||
90 | 106 | ||
91 | *_ch = 0xff; | 107 | *_ch = 0xff; |
92 | 108 | ||
@@ -104,8 +120,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock) | |||
104 | if (nonblock) | 120 | if (nonblock) |
105 | return -EAGAIN; | 121 | return -EAGAIN; |
106 | #ifdef CONFIG_MN10300_WD_TIMER | 122 | #ifdef CONFIG_MN10300_WD_TIMER |
107 | watchdog_alert_counter = 0; | 123 | for (cpu = 0; cpu < NR_CPUS; cpu++) |
108 | #endif /* CONFIG_MN10300_WD_TIMER */ | 124 | watchdog_alert_counter[cpu] = 0; |
125 | #endif | ||
109 | goto try_again; | 126 | goto try_again; |
110 | } | 127 | } |
111 | 128 | ||
diff --git a/arch/mn10300/kernel/gdb-io-ttysm.c b/arch/mn10300/kernel/gdb-io-ttysm.c index a560bbc3137d..97dfda23342c 100644 --- a/arch/mn10300/kernel/gdb-io-ttysm.c +++ b/arch/mn10300/kernel/gdb-io-ttysm.c | |||
@@ -58,9 +58,12 @@ void __init gdbstub_io_init(void) | |||
58 | gdbstub_io_set_baud(115200); | 58 | gdbstub_io_set_baud(115200); |
59 | 59 | ||
60 | /* we want to get serial receive interrupts */ | 60 | /* we want to get serial receive interrupts */ |
61 | set_intr_level(gdbstub_port->rx_irq, GxICR_LEVEL_0); | 61 | set_intr_level(gdbstub_port->rx_irq, |
62 | set_intr_level(gdbstub_port->tx_irq, GxICR_LEVEL_0); | 62 | NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL)); |
63 | set_intr_stub(EXCEP_IRQ_LEVEL0, gdbstub_io_rx_handler); | 63 | set_intr_level(gdbstub_port->tx_irq, |
64 | NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL)); | ||
65 | set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL), | ||
66 | gdbstub_io_rx_handler); | ||
64 | 67 | ||
65 | *gdbstub_port->rx_icr |= GxICR_ENABLE; | 68 | *gdbstub_port->rx_icr |= GxICR_ENABLE; |
66 | tmp = *gdbstub_port->rx_icr; | 69 | tmp = *gdbstub_port->rx_icr; |
@@ -84,12 +87,7 @@ void __init gdbstub_io_init(void) | |||
84 | tmp = *gdbstub_port->_control; | 87 | tmp = *gdbstub_port->_control; |
85 | 88 | ||
86 | /* permit level 0 IRQs only */ | 89 | /* permit level 0 IRQs only */ |
87 | asm volatile( | 90 | local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1)); |
88 | " and %0,epsw \n" | ||
89 | " or %1,epsw \n" | ||
90 | : | ||
91 | : "i"(~EPSW_IM), "i"(EPSW_IE|EPSW_IM_1) | ||
92 | ); | ||
93 | } | 91 | } |
94 | 92 | ||
95 | /* | 93 | /* |
@@ -184,6 +182,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock) | |||
184 | { | 182 | { |
185 | unsigned ix; | 183 | unsigned ix; |
186 | u8 ch, st; | 184 | u8 ch, st; |
185 | #if defined(CONFIG_MN10300_WD_TIMER) | ||
186 | int cpu; | ||
187 | #endif | ||
187 | 188 | ||
188 | *_ch = 0xff; | 189 | *_ch = 0xff; |
189 | 190 | ||
@@ -201,8 +202,9 @@ try_again: | |||
201 | if (nonblock) | 202 | if (nonblock) |
202 | return -EAGAIN; | 203 | return -EAGAIN; |
203 | #ifdef CONFIG_MN10300_WD_TIMER | 204 | #ifdef CONFIG_MN10300_WD_TIMER |
204 | watchdog_alert_counter = 0; | 205 | for (cpu = 0; cpu < NR_CPUS; cpu++) |
205 | #endif /* CONFIG_MN10300_WD_TIMER */ | 206 | watchdog_alert_counter[cpu] = 0; |
207 | #endif | ||
206 | goto try_again; | 208 | goto try_again; |
207 | } | 209 | } |
208 | 210 | ||
diff --git a/arch/mn10300/kernel/gdb-stub.c b/arch/mn10300/kernel/gdb-stub.c index 41b11706c8ed..a5fc3f05309b 100644 --- a/arch/mn10300/kernel/gdb-stub.c +++ b/arch/mn10300/kernel/gdb-stub.c | |||
@@ -440,15 +440,11 @@ static const unsigned char gdbstub_insn_sizes[256] = | |||
440 | 440 | ||
441 | static int __gdbstub_mark_bp(u8 *addr, int ix) | 441 | static int __gdbstub_mark_bp(u8 *addr, int ix) |
442 | { | 442 | { |
443 | if (addr < (u8 *) 0x70000000UL) | 443 | /* vmalloc area */ |
444 | return 0; | 444 | if (((u8 *) VMALLOC_START <= addr) && (addr < (u8 *) VMALLOC_END)) |
445 | /* 70000000-7fffffff: vmalloc area */ | ||
446 | if (addr < (u8 *) 0x80000000UL) | ||
447 | goto okay; | 445 | goto okay; |
448 | if (addr < (u8 *) 0x8c000000UL) | 446 | /* SRAM, SDRAM */ |
449 | return 0; | 447 | if (((u8 *) 0x80000000UL <= addr) && (addr < (u8 *) 0xa0000000UL)) |
450 | /* 8c000000-93ffffff: SRAM, SDRAM */ | ||
451 | if (addr < (u8 *) 0x94000000UL) | ||
452 | goto okay; | 448 | goto okay; |
453 | return 0; | 449 | return 0; |
454 | 450 | ||
@@ -1197,9 +1193,8 @@ static int gdbstub(struct pt_regs *regs, enum exception_code excep) | |||
1197 | mn10300_set_gdbleds(1); | 1193 | mn10300_set_gdbleds(1); |
1198 | 1194 | ||
1199 | asm volatile("mov mdr,%0" : "=d"(mdr)); | 1195 | asm volatile("mov mdr,%0" : "=d"(mdr)); |
1200 | asm volatile("mov epsw,%0" : "=d"(epsw)); | 1196 | local_save_flags(epsw); |
1201 | asm volatile("mov %0,epsw" | 1197 | local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1)); |
1202 | :: "d"((epsw & ~EPSW_IM) | EPSW_IE | EPSW_IM_1)); | ||
1203 | 1198 | ||
1204 | gdbstub_store_fpu(); | 1199 | gdbstub_store_fpu(); |
1205 | 1200 | ||
diff --git a/arch/mn10300/kernel/head.S b/arch/mn10300/kernel/head.S index 14f27f3bfaf4..73e00fc78072 100644 --- a/arch/mn10300/kernel/head.S +++ b/arch/mn10300/kernel/head.S | |||
@@ -19,6 +19,12 @@ | |||
19 | #include <asm/frame.inc> | 19 | #include <asm/frame.inc> |
20 | #include <asm/param.h> | 20 | #include <asm/param.h> |
21 | #include <unit/serial.h> | 21 | #include <unit/serial.h> |
22 | #ifdef CONFIG_SMP | ||
23 | #include <asm/smp.h> | ||
24 | #include <asm/intctl-regs.h> | ||
25 | #include <asm/cpu-regs.h> | ||
26 | #include <proc/smp-regs.h> | ||
27 | #endif /* CONFIG_SMP */ | ||
22 | 28 | ||
23 | __HEAD | 29 | __HEAD |
24 | 30 | ||
@@ -30,17 +36,51 @@ | |||
30 | .globl _start | 36 | .globl _start |
31 | .type _start,@function | 37 | .type _start,@function |
32 | _start: | 38 | _start: |
39 | #ifdef CONFIG_SMP | ||
40 | # | ||
41 | # If this is a secondary CPU (AP), then deal with that elsewhere | ||
42 | # | ||
43 | mov (CPUID),d3 | ||
44 | and CPUID_MASK,d3 | ||
45 | bne startup_secondary | ||
46 | |||
47 | # | ||
48 | # We're dealing with the primary CPU (BP) here, then. | ||
49 | # Keep BP's D0,D1,D2 register for boot check. | ||
50 | # | ||
51 | |||
52 | # Set up the Boot IPI for each secondary CPU | ||
53 | mov 0x1,a0 | ||
54 | loop_set_secondary_icr: | ||
55 | mov a0,a1 | ||
56 | asl CROSS_ICR_CPU_SHIFT,a1 | ||
57 | add CROSS_GxICR(SMP_BOOT_IRQ,0),a1 | ||
58 | movhu (a1),d3 | ||
59 | or GxICR_ENABLE|GxICR_LEVEL_0,d3 | ||
60 | movhu d3,(a1) | ||
61 | movhu (a1),d3 # flush | ||
62 | inc a0 | ||
63 | cmp NR_CPUS,a0 | ||
64 | bne loop_set_secondary_icr | ||
65 | #endif /* CONFIG_SMP */ | ||
66 | |||
33 | # save commandline pointer | 67 | # save commandline pointer |
34 | mov d0,a3 | 68 | mov d0,a3 |
35 | 69 | ||
36 | # preload the PGD pointer register | 70 | # preload the PGD pointer register |
37 | mov swapper_pg_dir,d0 | 71 | mov swapper_pg_dir,d0 |
38 | mov d0,(PTBR) | 72 | mov d0,(PTBR) |
73 | clr d0 | ||
74 | movbu d0,(PIDR) | ||
39 | 75 | ||
40 | # turn on the TLBs | 76 | # turn on the TLBs |
41 | mov MMUCTR_IIV|MMUCTR_DIV,d0 | 77 | mov MMUCTR_IIV|MMUCTR_DIV,d0 |
42 | mov d0,(MMUCTR) | 78 | mov d0,(MMUCTR) |
79 | #ifdef CONFIG_AM34_2 | ||
80 | mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE|MMUCTR_WTE,d0 | ||
81 | #else | ||
43 | mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE,d0 | 82 | mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE,d0 |
83 | #endif | ||
44 | mov d0,(MMUCTR) | 84 | mov d0,(MMUCTR) |
45 | 85 | ||
46 | # turn on AM33v2 exception handling mode and set the trap table base | 86 | # turn on AM33v2 exception handling mode and set the trap table base |
@@ -51,6 +91,11 @@ _start: | |||
51 | mov d0,(TBR) | 91 | mov d0,(TBR) |
52 | 92 | ||
53 | # invalidate and enable both of the caches | 93 | # invalidate and enable both of the caches |
94 | #ifdef CONFIG_SMP | ||
95 | mov ECHCTR,a0 | ||
96 | clr d0 | ||
97 | mov d0,(a0) | ||
98 | #endif | ||
54 | mov CHCTR,a0 | 99 | mov CHCTR,a0 |
55 | clr d0 | 100 | clr d0 |
56 | movhu d0,(a0) # turn off first | 101 | movhu d0,(a0) # turn off first |
@@ -61,18 +106,18 @@ _start: | |||
61 | btst CHCTR_ICBUSY|CHCTR_DCBUSY,d0 # wait till not busy | 106 | btst CHCTR_ICBUSY|CHCTR_DCBUSY,d0 # wait till not busy |
62 | lne | 107 | lne |
63 | 108 | ||
64 | #ifndef CONFIG_MN10300_CACHE_DISABLED | 109 | #ifdef CONFIG_MN10300_CACHE_ENABLED |
65 | #ifdef CONFIG_MN10300_CACHE_WBACK | 110 | #ifdef CONFIG_MN10300_CACHE_WBACK |
66 | #ifndef CONFIG_MN10300_CACHE_WBACK_NOWRALLOC | 111 | #ifndef CONFIG_MN10300_CACHE_WBACK_NOWRALLOC |
67 | mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0 | 112 | mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0 |
68 | #else | 113 | #else |
69 | mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK|CHCTR_DCALMD,d0 | 114 | mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK|CHCTR_DCALMD,d0 |
70 | #endif /* CACHE_DISABLED */ | 115 | #endif /* NOWRALLOC */ |
71 | #else | 116 | #else |
72 | mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0 | 117 | mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0 |
73 | #endif /* WBACK */ | 118 | #endif /* WBACK */ |
74 | movhu d0,(a0) # enable | 119 | movhu d0,(a0) # enable |
75 | #endif /* NOWRALLOC */ | 120 | #endif /* ENABLED */ |
76 | 121 | ||
77 | # turn on RTS on the debug serial port if applicable | 122 | # turn on RTS on the debug serial port if applicable |
78 | #ifdef CONFIG_MN10300_UNIT_ASB2305 | 123 | #ifdef CONFIG_MN10300_UNIT_ASB2305 |
@@ -206,6 +251,44 @@ __no_parameters: | |||
206 | call processor_init[],0 | 251 | call processor_init[],0 |
207 | call unit_init[],0 | 252 | call unit_init[],0 |
208 | 253 | ||
254 | #ifdef CONFIG_SMP | ||
255 | # mark the primary CPU in cpu_boot_map | ||
256 | mov cpu_boot_map,a0 | ||
257 | mov 0x1,d0 | ||
258 | mov d0,(a0) | ||
259 | |||
260 | # signal each secondary CPU to begin booting | ||
261 | mov 0x1,d2 # CPU ID | ||
262 | |||
263 | loop_request_boot_secondary: | ||
264 | mov d2,a0 | ||
265 | # send SMP_BOOT_IPI to secondary CPU | ||
266 | asl CROSS_ICR_CPU_SHIFT,a0 | ||
267 | add CROSS_GxICR(SMP_BOOT_IRQ,0),a0 | ||
268 | movhu (a0),d0 | ||
269 | or GxICR_REQUEST|GxICR_DETECT,d0 | ||
270 | movhu d0,(a0) | ||
271 | movhu (a0),d0 # flush | ||
272 | |||
273 | # wait up to 100ms for AP's IPI to be received | ||
274 | clr d3 | ||
275 | wait_on_secondary_boot: | ||
276 | mov DELAY_TIME_BOOT_IPI,d0 | ||
277 | call __delay[],0 | ||
278 | inc d3 | ||
279 | mov cpu_boot_map,a0 | ||
280 | mov (a0),d0 | ||
281 | lsr d2,d0 | ||
282 | btst 0x1,d0 | ||
283 | bne 1f | ||
284 | cmp TIME_OUT_COUNT_BOOT_IPI,d3 | ||
285 | bne wait_on_secondary_boot | ||
286 | 1: | ||
287 | inc d2 | ||
288 | cmp NR_CPUS,d2 | ||
289 | bne loop_request_boot_secondary | ||
290 | #endif /* CONFIG_SMP */ | ||
291 | |||
209 | #ifdef CONFIG_GDBSTUB | 292 | #ifdef CONFIG_GDBSTUB |
210 | call gdbstub_init[],0 | 293 | call gdbstub_init[],0 |
211 | 294 | ||
@@ -217,7 +300,118 @@ __gdbstub_pause: | |||
217 | #endif | 300 | #endif |
218 | 301 | ||
219 | jmp start_kernel | 302 | jmp start_kernel |
220 | .size _start, _start-. | 303 | .size _start,.-_start |
304 | |||
305 | ############################################################################### | ||
306 | # | ||
307 | # Secondary CPU boot point | ||
308 | # | ||
309 | ############################################################################### | ||
310 | #ifdef CONFIG_SMP | ||
311 | startup_secondary: | ||
312 | # preload the PGD pointer register | ||
313 | mov swapper_pg_dir,d0 | ||
314 | mov d0,(PTBR) | ||
315 | clr d0 | ||
316 | movbu d0,(PIDR) | ||
317 | |||
318 | # turn on the TLBs | ||
319 | mov MMUCTR_IIV|MMUCTR_DIV,d0 | ||
320 | mov d0,(MMUCTR) | ||
321 | #ifdef CONFIG_AM34_2 | ||
322 | mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE|MMUCTR_WTE,d0 | ||
323 | #else | ||
324 | mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE,d0 | ||
325 | #endif | ||
326 | mov d0,(MMUCTR) | ||
327 | |||
328 | # turn on AM33v2 exception handling mode and set the trap table base | ||
329 | movhu (CPUP),d0 | ||
330 | or CPUP_EXM_AM33V2,d0 | ||
331 | movhu d0,(CPUP) | ||
332 | |||
333 | # set the interrupt vector table | ||
334 | mov CONFIG_INTERRUPT_VECTOR_BASE,d0 | ||
335 | mov d0,(TBR) | ||
336 | |||
337 | # invalidate and enable both of the caches | ||
338 | mov ECHCTR,a0 | ||
339 | clr d0 | ||
340 | mov d0,(a0) | ||
341 | mov CHCTR,a0 | ||
342 | clr d0 | ||
343 | movhu d0,(a0) # turn off first | ||
344 | mov CHCTR_ICINV|CHCTR_DCINV,d0 | ||
345 | movhu d0,(a0) | ||
346 | setlb | ||
347 | mov (a0),d0 | ||
348 | btst CHCTR_ICBUSY|CHCTR_DCBUSY,d0 # wait till not busy (use CPU loop buffer) | ||
349 | lne | ||
350 | |||
351 | #ifdef CONFIG_MN10300_CACHE_ENABLED | ||
352 | #ifdef CONFIG_MN10300_CACHE_WBACK | ||
353 | #ifndef CONFIG_MN10300_CACHE_WBACK_NOWRALLOC | ||
354 | mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0 | ||
355 | #else | ||
356 | mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK|CHCTR_DCALMD,d0 | ||
357 | #endif /* !NOWRALLOC */ | ||
358 | #else | ||
359 | mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0 | ||
360 | #endif /* WBACK */ | ||
361 | movhu d0,(a0) # enable | ||
362 | #endif /* ENABLED */ | ||
363 | |||
364 | # Clear the boot IPI interrupt for this CPU | ||
365 | movhu (GxICR(SMP_BOOT_IRQ)),d0 | ||
366 | and ~GxICR_REQUEST,d0 | ||
367 | movhu d0,(GxICR(SMP_BOOT_IRQ)) | ||
368 | movhu (GxICR(SMP_BOOT_IRQ)),d0 # flush | ||
369 | |||
370 | /* get stack */ | ||
371 | mov CONFIG_INTERRUPT_VECTOR_BASE + CONFIG_BOOT_STACK_OFFSET,a0 | ||
372 | mov (CPUID),d0 | ||
373 | and CPUID_MASK,d0 | ||
374 | mulu CONFIG_BOOT_STACK_SIZE,d0 | ||
375 | sub d0,a0 | ||
376 | mov a0,sp | ||
377 | |||
378 | # init interrupt for AP | ||
379 | call smp_prepare_cpu_init[],0 | ||
380 | |||
381 | # mark this secondary CPU in cpu_boot_map | ||
382 | mov (CPUID),d0 | ||
383 | mov 0x1,d1 | ||
384 | asl d0,d1 | ||
385 | mov cpu_boot_map,a0 | ||
386 | bset d1,(a0) | ||
387 | |||
388 | or EPSW_IE|EPSW_IM_1,epsw # permit level 0 interrupts | ||
389 | nop | ||
390 | nop | ||
391 | #ifdef CONFIG_MN10300_CACHE_WBACK | ||
392 | # flush the local cache if it's in writeback mode | ||
393 | call mn10300_local_dcache_flush_inv[],0 | ||
394 | setlb | ||
395 | mov (CHCTR),d0 | ||
396 | btst CHCTR_DCBUSY,d0 # wait till not busy (use CPU loop buffer) | ||
397 | lne | ||
398 | #endif | ||
399 | |||
400 | # now sleep waiting for further instructions | ||
401 | secondary_sleep: | ||
402 | mov CPUM_SLEEP,d0 | ||
403 | movhu d0,(CPUM) | ||
404 | nop | ||
405 | nop | ||
406 | bra secondary_sleep | ||
407 | .size startup_secondary,.-startup_secondary | ||
408 | #endif /* CONFIG_SMP */ | ||
409 | |||
410 | ############################################################################### | ||
411 | # | ||
412 | # | ||
413 | # | ||
414 | ############################################################################### | ||
221 | ENTRY(__head_end) | 415 | ENTRY(__head_end) |
222 | 416 | ||
223 | /* | 417 | /* |
diff --git a/arch/mn10300/kernel/internal.h b/arch/mn10300/kernel/internal.h index eee2eee86267..6a064ab5af07 100644 --- a/arch/mn10300/kernel/internal.h +++ b/arch/mn10300/kernel/internal.h | |||
@@ -9,6 +9,9 @@ | |||
9 | * 2 of the Licence, or (at your option) any later version. | 9 | * 2 of the Licence, or (at your option) any later version. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | struct clocksource; | ||
13 | struct clock_event_device; | ||
14 | |||
12 | /* | 15 | /* |
13 | * kthread.S | 16 | * kthread.S |
14 | */ | 17 | */ |
@@ -18,3 +21,25 @@ extern int kernel_thread_helper(int); | |||
18 | * entry.S | 21 | * entry.S |
19 | */ | 22 | */ |
20 | extern void ret_from_fork(struct task_struct *) __attribute__((noreturn)); | 23 | extern void ret_from_fork(struct task_struct *) __attribute__((noreturn)); |
24 | |||
25 | /* | ||
26 | * smp-low.S | ||
27 | */ | ||
28 | #ifdef CONFIG_SMP | ||
29 | extern void mn10300_low_ipi_handler(void); | ||
30 | #endif | ||
31 | |||
32 | /* | ||
33 | * time.c | ||
34 | */ | ||
35 | extern irqreturn_t local_timer_interrupt(void); | ||
36 | |||
37 | /* | ||
38 | * time.c | ||
39 | */ | ||
40 | #ifdef CONFIG_CEVT_MN10300 | ||
41 | extern void clockevent_set_clock(struct clock_event_device *, unsigned int); | ||
42 | #endif | ||
43 | #ifdef CONFIG_CSRC_MN10300 | ||
44 | extern void clocksource_set_clock(struct clocksource *, unsigned int); | ||
45 | #endif | ||
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c index e2d5ed891f37..c2e44597c22b 100644 --- a/arch/mn10300/kernel/irq.c +++ b/arch/mn10300/kernel/irq.c | |||
@@ -12,11 +12,26 @@ | |||
12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
13 | #include <linux/kernel_stat.h> | 13 | #include <linux/kernel_stat.h> |
14 | #include <linux/seq_file.h> | 14 | #include <linux/seq_file.h> |
15 | #include <linux/cpumask.h> | ||
15 | #include <asm/setup.h> | 16 | #include <asm/setup.h> |
17 | #include <asm/serial-regs.h> | ||
16 | 18 | ||
17 | unsigned long __mn10300_irq_enabled_epsw = EPSW_IE | EPSW_IM_7; | 19 | unsigned long __mn10300_irq_enabled_epsw[NR_CPUS] __cacheline_aligned_in_smp = { |
20 | [0 ... NR_CPUS - 1] = EPSW_IE | EPSW_IM_7 | ||
21 | }; | ||
18 | EXPORT_SYMBOL(__mn10300_irq_enabled_epsw); | 22 | EXPORT_SYMBOL(__mn10300_irq_enabled_epsw); |
19 | 23 | ||
24 | #ifdef CONFIG_SMP | ||
25 | static char irq_affinity_online[NR_IRQS] = { | ||
26 | [0 ... NR_IRQS - 1] = 0 | ||
27 | }; | ||
28 | |||
29 | #define NR_IRQ_WORDS ((NR_IRQS + 31) / 32) | ||
30 | static unsigned long irq_affinity_request[NR_IRQ_WORDS] = { | ||
31 | [0 ... NR_IRQ_WORDS - 1] = 0 | ||
32 | }; | ||
33 | #endif /* CONFIG_SMP */ | ||
34 | |||
20 | atomic_t irq_err_count; | 35 | atomic_t irq_err_count; |
21 | 36 | ||
22 | /* | 37 | /* |
@@ -24,30 +39,67 @@ atomic_t irq_err_count; | |||
24 | */ | 39 | */ |
25 | static void mn10300_cpupic_ack(unsigned int irq) | 40 | static void mn10300_cpupic_ack(unsigned int irq) |
26 | { | 41 | { |
42 | unsigned long flags; | ||
27 | u16 tmp; | 43 | u16 tmp; |
28 | *(volatile u8 *) &GxICR(irq) = GxICR_DETECT; | 44 | |
45 | flags = arch_local_cli_save(); | ||
46 | GxICR_u8(irq) = GxICR_DETECT; | ||
29 | tmp = GxICR(irq); | 47 | tmp = GxICR(irq); |
48 | arch_local_irq_restore(flags); | ||
30 | } | 49 | } |
31 | 50 | ||
32 | static void mn10300_cpupic_mask(unsigned int irq) | 51 | static void __mask_and_set_icr(unsigned int irq, |
52 | unsigned int mask, unsigned int set) | ||
33 | { | 53 | { |
34 | u16 tmp = GxICR(irq); | 54 | unsigned long flags; |
35 | GxICR(irq) = (tmp & GxICR_LEVEL); | 55 | u16 tmp; |
56 | |||
57 | flags = arch_local_cli_save(); | ||
58 | tmp = GxICR(irq); | ||
59 | GxICR(irq) = (tmp & mask) | set; | ||
36 | tmp = GxICR(irq); | 60 | tmp = GxICR(irq); |
61 | arch_local_irq_restore(flags); | ||
62 | } | ||
63 | |||
64 | static void mn10300_cpupic_mask(unsigned int irq) | ||
65 | { | ||
66 | __mask_and_set_icr(irq, GxICR_LEVEL, 0); | ||
37 | } | 67 | } |
38 | 68 | ||
39 | static void mn10300_cpupic_mask_ack(unsigned int irq) | 69 | static void mn10300_cpupic_mask_ack(unsigned int irq) |
40 | { | 70 | { |
41 | u16 tmp = GxICR(irq); | 71 | #ifdef CONFIG_SMP |
42 | GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT; | 72 | unsigned long flags; |
43 | tmp = GxICR(irq); | 73 | u16 tmp; |
74 | |||
75 | flags = arch_local_cli_save(); | ||
76 | |||
77 | if (!test_and_clear_bit(irq, irq_affinity_request)) { | ||
78 | tmp = GxICR(irq); | ||
79 | GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT; | ||
80 | tmp = GxICR(irq); | ||
81 | } else { | ||
82 | u16 tmp2; | ||
83 | tmp = GxICR(irq); | ||
84 | GxICR(irq) = (tmp & GxICR_LEVEL); | ||
85 | tmp2 = GxICR(irq); | ||
86 | |||
87 | irq_affinity_online[irq] = | ||
88 | any_online_cpu(*irq_desc[irq].affinity); | ||
89 | CROSS_GxICR(irq, irq_affinity_online[irq]) = | ||
90 | (tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT; | ||
91 | tmp = CROSS_GxICR(irq, irq_affinity_online[irq]); | ||
92 | } | ||
93 | |||
94 | arch_local_irq_restore(flags); | ||
95 | #else /* CONFIG_SMP */ | ||
96 | __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_DETECT); | ||
97 | #endif /* CONFIG_SMP */ | ||
44 | } | 98 | } |
45 | 99 | ||
46 | static void mn10300_cpupic_unmask(unsigned int irq) | 100 | static void mn10300_cpupic_unmask(unsigned int irq) |
47 | { | 101 | { |
48 | u16 tmp = GxICR(irq); | 102 | __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE); |
49 | GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE; | ||
50 | tmp = GxICR(irq); | ||
51 | } | 103 | } |
52 | 104 | ||
53 | static void mn10300_cpupic_unmask_clear(unsigned int irq) | 105 | static void mn10300_cpupic_unmask_clear(unsigned int irq) |
@@ -56,11 +108,89 @@ static void mn10300_cpupic_unmask_clear(unsigned int irq) | |||
56 | * device has ceased to assert its interrupt line and the interrupt | 108 | * device has ceased to assert its interrupt line and the interrupt |
57 | * channel has been disabled in the PIC, so for level-triggered | 109 | * channel has been disabled in the PIC, so for level-triggered |
58 | * interrupts we need to clear the request bit when we re-enable */ | 110 | * interrupts we need to clear the request bit when we re-enable */ |
59 | u16 tmp = GxICR(irq); | 111 | #ifdef CONFIG_SMP |
60 | GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT; | 112 | unsigned long flags; |
61 | tmp = GxICR(irq); | 113 | u16 tmp; |
114 | |||
115 | flags = arch_local_cli_save(); | ||
116 | |||
117 | if (!test_and_clear_bit(irq, irq_affinity_request)) { | ||
118 | tmp = GxICR(irq); | ||
119 | GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT; | ||
120 | tmp = GxICR(irq); | ||
121 | } else { | ||
122 | tmp = GxICR(irq); | ||
123 | |||
124 | irq_affinity_online[irq] = any_online_cpu(*irq_desc[irq].affinity); | ||
125 | CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT; | ||
126 | tmp = CROSS_GxICR(irq, irq_affinity_online[irq]); | ||
127 | } | ||
128 | |||
129 | arch_local_irq_restore(flags); | ||
130 | #else /* CONFIG_SMP */ | ||
131 | __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE | GxICR_DETECT); | ||
132 | #endif /* CONFIG_SMP */ | ||
62 | } | 133 | } |
63 | 134 | ||
135 | #ifdef CONFIG_SMP | ||
136 | static int | ||
137 | mn10300_cpupic_setaffinity(unsigned int irq, const struct cpumask *mask) | ||
138 | { | ||
139 | unsigned long flags; | ||
140 | int err; | ||
141 | |||
142 | flags = arch_local_cli_save(); | ||
143 | |||
144 | /* check irq no */ | ||
145 | switch (irq) { | ||
146 | case TMJCIRQ: | ||
147 | case RESCHEDULE_IPI: | ||
148 | case CALL_FUNC_SINGLE_IPI: | ||
149 | case LOCAL_TIMER_IPI: | ||
150 | case FLUSH_CACHE_IPI: | ||
151 | case CALL_FUNCTION_NMI_IPI: | ||
152 | case GDB_NMI_IPI: | ||
153 | #ifdef CONFIG_MN10300_TTYSM0 | ||
154 | case SC0RXIRQ: | ||
155 | case SC0TXIRQ: | ||
156 | #ifdef CONFIG_MN10300_TTYSM0_TIMER8 | ||
157 | case TM8IRQ: | ||
158 | #elif CONFIG_MN10300_TTYSM0_TIMER2 | ||
159 | case TM2IRQ: | ||
160 | #endif /* CONFIG_MN10300_TTYSM0_TIMER8 */ | ||
161 | #endif /* CONFIG_MN10300_TTYSM0 */ | ||
162 | |||
163 | #ifdef CONFIG_MN10300_TTYSM1 | ||
164 | case SC1RXIRQ: | ||
165 | case SC1TXIRQ: | ||
166 | #ifdef CONFIG_MN10300_TTYSM1_TIMER12 | ||
167 | case TM12IRQ: | ||
168 | #elif CONFIG_MN10300_TTYSM1_TIMER9 | ||
169 | case TM9IRQ: | ||
170 | #elif CONFIG_MN10300_TTYSM1_TIMER3 | ||
171 | case TM3IRQ: | ||
172 | #endif /* CONFIG_MN10300_TTYSM1_TIMER12 */ | ||
173 | #endif /* CONFIG_MN10300_TTYSM1 */ | ||
174 | |||
175 | #ifdef CONFIG_MN10300_TTYSM2 | ||
176 | case SC2RXIRQ: | ||
177 | case SC2TXIRQ: | ||
178 | case TM10IRQ: | ||
179 | #endif /* CONFIG_MN10300_TTYSM2 */ | ||
180 | err = -1; | ||
181 | break; | ||
182 | |||
183 | default: | ||
184 | set_bit(irq, irq_affinity_request); | ||
185 | err = 0; | ||
186 | break; | ||
187 | } | ||
188 | |||
189 | arch_local_irq_restore(flags); | ||
190 | return err; | ||
191 | } | ||
192 | #endif /* CONFIG_SMP */ | ||
193 | |||
64 | /* | 194 | /* |
65 | * MN10300 PIC level-triggered IRQ handling. | 195 | * MN10300 PIC level-triggered IRQ handling. |
66 | * | 196 | * |
@@ -79,6 +209,9 @@ static struct irq_chip mn10300_cpu_pic_level = { | |||
79 | .mask = mn10300_cpupic_mask, | 209 | .mask = mn10300_cpupic_mask, |
80 | .mask_ack = mn10300_cpupic_mask, | 210 | .mask_ack = mn10300_cpupic_mask, |
81 | .unmask = mn10300_cpupic_unmask_clear, | 211 | .unmask = mn10300_cpupic_unmask_clear, |
212 | #ifdef CONFIG_SMP | ||
213 | .set_affinity = mn10300_cpupic_setaffinity, | ||
214 | #endif | ||
82 | }; | 215 | }; |
83 | 216 | ||
84 | /* | 217 | /* |
@@ -94,6 +227,9 @@ static struct irq_chip mn10300_cpu_pic_edge = { | |||
94 | .mask = mn10300_cpupic_mask, | 227 | .mask = mn10300_cpupic_mask, |
95 | .mask_ack = mn10300_cpupic_mask_ack, | 228 | .mask_ack = mn10300_cpupic_mask_ack, |
96 | .unmask = mn10300_cpupic_unmask, | 229 | .unmask = mn10300_cpupic_unmask, |
230 | #ifdef CONFIG_SMP | ||
231 | .set_affinity = mn10300_cpupic_setaffinity, | ||
232 | #endif | ||
97 | }; | 233 | }; |
98 | 234 | ||
99 | /* | 235 | /* |
@@ -111,14 +247,34 @@ void ack_bad_irq(int irq) | |||
111 | */ | 247 | */ |
112 | void set_intr_level(int irq, u16 level) | 248 | void set_intr_level(int irq, u16 level) |
113 | { | 249 | { |
114 | u16 tmp; | 250 | BUG_ON(in_interrupt()); |
115 | 251 | ||
116 | if (in_interrupt()) | 252 | __mask_and_set_icr(irq, GxICR_ENABLE, level); |
117 | BUG(); | 253 | } |
118 | 254 | ||
119 | tmp = GxICR(irq); | 255 | void mn10300_intc_set_level(unsigned int irq, unsigned int level) |
120 | GxICR(irq) = (tmp & GxICR_ENABLE) | level; | 256 | { |
121 | tmp = GxICR(irq); | 257 | set_intr_level(irq, NUM2GxICR_LEVEL(level) & GxICR_LEVEL); |
258 | } | ||
259 | |||
260 | void mn10300_intc_clear(unsigned int irq) | ||
261 | { | ||
262 | __mask_and_set_icr(irq, GxICR_LEVEL | GxICR_ENABLE, GxICR_DETECT); | ||
263 | } | ||
264 | |||
265 | void mn10300_intc_set(unsigned int irq) | ||
266 | { | ||
267 | __mask_and_set_icr(irq, 0, GxICR_REQUEST | GxICR_DETECT); | ||
268 | } | ||
269 | |||
270 | void mn10300_intc_enable(unsigned int irq) | ||
271 | { | ||
272 | mn10300_cpupic_unmask(irq); | ||
273 | } | ||
274 | |||
275 | void mn10300_intc_disable(unsigned int irq) | ||
276 | { | ||
277 | mn10300_cpupic_mask(irq); | ||
122 | } | 278 | } |
123 | 279 | ||
124 | /* | 280 | /* |
@@ -126,7 +282,7 @@ void set_intr_level(int irq, u16 level) | |||
126 | * than before | 282 | * than before |
127 | * - see Documentation/mn10300/features.txt | 283 | * - see Documentation/mn10300/features.txt |
128 | */ | 284 | */ |
129 | void set_intr_postackable(int irq) | 285 | void mn10300_set_lateack_irq_type(int irq) |
130 | { | 286 | { |
131 | set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level, | 287 | set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level, |
132 | handle_level_irq); | 288 | handle_level_irq); |
@@ -147,6 +303,7 @@ void __init init_IRQ(void) | |||
147 | * interrupts */ | 303 | * interrupts */ |
148 | set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge, | 304 | set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge, |
149 | handle_level_irq); | 305 | handle_level_irq); |
306 | |||
150 | unit_init_IRQ(); | 307 | unit_init_IRQ(); |
151 | } | 308 | } |
152 | 309 | ||
@@ -156,20 +313,22 @@ void __init init_IRQ(void) | |||
156 | asmlinkage void do_IRQ(void) | 313 | asmlinkage void do_IRQ(void) |
157 | { | 314 | { |
158 | unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw; | 315 | unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw; |
316 | unsigned int cpu_id = smp_processor_id(); | ||
159 | int irq; | 317 | int irq; |
160 | 318 | ||
161 | sp = current_stack_pointer(); | 319 | sp = current_stack_pointer(); |
162 | if (sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN) | 320 | BUG_ON(sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN); |
163 | BUG(); | ||
164 | 321 | ||
165 | /* make sure local_irq_enable() doesn't muck up the interrupt priority | 322 | /* make sure local_irq_enable() doesn't muck up the interrupt priority |
166 | * setting in EPSW */ | 323 | * setting in EPSW */ |
167 | old_irq_enabled_epsw = __mn10300_irq_enabled_epsw; | 324 | old_irq_enabled_epsw = __mn10300_irq_enabled_epsw[cpu_id]; |
168 | local_save_flags(epsw); | 325 | local_save_flags(epsw); |
169 | __mn10300_irq_enabled_epsw = EPSW_IE | (EPSW_IM & epsw); | 326 | __mn10300_irq_enabled_epsw[cpu_id] = EPSW_IE | (EPSW_IM & epsw); |
170 | irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL; | 327 | irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL; |
171 | 328 | ||
172 | __IRQ_STAT(smp_processor_id(), __irq_count)++; | 329 | #ifdef CONFIG_MN10300_WD_TIMER |
330 | __IRQ_STAT(cpu_id, __irq_count)++; | ||
331 | #endif | ||
173 | 332 | ||
174 | irq_enter(); | 333 | irq_enter(); |
175 | 334 | ||
@@ -189,7 +348,7 @@ asmlinkage void do_IRQ(void) | |||
189 | local_irq_restore(epsw); | 348 | local_irq_restore(epsw); |
190 | } | 349 | } |
191 | 350 | ||
192 | __mn10300_irq_enabled_epsw = old_irq_enabled_epsw; | 351 | __mn10300_irq_enabled_epsw[cpu_id] = old_irq_enabled_epsw; |
193 | 352 | ||
194 | irq_exit(); | 353 | irq_exit(); |
195 | } | 354 | } |
@@ -222,9 +381,16 @@ int show_interrupts(struct seq_file *p, void *v) | |||
222 | seq_printf(p, "%3d: ", i); | 381 | seq_printf(p, "%3d: ", i); |
223 | for_each_present_cpu(cpu) | 382 | for_each_present_cpu(cpu) |
224 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu)); | 383 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu)); |
225 | seq_printf(p, " %14s.%u", irq_desc[i].chip->name, | 384 | |
226 | (GxICR(i) & GxICR_LEVEL) >> | 385 | if (i < NR_CPU_IRQS) |
227 | GxICR_LEVEL_SHIFT); | 386 | seq_printf(p, " %14s.%u", |
387 | irq_desc[i].chip->name, | ||
388 | (GxICR(i) & GxICR_LEVEL) >> | ||
389 | GxICR_LEVEL_SHIFT); | ||
390 | else | ||
391 | seq_printf(p, " %14s", | ||
392 | irq_desc[i].chip->name); | ||
393 | |||
228 | seq_printf(p, " %s", action->name); | 394 | seq_printf(p, " %s", action->name); |
229 | 395 | ||
230 | for (action = action->next; | 396 | for (action = action->next; |
@@ -240,11 +406,13 @@ int show_interrupts(struct seq_file *p, void *v) | |||
240 | 406 | ||
241 | /* polish off with NMI and error counters */ | 407 | /* polish off with NMI and error counters */ |
242 | case NR_IRQS: | 408 | case NR_IRQS: |
409 | #ifdef CONFIG_MN10300_WD_TIMER | ||
243 | seq_printf(p, "NMI: "); | 410 | seq_printf(p, "NMI: "); |
244 | for (j = 0; j < NR_CPUS; j++) | 411 | for (j = 0; j < NR_CPUS; j++) |
245 | if (cpu_online(j)) | 412 | if (cpu_online(j)) |
246 | seq_printf(p, "%10u ", nmi_count(j)); | 413 | seq_printf(p, "%10u ", nmi_count(j)); |
247 | seq_putc(p, '\n'); | 414 | seq_putc(p, '\n'); |
415 | #endif | ||
248 | 416 | ||
249 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); | 417 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); |
250 | break; | 418 | break; |
@@ -252,3 +420,51 @@ int show_interrupts(struct seq_file *p, void *v) | |||
252 | 420 | ||
253 | return 0; | 421 | return 0; |
254 | } | 422 | } |
423 | |||
424 | #ifdef CONFIG_HOTPLUG_CPU | ||
425 | void migrate_irqs(void) | ||
426 | { | ||
427 | irq_desc_t *desc; | ||
428 | int irq; | ||
429 | unsigned int self, new; | ||
430 | unsigned long flags; | ||
431 | |||
432 | self = smp_processor_id(); | ||
433 | for (irq = 0; irq < NR_IRQS; irq++) { | ||
434 | desc = irq_desc + irq; | ||
435 | |||
436 | if (desc->status == IRQ_PER_CPU) | ||
437 | continue; | ||
438 | |||
439 | if (cpu_isset(self, irq_desc[irq].affinity) && | ||
440 | !cpus_intersects(irq_affinity[irq], cpu_online_map)) { | ||
441 | int cpu_id; | ||
442 | cpu_id = first_cpu(cpu_online_map); | ||
443 | cpu_set(cpu_id, irq_desc[irq].affinity); | ||
444 | } | ||
445 | /* We need to operate irq_affinity_online atomically. */ | ||
446 | arch_local_cli_save(flags); | ||
447 | if (irq_affinity_online[irq] == self) { | ||
448 | u16 x, tmp; | ||
449 | |||
450 | x = GxICR(irq); | ||
451 | GxICR(irq) = x & GxICR_LEVEL; | ||
452 | tmp = GxICR(irq); | ||
453 | |||
454 | new = any_online_cpu(irq_desc[irq].affinity); | ||
455 | irq_affinity_online[irq] = new; | ||
456 | |||
457 | CROSS_GxICR(irq, new) = | ||
458 | (x & GxICR_LEVEL) | GxICR_DETECT; | ||
459 | tmp = CROSS_GxICR(irq, new); | ||
460 | |||
461 | x &= GxICR_LEVEL | GxICR_ENABLE; | ||
462 | if (GxICR(irq) & GxICR_REQUEST) { | ||
463 | x |= GxICR_REQUEST | GxICR_DETECT; | ||
464 | CROSS_GxICR(irq, new) = x; | ||
465 | tmp = CROSS_GxICR(irq, new); | ||
466 | } | ||
467 | arch_local_irq_restore(flags); | ||
468 | } | ||
469 | } | ||
470 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
diff --git a/arch/mn10300/kernel/kprobes.c b/arch/mn10300/kernel/kprobes.c index 67e6389d625a..0311a7fcea16 100644 --- a/arch/mn10300/kernel/kprobes.c +++ b/arch/mn10300/kernel/kprobes.c | |||
@@ -377,8 +377,10 @@ void __kprobes arch_arm_kprobe(struct kprobe *p) | |||
377 | 377 | ||
378 | void __kprobes arch_disarm_kprobe(struct kprobe *p) | 378 | void __kprobes arch_disarm_kprobe(struct kprobe *p) |
379 | { | 379 | { |
380 | #ifndef CONFIG_MN10300_CACHE_SNOOP | ||
380 | mn10300_dcache_flush(); | 381 | mn10300_dcache_flush(); |
381 | mn10300_icache_inv(); | 382 | mn10300_icache_inv(); |
383 | #endif | ||
382 | } | 384 | } |
383 | 385 | ||
384 | void arch_remove_kprobe(struct kprobe *p) | 386 | void arch_remove_kprobe(struct kprobe *p) |
@@ -390,8 +392,10 @@ void __kprobes disarm_kprobe(struct kprobe *p, struct pt_regs *regs) | |||
390 | { | 392 | { |
391 | *p->addr = p->opcode; | 393 | *p->addr = p->opcode; |
392 | regs->pc = (unsigned long) p->addr; | 394 | regs->pc = (unsigned long) p->addr; |
395 | #ifndef CONFIG_MN10300_CACHE_SNOOP | ||
393 | mn10300_dcache_flush(); | 396 | mn10300_dcache_flush(); |
394 | mn10300_icache_inv(); | 397 | mn10300_icache_inv(); |
398 | #endif | ||
395 | } | 399 | } |
396 | 400 | ||
397 | static inline | 401 | static inline |
diff --git a/arch/mn10300/kernel/mn10300-serial-low.S b/arch/mn10300/kernel/mn10300-serial-low.S index 66702d256610..dfc1b6f2fa9a 100644 --- a/arch/mn10300/kernel/mn10300-serial-low.S +++ b/arch/mn10300/kernel/mn10300-serial-low.S | |||
@@ -39,7 +39,7 @@ | |||
39 | ############################################################################### | 39 | ############################################################################### |
40 | .balign L1_CACHE_BYTES | 40 | .balign L1_CACHE_BYTES |
41 | ENTRY(mn10300_serial_vdma_interrupt) | 41 | ENTRY(mn10300_serial_vdma_interrupt) |
42 | or EPSW_IE,psw # permit overriding by | 42 | # or EPSW_IE,psw # permit overriding by |
43 | # debugging interrupts | 43 | # debugging interrupts |
44 | movm [d2,d3,a2,a3,exreg0],(sp) | 44 | movm [d2,d3,a2,a3,exreg0],(sp) |
45 | 45 | ||
@@ -164,7 +164,7 @@ mnsc_vdma_tx_noint: | |||
164 | rti | 164 | rti |
165 | 165 | ||
166 | mnsc_vdma_tx_empty: | 166 | mnsc_vdma_tx_empty: |
167 | mov +(GxICR_LEVEL_1|GxICR_DETECT),d2 | 167 | mov +(NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)|GxICR_DETECT),d2 |
168 | movhu d2,(e3) # disable the interrupt | 168 | movhu d2,(e3) # disable the interrupt |
169 | movhu (e3),d2 # flush | 169 | movhu (e3),d2 # flush |
170 | 170 | ||
@@ -175,7 +175,7 @@ mnsc_vdma_tx_break: | |||
175 | movhu (SCxCTR,e2),d2 # turn on break mode | 175 | movhu (SCxCTR,e2),d2 # turn on break mode |
176 | or SC01CTR_BKE,d2 | 176 | or SC01CTR_BKE,d2 |
177 | movhu d2,(SCxCTR,e2) | 177 | movhu d2,(SCxCTR,e2) |
178 | mov +(GxICR_LEVEL_1|GxICR_DETECT),d2 | 178 | mov +(NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)|GxICR_DETECT),d2 |
179 | movhu d2,(e3) # disable transmit interrupts on this | 179 | movhu d2,(e3) # disable transmit interrupts on this |
180 | # channel | 180 | # channel |
181 | movhu (e3),d2 # flush | 181 | movhu (e3),d2 # flush |
diff --git a/arch/mn10300/kernel/mn10300-serial.c b/arch/mn10300/kernel/mn10300-serial.c index db509dd80565..996384dba45d 100644 --- a/arch/mn10300/kernel/mn10300-serial.c +++ b/arch/mn10300/kernel/mn10300-serial.c | |||
@@ -44,6 +44,11 @@ static const char serial_revdate[] = "2007-11-06"; | |||
44 | #include <unit/timex.h> | 44 | #include <unit/timex.h> |
45 | #include "mn10300-serial.h" | 45 | #include "mn10300-serial.h" |
46 | 46 | ||
47 | #ifdef CONFIG_SMP | ||
48 | #undef GxICR | ||
49 | #define GxICR(X) CROSS_GxICR(X, 0) | ||
50 | #endif /* CONFIG_SMP */ | ||
51 | |||
47 | #define kenter(FMT, ...) \ | 52 | #define kenter(FMT, ...) \ |
48 | printk(KERN_DEBUG "-->%s(" FMT ")\n", __func__, ##__VA_ARGS__) | 53 | printk(KERN_DEBUG "-->%s(" FMT ")\n", __func__, ##__VA_ARGS__) |
49 | #define _enter(FMT, ...) \ | 54 | #define _enter(FMT, ...) \ |
@@ -57,6 +62,11 @@ static const char serial_revdate[] = "2007-11-06"; | |||
57 | #define _proto(FMT, ...) \ | 62 | #define _proto(FMT, ...) \ |
58 | no_printk(KERN_DEBUG "### MNSERIAL " FMT " ###\n", ##__VA_ARGS__) | 63 | no_printk(KERN_DEBUG "### MNSERIAL " FMT " ###\n", ##__VA_ARGS__) |
59 | 64 | ||
65 | #ifndef CODMSB | ||
66 | /* c_cflag bit meaning */ | ||
67 | #define CODMSB 004000000000 /* change Transfer bit-order */ | ||
68 | #endif | ||
69 | |||
60 | #define NR_UARTS 3 | 70 | #define NR_UARTS 3 |
61 | 71 | ||
62 | #ifdef CONFIG_MN10300_TTYSM_CONSOLE | 72 | #ifdef CONFIG_MN10300_TTYSM_CONSOLE |
@@ -152,26 +162,35 @@ struct mn10300_serial_port mn10300_serial_port_sif0 = { | |||
152 | .name = "ttySM0", | 162 | .name = "ttySM0", |
153 | ._iobase = &SC0CTR, | 163 | ._iobase = &SC0CTR, |
154 | ._control = &SC0CTR, | 164 | ._control = &SC0CTR, |
155 | ._status = (volatile u8 *) &SC0STR, | 165 | ._status = (volatile u8 *)&SC0STR, |
156 | ._intr = &SC0ICR, | 166 | ._intr = &SC0ICR, |
157 | ._rxb = &SC0RXB, | 167 | ._rxb = &SC0RXB, |
158 | ._txb = &SC0TXB, | 168 | ._txb = &SC0TXB, |
159 | .rx_name = "ttySM0:Rx", | 169 | .rx_name = "ttySM0:Rx", |
160 | .tx_name = "ttySM0:Tx", | 170 | .tx_name = "ttySM0:Tx", |
161 | #ifdef CONFIG_MN10300_TTYSM0_TIMER8 | 171 | #if defined(CONFIG_MN10300_TTYSM0_TIMER8) |
162 | .tm_name = "ttySM0:Timer8", | 172 | .tm_name = "ttySM0:Timer8", |
163 | ._tmxmd = &TM8MD, | 173 | ._tmxmd = &TM8MD, |
164 | ._tmxbr = &TM8BR, | 174 | ._tmxbr = &TM8BR, |
165 | ._tmicr = &TM8ICR, | 175 | ._tmicr = &TM8ICR, |
166 | .tm_irq = TM8IRQ, | 176 | .tm_irq = TM8IRQ, |
167 | .div_timer = MNSCx_DIV_TIMER_16BIT, | 177 | .div_timer = MNSCx_DIV_TIMER_16BIT, |
168 | #else /* CONFIG_MN10300_TTYSM0_TIMER2 */ | 178 | #elif defined(CONFIG_MN10300_TTYSM0_TIMER0) |
179 | .tm_name = "ttySM0:Timer0", | ||
180 | ._tmxmd = &TM0MD, | ||
181 | ._tmxbr = (volatile u16 *)&TM0BR, | ||
182 | ._tmicr = &TM0ICR, | ||
183 | .tm_irq = TM0IRQ, | ||
184 | .div_timer = MNSCx_DIV_TIMER_8BIT, | ||
185 | #elif defined(CONFIG_MN10300_TTYSM0_TIMER2) | ||
169 | .tm_name = "ttySM0:Timer2", | 186 | .tm_name = "ttySM0:Timer2", |
170 | ._tmxmd = &TM2MD, | 187 | ._tmxmd = &TM2MD, |
171 | ._tmxbr = (volatile u16 *) &TM2BR, | 188 | ._tmxbr = (volatile u16 *)&TM2BR, |
172 | ._tmicr = &TM2ICR, | 189 | ._tmicr = &TM2ICR, |
173 | .tm_irq = TM2IRQ, | 190 | .tm_irq = TM2IRQ, |
174 | .div_timer = MNSCx_DIV_TIMER_8BIT, | 191 | .div_timer = MNSCx_DIV_TIMER_8BIT, |
192 | #else | ||
193 | #error "Unknown config for ttySM0" | ||
175 | #endif | 194 | #endif |
176 | .rx_irq = SC0RXIRQ, | 195 | .rx_irq = SC0RXIRQ, |
177 | .tx_irq = SC0TXIRQ, | 196 | .tx_irq = SC0TXIRQ, |
@@ -205,26 +224,35 @@ struct mn10300_serial_port mn10300_serial_port_sif1 = { | |||
205 | .name = "ttySM1", | 224 | .name = "ttySM1", |
206 | ._iobase = &SC1CTR, | 225 | ._iobase = &SC1CTR, |
207 | ._control = &SC1CTR, | 226 | ._control = &SC1CTR, |
208 | ._status = (volatile u8 *) &SC1STR, | 227 | ._status = (volatile u8 *)&SC1STR, |
209 | ._intr = &SC1ICR, | 228 | ._intr = &SC1ICR, |
210 | ._rxb = &SC1RXB, | 229 | ._rxb = &SC1RXB, |
211 | ._txb = &SC1TXB, | 230 | ._txb = &SC1TXB, |
212 | .rx_name = "ttySM1:Rx", | 231 | .rx_name = "ttySM1:Rx", |
213 | .tx_name = "ttySM1:Tx", | 232 | .tx_name = "ttySM1:Tx", |
214 | #ifdef CONFIG_MN10300_TTYSM1_TIMER9 | 233 | #if defined(CONFIG_MN10300_TTYSM1_TIMER9) |
215 | .tm_name = "ttySM1:Timer9", | 234 | .tm_name = "ttySM1:Timer9", |
216 | ._tmxmd = &TM9MD, | 235 | ._tmxmd = &TM9MD, |
217 | ._tmxbr = &TM9BR, | 236 | ._tmxbr = &TM9BR, |
218 | ._tmicr = &TM9ICR, | 237 | ._tmicr = &TM9ICR, |
219 | .tm_irq = TM9IRQ, | 238 | .tm_irq = TM9IRQ, |
220 | .div_timer = MNSCx_DIV_TIMER_16BIT, | 239 | .div_timer = MNSCx_DIV_TIMER_16BIT, |
221 | #else /* CONFIG_MN10300_TTYSM1_TIMER3 */ | 240 | #elif defined(CONFIG_MN10300_TTYSM1_TIMER3) |
222 | .tm_name = "ttySM1:Timer3", | 241 | .tm_name = "ttySM1:Timer3", |
223 | ._tmxmd = &TM3MD, | 242 | ._tmxmd = &TM3MD, |
224 | ._tmxbr = (volatile u16 *) &TM3BR, | 243 | ._tmxbr = (volatile u16 *)&TM3BR, |
225 | ._tmicr = &TM3ICR, | 244 | ._tmicr = &TM3ICR, |
226 | .tm_irq = TM3IRQ, | 245 | .tm_irq = TM3IRQ, |
227 | .div_timer = MNSCx_DIV_TIMER_8BIT, | 246 | .div_timer = MNSCx_DIV_TIMER_8BIT, |
247 | #elif defined(CONFIG_MN10300_TTYSM1_TIMER12) | ||
248 | .tm_name = "ttySM1/Timer12", | ||
249 | ._tmxmd = &TM12MD, | ||
250 | ._tmxbr = &TM12BR, | ||
251 | ._tmicr = &TM12ICR, | ||
252 | .tm_irq = TM12IRQ, | ||
253 | .div_timer = MNSCx_DIV_TIMER_16BIT, | ||
254 | #else | ||
255 | #error "Unknown config for ttySM1" | ||
228 | #endif | 256 | #endif |
229 | .rx_irq = SC1RXIRQ, | 257 | .rx_irq = SC1RXIRQ, |
230 | .tx_irq = SC1TXIRQ, | 258 | .tx_irq = SC1TXIRQ, |
@@ -260,20 +288,45 @@ struct mn10300_serial_port mn10300_serial_port_sif2 = { | |||
260 | .uart.lock = | 288 | .uart.lock = |
261 | __SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock), | 289 | __SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock), |
262 | .name = "ttySM2", | 290 | .name = "ttySM2", |
263 | .rx_name = "ttySM2:Rx", | ||
264 | .tx_name = "ttySM2:Tx", | ||
265 | .tm_name = "ttySM2:Timer10", | ||
266 | ._iobase = &SC2CTR, | 291 | ._iobase = &SC2CTR, |
267 | ._control = &SC2CTR, | 292 | ._control = &SC2CTR, |
268 | ._status = &SC2STR, | 293 | ._status = (volatile u8 *)&SC2STR, |
269 | ._intr = &SC2ICR, | 294 | ._intr = &SC2ICR, |
270 | ._rxb = &SC2RXB, | 295 | ._rxb = &SC2RXB, |
271 | ._txb = &SC2TXB, | 296 | ._txb = &SC2TXB, |
297 | .rx_name = "ttySM2:Rx", | ||
298 | .tx_name = "ttySM2:Tx", | ||
299 | #if defined(CONFIG_MN10300_TTYSM2_TIMER10) | ||
300 | .tm_name = "ttySM2/Timer10", | ||
272 | ._tmxmd = &TM10MD, | 301 | ._tmxmd = &TM10MD, |
273 | ._tmxbr = &TM10BR, | 302 | ._tmxbr = &TM10BR, |
274 | ._tmicr = &TM10ICR, | 303 | ._tmicr = &TM10ICR, |
275 | .tm_irq = TM10IRQ, | 304 | .tm_irq = TM10IRQ, |
276 | .div_timer = MNSCx_DIV_TIMER_16BIT, | 305 | .div_timer = MNSCx_DIV_TIMER_16BIT, |
306 | #elif defined(CONFIG_MN10300_TTYSM2_TIMER9) | ||
307 | .tm_name = "ttySM2/Timer9", | ||
308 | ._tmxmd = &TM9MD, | ||
309 | ._tmxbr = &TM9BR, | ||
310 | ._tmicr = &TM9ICR, | ||
311 | .tm_irq = TM9IRQ, | ||
312 | .div_timer = MNSCx_DIV_TIMER_16BIT, | ||
313 | #elif defined(CONFIG_MN10300_TTYSM2_TIMER1) | ||
314 | .tm_name = "ttySM2/Timer1", | ||
315 | ._tmxmd = &TM1MD, | ||
316 | ._tmxbr = (volatile u16 *)&TM1BR, | ||
317 | ._tmicr = &TM1ICR, | ||
318 | .tm_irq = TM1IRQ, | ||
319 | .div_timer = MNSCx_DIV_TIMER_8BIT, | ||
320 | #elif defined(CONFIG_MN10300_TTYSM2_TIMER3) | ||
321 | .tm_name = "ttySM2/Timer3", | ||
322 | ._tmxmd = &TM3MD, | ||
323 | ._tmxbr = (volatile u16 *)&TM3BR, | ||
324 | ._tmicr = &TM3ICR, | ||
325 | .tm_irq = TM3IRQ, | ||
326 | .div_timer = MNSCx_DIV_TIMER_8BIT, | ||
327 | #else | ||
328 | #error "Unknown config for ttySM2" | ||
329 | #endif | ||
277 | .rx_irq = SC2RXIRQ, | 330 | .rx_irq = SC2RXIRQ, |
278 | .tx_irq = SC2TXIRQ, | 331 | .tx_irq = SC2TXIRQ, |
279 | .rx_icr = &GxICR(SC2RXIRQ), | 332 | .rx_icr = &GxICR(SC2RXIRQ), |
@@ -322,9 +375,13 @@ struct mn10300_serial_port *mn10300_serial_ports[NR_UARTS + 1] = { | |||
322 | */ | 375 | */ |
323 | static void mn10300_serial_mask_ack(unsigned int irq) | 376 | static void mn10300_serial_mask_ack(unsigned int irq) |
324 | { | 377 | { |
378 | unsigned long flags; | ||
325 | u16 tmp; | 379 | u16 tmp; |
380 | |||
381 | flags = arch_local_cli_save(); | ||
326 | GxICR(irq) = GxICR_LEVEL_6; | 382 | GxICR(irq) = GxICR_LEVEL_6; |
327 | tmp = GxICR(irq); /* flush write buffer */ | 383 | tmp = GxICR(irq); /* flush write buffer */ |
384 | arch_local_irq_restore(flags); | ||
328 | } | 385 | } |
329 | 386 | ||
330 | static void mn10300_serial_nop(unsigned int irq) | 387 | static void mn10300_serial_nop(unsigned int irq) |
@@ -348,23 +405,36 @@ struct mn10300_serial_int mn10300_serial_int_tbl[NR_IRQS]; | |||
348 | 405 | ||
349 | static void mn10300_serial_dis_tx_intr(struct mn10300_serial_port *port) | 406 | static void mn10300_serial_dis_tx_intr(struct mn10300_serial_port *port) |
350 | { | 407 | { |
408 | unsigned long flags; | ||
351 | u16 x; | 409 | u16 x; |
352 | *port->tx_icr = GxICR_LEVEL_1 | GxICR_DETECT; | 410 | |
411 | flags = arch_local_cli_save(); | ||
412 | *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL); | ||
353 | x = *port->tx_icr; | 413 | x = *port->tx_icr; |
414 | arch_local_irq_restore(flags); | ||
354 | } | 415 | } |
355 | 416 | ||
356 | static void mn10300_serial_en_tx_intr(struct mn10300_serial_port *port) | 417 | static void mn10300_serial_en_tx_intr(struct mn10300_serial_port *port) |
357 | { | 418 | { |
419 | unsigned long flags; | ||
358 | u16 x; | 420 | u16 x; |
359 | *port->tx_icr = GxICR_LEVEL_1 | GxICR_ENABLE; | 421 | |
422 | flags = arch_local_cli_save(); | ||
423 | *port->tx_icr = | ||
424 | NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL) | GxICR_ENABLE; | ||
360 | x = *port->tx_icr; | 425 | x = *port->tx_icr; |
426 | arch_local_irq_restore(flags); | ||
361 | } | 427 | } |
362 | 428 | ||
363 | static void mn10300_serial_dis_rx_intr(struct mn10300_serial_port *port) | 429 | static void mn10300_serial_dis_rx_intr(struct mn10300_serial_port *port) |
364 | { | 430 | { |
431 | unsigned long flags; | ||
365 | u16 x; | 432 | u16 x; |
366 | *port->rx_icr = GxICR_LEVEL_1 | GxICR_DETECT; | 433 | |
434 | flags = arch_local_cli_save(); | ||
435 | *port->rx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL); | ||
367 | x = *port->rx_icr; | 436 | x = *port->rx_icr; |
437 | arch_local_irq_restore(flags); | ||
368 | } | 438 | } |
369 | 439 | ||
370 | /* | 440 | /* |
@@ -650,7 +720,7 @@ static unsigned int mn10300_serial_tx_empty(struct uart_port *_port) | |||
650 | static void mn10300_serial_set_mctrl(struct uart_port *_port, | 720 | static void mn10300_serial_set_mctrl(struct uart_port *_port, |
651 | unsigned int mctrl) | 721 | unsigned int mctrl) |
652 | { | 722 | { |
653 | struct mn10300_serial_port *port = | 723 | struct mn10300_serial_port *port __attribute__ ((unused)) = |
654 | container_of(_port, struct mn10300_serial_port, uart); | 724 | container_of(_port, struct mn10300_serial_port, uart); |
655 | 725 | ||
656 | _enter("%s,%x", port->name, mctrl); | 726 | _enter("%s,%x", port->name, mctrl); |
@@ -706,6 +776,7 @@ static void mn10300_serial_start_tx(struct uart_port *_port) | |||
706 | UART_XMIT_SIZE)); | 776 | UART_XMIT_SIZE)); |
707 | 777 | ||
708 | /* kick the virtual DMA controller */ | 778 | /* kick the virtual DMA controller */ |
779 | arch_local_cli(); | ||
709 | x = *port->tx_icr; | 780 | x = *port->tx_icr; |
710 | x |= GxICR_ENABLE; | 781 | x |= GxICR_ENABLE; |
711 | 782 | ||
@@ -716,10 +787,14 @@ static void mn10300_serial_start_tx(struct uart_port *_port) | |||
716 | 787 | ||
717 | _debug("CTR=%04hx ICR=%02hx STR=%04x TMD=%02hx TBR=%04hx ICR=%04hx", | 788 | _debug("CTR=%04hx ICR=%02hx STR=%04x TMD=%02hx TBR=%04hx ICR=%04hx", |
718 | *port->_control, *port->_intr, *port->_status, | 789 | *port->_control, *port->_intr, *port->_status, |
719 | *port->_tmxmd, *port->_tmxbr, *port->tx_icr); | 790 | *port->_tmxmd, |
791 | (port->div_timer == MNSCx_DIV_TIMER_8BIT) ? | ||
792 | *(volatile u8 *)port->_tmxbr : *port->_tmxbr, | ||
793 | *port->tx_icr); | ||
720 | 794 | ||
721 | *port->tx_icr = x; | 795 | *port->tx_icr = x; |
722 | x = *port->tx_icr; | 796 | x = *port->tx_icr; |
797 | arch_local_sti(); | ||
723 | } | 798 | } |
724 | 799 | ||
725 | /* | 800 | /* |
@@ -842,8 +917,10 @@ static int mn10300_serial_startup(struct uart_port *_port) | |||
842 | pint->port = port; | 917 | pint->port = port; |
843 | pint->vdma = mn10300_serial_vdma_tx_handler; | 918 | pint->vdma = mn10300_serial_vdma_tx_handler; |
844 | 919 | ||
845 | set_intr_level(port->rx_irq, GxICR_LEVEL_1); | 920 | set_intr_level(port->rx_irq, |
846 | set_intr_level(port->tx_irq, GxICR_LEVEL_1); | 921 | NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)); |
922 | set_intr_level(port->tx_irq, | ||
923 | NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)); | ||
847 | set_irq_chip(port->tm_irq, &mn10300_serial_pic); | 924 | set_irq_chip(port->tm_irq, &mn10300_serial_pic); |
848 | 925 | ||
849 | if (request_irq(port->rx_irq, mn10300_serial_interrupt, | 926 | if (request_irq(port->rx_irq, mn10300_serial_interrupt, |
@@ -876,6 +953,7 @@ error: | |||
876 | */ | 953 | */ |
877 | static void mn10300_serial_shutdown(struct uart_port *_port) | 954 | static void mn10300_serial_shutdown(struct uart_port *_port) |
878 | { | 955 | { |
956 | u16 x; | ||
879 | struct mn10300_serial_port *port = | 957 | struct mn10300_serial_port *port = |
880 | container_of(_port, struct mn10300_serial_port, uart); | 958 | container_of(_port, struct mn10300_serial_port, uart); |
881 | 959 | ||
@@ -897,8 +975,12 @@ static void mn10300_serial_shutdown(struct uart_port *_port) | |||
897 | free_irq(port->rx_irq, port); | 975 | free_irq(port->rx_irq, port); |
898 | free_irq(port->tx_irq, port); | 976 | free_irq(port->tx_irq, port); |
899 | 977 | ||
900 | *port->rx_icr = GxICR_LEVEL_1; | 978 | arch_local_cli(); |
901 | *port->tx_icr = GxICR_LEVEL_1; | 979 | *port->rx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL); |
980 | x = *port->rx_icr; | ||
981 | *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL); | ||
982 | x = *port->tx_icr; | ||
983 | arch_local_sti(); | ||
902 | } | 984 | } |
903 | 985 | ||
904 | /* | 986 | /* |
@@ -947,11 +1029,66 @@ static void mn10300_serial_change_speed(struct mn10300_serial_port *port, | |||
947 | /* Determine divisor based on baud rate */ | 1029 | /* Determine divisor based on baud rate */ |
948 | battempt = 0; | 1030 | battempt = 0; |
949 | 1031 | ||
950 | if (div_timer == MNSCx_DIV_TIMER_16BIT) | 1032 | switch (port->uart.line) { |
951 | scxctr |= SC0CTR_CK_TM8UFLOW_8; /* ( == SC1CTR_CK_TM9UFLOW_8 | 1033 | #ifdef CONFIG_MN10300_TTYSM0 |
952 | * == SC2CTR_CK_TM10UFLOW) */ | 1034 | case 0: /* ttySM0 */ |
953 | else if (div_timer == MNSCx_DIV_TIMER_8BIT) | 1035 | #if defined(CONFIG_MN10300_TTYSM0_TIMER8) |
1036 | scxctr |= SC0CTR_CK_TM8UFLOW_8; | ||
1037 | #elif defined(CONFIG_MN10300_TTYSM0_TIMER0) | ||
1038 | scxctr |= SC0CTR_CK_TM0UFLOW_8; | ||
1039 | #elif defined(CONFIG_MN10300_TTYSM0_TIMER2) | ||
954 | scxctr |= SC0CTR_CK_TM2UFLOW_8; | 1040 | scxctr |= SC0CTR_CK_TM2UFLOW_8; |
1041 | #else | ||
1042 | #error "Unknown config for ttySM0" | ||
1043 | #endif | ||
1044 | break; | ||
1045 | #endif /* CONFIG_MN10300_TTYSM0 */ | ||
1046 | |||
1047 | #ifdef CONFIG_MN10300_TTYSM1 | ||
1048 | case 1: /* ttySM1 */ | ||
1049 | #if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3) | ||
1050 | #if defined(CONFIG_MN10300_TTYSM1_TIMER9) | ||
1051 | scxctr |= SC1CTR_CK_TM9UFLOW_8; | ||
1052 | #elif defined(CONFIG_MN10300_TTYSM1_TIMER3) | ||
1053 | scxctr |= SC1CTR_CK_TM3UFLOW_8; | ||
1054 | #else | ||
1055 | #error "Unknown config for ttySM1" | ||
1056 | #endif | ||
1057 | #else /* CONFIG_AM33_2 || CONFIG_AM33_3 */ | ||
1058 | #if defined(CONFIG_MN10300_TTYSM1_TIMER12) | ||
1059 | scxctr |= SC1CTR_CK_TM12UFLOW_8; | ||
1060 | #else | ||
1061 | #error "Unknown config for ttySM1" | ||
1062 | #endif | ||
1063 | #endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */ | ||
1064 | break; | ||
1065 | #endif /* CONFIG_MN10300_TTYSM1 */ | ||
1066 | |||
1067 | #ifdef CONFIG_MN10300_TTYSM2 | ||
1068 | case 2: /* ttySM2 */ | ||
1069 | #if defined(CONFIG_AM33_2) | ||
1070 | #if defined(CONFIG_MN10300_TTYSM2_TIMER10) | ||
1071 | scxctr |= SC2CTR_CK_TM10UFLOW; | ||
1072 | #else | ||
1073 | #error "Unknown config for ttySM2" | ||
1074 | #endif | ||
1075 | #else /* CONFIG_AM33_2 */ | ||
1076 | #if defined(CONFIG_MN10300_TTYSM2_TIMER9) | ||
1077 | scxctr |= SC2CTR_CK_TM9UFLOW_8; | ||
1078 | #elif defined(CONFIG_MN10300_TTYSM2_TIMER1) | ||
1079 | scxctr |= SC2CTR_CK_TM1UFLOW_8; | ||
1080 | #elif defined(CONFIG_MN10300_TTYSM2_TIMER3) | ||
1081 | scxctr |= SC2CTR_CK_TM3UFLOW_8; | ||
1082 | #else | ||
1083 | #error "Unknown config for ttySM2" | ||
1084 | #endif | ||
1085 | #endif /* CONFIG_AM33_2 */ | ||
1086 | break; | ||
1087 | #endif /* CONFIG_MN10300_TTYSM2 */ | ||
1088 | |||
1089 | default: | ||
1090 | break; | ||
1091 | } | ||
955 | 1092 | ||
956 | try_alternative: | 1093 | try_alternative: |
957 | baud = uart_get_baud_rate(&port->uart, new, old, 0, | 1094 | baud = uart_get_baud_rate(&port->uart, new, old, 0, |
@@ -1195,6 +1332,12 @@ static void mn10300_serial_set_termios(struct uart_port *_port, | |||
1195 | ctr &= ~SC2CTR_TWE; | 1332 | ctr &= ~SC2CTR_TWE; |
1196 | *port->_control = ctr; | 1333 | *port->_control = ctr; |
1197 | } | 1334 | } |
1335 | |||
1336 | /* change Transfer bit-order (LSB/MSB) */ | ||
1337 | if (new->c_cflag & CODMSB) | ||
1338 | *port->_control |= SC01CTR_OD_MSBFIRST; /* MSB MODE */ | ||
1339 | else | ||
1340 | *port->_control &= ~SC01CTR_OD_MSBFIRST; /* LSB MODE */ | ||
1198 | } | 1341 | } |
1199 | 1342 | ||
1200 | /* | 1343 | /* |
@@ -1302,11 +1445,16 @@ static int __init mn10300_serial_init(void) | |||
1302 | printk(KERN_INFO "%s version %s (%s)\n", | 1445 | printk(KERN_INFO "%s version %s (%s)\n", |
1303 | serial_name, serial_version, serial_revdate); | 1446 | serial_name, serial_version, serial_revdate); |
1304 | 1447 | ||
1305 | #ifdef CONFIG_MN10300_TTYSM2 | 1448 | #if defined(CONFIG_MN10300_TTYSM2) && defined(CONFIG_AM33_2) |
1306 | SC2TIM = 8; /* make the baud base of timer 2 IOCLK/8 */ | 1449 | { |
1450 | int tmp; | ||
1451 | SC2TIM = 8; /* make the baud base of timer 2 IOCLK/8 */ | ||
1452 | tmp = SC2TIM; | ||
1453 | } | ||
1307 | #endif | 1454 | #endif |
1308 | 1455 | ||
1309 | set_intr_stub(EXCEP_IRQ_LEVEL1, mn10300_serial_vdma_interrupt); | 1456 | set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL), |
1457 | mn10300_serial_vdma_interrupt); | ||
1310 | 1458 | ||
1311 | ret = uart_register_driver(&mn10300_serial_driver); | 1459 | ret = uart_register_driver(&mn10300_serial_driver); |
1312 | if (!ret) { | 1460 | if (!ret) { |
@@ -1366,9 +1514,11 @@ static void mn10300_serial_console_write(struct console *co, | |||
1366 | port = mn10300_serial_ports[co->index]; | 1514 | port = mn10300_serial_ports[co->index]; |
1367 | 1515 | ||
1368 | /* firstly hijack the serial port from the "virtual DMA" controller */ | 1516 | /* firstly hijack the serial port from the "virtual DMA" controller */ |
1517 | arch_local_cli(); | ||
1369 | txicr = *port->tx_icr; | 1518 | txicr = *port->tx_icr; |
1370 | *port->tx_icr = GxICR_LEVEL_1; | 1519 | *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL); |
1371 | tmp = *port->tx_icr; | 1520 | tmp = *port->tx_icr; |
1521 | arch_local_sti(); | ||
1372 | 1522 | ||
1373 | /* the transmitter may be disabled */ | 1523 | /* the transmitter may be disabled */ |
1374 | scxctr = *port->_control; | 1524 | scxctr = *port->_control; |
@@ -1422,8 +1572,10 @@ static void mn10300_serial_console_write(struct console *co, | |||
1422 | if (!(scxctr & SC01CTR_TXE)) | 1572 | if (!(scxctr & SC01CTR_TXE)) |
1423 | *port->_control = scxctr; | 1573 | *port->_control = scxctr; |
1424 | 1574 | ||
1575 | arch_local_cli(); | ||
1425 | *port->tx_icr = txicr; | 1576 | *port->tx_icr = txicr; |
1426 | tmp = *port->tx_icr; | 1577 | tmp = *port->tx_icr; |
1578 | arch_local_sti(); | ||
1427 | } | 1579 | } |
1428 | 1580 | ||
1429 | /* | 1581 | /* |
diff --git a/arch/mn10300/kernel/mn10300-watchdog-low.S b/arch/mn10300/kernel/mn10300-watchdog-low.S index 996244745cca..f2f5c9cfaabd 100644 --- a/arch/mn10300/kernel/mn10300-watchdog-low.S +++ b/arch/mn10300/kernel/mn10300-watchdog-low.S | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <asm/intctl-regs.h> | 16 | #include <asm/intctl-regs.h> |
17 | #include <asm/timer-regs.h> | 17 | #include <asm/timer-regs.h> |
18 | #include <asm/frame.inc> | 18 | #include <asm/frame.inc> |
19 | #include <linux/threads.h> | ||
19 | 20 | ||
20 | .text | 21 | .text |
21 | 22 | ||
@@ -53,7 +54,13 @@ watchdog_handler: | |||
53 | .type touch_nmi_watchdog,@function | 54 | .type touch_nmi_watchdog,@function |
54 | touch_nmi_watchdog: | 55 | touch_nmi_watchdog: |
55 | clr d0 | 56 | clr d0 |
56 | mov d0,(watchdog_alert_counter) | 57 | clr d1 |
58 | mov watchdog_alert_counter, a0 | ||
59 | setlb | ||
60 | mov d0, (a0+) | ||
61 | inc d1 | ||
62 | cmp NR_CPUS, d1 | ||
63 | lne | ||
57 | ret [],0 | 64 | ret [],0 |
58 | 65 | ||
59 | .size touch_nmi_watchdog,.-touch_nmi_watchdog | 66 | .size touch_nmi_watchdog,.-touch_nmi_watchdog |
diff --git a/arch/mn10300/kernel/mn10300-watchdog.c b/arch/mn10300/kernel/mn10300-watchdog.c index f362d9d138f1..c5e12bfd9fcd 100644 --- a/arch/mn10300/kernel/mn10300-watchdog.c +++ b/arch/mn10300/kernel/mn10300-watchdog.c | |||
@@ -30,7 +30,7 @@ | |||
30 | static DEFINE_SPINLOCK(watchdog_print_lock); | 30 | static DEFINE_SPINLOCK(watchdog_print_lock); |
31 | static unsigned int watchdog; | 31 | static unsigned int watchdog; |
32 | static unsigned int watchdog_hz = 1; | 32 | static unsigned int watchdog_hz = 1; |
33 | unsigned int watchdog_alert_counter; | 33 | unsigned int watchdog_alert_counter[NR_CPUS]; |
34 | 34 | ||
35 | EXPORT_SYMBOL(touch_nmi_watchdog); | 35 | EXPORT_SYMBOL(touch_nmi_watchdog); |
36 | 36 | ||
@@ -39,9 +39,6 @@ EXPORT_SYMBOL(touch_nmi_watchdog); | |||
39 | * is to check its timer makes IRQ counts. If they are not | 39 | * is to check its timer makes IRQ counts. If they are not |
40 | * changing then that CPU has some problem. | 40 | * changing then that CPU has some problem. |
41 | * | 41 | * |
42 | * as these watchdog NMI IRQs are generated on every CPU, we only | ||
43 | * have to check the current processor. | ||
44 | * | ||
45 | * since NMIs dont listen to _any_ locks, we have to be extremely | 42 | * since NMIs dont listen to _any_ locks, we have to be extremely |
46 | * careful not to rely on unsafe variables. The printk might lock | 43 | * careful not to rely on unsafe variables. The printk might lock |
47 | * up though, so we have to break up any console locks first ... | 44 | * up though, so we have to break up any console locks first ... |
@@ -69,8 +66,8 @@ int __init check_watchdog(void) | |||
69 | 66 | ||
70 | printk(KERN_INFO "OK.\n"); | 67 | printk(KERN_INFO "OK.\n"); |
71 | 68 | ||
72 | /* now that we know it works we can reduce NMI frequency to | 69 | /* now that we know it works we can reduce NMI frequency to something |
73 | * something more reasonable; makes a difference in some configs | 70 | * more reasonable; makes a difference in some configs |
74 | */ | 71 | */ |
75 | watchdog_hz = 1; | 72 | watchdog_hz = 1; |
76 | 73 | ||
@@ -121,15 +118,22 @@ void __init watchdog_go(void) | |||
121 | } | 118 | } |
122 | } | 119 | } |
123 | 120 | ||
121 | #ifdef CONFIG_SMP | ||
122 | static void watchdog_dump_register(void *dummy) | ||
123 | { | ||
124 | printk(KERN_ERR "--- Register Dump (CPU%d) ---\n", CPUID); | ||
125 | show_registers(current_frame()); | ||
126 | } | ||
127 | #endif | ||
128 | |||
124 | asmlinkage | 129 | asmlinkage |
125 | void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep) | 130 | void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep) |
126 | { | 131 | { |
127 | |||
128 | /* | 132 | /* |
129 | * Since current-> is always on the stack, and we always switch | 133 | * Since current-> is always on the stack, and we always switch |
130 | * the stack NMI-atomically, it's safe to use smp_processor_id(). | 134 | * the stack NMI-atomically, it's safe to use smp_processor_id(). |
131 | */ | 135 | */ |
132 | int sum, cpu = smp_processor_id(); | 136 | int sum, cpu; |
133 | int irq = NMIIRQ; | 137 | int irq = NMIIRQ; |
134 | u8 wdt, tmp; | 138 | u8 wdt, tmp; |
135 | 139 | ||
@@ -138,43 +142,61 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep) | |||
138 | tmp = WDCTR; | 142 | tmp = WDCTR; |
139 | NMICR = NMICR_WDIF; | 143 | NMICR = NMICR_WDIF; |
140 | 144 | ||
141 | nmi_count(cpu)++; | 145 | nmi_count(smp_processor_id())++; |
142 | kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); | 146 | kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); |
143 | sum = irq_stat[cpu].__irq_count; | 147 | |
144 | 148 | for_each_online_cpu(cpu) { | |
145 | if (last_irq_sums[cpu] == sum) { | 149 | |
146 | /* | 150 | sum = irq_stat[cpu].__irq_count; |
147 | * Ayiee, looks like this CPU is stuck ... | 151 | |
148 | * wait a few IRQs (5 seconds) before doing the oops ... | 152 | if ((last_irq_sums[cpu] == sum) |
149 | */ | 153 | #if defined(CONFIG_GDBSTUB) && defined(CONFIG_SMP) |
150 | watchdog_alert_counter++; | 154 | && !(CHK_GDBSTUB_BUSY() |
151 | if (watchdog_alert_counter == 5 * watchdog_hz) { | 155 | || atomic_read(&cpu_doing_single_step)) |
152 | spin_lock(&watchdog_print_lock); | 156 | #endif |
157 | ) { | ||
153 | /* | 158 | /* |
154 | * We are in trouble anyway, lets at least try | 159 | * Ayiee, looks like this CPU is stuck ... |
155 | * to get a message out. | 160 | * wait a few IRQs (5 seconds) before doing the oops ... |
156 | */ | 161 | */ |
157 | bust_spinlocks(1); | 162 | watchdog_alert_counter[cpu]++; |
158 | printk(KERN_ERR | 163 | if (watchdog_alert_counter[cpu] == 5 * watchdog_hz) { |
159 | "NMI Watchdog detected LOCKUP on CPU%d," | 164 | spin_lock(&watchdog_print_lock); |
160 | " pc %08lx, registers:\n", | 165 | /* |
161 | cpu, regs->pc); | 166 | * We are in trouble anyway, lets at least try |
162 | show_registers(regs); | 167 | * to get a message out. |
163 | printk("console shuts up ...\n"); | 168 | */ |
164 | console_silent(); | 169 | bust_spinlocks(1); |
165 | spin_unlock(&watchdog_print_lock); | 170 | printk(KERN_ERR |
166 | bust_spinlocks(0); | 171 | "NMI Watchdog detected LOCKUP on CPU%d," |
172 | " pc %08lx, registers:\n", | ||
173 | cpu, regs->pc); | ||
174 | #ifdef CONFIG_SMP | ||
175 | printk(KERN_ERR | ||
176 | "--- Register Dump (CPU%d) ---\n", | ||
177 | CPUID); | ||
178 | #endif | ||
179 | show_registers(regs); | ||
180 | #ifdef CONFIG_SMP | ||
181 | smp_nmi_call_function(watchdog_dump_register, | ||
182 | NULL, 1); | ||
183 | #endif | ||
184 | printk(KERN_NOTICE "console shuts up ...\n"); | ||
185 | console_silent(); | ||
186 | spin_unlock(&watchdog_print_lock); | ||
187 | bust_spinlocks(0); | ||
167 | #ifdef CONFIG_GDBSTUB | 188 | #ifdef CONFIG_GDBSTUB |
168 | if (gdbstub_busy) | 189 | if (CHK_GDBSTUB_BUSY_AND_ACTIVE()) |
169 | gdbstub_exception(regs, excep); | 190 | gdbstub_exception(regs, excep); |
170 | else | 191 | else |
171 | gdbstub_intercept(regs, excep); | 192 | gdbstub_intercept(regs, excep); |
172 | #endif | 193 | #endif |
173 | do_exit(SIGSEGV); | 194 | do_exit(SIGSEGV); |
195 | } | ||
196 | } else { | ||
197 | last_irq_sums[cpu] = sum; | ||
198 | watchdog_alert_counter[cpu] = 0; | ||
174 | } | 199 | } |
175 | } else { | ||
176 | last_irq_sums[cpu] = sum; | ||
177 | watchdog_alert_counter = 0; | ||
178 | } | 200 | } |
179 | 201 | ||
180 | WDCTR = wdt | WDCTR_WDRST; | 202 | WDCTR = wdt | WDCTR_WDRST; |
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c index f48373e2bc1c..0d0f8049a17b 100644 --- a/arch/mn10300/kernel/process.c +++ b/arch/mn10300/kernel/process.c | |||
@@ -57,6 +57,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk) | |||
57 | void (*pm_power_off)(void); | 57 | void (*pm_power_off)(void); |
58 | EXPORT_SYMBOL(pm_power_off); | 58 | EXPORT_SYMBOL(pm_power_off); |
59 | 59 | ||
60 | #if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU) | ||
60 | /* | 61 | /* |
61 | * we use this if we don't have any better idle routine | 62 | * we use this if we don't have any better idle routine |
62 | */ | 63 | */ |
@@ -69,6 +70,35 @@ static void default_idle(void) | |||
69 | local_irq_enable(); | 70 | local_irq_enable(); |
70 | } | 71 | } |
71 | 72 | ||
73 | #else /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */ | ||
74 | /* | ||
75 | * On SMP it's slightly faster (but much more power-consuming!) | ||
76 | * to poll the ->work.need_resched flag instead of waiting for the | ||
77 | * cross-CPU IPI to arrive. Use this option with caution. | ||
78 | */ | ||
79 | static inline void poll_idle(void) | ||
80 | { | ||
81 | int oldval; | ||
82 | |||
83 | local_irq_enable(); | ||
84 | |||
85 | /* | ||
86 | * Deal with another CPU just having chosen a thread to | ||
87 | * run here: | ||
88 | */ | ||
89 | oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED); | ||
90 | |||
91 | if (!oldval) { | ||
92 | set_thread_flag(TIF_POLLING_NRFLAG); | ||
93 | while (!need_resched()) | ||
94 | cpu_relax(); | ||
95 | clear_thread_flag(TIF_POLLING_NRFLAG); | ||
96 | } else { | ||
97 | set_need_resched(); | ||
98 | } | ||
99 | } | ||
100 | #endif /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */ | ||
101 | |||
72 | /* | 102 | /* |
73 | * the idle thread | 103 | * the idle thread |
74 | * - there's no useful work to be done, so just try to conserve power and have | 104 | * - there's no useful work to be done, so just try to conserve power and have |
@@ -77,8 +107,6 @@ static void default_idle(void) | |||
77 | */ | 107 | */ |
78 | void cpu_idle(void) | 108 | void cpu_idle(void) |
79 | { | 109 | { |
80 | int cpu = smp_processor_id(); | ||
81 | |||
82 | /* endless idle loop with no priority at all */ | 110 | /* endless idle loop with no priority at all */ |
83 | for (;;) { | 111 | for (;;) { |
84 | while (!need_resched()) { | 112 | while (!need_resched()) { |
@@ -86,10 +114,13 @@ void cpu_idle(void) | |||
86 | 114 | ||
87 | smp_rmb(); | 115 | smp_rmb(); |
88 | idle = pm_idle; | 116 | idle = pm_idle; |
89 | if (!idle) | 117 | if (!idle) { |
118 | #if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU) | ||
119 | idle = poll_idle; | ||
120 | #else /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */ | ||
90 | idle = default_idle; | 121 | idle = default_idle; |
91 | 122 | #endif /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */ | |
92 | irq_stat[cpu].idle_timestamp = jiffies; | 123 | } |
93 | idle(); | 124 | idle(); |
94 | } | 125 | } |
95 | 126 | ||
@@ -197,6 +228,7 @@ int copy_thread(unsigned long clone_flags, | |||
197 | unsigned long c_usp, unsigned long ustk_size, | 228 | unsigned long c_usp, unsigned long ustk_size, |
198 | struct task_struct *p, struct pt_regs *kregs) | 229 | struct task_struct *p, struct pt_regs *kregs) |
199 | { | 230 | { |
231 | struct thread_info *ti = task_thread_info(p); | ||
200 | struct pt_regs *c_uregs, *c_kregs, *uregs; | 232 | struct pt_regs *c_uregs, *c_kregs, *uregs; |
201 | unsigned long c_ksp; | 233 | unsigned long c_ksp; |
202 | 234 | ||
@@ -217,7 +249,7 @@ int copy_thread(unsigned long clone_flags, | |||
217 | 249 | ||
218 | /* the new TLS pointer is passed in as arg #5 to sys_clone() */ | 250 | /* the new TLS pointer is passed in as arg #5 to sys_clone() */ |
219 | if (clone_flags & CLONE_SETTLS) | 251 | if (clone_flags & CLONE_SETTLS) |
220 | c_uregs->e2 = __frame->d3; | 252 | c_uregs->e2 = current_frame()->d3; |
221 | 253 | ||
222 | /* set up the return kernel frame if called from kernel_thread() */ | 254 | /* set up the return kernel frame if called from kernel_thread() */ |
223 | c_kregs = c_uregs; | 255 | c_kregs = c_uregs; |
@@ -235,7 +267,7 @@ int copy_thread(unsigned long clone_flags, | |||
235 | } | 267 | } |
236 | 268 | ||
237 | /* set up things up so the scheduler can start the new task */ | 269 | /* set up things up so the scheduler can start the new task */ |
238 | p->thread.__frame = c_kregs; | 270 | ti->frame = c_kregs; |
239 | p->thread.a3 = (unsigned long) c_kregs; | 271 | p->thread.a3 = (unsigned long) c_kregs; |
240 | p->thread.sp = c_ksp; | 272 | p->thread.sp = c_ksp; |
241 | p->thread.pc = (unsigned long) ret_from_fork; | 273 | p->thread.pc = (unsigned long) ret_from_fork; |
@@ -247,25 +279,26 @@ int copy_thread(unsigned long clone_flags, | |||
247 | 279 | ||
248 | /* | 280 | /* |
249 | * clone a process | 281 | * clone a process |
250 | * - tlsptr is retrieved by copy_thread() from __frame->d3 | 282 | * - tlsptr is retrieved by copy_thread() from current_frame()->d3 |
251 | */ | 283 | */ |
252 | asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp, | 284 | asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp, |
253 | int __user *parent_tidptr, int __user *child_tidptr, | 285 | int __user *parent_tidptr, int __user *child_tidptr, |
254 | int __user *tlsptr) | 286 | int __user *tlsptr) |
255 | { | 287 | { |
256 | return do_fork(clone_flags, newsp ?: __frame->sp, __frame, 0, | 288 | return do_fork(clone_flags, newsp ?: current_frame()->sp, |
257 | parent_tidptr, child_tidptr); | 289 | current_frame(), 0, parent_tidptr, child_tidptr); |
258 | } | 290 | } |
259 | 291 | ||
260 | asmlinkage long sys_fork(void) | 292 | asmlinkage long sys_fork(void) |
261 | { | 293 | { |
262 | return do_fork(SIGCHLD, __frame->sp, __frame, 0, NULL, NULL); | 294 | return do_fork(SIGCHLD, current_frame()->sp, |
295 | current_frame(), 0, NULL, NULL); | ||
263 | } | 296 | } |
264 | 297 | ||
265 | asmlinkage long sys_vfork(void) | 298 | asmlinkage long sys_vfork(void) |
266 | { | 299 | { |
267 | return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, __frame->sp, __frame, | 300 | return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, current_frame()->sp, |
268 | 0, NULL, NULL); | 301 | current_frame(), 0, NULL, NULL); |
269 | } | 302 | } |
270 | 303 | ||
271 | asmlinkage long sys_execve(const char __user *name, | 304 | asmlinkage long sys_execve(const char __user *name, |
@@ -279,7 +312,7 @@ asmlinkage long sys_execve(const char __user *name, | |||
279 | error = PTR_ERR(filename); | 312 | error = PTR_ERR(filename); |
280 | if (IS_ERR(filename)) | 313 | if (IS_ERR(filename)) |
281 | return error; | 314 | return error; |
282 | error = do_execve(filename, argv, envp, __frame); | 315 | error = do_execve(filename, argv, envp, current_frame()); |
283 | putname(filename); | 316 | putname(filename); |
284 | return error; | 317 | return error; |
285 | } | 318 | } |
diff --git a/arch/mn10300/kernel/profile.c b/arch/mn10300/kernel/profile.c index 20d7d0306b16..4f342f75d00c 100644 --- a/arch/mn10300/kernel/profile.c +++ b/arch/mn10300/kernel/profile.c | |||
@@ -41,7 +41,7 @@ static __init int profile_init(void) | |||
41 | tmp = TM11ICR; | 41 | tmp = TM11ICR; |
42 | 42 | ||
43 | printk(KERN_INFO "Profiling initiated on timer 11, priority 0, %uHz\n", | 43 | printk(KERN_INFO "Profiling initiated on timer 11, priority 0, %uHz\n", |
44 | mn10300_ioclk / 8 / (TM11BR + 1)); | 44 | MN10300_IOCLK / 8 / (TM11BR + 1)); |
45 | printk(KERN_INFO "Profile histogram stored %p-%p\n", | 45 | printk(KERN_INFO "Profile histogram stored %p-%p\n", |
46 | prof_buffer, (u8 *)(prof_buffer + prof_len) - 1); | 46 | prof_buffer, (u8 *)(prof_buffer + prof_len) - 1); |
47 | 47 | ||
diff --git a/arch/mn10300/kernel/rtc.c b/arch/mn10300/kernel/rtc.c index 4eef0e7224f6..e9e20f9a4dd3 100644 --- a/arch/mn10300/kernel/rtc.c +++ b/arch/mn10300/kernel/rtc.c | |||
@@ -20,18 +20,22 @@ | |||
20 | DEFINE_SPINLOCK(rtc_lock); | 20 | DEFINE_SPINLOCK(rtc_lock); |
21 | EXPORT_SYMBOL(rtc_lock); | 21 | EXPORT_SYMBOL(rtc_lock); |
22 | 22 | ||
23 | /* time for RTC to update itself in ioclks */ | 23 | /* |
24 | static unsigned long mn10300_rtc_update_period; | 24 | * Read the current RTC time |
25 | 25 | */ | |
26 | void read_persistent_clock(struct timespec *ts) | 26 | void read_persistent_clock(struct timespec *ts) |
27 | { | 27 | { |
28 | struct rtc_time tm; | 28 | struct rtc_time tm; |
29 | 29 | ||
30 | get_rtc_time(&tm); | 30 | get_rtc_time(&tm); |
31 | 31 | ||
32 | ts->tv_sec = mktime(tm.tm_year, tm.tm_mon, tm.tm_mday, | ||
33 | tm.tm_hour, tm.tm_min, tm.tm_sec); | ||
34 | ts->tv_nsec = 0; | 32 | ts->tv_nsec = 0; |
33 | ts->tv_sec = mktime(tm.tm_year, tm.tm_mon, tm.tm_mday, | ||
34 | tm.tm_hour, tm.tm_min, tm.tm_sec); | ||
35 | |||
36 | /* if rtc is way off in the past, set something reasonable */ | ||
37 | if (ts->tv_sec < 0) | ||
38 | ts->tv_sec = mktime(2009, 1, 1, 12, 0, 0); | ||
35 | } | 39 | } |
36 | 40 | ||
37 | /* | 41 | /* |
@@ -115,39 +119,14 @@ int update_persistent_clock(struct timespec now) | |||
115 | */ | 119 | */ |
116 | void __init calibrate_clock(void) | 120 | void __init calibrate_clock(void) |
117 | { | 121 | { |
118 | unsigned long count0, counth, count1; | ||
119 | unsigned char status; | 122 | unsigned char status; |
120 | 123 | ||
121 | /* make sure the RTC is running and is set to operate in 24hr mode */ | 124 | /* make sure the RTC is running and is set to operate in 24hr mode */ |
122 | status = RTSRC; | 125 | status = RTSRC; |
123 | RTCRB |= RTCRB_SET; | 126 | RTCRB |= RTCRB_SET; |
124 | RTCRB |= RTCRB_TM_24HR; | 127 | RTCRB |= RTCRB_TM_24HR; |
128 | RTCRB &= ~RTCRB_DM_BINARY; | ||
125 | RTCRA |= RTCRA_DVR; | 129 | RTCRA |= RTCRA_DVR; |
126 | RTCRA &= ~RTCRA_DVR; | 130 | RTCRA &= ~RTCRA_DVR; |
127 | RTCRB &= ~RTCRB_SET; | 131 | RTCRB &= ~RTCRB_SET; |
128 | |||
129 | /* work out the clock speed by counting clock cycles between ends of | ||
130 | * the RTC update cycle - track the RTC through one complete update | ||
131 | * cycle (1 second) | ||
132 | */ | ||
133 | startup_timestamp_counter(); | ||
134 | |||
135 | while (!(RTCRA & RTCRA_UIP)) {} | ||
136 | while ((RTCRA & RTCRA_UIP)) {} | ||
137 | |||
138 | count0 = TMTSCBC; | ||
139 | |||
140 | while (!(RTCRA & RTCRA_UIP)) {} | ||
141 | |||
142 | counth = TMTSCBC; | ||
143 | |||
144 | while ((RTCRA & RTCRA_UIP)) {} | ||
145 | |||
146 | count1 = TMTSCBC; | ||
147 | |||
148 | shutdown_timestamp_counter(); | ||
149 | |||
150 | MN10300_TSCCLK = count0 - count1; /* the timers count down */ | ||
151 | mn10300_rtc_update_period = counth - count1; | ||
152 | MN10300_TSC_PER_HZ = MN10300_TSCCLK / HZ; | ||
153 | } | 132 | } |
diff --git a/arch/mn10300/kernel/setup.c b/arch/mn10300/kernel/setup.c index d464affcba0e..9e7a3209a3e1 100644 --- a/arch/mn10300/kernel/setup.c +++ b/arch/mn10300/kernel/setup.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/bootmem.h> | 23 | #include <linux/bootmem.h> |
24 | #include <linux/seq_file.h> | 24 | #include <linux/seq_file.h> |
25 | #include <linux/cpu.h> | ||
25 | #include <asm/processor.h> | 26 | #include <asm/processor.h> |
26 | #include <linux/console.h> | 27 | #include <linux/console.h> |
27 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
@@ -30,7 +31,6 @@ | |||
30 | #include <asm/io.h> | 31 | #include <asm/io.h> |
31 | #include <asm/smp.h> | 32 | #include <asm/smp.h> |
32 | #include <proc/proc.h> | 33 | #include <proc/proc.h> |
33 | #include <asm/busctl-regs.h> | ||
34 | #include <asm/fpu.h> | 34 | #include <asm/fpu.h> |
35 | #include <asm/sections.h> | 35 | #include <asm/sections.h> |
36 | 36 | ||
@@ -64,11 +64,13 @@ unsigned long memory_size; | |||
64 | struct thread_info *__current_ti = &init_thread_union.thread_info; | 64 | struct thread_info *__current_ti = &init_thread_union.thread_info; |
65 | struct task_struct *__current = &init_task; | 65 | struct task_struct *__current = &init_task; |
66 | 66 | ||
67 | #define mn10300_known_cpus 3 | 67 | #define mn10300_known_cpus 5 |
68 | static const char *const mn10300_cputypes[] = { | 68 | static const char *const mn10300_cputypes[] = { |
69 | "am33v1", | 69 | "am33-1", |
70 | "am33v2", | 70 | "am33-2", |
71 | "am34v1", | 71 | "am34-1", |
72 | "am33-3", | ||
73 | "am34-2", | ||
72 | "unknown" | 74 | "unknown" |
73 | }; | 75 | }; |
74 | 76 | ||
@@ -123,6 +125,7 @@ void __init setup_arch(char **cmdline_p) | |||
123 | 125 | ||
124 | cpu_init(); | 126 | cpu_init(); |
125 | unit_setup(); | 127 | unit_setup(); |
128 | smp_init_cpus(); | ||
126 | parse_mem_cmdline(cmdline_p); | 129 | parse_mem_cmdline(cmdline_p); |
127 | 130 | ||
128 | init_mm.start_code = (unsigned long)&_text; | 131 | init_mm.start_code = (unsigned long)&_text; |
@@ -179,57 +182,55 @@ void __init setup_arch(char **cmdline_p) | |||
179 | void __init cpu_init(void) | 182 | void __init cpu_init(void) |
180 | { | 183 | { |
181 | unsigned long cpurev = CPUREV, type; | 184 | unsigned long cpurev = CPUREV, type; |
182 | unsigned long base, size; | ||
183 | 185 | ||
184 | type = (CPUREV & CPUREV_TYPE) >> CPUREV_TYPE_S; | 186 | type = (CPUREV & CPUREV_TYPE) >> CPUREV_TYPE_S; |
185 | if (type > mn10300_known_cpus) | 187 | if (type > mn10300_known_cpus) |
186 | type = mn10300_known_cpus; | 188 | type = mn10300_known_cpus; |
187 | 189 | ||
188 | printk(KERN_INFO "Matsushita %s, rev %ld\n", | 190 | printk(KERN_INFO "Panasonic %s, rev %ld\n", |
189 | mn10300_cputypes[type], | 191 | mn10300_cputypes[type], |
190 | (cpurev & CPUREV_REVISION) >> CPUREV_REVISION_S); | 192 | (cpurev & CPUREV_REVISION) >> CPUREV_REVISION_S); |
191 | 193 | ||
192 | /* determine the memory size and base from the memory controller regs */ | 194 | get_mem_info(&phys_memory_base, &memory_size); |
193 | memory_size = 0; | 195 | phys_memory_end = phys_memory_base + memory_size; |
194 | |||
195 | base = SDBASE(0); | ||
196 | if (base & SDBASE_CE) { | ||
197 | size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT; | ||
198 | size = ~size + 1; | ||
199 | base &= SDBASE_CBA; | ||
200 | 196 | ||
201 | printk(KERN_INFO "SDRAM[0]: %luMb @%08lx\n", size >> 20, base); | 197 | fpu_init_state(); |
202 | memory_size += size; | 198 | } |
203 | phys_memory_base = base; | ||
204 | } | ||
205 | 199 | ||
206 | base = SDBASE(1); | 200 | static struct cpu cpu_devices[NR_CPUS]; |
207 | if (base & SDBASE_CE) { | ||
208 | size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT; | ||
209 | size = ~size + 1; | ||
210 | base &= SDBASE_CBA; | ||
211 | 201 | ||
212 | printk(KERN_INFO "SDRAM[1]: %luMb @%08lx\n", size >> 20, base); | 202 | static int __init topology_init(void) |
213 | memory_size += size; | 203 | { |
214 | if (phys_memory_base == 0) | 204 | int i; |
215 | phys_memory_base = base; | ||
216 | } | ||
217 | 205 | ||
218 | phys_memory_end = phys_memory_base + memory_size; | 206 | for_each_present_cpu(i) |
207 | register_cpu(&cpu_devices[i], i); | ||
219 | 208 | ||
220 | #ifdef CONFIG_FPU | 209 | return 0; |
221 | fpu_init_state(); | ||
222 | #endif | ||
223 | } | 210 | } |
224 | 211 | ||
212 | subsys_initcall(topology_init); | ||
213 | |||
225 | /* | 214 | /* |
226 | * Get CPU information for use by the procfs. | 215 | * Get CPU information for use by the procfs. |
227 | */ | 216 | */ |
228 | static int show_cpuinfo(struct seq_file *m, void *v) | 217 | static int show_cpuinfo(struct seq_file *m, void *v) |
229 | { | 218 | { |
219 | #ifdef CONFIG_SMP | ||
220 | struct mn10300_cpuinfo *c = v; | ||
221 | unsigned long cpu_id = c - cpu_data; | ||
222 | unsigned long cpurev = c->type, type, icachesz, dcachesz; | ||
223 | #else /* CONFIG_SMP */ | ||
224 | unsigned long cpu_id = 0; | ||
230 | unsigned long cpurev = CPUREV, type, icachesz, dcachesz; | 225 | unsigned long cpurev = CPUREV, type, icachesz, dcachesz; |
226 | #endif /* CONFIG_SMP */ | ||
231 | 227 | ||
232 | type = (CPUREV & CPUREV_TYPE) >> CPUREV_TYPE_S; | 228 | #ifdef CONFIG_SMP |
229 | if (!cpu_online(cpu_id)) | ||
230 | return 0; | ||
231 | #endif | ||
232 | |||
233 | type = (cpurev & CPUREV_TYPE) >> CPUREV_TYPE_S; | ||
233 | if (type > mn10300_known_cpus) | 234 | if (type > mn10300_known_cpus) |
234 | type = mn10300_known_cpus; | 235 | type = mn10300_known_cpus; |
235 | 236 | ||
@@ -244,13 +245,14 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
244 | 1024; | 245 | 1024; |
245 | 246 | ||
246 | seq_printf(m, | 247 | seq_printf(m, |
247 | "processor : 0\n" | 248 | "processor : %ld\n" |
248 | "vendor_id : Matsushita\n" | 249 | "vendor_id : " PROCESSOR_VENDOR_NAME "\n" |
249 | "cpu core : %s\n" | 250 | "cpu core : %s\n" |
250 | "cpu rev : %lu\n" | 251 | "cpu rev : %lu\n" |
251 | "model name : " PROCESSOR_MODEL_NAME "\n" | 252 | "model name : " PROCESSOR_MODEL_NAME "\n" |
252 | "icache size: %lu\n" | 253 | "icache size: %lu\n" |
253 | "dcache size: %lu\n", | 254 | "dcache size: %lu\n", |
255 | cpu_id, | ||
254 | mn10300_cputypes[type], | 256 | mn10300_cputypes[type], |
255 | (cpurev & CPUREV_REVISION) >> CPUREV_REVISION_S, | 257 | (cpurev & CPUREV_REVISION) >> CPUREV_REVISION_S, |
256 | icachesz, | 258 | icachesz, |
@@ -262,8 +264,13 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
262 | "bogomips : %lu.%02lu\n\n", | 264 | "bogomips : %lu.%02lu\n\n", |
263 | MN10300_IOCLK / 1000000, | 265 | MN10300_IOCLK / 1000000, |
264 | (MN10300_IOCLK / 10000) % 100, | 266 | (MN10300_IOCLK / 10000) % 100, |
267 | #ifdef CONFIG_SMP | ||
268 | c->loops_per_jiffy / (500000 / HZ), | ||
269 | (c->loops_per_jiffy / (5000 / HZ)) % 100 | ||
270 | #else /* CONFIG_SMP */ | ||
265 | loops_per_jiffy / (500000 / HZ), | 271 | loops_per_jiffy / (500000 / HZ), |
266 | (loops_per_jiffy / (5000 / HZ)) % 100 | 272 | (loops_per_jiffy / (5000 / HZ)) % 100 |
273 | #endif /* CONFIG_SMP */ | ||
267 | ); | 274 | ); |
268 | 275 | ||
269 | return 0; | 276 | return 0; |
diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c index d4de05ab7864..690f4e9507d7 100644 --- a/arch/mn10300/kernel/signal.c +++ b/arch/mn10300/kernel/signal.c | |||
@@ -91,7 +91,7 @@ asmlinkage long sys_sigaction(int sig, | |||
91 | */ | 91 | */ |
92 | asmlinkage long sys_sigaltstack(const stack_t __user *uss, stack_t *uoss) | 92 | asmlinkage long sys_sigaltstack(const stack_t __user *uss, stack_t *uoss) |
93 | { | 93 | { |
94 | return do_sigaltstack(uss, uoss, __frame->sp); | 94 | return do_sigaltstack(uss, uoss, current_frame()->sp); |
95 | } | 95 | } |
96 | 96 | ||
97 | /* | 97 | /* |
@@ -156,10 +156,11 @@ badframe: | |||
156 | */ | 156 | */ |
157 | asmlinkage long sys_sigreturn(void) | 157 | asmlinkage long sys_sigreturn(void) |
158 | { | 158 | { |
159 | struct sigframe __user *frame = (struct sigframe __user *) __frame->sp; | 159 | struct sigframe __user *frame; |
160 | sigset_t set; | 160 | sigset_t set; |
161 | long d0; | 161 | long d0; |
162 | 162 | ||
163 | frame = (struct sigframe __user *) current_frame()->sp; | ||
163 | if (verify_area(VERIFY_READ, frame, sizeof(*frame))) | 164 | if (verify_area(VERIFY_READ, frame, sizeof(*frame))) |
164 | goto badframe; | 165 | goto badframe; |
165 | if (__get_user(set.sig[0], &frame->sc.oldmask)) | 166 | if (__get_user(set.sig[0], &frame->sc.oldmask)) |
@@ -176,7 +177,7 @@ asmlinkage long sys_sigreturn(void) | |||
176 | recalc_sigpending(); | 177 | recalc_sigpending(); |
177 | spin_unlock_irq(¤t->sighand->siglock); | 178 | spin_unlock_irq(¤t->sighand->siglock); |
178 | 179 | ||
179 | if (restore_sigcontext(__frame, &frame->sc, &d0)) | 180 | if (restore_sigcontext(current_frame(), &frame->sc, &d0)) |
180 | goto badframe; | 181 | goto badframe; |
181 | 182 | ||
182 | return d0; | 183 | return d0; |
@@ -191,11 +192,11 @@ badframe: | |||
191 | */ | 192 | */ |
192 | asmlinkage long sys_rt_sigreturn(void) | 193 | asmlinkage long sys_rt_sigreturn(void) |
193 | { | 194 | { |
194 | struct rt_sigframe __user *frame = | 195 | struct rt_sigframe __user *frame; |
195 | (struct rt_sigframe __user *) __frame->sp; | ||
196 | sigset_t set; | 196 | sigset_t set; |
197 | unsigned long d0; | 197 | long d0; |
198 | 198 | ||
199 | frame = (struct rt_sigframe __user *) current_frame()->sp; | ||
199 | if (verify_area(VERIFY_READ, frame, sizeof(*frame))) | 200 | if (verify_area(VERIFY_READ, frame, sizeof(*frame))) |
200 | goto badframe; | 201 | goto badframe; |
201 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) | 202 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) |
@@ -207,10 +208,11 @@ asmlinkage long sys_rt_sigreturn(void) | |||
207 | recalc_sigpending(); | 208 | recalc_sigpending(); |
208 | spin_unlock_irq(¤t->sighand->siglock); | 209 | spin_unlock_irq(¤t->sighand->siglock); |
209 | 210 | ||
210 | if (restore_sigcontext(__frame, &frame->uc.uc_mcontext, &d0)) | 211 | if (restore_sigcontext(current_frame(), &frame->uc.uc_mcontext, &d0)) |
211 | goto badframe; | 212 | goto badframe; |
212 | 213 | ||
213 | if (do_sigaltstack(&frame->uc.uc_stack, NULL, __frame->sp) == -EFAULT) | 214 | if (do_sigaltstack(&frame->uc.uc_stack, NULL, current_frame()->sp) == |
215 | -EFAULT) | ||
214 | goto badframe; | 216 | goto badframe; |
215 | 217 | ||
216 | return d0; | 218 | return d0; |
@@ -572,7 +574,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags) | |||
572 | 574 | ||
573 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 575 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
574 | clear_thread_flag(TIF_NOTIFY_RESUME); | 576 | clear_thread_flag(TIF_NOTIFY_RESUME); |
575 | tracehook_notify_resume(__frame); | 577 | tracehook_notify_resume(current_frame()); |
576 | if (current->replacement_session_keyring) | 578 | if (current->replacement_session_keyring) |
577 | key_replace_session_keyring(); | 579 | key_replace_session_keyring(); |
578 | } | 580 | } |
diff --git a/arch/mn10300/kernel/smp-low.S b/arch/mn10300/kernel/smp-low.S new file mode 100644 index 000000000000..72938cefc05e --- /dev/null +++ b/arch/mn10300/kernel/smp-low.S | |||
@@ -0,0 +1,97 @@ | |||
1 | /* SMP IPI low-level handler | ||
2 | * | ||
3 | * Copyright (C) 2006-2007 Matsushita Electric Industrial Co., Ltd. | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <linux/sys.h> | ||
14 | #include <linux/linkage.h> | ||
15 | #include <asm/smp.h> | ||
16 | #include <asm/system.h> | ||
17 | #include <asm/thread_info.h> | ||
18 | #include <asm/cpu-regs.h> | ||
19 | #include <proc/smp-regs.h> | ||
20 | #include <asm/asm-offsets.h> | ||
21 | #include <asm/frame.inc> | ||
22 | |||
23 | .am33_2 | ||
24 | |||
25 | ############################################################################### | ||
26 | # | ||
27 | # IPI interrupt handler | ||
28 | # | ||
29 | ############################################################################### | ||
30 | .globl mn10300_low_ipi_handler | ||
31 | mn10300_low_ipi_handler: | ||
32 | add -4,sp | ||
33 | mov d0,(sp) | ||
34 | movhu (IAGR),d0 | ||
35 | and IAGR_GN,d0 | ||
36 | lsr 0x2,d0 | ||
37 | #ifdef CONFIG_MN10300_CACHE_ENABLED | ||
38 | cmp FLUSH_CACHE_IPI,d0 | ||
39 | beq mn10300_flush_cache_ipi | ||
40 | #endif | ||
41 | cmp SMP_BOOT_IRQ,d0 | ||
42 | beq mn10300_smp_boot_ipi | ||
43 | /* OTHERS */ | ||
44 | mov (sp),d0 | ||
45 | add 4,sp | ||
46 | #ifdef CONFIG_GDBSTUB | ||
47 | jmp gdbstub_io_rx_handler | ||
48 | #else | ||
49 | jmp end | ||
50 | #endif | ||
51 | |||
52 | ############################################################################### | ||
53 | # | ||
54 | # Cache flush IPI interrupt handler | ||
55 | # | ||
56 | ############################################################################### | ||
57 | #ifdef CONFIG_MN10300_CACHE_ENABLED | ||
58 | mn10300_flush_cache_ipi: | ||
59 | mov (sp),d0 | ||
60 | add 4,sp | ||
61 | |||
62 | /* FLUSH_CACHE_IPI */ | ||
63 | add -4,sp | ||
64 | SAVE_ALL | ||
65 | mov GxICR_DETECT,d2 | ||
66 | movbu d2,(GxICR(FLUSH_CACHE_IPI)) # ACK the interrupt | ||
67 | movhu (GxICR(FLUSH_CACHE_IPI)),d2 | ||
68 | call smp_cache_interrupt[],0 | ||
69 | RESTORE_ALL | ||
70 | jmp end | ||
71 | #endif | ||
72 | |||
73 | ############################################################################### | ||
74 | # | ||
75 | # SMP boot CPU IPI interrupt handler | ||
76 | # | ||
77 | ############################################################################### | ||
78 | mn10300_smp_boot_ipi: | ||
79 | /* clear interrupt */ | ||
80 | movhu (GxICR(SMP_BOOT_IRQ)),d0 | ||
81 | and ~GxICR_REQUEST,d0 | ||
82 | movhu d0,(GxICR(SMP_BOOT_IRQ)) | ||
83 | mov (sp),d0 | ||
84 | add 4,sp | ||
85 | |||
86 | # get stack | ||
87 | mov (CPUID),a0 | ||
88 | add -1,a0 | ||
89 | add a0,a0 | ||
90 | add a0,a0 | ||
91 | mov (start_stack,a0),a0 | ||
92 | mov a0,sp | ||
93 | jmp initialize_secondary | ||
94 | |||
95 | |||
96 | # Jump here after RTI to suppress the icache lookahead | ||
97 | end: | ||
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c new file mode 100644 index 000000000000..0dcd1c686ba8 --- /dev/null +++ b/arch/mn10300/kernel/smp.c | |||
@@ -0,0 +1,1152 @@ | |||
1 | /* SMP support routines. | ||
2 | * | ||
3 | * Copyright (C) 2006-2008 Panasonic Corporation | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * version 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | |||
16 | #include <linux/interrupt.h> | ||
17 | #include <linux/spinlock.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/jiffies.h> | ||
20 | #include <linux/cpumask.h> | ||
21 | #include <linux/err.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/delay.h> | ||
24 | #include <linux/sched.h> | ||
25 | #include <linux/profile.h> | ||
26 | #include <linux/smp.h> | ||
27 | #include <asm/tlbflush.h> | ||
28 | #include <asm/system.h> | ||
29 | #include <asm/bitops.h> | ||
30 | #include <asm/processor.h> | ||
31 | #include <asm/bug.h> | ||
32 | #include <asm/exceptions.h> | ||
33 | #include <asm/hardirq.h> | ||
34 | #include <asm/fpu.h> | ||
35 | #include <asm/mmu_context.h> | ||
36 | #include <asm/thread_info.h> | ||
37 | #include <asm/cpu-regs.h> | ||
38 | #include <asm/intctl-regs.h> | ||
39 | #include "internal.h" | ||
40 | |||
41 | #ifdef CONFIG_HOTPLUG_CPU | ||
42 | #include <linux/cpu.h> | ||
43 | #include <asm/cacheflush.h> | ||
44 | |||
45 | static unsigned long sleep_mode[NR_CPUS]; | ||
46 | |||
47 | static void run_sleep_cpu(unsigned int cpu); | ||
48 | static void run_wakeup_cpu(unsigned int cpu); | ||
49 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
50 | |||
51 | /* | ||
52 | * Debug Message function | ||
53 | */ | ||
54 | |||
55 | #undef DEBUG_SMP | ||
56 | #ifdef DEBUG_SMP | ||
57 | #define Dprintk(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__) | ||
58 | #else | ||
59 | #define Dprintk(fmt, ...) no_printk(KERN_DEBUG fmt, ##__VA_ARGS__) | ||
60 | #endif | ||
61 | |||
62 | /* timeout value in msec for smp_nmi_call_function. zero is no timeout. */ | ||
63 | #define CALL_FUNCTION_NMI_IPI_TIMEOUT 0 | ||
64 | |||
65 | /* | ||
66 | * Structure and data for smp_nmi_call_function(). | ||
67 | */ | ||
68 | struct nmi_call_data_struct { | ||
69 | smp_call_func_t func; | ||
70 | void *info; | ||
71 | cpumask_t started; | ||
72 | cpumask_t finished; | ||
73 | int wait; | ||
74 | char size_alignment[0] | ||
75 | __attribute__ ((__aligned__(SMP_CACHE_BYTES))); | ||
76 | } __attribute__ ((__aligned__(SMP_CACHE_BYTES))); | ||
77 | |||
78 | static DEFINE_SPINLOCK(smp_nmi_call_lock); | ||
79 | static struct nmi_call_data_struct *nmi_call_data; | ||
80 | |||
81 | /* | ||
82 | * Data structures and variables | ||
83 | */ | ||
84 | static cpumask_t cpu_callin_map; /* Bitmask of callin CPUs */ | ||
85 | static cpumask_t cpu_callout_map; /* Bitmask of callout CPUs */ | ||
86 | cpumask_t cpu_boot_map; /* Bitmask of boot APs */ | ||
87 | unsigned long start_stack[NR_CPUS - 1]; | ||
88 | |||
89 | /* | ||
90 | * Per CPU parameters | ||
91 | */ | ||
92 | struct mn10300_cpuinfo cpu_data[NR_CPUS] __cacheline_aligned; | ||
93 | |||
94 | static int cpucount; /* The count of boot CPUs */ | ||
95 | static cpumask_t smp_commenced_mask; | ||
96 | cpumask_t cpu_initialized __initdata = CPU_MASK_NONE; | ||
97 | |||
98 | /* | ||
99 | * Function Prototypes | ||
100 | */ | ||
101 | static int do_boot_cpu(int); | ||
102 | static void smp_show_cpu_info(int cpu_id); | ||
103 | static void smp_callin(void); | ||
104 | static void smp_online(void); | ||
105 | static void smp_store_cpu_info(int); | ||
106 | static void smp_cpu_init(void); | ||
107 | static void smp_tune_scheduling(void); | ||
108 | static void send_IPI_mask(const cpumask_t *cpumask, int irq); | ||
109 | static void init_ipi(void); | ||
110 | |||
111 | /* | ||
112 | * IPI Initialization interrupt definitions | ||
113 | */ | ||
114 | static void mn10300_ipi_disable(unsigned int irq); | ||
115 | static void mn10300_ipi_enable(unsigned int irq); | ||
116 | static void mn10300_ipi_ack(unsigned int irq); | ||
117 | static void mn10300_ipi_nop(unsigned int irq); | ||
118 | |||
119 | static struct irq_chip mn10300_ipi_type = { | ||
120 | .name = "cpu_ipi", | ||
121 | .disable = mn10300_ipi_disable, | ||
122 | .enable = mn10300_ipi_enable, | ||
123 | .ack = mn10300_ipi_ack, | ||
124 | .eoi = mn10300_ipi_nop | ||
125 | }; | ||
126 | |||
127 | static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id); | ||
128 | static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id); | ||
129 | |||
130 | static struct irqaction reschedule_ipi = { | ||
131 | .handler = smp_reschedule_interrupt, | ||
132 | .name = "smp reschedule IPI" | ||
133 | }; | ||
134 | static struct irqaction call_function_ipi = { | ||
135 | .handler = smp_call_function_interrupt, | ||
136 | .name = "smp call function IPI" | ||
137 | }; | ||
138 | |||
139 | #if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) | ||
140 | static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id); | ||
141 | static struct irqaction local_timer_ipi = { | ||
142 | .handler = smp_ipi_timer_interrupt, | ||
143 | .flags = IRQF_DISABLED, | ||
144 | .name = "smp local timer IPI" | ||
145 | }; | ||
146 | #endif | ||
147 | |||
148 | /** | ||
149 | * init_ipi - Initialise the IPI mechanism | ||
150 | */ | ||
151 | static void init_ipi(void) | ||
152 | { | ||
153 | unsigned long flags; | ||
154 | u16 tmp16; | ||
155 | |||
156 | /* set up the reschedule IPI */ | ||
157 | set_irq_chip_and_handler(RESCHEDULE_IPI, | ||
158 | &mn10300_ipi_type, handle_percpu_irq); | ||
159 | setup_irq(RESCHEDULE_IPI, &reschedule_ipi); | ||
160 | set_intr_level(RESCHEDULE_IPI, RESCHEDULE_GxICR_LV); | ||
161 | mn10300_ipi_enable(RESCHEDULE_IPI); | ||
162 | |||
163 | /* set up the call function IPI */ | ||
164 | set_irq_chip_and_handler(CALL_FUNC_SINGLE_IPI, | ||
165 | &mn10300_ipi_type, handle_percpu_irq); | ||
166 | setup_irq(CALL_FUNC_SINGLE_IPI, &call_function_ipi); | ||
167 | set_intr_level(CALL_FUNC_SINGLE_IPI, CALL_FUNCTION_GxICR_LV); | ||
168 | mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI); | ||
169 | |||
170 | /* set up the local timer IPI */ | ||
171 | #if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \ | ||
172 | defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) | ||
173 | set_irq_chip_and_handler(LOCAL_TIMER_IPI, | ||
174 | &mn10300_ipi_type, handle_percpu_irq); | ||
175 | setup_irq(LOCAL_TIMER_IPI, &local_timer_ipi); | ||
176 | set_intr_level(LOCAL_TIMER_IPI, LOCAL_TIMER_GxICR_LV); | ||
177 | mn10300_ipi_enable(LOCAL_TIMER_IPI); | ||
178 | #endif | ||
179 | |||
180 | #ifdef CONFIG_MN10300_CACHE_ENABLED | ||
181 | /* set up the cache flush IPI */ | ||
182 | flags = arch_local_cli_save(); | ||
183 | __set_intr_stub(NUM2EXCEP_IRQ_LEVEL(FLUSH_CACHE_GxICR_LV), | ||
184 | mn10300_low_ipi_handler); | ||
185 | GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT; | ||
186 | mn10300_ipi_enable(FLUSH_CACHE_IPI); | ||
187 | arch_local_irq_restore(flags); | ||
188 | #endif | ||
189 | |||
190 | /* set up the NMI call function IPI */ | ||
191 | flags = arch_local_cli_save(); | ||
192 | GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT; | ||
193 | tmp16 = GxICR(CALL_FUNCTION_NMI_IPI); | ||
194 | arch_local_irq_restore(flags); | ||
195 | |||
196 | /* set up the SMP boot IPI */ | ||
197 | flags = arch_local_cli_save(); | ||
198 | __set_intr_stub(NUM2EXCEP_IRQ_LEVEL(SMP_BOOT_GxICR_LV), | ||
199 | mn10300_low_ipi_handler); | ||
200 | arch_local_irq_restore(flags); | ||
201 | } | ||
202 | |||
203 | /** | ||
204 | * mn10300_ipi_shutdown - Shut down handling of an IPI | ||
205 | * @irq: The IPI to be shut down. | ||
206 | */ | ||
207 | static void mn10300_ipi_shutdown(unsigned int irq) | ||
208 | { | ||
209 | unsigned long flags; | ||
210 | u16 tmp; | ||
211 | |||
212 | flags = arch_local_cli_save(); | ||
213 | |||
214 | tmp = GxICR(irq); | ||
215 | GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT; | ||
216 | tmp = GxICR(irq); | ||
217 | |||
218 | arch_local_irq_restore(flags); | ||
219 | } | ||
220 | |||
221 | /** | ||
222 | * mn10300_ipi_enable - Enable an IPI | ||
223 | * @irq: The IPI to be enabled. | ||
224 | */ | ||
225 | static void mn10300_ipi_enable(unsigned int irq) | ||
226 | { | ||
227 | unsigned long flags; | ||
228 | u16 tmp; | ||
229 | |||
230 | flags = arch_local_cli_save(); | ||
231 | |||
232 | tmp = GxICR(irq); | ||
233 | GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE; | ||
234 | tmp = GxICR(irq); | ||
235 | |||
236 | arch_local_irq_restore(flags); | ||
237 | } | ||
238 | |||
239 | /** | ||
240 | * mn10300_ipi_disable - Disable an IPI | ||
241 | * @irq: The IPI to be disabled. | ||
242 | */ | ||
243 | static void mn10300_ipi_disable(unsigned int irq) | ||
244 | { | ||
245 | unsigned long flags; | ||
246 | u16 tmp; | ||
247 | |||
248 | flags = arch_local_cli_save(); | ||
249 | |||
250 | tmp = GxICR(irq); | ||
251 | GxICR(irq) = tmp & GxICR_LEVEL; | ||
252 | tmp = GxICR(irq); | ||
253 | |||
254 | arch_local_irq_restore(flags); | ||
255 | } | ||
256 | |||
257 | /** | ||
258 | * mn10300_ipi_ack - Acknowledge an IPI interrupt in the PIC | ||
259 | * @irq: The IPI to be acknowledged. | ||
260 | * | ||
261 | * Clear the interrupt detection flag for the IPI on the appropriate interrupt | ||
262 | * channel in the PIC. | ||
263 | */ | ||
264 | static void mn10300_ipi_ack(unsigned int irq) | ||
265 | { | ||
266 | unsigned long flags; | ||
267 | u16 tmp; | ||
268 | |||
269 | flags = arch_local_cli_save(); | ||
270 | GxICR_u8(irq) = GxICR_DETECT; | ||
271 | tmp = GxICR(irq); | ||
272 | arch_local_irq_restore(flags); | ||
273 | } | ||
274 | |||
275 | /** | ||
276 | * mn10300_ipi_nop - Dummy IPI action | ||
277 | * @irq: The IPI to be acted upon. | ||
278 | */ | ||
279 | static void mn10300_ipi_nop(unsigned int irq) | ||
280 | { | ||
281 | } | ||
282 | |||
283 | /** | ||
284 | * send_IPI_mask - Send IPIs to all CPUs in list | ||
285 | * @cpumask: The list of CPUs to target. | ||
286 | * @irq: The IPI request to be sent. | ||
287 | * | ||
288 | * Send the specified IPI to all the CPUs in the list, not waiting for them to | ||
289 | * finish before returning. The caller is responsible for synchronisation if | ||
290 | * that is needed. | ||
291 | */ | ||
292 | static void send_IPI_mask(const cpumask_t *cpumask, int irq) | ||
293 | { | ||
294 | int i; | ||
295 | u16 tmp; | ||
296 | |||
297 | for (i = 0; i < NR_CPUS; i++) { | ||
298 | if (cpu_isset(i, *cpumask)) { | ||
299 | /* send IPI */ | ||
300 | tmp = CROSS_GxICR(irq, i); | ||
301 | CROSS_GxICR(irq, i) = | ||
302 | tmp | GxICR_REQUEST | GxICR_DETECT; | ||
303 | tmp = CROSS_GxICR(irq, i); /* flush write buffer */ | ||
304 | } | ||
305 | } | ||
306 | } | ||
307 | |||
308 | /** | ||
309 | * send_IPI_self - Send an IPI to this CPU. | ||
310 | * @irq: The IPI request to be sent. | ||
311 | * | ||
312 | * Send the specified IPI to the current CPU. | ||
313 | */ | ||
314 | void send_IPI_self(int irq) | ||
315 | { | ||
316 | send_IPI_mask(cpumask_of(smp_processor_id()), irq); | ||
317 | } | ||
318 | |||
319 | /** | ||
320 | * send_IPI_allbutself - Send IPIs to all the other CPUs. | ||
321 | * @irq: The IPI request to be sent. | ||
322 | * | ||
323 | * Send the specified IPI to all CPUs in the system barring the current one, | ||
324 | * not waiting for them to finish before returning. The caller is responsible | ||
325 | * for synchronisation if that is needed. | ||
326 | */ | ||
327 | void send_IPI_allbutself(int irq) | ||
328 | { | ||
329 | cpumask_t cpumask; | ||
330 | |||
331 | cpumask = cpu_online_map; | ||
332 | cpu_clear(smp_processor_id(), cpumask); | ||
333 | send_IPI_mask(&cpumask, irq); | ||
334 | } | ||
335 | |||
336 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) | ||
337 | { | ||
338 | BUG(); | ||
339 | /*send_IPI_mask(mask, CALL_FUNCTION_IPI);*/ | ||
340 | } | ||
341 | |||
342 | void arch_send_call_function_single_ipi(int cpu) | ||
343 | { | ||
344 | send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI); | ||
345 | } | ||
346 | |||
347 | /** | ||
348 | * smp_send_reschedule - Send reschedule IPI to a CPU | ||
349 | * @cpu: The CPU to target. | ||
350 | */ | ||
351 | void smp_send_reschedule(int cpu) | ||
352 | { | ||
353 | send_IPI_mask(cpumask_of(cpu), RESCHEDULE_IPI); | ||
354 | } | ||
355 | |||
356 | /** | ||
357 | * smp_nmi_call_function - Send a call function NMI IPI to all CPUs | ||
358 | * @func: The function to ask to be run. | ||
359 | * @info: The context data to pass to that function. | ||
360 | * @wait: If true, wait (atomically) until function is run on all CPUs. | ||
361 | * | ||
362 | * Send a non-maskable request to all CPUs in the system, requesting them to | ||
363 | * run the specified function with the given context data, and, potentially, to | ||
364 | * wait for completion of that function on all CPUs. | ||
365 | * | ||
366 | * Returns 0 if successful, -ETIMEDOUT if we were asked to wait, but hit the | ||
367 | * timeout. | ||
368 | */ | ||
369 | int smp_nmi_call_function(smp_call_func_t func, void *info, int wait) | ||
370 | { | ||
371 | struct nmi_call_data_struct data; | ||
372 | unsigned long flags; | ||
373 | unsigned int cnt; | ||
374 | int cpus, ret = 0; | ||
375 | |||
376 | cpus = num_online_cpus() - 1; | ||
377 | if (cpus < 1) | ||
378 | return 0; | ||
379 | |||
380 | data.func = func; | ||
381 | data.info = info; | ||
382 | data.started = cpu_online_map; | ||
383 | cpu_clear(smp_processor_id(), data.started); | ||
384 | data.wait = wait; | ||
385 | if (wait) | ||
386 | data.finished = data.started; | ||
387 | |||
388 | spin_lock_irqsave(&smp_nmi_call_lock, flags); | ||
389 | nmi_call_data = &data; | ||
390 | smp_mb(); | ||
391 | |||
392 | /* Send a message to all other CPUs and wait for them to respond */ | ||
393 | send_IPI_allbutself(CALL_FUNCTION_NMI_IPI); | ||
394 | |||
395 | /* Wait for response */ | ||
396 | if (CALL_FUNCTION_NMI_IPI_TIMEOUT > 0) { | ||
397 | for (cnt = 0; | ||
398 | cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT && | ||
399 | !cpus_empty(data.started); | ||
400 | cnt++) | ||
401 | mdelay(1); | ||
402 | |||
403 | if (wait && cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT) { | ||
404 | for (cnt = 0; | ||
405 | cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT && | ||
406 | !cpus_empty(data.finished); | ||
407 | cnt++) | ||
408 | mdelay(1); | ||
409 | } | ||
410 | |||
411 | if (cnt >= CALL_FUNCTION_NMI_IPI_TIMEOUT) | ||
412 | ret = -ETIMEDOUT; | ||
413 | |||
414 | } else { | ||
415 | /* If timeout value is zero, wait until cpumask has been | ||
416 | * cleared */ | ||
417 | while (!cpus_empty(data.started)) | ||
418 | barrier(); | ||
419 | if (wait) | ||
420 | while (!cpus_empty(data.finished)) | ||
421 | barrier(); | ||
422 | } | ||
423 | |||
424 | spin_unlock_irqrestore(&smp_nmi_call_lock, flags); | ||
425 | return ret; | ||
426 | } | ||
427 | |||
428 | /** | ||
429 | * stop_this_cpu - Callback to stop a CPU. | ||
430 | * @unused: Callback context (ignored). | ||
431 | */ | ||
432 | void stop_this_cpu(void *unused) | ||
433 | { | ||
434 | static volatile int stopflag; | ||
435 | unsigned long flags; | ||
436 | |||
437 | #ifdef CONFIG_GDBSTUB | ||
438 | /* In case of single stepping smp_send_stop by other CPU, | ||
439 | * clear procindebug to avoid deadlock. | ||
440 | */ | ||
441 | atomic_set(&procindebug[smp_processor_id()], 0); | ||
442 | #endif /* CONFIG_GDBSTUB */ | ||
443 | |||
444 | flags = arch_local_cli_save(); | ||
445 | cpu_clear(smp_processor_id(), cpu_online_map); | ||
446 | |||
447 | while (!stopflag) | ||
448 | cpu_relax(); | ||
449 | |||
450 | cpu_set(smp_processor_id(), cpu_online_map); | ||
451 | arch_local_irq_restore(flags); | ||
452 | } | ||
453 | |||
454 | /** | ||
455 | * smp_send_stop - Send a stop request to all CPUs. | ||
456 | */ | ||
457 | void smp_send_stop(void) | ||
458 | { | ||
459 | smp_nmi_call_function(stop_this_cpu, NULL, 0); | ||
460 | } | ||
461 | |||
462 | /** | ||
463 | * smp_reschedule_interrupt - Reschedule IPI handler | ||
464 | * @irq: The interrupt number. | ||
465 | * @dev_id: The device ID. | ||
466 | * | ||
467 | * We need do nothing here, since the scheduling will be effected on our way | ||
468 | * back through entry.S. | ||
469 | * | ||
470 | * Returns IRQ_HANDLED to indicate we handled the interrupt successfully. | ||
471 | */ | ||
472 | static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id) | ||
473 | { | ||
474 | /* do nothing */ | ||
475 | return IRQ_HANDLED; | ||
476 | } | ||
477 | |||
478 | /** | ||
479 | * smp_call_function_interrupt - Call function IPI handler | ||
480 | * @irq: The interrupt number. | ||
481 | * @dev_id: The device ID. | ||
482 | * | ||
483 | * Returns IRQ_HANDLED to indicate we handled the interrupt successfully. | ||
484 | */ | ||
485 | static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id) | ||
486 | { | ||
487 | /* generic_smp_call_function_interrupt(); */ | ||
488 | generic_smp_call_function_single_interrupt(); | ||
489 | return IRQ_HANDLED; | ||
490 | } | ||
491 | |||
492 | /** | ||
493 | * smp_nmi_call_function_interrupt - Non-maskable call function IPI handler | ||
494 | */ | ||
495 | void smp_nmi_call_function_interrupt(void) | ||
496 | { | ||
497 | smp_call_func_t func = nmi_call_data->func; | ||
498 | void *info = nmi_call_data->info; | ||
499 | int wait = nmi_call_data->wait; | ||
500 | |||
501 | /* Notify the initiating CPU that I've grabbed the data and am about to | ||
502 | * execute the function | ||
503 | */ | ||
504 | smp_mb(); | ||
505 | cpu_clear(smp_processor_id(), nmi_call_data->started); | ||
506 | (*func)(info); | ||
507 | |||
508 | if (wait) { | ||
509 | smp_mb(); | ||
510 | cpu_clear(smp_processor_id(), nmi_call_data->finished); | ||
511 | } | ||
512 | } | ||
513 | |||
514 | #if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \ | ||
515 | defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) | ||
516 | /** | ||
517 | * smp_ipi_timer_interrupt - Local timer IPI handler | ||
518 | * @irq: The interrupt number. | ||
519 | * @dev_id: The device ID. | ||
520 | * | ||
521 | * Returns IRQ_HANDLED to indicate we handled the interrupt successfully. | ||
522 | */ | ||
523 | static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id) | ||
524 | { | ||
525 | return local_timer_interrupt(); | ||
526 | } | ||
527 | #endif | ||
528 | |||
529 | void __init smp_init_cpus(void) | ||
530 | { | ||
531 | int i; | ||
532 | for (i = 0; i < NR_CPUS; i++) { | ||
533 | set_cpu_possible(i, true); | ||
534 | set_cpu_present(i, true); | ||
535 | } | ||
536 | } | ||
537 | |||
538 | /** | ||
539 | * smp_cpu_init - Initialise AP in start_secondary. | ||
540 | * | ||
541 | * For this Application Processor, set up init_mm, initialise FPU and set | ||
542 | * interrupt level 0-6 setting. | ||
543 | */ | ||
544 | static void __init smp_cpu_init(void) | ||
545 | { | ||
546 | unsigned long flags; | ||
547 | int cpu_id = smp_processor_id(); | ||
548 | u16 tmp16; | ||
549 | |||
550 | if (test_and_set_bit(cpu_id, &cpu_initialized)) { | ||
551 | printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id); | ||
552 | for (;;) | ||
553 | local_irq_enable(); | ||
554 | } | ||
555 | printk(KERN_INFO "Initializing CPU#%d\n", cpu_id); | ||
556 | |||
557 | atomic_inc(&init_mm.mm_count); | ||
558 | current->active_mm = &init_mm; | ||
559 | BUG_ON(current->mm); | ||
560 | |||
561 | enter_lazy_tlb(&init_mm, current); | ||
562 | |||
563 | /* Force FPU initialization */ | ||
564 | clear_using_fpu(current); | ||
565 | |||
566 | GxICR(CALL_FUNC_SINGLE_IPI) = CALL_FUNCTION_GxICR_LV | GxICR_DETECT; | ||
567 | mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI); | ||
568 | |||
569 | GxICR(LOCAL_TIMER_IPI) = LOCAL_TIMER_GxICR_LV | GxICR_DETECT; | ||
570 | mn10300_ipi_enable(LOCAL_TIMER_IPI); | ||
571 | |||
572 | GxICR(RESCHEDULE_IPI) = RESCHEDULE_GxICR_LV | GxICR_DETECT; | ||
573 | mn10300_ipi_enable(RESCHEDULE_IPI); | ||
574 | |||
575 | #ifdef CONFIG_MN10300_CACHE_ENABLED | ||
576 | GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT; | ||
577 | mn10300_ipi_enable(FLUSH_CACHE_IPI); | ||
578 | #endif | ||
579 | |||
580 | mn10300_ipi_shutdown(SMP_BOOT_IRQ); | ||
581 | |||
582 | /* Set up the non-maskable call function IPI */ | ||
583 | flags = arch_local_cli_save(); | ||
584 | GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT; | ||
585 | tmp16 = GxICR(CALL_FUNCTION_NMI_IPI); | ||
586 | arch_local_irq_restore(flags); | ||
587 | } | ||
588 | |||
589 | /** | ||
590 | * smp_prepare_cpu_init - Initialise CPU in startup_secondary | ||
591 | * | ||
592 | * Set interrupt level 0-6 setting and init ICR of gdbstub. | ||
593 | */ | ||
594 | void smp_prepare_cpu_init(void) | ||
595 | { | ||
596 | int loop; | ||
597 | |||
598 | /* Set the interrupt vector registers */ | ||
599 | IVAR0 = EXCEP_IRQ_LEVEL0; | ||
600 | IVAR1 = EXCEP_IRQ_LEVEL1; | ||
601 | IVAR2 = EXCEP_IRQ_LEVEL2; | ||
602 | IVAR3 = EXCEP_IRQ_LEVEL3; | ||
603 | IVAR4 = EXCEP_IRQ_LEVEL4; | ||
604 | IVAR5 = EXCEP_IRQ_LEVEL5; | ||
605 | IVAR6 = EXCEP_IRQ_LEVEL6; | ||
606 | |||
607 | /* Disable all interrupts and set to priority 6 (lowest) */ | ||
608 | for (loop = 0; loop < GxICR_NUM_IRQS; loop++) | ||
609 | GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT; | ||
610 | |||
611 | #ifdef CONFIG_GDBSTUB | ||
612 | /* initialise GDB-stub */ | ||
613 | do { | ||
614 | unsigned long flags; | ||
615 | u16 tmp16; | ||
616 | |||
617 | flags = arch_local_cli_save(); | ||
618 | GxICR(GDB_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT; | ||
619 | tmp16 = GxICR(GDB_NMI_IPI); | ||
620 | arch_local_irq_restore(flags); | ||
621 | } while (0); | ||
622 | #endif | ||
623 | } | ||
624 | |||
625 | /** | ||
626 | * start_secondary - Activate a secondary CPU (AP) | ||
627 | * @unused: Thread parameter (ignored). | ||
628 | */ | ||
629 | int __init start_secondary(void *unused) | ||
630 | { | ||
631 | smp_cpu_init(); | ||
632 | smp_callin(); | ||
633 | while (!cpu_isset(smp_processor_id(), smp_commenced_mask)) | ||
634 | cpu_relax(); | ||
635 | |||
636 | local_flush_tlb(); | ||
637 | preempt_disable(); | ||
638 | smp_online(); | ||
639 | |||
640 | #ifdef CONFIG_GENERIC_CLOCKEVENTS | ||
641 | init_clockevents(); | ||
642 | #endif | ||
643 | cpu_idle(); | ||
644 | return 0; | ||
645 | } | ||
646 | |||
647 | /** | ||
648 | * smp_prepare_cpus - Boot up secondary CPUs (APs) | ||
649 | * @max_cpus: Maximum number of CPUs to boot. | ||
650 | * | ||
651 | * Call do_boot_cpu, and boot up APs. | ||
652 | */ | ||
653 | void __init smp_prepare_cpus(unsigned int max_cpus) | ||
654 | { | ||
655 | int phy_id; | ||
656 | |||
657 | /* Setup boot CPU information */ | ||
658 | smp_store_cpu_info(0); | ||
659 | smp_tune_scheduling(); | ||
660 | |||
661 | init_ipi(); | ||
662 | |||
663 | /* If SMP should be disabled, then finish */ | ||
664 | if (max_cpus == 0) { | ||
665 | printk(KERN_INFO "SMP mode deactivated.\n"); | ||
666 | goto smp_done; | ||
667 | } | ||
668 | |||
669 | /* Boot secondary CPUs (for which phy_id > 0) */ | ||
670 | for (phy_id = 0; phy_id < NR_CPUS; phy_id++) { | ||
671 | /* Don't boot primary CPU */ | ||
672 | if (max_cpus <= cpucount + 1) | ||
673 | continue; | ||
674 | if (phy_id != 0) | ||
675 | do_boot_cpu(phy_id); | ||
676 | set_cpu_possible(phy_id, true); | ||
677 | smp_show_cpu_info(phy_id); | ||
678 | } | ||
679 | |||
680 | smp_done: | ||
681 | Dprintk("Boot done.\n"); | ||
682 | } | ||
683 | |||
684 | /** | ||
685 | * smp_store_cpu_info - Save a CPU's information | ||
686 | * @cpu: The CPU to save for. | ||
687 | * | ||
688 | * Save boot_cpu_data and jiffy for the specified CPU. | ||
689 | */ | ||
690 | static void __init smp_store_cpu_info(int cpu) | ||
691 | { | ||
692 | struct mn10300_cpuinfo *ci = &cpu_data[cpu]; | ||
693 | |||
694 | *ci = boot_cpu_data; | ||
695 | ci->loops_per_jiffy = loops_per_jiffy; | ||
696 | ci->type = CPUREV; | ||
697 | } | ||
698 | |||
699 | /** | ||
700 | * smp_tune_scheduling - Set time slice value | ||
701 | * | ||
702 | * Nothing to do here. | ||
703 | */ | ||
704 | static void __init smp_tune_scheduling(void) | ||
705 | { | ||
706 | } | ||
707 | |||
/**
 * do_boot_cpu: Boot up one CPU
 * @phy_id: Physical ID of CPU to boot.
 *
 * Send an IPI to a secondary CPU to boot it.  Returns 0 on success, 1
 * otherwise.
 *
 * Only called with phy_id > 0 (see smp_prepare_cpus()); the logical CPU ID
 * is taken to equal the physical ID.
 */
static int __init do_boot_cpu(int phy_id)
{
	struct task_struct *idle;
	unsigned long send_status, callin_status;
	int timeout, cpu_id;

	send_status = GxICR_REQUEST;
	callin_status = 0;
	timeout = 0;
	cpu_id = phy_id;

	cpucount++;

	/* Create idle thread for this CPU */
	idle = fork_idle(cpu_id);
	if (IS_ERR(idle))
		panic("Failed fork for CPU#%d.", cpu_id);

	/* The AP will begin execution at start_secondary() */
	idle->thread.pc = (unsigned long)start_secondary;

	printk(KERN_NOTICE "Booting CPU#%d\n", cpu_id);
	/* NOTE(review): start_stack[] is indexed from 0 for CPU#1 — relies
	 * on cpu_id >= 1 here, which the caller guarantees */
	start_stack[cpu_id - 1] = idle->thread.sp;

	task_thread_info(idle)->cpu = cpu_id;

	/* Send boot IPI to AP */
	send_IPI_mask(cpumask_of(phy_id), SMP_BOOT_IRQ);

	Dprintk("Waiting for send to finish...\n");

	/* Wait for AP's IPI receive in 100[ms]; the request bit in the
	 * cross-CPU interrupt control register drops when the IPI is taken */
	do {
		udelay(1000);
		send_status =
			CROSS_GxICR(SMP_BOOT_IRQ, phy_id) & GxICR_REQUEST;
	} while (send_status == GxICR_REQUEST && timeout++ < 100);

	Dprintk("Waiting for cpu_callin_map.\n");

	if (send_status == 0) {
		/* Allow AP to start initializing */
		cpu_set(cpu_id, cpu_callout_map);

		/* Wait up to 5s for the AP to set its cpu_callin_map bit
		 * (done at the end of smp_callin()) */
		timeout = 0;
		do {
			udelay(1000);
			callin_status = cpu_isset(cpu_id, cpu_callin_map);
		} while (callin_status == 0 && timeout++ < 5000);

		if (callin_status == 0)
			Dprintk("Not responding.\n");
	} else {
		printk(KERN_WARNING "IPI not delivered.\n");
	}

	/* On any failure, undo the bookkeeping so the CPU isn't counted */
	if (send_status == GxICR_REQUEST || callin_status == 0) {
		cpu_clear(cpu_id, cpu_callout_map);
		cpu_clear(cpu_id, cpu_callin_map);
		cpu_clear(cpu_id, cpu_initialized);
		cpucount--;
		return 1;
	}
	return 0;
}
780 | |||
781 | /** | ||
782 | * smp_show_cpu_info - Show SMP CPU information | ||
783 | * @cpu: The CPU of interest. | ||
784 | */ | ||
785 | static void __init smp_show_cpu_info(int cpu) | ||
786 | { | ||
787 | struct mn10300_cpuinfo *ci = &cpu_data[cpu]; | ||
788 | |||
789 | printk(KERN_INFO | ||
790 | "CPU#%d : ioclk speed: %lu.%02luMHz : bogomips : %lu.%02lu\n", | ||
791 | cpu, | ||
792 | MN10300_IOCLK / 1000000, | ||
793 | (MN10300_IOCLK / 10000) % 100, | ||
794 | ci->loops_per_jiffy / (500000 / HZ), | ||
795 | (ci->loops_per_jiffy / (5000 / HZ)) % 100); | ||
796 | } | ||
797 | |||
/**
 * smp_callin - Set cpu_callin_map of the current CPU ID
 *
 * Runs on an AP early in start_secondary(): wait (up to 2s) for the boot
 * CPU to call us out via cpu_callout_map, calibrate the delay loop, save our
 * CPU data, and finally acknowledge by setting our cpu_callin_map bit,
 * which releases do_boot_cpu() on the boot CPU.
 */
static void __init smp_callin(void)
{
	unsigned long timeout;
	int cpu;

	cpu = smp_processor_id();
	timeout = jiffies + (2 * HZ);

	/* Being called in twice means something went badly wrong */
	if (cpu_isset(cpu, cpu_callin_map)) {
		printk(KERN_ERR "CPU#%d already present.\n", cpu);
		BUG();
	}
	Dprintk("CPU#%d waiting for CALLOUT\n", cpu);

	/* Wait for AP startup 2s total */
	while (time_before(jiffies, timeout)) {
		if (cpu_isset(cpu, cpu_callout_map))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		printk(KERN_ERR
		       "BUG: CPU#%d started up but did not get a callout!\n",
		       cpu);
		BUG();
	}

#ifdef CONFIG_CALIBRATE_DELAY
	calibrate_delay();	/* Get our bogomips */
#endif

	/* Save our processor parameters */
	smp_store_cpu_info(cpu);

	/* Allow the boot processor to continue */
	cpu_set(cpu, cpu_callin_map);
}
839 | |||
840 | /** | ||
841 | * smp_online - Set cpu_online_map | ||
842 | */ | ||
843 | static void __init smp_online(void) | ||
844 | { | ||
845 | int cpu; | ||
846 | |||
847 | cpu = smp_processor_id(); | ||
848 | |||
849 | local_irq_enable(); | ||
850 | |||
851 | cpu_set(cpu, cpu_online_map); | ||
852 | smp_wmb(); | ||
853 | } | ||
854 | |||
855 | /** | ||
856 | * smp_cpus_done - | ||
857 | * @max_cpus: Maximum CPU count. | ||
858 | * | ||
859 | * Do nothing. | ||
860 | */ | ||
861 | void __init smp_cpus_done(unsigned int max_cpus) | ||
862 | { | ||
863 | } | ||
864 | |||
865 | /* | ||
866 | * smp_prepare_boot_cpu - Set up stuff for the boot processor. | ||
867 | * | ||
868 | * Set up the cpu_online_map, cpu_callout_map and cpu_callin_map of the boot | ||
869 | * processor (CPU 0). | ||
870 | */ | ||
871 | void __devinit smp_prepare_boot_cpu(void) | ||
872 | { | ||
873 | cpu_set(0, cpu_callout_map); | ||
874 | cpu_set(0, cpu_callin_map); | ||
875 | current_thread_info()->cpu = 0; | ||
876 | } | ||
877 | |||
/*
 * initialize_secondary - Initialise a secondary CPU (Application Processor).
 *
 * Set SP register and jump to thread's PC address.  Both values were stored
 * in the idle thread by do_boot_cpu() (pc = start_secondary), so control
 * never returns here.
 */
void initialize_secondary(void)
{
	asm volatile (
	"mov	%0,sp	\n"
	"jmp	(%1)	\n"
	:
	: "a"(current->thread.sp), "a"(current->thread.pc));
}
891 | |||
/**
 * __cpu_up - Set smp_commenced_mask for the nominated CPU
 * @cpu: The target CPU.
 *
 * Release the nominated AP from its spin in start_secondary() and wait up
 * to 5s for it to mark itself online.  Always returns 0; a CPU that fails
 * to come online triggers BUG().
 */
int __devinit __cpu_up(unsigned int cpu)
{
	int timeout;

#ifdef CONFIG_HOTPLUG_CPU
	/* going from 1 to 2 online CPUs: stop using hlt for idle
	 * (__cpu_die() re-enables it when we drop back to one) */
	if (num_online_cpus() == 1)
		disable_hlt();
	/* a CPU previously put to sleep by __cpu_die() must be woken */
	if (sleep_mode[cpu])
		run_wakeup_cpu(cpu);
#endif /* CONFIG_HOTPLUG_CPU */

	cpu_set(cpu, smp_commenced_mask);

	/* Wait 5s total for a response */
	for (timeout = 0 ; timeout < 5000 ; timeout++) {
		if (cpu_isset(cpu, cpu_online_map))
			break;
		udelay(1000);
	}

	BUG_ON(!cpu_isset(cpu, cpu_online_map));
	return 0;
}
919 | |||
/**
 * setup_profiling_timer - Set up the profiling timer
 * @multiplier: The frequency multiplier to use
 *
 * The frequency of the profiling timer can be changed by writing a
 * multiplier value into /proc/profile.  Changing the multiplier is not
 * supported on this architecture, so the request is always rejected.
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
931 | |||
932 | /* | ||
933 | * CPU hotplug routines | ||
934 | */ | ||
935 | #ifdef CONFIG_HOTPLUG_CPU | ||
936 | |||
937 | static DEFINE_PER_CPU(struct cpu, cpu_devices); | ||
938 | |||
939 | static int __init topology_init(void) | ||
940 | { | ||
941 | int cpu, ret; | ||
942 | |||
943 | for_each_cpu(cpu) { | ||
944 | ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL); | ||
945 | if (ret) | ||
946 | printk(KERN_WARNING | ||
947 | "topology_init: register_cpu %d failed (%d)\n", | ||
948 | cpu, ret); | ||
949 | } | ||
950 | return 0; | ||
951 | } | ||
952 | |||
953 | subsys_initcall(topology_init); | ||
954 | |||
/**
 * __cpu_disable - Take the current CPU out of service
 *
 * Refuse to offline the boot CPU; otherwise migrate IRQs away from this CPU
 * and drop it from the current mm's CPU-VM mask.  Returns 0 on success or
 * -EBUSY for CPU 0.
 */
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	/* the boot CPU can never be taken down */
	if (cpu == 0)
		return -EBUSY;

	migrate_irqs();
	cpu_clear(cpu, current->active_mm->cpu_vm_mask);
	return 0;
}
965 | |||
/**
 * __cpu_die - Complete the death of a CPU
 * @cpu: The dying CPU.
 *
 * Put the nominated CPU into sleep mode; if that leaves only one CPU
 * online, hlt-based idling can be used again (this pairs with the
 * disable_hlt() in __cpu_up()).
 */
void __cpu_die(unsigned int cpu)
{
	run_sleep_cpu(cpu);

	if (num_online_cpus() == 1)
		enable_hlt();
}
973 | |||
974 | #ifdef CONFIG_MN10300_CACHE_ENABLED | ||
/**
 * hotplug_cpu_disable_cache - Turn off this CPU's caches before sleeping
 *
 * Clear the instruction- and data-cache enable bits in CHCTR, then poll the
 * busy bits until the hardware reports the caches have actually stopped.
 */
static inline void hotplug_cpu_disable_cache(void)
{
	int tmp;
	/* CHCTR &= ~(ICEN | DCEN); then spin while ICBUSY | DCBUSY set */
	asm volatile(
		" movhu (%1),%0 \n"
		" and %2,%0 \n"
		" movhu %0,(%1) \n"
		"1: movhu (%1),%0 \n"
		" btst %3,%0 \n"
		" bne 1b \n"
		: "=&r"(tmp)
		: "a"(&CHCTR),
		  "i"(~(CHCTR_ICEN | CHCTR_DCEN)),
		  "i"(CHCTR_ICBUSY | CHCTR_DCBUSY)
		: "memory", "cc");
}
991 | |||
/**
 * hotplug_cpu_enable_cache - Re-enable this CPU's caches after wakeup
 *
 * Set the instruction- and data-cache enable bits in CHCTR.
 */
static inline void hotplug_cpu_enable_cache(void)
{
	int tmp;
	/* CHCTR |= ICEN | DCEN */
	asm volatile(
		"movhu (%1),%0 \n"
		"or %2,%0 \n"
		"movhu %0,(%1) \n"
		: "=&r"(tmp)
		: "a"(&CHCTR),
		  "i"(CHCTR_ICEN | CHCTR_DCEN)
		: "memory", "cc");
}
1004 | |||
/**
 * hotplug_cpu_invalidate_cache - Invalidate this CPU's caches
 *
 * Set the instruction- and data-cache invalidate bits in CHCTR.
 *
 * NOTE(review): unlike the enable/disable variants above, this asm does not
 * list a "memory" clobber — confirm that is intentional.
 */
static inline void hotplug_cpu_invalidate_cache(void)
{
	int tmp;
	/* CHCTR |= ICINV | DCINV */
	asm volatile (
		"movhu (%1),%0 \n"
		"or %2,%0 \n"
		"movhu %0,(%1) \n"
		: "=&r"(tmp)
		: "a"(&CHCTR),
		  "i"(CHCTR_ICINV | CHCTR_DCINV)
		: "cc");
}
1017 | |||
1018 | #else /* CONFIG_MN10300_CACHE_ENABLED */ | ||
1019 | #define hotplug_cpu_disable_cache() do {} while (0) | ||
1020 | #define hotplug_cpu_enable_cache() do {} while (0) | ||
1021 | #define hotplug_cpu_invalidate_cache() do {} while (0) | ||
1022 | #endif /* CONFIG_MN10300_CACHE_ENABLED */ | ||
1023 | |||
/**
 * hotplug_cpu_nmi_call_function - Call a function on other CPUs for hotplug
 * @cpumask: List of target CPUs.
 * @func: The function to call on those CPUs.
 * @info: The context data for the function to be called.
 * @wait: Whether to wait for the calls to complete.
 *
 * Non-maskably call a function on another CPU for hotplug purposes.
 *
 * This function must be called with maskable interrupts disabled.
 *
 * Always returns 0.  Note that a target CPU that never responds will leave
 * the polling loops below spinning forever.
 */
static int hotplug_cpu_nmi_call_function(cpumask_t cpumask,
					 smp_call_func_t func, void *info,
					 int wait)
{
	/*
	 * The address and the size of nmi_call_func_mask_data
	 * need to be aligned on L1_CACHE_BYTES.
	 */
	static struct nmi_call_data_struct nmi_call_func_mask_data
		__cacheline_aligned;
	unsigned long start, end;

	start = (unsigned long)&nmi_call_func_mask_data;
	end = start + sizeof(struct nmi_call_data_struct);

	/* Fill in the call descriptor; presumably each target clears its
	 * bit in .started/.finished as it progresses — the NMI handler is
	 * not visible in this file, so confirm against it */
	nmi_call_func_mask_data.func = func;
	nmi_call_func_mask_data.info = info;
	nmi_call_func_mask_data.started = cpumask;
	nmi_call_func_mask_data.wait = wait;
	if (wait)
		nmi_call_func_mask_data.finished = cpumask;

	spin_lock(&smp_nmi_call_lock);
	nmi_call_data = &nmi_call_func_mask_data;
	/* push the descriptor out to RAM before raising the NMI */
	mn10300_local_dcache_flush_range(start, end);
	smp_wmb();

	send_IPI_mask(cpumask, CALL_FUNCTION_NMI_IPI);

	/* poll (invalidating our cached copy each time) until every target
	 * has started */
	do {
		mn10300_local_dcache_inv_range(start, end);
		barrier();
	} while (!cpus_empty(nmi_call_func_mask_data.started));

	if (wait) {
		/* ... and, if requested, until every target has finished */
		do {
			mn10300_local_dcache_inv_range(start, end);
			barrier();
		} while (!cpus_empty(nmi_call_func_mask_data.finished));
	}

	spin_unlock(&smp_nmi_call_lock);
	return 0;
}
1079 | |||
/**
 * restart_wakeup_cpu - Finish waking up, on the woken CPU itself
 *
 * Runs on a CPU leaving sleep_cpu()'s sleep loop: report back in, flush the
 * (potentially stale) local TLB and mark ourselves online again.
 */
static void restart_wakeup_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	cpu_set(cpu, cpu_callin_map);
	local_flush_tlb();
	cpu_set(cpu, cpu_online_map);
	smp_wmb();	/* publish the online state */
}
1089 | |||
/**
 * prepare_sleep_cpu - Ready this CPU for sleep mode
 * @unused: Call context (ignored).
 *
 * Invoked by NMI on the CPU about to sleep (see run_sleep_cpu()): set the
 * sleep flag, then flush and disable the local caches so no dirty data is
 * stranded while the CPU is down.
 */
static void prepare_sleep_cpu(void *unused)
{
	sleep_mode[smp_processor_id()] = 1;
	smp_mb();	/* flag must be visible before the caches go down */
	mn10300_local_dcache_flush_inv();
	hotplug_cpu_disable_cache();
	hotplug_cpu_invalidate_cache();
}
1098 | |||
/* when this function called, IE=0, NMID=0. */
/**
 * sleep_cpu - Sleep loop for a CPU that has been taken down
 * @unused: Call context (ignored).
 *
 * Sleep until wakeup_cpu() clears our sleep_mode[] flag — each wakeup of
 * __sleep_cpu() re-checks the flag and goes back to sleep if it is still
 * set — then complete the wakeup via restart_wakeup_cpu().
 */
static void sleep_cpu(void *unused)
{
	unsigned int cpu_id = smp_processor_id();
	/*
	 * CALL_FUNCTION_NMI_IPI for wakeup_cpu() shall not be requested,
	 * before this cpu goes in SLEEP mode.
	 */
	do {
		smp_mb();
		__sleep_cpu();
	} while (sleep_mode[cpu_id]);
	restart_wakeup_cpu();
}
1113 | |||
1114 | static void run_sleep_cpu(unsigned int cpu) | ||
1115 | { | ||
1116 | unsigned long flags; | ||
1117 | cpumask_t cpumask = cpumask_of(cpu); | ||
1118 | |||
1119 | flags = arch_local_cli_save(); | ||
1120 | hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1); | ||
1121 | hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0); | ||
1122 | udelay(1); /* delay for the cpu to sleep. */ | ||
1123 | arch_local_irq_restore(flags); | ||
1124 | } | ||
1125 | |||
/**
 * wakeup_cpu - Wakeup routine, run on the target CPU by NMI
 *
 * Bring the local caches back (invalidate stale contents, then enable) and
 * clear the sleep flag so that sleep_cpu()'s loop can exit.
 */
static void wakeup_cpu(void)
{
	hotplug_cpu_invalidate_cache();
	hotplug_cpu_enable_cache();
	smp_mb();	/* caches must be up before the flag is cleared */
	sleep_mode[smp_processor_id()] = 0;
}
1133 | |||
1134 | static void run_wakeup_cpu(unsigned int cpu) | ||
1135 | { | ||
1136 | unsigned long flags; | ||
1137 | |||
1138 | flags = arch_local_cli_save(); | ||
1139 | #if NR_CPUS == 2 | ||
1140 | mn10300_local_dcache_flush_inv(); | ||
1141 | #else | ||
1142 | /* | ||
1143 | * Before waking up the cpu, | ||
1144 | * all online cpus should stop and flush D-Cache for global data. | ||
1145 | */ | ||
1146 | #error not support NR_CPUS > 2, when CONFIG_HOTPLUG_CPU=y. | ||
1147 | #endif | ||
1148 | hotplug_cpu_nmi_call_function(cpumask_of(cpu), wakeup_cpu, NULL, 1); | ||
1149 | arch_local_irq_restore(flags); | ||
1150 | } | ||
1151 | |||
1152 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
diff --git a/arch/mn10300/kernel/switch_to.S b/arch/mn10300/kernel/switch_to.S index 630aad71b946..9074d0fb8788 100644 --- a/arch/mn10300/kernel/switch_to.S +++ b/arch/mn10300/kernel/switch_to.S | |||
@@ -15,6 +15,9 @@ | |||
15 | #include <linux/linkage.h> | 15 | #include <linux/linkage.h> |
16 | #include <asm/thread_info.h> | 16 | #include <asm/thread_info.h> |
17 | #include <asm/cpu-regs.h> | 17 | #include <asm/cpu-regs.h> |
18 | #ifdef CONFIG_SMP | ||
19 | #include <proc/smp-regs.h> | ||
20 | #endif /* CONFIG_SMP */ | ||
18 | 21 | ||
19 | .text | 22 | .text |
20 | 23 | ||
@@ -35,8 +38,6 @@ ENTRY(__switch_to) | |||
35 | mov d1,a1 | 38 | mov d1,a1 |
36 | 39 | ||
37 | # save prev context | 40 | # save prev context |
38 | mov (__frame),d0 | ||
39 | mov d0,(THREAD_FRAME,a0) | ||
40 | mov __switch_back,d0 | 41 | mov __switch_back,d0 |
41 | mov d0,(THREAD_PC,a0) | 42 | mov d0,(THREAD_PC,a0) |
42 | mov sp,a2 | 43 | mov sp,a2 |
@@ -58,8 +59,6 @@ ENTRY(__switch_to) | |||
58 | mov a2,e2 | 59 | mov a2,e2 |
59 | #endif | 60 | #endif |
60 | 61 | ||
61 | mov (THREAD_FRAME,a1),a2 | ||
62 | mov a2,(__frame) | ||
63 | mov (THREAD_PC,a1),a2 | 62 | mov (THREAD_PC,a1),a2 |
64 | mov d2,d0 # for ret_from_fork | 63 | mov d2,d0 # for ret_from_fork |
65 | mov d0,a0 # for __switch_to | 64 | mov d0,a0 # for __switch_to |
diff --git a/arch/mn10300/kernel/time.c b/arch/mn10300/kernel/time.c index 8f7f6d22783d..f860a340acc9 100644 --- a/arch/mn10300/kernel/time.c +++ b/arch/mn10300/kernel/time.c | |||
@@ -17,29 +17,18 @@ | |||
17 | #include <linux/smp.h> | 17 | #include <linux/smp.h> |
18 | #include <linux/profile.h> | 18 | #include <linux/profile.h> |
19 | #include <linux/cnt32_to_63.h> | 19 | #include <linux/cnt32_to_63.h> |
20 | #include <linux/clocksource.h> | ||
21 | #include <linux/clockchips.h> | ||
20 | #include <asm/irq.h> | 22 | #include <asm/irq.h> |
21 | #include <asm/div64.h> | 23 | #include <asm/div64.h> |
22 | #include <asm/processor.h> | 24 | #include <asm/processor.h> |
23 | #include <asm/intctl-regs.h> | 25 | #include <asm/intctl-regs.h> |
24 | #include <asm/rtc.h> | 26 | #include <asm/rtc.h> |
25 | 27 | #include "internal.h" | |
26 | #ifdef CONFIG_MN10300_RTC | ||
27 | unsigned long mn10300_ioclk; /* system I/O clock frequency */ | ||
28 | unsigned long mn10300_iobclk; /* system I/O clock frequency */ | ||
29 | unsigned long mn10300_tsc_per_HZ; /* number of ioclks per jiffy */ | ||
30 | #endif /* CONFIG_MN10300_RTC */ | ||
31 | 28 | ||
32 | static unsigned long mn10300_last_tsc; /* time-stamp counter at last time | 29 | static unsigned long mn10300_last_tsc; /* time-stamp counter at last time |
33 | * interrupt occurred */ | 30 | * interrupt occurred */ |
34 | 31 | ||
35 | static irqreturn_t timer_interrupt(int irq, void *dev_id); | ||
36 | |||
37 | static struct irqaction timer_irq = { | ||
38 | .handler = timer_interrupt, | ||
39 | .flags = IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER, | ||
40 | .name = "timer", | ||
41 | }; | ||
42 | |||
43 | static unsigned long sched_clock_multiplier; | 32 | static unsigned long sched_clock_multiplier; |
44 | 33 | ||
45 | /* | 34 | /* |
@@ -54,9 +43,12 @@ unsigned long long sched_clock(void) | |||
54 | unsigned long tsc, tmp; | 43 | unsigned long tsc, tmp; |
55 | unsigned product[3]; /* 96-bit intermediate value */ | 44 | unsigned product[3]; /* 96-bit intermediate value */ |
56 | 45 | ||
46 | /* cnt32_to_63() is not safe with preemption */ | ||
47 | preempt_disable(); | ||
48 | |||
57 | /* read the TSC value | 49 | /* read the TSC value |
58 | */ | 50 | */ |
59 | tsc = 0 - get_cycles(); /* get_cycles() counts down */ | 51 | tsc = get_cycles(); |
60 | 52 | ||
61 | /* expand to 64-bits. | 53 | /* expand to 64-bits. |
62 | * - sched_clock() must be called once a minute or better or the | 54 | * - sched_clock() must be called once a minute or better or the |
@@ -64,6 +56,8 @@ unsigned long long sched_clock(void) | |||
64 | */ | 56 | */ |
65 | tsc64.ll = cnt32_to_63(tsc) & 0x7fffffffffffffffULL; | 57 | tsc64.ll = cnt32_to_63(tsc) & 0x7fffffffffffffffULL; |
66 | 58 | ||
59 | preempt_enable(); | ||
60 | |||
67 | /* scale the 64-bit TSC value to a nanosecond value via a 96-bit | 61 | /* scale the 64-bit TSC value to a nanosecond value via a 96-bit |
68 | * intermediate | 62 | * intermediate |
69 | */ | 63 | */ |
@@ -90,6 +84,20 @@ static void __init mn10300_sched_clock_init(void) | |||
90 | __muldiv64u(NSEC_PER_SEC, 1 << 16, MN10300_TSCCLK); | 84 | __muldiv64u(NSEC_PER_SEC, 1 << 16, MN10300_TSCCLK); |
91 | } | 85 | } |
92 | 86 | ||
87 | /** | ||
88 | * local_timer_interrupt - Local timer interrupt handler | ||
89 | * | ||
90 | * Handle local timer interrupts for this CPU. They may have been propagated | ||
91 | * to this CPU from the CPU that actually gets them by way of an IPI. | ||
92 | */ | ||
93 | irqreturn_t local_timer_interrupt(void) | ||
94 | { | ||
95 | profile_tick(CPU_PROFILING); | ||
96 | update_process_times(user_mode(get_irq_regs())); | ||
97 | return IRQ_HANDLED; | ||
98 | } | ||
99 | |||
100 | #ifndef CONFIG_GENERIC_TIME | ||
93 | /* | 101 | /* |
94 | * advance the kernel's time keeping clocks (xtime and jiffies) | 102 | * advance the kernel's time keeping clocks (xtime and jiffies) |
95 | * - we use Timer 0 & 1 cascaded as a clock to nudge us the next time | 103 | * - we use Timer 0 & 1 cascaded as a clock to nudge us the next time |
@@ -98,27 +106,73 @@ static void __init mn10300_sched_clock_init(void) | |||
98 | static irqreturn_t timer_interrupt(int irq, void *dev_id) | 106 | static irqreturn_t timer_interrupt(int irq, void *dev_id) |
99 | { | 107 | { |
100 | unsigned tsc, elapse; | 108 | unsigned tsc, elapse; |
109 | irqreturn_t ret; | ||
101 | 110 | ||
102 | write_seqlock(&xtime_lock); | 111 | write_seqlock(&xtime_lock); |
103 | 112 | ||
104 | while (tsc = get_cycles(), | 113 | while (tsc = get_cycles(), |
105 | elapse = mn10300_last_tsc - tsc, /* time elapsed since last | 114 | elapse = tsc - mn10300_last_tsc, /* time elapsed since last |
106 | * tick */ | 115 | * tick */ |
107 | elapse > MN10300_TSC_PER_HZ | 116 | elapse > MN10300_TSC_PER_HZ |
108 | ) { | 117 | ) { |
109 | mn10300_last_tsc -= MN10300_TSC_PER_HZ; | 118 | mn10300_last_tsc += MN10300_TSC_PER_HZ; |
110 | 119 | ||
111 | /* advance the kernel's time tracking system */ | 120 | /* advance the kernel's time tracking system */ |
112 | profile_tick(CPU_PROFILING); | ||
113 | do_timer(1); | 121 | do_timer(1); |
114 | } | 122 | } |
115 | 123 | ||
116 | write_sequnlock(&xtime_lock); | 124 | write_sequnlock(&xtime_lock); |
117 | 125 | ||
118 | update_process_times(user_mode(get_irq_regs())); | 126 | ret = local_timer_interrupt(); |
127 | #ifdef CONFIG_SMP | ||
128 | send_IPI_allbutself(LOCAL_TIMER_IPI); | ||
129 | #endif | ||
130 | return ret; | ||
131 | } | ||
119 | 132 | ||
120 | return IRQ_HANDLED; | 133 | static struct irqaction timer_irq = { |
134 | .handler = timer_interrupt, | ||
135 | .flags = IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER, | ||
136 | .name = "timer", | ||
137 | }; | ||
138 | #endif /* CONFIG_GENERIC_TIME */ | ||
139 | |||
140 | #ifdef CONFIG_CSRC_MN10300 | ||
141 | void __init clocksource_set_clock(struct clocksource *cs, unsigned int clock) | ||
142 | { | ||
143 | u64 temp; | ||
144 | u32 shift; | ||
145 | |||
146 | /* Find a shift value */ | ||
147 | for (shift = 32; shift > 0; shift--) { | ||
148 | temp = (u64) NSEC_PER_SEC << shift; | ||
149 | do_div(temp, clock); | ||
150 | if ((temp >> 32) == 0) | ||
151 | break; | ||
152 | } | ||
153 | cs->shift = shift; | ||
154 | cs->mult = (u32) temp; | ||
121 | } | 155 | } |
156 | #endif | ||
157 | |||
158 | #if CONFIG_CEVT_MN10300 | ||
159 | void __cpuinit clockevent_set_clock(struct clock_event_device *cd, | ||
160 | unsigned int clock) | ||
161 | { | ||
162 | u64 temp; | ||
163 | u32 shift; | ||
164 | |||
165 | /* Find a shift value */ | ||
166 | for (shift = 32; shift > 0; shift--) { | ||
167 | temp = (u64) clock << shift; | ||
168 | do_div(temp, NSEC_PER_SEC); | ||
169 | if ((temp >> 32) == 0) | ||
170 | break; | ||
171 | } | ||
172 | cd->shift = shift; | ||
173 | cd->mult = (u32) temp; | ||
174 | } | ||
175 | #endif | ||
122 | 176 | ||
123 | /* | 177 | /* |
124 | * initialise the various timers used by the main part of the kernel | 178 | * initialise the various timers used by the main part of the kernel |
@@ -131,21 +185,25 @@ void __init time_init(void) | |||
131 | */ | 185 | */ |
132 | TMPSCNT |= TMPSCNT_ENABLE; | 186 | TMPSCNT |= TMPSCNT_ENABLE; |
133 | 187 | ||
188 | #ifdef CONFIG_GENERIC_TIME | ||
189 | init_clocksource(); | ||
190 | #else | ||
134 | startup_timestamp_counter(); | 191 | startup_timestamp_counter(); |
192 | #endif | ||
135 | 193 | ||
136 | printk(KERN_INFO | 194 | printk(KERN_INFO |
137 | "timestamp counter I/O clock running at %lu.%02lu" | 195 | "timestamp counter I/O clock running at %lu.%02lu" |
138 | " (calibrated against RTC)\n", | 196 | " (calibrated against RTC)\n", |
139 | MN10300_TSCCLK / 1000000, (MN10300_TSCCLK / 10000) % 100); | 197 | MN10300_TSCCLK / 1000000, (MN10300_TSCCLK / 10000) % 100); |
140 | 198 | ||
141 | mn10300_last_tsc = TMTSCBC; | 199 | mn10300_last_tsc = read_timestamp_counter(); |
142 | |||
143 | /* use timer 0 & 1 cascaded to tick at as close to HZ as possible */ | ||
144 | setup_irq(TMJCIRQ, &timer_irq); | ||
145 | 200 | ||
146 | set_intr_level(TMJCIRQ, TMJCICR_LEVEL); | 201 | #ifdef CONFIG_GENERIC_CLOCKEVENTS |
147 | 202 | init_clockevents(); | |
148 | startup_jiffies_counter(); | 203 | #else |
204 | reload_jiffies_counter(MN10300_JC_PER_HZ - 1); | ||
205 | setup_jiffies_interrupt(TMJCIRQ, &timer_irq, CONFIG_TIMER_IRQ_LEVEL); | ||
206 | #endif | ||
149 | 207 | ||
150 | #ifdef CONFIG_MN10300_WD_TIMER | 208 | #ifdef CONFIG_MN10300_WD_TIMER |
151 | /* start the watchdog timer */ | 209 | /* start the watchdog timer */ |
diff --git a/arch/mn10300/kernel/traps.c b/arch/mn10300/kernel/traps.c index 91365adba4f5..b90c3f160c77 100644 --- a/arch/mn10300/kernel/traps.c +++ b/arch/mn10300/kernel/traps.c | |||
@@ -45,9 +45,6 @@ | |||
45 | #error "INTERRUPT_VECTOR_BASE not aligned to 16MiB boundary!" | 45 | #error "INTERRUPT_VECTOR_BASE not aligned to 16MiB boundary!" |
46 | #endif | 46 | #endif |
47 | 47 | ||
48 | struct pt_regs *__frame; /* current frame pointer */ | ||
49 | EXPORT_SYMBOL(__frame); | ||
50 | |||
51 | int kstack_depth_to_print = 24; | 48 | int kstack_depth_to_print = 24; |
52 | 49 | ||
53 | spinlock_t die_lock = __SPIN_LOCK_UNLOCKED(die_lock); | 50 | spinlock_t die_lock = __SPIN_LOCK_UNLOCKED(die_lock); |
@@ -101,7 +98,6 @@ DO_EINFO(SIGILL, {}, "invalid opcode", invalid_op, ILL_ILLOPC); | |||
101 | DO_EINFO(SIGILL, {}, "invalid ex opcode", invalid_exop, ILL_ILLOPC); | 98 | DO_EINFO(SIGILL, {}, "invalid ex opcode", invalid_exop, ILL_ILLOPC); |
102 | DO_EINFO(SIGBUS, {}, "invalid address", mem_error, BUS_ADRERR); | 99 | DO_EINFO(SIGBUS, {}, "invalid address", mem_error, BUS_ADRERR); |
103 | DO_EINFO(SIGBUS, {}, "bus error", bus_error, BUS_ADRERR); | 100 | DO_EINFO(SIGBUS, {}, "bus error", bus_error, BUS_ADRERR); |
104 | DO_EINFO(SIGILL, {}, "FPU invalid opcode", fpu_invalid_op, ILL_COPROC); | ||
105 | 101 | ||
106 | DO_ERROR(SIGTRAP, | 102 | DO_ERROR(SIGTRAP, |
107 | #ifndef CONFIG_MN10300_USING_JTAG | 103 | #ifndef CONFIG_MN10300_USING_JTAG |
@@ -222,11 +218,14 @@ void show_registers_only(struct pt_regs *regs) | |||
222 | printk(KERN_EMERG "threadinfo=%p task=%p)\n", | 218 | printk(KERN_EMERG "threadinfo=%p task=%p)\n", |
223 | current_thread_info(), current); | 219 | current_thread_info(), current); |
224 | 220 | ||
225 | if ((unsigned long) current >= 0x90000000UL && | 221 | if ((unsigned long) current >= PAGE_OFFSET && |
226 | (unsigned long) current < 0x94000000UL) | 222 | (unsigned long) current < (unsigned long)high_memory) |
227 | printk(KERN_EMERG "Process %s (pid: %d)\n", | 223 | printk(KERN_EMERG "Process %s (pid: %d)\n", |
228 | current->comm, current->pid); | 224 | current->comm, current->pid); |
229 | 225 | ||
226 | #ifdef CONFIG_SMP | ||
227 | printk(KERN_EMERG "CPUID: %08x\n", CPUID); | ||
228 | #endif | ||
230 | printk(KERN_EMERG "CPUP: %04hx\n", CPUP); | 229 | printk(KERN_EMERG "CPUP: %04hx\n", CPUP); |
231 | printk(KERN_EMERG "TBR: %08x\n", TBR); | 230 | printk(KERN_EMERG "TBR: %08x\n", TBR); |
232 | printk(KERN_EMERG "DEAR: %08x\n", DEAR); | 231 | printk(KERN_EMERG "DEAR: %08x\n", DEAR); |
@@ -522,8 +521,12 @@ void __init set_intr_stub(enum exception_code code, void *handler) | |||
522 | { | 521 | { |
523 | unsigned long addr; | 522 | unsigned long addr; |
524 | u8 *vector = (u8 *)(CONFIG_INTERRUPT_VECTOR_BASE + code); | 523 | u8 *vector = (u8 *)(CONFIG_INTERRUPT_VECTOR_BASE + code); |
524 | unsigned long flags; | ||
525 | 525 | ||
526 | addr = (unsigned long) handler - (unsigned long) vector; | 526 | addr = (unsigned long) handler - (unsigned long) vector; |
527 | |||
528 | flags = arch_local_cli_save(); | ||
529 | |||
527 | vector[0] = 0xdc; /* JMP handler */ | 530 | vector[0] = 0xdc; /* JMP handler */ |
528 | vector[1] = addr; | 531 | vector[1] = addr; |
529 | vector[2] = addr >> 8; | 532 | vector[2] = addr >> 8; |
@@ -533,30 +536,12 @@ void __init set_intr_stub(enum exception_code code, void *handler) | |||
533 | vector[6] = 0xcb; | 536 | vector[6] = 0xcb; |
534 | vector[7] = 0xcb; | 537 | vector[7] = 0xcb; |
535 | 538 | ||
536 | mn10300_dcache_flush_inv(); | 539 | arch_local_irq_restore(flags); |
537 | mn10300_icache_inv(); | ||
538 | } | ||
539 | |||
540 | /* | ||
541 | * set an interrupt stub to invoke the JTAG unit and then jump to a handler | ||
542 | */ | ||
543 | void __init set_jtag_stub(enum exception_code code, void *handler) | ||
544 | { | ||
545 | unsigned long addr; | ||
546 | u8 *vector = (u8 *)(CONFIG_INTERRUPT_VECTOR_BASE + code); | ||
547 | |||
548 | addr = (unsigned long) handler - ((unsigned long) vector + 1); | ||
549 | vector[0] = 0xff; /* PI to jump into JTAG debugger */ | ||
550 | vector[1] = 0xdc; /* jmp handler */ | ||
551 | vector[2] = addr; | ||
552 | vector[3] = addr >> 8; | ||
553 | vector[4] = addr >> 16; | ||
554 | vector[5] = addr >> 24; | ||
555 | vector[6] = 0xcb; | ||
556 | vector[7] = 0xcb; | ||
557 | 540 | ||
541 | #ifndef CONFIG_MN10300_CACHE_SNOOP | ||
558 | mn10300_dcache_flush_inv(); | 542 | mn10300_dcache_flush_inv(); |
559 | flush_icache_range((unsigned long) vector, (unsigned long) vector + 8); | 543 | mn10300_icache_inv(); |
544 | #endif | ||
560 | } | 545 | } |
561 | 546 | ||
562 | /* | 547 | /* |
@@ -581,7 +566,6 @@ void __init trap_init(void) | |||
581 | set_excp_vector(EXCEP_PRIVINSACC, insn_acc_error); | 566 | set_excp_vector(EXCEP_PRIVINSACC, insn_acc_error); |
582 | set_excp_vector(EXCEP_PRIVDATACC, data_acc_error); | 567 | set_excp_vector(EXCEP_PRIVDATACC, data_acc_error); |
583 | set_excp_vector(EXCEP_DATINSACC, insn_acc_error); | 568 | set_excp_vector(EXCEP_DATINSACC, insn_acc_error); |
584 | set_excp_vector(EXCEP_FPU_DISABLED, fpu_disabled); | ||
585 | set_excp_vector(EXCEP_FPU_UNIMPINS, fpu_invalid_op); | 569 | set_excp_vector(EXCEP_FPU_UNIMPINS, fpu_invalid_op); |
586 | set_excp_vector(EXCEP_FPU_OPERATION, fpu_exception); | 570 | set_excp_vector(EXCEP_FPU_OPERATION, fpu_exception); |
587 | 571 | ||