Diffstat (limited to 'arch/m68k/kernel')
-rw-r--r--  arch/m68k/kernel/Makefile_mm       |   17
-rw-r--r--  arch/m68k/kernel/Makefile_no       |   10
-rw-r--r--  arch/m68k/kernel/dma_mm.c          |  130
-rw-r--r--  arch/m68k/kernel/dma_no.c          |   74
-rw-r--r--  arch/m68k/kernel/entry_mm.S        |  409
-rw-r--r--  arch/m68k/kernel/entry_no.S        |  133
-rw-r--r--  arch/m68k/kernel/init_task.c       |   36
-rw-r--r--  arch/m68k/kernel/process_mm.c      |  354
-rw-r--r--  arch/m68k/kernel/process_no.c      |  406
-rw-r--r--  arch/m68k/kernel/ptrace_mm.c       |  277
-rw-r--r--  arch/m68k/kernel/ptrace_no.c       |  255
-rw-r--r--  arch/m68k/kernel/signal_mm.c       | 1017
-rw-r--r--  arch/m68k/kernel/signal_no.c       |  765
-rw-r--r--  arch/m68k/kernel/time_mm.c         |  114
-rw-r--r--  arch/m68k/kernel/time_no.c         |   87
-rw-r--r--  arch/m68k/kernel/traps_mm.c        | 1207
-rw-r--r--  arch/m68k/kernel/traps_no.c        |  361
-rw-r--r--  arch/m68k/kernel/vmlinux.lds_mm.S  |   10
-rw-r--r--  arch/m68k/kernel/vmlinux.lds_no.S  |  188
19 files changed, 5850 insertions(+), 0 deletions(-)
diff --git a/arch/m68k/kernel/Makefile_mm b/arch/m68k/kernel/Makefile_mm
new file mode 100644
index 00000000000..aced6780457
--- /dev/null
+++ b/arch/m68k/kernel/Makefile_mm
@@ -0,0 +1,17 @@
1#
2# Makefile for the linux kernel.
3#
4
5ifndef CONFIG_SUN3
6 extra-y := head.o
7else
8 extra-y := sun3-head.o
9endif
10extra-y += vmlinux.lds
11
12obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o module.o \
13 sys_m68k.o time.o setup.o m68k_ksyms.o devres.o syscalltable.o
14
15devres-y = ../../../kernel/irq/devres.o
16
17obj-y$(CONFIG_MMU_SUN3) += dma.o # no, it's not a typo
diff --git a/arch/m68k/kernel/Makefile_no b/arch/m68k/kernel/Makefile_no
new file mode 100644
index 00000000000..37c3fc074c0
--- /dev/null
+++ b/arch/m68k/kernel/Makefile_no
@@ -0,0 +1,10 @@
1#
2# Makefile for arch/m68knommu/kernel.
3#
4
5extra-y := vmlinux.lds
6
7obj-y += dma.o entry.o init_task.o irq.o m68k_ksyms.o process.o ptrace.o \
8 setup.o signal.o syscalltable.o sys_m68k.o time.o traps.o
9
10obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/m68k/kernel/dma_mm.c b/arch/m68k/kernel/dma_mm.c
new file mode 100644
index 00000000000..4bbb3c2a888
--- /dev/null
+++ b/arch/m68k/kernel/dma_mm.c
@@ -0,0 +1,130 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file COPYING in the main directory of this archive
4 * for more details.
5 */
6
7#undef DEBUG
8
9#include <linux/dma-mapping.h>
10#include <linux/device.h>
11#include <linux/kernel.h>
12#include <linux/scatterlist.h>
13#include <linux/slab.h>
14#include <linux/vmalloc.h>
15
16#include <asm/pgalloc.h>
17
18void *dma_alloc_coherent(struct device *dev, size_t size,
19 dma_addr_t *handle, gfp_t flag)
20{
21 struct page *page, **map;
22 pgprot_t pgprot;
23 void *addr;
24 int i, order;
25
26 pr_debug("dma_alloc_coherent: %d,%x\n", size, flag);
27
28 size = PAGE_ALIGN(size);
29 order = get_order(size);
30
31 page = alloc_pages(flag, order);
32 if (!page)
33 return NULL;
34
35 *handle = page_to_phys(page);
36 map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA);
37 if (!map) {
38 __free_pages(page, order);
39 return NULL;
40 }
41 split_page(page, order);
42
43 order = 1 << order;
44 size >>= PAGE_SHIFT;
45 map[0] = page;
46 for (i = 1; i < size; i++)
47 map[i] = page + i;
48 for (; i < order; i++)
49 __free_page(page + i);
50 pgprot = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
51 if (CPU_IS_040_OR_060)
52 pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
53 else
54 pgprot_val(pgprot) |= _PAGE_NOCACHE030;
55 addr = vmap(map, size, VM_MAP, pgprot);
56 kfree(map);
57
58 return addr;
59}
60EXPORT_SYMBOL(dma_alloc_coherent);
61
62void dma_free_coherent(struct device *dev, size_t size,
63 void *addr, dma_addr_t handle)
64{
65 pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
66 vfree(addr);
67}
68EXPORT_SYMBOL(dma_free_coherent);
69
70void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
71 size_t size, enum dma_data_direction dir)
72{
73 switch (dir) {
74 case DMA_TO_DEVICE:
75 cache_push(handle, size);
76 break;
77 case DMA_FROM_DEVICE:
78 cache_clear(handle, size);
79 break;
80 default:
81 if (printk_ratelimit())
82 printk("dma_sync_single_for_device: unsupported dir %u\n", dir);
83 break;
84 }
85}
86EXPORT_SYMBOL(dma_sync_single_for_device);
87
88void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
89 enum dma_data_direction dir)
90{
91 int i;
92
93 for (i = 0; i < nents; sg++, i++)
94 dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
95}
96EXPORT_SYMBOL(dma_sync_sg_for_device);
97
98dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
99 enum dma_data_direction dir)
100{
101 dma_addr_t handle = virt_to_bus(addr);
102
103 dma_sync_single_for_device(dev, handle, size, dir);
104 return handle;
105}
106EXPORT_SYMBOL(dma_map_single);
107
108dma_addr_t dma_map_page(struct device *dev, struct page *page,
109 unsigned long offset, size_t size,
110 enum dma_data_direction dir)
111{
112 dma_addr_t handle = page_to_phys(page) + offset;
113
114 dma_sync_single_for_device(dev, handle, size, dir);
115 return handle;
116}
117EXPORT_SYMBOL(dma_map_page);
118
119int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
120 enum dma_data_direction dir)
121{
122 int i;
123
124 for (i = 0; i < nents; sg++, i++) {
125 sg->dma_address = sg_phys(sg);
126 dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
127 }
128 return nents;
129}
130EXPORT_SYMBOL(dma_map_sg);
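
The coherent-allocation path above (alloc_pages + split_page + vmap with a non-cacheable pgprot) is consumed through the generic DMA API. As a minimal usage sketch only — not part of the patch, with a hypothetical device pointer and buffer size — a driver of that era would call it roughly like this:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/gfp.h>

	static void *buf;          /* kernel virtual address returned by vmap() */
	static dma_addr_t buf_dma; /* bus address programmed into the device */

	static int example_setup(struct device *dev)
	{
		/* dma_alloc_coherent() above page-aligns the size, allocates the
		 * pages and remaps them non-cacheable, so CPU and device see a
		 * consistent view without explicit cache flushes. */
		buf = dma_alloc_coherent(dev, 4096, &buf_dma, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		/* ... hand buf_dma to the device ... */
		return 0;
	}

	static void example_teardown(struct device *dev)
	{
		dma_free_coherent(dev, 4096, buf, buf_dma);
	}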
diff --git a/arch/m68k/kernel/dma_no.c b/arch/m68k/kernel/dma_no.c
new file mode 100644
index 00000000000..fc61541aeb7
--- /dev/null
+++ b/arch/m68k/kernel/dma_no.c
@@ -0,0 +1,74 @@
1/*
2 * Dynamic DMA mapping support.
3 *
4 * We never have any address translations to worry about, so this
5 * is just alloc/free.
6 */
7
8#include <linux/types.h>
9#include <linux/gfp.h>
10#include <linux/mm.h>
11#include <linux/device.h>
12#include <linux/dma-mapping.h>
13#include <asm/cacheflush.h>
14
15void *dma_alloc_coherent(struct device *dev, size_t size,
16 dma_addr_t *dma_handle, gfp_t gfp)
17{
18 void *ret;
19 /* ignore region specifiers */
20 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
21
22 if (dev == NULL || (*dev->dma_mask < 0xffffffff))
23 gfp |= GFP_DMA;
24 ret = (void *)__get_free_pages(gfp, get_order(size));
25
26 if (ret != NULL) {
27 memset(ret, 0, size);
28 *dma_handle = virt_to_phys(ret);
29 }
30 return ret;
31}
32
33void dma_free_coherent(struct device *dev, size_t size,
34 void *vaddr, dma_addr_t dma_handle)
35{
36 free_pages((unsigned long)vaddr, get_order(size));
37}
38
39void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
40 size_t size, enum dma_data_direction dir)
41{
42 switch (dir) {
43 case DMA_TO_DEVICE:
44 flush_dcache_range(handle, size);
45 break;
46 case DMA_FROM_DEVICE:
47 /* Should be clear already */
48 break;
49 default:
50 if (printk_ratelimit())
51 printk("dma_sync_single_for_device: unsupported dir %u\n", dir);
52 break;
53 }
54}
55
56EXPORT_SYMBOL(dma_sync_single_for_device);
57dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
58 enum dma_data_direction dir)
59{
60 dma_addr_t handle = virt_to_phys(addr);
61 flush_dcache_range(handle, size);
62 return handle;
63}
64EXPORT_SYMBOL(dma_map_single);
65
66dma_addr_t dma_map_page(struct device *dev, struct page *page,
67 unsigned long offset, size_t size,
68 enum dma_data_direction dir)
69{
70 dma_addr_t handle = page_to_phys(page) + offset;
71 dma_sync_single_for_device(dev, handle, size, dir);
72 return handle;
73}
74EXPORT_SYMBOL(dma_map_page);
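
Since the nommu variant has no address translation, dma_map_single() above reduces to virt_to_phys() plus a data-cache flush. A hedged sketch (not from the patch; the function and names are hypothetical) of the streaming path:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/types.h>

	static void example_tx(struct device *dev, void *pkt, size_t len)
	{
		dma_addr_t handle;

		/* virt_to_phys() + flush_dcache_range(), per dma_map_single() above */
		handle = dma_map_single(dev, pkt, len, DMA_TO_DEVICE);

		/* ... hand 'handle' to the device and start the transfer ... */

		/* If the CPU rewrites the buffer before reusing the mapping, push
		 * the dcache out again before the next transfer. */
		dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);
	}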
diff --git a/arch/m68k/kernel/entry_mm.S b/arch/m68k/kernel/entry_mm.S
new file mode 100644
index 00000000000..bd0ec05263b
--- /dev/null
+++ b/arch/m68k/kernel/entry_mm.S
@@ -0,0 +1,409 @@
1/* -*- mode: asm -*-
2 *
3 * linux/arch/m68k/kernel/entry.S
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file README.legal in the main directory of this archive
9 * for more details.
10 *
11 * Linux/m68k support by Hamish Macdonald
12 *
13 * 68060 fixes by Jesper Skov
14 *
15 */
16
17/*
18 * entry.S contains the system-call and fault low-level handling routines.
19 * This also contains the timer-interrupt handler, as well as all interrupts
20 * and faults that can result in a task-switch.
21 *
22 * NOTE: This code handles signal-recognition, which happens every time
23 * after a timer-interrupt and after each system call.
24 *
25 */
26
27/*
28 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
29 * all pointers that used to be 'current' are now entry
30 * number 0 in the 'current_set' list.
31 *
32 *	6/05/00 RZ: added writeback completion after return from sighandler
33 * for 68040
34 */
35
36#include <linux/linkage.h>
37#include <asm/entry.h>
38#include <asm/errno.h>
39#include <asm/setup.h>
40#include <asm/segment.h>
41#include <asm/traps.h>
42#include <asm/unistd.h>
43
44#include <asm/asm-offsets.h>
45
46.globl system_call, buserr, trap, resume
47.globl sys_call_table
48.globl sys_fork, sys_clone, sys_vfork
49.globl ret_from_interrupt, bad_interrupt
50.globl auto_irqhandler_fixup
51.globl user_irqvec_fixup, user_irqhandler_fixup
52
53.text
54ENTRY(buserr)
55 SAVE_ALL_INT
56 GET_CURRENT(%d0)
57 movel %sp,%sp@- | stack frame pointer argument
58 bsrl buserr_c
59 addql #4,%sp
60 jra .Lret_from_exception
61
62ENTRY(trap)
63 SAVE_ALL_INT
64 GET_CURRENT(%d0)
65 movel %sp,%sp@- | stack frame pointer argument
66 bsrl trap_c
67 addql #4,%sp
68 jra .Lret_from_exception
69
70 | After a fork we jump here directly from resume,
71 | so that %d1 contains the previous task
72 | schedule_tail now used regardless of CONFIG_SMP
73ENTRY(ret_from_fork)
74 movel %d1,%sp@-
75 jsr schedule_tail
76 addql #4,%sp
77 jra .Lret_from_exception
78
79do_trace_entry:
80 movel #-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
81 subql #4,%sp
82 SAVE_SWITCH_STACK
83 jbsr syscall_trace
84 RESTORE_SWITCH_STACK
85 addql #4,%sp
86 movel %sp@(PT_OFF_ORIG_D0),%d0
87 cmpl #NR_syscalls,%d0
88 jcs syscall
89badsys:
90 movel #-ENOSYS,%sp@(PT_OFF_D0)
91 jra ret_from_syscall
92
93do_trace_exit:
94 subql #4,%sp
95 SAVE_SWITCH_STACK
96 jbsr syscall_trace
97 RESTORE_SWITCH_STACK
98 addql #4,%sp
99 jra .Lret_from_exception
100
101ENTRY(ret_from_signal)
102 tstb %curptr@(TASK_INFO+TINFO_FLAGS+2)
103 jge 1f
104 jbsr syscall_trace
1051: RESTORE_SWITCH_STACK
106 addql #4,%sp
107/* on 68040 complete pending writebacks if any */
108#ifdef CONFIG_M68040
109 bfextu %sp@(PT_OFF_FORMATVEC){#0,#4},%d0
110 subql #7,%d0 | bus error frame ?
111 jbne 1f
112 movel %sp,%sp@-
113 jbsr berr_040cleanup
114 addql #4,%sp
1151:
116#endif
117 jra .Lret_from_exception
118
119ENTRY(system_call)
120 SAVE_ALL_SYS
121
122 GET_CURRENT(%d1)
123 | save top of frame
124 movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
125
126 | syscall trace?
127 tstb %curptr@(TASK_INFO+TINFO_FLAGS+2)
128 jmi do_trace_entry
129 cmpl #NR_syscalls,%d0
130 jcc badsys
131syscall:
132 jbsr @(sys_call_table,%d0:l:4)@(0)
133 movel %d0,%sp@(PT_OFF_D0) | save the return value
134ret_from_syscall:
135 |oriw #0x0700,%sr
136 movew %curptr@(TASK_INFO+TINFO_FLAGS+2),%d0
137 jne syscall_exit_work
1381: RESTORE_ALL
139
140syscall_exit_work:
141 btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
142 bnes 1b | if so, skip resched, signals
143 lslw #1,%d0
144 jcs do_trace_exit
145 jmi do_delayed_trace
146 lslw #8,%d0
147 jmi do_signal_return
148 pea resume_userspace
149 jra schedule
150
151
152ENTRY(ret_from_exception)
153.Lret_from_exception:
154 btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
155 bnes 1f | if so, skip resched, signals
156 | only allow interrupts when we are really the last one on the
157 | kernel stack, otherwise stack overflow can occur during
158 | heavy interrupt load
159 andw #ALLOWINT,%sr
160
161resume_userspace:
162 moveb %curptr@(TASK_INFO+TINFO_FLAGS+3),%d0
163 jne exit_work
1641: RESTORE_ALL
165
166exit_work:
167 | save top of frame
168 movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
169 lslb #1,%d0
170 jmi do_signal_return
171 pea resume_userspace
172 jra schedule
173
174
175do_signal_return:
176 |andw #ALLOWINT,%sr
177 subql #4,%sp | dummy return address
178 SAVE_SWITCH_STACK
179 pea %sp@(SWITCH_STACK_SIZE)
180 bsrl do_signal
181 addql #4,%sp
182 RESTORE_SWITCH_STACK
183 addql #4,%sp
184 jbra resume_userspace
185
186do_delayed_trace:
187 bclr #7,%sp@(PT_OFF_SR) | clear trace bit in SR
188 pea 1 | send SIGTRAP
189 movel %curptr,%sp@-
190 pea LSIGTRAP
191 jbsr send_sig
192 addql #8,%sp
193 addql #4,%sp
194 jbra resume_userspace
195
196
197/* This is the main interrupt handler for autovector interrupts */
198
199ENTRY(auto_inthandler)
200 SAVE_ALL_INT
201 GET_CURRENT(%d0)
202 addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
203 | put exception # in d0
204 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
205 subw #VEC_SPUR,%d0
206
207 movel %sp,%sp@-
208 movel %d0,%sp@- | put vector # on stack
209auto_irqhandler_fixup = . + 2
210 jsr __m68k_handle_int | process the IRQ
211 addql #8,%sp | pop parameters off stack
212
213ret_from_interrupt:
214 subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
215 jeq ret_from_last_interrupt
2162: RESTORE_ALL
217
218 ALIGN
219ret_from_last_interrupt:
220 moveq #(~ALLOWINT>>8)&0xff,%d0
221 andb %sp@(PT_OFF_SR),%d0
222 jne 2b
223
224 /* check if we need to do software interrupts */
225 tstl irq_stat+CPUSTAT_SOFTIRQ_PENDING
226 jeq .Lret_from_exception
227 pea ret_from_exception
228 jra do_softirq
229
230/* Handler for user defined interrupt vectors */
231
232ENTRY(user_inthandler)
233 SAVE_ALL_INT
234 GET_CURRENT(%d0)
235 addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
236 | put exception # in d0
237 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
238user_irqvec_fixup = . + 2
239 subw #VEC_USER,%d0
240
241 movel %sp,%sp@-
242 movel %d0,%sp@- | put vector # on stack
243user_irqhandler_fixup = . + 2
244 jsr __m68k_handle_int | process the IRQ
245 addql #8,%sp | pop parameters off stack
246
247 subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
248 jeq ret_from_last_interrupt
249 RESTORE_ALL
250
251/* Handler for uninitialized and spurious interrupts */
252
253ENTRY(bad_inthandler)
254 SAVE_ALL_INT
255 GET_CURRENT(%d0)
256 addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
257
258 movel %sp,%sp@-
259 jsr handle_badint
260 addql #4,%sp
261
262 subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
263 jeq ret_from_last_interrupt
264 RESTORE_ALL
265
266
267ENTRY(sys_fork)
268 SAVE_SWITCH_STACK
269 pea %sp@(SWITCH_STACK_SIZE)
270 jbsr m68k_fork
271 addql #4,%sp
272 RESTORE_SWITCH_STACK
273 rts
274
275ENTRY(sys_clone)
276 SAVE_SWITCH_STACK
277 pea %sp@(SWITCH_STACK_SIZE)
278 jbsr m68k_clone
279 addql #4,%sp
280 RESTORE_SWITCH_STACK
281 rts
282
283ENTRY(sys_vfork)
284 SAVE_SWITCH_STACK
285 pea %sp@(SWITCH_STACK_SIZE)
286 jbsr m68k_vfork
287 addql #4,%sp
288 RESTORE_SWITCH_STACK
289 rts
290
291ENTRY(sys_sigreturn)
292 SAVE_SWITCH_STACK
293 jbsr do_sigreturn
294 RESTORE_SWITCH_STACK
295 rts
296
297ENTRY(sys_rt_sigreturn)
298 SAVE_SWITCH_STACK
299 jbsr do_rt_sigreturn
300 RESTORE_SWITCH_STACK
301 rts
302
303resume:
304 /*
305 * Beware - when entering resume, prev (the current task) is
306 * in a0, next (the new task) is in a1,so don't change these
307 * registers until their contents are no longer needed.
308 */
309
310 /* save sr */
311 movew %sr,%a0@(TASK_THREAD+THREAD_SR)
312
313 /* save fs (sfc,%dfc) (may be pointing to kernel memory) */
314 movec %sfc,%d0
315 movew %d0,%a0@(TASK_THREAD+THREAD_FS)
316
317 /* save usp */
318 /* it is better to use a movel here instead of a movew 8*) */
319 movec %usp,%d0
320 movel %d0,%a0@(TASK_THREAD+THREAD_USP)
321
322 /* save non-scratch registers on stack */
323 SAVE_SWITCH_STACK
324
325 /* save current kernel stack pointer */
326 movel %sp,%a0@(TASK_THREAD+THREAD_KSP)
327
328 /* save floating point context */
329#ifndef CONFIG_M68KFPU_EMU_ONLY
330#ifdef CONFIG_M68KFPU_EMU
331 tstl m68k_fputype
332 jeq 3f
333#endif
334 fsave %a0@(TASK_THREAD+THREAD_FPSTATE)
335
336#if defined(CONFIG_M68060)
337#if !defined(CPU_M68060_ONLY)
338 btst #3,m68k_cputype+3
339 beqs 1f
340#endif
341 /* The 060 FPU keeps status in bits 15-8 of the first longword */
342 tstb %a0@(TASK_THREAD+THREAD_FPSTATE+2)
343 jeq 3f
344#if !defined(CPU_M68060_ONLY)
345 jra 2f
346#endif
347#endif /* CONFIG_M68060 */
348#if !defined(CPU_M68060_ONLY)
3491: tstb %a0@(TASK_THREAD+THREAD_FPSTATE)
350 jeq 3f
351#endif
3522: fmovemx %fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
353 fmoveml %fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3543:
355#endif /* CONFIG_M68KFPU_EMU_ONLY */
356 /* Return previous task in %d1 */
357 movel %curptr,%d1
358
359 /* switch to new task (a1 contains new task) */
360 movel %a1,%curptr
361
362 /* restore floating point context */
363#ifndef CONFIG_M68KFPU_EMU_ONLY
364#ifdef CONFIG_M68KFPU_EMU
365 tstl m68k_fputype
366 jeq 4f
367#endif
368#if defined(CONFIG_M68060)
369#if !defined(CPU_M68060_ONLY)
370 btst #3,m68k_cputype+3
371 beqs 1f
372#endif
373 /* The 060 FPU keeps status in bits 15-8 of the first longword */
374 tstb %a1@(TASK_THREAD+THREAD_FPSTATE+2)
375 jeq 3f
376#if !defined(CPU_M68060_ONLY)
377 jra 2f
378#endif
379#endif /* CONFIG_M68060 */
380#if !defined(CPU_M68060_ONLY)
3811: tstb %a1@(TASK_THREAD+THREAD_FPSTATE)
382 jeq 3f
383#endif
3842: fmovemx %a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
385 fmoveml %a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3863: frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
3874:
388#endif /* CONFIG_M68KFPU_EMU_ONLY */
389
390 /* restore the kernel stack pointer */
391 movel %a1@(TASK_THREAD+THREAD_KSP),%sp
392
393 /* restore non-scratch registers */
394 RESTORE_SWITCH_STACK
395
396 /* restore user stack pointer */
397 movel %a1@(TASK_THREAD+THREAD_USP),%a0
398 movel %a0,%usp
399
400 /* restore fs (sfc,%dfc) */
401 movew %a1@(TASK_THREAD+THREAD_FS),%a0
402 movec %a0,%sfc
403 movec %a0,%dfc
404
405 /* restore status register */
406 movew %a1@(TASK_THREAD+THREAD_SR),%sr
407
408 rts
409
diff --git a/arch/m68k/kernel/entry_no.S b/arch/m68k/kernel/entry_no.S
new file mode 100644
index 00000000000..5f0f6b598b5
--- /dev/null
+++ b/arch/m68k/kernel/entry_no.S
@@ -0,0 +1,133 @@
1/*
2 * linux/arch/m68knommu/kernel/entry.S
3 *
4 * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
5 * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
6 * Kenneth Albanowski <kjahds@kjahds.com>,
7 * Copyright (C) 2000 Lineo Inc. (www.lineo.com)
8 *
9 * Based on:
10 *
11 * linux/arch/m68k/kernel/entry.S
12 *
13 * Copyright (C) 1991, 1992 Linus Torvalds
14 *
15 * This file is subject to the terms and conditions of the GNU General Public
16 * License. See the file README.legal in the main directory of this archive
17 * for more details.
18 *
19 * Linux/m68k support by Hamish Macdonald
20 *
21 * 68060 fixes by Jesper Skov
22 * ColdFire support by Greg Ungerer (gerg@snapgear.com)
23 * 5307 fixes by David W. Miller
24 * linux 2.4 support David McCullough <davidm@snapgear.com>
25 */
26
27#include <linux/linkage.h>
28#include <asm/errno.h>
29#include <asm/setup.h>
30#include <asm/segment.h>
31#include <asm/asm-offsets.h>
32#include <asm/entry.h>
33#include <asm/unistd.h>
34
35.text
36
37.globl buserr
38.globl trap
39.globl ret_from_exception
40.globl ret_from_signal
41.globl sys_fork
42.globl sys_clone
43.globl sys_vfork
44
45ENTRY(buserr)
46 SAVE_ALL
47 moveq #-1,%d0
48 movel %d0,%sp@(PT_OFF_ORIG_D0)
49 movel %sp,%sp@- /* stack frame pointer argument */
50 jsr buserr_c
51 addql #4,%sp
52 jra ret_from_exception
53
54ENTRY(trap)
55 SAVE_ALL
56 moveq #-1,%d0
57 movel %d0,%sp@(PT_OFF_ORIG_D0)
58 movel %sp,%sp@- /* stack frame pointer argument */
59 jsr trap_c
60 addql #4,%sp
61 jra ret_from_exception
62
63#ifdef TRAP_DBG_INTERRUPT
64
65.globl dbginterrupt
66ENTRY(dbginterrupt)
67 SAVE_ALL
68 moveq #-1,%d0
69 movel %d0,%sp@(PT_OFF_ORIG_D0)
70 movel %sp,%sp@- /* stack frame pointer argument */
71 jsr dbginterrupt_c
72 addql #4,%sp
73 jra ret_from_exception
74#endif
75
76ENTRY(reschedule)
77 /* save top of frame */
78 pea %sp@
79 jbsr set_esp0
80 addql #4,%sp
81 pea ret_from_exception
82 jmp schedule
83
84ENTRY(ret_from_fork)
85 movel %d1,%sp@-
86 jsr schedule_tail
87 addql #4,%sp
88 jra ret_from_exception
89
90ENTRY(sys_fork)
91 SAVE_SWITCH_STACK
92 pea %sp@(SWITCH_STACK_SIZE)
93 jbsr m68k_fork
94 addql #4,%sp
95 RESTORE_SWITCH_STACK
96 rts
97
98ENTRY(sys_vfork)
99 SAVE_SWITCH_STACK
100 pea %sp@(SWITCH_STACK_SIZE)
101 jbsr m68k_vfork
102 addql #4,%sp
103 RESTORE_SWITCH_STACK
104 rts
105
106ENTRY(sys_clone)
107 SAVE_SWITCH_STACK
108 pea %sp@(SWITCH_STACK_SIZE)
109 jbsr m68k_clone
110 addql #4,%sp
111 RESTORE_SWITCH_STACK
112 rts
113
114ENTRY(sys_sigreturn)
115 SAVE_SWITCH_STACK
116 jbsr do_sigreturn
117 RESTORE_SWITCH_STACK
118 rts
119
120ENTRY(sys_rt_sigreturn)
121 SAVE_SWITCH_STACK
122 jbsr do_rt_sigreturn
123 RESTORE_SWITCH_STACK
124 rts
125
126ENTRY(ret_from_user_signal)
127 moveq #__NR_sigreturn,%d0
128 trap #0
129
130ENTRY(ret_from_user_rt_signal)
131 movel #__NR_rt_sigreturn,%d0
132 trap #0
133
diff --git a/arch/m68k/kernel/init_task.c b/arch/m68k/kernel/init_task.c
new file mode 100644
index 00000000000..cbf9dc3cc51
--- /dev/null
+++ b/arch/m68k/kernel/init_task.c
@@ -0,0 +1,36 @@
1/*
2 * linux/arch/m68knommu/kernel/init_task.c
3 */
4#include <linux/mm.h>
5#include <linux/module.h>
6#include <linux/sched.h>
7#include <linux/init.h>
8#include <linux/init_task.h>
9#include <linux/fs.h>
10#include <linux/mqueue.h>
11
12#include <asm/uaccess.h>
13#include <asm/pgtable.h>
14
15static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
16static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17/*
18 * Initial task structure.
19 *
20 * All other task structs will be allocated on slabs in fork.c
21 */
22__asm__(".align 4");
23struct task_struct init_task = INIT_TASK(init_task);
24
25EXPORT_SYMBOL(init_task);
26
27/*
28 * Initial thread structure.
29 *
30 * We need to make sure that this is 8192-byte aligned due to the
31 * way process stacks are handled. This is done by having a special
32 * "init_task" linker map entry..
33 */
34union thread_union init_thread_union __init_task_data =
35 { INIT_THREAD_INFO(init_task) };
36
diff --git a/arch/m68k/kernel/process_mm.c b/arch/m68k/kernel/process_mm.c
new file mode 100644
index 00000000000..1bc223aa07e
--- /dev/null
+++ b/arch/m68k/kernel/process_mm.c
@@ -0,0 +1,354 @@
1/*
2 * linux/arch/m68k/kernel/process.c
3 *
4 * Copyright (C) 1995 Hamish Macdonald
5 *
6 * 68060 fixes by Jesper Skov
7 */
8
9/*
10 * This file handles the architecture-dependent parts of process handling..
11 */
12
13#include <linux/errno.h>
14#include <linux/module.h>
15#include <linux/sched.h>
16#include <linux/kernel.h>
17#include <linux/mm.h>
18#include <linux/slab.h>
19#include <linux/fs.h>
20#include <linux/smp.h>
21#include <linux/stddef.h>
22#include <linux/unistd.h>
23#include <linux/ptrace.h>
24#include <linux/user.h>
25#include <linux/reboot.h>
26#include <linux/init_task.h>
27#include <linux/mqueue.h>
28
29#include <asm/uaccess.h>
30#include <asm/system.h>
31#include <asm/traps.h>
32#include <asm/machdep.h>
33#include <asm/setup.h>
34#include <asm/pgtable.h>
35
36/*
37 * Initial task/thread structure. Make this a per-architecture thing,
38 * because different architectures tend to have different
39 * alignment requirements and potentially different initial
40 * setup.
41 */
42static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
43static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
44union thread_union init_thread_union __init_task_data
45 __attribute__((aligned(THREAD_SIZE))) =
46 { INIT_THREAD_INFO(init_task) };
47
48/* initial task structure */
49struct task_struct init_task = INIT_TASK(init_task);
50
51EXPORT_SYMBOL(init_task);
52
53asmlinkage void ret_from_fork(void);
54
55
56/*
57 * Return saved PC from a blocked thread
58 */
59unsigned long thread_saved_pc(struct task_struct *tsk)
60{
61 struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
62 /* Check whether the thread is blocked in resume() */
63 if (in_sched_functions(sw->retpc))
64 return ((unsigned long *)sw->a6)[1];
65 else
66 return sw->retpc;
67}
68
69/*
70 * The idle loop on an m68k..
71 */
72static void default_idle(void)
73{
74 if (!need_resched())
75#if defined(MACH_ATARI_ONLY)
76 /* block out HSYNC on the atari (falcon) */
77 __asm__("stop #0x2200" : : : "cc");
78#else
79 __asm__("stop #0x2000" : : : "cc");
80#endif
81}
82
83void (*idle)(void) = default_idle;
84
85/*
86 * The idle thread. There's no useful work to be
87 * done, so just try to conserve power and have a
88 * low exit latency (ie sit in a loop waiting for
89 * somebody to say that they'd like to reschedule)
90 */
91void cpu_idle(void)
92{
93 /* endless idle loop with no priority at all */
94 while (1) {
95 while (!need_resched())
96 idle();
97 preempt_enable_no_resched();
98 schedule();
99 preempt_disable();
100 }
101}
102
103void machine_restart(char * __unused)
104{
105 if (mach_reset)
106 mach_reset();
107 for (;;);
108}
109
110void machine_halt(void)
111{
112 if (mach_halt)
113 mach_halt();
114 for (;;);
115}
116
117void machine_power_off(void)
118{
119 if (mach_power_off)
120 mach_power_off();
121 for (;;);
122}
123
124void (*pm_power_off)(void) = machine_power_off;
125EXPORT_SYMBOL(pm_power_off);
126
127void show_regs(struct pt_regs * regs)
128{
129 printk("\n");
130 printk("Format %02x Vector: %04x PC: %08lx Status: %04x %s\n",
131 regs->format, regs->vector, regs->pc, regs->sr, print_tainted());
132 printk("ORIG_D0: %08lx D0: %08lx A2: %08lx A1: %08lx\n",
133 regs->orig_d0, regs->d0, regs->a2, regs->a1);
134 printk("A0: %08lx D5: %08lx D4: %08lx\n",
135 regs->a0, regs->d5, regs->d4);
136 printk("D3: %08lx D2: %08lx D1: %08lx\n",
137 regs->d3, regs->d2, regs->d1);
138 if (!(regs->sr & PS_S))
139 printk("USP: %08lx\n", rdusp());
140}
141
142/*
143 * Create a kernel thread
144 */
145int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
146{
147 int pid;
148 mm_segment_t fs;
149
150 fs = get_fs();
151 set_fs (KERNEL_DS);
152
153 {
154 register long retval __asm__ ("d0");
155 register long clone_arg __asm__ ("d1") = flags | CLONE_VM | CLONE_UNTRACED;
156
157 retval = __NR_clone;
158 __asm__ __volatile__
159 ("clrl %%d2\n\t"
160 "trap #0\n\t" /* Linux/m68k system call */
161 "tstl %0\n\t" /* child or parent */
162 "jne 1f\n\t" /* parent - jump */
163 "lea %%sp@(%c7),%6\n\t" /* reload current */
164 "movel %6@,%6\n\t"
165 "movel %3,%%sp@-\n\t" /* push argument */
166 "jsr %4@\n\t" /* call fn */
167 "movel %0,%%d1\n\t" /* pass exit value */
168 "movel %2,%%d0\n\t" /* exit */
169 "trap #0\n"
170 "1:"
171 : "+d" (retval)
172 : "i" (__NR_clone), "i" (__NR_exit),
173 "r" (arg), "a" (fn), "d" (clone_arg), "r" (current),
174 "i" (-THREAD_SIZE)
175 : "d2");
176
177 pid = retval;
178 }
179
180 set_fs (fs);
181 return pid;
182}
183EXPORT_SYMBOL(kernel_thread);
184
185void flush_thread(void)
186{
187 unsigned long zero = 0;
188
189 current->thread.fs = __USER_DS;
190 if (!FPU_IS_EMU)
191 asm volatile (".chip 68k/68881\n\t"
192 "frestore %0@\n\t"
193 ".chip 68k" : : "a" (&zero));
194}
195
196/*
197 * "m68k_fork()".. By the time we get here, the
198 * non-volatile registers have also been saved on the
199 * stack. We do some ugly pointer stuff here.. (see
200 * also copy_thread)
201 */
202
203asmlinkage int m68k_fork(struct pt_regs *regs)
204{
205 return do_fork(SIGCHLD, rdusp(), regs, 0, NULL, NULL);
206}
207
208asmlinkage int m68k_vfork(struct pt_regs *regs)
209{
210 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0,
211 NULL, NULL);
212}
213
214asmlinkage int m68k_clone(struct pt_regs *regs)
215{
216 unsigned long clone_flags;
217 unsigned long newsp;
218 int __user *parent_tidptr, *child_tidptr;
219
220 /* syscall2 puts clone_flags in d1 and usp in d2 */
221 clone_flags = regs->d1;
222 newsp = regs->d2;
223 parent_tidptr = (int __user *)regs->d3;
224 child_tidptr = (int __user *)regs->d4;
225 if (!newsp)
226 newsp = rdusp();
227 return do_fork(clone_flags, newsp, regs, 0,
228 parent_tidptr, child_tidptr);
229}
230
231int copy_thread(unsigned long clone_flags, unsigned long usp,
232 unsigned long unused,
233 struct task_struct * p, struct pt_regs * regs)
234{
235 struct pt_regs * childregs;
236 struct switch_stack * childstack, *stack;
237 unsigned long *retp;
238
239 childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
240
241 *childregs = *regs;
242 childregs->d0 = 0;
243
244 retp = ((unsigned long *) regs);
245 stack = ((struct switch_stack *) retp) - 1;
246
247 childstack = ((struct switch_stack *) childregs) - 1;
248 *childstack = *stack;
249 childstack->retpc = (unsigned long)ret_from_fork;
250
251 p->thread.usp = usp;
252 p->thread.ksp = (unsigned long)childstack;
253
254 if (clone_flags & CLONE_SETTLS)
255 task_thread_info(p)->tp_value = regs->d5;
256
257 /*
258 * Must save the current SFC/DFC value, NOT the value when
259 * the parent was last descheduled - RGH 10-08-96
260 */
261 p->thread.fs = get_fs().seg;
262
263 if (!FPU_IS_EMU) {
264 /* Copy the current fpu state */
265 asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory");
266
267 if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2])
268 asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t"
269 "fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
270 : : "m" (p->thread.fp[0]), "m" (p->thread.fpcntl[0])
271 : "memory");
272 /* Restore the state in case the fpu was busy */
273 asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0]));
274 }
275
276 return 0;
277}
278
279/* Fill in the fpu structure for a core dump. */
280
281int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu)
282{
283 char fpustate[216];
284
285 if (FPU_IS_EMU) {
286 int i;
287
288 memcpy(fpu->fpcntl, current->thread.fpcntl, 12);
289 memcpy(fpu->fpregs, current->thread.fp, 96);
290 /* Convert internal fpu reg representation
291 * into long double format
292 */
293 for (i = 0; i < 24; i += 3)
294 fpu->fpregs[i] = ((fpu->fpregs[i] & 0xffff0000) << 15) |
295 ((fpu->fpregs[i] & 0x0000ffff) << 16);
296 return 1;
297 }
298
299 /* First dump the fpu context to avoid protocol violation. */
300 asm volatile ("fsave %0" :: "m" (fpustate[0]) : "memory");
301 if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2])
302 return 0;
303
304 asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
305 :: "m" (fpu->fpcntl[0])
306 : "memory");
307 asm volatile ("fmovemx %/fp0-%/fp7,%0"
308 :: "m" (fpu->fpregs[0])
309 : "memory");
310 return 1;
311}
312EXPORT_SYMBOL(dump_fpu);
313
314/*
315 * sys_execve() executes a new program.
316 */
317asmlinkage int sys_execve(const char __user *name,
318 const char __user *const __user *argv,
319 const char __user *const __user *envp)
320{
321 int error;
322 char * filename;
323 struct pt_regs *regs = (struct pt_regs *) &name;
324
325 filename = getname(name);
326 error = PTR_ERR(filename);
327 if (IS_ERR(filename))
328 return error;
329 error = do_execve(filename, argv, envp, regs);
330 putname(filename);
331 return error;
332}
333
334unsigned long get_wchan(struct task_struct *p)
335{
336 unsigned long fp, pc;
337 unsigned long stack_page;
338 int count = 0;
339 if (!p || p == current || p->state == TASK_RUNNING)
340 return 0;
341
342 stack_page = (unsigned long)task_stack_page(p);
343 fp = ((struct switch_stack *)p->thread.ksp)->a6;
344 do {
345 if (fp < stack_page+sizeof(struct thread_info) ||
346 fp >= 8184+stack_page)
347 return 0;
348 pc = ((unsigned long *)fp)[1];
349 if (!in_sched_functions(pc))
350 return pc;
351 fp = *(unsigned long *) fp;
352 } while (count++ < 16);
353 return 0;
354}
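
kernel_thread() above spawns a kernel-mode child by issuing the clone trap directly from kernel space under KERNEL_DS. A hedged illustration (not part of the patch; the caller and worker names are hypothetical) of a typical 2.6-era user of that interface:

	#include <linux/kernel.h>
	#include <linux/sched.h>

	static int my_worker(void *arg)
	{
		/* Runs with the kernel address space (CLONE_VM is forced above);
		 * the return value is handed to the exit trap by the trampoline. */
		return 0;
	}

	static void example_spawn(void)
	{
		int pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES);

		if (pid < 0)
			printk(KERN_ERR "kernel_thread failed: %d\n", pid);
	}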
diff --git a/arch/m68k/kernel/process_no.c b/arch/m68k/kernel/process_no.c
new file mode 100644
index 00000000000..69c1803fcf1
--- /dev/null
+++ b/arch/m68k/kernel/process_no.c
@@ -0,0 +1,406 @@
1/*
2 * linux/arch/m68knommu/kernel/process.c
3 *
4 * Copyright (C) 1995 Hamish Macdonald
5 *
6 * 68060 fixes by Jesper Skov
7 *
8 * uClinux changes
9 * Copyright (C) 2000-2002, David McCullough <davidm@snapgear.com>
10 */
11
12/*
13 * This file handles the architecture-dependent parts of process handling..
14 */
15
16#include <linux/module.h>
17#include <linux/errno.h>
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/mm.h>
21#include <linux/smp.h>
22#include <linux/stddef.h>
23#include <linux/unistd.h>
24#include <linux/ptrace.h>
25#include <linux/user.h>
26#include <linux/interrupt.h>
27#include <linux/reboot.h>
28#include <linux/fs.h>
29#include <linux/slab.h>
30
31#include <asm/uaccess.h>
32#include <asm/system.h>
33#include <asm/traps.h>
34#include <asm/machdep.h>
35#include <asm/setup.h>
36#include <asm/pgtable.h>
37
38asmlinkage void ret_from_fork(void);
39
40/*
41 * The following aren't currently used.
42 */
43void (*pm_idle)(void);
44EXPORT_SYMBOL(pm_idle);
45
46void (*pm_power_off)(void);
47EXPORT_SYMBOL(pm_power_off);
48
49/*
50 * The idle loop on an m68knommu..
51 */
52static void default_idle(void)
53{
54 local_irq_disable();
55 while (!need_resched()) {
56 /* This stop will re-enable interrupts */
57 __asm__("stop #0x2000" : : : "cc");
58 local_irq_disable();
59 }
60 local_irq_enable();
61}
62
63void (*idle)(void) = default_idle;
64
65/*
66 * The idle thread. There's no useful work to be
67 * done, so just try to conserve power and have a
68 * low exit latency (ie sit in a loop waiting for
69 * somebody to say that they'd like to reschedule)
70 */
71void cpu_idle(void)
72{
73 /* endless idle loop with no priority at all */
74 while (1) {
75 idle();
76 preempt_enable_no_resched();
77 schedule();
78 preempt_disable();
79 }
80}
81
82void machine_restart(char * __unused)
83{
84 if (mach_reset)
85 mach_reset();
86 for (;;);
87}
88
89void machine_halt(void)
90{
91 if (mach_halt)
92 mach_halt();
93 for (;;);
94}
95
96void machine_power_off(void)
97{
98 if (mach_power_off)
99 mach_power_off();
100 for (;;);
101}
102
103void show_regs(struct pt_regs * regs)
104{
105 printk(KERN_NOTICE "\n");
106 printk(KERN_NOTICE "Format %02x Vector: %04x PC: %08lx Status: %04x %s\n",
107 regs->format, regs->vector, regs->pc, regs->sr, print_tainted());
108 printk(KERN_NOTICE "ORIG_D0: %08lx D0: %08lx A2: %08lx A1: %08lx\n",
109 regs->orig_d0, regs->d0, regs->a2, regs->a1);
110 printk(KERN_NOTICE "A0: %08lx D5: %08lx D4: %08lx\n",
111 regs->a0, regs->d5, regs->d4);
112 printk(KERN_NOTICE "D3: %08lx D2: %08lx D1: %08lx\n",
113 regs->d3, regs->d2, regs->d1);
114 if (!(regs->sr & PS_S))
115 printk(KERN_NOTICE "USP: %08lx\n", rdusp());
116}
117
118/*
119 * Create a kernel thread
120 */
121int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
122{
123 int retval;
124 long clone_arg = flags | CLONE_VM;
125 mm_segment_t fs;
126
127 fs = get_fs();
128 set_fs(KERNEL_DS);
129
130 __asm__ __volatile__ (
131 "movel %%sp, %%d2\n\t"
132 "movel %5, %%d1\n\t"
133 "movel %1, %%d0\n\t"
134 "trap #0\n\t"
135 "cmpl %%sp, %%d2\n\t"
136 "jeq 1f\n\t"
137 "movel %3, %%sp@-\n\t"
138 "jsr %4@\n\t"
139 "movel %2, %%d0\n\t"
140 "trap #0\n"
141 "1:\n\t"
142 "movel %%d0, %0\n"
143 : "=d" (retval)
144 : "i" (__NR_clone),
145 "i" (__NR_exit),
146 "a" (arg),
147 "a" (fn),
148 "a" (clone_arg)
149 : "cc", "%d0", "%d1", "%d2");
150
151 set_fs(fs);
152 return retval;
153}
154EXPORT_SYMBOL(kernel_thread);
155
156void flush_thread(void)
157{
158#ifdef CONFIG_FPU
159 unsigned long zero = 0;
160#endif
161
162 current->thread.fs = __USER_DS;
163#ifdef CONFIG_FPU
164 if (!FPU_IS_EMU)
165 asm volatile (".chip 68k/68881\n\t"
166 "frestore %0@\n\t"
167 ".chip 68k" : : "a" (&zero));
168#endif
169}
170
171/*
172 * "m68k_fork()".. By the time we get here, the
173 * non-volatile registers have also been saved on the
174 * stack. We do some ugly pointer stuff here.. (see
175 * also copy_thread)
176 */
177
178asmlinkage int m68k_fork(struct pt_regs *regs)
179{
180 /* fork almost works, enough to trick you into looking elsewhere :-( */
181 return(-EINVAL);
182}
183
184asmlinkage int m68k_vfork(struct pt_regs *regs)
185{
186 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL, NULL);
187}
188
189asmlinkage int m68k_clone(struct pt_regs *regs)
190{
191 unsigned long clone_flags;
192 unsigned long newsp;
193
194 /* syscall2 puts clone_flags in d1 and usp in d2 */
195 clone_flags = regs->d1;
196 newsp = regs->d2;
197 if (!newsp)
198 newsp = rdusp();
199 return do_fork(clone_flags, newsp, regs, 0, NULL, NULL);
200}
201
202int copy_thread(unsigned long clone_flags,
203 unsigned long usp, unsigned long topstk,
204 struct task_struct * p, struct pt_regs * regs)
205{
206 struct pt_regs * childregs;
207 struct switch_stack * childstack, *stack;
208 unsigned long *retp;
209
210 childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
211
212 *childregs = *regs;
213 childregs->d0 = 0;
214
215 retp = ((unsigned long *) regs);
216 stack = ((struct switch_stack *) retp) - 1;
217
218 childstack = ((struct switch_stack *) childregs) - 1;
219 *childstack = *stack;
220 childstack->retpc = (unsigned long)ret_from_fork;
221
222 p->thread.usp = usp;
223 p->thread.ksp = (unsigned long)childstack;
224
225 if (clone_flags & CLONE_SETTLS)
226 task_thread_info(p)->tp_value = regs->d5;
227
228 /*
229 * Must save the current SFC/DFC value, NOT the value when
230 * the parent was last descheduled - RGH 10-08-96
231 */
232 p->thread.fs = get_fs().seg;
233
234#ifdef CONFIG_FPU
235 if (!FPU_IS_EMU) {
236 /* Copy the current fpu state */
237 asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory");
238
239 if (p->thread.fpstate[0])
240 asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t"
241 "fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
242 : : "m" (p->thread.fp[0]), "m" (p->thread.fpcntl[0])
243 : "memory");
244 /* Restore the state in case the fpu was busy */
245 asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0]));
246 }
247#endif
248
249 return 0;
250}
251
252/* Fill in the fpu structure for a core dump. */
253
254int dump_fpu(struct pt_regs *regs, struct user_m68kfp_struct *fpu)
255{
256#ifdef CONFIG_FPU
257 char fpustate[216];
258
259 if (FPU_IS_EMU) {
260 int i;
261
262 memcpy(fpu->fpcntl, current->thread.fpcntl, 12);
263 memcpy(fpu->fpregs, current->thread.fp, 96);
264 /* Convert internal fpu reg representation
265 * into long double format
266 */
267 for (i = 0; i < 24; i += 3)
268 fpu->fpregs[i] = ((fpu->fpregs[i] & 0xffff0000) << 15) |
269 ((fpu->fpregs[i] & 0x0000ffff) << 16);
270 return 1;
271 }
272
273 /* First dump the fpu context to avoid protocol violation. */
274 asm volatile ("fsave %0" :: "m" (fpustate[0]) : "memory");
275 if (!fpustate[0])
276 return 0;
277
278 asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
279 :: "m" (fpu->fpcntl[0])
280 : "memory");
281 asm volatile ("fmovemx %/fp0-%/fp7,%0"
282 :: "m" (fpu->fpregs[0])
283 : "memory");
284#endif
285 return 1;
286}
287EXPORT_SYMBOL(dump_fpu);
288
289/*
290 * Generic dumping code. Used for panic and debug.
291 */
292void dump(struct pt_regs *fp)
293{
294 unsigned long *sp;
295 unsigned char *tp;
296 int i;
297
298 printk(KERN_EMERG "\nCURRENT PROCESS:\n\n");
299 printk(KERN_EMERG "COMM=%s PID=%d\n", current->comm, current->pid);
300
301 if (current->mm) {
302 printk(KERN_EMERG "TEXT=%08x-%08x DATA=%08x-%08x BSS=%08x-%08x\n",
303 (int) current->mm->start_code,
304 (int) current->mm->end_code,
305 (int) current->mm->start_data,
306 (int) current->mm->end_data,
307 (int) current->mm->end_data,
308 (int) current->mm->brk);
309 printk(KERN_EMERG "USER-STACK=%08x KERNEL-STACK=%08x\n\n",
310 (int) current->mm->start_stack,
311 (int)(((unsigned long) current) + THREAD_SIZE));
312 }
313
314 printk(KERN_EMERG "PC: %08lx\n", fp->pc);
315 printk(KERN_EMERG "SR: %08lx SP: %08lx\n", (long) fp->sr, (long) fp);
316 printk(KERN_EMERG "d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n",
317 fp->d0, fp->d1, fp->d2, fp->d3);
318 printk(KERN_EMERG "d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
319 fp->d4, fp->d5, fp->a0, fp->a1);
320 printk(KERN_EMERG "\nUSP: %08x TRAPFRAME: %p\n",
321 (unsigned int) rdusp(), fp);
322
323 printk(KERN_EMERG "\nCODE:");
324 tp = ((unsigned char *) fp->pc) - 0x20;
325 for (sp = (unsigned long *) tp, i = 0; (i < 0x40); i += 4) {
326 if ((i % 0x10) == 0)
327 printk(KERN_EMERG "%p: ", tp + i);
328 printk("%08x ", (int) *sp++);
329 }
330 printk(KERN_EMERG "\n");
331
332 printk(KERN_EMERG "KERNEL STACK:");
333 tp = ((unsigned char *) fp) - 0x40;
334 for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) {
335 if ((i % 0x10) == 0)
336 printk(KERN_EMERG "%p: ", tp + i);
337 printk("%08x ", (int) *sp++);
338 }
339 printk(KERN_EMERG "\n");
340
341 printk(KERN_EMERG "USER STACK:");
342 tp = (unsigned char *) (rdusp() - 0x10);
343 for (sp = (unsigned long *) tp, i = 0; (i < 0x80); i += 4) {
344 if ((i % 0x10) == 0)
345 printk(KERN_EMERG "%p: ", tp + i);
346 printk("%08x ", (int) *sp++);
347 }
348 printk(KERN_EMERG "\n");
349}
350
351/*
352 * sys_execve() executes a new program.
353 */
354asmlinkage int sys_execve(const char *name,
355 const char *const *argv,
356 const char *const *envp)
357{
358 int error;
359 char * filename;
360 struct pt_regs *regs = (struct pt_regs *) &name;
361
362 filename = getname(name);
363 error = PTR_ERR(filename);
364 if (IS_ERR(filename))
365 return error;
366 error = do_execve(filename, argv, envp, regs);
367 putname(filename);
368 return error;
369}
370
371unsigned long get_wchan(struct task_struct *p)
372{
373 unsigned long fp, pc;
374 unsigned long stack_page;
375 int count = 0;
376 if (!p || p == current || p->state == TASK_RUNNING)
377 return 0;
378
379 stack_page = (unsigned long)p;
380 fp = ((struct switch_stack *)p->thread.ksp)->a6;
381 do {
382 if (fp < stack_page+sizeof(struct thread_info) ||
383 fp >= THREAD_SIZE-8+stack_page)
384 return 0;
385 pc = ((unsigned long *)fp)[1];
386 if (!in_sched_functions(pc))
387 return pc;
388 fp = *(unsigned long *) fp;
389 } while (count++ < 16);
390 return 0;
391}
392
393/*
394 * Return saved PC of a blocked thread.
395 */
396unsigned long thread_saved_pc(struct task_struct *tsk)
397{
398 struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
399
400 /* Check whether the thread is blocked in resume() */
401 if (in_sched_functions(sw->retpc))
402 return ((unsigned long *)sw->a6)[1];
403 else
404 return sw->retpc;
405}
406
diff --git a/arch/m68k/kernel/ptrace_mm.c b/arch/m68k/kernel/ptrace_mm.c
new file mode 100644
index 00000000000..0b252683cef
--- /dev/null
+++ b/arch/m68k/kernel/ptrace_mm.c
@@ -0,0 +1,277 @@
1/*
2 * linux/arch/m68k/kernel/ptrace.c
3 *
4 * Copyright (C) 1994 by Hamish Macdonald
5 * Taken from linux/kernel/ptrace.c and modified for M680x0.
6 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of
10 * this archive for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/sched.h>
15#include <linux/mm.h>
16#include <linux/smp.h>
17#include <linux/errno.h>
18#include <linux/ptrace.h>
19#include <linux/user.h>
20#include <linux/signal.h>
21
22#include <asm/uaccess.h>
23#include <asm/page.h>
24#include <asm/pgtable.h>
25#include <asm/system.h>
26#include <asm/processor.h>
27
28/*
29 * does not yet catch signals sent when the child dies.
30 * in exit.c or in signal.c.
31 */
32
33/* determines which bits in the SR the user has access to. */
34/* 1 = access 0 = no access */
35#define SR_MASK 0x001f
36
37/* sets the trace bits. */
38#define TRACE_BITS 0xC000
39#define T1_BIT 0x8000
40#define T0_BIT 0x4000
41
42/* Find the stack offset for a register, relative to thread.esp0. */
43#define PT_REG(reg) ((long)&((struct pt_regs *)0)->reg)
44#define SW_REG(reg) ((long)&((struct switch_stack *)0)->reg \
45 - sizeof(struct switch_stack))
46/* Mapping from PT_xxx to the stack offset at which the register is
47 saved. Notice that usp has no stack-slot and needs to be treated
48 specially (see get_reg/put_reg below). */
49static const int regoff[] = {
50 [0] = PT_REG(d1),
51 [1] = PT_REG(d2),
52 [2] = PT_REG(d3),
53 [3] = PT_REG(d4),
54 [4] = PT_REG(d5),
55 [5] = SW_REG(d6),
56 [6] = SW_REG(d7),
57 [7] = PT_REG(a0),
58 [8] = PT_REG(a1),
59 [9] = PT_REG(a2),
60 [10] = SW_REG(a3),
61 [11] = SW_REG(a4),
62 [12] = SW_REG(a5),
63 [13] = SW_REG(a6),
64 [14] = PT_REG(d0),
65 [15] = -1,
66 [16] = PT_REG(orig_d0),
67 [17] = PT_REG(sr),
68 [18] = PT_REG(pc),
69};
70
71/*
72 * Get contents of register REGNO in task TASK.
73 */
74static inline long get_reg(struct task_struct *task, int regno)
75{
76 unsigned long *addr;
77
78 if (regno == PT_USP)
79 addr = &task->thread.usp;
80 else if (regno < ARRAY_SIZE(regoff))
81 addr = (unsigned long *)(task->thread.esp0 + regoff[regno]);
82 else
83 return 0;
84 /* Need to take stkadj into account. */
85 if (regno == PT_SR || regno == PT_PC) {
86 long stkadj = *(long *)(task->thread.esp0 + PT_REG(stkadj));
87 addr = (unsigned long *) ((unsigned long)addr + stkadj);
88 /* The sr is actually a 16 bit register. */
89 if (regno == PT_SR)
90 return *(unsigned short *)addr;
91 }
92 return *addr;
93}
94
95/*
96 * Write contents of register REGNO in task TASK.
97 */
98static inline int put_reg(struct task_struct *task, int regno,
99 unsigned long data)
100{
101 unsigned long *addr;
102
103 if (regno == PT_USP)
104 addr = &task->thread.usp;
105 else if (regno < ARRAY_SIZE(regoff))
106 addr = (unsigned long *)(task->thread.esp0 + regoff[regno]);
107 else
108 return -1;
109 /* Need to take stkadj into account. */
110 if (regno == PT_SR || regno == PT_PC) {
111 long stkadj = *(long *)(task->thread.esp0 + PT_REG(stkadj));
112 addr = (unsigned long *) ((unsigned long)addr + stkadj);
113 /* The sr is actually a 16 bit register. */
114 if (regno == PT_SR) {
115 *(unsigned short *)addr = data;
116 return 0;
117 }
118 }
119 *addr = data;
120 return 0;
121}
122
123/*
124 * Make sure the single step bit is not set.
125 */
126static inline void singlestep_disable(struct task_struct *child)
127{
128 unsigned long tmp = get_reg(child, PT_SR) & ~TRACE_BITS;
129 put_reg(child, PT_SR, tmp);
130 clear_tsk_thread_flag(child, TIF_DELAYED_TRACE);
131}
132
133/*
134 * Called by kernel/ptrace.c when detaching..
135 */
136void ptrace_disable(struct task_struct *child)
137{
138 singlestep_disable(child);
139}
140
141void user_enable_single_step(struct task_struct *child)
142{
143 unsigned long tmp = get_reg(child, PT_SR) & ~TRACE_BITS;
144 put_reg(child, PT_SR, tmp | T1_BIT);
145 set_tsk_thread_flag(child, TIF_DELAYED_TRACE);
146}
147
148void user_enable_block_step(struct task_struct *child)
149{
150 unsigned long tmp = get_reg(child, PT_SR) & ~TRACE_BITS;
151 put_reg(child, PT_SR, tmp | T0_BIT);
152}
153
154void user_disable_single_step(struct task_struct *child)
155{
156 singlestep_disable(child);
157}
158
159long arch_ptrace(struct task_struct *child, long request,
160 unsigned long addr, unsigned long data)
161{
162 unsigned long tmp;
163 int i, ret = 0;
164 int regno = addr >> 2; /* temporary hack. */
165 unsigned long __user *datap = (unsigned long __user *) data;
166
167 switch (request) {
168 /* read the word at location addr in the USER area. */
169 case PTRACE_PEEKUSR:
170 if (addr & 3)
171 goto out_eio;
172
173 if (regno >= 0 && regno < 19) {
174 tmp = get_reg(child, regno);
175 } else if (regno >= 21 && regno < 49) {
176 tmp = child->thread.fp[regno - 21];
177 /* Convert internal fpu reg representation
178 * into long double format
179 */
180 if (FPU_IS_EMU && (regno < 45) && !(regno % 3))
181 tmp = ((tmp & 0xffff0000) << 15) |
182 ((tmp & 0x0000ffff) << 16);
183 } else
184 goto out_eio;
185 ret = put_user(tmp, datap);
186 break;
187
188 case PTRACE_POKEUSR:
189 /* write the word at location addr in the USER area */
190 if (addr & 3)
191 goto out_eio;
192
193 if (regno == PT_SR) {
194 data &= SR_MASK;
195 data |= get_reg(child, PT_SR) & ~SR_MASK;
196 }
197 if (regno >= 0 && regno < 19) {
198 if (put_reg(child, regno, data))
199 goto out_eio;
200 } else if (regno >= 21 && regno < 48) {
201 /* Convert long double format
202 * into internal fpu reg representation
203 */
204 if (FPU_IS_EMU && (regno < 45) && !(regno % 3)) {
205 data <<= 15;
206 data = (data & 0xffff0000) |
207 ((data & 0x0000ffff) >> 1);
208 }
209 child->thread.fp[regno - 21] = data;
210 } else
211 goto out_eio;
212 break;
213
214 case PTRACE_GETREGS: /* Get all gp regs from the child. */
215 for (i = 0; i < 19; i++) {
216 tmp = get_reg(child, i);
217 ret = put_user(tmp, datap);
218 if (ret)
219 break;
220 datap++;
221 }
222 break;
223
224 case PTRACE_SETREGS: /* Set all gp regs in the child. */
225 for (i = 0; i < 19; i++) {
226 ret = get_user(tmp, datap);
227 if (ret)
228 break;
229 if (i == PT_SR) {
230 tmp &= SR_MASK;
231 tmp |= get_reg(child, PT_SR) & ~SR_MASK;
232 }
233 put_reg(child, i, tmp);
234 datap++;
235 }
236 break;
237
238 case PTRACE_GETFPREGS: /* Get the child FPU state. */
239 if (copy_to_user(datap, &child->thread.fp,
240 sizeof(struct user_m68kfp_struct)))
241 ret = -EFAULT;
242 break;
243
244 case PTRACE_SETFPREGS: /* Set the child FPU state. */
245 if (copy_from_user(&child->thread.fp, datap,
246 sizeof(struct user_m68kfp_struct)))
247 ret = -EFAULT;
248 break;
249
250 case PTRACE_GET_THREAD_AREA:
251 ret = put_user(task_thread_info(child)->tp_value, datap);
252 break;
253
254 default:
255 ret = ptrace_request(child, request, addr, data);
256 break;
257 }
258
259 return ret;
260out_eio:
261 return -EIO;
262}
263
264asmlinkage void syscall_trace(void)
265{
266 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
267 ? 0x80 : 0));
268 /*
269 * this isn't the same as continuing with a signal, but it will do
270 * for normal use. strace only continues with a signal if the
271 * stopping signal is not SIGTRAP. -brl
272 */
273 if (current->exit_code) {
274 send_sig(current->exit_code, current, 1);
275 current->exit_code = 0;
276 }
277}
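
arch_ptrace() above turns the PTRACE_PEEKUSR byte offset into a register index (addr >> 2) and looks it up in regoff[], where index 18 is the saved PC. A hedged userspace sketch (not part of the patch; glibc spells the request PTRACE_PEEKUSER) of reading a stopped child's PC through that path:

	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/wait.h>

	static void example_read_pc(pid_t child)
	{
		long pc;

		waitpid(child, NULL, 0);  /* wait for the child to stop */

		/* offset = regno * 4; regno 18 is the PC slot in regoff[] above */
		pc = ptrace(PTRACE_PEEKUSER, child, (void *)(18 * 4), NULL);
		printf("child PC = %#lx\n", (unsigned long)pc);

		ptrace(PTRACE_CONT, child, NULL, NULL);
	}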
diff --git a/arch/m68k/kernel/ptrace_no.c b/arch/m68k/kernel/ptrace_no.c
new file mode 100644
index 00000000000..6709fb70733
--- /dev/null
+++ b/arch/m68k/kernel/ptrace_no.c
@@ -0,0 +1,255 @@
1/*
2 * linux/arch/m68knommu/kernel/ptrace.c
3 *
4 * Copyright (C) 1994 by Hamish Macdonald
5 * Taken from linux/kernel/ptrace.c and modified for M680x0.
6 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of
10 * this archive for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/sched.h>
15#include <linux/mm.h>
16#include <linux/smp.h>
17#include <linux/errno.h>
18#include <linux/ptrace.h>
19#include <linux/user.h>
20#include <linux/signal.h>
21#include <linux/tracehook.h>
22
23#include <asm/uaccess.h>
24#include <asm/page.h>
25#include <asm/pgtable.h>
26#include <asm/system.h>
27#include <asm/processor.h>
28
29/*
30 * does not yet catch signals sent when the child dies.
31 * in exit.c or in signal.c.
32 */
33
34/* determines which bits in the SR the user has access to. */
35/* 1 = access 0 = no access */
36#define SR_MASK 0x001f
37
38/* sets the trace bits. */
39#define TRACE_BITS 0x8000
40
41/* Find the stack offset for a register, relative to thread.esp0. */
42#define PT_REG(reg) ((long)&((struct pt_regs *)0)->reg)
43#define SW_REG(reg) ((long)&((struct switch_stack *)0)->reg \
44 - sizeof(struct switch_stack))
45/* Mapping from PT_xxx to the stack offset at which the register is
46 saved. Notice that usp has no stack-slot and needs to be treated
47 specially (see get_reg/put_reg below). */
48static int regoff[] = {
49 PT_REG(d1), PT_REG(d2), PT_REG(d3), PT_REG(d4),
50 PT_REG(d5), SW_REG(d6), SW_REG(d7), PT_REG(a0),
51 PT_REG(a1), PT_REG(a2), SW_REG(a3), SW_REG(a4),
52 SW_REG(a5), SW_REG(a6), PT_REG(d0), -1,
53 PT_REG(orig_d0), PT_REG(sr), PT_REG(pc),
54};
55
56/*
57 * Get contents of register REGNO in task TASK.
58 */
59static inline long get_reg(struct task_struct *task, int regno)
60{
61 unsigned long *addr;
62
63 if (regno == PT_USP)
64 addr = &task->thread.usp;
65 else if (regno < ARRAY_SIZE(regoff))
66 addr = (unsigned long *)(task->thread.esp0 + regoff[regno]);
67 else
68 return 0;
69 return *addr;
70}
71
72/*
73 * Write contents of register REGNO in task TASK.
74 */
75static inline int put_reg(struct task_struct *task, int regno,
76 unsigned long data)
77{
78 unsigned long *addr;
79
80 if (regno == PT_USP)
81 addr = &task->thread.usp;
82 else if (regno < ARRAY_SIZE(regoff))
83 addr = (unsigned long *) (task->thread.esp0 + regoff[regno]);
84 else
85 return -1;
86 *addr = data;
87 return 0;
88}
89
90void user_enable_single_step(struct task_struct *task)
91{
92 unsigned long srflags;
93 srflags = get_reg(task, PT_SR) | (TRACE_BITS << 16);
94 put_reg(task, PT_SR, srflags);
95}
96
97void user_disable_single_step(struct task_struct *task)
98{
99 unsigned long srflags;
100 srflags = get_reg(task, PT_SR) & ~(TRACE_BITS << 16);
101 put_reg(task, PT_SR, srflags);
102}
103
104/*
105 * Called by kernel/ptrace.c when detaching..
106 *
107 * Make sure the single step bit is not set.
108 */
109void ptrace_disable(struct task_struct *child)
110{
111 /* make sure the single step bit is not set. */
112 user_disable_single_step(child);
113}
114
115long arch_ptrace(struct task_struct *child, long request,
116 unsigned long addr, unsigned long data)
117{
118 int ret;
119 int regno = addr >> 2;
120 unsigned long __user *datap = (unsigned long __user *) data;
121
122 switch (request) {
123 /* read the word at location addr in the USER area. */
124 case PTRACE_PEEKUSR: {
125 unsigned long tmp;
126
127 ret = -EIO;
128 if ((addr & 3) || addr > sizeof(struct user) - 3)
129 break;
130
131 tmp = 0; /* Default return condition */
132 ret = -EIO;
133 if (regno < 19) {
134 tmp = get_reg(child, regno);
135 if (regno == PT_SR)
136 tmp >>= 16;
137 } else if (regno >= 21 && regno < 49) {
138 tmp = child->thread.fp[regno - 21];
139 } else if (regno == 49) {
140 tmp = child->mm->start_code;
141 } else if (regno == 50) {
142 tmp = child->mm->start_data;
143 } else if (regno == 51) {
144 tmp = child->mm->end_code;
145 } else
146 break;
147 ret = put_user(tmp, datap);
148 break;
149 }
150
151 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
152 ret = -EIO;
153 if ((addr & 3) || addr > sizeof(struct user) - 3)
154 break;
155
156 if (regno == PT_SR) {
157 data &= SR_MASK;
158 data <<= 16;
159 data |= get_reg(child, PT_SR) & ~(SR_MASK << 16);
160 }
161 if (regno < 19) {
162 if (put_reg(child, regno, data))
163 break;
164 ret = 0;
165 break;
166 }
167 if (regno >= 21 && regno < 48)
168 {
169 child->thread.fp[regno - 21] = data;
170 ret = 0;
171 }
172 break;
173
174 case PTRACE_GETREGS: { /* Get all gp regs from the child. */
175 int i;
176 unsigned long tmp;
177 for (i = 0; i < 19; i++) {
178 tmp = get_reg(child, i);
179 if (i == PT_SR)
180 tmp >>= 16;
181 if (put_user(tmp, datap)) {
182 ret = -EFAULT;
183 break;
184 }
185 datap++;
186 }
187 ret = 0;
188 break;
189 }
190
191 case PTRACE_SETREGS: { /* Set all gp regs in the child. */
192 int i;
193 unsigned long tmp;
194 for (i = 0; i < 19; i++) {
195 if (get_user(tmp, datap)) {
196 ret = -EFAULT;
197 break;
198 }
199 if (i == PT_SR) {
200 tmp &= SR_MASK;
201 tmp <<= 16;
202 tmp |= get_reg(child, PT_SR) & ~(SR_MASK << 16);
203 }
204 put_reg(child, i, tmp);
205 datap++;
206 }
207 ret = 0;
208 break;
209 }
210
211#ifdef PTRACE_GETFPREGS
212 case PTRACE_GETFPREGS: { /* Get the child FPU state. */
213 ret = 0;
214 if (copy_to_user(datap, &child->thread.fp,
215 sizeof(struct user_m68kfp_struct)))
216 ret = -EFAULT;
217 break;
218 }
219#endif
220
221#ifdef PTRACE_SETFPREGS
222 case PTRACE_SETFPREGS: { /* Set the child FPU state. */
223 ret = 0;
224 if (copy_from_user(&child->thread.fp, datap,
225 sizeof(struct user_m68kfp_struct)))
226 ret = -EFAULT;
227 break;
228 }
229#endif
230
231 case PTRACE_GET_THREAD_AREA:
232 ret = put_user(task_thread_info(child)->tp_value, datap);
233 break;
234
235 default:
236 ret = ptrace_request(child, request, addr, data);
237 break;
238 }
239 return ret;
240}
241
242asmlinkage int syscall_trace_enter(void)
243{
244 int ret = 0;
245
246 if (test_thread_flag(TIF_SYSCALL_TRACE))
247 ret = tracehook_report_syscall_entry(task_pt_regs(current));
248 return ret;
249}
250
251asmlinkage void syscall_trace_leave(void)
252{
253 if (test_thread_flag(TIF_SYSCALL_TRACE))
254 tracehook_report_syscall_exit(task_pt_regs(current), 0);
255}
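
syscall_trace_enter()/syscall_trace_leave() above are what a tracer reaches with PTRACE_SYSCALL: the child stops once on syscall entry and once on exit. A hedged userspace sketch (not part of the patch) of the driving loop:

	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/wait.h>

	static void example_trace_syscalls(pid_t child)
	{
		int status;

		waitpid(child, &status, 0);  /* initial stop after PTRACE_TRACEME + exec */
		while (!WIFEXITED(status)) {
			/* resume until the next syscall entry or exit stop */
			if (ptrace(PTRACE_SYSCALL, child, NULL, NULL) < 0)
				break;
			waitpid(child, &status, 0);
		}
	}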
diff --git a/arch/m68k/kernel/signal_mm.c b/arch/m68k/kernel/signal_mm.c
new file mode 100644
index 00000000000..a0afc239304
--- /dev/null
+++ b/arch/m68k/kernel/signal_mm.c
@@ -0,0 +1,1017 @@
1/*
2 * linux/arch/m68k/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file COPYING in the main directory of this archive
8 * for more details.
9 */
10
11/*
12 * Linux/m68k support by Hamish Macdonald
13 *
14 * 68060 fixes by Jesper Skov
15 *
16 * 1997-12-01 Modified for POSIX.1b signals by Andreas Schwab
17 *
18 * mathemu support by Roman Zippel
19 * (Note: fpstate in the signal context is completely ignored for the emulator
20 * and the internal floating point format is put on stack)
21 */
22
23/*
24 * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
25 * Atari :-) Current limitation: Only one sigstack can be active at one time.
26 * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
27 * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
28 * signal handlers!
29 */
30
31#include <linux/sched.h>
32#include <linux/mm.h>
33#include <linux/kernel.h>
34#include <linux/signal.h>
35#include <linux/syscalls.h>
36#include <linux/errno.h>
37#include <linux/wait.h>
38#include <linux/ptrace.h>
39#include <linux/unistd.h>
40#include <linux/stddef.h>
41#include <linux/highuid.h>
42#include <linux/personality.h>
43#include <linux/tty.h>
44#include <linux/binfmts.h>
45#include <linux/module.h>
46
47#include <asm/setup.h>
48#include <asm/uaccess.h>
49#include <asm/pgtable.h>
50#include <asm/traps.h>
51#include <asm/ucontext.h>
52
53#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
54
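/*
 * Indexed by the exception frame format code (the top nibble of the
 * format/vector word): each entry is the number of bytes of CPU-specific
 * state that follow the basic four-word frame, with -1 marking formats
 * that signal delivery and return do not handle.
 */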
55static const int frame_extra_sizes[16] = {
56 [1] = -1, /* sizeof(((struct frame *)0)->un.fmt1), */
57 [2] = sizeof(((struct frame *)0)->un.fmt2),
58 [3] = sizeof(((struct frame *)0)->un.fmt3),
59 [4] = sizeof(((struct frame *)0)->un.fmt4),
60 [5] = -1, /* sizeof(((struct frame *)0)->un.fmt5), */
61 [6] = -1, /* sizeof(((struct frame *)0)->un.fmt6), */
62 [7] = sizeof(((struct frame *)0)->un.fmt7),
63 [8] = -1, /* sizeof(((struct frame *)0)->un.fmt8), */
64 [9] = sizeof(((struct frame *)0)->un.fmt9),
65 [10] = sizeof(((struct frame *)0)->un.fmta),
66 [11] = sizeof(((struct frame *)0)->un.fmtb),
67 [12] = -1, /* sizeof(((struct frame *)0)->un.fmtc), */
68 [13] = -1, /* sizeof(((struct frame *)0)->un.fmtd), */
69 [14] = -1, /* sizeof(((struct frame *)0)->un.fmte), */
70 [15] = -1, /* sizeof(((struct frame *)0)->un.fmtf), */
71};
72
73int handle_kernel_fault(struct pt_regs *regs)
74{
75 const struct exception_table_entry *fixup;
76 struct pt_regs *tregs;
77
78 /* Are we prepared to handle this kernel fault? */
79 fixup = search_exception_tables(regs->pc);
80 if (!fixup)
81 return 0;
82
83 /* Create a new four word stack frame, discarding the old one. */
84 regs->stkadj = frame_extra_sizes[regs->format];
85 tregs = (struct pt_regs *)((long)regs + regs->stkadj);
86 tregs->vector = regs->vector;
87 tregs->format = 0;
88 tregs->pc = fixup->fixup;
89 tregs->sr = regs->sr;
90
91 return 1;
92}
93
94/*
95 * Atomically swap in the new signal mask, and wait for a signal.
96 */
97asmlinkage int
98sys_sigsuspend(int unused0, int unused1, old_sigset_t mask)
99{
100 mask &= _BLOCKABLE;
101 spin_lock_irq(&current->sighand->siglock);
102 current->saved_sigmask = current->blocked;
103 siginitset(&current->blocked, mask);
104 recalc_sigpending();
105 spin_unlock_irq(&current->sighand->siglock);
106
107 current->state = TASK_INTERRUPTIBLE;
108 schedule();
109 set_restore_sigmask();
110
111 return -ERESTARTNOHAND;
112}
113
114asmlinkage int
115sys_sigaction(int sig, const struct old_sigaction __user *act,
116 struct old_sigaction __user *oact)
117{
118 struct k_sigaction new_ka, old_ka;
119 int ret;
120
121 if (act) {
122 old_sigset_t mask;
123 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
124 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
125 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
126 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
127 __get_user(mask, &act->sa_mask))
128 return -EFAULT;
129 siginitset(&new_ka.sa.sa_mask, mask);
130 }
131
132 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
133
134 if (!ret && oact) {
135 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
136 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
137 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
138 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
139 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
140 return -EFAULT;
141 }
142
143 return ret;
144}
145
146asmlinkage int
147sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
148{
149 return do_sigaltstack(uss, uoss, rdusp());
150}
151
152
153/*
154 * Do a signal return; undo the signal stack.
155 *
156 * Keep the return code on the stack quadword aligned!
157 * That makes the cache flush below easier.
158 */
159
160struct sigframe
161{
162 char __user *pretcode;
163 int sig;
164 int code;
165 struct sigcontext __user *psc;
166 char retcode[8];
167 unsigned long extramask[_NSIG_WORDS-1];
168 struct sigcontext sc;
169};
170
171struct rt_sigframe
172{
173 char __user *pretcode;
174 int sig;
175 struct siginfo __user *pinfo;
176 void __user *puc;
177 char retcode[8];
178 struct siginfo info;
179 struct ucontext uc;
180};
181
182
183static unsigned char fpu_version; /* version number of fpu, set by setup_frame */
184
185static inline int restore_fpu_state(struct sigcontext *sc)
186{
187 int err = 1;
188
189 if (FPU_IS_EMU) {
190 /* restore registers */
191 memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
192 memcpy(current->thread.fp, sc->sc_fpregs, 24);
193 return 0;
194 }
195
196 if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
197 /* Verify the frame format. */
198 if (!CPU_IS_060 && (sc->sc_fpstate[0] != fpu_version))
199 goto out;
200 if (CPU_IS_020_OR_030) {
201 if (m68k_fputype & FPU_68881 &&
202 !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4))
203 goto out;
204 if (m68k_fputype & FPU_68882 &&
205 !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4))
206 goto out;
207 } else if (CPU_IS_040) {
208 if (!(sc->sc_fpstate[1] == 0x00 ||
209 sc->sc_fpstate[1] == 0x28 ||
210 sc->sc_fpstate[1] == 0x60))
211 goto out;
212 } else if (CPU_IS_060) {
213 if (!(sc->sc_fpstate[3] == 0x00 ||
214 sc->sc_fpstate[3] == 0x60 ||
215 sc->sc_fpstate[3] == 0xe0))
216 goto out;
217 } else
218 goto out;
219
220 __asm__ volatile (".chip 68k/68881\n\t"
221 "fmovemx %0,%%fp0-%%fp1\n\t"
222 "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
223 ".chip 68k"
224 : /* no outputs */
225 : "m" (*sc->sc_fpregs), "m" (*sc->sc_fpcntl));
226 }
227 __asm__ volatile (".chip 68k/68881\n\t"
228 "frestore %0\n\t"
229 ".chip 68k" : : "m" (*sc->sc_fpstate));
230 err = 0;
231
232out:
233 return err;
234}
235
236#define FPCONTEXT_SIZE 216
237#define uc_fpstate uc_filler[0]
238#define uc_formatvec uc_filler[FPCONTEXT_SIZE/4]
239#define uc_extra uc_filler[FPCONTEXT_SIZE/4+1]
240
241static inline int rt_restore_fpu_state(struct ucontext __user *uc)
242{
243 unsigned char fpstate[FPCONTEXT_SIZE];
244 int context_size = CPU_IS_060 ? 8 : 0;
245 fpregset_t fpregs;
246 int err = 1;
247
248 if (FPU_IS_EMU) {
249 /* restore fpu control register */
250 if (__copy_from_user(current->thread.fpcntl,
251 uc->uc_mcontext.fpregs.f_fpcntl, 12))
252 goto out;
253		/* restore all other fpu registers */
254 if (__copy_from_user(current->thread.fp,
255 uc->uc_mcontext.fpregs.f_fpregs, 96))
256 goto out;
257 return 0;
258 }
259
260 if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
261 goto out;
262 if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
263 if (!CPU_IS_060)
264 context_size = fpstate[1];
265 /* Verify the frame format. */
266 if (!CPU_IS_060 && (fpstate[0] != fpu_version))
267 goto out;
268 if (CPU_IS_020_OR_030) {
269 if (m68k_fputype & FPU_68881 &&
270 !(context_size == 0x18 || context_size == 0xb4))
271 goto out;
272 if (m68k_fputype & FPU_68882 &&
273 !(context_size == 0x38 || context_size == 0xd4))
274 goto out;
275 } else if (CPU_IS_040) {
276 if (!(context_size == 0x00 ||
277 context_size == 0x28 ||
278 context_size == 0x60))
279 goto out;
280 } else if (CPU_IS_060) {
281 if (!(fpstate[3] == 0x00 ||
282 fpstate[3] == 0x60 ||
283 fpstate[3] == 0xe0))
284 goto out;
285 } else
286 goto out;
287 if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
288 sizeof(fpregs)))
289 goto out;
290 __asm__ volatile (".chip 68k/68881\n\t"
291 "fmovemx %0,%%fp0-%%fp7\n\t"
292 "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
293 ".chip 68k"
294 : /* no outputs */
295 : "m" (*fpregs.f_fpregs),
296 "m" (*fpregs.f_fpcntl));
297 }
298 if (context_size &&
299 __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
300 context_size))
301 goto out;
302 __asm__ volatile (".chip 68k/68881\n\t"
303 "frestore %0\n\t"
304 ".chip 68k" : : "m" (*fpstate));
305 err = 0;
306
307out:
308 return err;
309}
310
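/*
 * Put the hardware exception frame described by formatvec back in place.
 * A plain four-word frame only needs format/vector patched into pt_regs;
 * if the saved frame carried extra exception state, the switch_stack +
 * pt_regs block is slid down the kernel stack to make room, the extra
 * words are copied into the gap from user space, and control jumps
 * straight to ret_from_signal, since the C frame is no longer valid
 * after the move (a rough summary of the asm below).
 */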
311static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
312 void __user *fp)
313{
314 int fsize = frame_extra_sizes[formatvec >> 12];
315 if (fsize < 0) {
316 /*
317 * user process trying to return with weird frame format
318 */
319#ifdef DEBUG
320 printk("user process returning with weird frame format\n");
321#endif
322 return 1;
323 }
324 if (!fsize) {
325 regs->format = formatvec >> 12;
326 regs->vector = formatvec & 0xfff;
327 } else {
328 struct switch_stack *sw = (struct switch_stack *)regs - 1;
329 unsigned long buf[fsize / 2]; /* yes, twice as much */
330
331 /* that'll make sure that expansion won't crap over data */
332 if (copy_from_user(buf + fsize / 4, fp, fsize))
333 return 1;
334
335 /* point of no return */
336 regs->format = formatvec >> 12;
337 regs->vector = formatvec & 0xfff;
338#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
339 __asm__ __volatile__
340 (" movel %0,%/a0\n\t"
341 " subl %1,%/a0\n\t" /* make room on stack */
342 " movel %/a0,%/sp\n\t" /* set stack pointer */
343 /* move switch_stack and pt_regs */
344 "1: movel %0@+,%/a0@+\n\t"
345 " dbra %2,1b\n\t"
346 " lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
347 " lsrl #2,%1\n\t"
348 " subql #1,%1\n\t"
349 /* copy to the gap we'd made */
350 "2: movel %4@+,%/a0@+\n\t"
351 " dbra %1,2b\n\t"
352 " bral ret_from_signal\n"
353 : /* no outputs, it doesn't ever return */
354 : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
355 "n" (frame_offset), "a" (buf + fsize/4)
356 : "a0");
357#undef frame_offset
358 }
359 return 0;
360}
361
362static inline int
363restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp)
364{
365 int formatvec;
366 struct sigcontext context;
367 int err;
368
369 /* Always make any pending restarted system calls return -EINTR */
370 current_thread_info()->restart_block.fn = do_no_restart_syscall;
371
372 /* get previous context */
373 if (copy_from_user(&context, usc, sizeof(context)))
374 goto badframe;
375
376 /* restore passed registers */
377 regs->d0 = context.sc_d0;
378 regs->d1 = context.sc_d1;
379 regs->a0 = context.sc_a0;
380 regs->a1 = context.sc_a1;
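	/* only the condition code byte may be restored from user space;
	   the supervisor half of the status register is preserved */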
381 regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
382 regs->pc = context.sc_pc;
383 regs->orig_d0 = -1; /* disable syscall checks */
384 wrusp(context.sc_usp);
385 formatvec = context.sc_formatvec;
386
387 err = restore_fpu_state(&context);
388
389 if (err || mangle_kernel_stack(regs, formatvec, fp))
390 goto badframe;
391
392 return 0;
393
394badframe:
395 return 1;
396}
397
398static inline int
399rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
400 struct ucontext __user *uc)
401{
402 int temp;
403 greg_t __user *gregs = uc->uc_mcontext.gregs;
404 unsigned long usp;
405 int err;
406
407 /* Always make any pending restarted system calls return -EINTR */
408 current_thread_info()->restart_block.fn = do_no_restart_syscall;
409
410 err = __get_user(temp, &uc->uc_mcontext.version);
411 if (temp != MCONTEXT_VERSION)
412 goto badframe;
413 /* restore passed registers */
414 err |= __get_user(regs->d0, &gregs[0]);
415 err |= __get_user(regs->d1, &gregs[1]);
416 err |= __get_user(regs->d2, &gregs[2]);
417 err |= __get_user(regs->d3, &gregs[3]);
418 err |= __get_user(regs->d4, &gregs[4]);
419 err |= __get_user(regs->d5, &gregs[5]);
420 err |= __get_user(sw->d6, &gregs[6]);
421 err |= __get_user(sw->d7, &gregs[7]);
422 err |= __get_user(regs->a0, &gregs[8]);
423 err |= __get_user(regs->a1, &gregs[9]);
424 err |= __get_user(regs->a2, &gregs[10]);
425 err |= __get_user(sw->a3, &gregs[11]);
426 err |= __get_user(sw->a4, &gregs[12]);
427 err |= __get_user(sw->a5, &gregs[13]);
428 err |= __get_user(sw->a6, &gregs[14]);
429 err |= __get_user(usp, &gregs[15]);
430 wrusp(usp);
431 err |= __get_user(regs->pc, &gregs[16]);
432 err |= __get_user(temp, &gregs[17]);
433 regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
434 regs->orig_d0 = -1; /* disable syscall checks */
435 err |= __get_user(temp, &uc->uc_formatvec);
436
437 err |= rt_restore_fpu_state(uc);
438
439 if (err || do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT)
440 goto badframe;
441
442 if (mangle_kernel_stack(regs, temp, &uc->uc_extra))
443 goto badframe;
444
445 return 0;
446
447badframe:
448 return 1;
449}
450
451asmlinkage int do_sigreturn(unsigned long __unused)
452{
453 struct switch_stack *sw = (struct switch_stack *) &__unused;
454 struct pt_regs *regs = (struct pt_regs *) (sw + 1);
455 unsigned long usp = rdusp();
456 struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
457 sigset_t set;
458
459 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
460 goto badframe;
461 if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
462 (_NSIG_WORDS > 1 &&
463 __copy_from_user(&set.sig[1], &frame->extramask,
464 sizeof(frame->extramask))))
465 goto badframe;
466
467 sigdelsetmask(&set, ~_BLOCKABLE);
468 current->blocked = set;
469 recalc_sigpending();
470
471 if (restore_sigcontext(regs, &frame->sc, frame + 1))
472 goto badframe;
473 return regs->d0;
474
475badframe:
476 force_sig(SIGSEGV, current);
477 return 0;
478}
479
480asmlinkage int do_rt_sigreturn(unsigned long __unused)
481{
482 struct switch_stack *sw = (struct switch_stack *) &__unused;
483 struct pt_regs *regs = (struct pt_regs *) (sw + 1);
484 unsigned long usp = rdusp();
485 struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
486 sigset_t set;
487
488 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
489 goto badframe;
490 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
491 goto badframe;
492
493 sigdelsetmask(&set, ~_BLOCKABLE);
494 current->blocked = set;
495 recalc_sigpending();
496
497 if (rt_restore_ucontext(regs, sw, &frame->uc))
498 goto badframe;
499 return regs->d0;
500
501badframe:
502 force_sig(SIGSEGV, current);
503 return 0;
504}
505
506/*
507 * Set up a signal frame.
508 */
509
510static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
511{
512 if (FPU_IS_EMU) {
513 /* save registers */
514 memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
515 memcpy(sc->sc_fpregs, current->thread.fp, 24);
516 return;
517 }
518
519 __asm__ volatile (".chip 68k/68881\n\t"
520 "fsave %0\n\t"
521 ".chip 68k"
522 : : "m" (*sc->sc_fpstate) : "memory");
523
524 if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
525 fpu_version = sc->sc_fpstate[0];
526 if (CPU_IS_020_OR_030 &&
527 regs->vector >= (VEC_FPBRUC * 4) &&
528 regs->vector <= (VEC_FPNAN * 4)) {
529 /* Clear pending exception in 68882 idle frame */
530 if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
531 sc->sc_fpstate[0x38] |= 1 << 3;
532 }
533 __asm__ volatile (".chip 68k/68881\n\t"
534 "fmovemx %%fp0-%%fp1,%0\n\t"
535 "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
536 ".chip 68k"
537 : "=m" (*sc->sc_fpregs),
538 "=m" (*sc->sc_fpcntl)
539 : /* no inputs */
540 : "memory");
541 }
542}
543
544static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
545{
546 unsigned char fpstate[FPCONTEXT_SIZE];
547 int context_size = CPU_IS_060 ? 8 : 0;
548 int err = 0;
549
550 if (FPU_IS_EMU) {
551 /* save fpu control register */
552 err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
553 current->thread.fpcntl, 12);
554		/* save all other fpu registers */
555 err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
556 current->thread.fp, 96);
557 return err;
558 }
559
560 __asm__ volatile (".chip 68k/68881\n\t"
561 "fsave %0\n\t"
562 ".chip 68k"
563 : : "m" (*fpstate) : "memory");
564
565 err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
566 if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
567 fpregset_t fpregs;
568 if (!CPU_IS_060)
569 context_size = fpstate[1];
570 fpu_version = fpstate[0];
571 if (CPU_IS_020_OR_030 &&
572 regs->vector >= (VEC_FPBRUC * 4) &&
573 regs->vector <= (VEC_FPNAN * 4)) {
574 /* Clear pending exception in 68882 idle frame */
575 if (*(unsigned short *) fpstate == 0x1f38)
576 fpstate[0x38] |= 1 << 3;
577 }
578 __asm__ volatile (".chip 68k/68881\n\t"
579 "fmovemx %%fp0-%%fp7,%0\n\t"
580 "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
581 ".chip 68k"
582 : "=m" (*fpregs.f_fpregs),
583 "=m" (*fpregs.f_fpcntl)
584 : /* no inputs */
585 : "memory");
586 err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
587 sizeof(fpregs));
588 }
589 if (context_size)
590 err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4,
591 context_size);
592 return err;
593}
594
595static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
596 unsigned long mask)
597{
598 sc->sc_mask = mask;
599 sc->sc_usp = rdusp();
600 sc->sc_d0 = regs->d0;
601 sc->sc_d1 = regs->d1;
602 sc->sc_a0 = regs->a0;
603 sc->sc_a1 = regs->a1;
604 sc->sc_sr = regs->sr;
605 sc->sc_pc = regs->pc;
606 sc->sc_formatvec = regs->format << 12 | regs->vector;
607 save_fpu_state(sc, regs);
608}
609
610static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
611{
612 struct switch_stack *sw = (struct switch_stack *)regs - 1;
613 greg_t __user *gregs = uc->uc_mcontext.gregs;
614 int err = 0;
615
616 err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
617 err |= __put_user(regs->d0, &gregs[0]);
618 err |= __put_user(regs->d1, &gregs[1]);
619 err |= __put_user(regs->d2, &gregs[2]);
620 err |= __put_user(regs->d3, &gregs[3]);
621 err |= __put_user(regs->d4, &gregs[4]);
622 err |= __put_user(regs->d5, &gregs[5]);
623 err |= __put_user(sw->d6, &gregs[6]);
624 err |= __put_user(sw->d7, &gregs[7]);
625 err |= __put_user(regs->a0, &gregs[8]);
626 err |= __put_user(regs->a1, &gregs[9]);
627 err |= __put_user(regs->a2, &gregs[10]);
628 err |= __put_user(sw->a3, &gregs[11]);
629 err |= __put_user(sw->a4, &gregs[12]);
630 err |= __put_user(sw->a5, &gregs[13]);
631 err |= __put_user(sw->a6, &gregs[14]);
632 err |= __put_user(rdusp(), &gregs[15]);
633 err |= __put_user(regs->pc, &gregs[16]);
634 err |= __put_user(regs->sr, &gregs[17]);
635 err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
636 err |= rt_save_fpu_state(uc, regs);
637 return err;
638}
639
640static inline void push_cache (unsigned long vaddr)
641{
642 /*
643 * Using the old cache_push_v() was really a big waste.
644 *
645 * What we are trying to do is to flush 8 bytes to ram.
646 * Flushing 2 cache lines of 16 bytes is much cheaper than
647 * flushing 1 or 2 pages, as previously done in
648 * cache_push_v().
649 * Jes
650 */
651 if (CPU_IS_040) {
652 unsigned long temp;
653
654 __asm__ __volatile__ (".chip 68040\n\t"
655 "nop\n\t"
656 "ptestr (%1)\n\t"
657 "movec %%mmusr,%0\n\t"
658 ".chip 68k"
659 : "=r" (temp)
660 : "a" (vaddr));
661
662 temp &= PAGE_MASK;
663 temp |= vaddr & ~PAGE_MASK;
664
665 __asm__ __volatile__ (".chip 68040\n\t"
666 "nop\n\t"
667 "cpushl %%bc,(%0)\n\t"
668 ".chip 68k"
669 : : "a" (temp));
670 }
671 else if (CPU_IS_060) {
672 unsigned long temp;
673 __asm__ __volatile__ (".chip 68060\n\t"
674 "plpar (%0)\n\t"
675 ".chip 68k"
676 : "=a" (temp)
677 : "0" (vaddr));
678 __asm__ __volatile__ (".chip 68060\n\t"
679 "cpushl %%bc,(%0)\n\t"
680 ".chip 68k"
681 : : "a" (temp));
682 }
683 else {
684 /*
685 * 68030/68020 have no writeback cache;
686 * still need to clear icache.
687 * Note that vaddr is guaranteed to be long word aligned.
688 */
689 unsigned long temp;
690 asm volatile ("movec %%cacr,%0" : "=r" (temp));
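		/* setting bit 2 of CACR (clear entry) makes the writes below
		   invalidate the single icache entry addressed by CAAR; done
		   for vaddr and vaddr + 4 to cover the 8-byte trampoline */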
691 temp += 4;
692 asm volatile ("movec %0,%%caar\n\t"
693 "movec %1,%%cacr"
694 : : "r" (vaddr), "r" (temp));
695 asm volatile ("movec %0,%%caar\n\t"
696 "movec %1,%%cacr"
697 : : "r" (vaddr + 4), "r" (temp));
698 }
699}
700
701static inline void __user *
702get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
703{
704 unsigned long usp;
705
706 /* Default to using normal stack. */
707 usp = rdusp();
708
709 /* This is the X/Open sanctioned signal stack switching. */
710 if (ka->sa.sa_flags & SA_ONSTACK) {
711 if (!sas_ss_flags(usp))
712 usp = current->sas_ss_sp + current->sas_ss_size;
713 }
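	/* round down to a multiple of 8 so the frame, and with it the
	   return code, stays quadword aligned for the cache push */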
714 return (void __user *)((usp - frame_size) & -8UL);
715}
716
717static int setup_frame (int sig, struct k_sigaction *ka,
718 sigset_t *set, struct pt_regs *regs)
719{
720 struct sigframe __user *frame;
721 int fsize = frame_extra_sizes[regs->format];
722 struct sigcontext context;
723 int err = 0;
724
725 if (fsize < 0) {
726#ifdef DEBUG
727 printk ("setup_frame: Unknown frame format %#x\n",
728 regs->format);
729#endif
730 goto give_sigsegv;
731 }
732
733 frame = get_sigframe(ka, regs, sizeof(*frame) + fsize);
734
735 if (fsize)
736 err |= copy_to_user (frame + 1, regs + 1, fsize);
737
738 err |= __put_user((current_thread_info()->exec_domain
739 && current_thread_info()->exec_domain->signal_invmap
740 && sig < 32
741 ? current_thread_info()->exec_domain->signal_invmap[sig]
742 : sig),
743 &frame->sig);
744
745 err |= __put_user(regs->vector, &frame->code);
746 err |= __put_user(&frame->sc, &frame->psc);
747
748 if (_NSIG_WORDS > 1)
749 err |= copy_to_user(frame->extramask, &set->sig[1],
750 sizeof(frame->extramask));
751
752 setup_sigcontext(&context, regs, set->sig[0]);
753 err |= copy_to_user (&frame->sc, &context, sizeof(context));
754
755 /* Set up to return from userspace. */
756 err |= __put_user(frame->retcode, &frame->pretcode);
757 /* moveq #,d0; trap #0 */
758 err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
759 (long __user *)(frame->retcode));
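	/*
	 * In big-endian memory the long written above is the two instruction
	 * words 0x70nn ("moveq #__NR_sigreturn,%d0") and 0x4e40 ("trap #0");
	 * __NR_sigreturn is small enough to fit moveq's signed 8-bit
	 * immediate, so no fixup is needed.
	 */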
760
761 if (err)
762 goto give_sigsegv;
763
764 push_cache ((unsigned long) &frame->retcode);
765
766 /*
767 * Set up registers for signal handler. All the state we are about
768 * to destroy is successfully copied to sigframe.
769 */
770 wrusp ((unsigned long) frame);
771 regs->pc = (unsigned long) ka->sa.sa_handler;
772
773 /*
774 * This is subtle; if we build more than one sigframe, all but the
775 * first one will see frame format 0 and have fsize == 0, so we won't
776 * screw stkadj.
777 */
778 if (fsize)
779 regs->stkadj = fsize;
780
781 /* Prepare to skip over the extra stuff in the exception frame. */
782 if (regs->stkadj) {
783 struct pt_regs *tregs =
784 (struct pt_regs *)((ulong)regs + regs->stkadj);
785#ifdef DEBUG
786 printk("Performing stackadjust=%04x\n", regs->stkadj);
787#endif
788 /* This must be copied with decreasing addresses to
789 handle overlaps. */
790 tregs->vector = 0;
791 tregs->format = 0;
792 tregs->pc = regs->pc;
793 tregs->sr = regs->sr;
794 }
795 return 0;
796
797give_sigsegv:
798 force_sigsegv(sig, current);
799 return err;
800}
801
802static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
803 sigset_t *set, struct pt_regs *regs)
804{
805 struct rt_sigframe __user *frame;
806 int fsize = frame_extra_sizes[regs->format];
807 int err = 0;
808
809 if (fsize < 0) {
810#ifdef DEBUG
811		printk ("setup_rt_frame: Unknown frame format %#x\n",
812 regs->format);
813#endif
814 goto give_sigsegv;
815 }
816
817 frame = get_sigframe(ka, regs, sizeof(*frame));
818
819 if (fsize)
820 err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);
821
822 err |= __put_user((current_thread_info()->exec_domain
823 && current_thread_info()->exec_domain->signal_invmap
824 && sig < 32
825 ? current_thread_info()->exec_domain->signal_invmap[sig]
826 : sig),
827 &frame->sig);
828 err |= __put_user(&frame->info, &frame->pinfo);
829 err |= __put_user(&frame->uc, &frame->puc);
830 err |= copy_siginfo_to_user(&frame->info, info);
831
832 /* Create the ucontext. */
833 err |= __put_user(0, &frame->uc.uc_flags);
834 err |= __put_user(NULL, &frame->uc.uc_link);
835 err |= __put_user((void __user *)current->sas_ss_sp,
836 &frame->uc.uc_stack.ss_sp);
837 err |= __put_user(sas_ss_flags(rdusp()),
838 &frame->uc.uc_stack.ss_flags);
839 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
840 err |= rt_setup_ucontext(&frame->uc, regs);
841 err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));
842
843 /* Set up to return from userspace. */
844 err |= __put_user(frame->retcode, &frame->pretcode);
845#ifdef __mcoldfire__
846 /* movel #__NR_rt_sigreturn,d0; trap #0 */
847 err |= __put_user(0x203c0000, (long __user *)(frame->retcode + 0));
848 err |= __put_user(0x00004e40 + (__NR_rt_sigreturn << 16),
849 (long __user *)(frame->retcode + 4));
850#else
851 /* moveq #,d0; notb d0; trap #0 */
852 err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
853 (long __user *)(frame->retcode + 0));
854 err |= __put_user(0x4e40, (short __user *)(frame->retcode + 4));
855#endif
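	/*
	 * __NR_rt_sigreturn does not fit moveq's signed 8-bit immediate, so
	 * the classic 68k trampoline loads the complemented number and fixes
	 * it up with "not.b %d0" (0x4600) before the "trap #0".  The ColdFire
	 * variant instead uses the 6-byte "movel #__NR_rt_sigreturn,%d0"
	 * (opcode 0x203c followed by the 32-bit immediate).
	 */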
856
857 if (err)
858 goto give_sigsegv;
859
860 push_cache ((unsigned long) &frame->retcode);
861
862 /*
863 * Set up registers for signal handler. All the state we are about
864 * to destroy is successfully copied to sigframe.
865 */
866 wrusp ((unsigned long) frame);
867 regs->pc = (unsigned long) ka->sa.sa_handler;
868
869 /*
870 * This is subtle; if we build more than one sigframe, all but the
871 * first one will see frame format 0 and have fsize == 0, so we won't
872 * screw stkadj.
873 */
874 if (fsize)
875 regs->stkadj = fsize;
876
877 /* Prepare to skip over the extra stuff in the exception frame. */
878 if (regs->stkadj) {
879 struct pt_regs *tregs =
880 (struct pt_regs *)((ulong)regs + regs->stkadj);
881#ifdef DEBUG
882 printk("Performing stackadjust=%04x\n", regs->stkadj);
883#endif
884 /* This must be copied with decreasing addresses to
885 handle overlaps. */
886 tregs->vector = 0;
887 tregs->format = 0;
888 tregs->pc = regs->pc;
889 tregs->sr = regs->sr;
890 }
891 return 0;
892
893give_sigsegv:
894 force_sigsegv(sig, current);
895 return err;
896}
897
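/*
 * Syscall restart handling: d0 holds the (negative) error code at this
 * point while orig_d0 still holds the original syscall number.  A restart
 * reloads d0 from orig_d0 and backs the saved pc up by two bytes - the
 * size of the trap instruction that entered the kernel - so the same
 * syscall is reissued on return to user space; -ERESTART_RESTARTBLOCK
 * uses __NR_restart_syscall instead.
 */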
898static inline void
899handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
900{
901 switch (regs->d0) {
902 case -ERESTARTNOHAND:
903 if (!has_handler)
904 goto do_restart;
905 regs->d0 = -EINTR;
906 break;
907
908 case -ERESTART_RESTARTBLOCK:
909 if (!has_handler) {
910 regs->d0 = __NR_restart_syscall;
911 regs->pc -= 2;
912 break;
913 }
914 regs->d0 = -EINTR;
915 break;
916
917 case -ERESTARTSYS:
918 if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
919 regs->d0 = -EINTR;
920 break;
921 }
922 /* fallthrough */
923 case -ERESTARTNOINTR:
924 do_restart:
925 regs->d0 = regs->orig_d0;
926 regs->pc -= 2;
927 break;
928 }
929}
930
931void ptrace_signal_deliver(struct pt_regs *regs, void *cookie)
932{
933 if (regs->orig_d0 < 0)
934 return;
935 switch (regs->d0) {
936 case -ERESTARTNOHAND:
937 case -ERESTARTSYS:
938 case -ERESTARTNOINTR:
939 regs->d0 = regs->orig_d0;
940 regs->orig_d0 = -1;
941 regs->pc -= 2;
942 break;
943 }
944}
945
946/*
947 * OK, we're invoking a handler
948 */
949static void
950handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
951 sigset_t *oldset, struct pt_regs *regs)
952{
953 int err;
954 /* are we from a system call? */
955 if (regs->orig_d0 >= 0)
956 /* If so, check system call restarting.. */
957 handle_restart(regs, ka, 1);
958
959 /* set up the stack frame */
960 if (ka->sa.sa_flags & SA_SIGINFO)
961 err = setup_rt_frame(sig, ka, info, oldset, regs);
962 else
963 err = setup_frame(sig, ka, oldset, regs);
964
965 if (err)
966 return;
967
968 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
969 if (!(ka->sa.sa_flags & SA_NODEFER))
970 sigaddset(&current->blocked,sig);
971 recalc_sigpending();
972
973 if (test_thread_flag(TIF_DELAYED_TRACE)) {
974 regs->sr &= ~0x8000;
975 send_sig(SIGTRAP, current, 1);
976 }
977
978 clear_thread_flag(TIF_RESTORE_SIGMASK);
979}
980
981/*
982 * Note that 'init' is a special process: it doesn't get signals it doesn't
983 * want to handle. Thus you cannot kill init even with a SIGKILL even by
984 * mistake.
985 */
986asmlinkage void do_signal(struct pt_regs *regs)
987{
988 siginfo_t info;
989 struct k_sigaction ka;
990 int signr;
991 sigset_t *oldset;
992
993 current->thread.esp0 = (unsigned long) regs;
994
995 if (test_thread_flag(TIF_RESTORE_SIGMASK))
996 oldset = &current->saved_sigmask;
997 else
998 oldset = &current->blocked;
999
1000 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
1001 if (signr > 0) {
1002 /* Whee! Actually deliver the signal. */
1003 handle_signal(signr, &ka, &info, oldset, regs);
1004 return;
1005 }
1006
1007 /* Did we come from a system call? */
1008 if (regs->orig_d0 >= 0)
1009 /* Restart the system call - no handlers present */
1010 handle_restart(regs, NULL, 0);
1011
1012 /* If there's no signal to deliver, we just restore the saved mask. */
1013 if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
1014 clear_thread_flag(TIF_RESTORE_SIGMASK);
1015 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
1016 }
1017}
diff --git a/arch/m68k/kernel/signal_no.c b/arch/m68k/kernel/signal_no.c
new file mode 100644
index 00000000000..36a81bb6835
--- /dev/null
+++ b/arch/m68k/kernel/signal_no.c
@@ -0,0 +1,765 @@
1/*
2 * linux/arch/m68knommu/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file COPYING in the main directory of this archive
8 * for more details.
9 */
10
11/*
12 * Linux/m68k support by Hamish Macdonald
13 *
14 * 68060 fixes by Jesper Skov
15 *
16 * 1997-12-01 Modified for POSIX.1b signals by Andreas Schwab
17 *
18 * mathemu support by Roman Zippel
19 * (Note: fpstate in the signal context is completely ignored for the emulator
20 * and the internal floating point format is put on stack)
21 */
22
23/*
24 * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
25 * Atari :-) Current limitation: Only one sigstack can be active at one time.
26 * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
27 * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
28 * signal handlers!
29 */
30
31#include <linux/sched.h>
32#include <linux/mm.h>
33#include <linux/kernel.h>
34#include <linux/signal.h>
35#include <linux/syscalls.h>
36#include <linux/errno.h>
37#include <linux/wait.h>
38#include <linux/ptrace.h>
39#include <linux/unistd.h>
40#include <linux/stddef.h>
41#include <linux/highuid.h>
42#include <linux/tty.h>
43#include <linux/personality.h>
44#include <linux/binfmts.h>
45
46#include <asm/setup.h>
47#include <asm/uaccess.h>
48#include <asm/pgtable.h>
49#include <asm/traps.h>
50#include <asm/ucontext.h>
51
52#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
53
54void ret_from_user_signal(void);
55void ret_from_user_rt_signal(void);
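/*
 * Assembler return stubs provided by the low-level entry code: the nommu
 * signal frames point pretcode at these kernel-resident trampolines
 * instead of copying return code onto the user stack as the MMU variant
 * above does, so no explicit cache flush of the frame is needed.
 */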
56
57/*
58 * Atomically swap in the new signal mask, and wait for a signal.
59 */
60asmlinkage int
61sys_sigsuspend(int unused0, int unused1, old_sigset_t mask)
62{
63 mask &= _BLOCKABLE;
64 spin_lock_irq(&current->sighand->siglock);
65 current->saved_sigmask = current->blocked;
66 siginitset(&current->blocked, mask);
67 recalc_sigpending();
68 spin_unlock_irq(&current->sighand->siglock);
69
70 current->state = TASK_INTERRUPTIBLE;
71 schedule();
72 set_restore_sigmask();
73
74 return -ERESTARTNOHAND;
75}
76
77asmlinkage int
78sys_sigaction(int sig, const struct old_sigaction __user *act,
79 struct old_sigaction __user *oact)
80{
81 struct k_sigaction new_ka, old_ka;
82 int ret;
83
84 if (act) {
85 old_sigset_t mask;
86 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
87 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
88 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
89 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
90 __get_user(mask, &act->sa_mask))
91 return -EFAULT;
92 siginitset(&new_ka.sa.sa_mask, mask);
93 }
94
95 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
96
97 if (!ret && oact) {
98 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
99 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
100 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
101 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
102 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
103 return -EFAULT;
104 }
105
106 return ret;
107}
108
109asmlinkage int
110sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
111{
112 return do_sigaltstack(uss, uoss, rdusp());
113}
114
115
116/*
117 * Do a signal return; undo the signal stack.
118 *
119 * Keep the return code on the stack quadword aligned!
120 * That makes the cache flush below easier.
121 */
122
123struct sigframe
124{
125 char __user *pretcode;
126 int sig;
127 int code;
128 struct sigcontext __user *psc;
129 char retcode[8];
130 unsigned long extramask[_NSIG_WORDS-1];
131 struct sigcontext sc;
132};
133
134struct rt_sigframe
135{
136 char __user *pretcode;
137 int sig;
138 struct siginfo __user *pinfo;
139 void __user *puc;
140 char retcode[8];
141 struct siginfo info;
142 struct ucontext uc;
143};
144
145#ifdef CONFIG_FPU
146
147static unsigned char fpu_version = 0; /* version number of fpu, set by setup_frame */
148
149static inline int restore_fpu_state(struct sigcontext *sc)
150{
151 int err = 1;
152
153 if (FPU_IS_EMU) {
154 /* restore registers */
155 memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
156 memcpy(current->thread.fp, sc->sc_fpregs, 24);
157 return 0;
158 }
159
160 if (sc->sc_fpstate[0]) {
161 /* Verify the frame format. */
162 if (sc->sc_fpstate[0] != fpu_version)
163 goto out;
164
165 __asm__ volatile (".chip 68k/68881\n\t"
166 "fmovemx %0,%%fp0-%%fp1\n\t"
167 "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
168 ".chip 68k"
169 : /* no outputs */
170 : "m" (*sc->sc_fpregs), "m" (*sc->sc_fpcntl));
171 }
172 __asm__ volatile (".chip 68k/68881\n\t"
173 "frestore %0\n\t"
174 ".chip 68k" : : "m" (*sc->sc_fpstate));
175 err = 0;
176
177out:
178 return err;
179}
180
181#define FPCONTEXT_SIZE 216
182#define uc_fpstate uc_filler[0]
183#define uc_formatvec uc_filler[FPCONTEXT_SIZE/4]
184#define uc_extra uc_filler[FPCONTEXT_SIZE/4+1]
185
186static inline int rt_restore_fpu_state(struct ucontext __user *uc)
187{
188 unsigned char fpstate[FPCONTEXT_SIZE];
189 int context_size = 0;
190 fpregset_t fpregs;
191 int err = 1;
192
193 if (FPU_IS_EMU) {
194 /* restore fpu control register */
195 if (__copy_from_user(current->thread.fpcntl,
196 uc->uc_mcontext.fpregs.f_fpcntl, 12))
197 goto out;
198		/* restore all other fpu registers */
199 if (__copy_from_user(current->thread.fp,
200 uc->uc_mcontext.fpregs.f_fpregs, 96))
201 goto out;
202 return 0;
203 }
204
205 if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
206 goto out;
207 if (fpstate[0]) {
208 context_size = fpstate[1];
209
210 /* Verify the frame format. */
211 if (fpstate[0] != fpu_version)
212 goto out;
213 if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
214 sizeof(fpregs)))
215 goto out;
216 __asm__ volatile (".chip 68k/68881\n\t"
217 "fmovemx %0,%%fp0-%%fp7\n\t"
218 "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
219 ".chip 68k"
220 : /* no outputs */
221 : "m" (*fpregs.f_fpregs),
222 "m" (*fpregs.f_fpcntl));
223 }
224 if (context_size &&
225 __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
226 context_size))
227 goto out;
228 __asm__ volatile (".chip 68k/68881\n\t"
229 "frestore %0\n\t"
230 ".chip 68k" : : "m" (*fpstate));
231 err = 0;
232
233out:
234 return err;
235}
236
237#endif
238
239static inline int
240restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp,
241 int *pd0)
242{
243 int formatvec;
244 struct sigcontext context;
245 int err = 0;
246
247 /* Always make any pending restarted system calls return -EINTR */
248 current_thread_info()->restart_block.fn = do_no_restart_syscall;
249
250 /* get previous context */
251 if (copy_from_user(&context, usc, sizeof(context)))
252 goto badframe;
253
254 /* restore passed registers */
255 regs->d1 = context.sc_d1;
256 regs->a0 = context.sc_a0;
257 regs->a1 = context.sc_a1;
258 ((struct switch_stack *)regs - 1)->a5 = context.sc_a5;
259 regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
260 regs->pc = context.sc_pc;
261 regs->orig_d0 = -1; /* disable syscall checks */
262 wrusp(context.sc_usp);
263 formatvec = context.sc_formatvec;
264 regs->format = formatvec >> 12;
265 regs->vector = formatvec & 0xfff;
266
267#ifdef CONFIG_FPU
268 err = restore_fpu_state(&context);
269#endif
270
271 *pd0 = context.sc_d0;
272 return err;
273
274badframe:
275 return 1;
276}
277
278static inline int
279rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
280 struct ucontext __user *uc, int *pd0)
281{
282 int temp;
283 greg_t __user *gregs = uc->uc_mcontext.gregs;
284 unsigned long usp;
285 int err;
286
287 /* Always make any pending restarted system calls return -EINTR */
288 current_thread_info()->restart_block.fn = do_no_restart_syscall;
289
290 err = __get_user(temp, &uc->uc_mcontext.version);
291 if (temp != MCONTEXT_VERSION)
292 goto badframe;
293 /* restore passed registers */
294 err |= __get_user(regs->d0, &gregs[0]);
295 err |= __get_user(regs->d1, &gregs[1]);
296 err |= __get_user(regs->d2, &gregs[2]);
297 err |= __get_user(regs->d3, &gregs[3]);
298 err |= __get_user(regs->d4, &gregs[4]);
299 err |= __get_user(regs->d5, &gregs[5]);
300 err |= __get_user(sw->d6, &gregs[6]);
301 err |= __get_user(sw->d7, &gregs[7]);
302 err |= __get_user(regs->a0, &gregs[8]);
303 err |= __get_user(regs->a1, &gregs[9]);
304 err |= __get_user(regs->a2, &gregs[10]);
305 err |= __get_user(sw->a3, &gregs[11]);
306 err |= __get_user(sw->a4, &gregs[12]);
307 err |= __get_user(sw->a5, &gregs[13]);
308 err |= __get_user(sw->a6, &gregs[14]);
309 err |= __get_user(usp, &gregs[15]);
310 wrusp(usp);
311 err |= __get_user(regs->pc, &gregs[16]);
312 err |= __get_user(temp, &gregs[17]);
313 regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
314 regs->orig_d0 = -1; /* disable syscall checks */
315 regs->format = temp >> 12;
316 regs->vector = temp & 0xfff;
317
318 if (do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT)
319 goto badframe;
320
321 *pd0 = regs->d0;
322 return err;
323
324badframe:
325 return 1;
326}
327
328asmlinkage int do_sigreturn(unsigned long __unused)
329{
330 struct switch_stack *sw = (struct switch_stack *) &__unused;
331 struct pt_regs *regs = (struct pt_regs *) (sw + 1);
332 unsigned long usp = rdusp();
333 struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
334 sigset_t set;
335 int d0;
336
337 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
338 goto badframe;
339 if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
340 (_NSIG_WORDS > 1 &&
341 __copy_from_user(&set.sig[1], &frame->extramask,
342 sizeof(frame->extramask))))
343 goto badframe;
344
345 sigdelsetmask(&set, ~_BLOCKABLE);
346 spin_lock_irq(&current->sighand->siglock);
347 current->blocked = set;
348 recalc_sigpending();
349 spin_unlock_irq(&current->sighand->siglock);
350
351 if (restore_sigcontext(regs, &frame->sc, frame + 1, &d0))
352 goto badframe;
353 return d0;
354
355badframe:
356 force_sig(SIGSEGV, current);
357 return 0;
358}
359
360asmlinkage int do_rt_sigreturn(unsigned long __unused)
361{
362 struct switch_stack *sw = (struct switch_stack *) &__unused;
363 struct pt_regs *regs = (struct pt_regs *) (sw + 1);
364 unsigned long usp = rdusp();
365 struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
366 sigset_t set;
367 int d0;
368
369 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
370 goto badframe;
371 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
372 goto badframe;
373
374 sigdelsetmask(&set, ~_BLOCKABLE);
375 spin_lock_irq(&current->sighand->siglock);
376 current->blocked = set;
377 recalc_sigpending();
378 spin_unlock_irq(&current->sighand->siglock);
379
380 if (rt_restore_ucontext(regs, sw, &frame->uc, &d0))
381 goto badframe;
382 return d0;
383
384badframe:
385 force_sig(SIGSEGV, current);
386 return 0;
387}
388
389#ifdef CONFIG_FPU
390/*
391 * Set up a signal frame.
392 */
393
394static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
395{
396 if (FPU_IS_EMU) {
397 /* save registers */
398 memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
399 memcpy(sc->sc_fpregs, current->thread.fp, 24);
400 return;
401 }
402
403 __asm__ volatile (".chip 68k/68881\n\t"
404 "fsave %0\n\t"
405 ".chip 68k"
406 : : "m" (*sc->sc_fpstate) : "memory");
407
408 if (sc->sc_fpstate[0]) {
409 fpu_version = sc->sc_fpstate[0];
410 __asm__ volatile (".chip 68k/68881\n\t"
411 "fmovemx %%fp0-%%fp1,%0\n\t"
412 "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
413 ".chip 68k"
414 : "=m" (*sc->sc_fpregs),
415 "=m" (*sc->sc_fpcntl)
416 : /* no inputs */
417 : "memory");
418 }
419}
420
421static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
422{
423 unsigned char fpstate[FPCONTEXT_SIZE];
424 int context_size = 0;
425 int err = 0;
426
427 if (FPU_IS_EMU) {
428 /* save fpu control register */
429		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
430 current->thread.fpcntl, 12);
431		/* save all other fpu registers */
432 err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
433 current->thread.fp, 96);
434 return err;
435 }
436
437 __asm__ volatile (".chip 68k/68881\n\t"
438 "fsave %0\n\t"
439 ".chip 68k"
440 : : "m" (*fpstate) : "memory");
441
442 err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
443 if (fpstate[0]) {
444 fpregset_t fpregs;
445 context_size = fpstate[1];
446 fpu_version = fpstate[0];
447 __asm__ volatile (".chip 68k/68881\n\t"
448 "fmovemx %%fp0-%%fp7,%0\n\t"
449 "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
450 ".chip 68k"
451 : "=m" (*fpregs.f_fpregs),
452 "=m" (*fpregs.f_fpcntl)
453 : /* no inputs */
454 : "memory");
455 err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
456 sizeof(fpregs));
457 }
458 if (context_size)
459 err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4,
460 context_size);
461 return err;
462}
463
464#endif
465
466static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
467 unsigned long mask)
468{
469 sc->sc_mask = mask;
470 sc->sc_usp = rdusp();
471 sc->sc_d0 = regs->d0;
472 sc->sc_d1 = regs->d1;
473 sc->sc_a0 = regs->a0;
474 sc->sc_a1 = regs->a1;
475 sc->sc_a5 = ((struct switch_stack *)regs - 1)->a5;
476 sc->sc_sr = regs->sr;
477 sc->sc_pc = regs->pc;
478 sc->sc_formatvec = regs->format << 12 | regs->vector;
479#ifdef CONFIG_FPU
480 save_fpu_state(sc, regs);
481#endif
482}
483
484static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
485{
486 struct switch_stack *sw = (struct switch_stack *)regs - 1;
487 greg_t __user *gregs = uc->uc_mcontext.gregs;
488 int err = 0;
489
490 err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
491 err |= __put_user(regs->d0, &gregs[0]);
492 err |= __put_user(regs->d1, &gregs[1]);
493 err |= __put_user(regs->d2, &gregs[2]);
494 err |= __put_user(regs->d3, &gregs[3]);
495 err |= __put_user(regs->d4, &gregs[4]);
496 err |= __put_user(regs->d5, &gregs[5]);
497 err |= __put_user(sw->d6, &gregs[6]);
498 err |= __put_user(sw->d7, &gregs[7]);
499 err |= __put_user(regs->a0, &gregs[8]);
500 err |= __put_user(regs->a1, &gregs[9]);
501 err |= __put_user(regs->a2, &gregs[10]);
502 err |= __put_user(sw->a3, &gregs[11]);
503 err |= __put_user(sw->a4, &gregs[12]);
504 err |= __put_user(sw->a5, &gregs[13]);
505 err |= __put_user(sw->a6, &gregs[14]);
506 err |= __put_user(rdusp(), &gregs[15]);
507 err |= __put_user(regs->pc, &gregs[16]);
508 err |= __put_user(regs->sr, &gregs[17]);
509#ifdef CONFIG_FPU
510 err |= rt_save_fpu_state(uc, regs);
511#endif
512 return err;
513}
514
515static inline void __user *
516get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
517{
518 unsigned long usp;
519
520 /* Default to using normal stack. */
521 usp = rdusp();
522
523 /* This is the X/Open sanctioned signal stack switching. */
524 if (ka->sa.sa_flags & SA_ONSTACK) {
525 if (!sas_ss_flags(usp))
526 usp = current->sas_ss_sp + current->sas_ss_size;
527 }
528 return (void __user *)((usp - frame_size) & -8UL);
529}
530
531static int setup_frame (int sig, struct k_sigaction *ka,
532 sigset_t *set, struct pt_regs *regs)
533{
534 struct sigframe __user *frame;
535 struct sigcontext context;
536 int err = 0;
537
538 frame = get_sigframe(ka, regs, sizeof(*frame));
539
540 err |= __put_user((current_thread_info()->exec_domain
541 && current_thread_info()->exec_domain->signal_invmap
542 && sig < 32
543 ? current_thread_info()->exec_domain->signal_invmap[sig]
544 : sig),
545 &frame->sig);
546
547 err |= __put_user(regs->vector, &frame->code);
548 err |= __put_user(&frame->sc, &frame->psc);
549
550 if (_NSIG_WORDS > 1)
551 err |= copy_to_user(frame->extramask, &set->sig[1],
552 sizeof(frame->extramask));
553
554 setup_sigcontext(&context, regs, set->sig[0]);
555 err |= copy_to_user (&frame->sc, &context, sizeof(context));
556
557 /* Set up to return from userspace. */
558 err |= __put_user((void *) ret_from_user_signal, &frame->pretcode);
559
560 if (err)
561 goto give_sigsegv;
562
563 /* Set up registers for signal handler */
564 wrusp ((unsigned long) frame);
565 regs->pc = (unsigned long) ka->sa.sa_handler;
566 ((struct switch_stack *)regs - 1)->a5 = current->mm->start_data;
567 regs->format = 0x4; /*set format byte to make stack appear modulo 4
568 which it will be when doing the rte */
569
570adjust_stack:
571 /* Prepare to skip over the extra stuff in the exception frame. */
572 if (regs->stkadj) {
573 struct pt_regs *tregs =
574 (struct pt_regs *)((ulong)regs + regs->stkadj);
575#if defined(DEBUG)
576 printk(KERN_DEBUG "Performing stackadjust=%04x\n", regs->stkadj);
577#endif
578 /* This must be copied with decreasing addresses to
579 handle overlaps. */
580 tregs->vector = 0;
581 tregs->format = 0;
582 tregs->pc = regs->pc;
583 tregs->sr = regs->sr;
584 }
585 return err;
586
587give_sigsegv:
588 force_sigsegv(sig, current);
589 goto adjust_stack;
590}
591
592static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
593 sigset_t *set, struct pt_regs *regs)
594{
595 struct rt_sigframe __user *frame;
596 int err = 0;
597
598 frame = get_sigframe(ka, regs, sizeof(*frame));
599
600 err |= __put_user((current_thread_info()->exec_domain
601 && current_thread_info()->exec_domain->signal_invmap
602 && sig < 32
603 ? current_thread_info()->exec_domain->signal_invmap[sig]
604 : sig),
605 &frame->sig);
606 err |= __put_user(&frame->info, &frame->pinfo);
607 err |= __put_user(&frame->uc, &frame->puc);
608 err |= copy_siginfo_to_user(&frame->info, info);
609
610 /* Create the ucontext. */
611 err |= __put_user(0, &frame->uc.uc_flags);
612 err |= __put_user(NULL, &frame->uc.uc_link);
613 err |= __put_user((void __user *)current->sas_ss_sp,
614 &frame->uc.uc_stack.ss_sp);
615 err |= __put_user(sas_ss_flags(rdusp()),
616 &frame->uc.uc_stack.ss_flags);
617 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
618 err |= rt_setup_ucontext(&frame->uc, regs);
619 err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));
620
621 /* Set up to return from userspace. */
622 err |= __put_user((void *) ret_from_user_rt_signal, &frame->pretcode);
623
624 if (err)
625 goto give_sigsegv;
626
627 /* Set up registers for signal handler */
628 wrusp ((unsigned long) frame);
629 regs->pc = (unsigned long) ka->sa.sa_handler;
630 ((struct switch_stack *)regs - 1)->a5 = current->mm->start_data;
631 regs->format = 0x4; /*set format byte to make stack appear modulo 4
632 which it will be when doing the rte */
633
634adjust_stack:
635 /* Prepare to skip over the extra stuff in the exception frame. */
636 if (regs->stkadj) {
637 struct pt_regs *tregs =
638 (struct pt_regs *)((ulong)regs + regs->stkadj);
639#if defined(DEBUG)
640 printk(KERN_DEBUG "Performing stackadjust=%04x\n", regs->stkadj);
641#endif
642 /* This must be copied with decreasing addresses to
643 handle overlaps. */
644 tregs->vector = 0;
645 tregs->format = 0;
646 tregs->pc = regs->pc;
647 tregs->sr = regs->sr;
648 }
649 return err;
650
651give_sigsegv:
652 force_sigsegv(sig, current);
653 goto adjust_stack;
654}
655
656static inline void
657handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
658{
659 switch (regs->d0) {
660 case -ERESTARTNOHAND:
661 if (!has_handler)
662 goto do_restart;
663 regs->d0 = -EINTR;
664 break;
665
666 case -ERESTART_RESTARTBLOCK:
667 if (!has_handler) {
668 regs->d0 = __NR_restart_syscall;
669 regs->pc -= 2;
670 break;
671 }
672 regs->d0 = -EINTR;
673 break;
674
675 case -ERESTARTSYS:
676 if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
677 regs->d0 = -EINTR;
678 break;
679 }
680 /* fallthrough */
681 case -ERESTARTNOINTR:
682 do_restart:
683 regs->d0 = regs->orig_d0;
684 regs->pc -= 2;
685 break;
686 }
687}
688
689/*
690 * OK, we're invoking a handler
691 */
692static void
693handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
694 sigset_t *oldset, struct pt_regs *regs)
695{
696 int err;
697 /* are we from a system call? */
698 if (regs->orig_d0 >= 0)
699 /* If so, check system call restarting.. */
700 handle_restart(regs, ka, 1);
701
702 /* set up the stack frame */
703 if (ka->sa.sa_flags & SA_SIGINFO)
704 err = setup_rt_frame(sig, ka, info, oldset, regs);
705 else
706 err = setup_frame(sig, ka, oldset, regs);
707
708 if (err)
709 return;
710
711 spin_lock_irq(&current->sighand->siglock);
712 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
713 if (!(ka->sa.sa_flags & SA_NODEFER))
714 sigaddset(&current->blocked,sig);
715 recalc_sigpending();
716 spin_unlock_irq(&current->sighand->siglock);
717
718 clear_thread_flag(TIF_RESTORE_SIGMASK);
719}
720
721/*
722 * Note that 'init' is a special process: it doesn't get signals it doesn't
723 * want to handle. Thus you cannot kill init even with a SIGKILL even by
724 * mistake.
725 */
726asmlinkage void do_signal(struct pt_regs *regs)
727{
728 struct k_sigaction ka;
729 siginfo_t info;
730 int signr;
731 sigset_t *oldset;
732
733 /*
734 * We want the common case to go fast, which
735 * is why we may in certain cases get here from
736 * kernel mode. Just return without doing anything
737 * if so.
738 */
739 if (!user_mode(regs))
740 return;
741
742 if (test_thread_flag(TIF_RESTORE_SIGMASK))
743 oldset = &current->saved_sigmask;
744 else
745 oldset = &current->blocked;
746
747 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
748 if (signr > 0) {
749 /* Whee! Actually deliver the signal. */
750 handle_signal(signr, &ka, &info, oldset, regs);
751 return;
752 }
753
754 /* Did we come from a system call? */
755 if (regs->orig_d0 >= 0) {
756 /* Restart the system call - no handlers present */
757 handle_restart(regs, NULL, 0);
758 }
759
760 /* If there's no signal to deliver, we just restore the saved mask. */
761 if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
762 clear_thread_flag(TIF_RESTORE_SIGMASK);
763 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
764 }
765}
diff --git a/arch/m68k/kernel/time_mm.c b/arch/m68k/kernel/time_mm.c
new file mode 100644
index 00000000000..18b34ee5db3
--- /dev/null
+++ b/arch/m68k/kernel/time_mm.c
@@ -0,0 +1,114 @@
1/*
2 * linux/arch/m68k/kernel/time.c
3 *
4 * Copyright (C) 1991, 1992, 1995 Linus Torvalds
5 *
6 * This file contains the m68k-specific time handling details.
7 * Most of the stuff is located in the machine specific files.
8 *
9 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
10 * "A Kernel Model for Precision Timekeeping" by Dave Mills
11 */
12
13#include <linux/errno.h>
14#include <linux/module.h>
15#include <linux/sched.h>
16#include <linux/kernel.h>
17#include <linux/param.h>
18#include <linux/string.h>
19#include <linux/mm.h>
20#include <linux/rtc.h>
21#include <linux/platform_device.h>
22
23#include <asm/machdep.h>
24#include <asm/io.h>
25#include <asm/irq_regs.h>
26
27#include <linux/time.h>
28#include <linux/timex.h>
29#include <linux/profile.h>
30
31static inline int set_rtc_mmss(unsigned long nowtime)
32{
33 if (mach_set_clock_mmss)
34 return mach_set_clock_mmss (nowtime);
35 return -1;
36}
37
38/*
39 * timer_interrupt() needs to keep up the real-time clock,
40 * as well as call the "xtime_update()" routine every clocktick
41 */
42static irqreturn_t timer_interrupt(int irq, void *dummy)
43{
44 xtime_update(1);
45 update_process_times(user_mode(get_irq_regs()));
46 profile_tick(CPU_PROFILING);
47
48#ifdef CONFIG_HEARTBEAT
49 /* use power LED as a heartbeat instead -- much more useful
50 for debugging -- based on the version for PReP by Cort */
51 /* acts like an actual heart beat -- ie thump-thump-pause... */
52 if (mach_heartbeat) {
53 static unsigned cnt = 0, period = 0, dist = 0;
54
55 if (cnt == 0 || cnt == dist)
56 mach_heartbeat( 1 );
57 else if (cnt == 7 || cnt == dist+7)
58 mach_heartbeat( 0 );
59
60 if (++cnt > period) {
61 cnt = 0;
62 /* The hyperbolic function below modifies the heartbeat period
63 * length in dependency of the current (5min) load. It goes
64 * through the points f(0)=126, f(1)=86, f(5)=51,
65 * f(inf)->30. */
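			/* In FSHIFT fixed point: load 0 gives 672/7 + 30 = 126
			 * ticks, load 1 gives 672/12 + 30 = 86, load 5 gives
			 * 672/32 + 30 = 51, and for very large loads the
			 * quotient tends to 0, so the period tends to 30. */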
66 period = ((672<<FSHIFT)/(5*avenrun[0]+(7<<FSHIFT))) + 30;
67 dist = period / 4;
68 }
69 }
70#endif /* CONFIG_HEARTBEAT */
71 return IRQ_HANDLED;
72}
73
74void read_persistent_clock(struct timespec *ts)
75{
76 struct rtc_time time;
77 ts->tv_sec = 0;
78 ts->tv_nsec = 0;
79
80 if (mach_hwclk) {
81 mach_hwclk(0, &time);
82
83 if ((time.tm_year += 1900) < 1970)
84 time.tm_year += 100;
85 ts->tv_sec = mktime(time.tm_year, time.tm_mon, time.tm_mday,
86 time.tm_hour, time.tm_min, time.tm_sec);
87 }
88}
89
90void __init time_init(void)
91{
92 mach_sched_init(timer_interrupt);
93}
94
95u32 arch_gettimeoffset(void)
96{
97 return mach_gettimeoffset() * 1000;
98}
99
100static int __init rtc_init(void)
101{
102 struct platform_device *pdev;
103
104 if (!mach_hwclk)
105 return -ENODEV;
106
107 pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
108 if (IS_ERR(pdev))
109 return PTR_ERR(pdev);
110
111 return 0;
112}
113
114module_init(rtc_init);
diff --git a/arch/m68k/kernel/time_no.c b/arch/m68k/kernel/time_no.c
new file mode 100644
index 00000000000..6623909f70e
--- /dev/null
+++ b/arch/m68k/kernel/time_no.c
@@ -0,0 +1,87 @@
1/*
2 * linux/arch/m68knommu/kernel/time.c
3 *
4 * Copyright (C) 1991, 1992, 1995 Linus Torvalds
5 *
6 * This file contains the m68k-specific time handling details.
7 * Most of the stuff is located in the machine specific files.
8 *
9 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
10 * "A Kernel Model for Precision Timekeeping" by Dave Mills
11 */
12
13#include <linux/errno.h>
14#include <linux/module.h>
15#include <linux/sched.h>
16#include <linux/kernel.h>
17#include <linux/param.h>
18#include <linux/string.h>
19#include <linux/mm.h>
20#include <linux/profile.h>
21#include <linux/time.h>
22#include <linux/timex.h>
23
24#include <asm/machdep.h>
25#include <asm/irq_regs.h>
26
27#define TICK_SIZE (tick_nsec / 1000)
28
29static inline int set_rtc_mmss(unsigned long nowtime)
30{
31 if (mach_set_clock_mmss)
32 return mach_set_clock_mmss (nowtime);
33 return -1;
34}
35
36#ifndef CONFIG_GENERIC_CLOCKEVENTS
37/*
38 * timer_interrupt() needs to keep up the real-time clock,
39 * as well as call the "xtime_update()" routine every clocktick
40 */
41irqreturn_t arch_timer_interrupt(int irq, void *dummy)
42{
43
44 if (current->pid)
45 profile_tick(CPU_PROFILING);
46
47 xtime_update(1);
48
49 update_process_times(user_mode(get_irq_regs()));
50
51 return(IRQ_HANDLED);
52}
53#endif
54
55static unsigned long read_rtc_mmss(void)
56{
57 unsigned int year, mon, day, hour, min, sec;
58
59 if (mach_gettod) {
60 mach_gettod(&year, &mon, &day, &hour, &min, &sec);
61 if ((year += 1900) < 1970)
62 year += 100;
63 } else {
64 year = 1970;
65 mon = day = 1;
66 hour = min = sec = 0;
67 }
68
69
70 return mktime(year, mon, day, hour, min, sec);
71}
72
73void read_persistent_clock(struct timespec *ts)
74{
75 ts->tv_sec = read_rtc_mmss();
76 ts->tv_nsec = 0;
77}
78
79int update_persistent_clock(struct timespec now)
80{
81 return set_rtc_mmss(now.tv_sec);
82}
83
84void time_init(void)
85{
86 hw_timer_init();
87}
diff --git a/arch/m68k/kernel/traps_mm.c b/arch/m68k/kernel/traps_mm.c
new file mode 100644
index 00000000000..4022bbc2887
--- /dev/null
+++ b/arch/m68k/kernel/traps_mm.c
@@ -0,0 +1,1207 @@
1/*
2 * linux/arch/m68k/kernel/traps.c
3 *
4 * Copyright (C) 1993, 1994 by Hamish Macdonald
5 *
6 * 68040 fixes by Michael Rausch
7 * 68040 fixes by Martin Apel
8 * 68040 fixes and writeback by Richard Zidlicky
9 * 68060 fixes by Roman Hodek
10 * 68060 fixes by Jesper Skov
11 *
12 * This file is subject to the terms and conditions of the GNU General Public
13 * License. See the file COPYING in the main directory of this archive
14 * for more details.
15 */
16
17/*
18 * Sets up all exception vectors
19 */
20
21#include <linux/sched.h>
22#include <linux/signal.h>
23#include <linux/kernel.h>
24#include <linux/mm.h>
25#include <linux/module.h>
26#include <linux/user.h>
27#include <linux/string.h>
28#include <linux/linkage.h>
29#include <linux/init.h>
30#include <linux/ptrace.h>
31#include <linux/kallsyms.h>
32
33#include <asm/setup.h>
34#include <asm/fpu.h>
35#include <asm/system.h>
36#include <asm/uaccess.h>
37#include <asm/traps.h>
38#include <asm/pgalloc.h>
39#include <asm/machdep.h>
40#include <asm/siginfo.h>
41
42/* assembler routines */
43asmlinkage void system_call(void);
44asmlinkage void buserr(void);
45asmlinkage void trap(void);
46asmlinkage void nmihandler(void);
47#ifdef CONFIG_M68KFPU_EMU
48asmlinkage void fpu_emu(void);
49#endif
50
51e_vector vectors[256];
52
53/* nmi handler for the Amiga */
54asm(".text\n"
55 __ALIGN_STR "\n"
56 "nmihandler: rte");
57
58/*
59 * this must be called very early as the kernel might
  60 * use some instructions that are emulated on the 060
61 * and so we're prepared for early probe attempts (e.g. nf_init).
62 */
63void __init base_trap_init(void)
64{
65 if (MACH_IS_SUN3X) {
66 extern e_vector *sun3x_prom_vbr;
67
68 __asm__ volatile ("movec %%vbr, %0" : "=r" (sun3x_prom_vbr));
69 }
70
71 /* setup the exception vector table */
72 __asm__ volatile ("movec %0,%%vbr" : : "r" ((void*)vectors));
73
74 if (CPU_IS_060) {
75 /* set up ISP entry points */
76 asmlinkage void unimp_vec(void) asm ("_060_isp_unimp");
77
78 vectors[VEC_UNIMPII] = unimp_vec;
79 }
80
81 vectors[VEC_BUSERR] = buserr;
82 vectors[VEC_ILLEGAL] = trap;
83 vectors[VEC_SYS] = system_call;
84}
85
86void __init trap_init (void)
87{
88 int i;
89
90 for (i = VEC_SPUR; i <= VEC_INT7; i++)
91 vectors[i] = bad_inthandler;
92
93 for (i = 0; i < VEC_USER; i++)
94 if (!vectors[i])
95 vectors[i] = trap;
96
97 for (i = VEC_USER; i < 256; i++)
98 vectors[i] = bad_inthandler;
99
100#ifdef CONFIG_M68KFPU_EMU
101 if (FPU_IS_EMU)
102 vectors[VEC_LINE11] = fpu_emu;
103#endif
104
105 if (CPU_IS_040 && !FPU_IS_EMU) {
106 /* set up FPSP entry points */
107 asmlinkage void dz_vec(void) asm ("dz");
108 asmlinkage void inex_vec(void) asm ("inex");
109 asmlinkage void ovfl_vec(void) asm ("ovfl");
110 asmlinkage void unfl_vec(void) asm ("unfl");
111 asmlinkage void snan_vec(void) asm ("snan");
112 asmlinkage void operr_vec(void) asm ("operr");
113 asmlinkage void bsun_vec(void) asm ("bsun");
114 asmlinkage void fline_vec(void) asm ("fline");
115 asmlinkage void unsupp_vec(void) asm ("unsupp");
116
117 vectors[VEC_FPDIVZ] = dz_vec;
118 vectors[VEC_FPIR] = inex_vec;
119 vectors[VEC_FPOVER] = ovfl_vec;
120 vectors[VEC_FPUNDER] = unfl_vec;
121 vectors[VEC_FPNAN] = snan_vec;
122 vectors[VEC_FPOE] = operr_vec;
123 vectors[VEC_FPBRUC] = bsun_vec;
124 vectors[VEC_LINE11] = fline_vec;
125 vectors[VEC_FPUNSUP] = unsupp_vec;
126 }
127
128 if (CPU_IS_060 && !FPU_IS_EMU) {
129 /* set up IFPSP entry points */
130 asmlinkage void snan_vec6(void) asm ("_060_fpsp_snan");
131 asmlinkage void operr_vec6(void) asm ("_060_fpsp_operr");
132 asmlinkage void ovfl_vec6(void) asm ("_060_fpsp_ovfl");
133 asmlinkage void unfl_vec6(void) asm ("_060_fpsp_unfl");
134 asmlinkage void dz_vec6(void) asm ("_060_fpsp_dz");
135 asmlinkage void inex_vec6(void) asm ("_060_fpsp_inex");
136 asmlinkage void fline_vec6(void) asm ("_060_fpsp_fline");
137 asmlinkage void unsupp_vec6(void) asm ("_060_fpsp_unsupp");
138 asmlinkage void effadd_vec6(void) asm ("_060_fpsp_effadd");
139
140 vectors[VEC_FPNAN] = snan_vec6;
141 vectors[VEC_FPOE] = operr_vec6;
142 vectors[VEC_FPOVER] = ovfl_vec6;
143 vectors[VEC_FPUNDER] = unfl_vec6;
144 vectors[VEC_FPDIVZ] = dz_vec6;
145 vectors[VEC_FPIR] = inex_vec6;
146 vectors[VEC_LINE11] = fline_vec6;
147 vectors[VEC_FPUNSUP] = unsupp_vec6;
148 vectors[VEC_UNIMPEA] = effadd_vec6;
149 }
150
151 /* if running on an amiga, make the NMI interrupt do nothing */
152 if (MACH_IS_AMIGA) {
153 vectors[VEC_INT7] = nmihandler;
154 }
155}
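On the 680x0 the exception frame records the vector as a byte offset into this table (vector number * 4), which is why the handlers below index vec_names[] with ptregs.vector >> 2. A small illustrative sketch of that offset-to-name mapping (the three entries chosen here are just examples):

#include <stdio.h>

static const char *name_for_offset(unsigned int vector_offset)
{
	static const char *const names[] = {
		[2] = "BUS ERROR",
		[3] = "ADDRESS ERROR",
		[5] = "ZERO DIVIDE",
	};
	unsigned int idx = vector_offset >> 2;	/* byte offset -> vector number */

	if (idx < sizeof(names) / sizeof(names[0]) && names[idx])
		return names[idx];
	return "unknown";
}

int main(void)
{
	printf("%s\n", name_for_offset(3 * 4));		/* ADDRESS ERROR */
	printf("%s\n", name_for_offset(40 * 4));	/* unknown */
	return 0;
}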
156
157
158static const char *vec_names[] = {
159 [VEC_RESETSP] = "RESET SP",
160 [VEC_RESETPC] = "RESET PC",
161 [VEC_BUSERR] = "BUS ERROR",
162 [VEC_ADDRERR] = "ADDRESS ERROR",
163 [VEC_ILLEGAL] = "ILLEGAL INSTRUCTION",
164 [VEC_ZERODIV] = "ZERO DIVIDE",
165 [VEC_CHK] = "CHK",
166 [VEC_TRAP] = "TRAPcc",
167 [VEC_PRIV] = "PRIVILEGE VIOLATION",
168 [VEC_TRACE] = "TRACE",
169 [VEC_LINE10] = "LINE 1010",
170 [VEC_LINE11] = "LINE 1111",
171 [VEC_RESV12] = "UNASSIGNED RESERVED 12",
172 [VEC_COPROC] = "COPROCESSOR PROTOCOL VIOLATION",
173 [VEC_FORMAT] = "FORMAT ERROR",
174 [VEC_UNINT] = "UNINITIALIZED INTERRUPT",
175 [VEC_RESV16] = "UNASSIGNED RESERVED 16",
176 [VEC_RESV17] = "UNASSIGNED RESERVED 17",
177 [VEC_RESV18] = "UNASSIGNED RESERVED 18",
178 [VEC_RESV19] = "UNASSIGNED RESERVED 19",
179 [VEC_RESV20] = "UNASSIGNED RESERVED 20",
180 [VEC_RESV21] = "UNASSIGNED RESERVED 21",
181 [VEC_RESV22] = "UNASSIGNED RESERVED 22",
182 [VEC_RESV23] = "UNASSIGNED RESERVED 23",
183 [VEC_SPUR] = "SPURIOUS INTERRUPT",
184 [VEC_INT1] = "LEVEL 1 INT",
185 [VEC_INT2] = "LEVEL 2 INT",
186 [VEC_INT3] = "LEVEL 3 INT",
187 [VEC_INT4] = "LEVEL 4 INT",
188 [VEC_INT5] = "LEVEL 5 INT",
189 [VEC_INT6] = "LEVEL 6 INT",
190 [VEC_INT7] = "LEVEL 7 INT",
191 [VEC_SYS] = "SYSCALL",
192 [VEC_TRAP1] = "TRAP #1",
193 [VEC_TRAP2] = "TRAP #2",
194 [VEC_TRAP3] = "TRAP #3",
195 [VEC_TRAP4] = "TRAP #4",
196 [VEC_TRAP5] = "TRAP #5",
197 [VEC_TRAP6] = "TRAP #6",
198 [VEC_TRAP7] = "TRAP #7",
199 [VEC_TRAP8] = "TRAP #8",
200 [VEC_TRAP9] = "TRAP #9",
201 [VEC_TRAP10] = "TRAP #10",
202 [VEC_TRAP11] = "TRAP #11",
203 [VEC_TRAP12] = "TRAP #12",
204 [VEC_TRAP13] = "TRAP #13",
205 [VEC_TRAP14] = "TRAP #14",
206 [VEC_TRAP15] = "TRAP #15",
207 [VEC_FPBRUC] = "FPCP BSUN",
208 [VEC_FPIR] = "FPCP INEXACT",
209 [VEC_FPDIVZ] = "FPCP DIV BY 0",
210 [VEC_FPUNDER] = "FPCP UNDERFLOW",
211 [VEC_FPOE] = "FPCP OPERAND ERROR",
212 [VEC_FPOVER] = "FPCP OVERFLOW",
213 [VEC_FPNAN] = "FPCP SNAN",
214 [VEC_FPUNSUP] = "FPCP UNSUPPORTED OPERATION",
215 [VEC_MMUCFG] = "MMU CONFIGURATION ERROR",
216 [VEC_MMUILL] = "MMU ILLEGAL OPERATION ERROR",
217 [VEC_MMUACC] = "MMU ACCESS LEVEL VIOLATION ERROR",
218 [VEC_RESV59] = "UNASSIGNED RESERVED 59",
219 [VEC_UNIMPEA] = "UNASSIGNED RESERVED 60",
220 [VEC_UNIMPII] = "UNASSIGNED RESERVED 61",
221 [VEC_RESV62] = "UNASSIGNED RESERVED 62",
222 [VEC_RESV63] = "UNASSIGNED RESERVED 63",
223};
224
225static const char *space_names[] = {
226 [0] = "Space 0",
227 [USER_DATA] = "User Data",
228 [USER_PROGRAM] = "User Program",
229#ifndef CONFIG_SUN3
230 [3] = "Space 3",
231#else
232 [FC_CONTROL] = "Control",
233#endif
234 [4] = "Space 4",
235 [SUPER_DATA] = "Super Data",
236 [SUPER_PROGRAM] = "Super Program",
237 [CPU_SPACE] = "CPU"
238};
239
240void die_if_kernel(char *,struct pt_regs *,int);
241asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
242 unsigned long error_code);
243int send_fault_sig(struct pt_regs *regs);
244
245asmlinkage void trap_c(struct frame *fp);
246
247#if defined (CONFIG_M68060)
248static inline void access_error060 (struct frame *fp)
249{
250 unsigned long fslw = fp->un.fmt4.pc; /* is really FSLW for access error */
251
252#ifdef DEBUG
253 printk("fslw=%#lx, fa=%#lx\n", fslw, fp->un.fmt4.effaddr);
254#endif
255
256 if (fslw & MMU060_BPE) {
257 /* branch prediction error -> clear branch cache */
258 __asm__ __volatile__ ("movec %/cacr,%/d0\n\t"
259 "orl #0x00400000,%/d0\n\t"
260 "movec %/d0,%/cacr"
261 : : : "d0" );
262 /* return if there's no other error */
263 if (!(fslw & MMU060_ERR_BITS) && !(fslw & MMU060_SEE))
264 return;
265 }
266
267 if (fslw & (MMU060_DESC_ERR | MMU060_WP | MMU060_SP)) {
268 unsigned long errorcode;
269 unsigned long addr = fp->un.fmt4.effaddr;
270
271 if (fslw & MMU060_MA)
272 addr = (addr + PAGE_SIZE - 1) & PAGE_MASK;
273
274 errorcode = 1;
275 if (fslw & MMU060_DESC_ERR) {
276 __flush_tlb040_one(addr);
277 errorcode = 0;
278 }
279 if (fslw & MMU060_W)
280 errorcode |= 2;
281#ifdef DEBUG
 282		printk("errorcode = %lu\n", errorcode);
283#endif
284 do_page_fault(&fp->ptregs, addr, errorcode);
285 } else if (fslw & (MMU060_SEE)){
286 /* Software Emulation Error.
287 * fault during mem_read/mem_write in ifpsp060/os.S
288 */
289 send_fault_sig(&fp->ptregs);
290 } else if (!(fslw & (MMU060_RE|MMU060_WE)) ||
291 send_fault_sig(&fp->ptregs) > 0) {
292 printk("pc=%#lx, fa=%#lx\n", fp->ptregs.pc, fp->un.fmt4.effaddr);
293 printk( "68060 access error, fslw=%lx\n", fslw );
294 trap_c( fp );
295 }
296}
297#endif /* CONFIG_M68060 */
298
299#if defined (CONFIG_M68040)
300static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
301{
302 unsigned long mmusr;
303 mm_segment_t old_fs = get_fs();
304
305 set_fs(MAKE_MM_SEG(wbs));
306
307 if (iswrite)
308 asm volatile (".chip 68040; ptestw (%0); .chip 68k" : : "a" (addr));
309 else
310 asm volatile (".chip 68040; ptestr (%0); .chip 68k" : : "a" (addr));
311
312 asm volatile (".chip 68040; movec %%mmusr,%0; .chip 68k" : "=r" (mmusr));
313
314 set_fs(old_fs);
315
316 return mmusr;
317}
318
319static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
320 unsigned long wbd)
321{
322 int res = 0;
323 mm_segment_t old_fs = get_fs();
324
325 /* set_fs can not be moved, otherwise put_user() may oops */
326 set_fs(MAKE_MM_SEG(wbs));
327
328 switch (wbs & WBSIZ_040) {
329 case BA_SIZE_BYTE:
330 res = put_user(wbd & 0xff, (char __user *)wba);
331 break;
332 case BA_SIZE_WORD:
333 res = put_user(wbd & 0xffff, (short __user *)wba);
334 break;
335 case BA_SIZE_LONG:
336 res = put_user(wbd, (int __user *)wba);
337 break;
338 }
339
340 /* set_fs can not be moved, otherwise put_user() may oops */
341 set_fs(old_fs);
342
343
344#ifdef DEBUG
345 printk("do_040writeback1, res=%d\n",res);
346#endif
347
348 return res;
349}
350
 351/* after an exception in a writeback the stack frame corresponding
 352 * to that exception is discarded; set a few bits in the old frame
 353 * to simulate what it should look like
 354 */
355static inline void fix_xframe040(struct frame *fp, unsigned long wba, unsigned short wbs)
356{
357 fp->un.fmt7.faddr = wba;
358 fp->un.fmt7.ssw = wbs & 0xff;
359 if (wba != current->thread.faddr)
360 fp->un.fmt7.ssw |= MA_040;
361}
362
363static inline void do_040writebacks(struct frame *fp)
364{
365 int res = 0;
366#if 0
367 if (fp->un.fmt7.wb1s & WBV_040)
368 printk("access_error040: cannot handle 1st writeback. oops.\n");
369#endif
370
371 if ((fp->un.fmt7.wb2s & WBV_040) &&
372 !(fp->un.fmt7.wb2s & WBTT_040)) {
373 res = do_040writeback1(fp->un.fmt7.wb2s, fp->un.fmt7.wb2a,
374 fp->un.fmt7.wb2d);
375 if (res)
376 fix_xframe040(fp, fp->un.fmt7.wb2a, fp->un.fmt7.wb2s);
377 else
378 fp->un.fmt7.wb2s = 0;
379 }
380
381 /* do the 2nd wb only if the first one was successful (except for a kernel wb) */
382 if (fp->un.fmt7.wb3s & WBV_040 && (!res || fp->un.fmt7.wb3s & 4)) {
383 res = do_040writeback1(fp->un.fmt7.wb3s, fp->un.fmt7.wb3a,
384 fp->un.fmt7.wb3d);
385 if (res)
386 {
387 fix_xframe040(fp, fp->un.fmt7.wb3a, fp->un.fmt7.wb3s);
388
389 fp->un.fmt7.wb2s = fp->un.fmt7.wb3s;
390 fp->un.fmt7.wb3s &= (~WBV_040);
391 fp->un.fmt7.wb2a = fp->un.fmt7.wb3a;
392 fp->un.fmt7.wb2d = fp->un.fmt7.wb3d;
393 }
394 else
395 fp->un.fmt7.wb3s = 0;
396 }
397
398 if (res)
399 send_fault_sig(&fp->ptregs);
400}
401
402/*
 403 * called from sigreturn(); we must ensure that userspace code didn't
 404 * manipulate the exception frame to circumvent protection, then complete
 405 * the pending writebacks.
 406 * We just clear TM2 to turn the writebacks into userspace accesses.
407 */
408asmlinkage void berr_040cleanup(struct frame *fp)
409{
410 fp->un.fmt7.wb2s &= ~4;
411 fp->un.fmt7.wb3s &= ~4;
412
413 do_040writebacks(fp);
414}
415
416static inline void access_error040(struct frame *fp)
417{
418 unsigned short ssw = fp->un.fmt7.ssw;
419 unsigned long mmusr;
420
421#ifdef DEBUG
422 printk("ssw=%#x, fa=%#lx\n", ssw, fp->un.fmt7.faddr);
423 printk("wb1s=%#x, wb2s=%#x, wb3s=%#x\n", fp->un.fmt7.wb1s,
424 fp->un.fmt7.wb2s, fp->un.fmt7.wb3s);
425 printk ("wb2a=%lx, wb3a=%lx, wb2d=%lx, wb3d=%lx\n",
426 fp->un.fmt7.wb2a, fp->un.fmt7.wb3a,
427 fp->un.fmt7.wb2d, fp->un.fmt7.wb3d);
428#endif
429
430 if (ssw & ATC_040) {
431 unsigned long addr = fp->un.fmt7.faddr;
432 unsigned long errorcode;
433
434 /*
435 * The MMU status has to be determined AFTER the address
436 * has been corrected if there was a misaligned access (MA).
437 */
438 if (ssw & MA_040)
439 addr = (addr + 7) & -8;
440
441 /* MMU error, get the MMUSR info for this access */
442 mmusr = probe040(!(ssw & RW_040), addr, ssw);
443#ifdef DEBUG
444 printk("mmusr = %lx\n", mmusr);
445#endif
446 errorcode = 1;
447 if (!(mmusr & MMU_R_040)) {
448 /* clear the invalid atc entry */
449 __flush_tlb040_one(addr);
450 errorcode = 0;
451 }
452
453 /* despite what documentation seems to say, RMW
454 * accesses have always both the LK and RW bits set */
455 if (!(ssw & RW_040) || (ssw & LK_040))
456 errorcode |= 2;
457
458 if (do_page_fault(&fp->ptregs, addr, errorcode)) {
459#ifdef DEBUG
460 printk("do_page_fault() !=0\n");
461#endif
462 if (user_mode(&fp->ptregs)){
463 /* delay writebacks after signal delivery */
464#ifdef DEBUG
465 printk(".. was usermode - return\n");
466#endif
467 return;
468 }
469 /* disable writeback into user space from kernel
470 * (if do_page_fault didn't fix the mapping,
471 * the writeback won't do good)
472 */
473disable_wb:
474#ifdef DEBUG
475 printk(".. disabling wb2\n");
476#endif
477 if (fp->un.fmt7.wb2a == fp->un.fmt7.faddr)
478 fp->un.fmt7.wb2s &= ~WBV_040;
479 if (fp->un.fmt7.wb3a == fp->un.fmt7.faddr)
480 fp->un.fmt7.wb3s &= ~WBV_040;
481 }
482 } else {
483 /* In case of a bus error we either kill the process or expect
484 * the kernel to catch the fault, which then is also responsible
485 * for cleaning up the mess.
486 */
487 current->thread.signo = SIGBUS;
488 current->thread.faddr = fp->un.fmt7.faddr;
489 if (send_fault_sig(&fp->ptregs) >= 0)
490 printk("68040 bus error (ssw=%x, faddr=%lx)\n", ssw,
491 fp->un.fmt7.faddr);
492 goto disable_wb;
493 }
494
495 do_040writebacks(fp);
496}
497#endif /* CONFIG_M68040 */
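Both access-error paths above adjust a misaligned fault address before acting on it: access_error060() rounds it up to the next page with (addr + PAGE_SIZE - 1) & PAGE_MASK, and access_error040() rounds it up to the next 8-byte line with (addr + 7) & -8. A tiny stand-alone check of that arithmetic (the fault address is made up):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr = 0x00c01ffd;	/* hypothetical misaligned fault */

	/* 68040 style: round up to the next 8-byte line. */
	printf("line: %#lx -> %#lx\n", addr, (addr + 7) & ~7UL);

	/* 68060 style: round up to the next page boundary. */
	printf("page: %#lx -> %#lx\n", addr, (addr + PAGE_SIZE - 1) & PAGE_MASK);

	return 0;	/* both print 0xc02000 for this address */
}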
498
499#if defined(CONFIG_SUN3)
500#include <asm/sun3mmu.h>
501
502extern int mmu_emu_handle_fault (unsigned long, int, int);
503
504/* sun3 version of bus_error030 */
505
506static inline void bus_error030 (struct frame *fp)
507{
508 unsigned char buserr_type = sun3_get_buserr ();
509 unsigned long addr, errorcode;
510 unsigned short ssw = fp->un.fmtb.ssw;
511 extern unsigned long _sun3_map_test_start, _sun3_map_test_end;
512
513#ifdef DEBUG
514 if (ssw & (FC | FB))
515 printk ("Instruction fault at %#010lx\n",
516 ssw & FC ?
517 fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
518 :
519 fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
520 if (ssw & DF)
521 printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
522 ssw & RW ? "read" : "write",
523 fp->un.fmtb.daddr,
524 space_names[ssw & DFC], fp->ptregs.pc);
525#endif
526
527 /*
528 * Check if this page should be demand-mapped. This needs to go before
529 * the testing for a bad kernel-space access (demand-mapping applies
530 * to kernel accesses too).
531 */
532
533 if ((ssw & DF)
534 && (buserr_type & (SUN3_BUSERR_PROTERR | SUN3_BUSERR_INVALID))) {
535 if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 0))
536 return;
537 }
538
539 /* Check for kernel-space pagefault (BAD). */
540 if (fp->ptregs.sr & PS_S) {
541 /* kernel fault must be a data fault to user space */
542 if (! ((ssw & DF) && ((ssw & DFC) == USER_DATA))) {
543 // try checking the kernel mappings before surrender
544 if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 1))
545 return;
546 /* instruction fault or kernel data fault! */
547 if (ssw & (FC | FB))
548 printk ("Instruction fault at %#010lx\n",
549 fp->ptregs.pc);
550 if (ssw & DF) {
551 /* was this fault incurred testing bus mappings? */
552 if((fp->ptregs.pc >= (unsigned long)&_sun3_map_test_start) &&
553 (fp->ptregs.pc <= (unsigned long)&_sun3_map_test_end)) {
554 send_fault_sig(&fp->ptregs);
555 return;
556 }
557
558 printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
559 ssw & RW ? "read" : "write",
560 fp->un.fmtb.daddr,
561 space_names[ssw & DFC], fp->ptregs.pc);
562 }
563 printk ("BAD KERNEL BUSERR\n");
564
565 die_if_kernel("Oops", &fp->ptregs,0);
566 force_sig(SIGKILL, current);
567 return;
568 }
569 } else {
570 /* user fault */
571 if (!(ssw & (FC | FB)) && !(ssw & DF))
572 /* not an instruction fault or data fault! BAD */
573 panic ("USER BUSERR w/o instruction or data fault");
574 }
575
576
577 /* First handle the data fault, if any. */
578 if (ssw & DF) {
579 addr = fp->un.fmtb.daddr;
580
581// errorcode bit 0: 0 -> no page 1 -> protection fault
582// errorcode bit 1: 0 -> read fault 1 -> write fault
583
584// (buserr_type & SUN3_BUSERR_PROTERR) -> protection fault
585// (buserr_type & SUN3_BUSERR_INVALID) -> invalid page fault
586
587 if (buserr_type & SUN3_BUSERR_PROTERR)
588 errorcode = 0x01;
589 else if (buserr_type & SUN3_BUSERR_INVALID)
590 errorcode = 0x00;
591 else {
592#ifdef DEBUG
593 printk ("*** unexpected busfault type=%#04x\n", buserr_type);
594 printk ("invalid %s access at %#lx from pc %#lx\n",
595 !(ssw & RW) ? "write" : "read", addr,
596 fp->ptregs.pc);
597#endif
598 die_if_kernel ("Oops", &fp->ptregs, buserr_type);
599 force_sig (SIGBUS, current);
600 return;
601 }
602
 603// TODO: what does the RM bit indicate? --m
604 if (!(ssw & RW) || ssw & RM)
605 errorcode |= 0x02;
606
607 /* Handle page fault. */
608 do_page_fault (&fp->ptregs, addr, errorcode);
609
610 /* Retry the data fault now. */
611 return;
612 }
613
614 /* Now handle the instruction fault. */
615
616 /* Get the fault address. */
617 if (fp->ptregs.format == 0xA)
618 addr = fp->ptregs.pc + 4;
619 else
620 addr = fp->un.fmtb.baddr;
621 if (ssw & FC)
622 addr -= 2;
623
624 if (buserr_type & SUN3_BUSERR_INVALID) {
625 if (!mmu_emu_handle_fault (fp->un.fmtb.daddr, 1, 0))
626 do_page_fault (&fp->ptregs, addr, 0);
627 } else {
628#ifdef DEBUG
629 printk ("protection fault on insn access (segv).\n");
630#endif
631 force_sig (SIGSEGV, current);
632 }
633}
634#else
635#if defined(CPU_M68020_OR_M68030)
636static inline void bus_error030 (struct frame *fp)
637{
638 volatile unsigned short temp;
639 unsigned short mmusr;
640 unsigned long addr, errorcode;
641 unsigned short ssw = fp->un.fmtb.ssw;
642#ifdef DEBUG
643 unsigned long desc;
644
645 printk ("pid = %x ", current->pid);
646 printk ("SSW=%#06x ", ssw);
647
648 if (ssw & (FC | FB))
649 printk ("Instruction fault at %#010lx\n",
650 ssw & FC ?
651 fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
652 :
653 fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
654 if (ssw & DF)
655 printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
656 ssw & RW ? "read" : "write",
657 fp->un.fmtb.daddr,
658 space_names[ssw & DFC], fp->ptregs.pc);
659#endif
660
661 /* ++andreas: If a data fault and an instruction fault happen
662 at the same time map in both pages. */
663
664 /* First handle the data fault, if any. */
665 if (ssw & DF) {
666 addr = fp->un.fmtb.daddr;
667
668#ifdef DEBUG
669 asm volatile ("ptestr %3,%2@,#7,%0\n\t"
670 "pmove %%psr,%1@"
671 : "=a&" (desc)
672 : "a" (&temp), "a" (addr), "d" (ssw));
673#else
674 asm volatile ("ptestr %2,%1@,#7\n\t"
675 "pmove %%psr,%0@"
676 : : "a" (&temp), "a" (addr), "d" (ssw));
677#endif
678 mmusr = temp;
679
680#ifdef DEBUG
681 printk("mmusr is %#x for addr %#lx in task %p\n",
682 mmusr, addr, current);
683 printk("descriptor address is %#lx, contents %#lx\n",
684 __va(desc), *(unsigned long *)__va(desc));
685#endif
686
687 errorcode = (mmusr & MMU_I) ? 0 : 1;
688 if (!(ssw & RW) || (ssw & RM))
689 errorcode |= 2;
690
691 if (mmusr & (MMU_I | MMU_WP)) {
692 if (ssw & 4) {
693 printk("Data %s fault at %#010lx in %s (pc=%#lx)\n",
694 ssw & RW ? "read" : "write",
695 fp->un.fmtb.daddr,
696 space_names[ssw & DFC], fp->ptregs.pc);
697 goto buserr;
698 }
699 /* Don't try to do anything further if an exception was
700 handled. */
701 if (do_page_fault (&fp->ptregs, addr, errorcode) < 0)
702 return;
703 } else if (!(mmusr & MMU_I)) {
704 /* probably a 020 cas fault */
705 if (!(ssw & RM) && send_fault_sig(&fp->ptregs) > 0)
706 printk("unexpected bus error (%#x,%#x)\n", ssw, mmusr);
707 } else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
708 printk("invalid %s access at %#lx from pc %#lx\n",
709 !(ssw & RW) ? "write" : "read", addr,
710 fp->ptregs.pc);
711 die_if_kernel("Oops",&fp->ptregs,mmusr);
712 force_sig(SIGSEGV, current);
713 return;
714 } else {
715#if 0
716 static volatile long tlong;
717#endif
718
719 printk("weird %s access at %#lx from pc %#lx (ssw is %#x)\n",
720 !(ssw & RW) ? "write" : "read", addr,
721 fp->ptregs.pc, ssw);
722 asm volatile ("ptestr #1,%1@,#0\n\t"
723 "pmove %%psr,%0@"
724 : /* no outputs */
725 : "a" (&temp), "a" (addr));
726 mmusr = temp;
727
728 printk ("level 0 mmusr is %#x\n", mmusr);
729#if 0
730 asm volatile ("pmove %%tt0,%0@"
731 : /* no outputs */
732 : "a" (&tlong));
733 printk("tt0 is %#lx, ", tlong);
734 asm volatile ("pmove %%tt1,%0@"
735 : /* no outputs */
736 : "a" (&tlong));
737 printk("tt1 is %#lx\n", tlong);
738#endif
739#ifdef DEBUG
740 printk("Unknown SIGSEGV - 1\n");
741#endif
742 die_if_kernel("Oops",&fp->ptregs,mmusr);
743 force_sig(SIGSEGV, current);
744 return;
745 }
746
747 /* setup an ATC entry for the access about to be retried */
748 if (!(ssw & RW) || (ssw & RM))
749 asm volatile ("ploadw %1,%0@" : /* no outputs */
750 : "a" (addr), "d" (ssw));
751 else
752 asm volatile ("ploadr %1,%0@" : /* no outputs */
753 : "a" (addr), "d" (ssw));
754 }
755
756 /* Now handle the instruction fault. */
757
758 if (!(ssw & (FC|FB)))
759 return;
760
761 if (fp->ptregs.sr & PS_S) {
762 printk("Instruction fault at %#010lx\n",
763 fp->ptregs.pc);
764 buserr:
765 printk ("BAD KERNEL BUSERR\n");
766 die_if_kernel("Oops",&fp->ptregs,0);
767 force_sig(SIGKILL, current);
768 return;
769 }
770
771 /* get the fault address */
772 if (fp->ptregs.format == 10)
773 addr = fp->ptregs.pc + 4;
774 else
775 addr = fp->un.fmtb.baddr;
776 if (ssw & FC)
777 addr -= 2;
778
779 if ((ssw & DF) && ((addr ^ fp->un.fmtb.daddr) & PAGE_MASK) == 0)
780 /* Insn fault on same page as data fault. But we
781 should still create the ATC entry. */
782 goto create_atc_entry;
783
784#ifdef DEBUG
785 asm volatile ("ptestr #1,%2@,#7,%0\n\t"
786 "pmove %%psr,%1@"
787 : "=a&" (desc)
788 : "a" (&temp), "a" (addr));
789#else
790 asm volatile ("ptestr #1,%1@,#7\n\t"
791 "pmove %%psr,%0@"
792 : : "a" (&temp), "a" (addr));
793#endif
794 mmusr = temp;
795
796#ifdef DEBUG
797 printk ("mmusr is %#x for addr %#lx in task %p\n",
798 mmusr, addr, current);
799 printk ("descriptor address is %#lx, contents %#lx\n",
800 __va(desc), *(unsigned long *)__va(desc));
801#endif
802
803 if (mmusr & MMU_I)
804 do_page_fault (&fp->ptregs, addr, 0);
805 else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
806 printk ("invalid insn access at %#lx from pc %#lx\n",
807 addr, fp->ptregs.pc);
808#ifdef DEBUG
809 printk("Unknown SIGSEGV - 2\n");
810#endif
811 die_if_kernel("Oops",&fp->ptregs,mmusr);
812 force_sig(SIGSEGV, current);
813 return;
814 }
815
816create_atc_entry:
817 /* setup an ATC entry for the access about to be retried */
818 asm volatile ("ploadr #2,%0@" : /* no outputs */
819 : "a" (addr));
820}
821#endif /* CPU_M68020_OR_M68030 */
822#endif /* !CONFIG_SUN3 */
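Both bus_error030() variants hand do_page_fault() the same two-bit error code spelled out in the comments above: bit 0 set for a protection fault (clear for a missing page), bit 1 set for a write access. A minimal sketch of that encoding (make_errorcode() is a made-up helper):

#include <stdio.h>

#define FAULT_PROT	0x01	/* bit 0: protection fault vs. missing page */
#define FAULT_WRITE	0x02	/* bit 1: write access vs. read access */

static unsigned long make_errorcode(int is_protection, int is_write)
{
	unsigned long errorcode = is_protection ? FAULT_PROT : 0;

	if (is_write)
		errorcode |= FAULT_WRITE;
	return errorcode;
}

int main(void)
{
	printf("write to missing page : %#lx\n", make_errorcode(0, 1));	/* 0x2 */
	printf("write, protected page : %#lx\n", make_errorcode(1, 1));	/* 0x3 */
	return 0;
}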
823
824asmlinkage void buserr_c(struct frame *fp)
825{
826 /* Only set esp0 if coming from user mode */
827 if (user_mode(&fp->ptregs))
828 current->thread.esp0 = (unsigned long) fp;
829
830#ifdef DEBUG
831 printk ("*** Bus Error *** Format is %x\n", fp->ptregs.format);
832#endif
833
834 switch (fp->ptregs.format) {
835#if defined (CONFIG_M68060)
836 case 4: /* 68060 access error */
837 access_error060 (fp);
838 break;
839#endif
840#if defined (CONFIG_M68040)
841 case 0x7: /* 68040 access error */
842 access_error040 (fp);
843 break;
844#endif
845#if defined (CPU_M68020_OR_M68030)
846 case 0xa:
847 case 0xb:
848 bus_error030 (fp);
849 break;
850#endif
851 default:
852 die_if_kernel("bad frame format",&fp->ptregs,0);
853#ifdef DEBUG
854 printk("Unknown SIGSEGV - 4\n");
855#endif
856 force_sig(SIGSEGV, current);
857 }
858}
859
860
861static int kstack_depth_to_print = 48;
862
863void show_trace(unsigned long *stack)
864{
865 unsigned long *endstack;
866 unsigned long addr;
867 int i;
868
869 printk("Call Trace:");
870 addr = (unsigned long)stack + THREAD_SIZE - 1;
871 endstack = (unsigned long *)(addr & -THREAD_SIZE);
872 i = 0;
873 while (stack + 1 <= endstack) {
874 addr = *stack++;
875 /*
876 * If the address is either in the text segment of the
877 * kernel, or in the region which contains vmalloc'ed
878 * memory, it *may* be the address of a calling
879 * routine; if so, print it so that someone tracing
880 * down the cause of the crash will be able to figure
881 * out the call path that was taken.
882 */
883 if (__kernel_text_address(addr)) {
884#ifndef CONFIG_KALLSYMS
885 if (i % 5 == 0)
886 printk("\n ");
887#endif
888 printk(" [<%08lx>] %pS\n", addr, (void *)addr);
889 i++;
890 }
891 }
892 printk("\n");
893}
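show_trace() (and show_stack() below) find the top of the current kernel stack by arithmetic alone: kernel stacks are THREAD_SIZE-aligned, so rounding any in-stack address up to the next THREAD_SIZE boundary yields the end of the stack. A stand-alone check of that computation, assuming 8 KiB stacks (the sample pointer is made up):

#include <stdio.h>

#define THREAD_SIZE 8192UL	/* assumption: 8 KiB kernel stacks */

int main(void)
{
	unsigned long sp = 0x00c06a40;	/* some address inside the stack */

	/* Round up to the next THREAD_SIZE boundary, as show_trace() does. */
	unsigned long endstack = (sp + THREAD_SIZE - 1) & -THREAD_SIZE;

	printf("sp=%#lx endstack=%#lx\n", sp, endstack);	/* 0xc08000 */
	return 0;
}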
894
895void show_registers(struct pt_regs *regs)
896{
897 struct frame *fp = (struct frame *)regs;
898 mm_segment_t old_fs = get_fs();
899 u16 c, *cp;
900 unsigned long addr;
901 int i;
902
903 print_modules();
904 printk("PC: [<%08lx>] %pS\n", regs->pc, (void *)regs->pc);
905 printk("SR: %04x SP: %p a2: %08lx\n", regs->sr, regs, regs->a2);
906 printk("d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n",
907 regs->d0, regs->d1, regs->d2, regs->d3);
908 printk("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
909 regs->d4, regs->d5, regs->a0, regs->a1);
910
911 printk("Process %s (pid: %d, task=%p)\n",
912 current->comm, task_pid_nr(current), current);
913 addr = (unsigned long)&fp->un;
914 printk("Frame format=%X ", regs->format);
915 switch (regs->format) {
916 case 0x2:
917 printk("instr addr=%08lx\n", fp->un.fmt2.iaddr);
918 addr += sizeof(fp->un.fmt2);
919 break;
920 case 0x3:
921 printk("eff addr=%08lx\n", fp->un.fmt3.effaddr);
922 addr += sizeof(fp->un.fmt3);
923 break;
924 case 0x4:
925 printk((CPU_IS_060 ? "fault addr=%08lx fslw=%08lx\n"
926 : "eff addr=%08lx pc=%08lx\n"),
927 fp->un.fmt4.effaddr, fp->un.fmt4.pc);
928 addr += sizeof(fp->un.fmt4);
929 break;
930 case 0x7:
931 printk("eff addr=%08lx ssw=%04x faddr=%08lx\n",
932 fp->un.fmt7.effaddr, fp->un.fmt7.ssw, fp->un.fmt7.faddr);
933 printk("wb 1 stat/addr/data: %04x %08lx %08lx\n",
934 fp->un.fmt7.wb1s, fp->un.fmt7.wb1a, fp->un.fmt7.wb1dpd0);
935 printk("wb 2 stat/addr/data: %04x %08lx %08lx\n",
936 fp->un.fmt7.wb2s, fp->un.fmt7.wb2a, fp->un.fmt7.wb2d);
937 printk("wb 3 stat/addr/data: %04x %08lx %08lx\n",
938 fp->un.fmt7.wb3s, fp->un.fmt7.wb3a, fp->un.fmt7.wb3d);
939 printk("push data: %08lx %08lx %08lx %08lx\n",
940 fp->un.fmt7.wb1dpd0, fp->un.fmt7.pd1, fp->un.fmt7.pd2,
941 fp->un.fmt7.pd3);
942 addr += sizeof(fp->un.fmt7);
943 break;
944 case 0x9:
945 printk("instr addr=%08lx\n", fp->un.fmt9.iaddr);
946 addr += sizeof(fp->un.fmt9);
947 break;
948 case 0xa:
949 printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
950 fp->un.fmta.ssw, fp->un.fmta.isc, fp->un.fmta.isb,
951 fp->un.fmta.daddr, fp->un.fmta.dobuf);
952 addr += sizeof(fp->un.fmta);
953 break;
954 case 0xb:
955 printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
956 fp->un.fmtb.ssw, fp->un.fmtb.isc, fp->un.fmtb.isb,
957 fp->un.fmtb.daddr, fp->un.fmtb.dobuf);
958 printk("baddr=%08lx dibuf=%08lx ver=%x\n",
959 fp->un.fmtb.baddr, fp->un.fmtb.dibuf, fp->un.fmtb.ver);
960 addr += sizeof(fp->un.fmtb);
961 break;
962 default:
963 printk("\n");
964 }
965 show_stack(NULL, (unsigned long *)addr);
966
967 printk("Code:");
968 set_fs(KERNEL_DS);
969 cp = (u16 *)regs->pc;
970 for (i = -8; i < 16; i++) {
971 if (get_user(c, cp + i) && i >= 0) {
972 printk(" Bad PC value.");
973 break;
974 }
975 printk(i ? " %04x" : " <%04x>", c);
976 }
977 set_fs(old_fs);
978 printk ("\n");
979}
980
981void show_stack(struct task_struct *task, unsigned long *stack)
982{
983 unsigned long *p;
984 unsigned long *endstack;
985 int i;
986
987 if (!stack) {
988 if (task)
989 stack = (unsigned long *)task->thread.esp0;
990 else
991 stack = (unsigned long *)&stack;
992 }
993 endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE);
994
995 printk("Stack from %08lx:", (unsigned long)stack);
996 p = stack;
997 for (i = 0; i < kstack_depth_to_print; i++) {
998 if (p + 1 > endstack)
999 break;
1000 if (i % 8 == 0)
1001 printk("\n ");
1002 printk(" %08lx", *p++);
1003 }
1004 printk("\n");
1005 show_trace(stack);
1006}
1007
1008/*
1009 * The architecture-independent backtrace generator
1010 */
1011void dump_stack(void)
1012{
1013 unsigned long stack;
1014
1015 show_trace(&stack);
1016}
1017
1018EXPORT_SYMBOL(dump_stack);
1019
1020void bad_super_trap (struct frame *fp)
1021{
1022 console_verbose();
1023 if (fp->ptregs.vector < 4 * ARRAY_SIZE(vec_names))
1024 printk ("*** %s *** FORMAT=%X\n",
1025 vec_names[(fp->ptregs.vector) >> 2],
1026 fp->ptregs.format);
1027 else
1028 printk ("*** Exception %d *** FORMAT=%X\n",
1029 (fp->ptregs.vector) >> 2,
1030 fp->ptregs.format);
1031 if (fp->ptregs.vector >> 2 == VEC_ADDRERR && CPU_IS_020_OR_030) {
1032 unsigned short ssw = fp->un.fmtb.ssw;
1033
1034 printk ("SSW=%#06x ", ssw);
1035
1036 if (ssw & RC)
1037 printk ("Pipe stage C instruction fault at %#010lx\n",
1038 (fp->ptregs.format) == 0xA ?
1039 fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2);
1040 if (ssw & RB)
1041 printk ("Pipe stage B instruction fault at %#010lx\n",
1042 (fp->ptregs.format) == 0xA ?
1043 fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
1044 if (ssw & DF)
1045 printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
1046 ssw & RW ? "read" : "write",
1047 fp->un.fmtb.daddr, space_names[ssw & DFC],
1048 fp->ptregs.pc);
1049 }
1050 printk ("Current process id is %d\n", task_pid_nr(current));
1051 die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0);
1052}
1053
1054asmlinkage void trap_c(struct frame *fp)
1055{
1056 int sig;
1057 siginfo_t info;
1058
1059 if (fp->ptregs.sr & PS_S) {
1060 if (fp->ptregs.vector == VEC_TRACE << 2) {
1061 /* traced a trapping instruction on a 68020/30,
1062 * real exception will be executed afterwards.
1063 */
1064 } else if (!handle_kernel_fault(&fp->ptregs))
1065 bad_super_trap(fp);
1066 return;
1067 }
1068
1069 /* send the appropriate signal to the user program */
1070 switch ((fp->ptregs.vector) >> 2) {
1071 case VEC_ADDRERR:
1072 info.si_code = BUS_ADRALN;
1073 sig = SIGBUS;
1074 break;
1075 case VEC_ILLEGAL:
1076 case VEC_LINE10:
1077 case VEC_LINE11:
1078 info.si_code = ILL_ILLOPC;
1079 sig = SIGILL;
1080 break;
1081 case VEC_PRIV:
1082 info.si_code = ILL_PRVOPC;
1083 sig = SIGILL;
1084 break;
1085 case VEC_COPROC:
1086 info.si_code = ILL_COPROC;
1087 sig = SIGILL;
1088 break;
1089 case VEC_TRAP1:
1090 case VEC_TRAP2:
1091 case VEC_TRAP3:
1092 case VEC_TRAP4:
1093 case VEC_TRAP5:
1094 case VEC_TRAP6:
1095 case VEC_TRAP7:
1096 case VEC_TRAP8:
1097 case VEC_TRAP9:
1098 case VEC_TRAP10:
1099 case VEC_TRAP11:
1100 case VEC_TRAP12:
1101 case VEC_TRAP13:
1102 case VEC_TRAP14:
1103 info.si_code = ILL_ILLTRP;
1104 sig = SIGILL;
1105 break;
1106 case VEC_FPBRUC:
1107 case VEC_FPOE:
1108 case VEC_FPNAN:
1109 info.si_code = FPE_FLTINV;
1110 sig = SIGFPE;
1111 break;
1112 case VEC_FPIR:
1113 info.si_code = FPE_FLTRES;
1114 sig = SIGFPE;
1115 break;
1116 case VEC_FPDIVZ:
1117 info.si_code = FPE_FLTDIV;
1118 sig = SIGFPE;
1119 break;
1120 case VEC_FPUNDER:
1121 info.si_code = FPE_FLTUND;
1122 sig = SIGFPE;
1123 break;
1124 case VEC_FPOVER:
1125 info.si_code = FPE_FLTOVF;
1126 sig = SIGFPE;
1127 break;
1128 case VEC_ZERODIV:
1129 info.si_code = FPE_INTDIV;
1130 sig = SIGFPE;
1131 break;
1132 case VEC_CHK:
1133 case VEC_TRAP:
1134 info.si_code = FPE_INTOVF;
1135 sig = SIGFPE;
1136 break;
1137 case VEC_TRACE: /* ptrace single step */
1138 info.si_code = TRAP_TRACE;
1139 sig = SIGTRAP;
1140 break;
1141 case VEC_TRAP15: /* breakpoint */
1142 info.si_code = TRAP_BRKPT;
1143 sig = SIGTRAP;
1144 break;
1145 default:
1146 info.si_code = ILL_ILLOPC;
1147 sig = SIGILL;
1148 break;
1149 }
1150 info.si_signo = sig;
1151 info.si_errno = 0;
1152 switch (fp->ptregs.format) {
1153 default:
1154 info.si_addr = (void *) fp->ptregs.pc;
1155 break;
1156 case 2:
1157 info.si_addr = (void *) fp->un.fmt2.iaddr;
1158 break;
1159 case 7:
1160 info.si_addr = (void *) fp->un.fmt7.effaddr;
1161 break;
1162 case 9:
1163 info.si_addr = (void *) fp->un.fmt9.iaddr;
1164 break;
1165 case 10:
1166 info.si_addr = (void *) fp->un.fmta.daddr;
1167 break;
1168 case 11:
1169 info.si_addr = (void *) fp->un.fmtb.daddr;
1170 break;
1171 }
1172 force_sig_info (sig, &info, current);
1173}
1174
1175void die_if_kernel (char *str, struct pt_regs *fp, int nr)
1176{
1177 if (!(fp->sr & PS_S))
1178 return;
1179
1180 console_verbose();
1181 printk("%s: %08x\n",str,nr);
1182 show_registers(fp);
1183 add_taint(TAINT_DIE);
1184 do_exit(SIGSEGV);
1185}
1186
1187/*
1188 * This function is called if an error occurs while accessing
1189 * user-space from the fpsp040 code.
1190 */
1191asmlinkage void fpsp040_die(void)
1192{
1193 do_exit(SIGSEGV);
1194}
1195
1196#ifdef CONFIG_M68KFPU_EMU
1197asmlinkage void fpemu_signal(int signal, int code, void *addr)
1198{
1199 siginfo_t info;
1200
1201 info.si_signo = signal;
1202 info.si_errno = 0;
1203 info.si_code = code;
1204 info.si_addr = addr;
1205 force_sig_info(signal, &info, current);
1206}
1207#endif
diff --git a/arch/m68k/kernel/traps_no.c b/arch/m68k/kernel/traps_no.c
new file mode 100644
index 00000000000..e67b8c80695
--- /dev/null
+++ b/arch/m68k/kernel/traps_no.c
@@ -0,0 +1,361 @@
1/*
2 * linux/arch/m68knommu/kernel/traps.c
3 *
4 * Copyright (C) 1993, 1994 by Hamish Macdonald
5 *
6 * 68040 fixes by Michael Rausch
7 * 68040 fixes by Martin Apel
8 * 68060 fixes by Roman Hodek
9 * 68060 fixes by Jesper Skov
10 *
11 * This file is subject to the terms and conditions of the GNU General Public
12 * License. See the file COPYING in the main directory of this archive
13 * for more details.
14 */
15
16/*
17 * Sets up all exception vectors
18 */
19#include <linux/sched.h>
20#include <linux/signal.h>
21#include <linux/kernel.h>
22#include <linux/mm.h>
23#include <linux/module.h>
24#include <linux/types.h>
25#include <linux/user.h>
26#include <linux/string.h>
27#include <linux/linkage.h>
28#include <linux/init.h>
29#include <linux/ptrace.h>
30#include <linux/kallsyms.h>
31
32#include <asm/setup.h>
33#include <asm/fpu.h>
34#include <asm/system.h>
35#include <asm/uaccess.h>
36#include <asm/traps.h>
37#include <asm/pgtable.h>
38#include <asm/machdep.h>
39#include <asm/siginfo.h>
40
41static char const * const vec_names[] = {
42 "RESET SP", "RESET PC", "BUS ERROR", "ADDRESS ERROR",
43 "ILLEGAL INSTRUCTION", "ZERO DIVIDE", "CHK", "TRAPcc",
44 "PRIVILEGE VIOLATION", "TRACE", "LINE 1010", "LINE 1111",
45 "UNASSIGNED RESERVED 12", "COPROCESSOR PROTOCOL VIOLATION",
46 "FORMAT ERROR", "UNINITIALIZED INTERRUPT",
47 "UNASSIGNED RESERVED 16", "UNASSIGNED RESERVED 17",
48 "UNASSIGNED RESERVED 18", "UNASSIGNED RESERVED 19",
49 "UNASSIGNED RESERVED 20", "UNASSIGNED RESERVED 21",
50 "UNASSIGNED RESERVED 22", "UNASSIGNED RESERVED 23",
51 "SPURIOUS INTERRUPT", "LEVEL 1 INT", "LEVEL 2 INT", "LEVEL 3 INT",
52 "LEVEL 4 INT", "LEVEL 5 INT", "LEVEL 6 INT", "LEVEL 7 INT",
53 "SYSCALL", "TRAP #1", "TRAP #2", "TRAP #3",
54 "TRAP #4", "TRAP #5", "TRAP #6", "TRAP #7",
55 "TRAP #8", "TRAP #9", "TRAP #10", "TRAP #11",
56 "TRAP #12", "TRAP #13", "TRAP #14", "TRAP #15",
57 "FPCP BSUN", "FPCP INEXACT", "FPCP DIV BY 0", "FPCP UNDERFLOW",
58 "FPCP OPERAND ERROR", "FPCP OVERFLOW", "FPCP SNAN",
59 "FPCP UNSUPPORTED OPERATION",
60 "MMU CONFIGURATION ERROR"
61};
62
63void die_if_kernel(char *str, struct pt_regs *fp, int nr)
64{
65 if (!(fp->sr & PS_S))
66 return;
67
68 console_verbose();
69 printk(KERN_EMERG "%s: %08x\n",str,nr);
70 printk(KERN_EMERG "PC: [<%08lx>]\nSR: %04x SP: %p a2: %08lx\n",
71 fp->pc, fp->sr, fp, fp->a2);
72 printk(KERN_EMERG "d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n",
73 fp->d0, fp->d1, fp->d2, fp->d3);
74 printk(KERN_EMERG "d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
75 fp->d4, fp->d5, fp->a0, fp->a1);
76
77 printk(KERN_EMERG "Process %s (pid: %d, stackpage=%08lx)\n",
78 current->comm, current->pid, PAGE_SIZE+(unsigned long)current);
79 show_stack(NULL, (unsigned long *)(fp + 1));
80 add_taint(TAINT_DIE);
81 do_exit(SIGSEGV);
82}
83
84asmlinkage void buserr_c(struct frame *fp)
85{
86 /* Only set esp0 if coming from user mode */
87 if (user_mode(&fp->ptregs))
88 current->thread.esp0 = (unsigned long) fp;
89
90#if defined(DEBUG)
91 printk (KERN_DEBUG "*** Bus Error *** Format is %x\n", fp->ptregs.format);
92#endif
93
94 die_if_kernel("bad frame format",&fp->ptregs,0);
95#if defined(DEBUG)
96 printk(KERN_DEBUG "Unknown SIGSEGV - 4\n");
97#endif
98 force_sig(SIGSEGV, current);
99}
100
101static void print_this_address(unsigned long addr, int i)
102{
103#ifdef CONFIG_KALLSYMS
104 printk(KERN_EMERG " [%08lx] ", addr);
105 print_symbol(KERN_CONT "%s\n", addr);
106#else
107 if (i % 5)
108 printk(KERN_CONT " [%08lx] ", addr);
109 else
110 printk(KERN_EMERG " [%08lx] ", addr);
111 i++;
112#endif
113}
114
115int kstack_depth_to_print = 48;
116
117static void __show_stack(struct task_struct *task, unsigned long *stack)
118{
119 unsigned long *endstack, addr;
120#ifdef CONFIG_FRAME_POINTER
121 unsigned long *last_stack;
122#endif
123 int i;
124
125 if (!stack)
126 stack = (unsigned long *)task->thread.ksp;
127
128 addr = (unsigned long) stack;
129 endstack = (unsigned long *) PAGE_ALIGN(addr);
130
131 printk(KERN_EMERG "Stack from %08lx:", (unsigned long)stack);
132 for (i = 0; i < kstack_depth_to_print; i++) {
133 if (stack + 1 + i > endstack)
134 break;
135 if (i % 8 == 0)
136 printk(KERN_EMERG " ");
137 printk(KERN_CONT " %08lx", *(stack + i));
138 }
139 printk("\n");
140 i = 0;
141
142#ifdef CONFIG_FRAME_POINTER
143 printk(KERN_EMERG "Call Trace:\n");
144
145 last_stack = stack - 1;
146 while (stack <= endstack && stack > last_stack) {
147
148 addr = *(stack + 1);
149 print_this_address(addr, i);
150 i++;
151
152 last_stack = stack;
153 stack = (unsigned long *)*stack;
154 }
155 printk("\n");
156#else
157 printk(KERN_EMERG "Call Trace with CONFIG_FRAME_POINTER disabled:\n");
158 while (stack <= endstack) {
159 addr = *stack++;
160 /*
161 * If the address is either in the text segment of the kernel,
162 * or in a region which is occupied by a module then it *may*
163 * be the address of a calling routine; if so, print it so that
164 * someone tracing down the cause of the crash will be able to
165 * figure out the call path that was taken.
166 */
167 if (__kernel_text_address(addr)) {
168 print_this_address(addr, i);
169 i++;
170 }
171 }
172 printk(KERN_CONT "\n");
173#endif
174}
175
176void bad_super_trap(struct frame *fp)
177{
178 int vector = (fp->ptregs.vector >> 2) & 0xff;
179
180 console_verbose();
181 if (vector < ARRAY_SIZE(vec_names))
182 printk (KERN_WARNING "*** %s *** FORMAT=%X\n",
183 vec_names[vector],
184 fp->ptregs.format);
185 else
186 printk (KERN_WARNING "*** Exception %d *** FORMAT=%X\n",
187 vector,
188 fp->ptregs.format);
189 printk (KERN_WARNING "Current process id is %d\n", current->pid);
190 die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0);
191}
192
193asmlinkage void trap_c(struct frame *fp)
194{
195 int sig;
196 int vector = (fp->ptregs.vector >> 2) & 0xff;
197 siginfo_t info;
198
199 if (fp->ptregs.sr & PS_S) {
200 if (vector == VEC_TRACE) {
201 /* traced a trapping instruction */
202 } else
203 bad_super_trap(fp);
204 return;
205 }
206
207 /* send the appropriate signal to the user program */
208 switch (vector) {
209 case VEC_ADDRERR:
210 info.si_code = BUS_ADRALN;
211 sig = SIGBUS;
212 break;
213 case VEC_ILLEGAL:
214 case VEC_LINE10:
215 case VEC_LINE11:
216 info.si_code = ILL_ILLOPC;
217 sig = SIGILL;
218 break;
219 case VEC_PRIV:
220 info.si_code = ILL_PRVOPC;
221 sig = SIGILL;
222 break;
223 case VEC_COPROC:
224 info.si_code = ILL_COPROC;
225 sig = SIGILL;
226 break;
227 case VEC_TRAP1: /* gdbserver breakpoint */
228 fp->ptregs.pc -= 2;
229 info.si_code = TRAP_TRACE;
230 sig = SIGTRAP;
231 break;
232 case VEC_TRAP2:
233 case VEC_TRAP3:
234 case VEC_TRAP4:
235 case VEC_TRAP5:
236 case VEC_TRAP6:
237 case VEC_TRAP7:
238 case VEC_TRAP8:
239 case VEC_TRAP9:
240 case VEC_TRAP10:
241 case VEC_TRAP11:
242 case VEC_TRAP12:
243 case VEC_TRAP13:
244 case VEC_TRAP14:
245 info.si_code = ILL_ILLTRP;
246 sig = SIGILL;
247 break;
248 case VEC_FPBRUC:
249 case VEC_FPOE:
250 case VEC_FPNAN:
251 info.si_code = FPE_FLTINV;
252 sig = SIGFPE;
253 break;
254 case VEC_FPIR:
255 info.si_code = FPE_FLTRES;
256 sig = SIGFPE;
257 break;
258 case VEC_FPDIVZ:
259 info.si_code = FPE_FLTDIV;
260 sig = SIGFPE;
261 break;
262 case VEC_FPUNDER:
263 info.si_code = FPE_FLTUND;
264 sig = SIGFPE;
265 break;
266 case VEC_FPOVER:
267 info.si_code = FPE_FLTOVF;
268 sig = SIGFPE;
269 break;
270 case VEC_ZERODIV:
271 info.si_code = FPE_INTDIV;
272 sig = SIGFPE;
273 break;
274 case VEC_CHK:
275 case VEC_TRAP:
276 info.si_code = FPE_INTOVF;
277 sig = SIGFPE;
278 break;
279 case VEC_TRACE: /* ptrace single step */
280 info.si_code = TRAP_TRACE;
281 sig = SIGTRAP;
282 break;
283 case VEC_TRAP15: /* breakpoint */
284 info.si_code = TRAP_BRKPT;
285 sig = SIGTRAP;
286 break;
287 default:
288 info.si_code = ILL_ILLOPC;
289 sig = SIGILL;
290 break;
291 }
292 info.si_signo = sig;
293 info.si_errno = 0;
294 switch (fp->ptregs.format) {
295 default:
296 info.si_addr = (void *) fp->ptregs.pc;
297 break;
298 case 2:
299 info.si_addr = (void *) fp->un.fmt2.iaddr;
300 break;
301 case 7:
302 info.si_addr = (void *) fp->un.fmt7.effaddr;
303 break;
304 case 9:
305 info.si_addr = (void *) fp->un.fmt9.iaddr;
306 break;
307 case 10:
308 info.si_addr = (void *) fp->un.fmta.daddr;
309 break;
310 case 11:
311 info.si_addr = (void *) fp->un.fmtb.daddr;
312 break;
313 }
314 force_sig_info (sig, &info, current);
315}
316
317asmlinkage void set_esp0(unsigned long ssp)
318{
319 current->thread.esp0 = ssp;
320}
321
322/*
323 * The architecture-independent backtrace generator
324 */
325void dump_stack(void)
326{
327 /*
328 * We need frame pointers for this little trick, which works as follows:
329 *
330 * +------------+ 0x00
331 * | Next SP | -> 0x0c
332 * +------------+ 0x04
333 * | Caller |
334 * +------------+ 0x08
335 * | Local vars | -> our stack var
336 * +------------+ 0x0c
337 * | Next SP | -> 0x18, that is what we pass to show_stack()
338 * +------------+ 0x10
339 * | Caller |
340 * +------------+ 0x14
341 * | Local vars |
342 * +------------+ 0x18
343 * | ... |
344 * +------------+
345 */
346
347 unsigned long *stack;
348
349 stack = (unsigned long *)&stack;
350 stack++;
351 __show_stack(current, stack);
352}
353EXPORT_SYMBOL(dump_stack);
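A user-space sketch of the frame-pointer walk drawn above; it is purely illustrative (it builds a fake chain of frames rather than reading the real stack), but it follows the saved frame pointer and prints the saved return address the same way __show_stack() does under CONFIG_FRAME_POINTER:

#include <stdio.h>

struct fake_frame {
	struct fake_frame *next_fp;	/* saved frame pointer ("Next SP") */
	unsigned long return_addr;	/* saved return address ("Caller") */
};

int main(void)
{
	/* A tiny made-up chain: f0 -> f1 -> f2 -> end. */
	struct fake_frame f2 = { NULL, 0x3000 };
	struct fake_frame f1 = { &f2,  0x2000 };
	struct fake_frame f0 = { &f1,  0x1000 };
	struct fake_frame *fp;

	/* Print the caller of each frame, then follow the saved frame
	 * pointer; the kernel version additionally checks that the pointer
	 * stays inside the stack and keeps moving towards its end. */
	for (fp = &f0; fp; fp = fp->next_fp)
		printf("  [%08lx]\n", fp->return_addr);

	return 0;
}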
354
355void show_stack(struct task_struct *task, unsigned long *stack)
356{
357 if (!stack && !task)
358 dump_stack();
359 else
360 __show_stack(task, stack);
361}
diff --git a/arch/m68k/kernel/vmlinux.lds_mm.S b/arch/m68k/kernel/vmlinux.lds_mm.S
new file mode 100644
index 00000000000..99ba315bd0a
--- /dev/null
+++ b/arch/m68k/kernel/vmlinux.lds_mm.S
@@ -0,0 +1,10 @@
1PHDRS
2{
3 text PT_LOAD FILEHDR PHDRS FLAGS (7);
4 data PT_LOAD FLAGS (7);
5}
6#ifdef CONFIG_SUN3
7#include "vmlinux-sun3.lds"
8#else
9#include "vmlinux-std.lds"
10#endif
diff --git a/arch/m68k/kernel/vmlinux.lds_no.S b/arch/m68k/kernel/vmlinux.lds_no.S
new file mode 100644
index 00000000000..7dc4087a954
--- /dev/null
+++ b/arch/m68k/kernel/vmlinux.lds_no.S
@@ -0,0 +1,188 @@
1/*
2 * vmlinux.lds.S -- master linker script for m68knommu arch
3 *
4 * (C) Copyright 2002-2006, Greg Ungerer <gerg@snapgear.com>
5 *
6 * This linker script is equipped to build either ROM loaded or RAM
7 * run kernels.
8 */
9
10#include <asm-generic/vmlinux.lds.h>
11#include <asm/page.h>
12#include <asm/thread_info.h>
13
14#if defined(CONFIG_RAMKERNEL)
15#define RAM_START CONFIG_KERNELBASE
16#define RAM_LENGTH (CONFIG_RAMBASE + CONFIG_RAMSIZE - CONFIG_KERNELBASE)
17#define TEXT ram
18#define DATA ram
19#define INIT ram
20#define BSSS ram
21#endif
22#if defined(CONFIG_ROMKERNEL) || defined(CONFIG_HIMEMKERNEL)
23#define RAM_START CONFIG_RAMBASE
24#define RAM_LENGTH CONFIG_RAMSIZE
25#define ROMVEC_START CONFIG_ROMVEC
26#define ROMVEC_LENGTH CONFIG_ROMVECSIZE
27#define ROM_START CONFIG_ROMSTART
28#define ROM_LENGTH CONFIG_ROMSIZE
29#define TEXT rom
30#define DATA ram
31#define INIT ram
32#define BSSS ram
33#endif
34
35#ifndef DATA_ADDR
36#define DATA_ADDR
37#endif
38
39
40OUTPUT_ARCH(m68k)
41ENTRY(_start)
42
43MEMORY {
44 ram : ORIGIN = RAM_START, LENGTH = RAM_LENGTH
45#ifdef ROM_START
46 romvec : ORIGIN = ROMVEC_START, LENGTH = ROMVEC_LENGTH
47 rom : ORIGIN = ROM_START, LENGTH = ROM_LENGTH
48#endif
49}
50
51jiffies = jiffies_64 + 4;
52
53SECTIONS {
54
55#ifdef ROMVEC_START
56 . = ROMVEC_START ;
57 .romvec : {
58 __rom_start = . ;
59 _romvec = .;
60 *(.data..initvect)
61 } > romvec
62#endif
63
64 .text : {
65 _text = .;
66 _stext = . ;
67 HEAD_TEXT
68 TEXT_TEXT
69 SCHED_TEXT
70 LOCK_TEXT
71 *(.text..lock)
72
73 . = ALIGN(16); /* Exception table */
74 __start___ex_table = .;
75 *(__ex_table)
76 __stop___ex_table = .;
77
78 *(.rodata) *(.rodata.*)
79 *(__vermagic) /* Kernel version magic */
80 *(__markers_strings)
81 *(.rodata1)
82 *(.rodata.str1.1)
83
84 /* Kernel symbol table: Normal symbols */
85 . = ALIGN(4);
86 __start___ksymtab = .;
87 *(SORT(___ksymtab+*))
88 __stop___ksymtab = .;
89
90 /* Kernel symbol table: GPL-only symbols */
91 __start___ksymtab_gpl = .;
92 *(SORT(___ksymtab_gpl+*))
93 __stop___ksymtab_gpl = .;
94
95 /* Kernel symbol table: Normal unused symbols */
96 __start___ksymtab_unused = .;
97 *(SORT(___ksymtab_unused+*))
98 __stop___ksymtab_unused = .;
99
100 /* Kernel symbol table: GPL-only unused symbols */
101 __start___ksymtab_unused_gpl = .;
102 *(SORT(___ksymtab_unused_gpl+*))
103 __stop___ksymtab_unused_gpl = .;
104
105 /* Kernel symbol table: GPL-future symbols */
106 __start___ksymtab_gpl_future = .;
107 *(SORT(___ksymtab_gpl_future+*))
108 __stop___ksymtab_gpl_future = .;
109
110 /* Kernel symbol table: Normal symbols */
111 __start___kcrctab = .;
112 *(SORT(___kcrctab+*))
113 __stop___kcrctab = .;
114
115 /* Kernel symbol table: GPL-only symbols */
116 __start___kcrctab_gpl = .;
117 *(SORT(___kcrctab_gpl+*))
118 __stop___kcrctab_gpl = .;
119
120 /* Kernel symbol table: Normal unused symbols */
121 __start___kcrctab_unused = .;
122 *(SORT(___kcrctab_unused+*))
123 __stop___kcrctab_unused = .;
124
125 /* Kernel symbol table: GPL-only unused symbols */
126 __start___kcrctab_unused_gpl = .;
127 *(SORT(___kcrctab_unused_gpl+*))
128 __stop___kcrctab_unused_gpl = .;
129
130 /* Kernel symbol table: GPL-future symbols */
131 __start___kcrctab_gpl_future = .;
132 *(SORT(___kcrctab_gpl_future+*))
133 __stop___kcrctab_gpl_future = .;
134
135 /* Kernel symbol table: strings */
136 *(__ksymtab_strings)
137
138 /* Built-in module parameters */
139 . = ALIGN(4) ;
140 __start___param = .;
141 *(__param)
142 __stop___param = .;
143
144 /* Built-in module versions */
145 . = ALIGN(4) ;
146 __start___modver = .;
147 *(__modver)
148 __stop___modver = .;
149
150 . = ALIGN(4) ;
151 _etext = . ;
152 } > TEXT
153
154 .data DATA_ADDR : {
155 . = ALIGN(4);
156 _sdata = . ;
157 DATA_DATA
158 CACHELINE_ALIGNED_DATA(32)
159 PAGE_ALIGNED_DATA(PAGE_SIZE)
160 *(.data..shared_aligned)
161 INIT_TASK_DATA(THREAD_SIZE)
162 _edata = . ;
163 } > DATA
164
165 .init.text : {
166 . = ALIGN(PAGE_SIZE);
167 __init_begin = .;
168 } > INIT
169 INIT_TEXT_SECTION(PAGE_SIZE) > INIT
170 INIT_DATA_SECTION(16) > INIT
171 .init.data : {
172 . = ALIGN(PAGE_SIZE);
173 __init_end = .;
174 } > INIT
175
176 .bss : {
177 . = ALIGN(4);
178 _sbss = . ;
179 *(.bss)
180 *(COMMON)
181 . = ALIGN(4) ;
182 _ebss = . ;
183 _end = . ;
184 } > BSSS
185
186 DISCARDS
187}
188