author    Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
committer Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
commit    8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree      a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /arch/m68k/kernel
parent    406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'arch/m68k/kernel')
-rw-r--r--  arch/m68k/kernel/Makefile        |   29
-rw-r--r--  arch/m68k/kernel/asm-offsets.c   |    3
-rw-r--r--  arch/m68k/kernel/dma.c           |  168
-rw-r--r--  arch/m68k/kernel/entry.S         |  456
-rw-r--r--  arch/m68k/kernel/head.S          |  117
-rw-r--r--  arch/m68k/kernel/ints.c          |  324
-rw-r--r--  arch/m68k/kernel/irq.c           |    1
-rw-r--r--  arch/m68k/kernel/m68k_ksyms.c    |    2
-rw-r--r--  arch/m68k/kernel/module.c        |    4
-rw-r--r--  arch/m68k/kernel/pcibios.c       |  104
-rw-r--r--  arch/m68k/kernel/process.c       |  302
-rw-r--r--  arch/m68k/kernel/ptrace.c        |  305
-rw-r--r--  arch/m68k/kernel/setup_mm.c      |   22
-rw-r--r--  arch/m68k/kernel/setup_no.c      |   19
-rw-r--r--  arch/m68k/kernel/signal.c        | 1184
-rw-r--r--  arch/m68k/kernel/sys_m68k.c      |   25
-rw-r--r--  arch/m68k/kernel/syscalltable.S  |    9
-rw-r--r--  arch/m68k/kernel/time.c          |  113
-rw-r--r--  arch/m68k/kernel/traps.c         | 1209
-rw-r--r--  arch/m68k/kernel/vectors.c       |  144
-rw-r--r--  arch/m68k/kernel/vmlinux.lds.S   |   15
21 files changed, 422 insertions(+), 4133 deletions(-)
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile
index 068ad49210d..c482ebc9dd5 100644
--- a/arch/m68k/kernel/Makefile
+++ b/arch/m68k/kernel/Makefile
@@ -1,26 +1,5 @@
-#
-# Makefile for the linux kernel.
-#
-
-extra-$(CONFIG_AMIGA)	:= head.o
-extra-$(CONFIG_ATARI)	:= head.o
-extra-$(CONFIG_MAC)	:= head.o
-extra-$(CONFIG_APOLLO)	:= head.o
-extra-$(CONFIG_VME)	:= head.o
-extra-$(CONFIG_HP300)	:= head.o
-extra-$(CONFIG_Q40)	:= head.o
-extra-$(CONFIG_SUN3X)	:= head.o
-extra-$(CONFIG_SUN3)	:= sun3-head.o
-extra-y			+= vmlinux.lds
-
-obj-y	:= entry.o irq.o m68k_ksyms.o module.o process.o ptrace.o
-obj-y	+= setup.o signal.o sys_m68k.o syscalltable.o time.o traps.o
-
-obj-$(CONFIG_MMU_MOTOROLA) += ints.o vectors.o
-obj-$(CONFIG_MMU_SUN3) += ints.o vectors.o
-obj-$(CONFIG_PCI) += pcibios.o
-
-ifndef CONFIG_MMU_SUN3
-obj-y	+= dma.o
-endif
-
+ifdef CONFIG_MMU
+include arch/m68k/kernel/Makefile_mm
+else
+include arch/m68k/kernel/Makefile_no
+endif
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c
index a972b00cd77..983fed9d469 100644
--- a/arch/m68k/kernel/asm-offsets.c
+++ b/arch/m68k/kernel/asm-offsets.c
@@ -24,7 +24,8 @@ int main(void)
 	/* offsets into the task struct */
 	DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
 	DEFINE(TASK_MM, offsetof(struct task_struct, mm));
-	DEFINE(TASK_STACK, offsetof(struct task_struct, stack));
+	DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info));
+	DEFINE(TASK_TINFO, offsetof(struct task_struct, thread.info));
 
 	/* offsets into the thread struct */
 	DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index e546a5534dd..90e8cb726c8 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -1,165 +1,5 @@
1/* 1#ifdef CONFIG_MMU
2 * This file is subject to the terms and conditions of the GNU General Public 2#include "dma_mm.c"
3 * License. See the file COPYING in the main directory of this archive
4 * for more details.
5 */
6
7#undef DEBUG
8
9#include <linux/dma-mapping.h>
10#include <linux/device.h>
11#include <linux/kernel.h>
12#include <linux/scatterlist.h>
13#include <linux/slab.h>
14#include <linux/vmalloc.h>
15#include <linux/export.h>
16
17#include <asm/pgalloc.h>
18
19#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
20
21void *dma_alloc_coherent(struct device *dev, size_t size,
22 dma_addr_t *handle, gfp_t flag)
23{
24 struct page *page, **map;
25 pgprot_t pgprot;
26 void *addr;
27 int i, order;
28
29 pr_debug("dma_alloc_coherent: %d,%x\n", size, flag);
30
31 size = PAGE_ALIGN(size);
32 order = get_order(size);
33
34 page = alloc_pages(flag, order);
35 if (!page)
36 return NULL;
37
38 *handle = page_to_phys(page);
39 map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA);
40 if (!map) {
41 __free_pages(page, order);
42 return NULL;
43 }
44 split_page(page, order);
45
46 order = 1 << order;
47 size >>= PAGE_SHIFT;
48 map[0] = page;
49 for (i = 1; i < size; i++)
50 map[i] = page + i;
51 for (; i < order; i++)
52 __free_page(page + i);
53 pgprot = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
54 if (CPU_IS_040_OR_060)
55 pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
56 else
57 pgprot_val(pgprot) |= _PAGE_NOCACHE030;
58 addr = vmap(map, size, VM_MAP, pgprot);
59 kfree(map);
60
61 return addr;
62}
63
64void dma_free_coherent(struct device *dev, size_t size,
65 void *addr, dma_addr_t handle)
66{
67 pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
68 vfree(addr);
69}
70
71#else 3#else
72 4#include "dma_no.c"
73#include <asm/cacheflush.h> 5#endif
74
75void *dma_alloc_coherent(struct device *dev, size_t size,
76 dma_addr_t *dma_handle, gfp_t gfp)
77{
78 void *ret;
79 /* ignore region specifiers */
80 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
81
82 if (dev == NULL || (*dev->dma_mask < 0xffffffff))
83 gfp |= GFP_DMA;
84 ret = (void *)__get_free_pages(gfp, get_order(size));
85
86 if (ret != NULL) {
87 memset(ret, 0, size);
88 *dma_handle = virt_to_phys(ret);
89 }
90 return ret;
91}
92
93void dma_free_coherent(struct device *dev, size_t size,
94 void *vaddr, dma_addr_t dma_handle)
95{
96 free_pages((unsigned long)vaddr, get_order(size));
97}
98
99#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
100
101EXPORT_SYMBOL(dma_alloc_coherent);
102EXPORT_SYMBOL(dma_free_coherent);
103
104void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
105 size_t size, enum dma_data_direction dir)
106{
107 switch (dir) {
108 case DMA_BIDIRECTIONAL:
109 case DMA_TO_DEVICE:
110 cache_push(handle, size);
111 break;
112 case DMA_FROM_DEVICE:
113 cache_clear(handle, size);
114 break;
115 default:
116 if (printk_ratelimit())
117 printk("dma_sync_single_for_device: unsupported dir %u\n", dir);
118 break;
119 }
120}
121EXPORT_SYMBOL(dma_sync_single_for_device);
122
123void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
124 enum dma_data_direction dir)
125{
126 int i;
127
128 for (i = 0; i < nents; sg++, i++)
129 dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
130}
131EXPORT_SYMBOL(dma_sync_sg_for_device);
132
133dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
134 enum dma_data_direction dir)
135{
136 dma_addr_t handle = virt_to_bus(addr);
137
138 dma_sync_single_for_device(dev, handle, size, dir);
139 return handle;
140}
141EXPORT_SYMBOL(dma_map_single);
142
143dma_addr_t dma_map_page(struct device *dev, struct page *page,
144 unsigned long offset, size_t size,
145 enum dma_data_direction dir)
146{
147 dma_addr_t handle = page_to_phys(page) + offset;
148
149 dma_sync_single_for_device(dev, handle, size, dir);
150 return handle;
151}
152EXPORT_SYMBOL(dma_map_page);
153
154int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
155 enum dma_data_direction dir)
156{
157 int i;
158
159 for (i = 0; i < nents; sg++, i++) {
160 sg->dma_address = sg_phys(sg);
161 dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
162 }
163 return nents;
164}
165EXPORT_SYMBOL(dma_map_sg);
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index a78f5649e8d..081cf96f243 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -1,455 +1,5 @@
1/* -*- mode: asm -*- 1#ifdef CONFIG_MMU
2 * 2#include "entry_mm.S"
3 * linux/arch/m68k/kernel/entry.S
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file README.legal in the main directory of this archive
9 * for more details.
10 *
11 * Linux/m68k support by Hamish Macdonald
12 *
13 * 68060 fixes by Jesper Skov
14 *
15 */
16
17/*
18 * entry.S contains the system-call and fault low-level handling routines.
19 * This also contains the timer-interrupt handler, as well as all interrupts
20 * and faults that can result in a task-switch.
21 *
22 * NOTE: This code handles signal-recognition, which happens every time
23 * after a timer-interrupt and after each system call.
24 *
25 */
26
27/*
28 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
29 * all pointers that used to be 'current' are now entry
30 * number 0 in the 'current_set' list.
31 *
32 * 6/05/00 RZ: addedd writeback completion after return from sighandler
33 * for 68040
34 */
35
36#include <linux/linkage.h>
37#include <asm/errno.h>
38#include <asm/setup.h>
39#include <asm/segment.h>
40#include <asm/traps.h>
41#include <asm/unistd.h>
42#include <asm/asm-offsets.h>
43#include <asm/entry.h>
44
45.globl system_call, buserr, trap, resume
46.globl sys_call_table
47.globl __sys_fork, __sys_clone, __sys_vfork
48.globl ret_from_interrupt, bad_interrupt
49.globl auto_irqhandler_fixup
50.globl user_irqvec_fixup
51
52.text
53ENTRY(__sys_fork)
54 SAVE_SWITCH_STACK
55 jbsr sys_fork
56 lea %sp@(24),%sp
57 rts
58
59ENTRY(__sys_clone)
60 SAVE_SWITCH_STACK
61 pea %sp@(SWITCH_STACK_SIZE)
62 jbsr m68k_clone
63 lea %sp@(28),%sp
64 rts
65
66ENTRY(__sys_vfork)
67 SAVE_SWITCH_STACK
68 jbsr sys_vfork
69 lea %sp@(24),%sp
70 rts
71
72ENTRY(sys_sigreturn)
73 SAVE_SWITCH_STACK
74 jbsr do_sigreturn
75 RESTORE_SWITCH_STACK
76 rts
77
78ENTRY(sys_rt_sigreturn)
79 SAVE_SWITCH_STACK
80 jbsr do_rt_sigreturn
81 RESTORE_SWITCH_STACK
82 rts
83
84ENTRY(buserr)
85 SAVE_ALL_INT
86 GET_CURRENT(%d0)
87 movel %sp,%sp@- | stack frame pointer argument
88 jbsr buserr_c
89 addql #4,%sp
90 jra ret_from_exception
91
92ENTRY(trap)
93 SAVE_ALL_INT
94 GET_CURRENT(%d0)
95 movel %sp,%sp@- | stack frame pointer argument
96 jbsr trap_c
97 addql #4,%sp
98 jra ret_from_exception
99
100 | After a fork we jump here directly from resume,
101 | so that %d1 contains the previous task
102 | schedule_tail now used regardless of CONFIG_SMP
103ENTRY(ret_from_fork)
104 movel %d1,%sp@-
105 jsr schedule_tail
106 addql #4,%sp
107 jra ret_from_exception
108
109ENTRY(ret_from_kernel_thread)
110 | a3 contains the kernel thread payload, d7 - its argument
111 movel %d1,%sp@-
112 jsr schedule_tail
113 movel %d7,(%sp)
114 jsr %a3@
115 addql #4,%sp
116 jra ret_from_exception
117
118#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)
119
120#ifdef TRAP_DBG_INTERRUPT
121
122.globl dbginterrupt
123ENTRY(dbginterrupt)
124 SAVE_ALL_INT
125 GET_CURRENT(%d0)
126 movel %sp,%sp@- /* stack frame pointer argument */
127 jsr dbginterrupt_c
128 addql #4,%sp
129 jra ret_from_exception
130#endif
131
132ENTRY(reschedule)
133 /* save top of frame */
134 pea %sp@
135 jbsr set_esp0
136 addql #4,%sp
137 pea ret_from_exception
138 jmp schedule
139
140ENTRY(ret_from_user_signal)
141 moveq #__NR_sigreturn,%d0
142 trap #0
143
144ENTRY(ret_from_user_rt_signal)
145 movel #__NR_rt_sigreturn,%d0
146 trap #0
147
148#else 3#else
149 4#include "entry_no.S"
150do_trace_entry:
151 movel #-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
152 subql #4,%sp
153 SAVE_SWITCH_STACK
154 jbsr syscall_trace
155 RESTORE_SWITCH_STACK
156 addql #4,%sp
157 movel %sp@(PT_OFF_ORIG_D0),%d0
158 cmpl #NR_syscalls,%d0
159 jcs syscall
160badsys:
161 movel #-ENOSYS,%sp@(PT_OFF_D0)
162 jra ret_from_syscall
163
164do_trace_exit:
165 subql #4,%sp
166 SAVE_SWITCH_STACK
167 jbsr syscall_trace
168 RESTORE_SWITCH_STACK
169 addql #4,%sp
170 jra .Lret_from_exception
171
172ENTRY(ret_from_signal)
173 movel %curptr@(TASK_STACK),%a1
174 tstb %a1@(TINFO_FLAGS+2)
175 jge 1f
176 jbsr syscall_trace
1771: RESTORE_SWITCH_STACK
178 addql #4,%sp
179/* on 68040 complete pending writebacks if any */
180#ifdef CONFIG_M68040
181 bfextu %sp@(PT_OFF_FORMATVEC){#0,#4},%d0
182 subql #7,%d0 | bus error frame ?
183 jbne 1f
184 movel %sp,%sp@-
185 jbsr berr_040cleanup
186 addql #4,%sp
1871:
188#endif
189 jra .Lret_from_exception
190
191ENTRY(system_call)
192 SAVE_ALL_SYS
193
194 GET_CURRENT(%d1)
195 movel %d1,%a1
196
197 | save top of frame
198 movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
199
200 | syscall trace?
201 tstb %a1@(TINFO_FLAGS+2)
202 jmi do_trace_entry
203 cmpl #NR_syscalls,%d0
204 jcc badsys
205syscall:
206 jbsr @(sys_call_table,%d0:l:4)@(0)
207 movel %d0,%sp@(PT_OFF_D0) | save the return value
208ret_from_syscall:
209 |oriw #0x0700,%sr
210 movel %curptr@(TASK_STACK),%a1
211 movew %a1@(TINFO_FLAGS+2),%d0
212 jne syscall_exit_work
2131: RESTORE_ALL
214
215syscall_exit_work:
216 btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
217 bnes 1b | if so, skip resched, signals
218 lslw #1,%d0
219 jcs do_trace_exit
220 jmi do_delayed_trace
221 lslw #8,%d0
222 jne do_signal_return
223 pea resume_userspace
224 jra schedule
225
226
227ENTRY(ret_from_exception)
228.Lret_from_exception:
229 btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
230 bnes 1f | if so, skip resched, signals
231 | only allow interrupts when we are really the last one on the
232 | kernel stack, otherwise stack overflow can occur during
233 | heavy interrupt load
234 andw #ALLOWINT,%sr
235
236resume_userspace:
237 movel %curptr@(TASK_STACK),%a1
238 moveb %a1@(TINFO_FLAGS+3),%d0
239 jne exit_work
2401: RESTORE_ALL
241
242exit_work:
243 | save top of frame
244 movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
245 lslb #1,%d0
246 jne do_signal_return
247 pea resume_userspace
248 jra schedule
249
250
251do_signal_return:
252 |andw #ALLOWINT,%sr
253 subql #4,%sp | dummy return address
254 SAVE_SWITCH_STACK
255 pea %sp@(SWITCH_STACK_SIZE)
256 bsrl do_notify_resume
257 addql #4,%sp
258 RESTORE_SWITCH_STACK
259 addql #4,%sp
260 jbra resume_userspace
261
262do_delayed_trace:
263 bclr #7,%sp@(PT_OFF_SR) | clear trace bit in SR
264 pea 1 | send SIGTRAP
265 movel %curptr,%sp@-
266 pea LSIGTRAP
267 jbsr send_sig
268 addql #8,%sp
269 addql #4,%sp
270 jbra resume_userspace
271
272
273/* This is the main interrupt handler for autovector interrupts */
274
275ENTRY(auto_inthandler)
276 SAVE_ALL_INT
277 GET_CURRENT(%d0)
278 movel %d0,%a1
279 addqb #1,%a1@(TINFO_PREEMPT+1)
280 | put exception # in d0
281 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
282 subw #VEC_SPUR,%d0
283
284 movel %sp,%sp@-
285 movel %d0,%sp@- | put vector # on stack
286auto_irqhandler_fixup = . + 2
287 jsr do_IRQ | process the IRQ
288 addql #8,%sp | pop parameters off stack
289
290ret_from_interrupt:
291 movel %curptr@(TASK_STACK),%a1
292 subqb #1,%a1@(TINFO_PREEMPT+1)
293 jeq ret_from_last_interrupt
2942: RESTORE_ALL
295
296 ALIGN
297ret_from_last_interrupt:
298 moveq #(~ALLOWINT>>8)&0xff,%d0
299 andb %sp@(PT_OFF_SR),%d0
300 jne 2b
301
302 /* check if we need to do software interrupts */
303 tstl irq_stat+CPUSTAT_SOFTIRQ_PENDING
304 jeq .Lret_from_exception
305 pea ret_from_exception
306 jra do_softirq
307
308/* Handler for user defined interrupt vectors */
309
310ENTRY(user_inthandler)
311 SAVE_ALL_INT
312 GET_CURRENT(%d0)
313 movel %d0,%a1
314 addqb #1,%a1@(TINFO_PREEMPT+1)
315 | put exception # in d0
316 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
317user_irqvec_fixup = . + 2
318 subw #VEC_USER,%d0
319
320 movel %sp,%sp@-
321 movel %d0,%sp@- | put vector # on stack
322 jsr do_IRQ | process the IRQ
323 addql #8,%sp | pop parameters off stack
324
325 movel %curptr@(TASK_STACK),%a1
326 subqb #1,%a1@(TINFO_PREEMPT+1)
327 jeq ret_from_last_interrupt
328 RESTORE_ALL
329
330/* Handler for uninitialized and spurious interrupts */
331
332ENTRY(bad_inthandler)
333 SAVE_ALL_INT
334 GET_CURRENT(%d0)
335 movel %d0,%a1
336 addqb #1,%a1@(TINFO_PREEMPT+1)
337
338 movel %sp,%sp@-
339 jsr handle_badint
340 addql #4,%sp
341
342 movel %curptr@(TASK_STACK),%a1
343 subqb #1,%a1@(TINFO_PREEMPT+1)
344 jeq ret_from_last_interrupt
345 RESTORE_ALL
346
347
348resume:
349 /*
350 * Beware - when entering resume, prev (the current task) is
351 * in a0, next (the new task) is in a1,so don't change these
352 * registers until their contents are no longer needed.
353 */
354
355 /* save sr */
356 movew %sr,%a0@(TASK_THREAD+THREAD_SR)
357
358 /* save fs (sfc,%dfc) (may be pointing to kernel memory) */
359 movec %sfc,%d0
360 movew %d0,%a0@(TASK_THREAD+THREAD_FS)
361
362 /* save usp */
363 /* it is better to use a movel here instead of a movew 8*) */
364 movec %usp,%d0
365 movel %d0,%a0@(TASK_THREAD+THREAD_USP)
366
367 /* save non-scratch registers on stack */
368 SAVE_SWITCH_STACK
369
370 /* save current kernel stack pointer */
371 movel %sp,%a0@(TASK_THREAD+THREAD_KSP)
372
373 /* save floating point context */
374#ifndef CONFIG_M68KFPU_EMU_ONLY
375#ifdef CONFIG_M68KFPU_EMU
376 tstl m68k_fputype
377 jeq 3f
378#endif
379 fsave %a0@(TASK_THREAD+THREAD_FPSTATE)
380
381#if defined(CONFIG_M68060)
382#if !defined(CPU_M68060_ONLY)
383 btst #3,m68k_cputype+3
384 beqs 1f
385#endif
386 /* The 060 FPU keeps status in bits 15-8 of the first longword */
387 tstb %a0@(TASK_THREAD+THREAD_FPSTATE+2)
388 jeq 3f
389#if !defined(CPU_M68060_ONLY)
390 jra 2f
391#endif
392#endif /* CONFIG_M68060 */
393#if !defined(CPU_M68060_ONLY)
3941: tstb %a0@(TASK_THREAD+THREAD_FPSTATE)
395 jeq 3f
396#endif
3972: fmovemx %fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
398 fmoveml %fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3993:
400#endif /* CONFIG_M68KFPU_EMU_ONLY */
401 /* Return previous task in %d1 */
402 movel %curptr,%d1
403
404 /* switch to new task (a1 contains new task) */
405 movel %a1,%curptr
406
407 /* restore floating point context */
408#ifndef CONFIG_M68KFPU_EMU_ONLY
409#ifdef CONFIG_M68KFPU_EMU
410 tstl m68k_fputype
411 jeq 4f
412#endif
413#if defined(CONFIG_M68060)
414#if !defined(CPU_M68060_ONLY)
415 btst #3,m68k_cputype+3
416 beqs 1f
417#endif
418 /* The 060 FPU keeps status in bits 15-8 of the first longword */
419 tstb %a1@(TASK_THREAD+THREAD_FPSTATE+2)
420 jeq 3f
421#if !defined(CPU_M68060_ONLY)
422 jra 2f
423#endif
424#endif /* CONFIG_M68060 */
425#if !defined(CPU_M68060_ONLY)
4261: tstb %a1@(TASK_THREAD+THREAD_FPSTATE)
427 jeq 3f
428#endif 5#endif
4292: fmovemx %a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
430 fmoveml %a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
4313: frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4324:
433#endif /* CONFIG_M68KFPU_EMU_ONLY */
434
435 /* restore the kernel stack pointer */
436 movel %a1@(TASK_THREAD+THREAD_KSP),%sp
437
438 /* restore non-scratch registers */
439 RESTORE_SWITCH_STACK
440
441 /* restore user stack pointer */
442 movel %a1@(TASK_THREAD+THREAD_USP),%a0
443 movel %a0,%usp
444
445 /* restore fs (sfc,%dfc) */
446 movew %a1@(TASK_THREAD+THREAD_FS),%a0
447 movec %a0,%sfc
448 movec %a0,%dfc
449
450 /* restore status register */
451 movew %a1@(TASK_THREAD+THREAD_SR),%sr
452
453 rts
454
455#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S
index d197e7ff62c..27622b3273c 100644
--- a/arch/m68k/kernel/head.S
+++ b/arch/m68k/kernel/head.S
@@ -250,8 +250,9 @@
  * USE_MFP: Use the ST-MFP port (Modem1) for serial debug.
  *
  * Macintosh constants:
- * MAC_USE_SCC_A: Use SCC port A (modem) for serial debug and early console.
- * MAC_USE_SCC_B: Use SCC port B (printer) for serial debug and early console.
+ * MAC_SERIAL_DEBUG: Turns on serial debug output for the Macintosh.
+ * MAC_USE_SCC_A: Use the SCC port A (modem) for serial debug.
+ * MAC_USE_SCC_B: Use the SCC port B (printer) for serial debug (default).
  */
 
 #include <linux/linkage.h>
@@ -267,25 +268,33 @@
 
 #include <asm/machw.h>
 
+/*
+ * Macintosh console support
+ */
+
 #ifdef CONFIG_FRAMEBUFFER_CONSOLE
 #define CONSOLE
 #define CONSOLE_PENGUIN
 #endif
 
-#ifdef CONFIG_EARLY_PRINTK
-#define SERIAL_DEBUG
-#else
-#undef SERIAL_DEBUG
-#endif
-
-#else /* !CONFIG_MAC */
+/*
+ * Macintosh serial debug support; outputs boot info to the printer
+ * and/or modem serial ports
+ */
+#undef MAC_SERIAL_DEBUG
 
-#define SERIAL_DEBUG
+/*
+ * Macintosh serial debug port selection; define one or both;
+ * requires MAC_SERIAL_DEBUG to be defined
+ */
+#define MAC_USE_SCC_A	/* Macintosh modem serial port */
+#define MAC_USE_SCC_B	/* Macintosh printer serial port */
 
-#endif /* !CONFIG_MAC */
+#endif /* CONFIG_MAC */
 
 #undef MMU_PRINT
 #undef MMU_NOCACHE_KERNEL
+#define SERIAL_DEBUG
 #undef DEBUG
 
 /*
@@ -646,11 +655,11 @@ ENTRY(__start)
 	lea	%pc@(L(mac_rowbytes)),%a1
 	movel	%a0@,%a1@
 
-#ifdef SERIAL_DEBUG
+#ifdef MAC_SERIAL_DEBUG
 	get_bi_record	BI_MAC_SCCBASE
 	lea	%pc@(L(mac_sccbase)),%a1
 	movel	%a0@,%a1@
-#endif
+#endif /* MAC_SERIAL_DEBUG */
 
 #if 0
 	/*
@@ -1418,7 +1427,7 @@ L(mmu_fixup_done):
 	subl	%d0,L(console_font)
 	subl	%d0,L(console_font_data)
 #endif
-#ifdef SERIAL_DEBUG
+#ifdef MAC_SERIAL_DEBUG
 	orl	#0x50000000,L(mac_sccbase)
 #endif
 1:
@@ -1908,7 +1917,7 @@ mmu_030_print:
 	jbne	30b
 
 mmu_print_done:
-	puts	"\n"
+	puts	"\n\n"
 
 func_return	mmu_print
 
@@ -2759,7 +2768,7 @@ L(scc_initable_mac):
 	.byte	9,0		/* no interrupts */
 	.byte	10,0		/* NRZ */
 	.byte	11,0x50		/* use baud rate generator */
-	.byte	12,1,13,0	/* 38400 baud */
+	.byte	12,10,13,0	/* 9600 baud */
 	.byte	14,1		/* Baud rate generator enable */
 	.byte	3,0xc1		/* enable receiver */
 	.byte	5,0xea		/* enable transmitter */
@@ -2897,12 +2906,10 @@ func_start serial_init,%d0/%d1/%a0/%a1
 #endif
 #ifdef CONFIG_MAC
 	is_not_mac(L(serial_init_not_mac))
-
-#ifdef SERIAL_DEBUG
-/* You may define either or both of these. */
-#define MAC_USE_SCC_A /* Modem port */
-#define MAC_USE_SCC_B /* Printer port */
-
+#ifdef MAC_SERIAL_DEBUG
+#if !defined(MAC_USE_SCC_A) && !defined(MAC_USE_SCC_B)
+#define MAC_USE_SCC_B
+#endif
 #define mac_scc_cha_b_ctrl_offset	0x0
 #define mac_scc_cha_a_ctrl_offset	0x2
 #define mac_scc_cha_b_data_offset	0x4
@@ -2933,7 +2940,7 @@ func_start serial_init,%d0/%d1/%a0/%a1
 	jra	7b
 8:
 #endif	/* MAC_USE_SCC_B */
-#endif	/* SERIAL_DEBUG */
+#endif	/* MAC_SERIAL_DEBUG */
 
 	jra	L(serial_init_done)
 L(serial_init_not_mac):
@@ -3004,7 +3011,7 @@ func_start serial_putc,%d0/%d1/%a0/%a1
 #ifdef CONFIG_MAC
 	is_not_mac(5f)
 
-#ifdef SERIAL_DEBUG
+#ifdef MAC_SERIAL_DEBUG
 
 #ifdef MAC_USE_SCC_A
 	movel	%pc@(L(mac_sccbase)),%a1
@@ -3022,7 +3029,7 @@ func_start serial_putc,%d0/%d1/%a0/%a1
 	moveb	%d0,%a1@(mac_scc_cha_b_data_offset)
 #endif	/* MAC_USE_SCC_B */
 
-#endif	/* SERIAL_DEBUG */
+#endif	/* MAC_SERIAL_DEBUG */
 
 	jra	L(serial_putc_done)
 5:
@@ -3241,39 +3248,33 @@ func_return putn
 
 #ifdef CONFIG_MAC
 /*
- * mac_early_print
+ * mac_serial_print
  *
  * This routine takes its parameters on the stack. It then
- * turns around and calls the internal routines. This routine
- * is used by the boot console.
+ * turns around and calls the internal routine. This routine
+ * is used until the Linux console driver initializes itself.
  *
  * The calling parameters are:
- *	void mac_early_print(const char *str, unsigned length);
+ *	void mac_serial_print(const char *str);
  *
  * This routine does NOT understand variable arguments only
  * simple strings!
  */
-ENTRY(mac_early_print)
-	moveml	%d0/%d1/%a0,%sp@-
-	movew	%sr,%sp@-
+ENTRY(mac_serial_print)
+	moveml	%d0/%a0,%sp@-
+#if 1
+	move	%sr,%sp@-
 	ori	#0x0700,%sr
-	movel	%sp@(18),%a0		/* fetch parameter */
-	movel	%sp@(22),%d1		/* fetch parameter */
-	jra	2f
-1:
-#ifdef CONSOLE
-	console_putc	%d0
 #endif
-#ifdef SERIAL_DEBUG
-	serial_putc	%d0
-#endif
-	subq	#1,%d1
-2:	jeq	3f
-	moveb	%a0@+,%d0
+	movel	%sp@(10),%a0		/* fetch parameter */
+	jra	2f
+1:	serial_putc	%d0
+2:	moveb	%a0@+,%d0
 	jne	1b
-3:
-	movew	%sp@+,%sr
-	moveml	%sp@+,%d0/%d1/%a0
+#if 1
+	move	%sp@+,%sr
+#endif
+	moveml	%sp@+,%d0/%a0
 	rts
 #endif /* CONFIG_MAC */
 
@@ -3408,10 +3409,10 @@ func_start console_put_stats,%a0/%d7
 	 *	a0 = pointer to boot_info
 	 *	d7 = value of boot_info fields
 	 */
-	puts	"\nMacLinux\n"
+	puts	"\nMacLinux\n\n"
 
 #ifdef SERIAL_DEBUG
-	puts	"\n vidaddr:"
+	puts	" vidaddr:"
 	putn	%pc@(L(mac_videobase))		/* video addr. */
 
 	puts	"\n _stext:"
@@ -3422,21 +3423,19 @@ func_start console_put_stats,%a0/%d7
 	lea	%pc@(_end),%a0
 	putn	%a0
 
-	puts	"\n cpuid:"
+	puts	"\ncpuid:"
 	putn	%pc@(L(cputype))
+	putc	'\n'
 
-# ifdef CONFIG_MAC
-	puts	"\n sccbase:"
+#ifdef MAC_SERIAL_DEBUG
 	putn	%pc@(L(mac_sccbase))
-# endif
-# ifdef MMU_PRINT
 	putc	'\n'
+#endif
+# if defined(MMU_PRINT)
 	jbsr	mmu_print_machine_cpu_types
-# endif
+# endif /* MMU_PRINT */
 #endif /* SERIAL_DEBUG */
 
-	putc	'\n'
-
 func_return	console_put_stats
 
 #ifdef CONSOLE_PENGUIN
@@ -3897,11 +3896,11 @@ L(mac_dimensions):
 	.long	0
 L(mac_rowbytes):
 	.long	0
-#ifdef SERIAL_DEBUG
+#ifdef MAC_SERIAL_DEBUG
 L(mac_sccbase):
 	.long	0
+#endif /* MAC_SERIAL_DEBUG */
 #endif
-#endif /* CONFIG_MAC */
 
 #if defined (CONFIG_APOLLO)
 LSRB0 = 0x10412
diff --git a/arch/m68k/kernel/ints.c b/arch/m68k/kernel/ints.c
index 6b32b64bac3..761ee0440c9 100644
--- a/arch/m68k/kernel/ints.c
+++ b/arch/m68k/kernel/ints.c
@@ -4,6 +4,25 @@
4 * This file is subject to the terms and conditions of the GNU General Public 4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file COPYING in the main directory of this archive 5 * License. See the file COPYING in the main directory of this archive
6 * for more details. 6 * for more details.
7 *
8 * 07/03/96: Timer initialization, and thus mach_sched_init(),
9 * removed from request_irq() and moved to init_time().
10 * We should therefore consider renaming our add_isr() and
11 * remove_isr() to request_irq() and free_irq()
12 * respectively, so they are compliant with the other
13 * architectures. /Jes
14 * 11/07/96: Changed all add_/remove_isr() to request_/free_irq() calls.
15 * Removed irq list support, if any machine needs an irq server
16 * it must implement this itself (as it's already done), instead
17 * only default handler are used with mach_default_handler.
18 * request_irq got some flags different from other architectures:
19 * - IRQ_FLG_REPLACE : Replace an existing handler (the default one
20 * can be replaced without this flag)
21 * - IRQ_FLG_LOCK : handler can't be replaced
22 * There are other machine depending flags, see there
23 * If you want to replace a default handler you should know what
24 * you're doing, since it might handle different other irq sources
25 * which must be served /Roman Zippel
7 */ 26 */
8 27
9#include <linux/module.h> 28#include <linux/module.h>
@@ -15,6 +34,7 @@
 #include <linux/init.h>
 
 #include <asm/setup.h>
+#include <asm/system.h>
 #include <asm/irq.h>
 #include <asm/traps.h>
 #include <asm/page.h>
@@ -27,22 +47,33 @@
 #endif
 
 extern u32 auto_irqhandler_fixup[];
+extern u32 user_irqhandler_fixup[];
 extern u16 user_irqvec_fixup[];
 
+/* table for system interrupt handlers */
+static struct irq_node *irq_list[NR_IRQS];
+static struct irq_controller *irq_controller[NR_IRQS];
+static int irq_depth[NR_IRQS];
+
 static int m68k_first_user_vec;
 
-static struct irq_chip auto_irq_chip = {
+static struct irq_controller auto_irq_controller = {
 	.name		= "auto",
-	.irq_startup	= m68k_irq_startup,
-	.irq_shutdown	= m68k_irq_shutdown,
+	.lock		= __SPIN_LOCK_UNLOCKED(auto_irq_controller.lock),
+	.startup	= m68k_irq_startup,
+	.shutdown	= m68k_irq_shutdown,
 };
 
-static struct irq_chip user_irq_chip = {
+static struct irq_controller user_irq_controller = {
 	.name		= "user",
-	.irq_startup	= m68k_irq_startup,
-	.irq_shutdown	= m68k_irq_shutdown,
+	.lock		= __SPIN_LOCK_UNLOCKED(user_irq_controller.lock),
+	.startup	= m68k_irq_startup,
+	.shutdown	= m68k_irq_shutdown,
 };
 
+#define NUM_IRQ_NODES 100
+static irq_node_t nodes[NUM_IRQ_NODES];
+
 /*
  * void init_IRQ(void)
  *
@@ -65,7 +96,7 @@ void __init init_IRQ(void)
 	}
 
 	for (i = IRQ_AUTO_1; i <= IRQ_AUTO_7; i++)
-		irq_set_chip_and_handler(i, &auto_irq_chip, handle_simple_irq);
+		irq_controller[i] = &auto_irq_controller;
 
 	mach_init_IRQ();
 }
@@ -75,7 +106,7 @@ void __init init_IRQ(void)
  * @handler: called from auto vector interrupts
  *
  * setup the handler to be called from auto vector interrupts instead of the
- * standard do_IRQ(), it will be called with irq numbers in the range
+ * standard __m68k_handle_int(), it will be called with irq numbers in the range
  * from IRQ_AUTO_1 - IRQ_AUTO_7.
  */
 void __init m68k_setup_auto_interrupt(void (*handler)(unsigned int, struct pt_regs *))
@@ -89,66 +120,227 @@ void __init m68k_setup_auto_interrupt(void (*handler)(unsigned int, struct pt_re
89 * m68k_setup_user_interrupt 120 * m68k_setup_user_interrupt
90 * @vec: first user vector interrupt to handle 121 * @vec: first user vector interrupt to handle
91 * @cnt: number of active user vector interrupts 122 * @cnt: number of active user vector interrupts
123 * @handler: called from user vector interrupts
92 * 124 *
93 * setup user vector interrupts, this includes activating the specified range 125 * setup user vector interrupts, this includes activating the specified range
94 * of interrupts, only then these interrupts can be requested (note: this is 126 * of interrupts, only then these interrupts can be requested (note: this is
95 * different from auto vector interrupts). 127 * different from auto vector interrupts). An optional handler can be installed
128 * to be called instead of the default __m68k_handle_int(), it will be called
129 * with irq numbers starting from IRQ_USER.
96 */ 130 */
97void __init m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt) 131void __init m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt,
132 void (*handler)(unsigned int, struct pt_regs *))
98{ 133{
99 int i; 134 int i;
100 135
101 BUG_ON(IRQ_USER + cnt > NR_IRQS); 136 BUG_ON(IRQ_USER + cnt > NR_IRQS);
102 m68k_first_user_vec = vec; 137 m68k_first_user_vec = vec;
103 for (i = 0; i < cnt; i++) 138 for (i = 0; i < cnt; i++)
104 irq_set_chip(IRQ_USER + i, &user_irq_chip); 139 irq_controller[IRQ_USER + i] = &user_irq_controller;
105 *user_irqvec_fixup = vec - IRQ_USER; 140 *user_irqvec_fixup = vec - IRQ_USER;
141 if (handler)
142 *user_irqhandler_fixup = (u32)handler;
106 flush_icache(); 143 flush_icache();
107} 144}
108 145
109/** 146/**
110 * m68k_setup_irq_controller 147 * m68k_setup_irq_controller
111 * @chip: irq chip which controls specified irq 148 * @contr: irq controller which controls specified irq
112 * @handle: flow handler which handles specified irq
113 * @irq: first irq to be managed by the controller 149 * @irq: first irq to be managed by the controller
114 * @cnt: number of irqs to be managed by the controller
115 * 150 *
116 * Change the controller for the specified range of irq, which will be used to 151 * Change the controller for the specified range of irq, which will be used to
117 * manage these irq. auto/user irq already have a default controller, which can 152 * manage these irq. auto/user irq already have a default controller, which can
118 * be changed as well, but the controller probably should use m68k_irq_startup/ 153 * be changed as well, but the controller probably should use m68k_irq_startup/
119 * m68k_irq_shutdown. 154 * m68k_irq_shutdown.
120 */ 155 */
121void m68k_setup_irq_controller(struct irq_chip *chip, 156void m68k_setup_irq_controller(struct irq_controller *contr, unsigned int irq,
122 irq_flow_handler_t handle, unsigned int irq,
123 unsigned int cnt) 157 unsigned int cnt)
124{ 158{
125 int i; 159 int i;
126 160
127 for (i = 0; i < cnt; i++) { 161 for (i = 0; i < cnt; i++)
128 irq_set_chip(irq + i, chip); 162 irq_controller[irq + i] = contr;
129 if (handle) 163}
130 irq_set_handler(irq + i, handle); 164
165irq_node_t *new_irq_node(void)
166{
167 irq_node_t *node;
168 short i;
169
170 for (node = nodes, i = NUM_IRQ_NODES-1; i >= 0; node++, i--) {
171 if (!node->handler) {
172 memset(node, 0, sizeof(*node));
173 return node;
174 }
131 } 175 }
176
177 printk ("new_irq_node: out of nodes\n");
178 return NULL;
132} 179}
133 180
134unsigned int m68k_irq_startup_irq(unsigned int irq) 181int setup_irq(unsigned int irq, struct irq_node *node)
135{ 182{
136 if (irq <= IRQ_AUTO_7) 183 struct irq_controller *contr;
137 vectors[VEC_SPUR + irq] = auto_inthandler; 184 struct irq_node **prev;
138 else 185 unsigned long flags;
139 vectors[m68k_first_user_vec + irq - IRQ_USER] = user_inthandler; 186
187 if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
188 printk("%s: Incorrect IRQ %d from %s\n",
189 __func__, irq, node->devname);
190 return -ENXIO;
191 }
192
193 spin_lock_irqsave(&contr->lock, flags);
194
195 prev = irq_list + irq;
196 if (*prev) {
197 /* Can't share interrupts unless both agree to */
198 if (!((*prev)->flags & node->flags & IRQF_SHARED)) {
199 spin_unlock_irqrestore(&contr->lock, flags);
200 return -EBUSY;
201 }
202 while (*prev)
203 prev = &(*prev)->next;
204 }
205
206 if (!irq_list[irq]) {
207 if (contr->startup)
208 contr->startup(irq);
209 else
210 contr->enable(irq);
211 }
212 node->next = NULL;
213 *prev = node;
214
215 spin_unlock_irqrestore(&contr->lock, flags);
216
140 return 0; 217 return 0;
141} 218}
142 219
143unsigned int m68k_irq_startup(struct irq_data *data) 220int request_irq(unsigned int irq,
221 irq_handler_t handler,
222 unsigned long flags, const char *devname, void *dev_id)
144{ 223{
145 return m68k_irq_startup_irq(data->irq); 224 struct irq_node *node;
225 int res;
226
227 node = new_irq_node();
228 if (!node)
229 return -ENOMEM;
230
231 node->handler = handler;
232 node->flags = flags;
233 node->dev_id = dev_id;
234 node->devname = devname;
235
236 res = setup_irq(irq, node);
237 if (res)
238 node->handler = NULL;
239
240 return res;
241}
242
243EXPORT_SYMBOL(request_irq);
244
245void free_irq(unsigned int irq, void *dev_id)
246{
247 struct irq_controller *contr;
248 struct irq_node **p, *node;
249 unsigned long flags;
250
251 if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
252 printk("%s: Incorrect IRQ %d\n", __func__, irq);
253 return;
254 }
255
256 spin_lock_irqsave(&contr->lock, flags);
257
258 p = irq_list + irq;
259 while ((node = *p)) {
260 if (node->dev_id == dev_id)
261 break;
262 p = &node->next;
263 }
264
265 if (node) {
266 *p = node->next;
267 node->handler = NULL;
268 } else
269 printk("%s: Removing probably wrong IRQ %d\n",
270 __func__, irq);
271
272 if (!irq_list[irq]) {
273 if (contr->shutdown)
274 contr->shutdown(irq);
275 else
276 contr->disable(irq);
277 }
278
279 spin_unlock_irqrestore(&contr->lock, flags);
280}
281
282EXPORT_SYMBOL(free_irq);
283
284void enable_irq(unsigned int irq)
285{
286 struct irq_controller *contr;
287 unsigned long flags;
288
289 if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
290 printk("%s: Incorrect IRQ %d\n",
291 __func__, irq);
292 return;
293 }
294
295 spin_lock_irqsave(&contr->lock, flags);
296 if (irq_depth[irq]) {
297 if (!--irq_depth[irq]) {
298 if (contr->enable)
299 contr->enable(irq);
300 }
301 } else
302 WARN_ON(1);
303 spin_unlock_irqrestore(&contr->lock, flags);
146} 304}
147 305
148void m68k_irq_shutdown(struct irq_data *data) 306EXPORT_SYMBOL(enable_irq);
307
308void disable_irq(unsigned int irq)
149{ 309{
150 unsigned int irq = data->irq; 310 struct irq_controller *contr;
311 unsigned long flags;
151 312
313 if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
314 printk("%s: Incorrect IRQ %d\n",
315 __func__, irq);
316 return;
317 }
318
319 spin_lock_irqsave(&contr->lock, flags);
320 if (!irq_depth[irq]++) {
321 if (contr->disable)
322 contr->disable(irq);
323 }
324 spin_unlock_irqrestore(&contr->lock, flags);
325}
326
327EXPORT_SYMBOL(disable_irq);
328
329void disable_irq_nosync(unsigned int irq) __attribute__((alias("disable_irq")));
330
331EXPORT_SYMBOL(disable_irq_nosync);
332
333int m68k_irq_startup(unsigned int irq)
334{
335 if (irq <= IRQ_AUTO_7)
336 vectors[VEC_SPUR + irq] = auto_inthandler;
337 else
338 vectors[m68k_first_user_vec + irq - IRQ_USER] = user_inthandler;
339 return 0;
340}
341
342void m68k_irq_shutdown(unsigned int irq)
343{
152 if (irq <= IRQ_AUTO_7) 344 if (irq <= IRQ_AUTO_7)
153 vectors[VEC_SPUR + irq] = bad_inthandler; 345 vectors[VEC_SPUR + irq] = bad_inthandler;
154 else 346 else
@@ -156,6 +348,33 @@ void m68k_irq_shutdown(struct irq_data *data)
156} 348}
157 349
158 350
351/*
352 * Do we need these probe functions on the m68k?
353 *
354 * ... may be useful with ISA devices
355 */
356unsigned long probe_irq_on (void)
357{
358#ifdef CONFIG_Q40
359 if (MACH_IS_Q40)
360 return q40_probe_irq_on();
361#endif
362 return 0;
363}
364
365EXPORT_SYMBOL(probe_irq_on);
366
367int probe_irq_off (unsigned long irqs)
368{
369#ifdef CONFIG_Q40
370 if (MACH_IS_Q40)
371 return q40_probe_irq_off(irqs);
372#endif
373 return 0;
374}
375
376EXPORT_SYMBOL(probe_irq_off);
377
159unsigned int irq_canonicalize(unsigned int irq) 378unsigned int irq_canonicalize(unsigned int irq)
160{ 379{
161#ifdef CONFIG_Q40 380#ifdef CONFIG_Q40
@@ -167,9 +386,52 @@ unsigned int irq_canonicalize(unsigned int irq)
167 386
168EXPORT_SYMBOL(irq_canonicalize); 387EXPORT_SYMBOL(irq_canonicalize);
169 388
389asmlinkage void m68k_handle_int(unsigned int irq)
390{
391 struct irq_node *node;
392 kstat_cpu(0).irqs[irq]++;
393 node = irq_list[irq];
394 do {
395 node->handler(irq, node->dev_id);
396 node = node->next;
397 } while (node);
398}
399
400asmlinkage void __m68k_handle_int(unsigned int irq, struct pt_regs *regs)
401{
402 struct pt_regs *old_regs;
403 old_regs = set_irq_regs(regs);
404 m68k_handle_int(irq);
405 set_irq_regs(old_regs);
406}
170 407
171asmlinkage void handle_badint(struct pt_regs *regs) 408asmlinkage void handle_badint(struct pt_regs *regs)
172{ 409{
173 atomic_inc(&irq_err_count); 410 kstat_cpu(0).irqs[0]++;
174 pr_warn("unexpected interrupt from %u\n", regs->vector); 411 printk("unexpected interrupt from %u\n", regs->vector);
412}
413
414int show_interrupts(struct seq_file *p, void *v)
415{
416 struct irq_controller *contr;
417 struct irq_node *node;
418 int i = *(loff_t *) v;
419
420 /* autovector interrupts */
421 if (irq_list[i]) {
422 contr = irq_controller[i];
423 node = irq_list[i];
424 seq_printf(p, "%-8s %3u: %10u %s", contr->name, i, kstat_cpu(0).irqs[i], node->devname);
425 while ((node = node->next))
426 seq_printf(p, ", %s", node->devname);
427 seq_puts(p, "\n");
428 }
429 return 0;
430}
431
432#ifdef CONFIG_PROC_FS
433void init_irq_proc(void)
434{
435 /* Insert /proc/irq driver here */
175} 436}
437#endif
diff --git a/arch/m68k/kernel/irq.c b/arch/m68k/kernel/irq.c
index 9ab4f550342..c73988cfa90 100644
--- a/arch/m68k/kernel/irq.c
+++ b/arch/m68k/kernel/irq.c
@@ -15,6 +15,7 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/seq_file.h>
+#include <asm/system.h>
 #include <asm/traps.h>
 
 asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
diff --git a/arch/m68k/kernel/m68k_ksyms.c b/arch/m68k/kernel/m68k_ksyms.c
index 774c1bd59c3..1b7a14d1a00 100644
--- a/arch/m68k/kernel/m68k_ksyms.c
+++ b/arch/m68k/kernel/m68k_ksyms.c
@@ -14,7 +14,7 @@ EXPORT_SYMBOL(__ashrdi3);
 EXPORT_SYMBOL(__lshrdi3);
 EXPORT_SYMBOL(__muldi3);
 
-#if defined(CONFIG_CPU_HAS_NO_MULDIV64)
+#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
 /*
  * Simpler 68k and ColdFire parts also need a few other gcc functions.
  */
diff --git a/arch/m68k/kernel/module.c b/arch/m68k/kernel/module.c
index eb46fd6038c..34849c4c6e3 100644
--- a/arch/m68k/kernel/module.c
+++ b/arch/m68k/kernel/module.c
@@ -47,7 +47,7 @@ int apply_relocate(Elf32_Shdr *sechdrs,
 		*location += sym->st_value;
 		break;
 	case R_68K_PC32:
-		/* Add the value, subtract its position */
+		/* Add the value, subtract its postition */
 		*location += sym->st_value - (uint32_t)location;
 		break;
 	default:
@@ -87,7 +87,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
 		*location = rel[i].r_addend + sym->st_value;
 		break;
 	case R_68K_PC32:
-		/* Add the value, subtract its position */
+		/* Add the value, subtract its postition */
 		*location = rel[i].r_addend + sym->st_value - (uint32_t)location;
 		break;
 	default:
diff --git a/arch/m68k/kernel/pcibios.c b/arch/m68k/kernel/pcibios.c
deleted file mode 100644
index 931a31ff59d..00000000000
--- a/arch/m68k/kernel/pcibios.c
+++ /dev/null
@@ -1,104 +0,0 @@
1/*
2 * pci.c -- basic PCI support code
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * (C) Copyright 2011, Greg Ungerer <gerg@uclinux.org>
10 */
11
12#include <linux/kernel.h>
13#include <linux/types.h>
14#include <linux/mm.h>
15#include <linux/init.h>
16#include <linux/pci.h>
17
18/*
19 * From arch/i386/kernel/pci-i386.c:
20 *
21 * We need to avoid collisions with `mirrored' VGA ports
22 * and other strange ISA hardware, so we always want the
23 * addresses to be allocated in the 0x000-0x0ff region
24 * modulo 0x400.
25 *
26 * Why? Because some silly external IO cards only decode
27 * the low 10 bits of the IO address. The 0x00-0xff region
28 * is reserved for motherboard devices that decode all 16
29 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
30 * but we want to try to avoid allocating at 0x2900-0x2bff
31 * which might be mirrored at 0x0100-0x03ff..
32 */
33resource_size_t pcibios_align_resource(void *data, const struct resource *res,
34 resource_size_t size, resource_size_t align)
35{
36 resource_size_t start = res->start;
37
38 if ((res->flags & IORESOURCE_IO) && (start & 0x300))
39 start = (start + 0x3ff) & ~0x3ff;
40
41 start = (start + align - 1) & ~(align - 1);
42
43 return start;
44}
45
46/*
47 * This is taken from the ARM code for this.
48 */
49int pcibios_enable_device(struct pci_dev *dev, int mask)
50{
51 struct resource *r;
52 u16 cmd, newcmd;
53 int idx;
54
55 pci_read_config_word(dev, PCI_COMMAND, &cmd);
56 newcmd = cmd;
57
58 for (idx = 0; idx < 6; idx++) {
59 /* Only set up the requested stuff */
60 if (!(mask & (1 << idx)))
61 continue;
62
63 r = dev->resource + idx;
64 if (!r->start && r->end) {
65 pr_err(KERN_ERR "PCI: Device %s not available because of resource collisions\n",
66 pci_name(dev));
67 return -EINVAL;
68 }
69 if (r->flags & IORESOURCE_IO)
70 newcmd |= PCI_COMMAND_IO;
71 if (r->flags & IORESOURCE_MEM)
72 newcmd |= PCI_COMMAND_MEMORY;
73 }
74
75 /*
76 * Bridges (eg, cardbus bridges) need to be fully enabled
77 */
78 if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
79 newcmd |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
80
81
82 if (newcmd != cmd) {
83 pr_info("PCI: enabling device %s (0x%04x -> 0x%04x)\n",
84 pci_name(dev), cmd, newcmd);
85 pci_write_config_word(dev, PCI_COMMAND, newcmd);
86 }
87 return 0;
88}
89
90void pcibios_fixup_bus(struct pci_bus *bus)
91{
92 struct pci_dev *dev;
93
94 list_for_each_entry(dev, &bus->devices, bus_list) {
95 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 8);
96 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 32);
97 }
98}
99
100char *pcibios_setup(char *str)
101{
102 return str;
103}
104
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index d538694ad20..6cf4bd6e34f 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -1,301 +1,5 @@
1/* 1#ifdef CONFIG_MMU
2 * linux/arch/m68k/kernel/process.c 2#include "process_mm.c"
3 *
4 * Copyright (C) 1995 Hamish Macdonald
5 *
6 * 68060 fixes by Jesper Skov
7 */
8
9/*
10 * This file handles the architecture-dependent parts of process handling..
11 */
12
13#include <linux/errno.h>
14#include <linux/module.h>
15#include <linux/sched.h>
16#include <linux/kernel.h>
17#include <linux/mm.h>
18#include <linux/slab.h>
19#include <linux/fs.h>
20#include <linux/smp.h>
21#include <linux/stddef.h>
22#include <linux/unistd.h>
23#include <linux/ptrace.h>
24#include <linux/user.h>
25#include <linux/reboot.h>
26#include <linux/init_task.h>
27#include <linux/mqueue.h>
28#include <linux/rcupdate.h>
29
30#include <asm/uaccess.h>
31#include <asm/traps.h>
32#include <asm/machdep.h>
33#include <asm/setup.h>
34#include <asm/pgtable.h>
35
36
37asmlinkage void ret_from_fork(void);
38asmlinkage void ret_from_kernel_thread(void);
39
40
41/*
42 * Return saved PC from a blocked thread
43 */
44unsigned long thread_saved_pc(struct task_struct *tsk)
45{
46 struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
47 /* Check whether the thread is blocked in resume() */
48 if (in_sched_functions(sw->retpc))
49 return ((unsigned long *)sw->a6)[1];
50 else
51 return sw->retpc;
52}
53
54/*
55 * The idle loop on an m68k..
56 */
57static void default_idle(void)
58{
59 if (!need_resched())
60#if defined(MACH_ATARI_ONLY)
61 /* block out HSYNC on the atari (falcon) */
62 __asm__("stop #0x2200" : : : "cc");
63#else 3#else
64 __asm__("stop #0x2000" : : : "cc"); 4#include "process_no.c"
65#endif 5#endif
66}
67
68void (*idle)(void) = default_idle;
69
70/*
71 * The idle thread. There's no useful work to be
72 * done, so just try to conserve power and have a
73 * low exit latency (ie sit in a loop waiting for
74 * somebody to say that they'd like to reschedule)
75 */
76void cpu_idle(void)
77{
78 /* endless idle loop with no priority at all */
79 while (1) {
80 rcu_idle_enter();
81 while (!need_resched())
82 idle();
83 rcu_idle_exit();
84 schedule_preempt_disabled();
85 }
86}
87
88void machine_restart(char * __unused)
89{
90 if (mach_reset)
91 mach_reset();
92 for (;;);
93}
94
95void machine_halt(void)
96{
97 if (mach_halt)
98 mach_halt();
99 for (;;);
100}
101
102void machine_power_off(void)
103{
104 if (mach_power_off)
105 mach_power_off();
106 for (;;);
107}
108
109void (*pm_power_off)(void) = machine_power_off;
110EXPORT_SYMBOL(pm_power_off);
111
112void show_regs(struct pt_regs * regs)
113{
114 printk("\n");
115 printk("Format %02x Vector: %04x PC: %08lx Status: %04x %s\n",
116 regs->format, regs->vector, regs->pc, regs->sr, print_tainted());
117 printk("ORIG_D0: %08lx D0: %08lx A2: %08lx A1: %08lx\n",
118 regs->orig_d0, regs->d0, regs->a2, regs->a1);
119 printk("A0: %08lx D5: %08lx D4: %08lx\n",
120 regs->a0, regs->d5, regs->d4);
121 printk("D3: %08lx D2: %08lx D1: %08lx\n",
122 regs->d3, regs->d2, regs->d1);
123 if (!(regs->sr & PS_S))
124 printk("USP: %08lx\n", rdusp());
125}
126
127void flush_thread(void)
128{
129 current->thread.fs = __USER_DS;
130#ifdef CONFIG_FPU
131 if (!FPU_IS_EMU) {
132 unsigned long zero = 0;
133 asm volatile("frestore %0": :"m" (zero));
134 }
135#endif
136}
137
138/*
139 * Why not generic sys_clone, you ask? m68k passes all arguments on stack.
140 * And we need all registers saved, which means a bunch of stuff pushed
141 * on top of pt_regs, which means that sys_clone() arguments would be
142 * buried. We could, of course, copy them, but it's too costly for no
143 * good reason - generic clone() would have to copy them *again* for
144 * do_fork() anyway. So in this case it's actually better to pass pt_regs *
145 * and extract arguments for do_fork() from there. Eventually we might
146 * go for calling do_fork() directly from the wrapper, but only after we
147 * are finished with do_fork() prototype conversion.
148 */
149asmlinkage int m68k_clone(struct pt_regs *regs)
150{
151 /* regs will be equal to current_pt_regs() */
152 return do_fork(regs->d1, regs->d2, 0,
153 (int __user *)regs->d3, (int __user *)regs->d4);
154}
155
156int copy_thread(unsigned long clone_flags, unsigned long usp,
157 unsigned long arg, struct task_struct *p)
158{
159 struct fork_frame {
160 struct switch_stack sw;
161 struct pt_regs regs;
162 } *frame;
163
164 frame = (struct fork_frame *) (task_stack_page(p) + THREAD_SIZE) - 1;
165
166 p->thread.ksp = (unsigned long)frame;
167 p->thread.esp0 = (unsigned long)&frame->regs;
168
169 /*
170 * Must save the current SFC/DFC value, NOT the value when
171 * the parent was last descheduled - RGH 10-08-96
172 */
173 p->thread.fs = get_fs().seg;
174
175 if (unlikely(p->flags & PF_KTHREAD)) {
176 /* kernel thread */
177 memset(frame, 0, sizeof(struct fork_frame));
178 frame->regs.sr = PS_S;
179 frame->sw.a3 = usp; /* function */
180 frame->sw.d7 = arg;
181 frame->sw.retpc = (unsigned long)ret_from_kernel_thread;
182 p->thread.usp = 0;
183 return 0;
184 }
185 memcpy(frame, container_of(current_pt_regs(), struct fork_frame, regs),
186 sizeof(struct fork_frame));
187 frame->regs.d0 = 0;
188 frame->sw.retpc = (unsigned long)ret_from_fork;
189 p->thread.usp = usp ?: rdusp();
190
191 if (clone_flags & CLONE_SETTLS)
192 task_thread_info(p)->tp_value = frame->regs.d5;
193
194#ifdef CONFIG_FPU
195 if (!FPU_IS_EMU) {
196 /* Copy the current fpu state */
197 asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory");
198
199 if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2]) {
200 if (CPU_IS_COLDFIRE) {
201 asm volatile ("fmovemd %/fp0-%/fp7,%0\n\t"
202 "fmovel %/fpiar,%1\n\t"
203 "fmovel %/fpcr,%2\n\t"
204 "fmovel %/fpsr,%3"
205 :
206 : "m" (p->thread.fp[0]),
207 "m" (p->thread.fpcntl[0]),
208 "m" (p->thread.fpcntl[1]),
209 "m" (p->thread.fpcntl[2])
210 : "memory");
211 } else {
212 asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t"
213 "fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
214 :
215 : "m" (p->thread.fp[0]),
216 "m" (p->thread.fpcntl[0])
217 : "memory");
218 }
219 }
220
221 /* Restore the state in case the fpu was busy */
222 asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0]));
223 }
224#endif /* CONFIG_FPU */
225
226 return 0;
227}
228
229/* Fill in the fpu structure for a core dump. */
230#ifdef CONFIG_FPU
231int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu)
232{
233 char fpustate[216];
234
235 if (FPU_IS_EMU) {
236 int i;
237
238 memcpy(fpu->fpcntl, current->thread.fpcntl, 12);
239 memcpy(fpu->fpregs, current->thread.fp, 96);
240 /* Convert internal fpu reg representation
241 * into long double format
242 */
243 for (i = 0; i < 24; i += 3)
244 fpu->fpregs[i] = ((fpu->fpregs[i] & 0xffff0000) << 15) |
245 ((fpu->fpregs[i] & 0x0000ffff) << 16);
246 return 1;
247 }
248
249 /* First dump the fpu context to avoid protocol violation. */
250 asm volatile ("fsave %0" :: "m" (fpustate[0]) : "memory");
251 if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2])
252 return 0;
253
254 if (CPU_IS_COLDFIRE) {
255 asm volatile ("fmovel %/fpiar,%0\n\t"
256 "fmovel %/fpcr,%1\n\t"
257 "fmovel %/fpsr,%2\n\t"
258 "fmovemd %/fp0-%/fp7,%3"
259 :
260 : "m" (fpu->fpcntl[0]),
261 "m" (fpu->fpcntl[1]),
262 "m" (fpu->fpcntl[2]),
263 "m" (fpu->fpregs[0])
264 : "memory");
265 } else {
266 asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
267 :
268 : "m" (fpu->fpcntl[0])
269 : "memory");
270 asm volatile ("fmovemx %/fp0-%/fp7,%0"
271 :
272 : "m" (fpu->fpregs[0])
273 : "memory");
274 }
275
276 return 1;
277}
278EXPORT_SYMBOL(dump_fpu);
279#endif /* CONFIG_FPU */
280
281unsigned long get_wchan(struct task_struct *p)
282{
283 unsigned long fp, pc;
284 unsigned long stack_page;
285 int count = 0;
286 if (!p || p == current || p->state == TASK_RUNNING)
287 return 0;
288
289 stack_page = (unsigned long)task_stack_page(p);
290 fp = ((struct switch_stack *)p->thread.ksp)->a6;
291 do {
292 if (fp < stack_page+sizeof(struct thread_info) ||
293 fp >= 8184+stack_page)
294 return 0;
295 pc = ((unsigned long *)fp)[1];
296 if (!in_sched_functions(pc))
297 return pc;
298 fp = *(unsigned long *) fp;
299 } while (count++ < 16);
300 return 0;
301}
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c
index 1bc10e62b9a..07a417550e9 100644
--- a/arch/m68k/kernel/ptrace.c
+++ b/arch/m68k/kernel/ptrace.c
@@ -1,304 +1,5 @@
1/*
2 * linux/arch/m68k/kernel/ptrace.c
3 *
4 * Copyright (C) 1994 by Hamish Macdonald
5 * Taken from linux/kernel/ptrace.c and modified for M680x0.
6 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of
10 * this archive for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/sched.h>
15#include <linux/mm.h>
16#include <linux/smp.h>
17#include <linux/errno.h>
18#include <linux/ptrace.h>
19#include <linux/user.h>
20#include <linux/signal.h>
21#include <linux/tracehook.h>
22
23#include <asm/uaccess.h>
24#include <asm/page.h>
25#include <asm/pgtable.h>
26#include <asm/processor.h>
27
28/*
29 * does not yet catch signals sent when the child dies.
30 * in exit.c or in signal.c.
31 */
32
33/* determines which bits in the SR the user has access to. */
34/* 1 = access 0 = no access */
35#define SR_MASK 0x001f
36
37/* sets the trace bits. */
38#define TRACE_BITS 0xC000
39#define T1_BIT 0x8000
40#define T0_BIT 0x4000
41
42/* Find the stack offset for a register, relative to thread.esp0. */
43#define PT_REG(reg) ((long)&((struct pt_regs *)0)->reg)
44#define SW_REG(reg) ((long)&((struct switch_stack *)0)->reg \
45 - sizeof(struct switch_stack))
46/* Mapping from PT_xxx to the stack offset at which the register is
47 saved. Notice that usp has no stack-slot and needs to be treated
48 specially (see get_reg/put_reg below). */
49static const int regoff[] = {
50 [0] = PT_REG(d1),
51 [1] = PT_REG(d2),
52 [2] = PT_REG(d3),
53 [3] = PT_REG(d4),
54 [4] = PT_REG(d5),
55 [5] = SW_REG(d6),
56 [6] = SW_REG(d7),
57 [7] = PT_REG(a0),
58 [8] = PT_REG(a1),
59 [9] = PT_REG(a2),
60 [10] = SW_REG(a3),
61 [11] = SW_REG(a4),
62 [12] = SW_REG(a5),
63 [13] = SW_REG(a6),
64 [14] = PT_REG(d0),
65 [15] = -1,
66 [16] = PT_REG(orig_d0),
67 [17] = PT_REG(sr),
68 [18] = PT_REG(pc),
69};
70
71/*
72 * Get contents of register REGNO in task TASK.
73 */
74static inline long get_reg(struct task_struct *task, int regno)
75{
76 unsigned long *addr;
77
78 if (regno == PT_USP)
79 addr = &task->thread.usp;
80 else if (regno < ARRAY_SIZE(regoff))
81 addr = (unsigned long *)(task->thread.esp0 + regoff[regno]);
82 else
83 return 0;
84 /* Need to take stkadj into account. */
85 if (regno == PT_SR || regno == PT_PC) {
86 long stkadj = *(long *)(task->thread.esp0 + PT_REG(stkadj));
87 addr = (unsigned long *) ((unsigned long)addr + stkadj);
88 /* The sr is actually a 16 bit register. */
89 if (regno == PT_SR)
90 return *(unsigned short *)addr;
91 }
92 return *addr;
93}
94
95/*
96 * Write contents of register REGNO in task TASK.
97 */
98static inline int put_reg(struct task_struct *task, int regno,
99 unsigned long data)
100{
101 unsigned long *addr;
102
103 if (regno == PT_USP)
104 addr = &task->thread.usp;
105 else if (regno < ARRAY_SIZE(regoff))
106 addr = (unsigned long *)(task->thread.esp0 + regoff[regno]);
107 else
108 return -1;
109 /* Need to take stkadj into account. */
110 if (regno == PT_SR || regno == PT_PC) {
111 long stkadj = *(long *)(task->thread.esp0 + PT_REG(stkadj));
112 addr = (unsigned long *) ((unsigned long)addr + stkadj);
113 /* The sr is actually a 16 bit register. */
114 if (regno == PT_SR) {
115 *(unsigned short *)addr = data;
116 return 0;
117 }
118 }
119 *addr = data;
120 return 0;
121}
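
The PT_REG()/SW_REG() macros and the regoff[] table above turn a user-visible register number into a byte offset into the saved register area on the kernel stack (pt_regs, plus switch_stack for the callee-saved registers). Below is a minimal sketch of the same offset-table idea on a made-up register block; the struct, names and indices are invented for illustration only.

#include <stdio.h>
#include <stddef.h>

/* Toy saved-register block standing in for pt_regs; layout is invented. */
struct regblock {
	unsigned long d0, d1, a0, a1;
	unsigned long sr, pc;
};

#define REG(r) ((long)offsetof(struct regblock, r))

/* Register number (as user space sees it) -> offset into the block. */
static const long regoff[] = { REG(d1), REG(a0), REG(a1), REG(d0), REG(sr), REG(pc) };

static unsigned long get_reg(struct regblock *base, int regno)
{
	if (regno < 0 || regno >= (int)(sizeof(regoff) / sizeof(regoff[0])))
		return 0;
	return *(unsigned long *)((char *)base + regoff[regno]);
}

int main(void)
{
	struct regblock rb = { .d0 = 1, .d1 = 2, .a0 = 3, .a1 = 4,
	                       .sr = 0x2000, .pc = 0x8000 };

	printf("reg 0 (d1) = %lu, reg 5 (pc) = %#lx\n",
	       get_reg(&rb, 0), get_reg(&rb, 5));
	return 0;
}

The real SW_REG() additionally subtracts sizeof(struct switch_stack), since the callee-saved registers sit below pt_regs on the kernel stack, and get_reg()/put_reg() further fold in the stkadj slot for PT_SR and PT_PC.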
122
123/*
124 * Make sure the single step bit is not set.
125 */
126static inline void singlestep_disable(struct task_struct *child)
127{
128 unsigned long tmp = get_reg(child, PT_SR) & ~TRACE_BITS;
129 put_reg(child, PT_SR, tmp);
130 clear_tsk_thread_flag(child, TIF_DELAYED_TRACE);
131}
132
133/*
134 * Called by kernel/ptrace.c when detaching..
135 */
136void ptrace_disable(struct task_struct *child)
137{
138 singlestep_disable(child);
139}
140
141void user_enable_single_step(struct task_struct *child)
142{
143 unsigned long tmp = get_reg(child, PT_SR) & ~TRACE_BITS;
144 put_reg(child, PT_SR, tmp | T1_BIT);
145 set_tsk_thread_flag(child, TIF_DELAYED_TRACE);
146}
147
148#ifdef CONFIG_MMU 1#ifdef CONFIG_MMU
149void user_enable_block_step(struct task_struct *child) 2#include "ptrace_mm.c"
150{ 3#else
151 unsigned long tmp = get_reg(child, PT_SR) & ~TRACE_BITS; 4#include "ptrace_no.c"
152 put_reg(child, PT_SR, tmp | T0_BIT);
153}
154#endif 5#endif
155
156void user_disable_single_step(struct task_struct *child)
157{
158 singlestep_disable(child);
159}
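
Single-step and block-step are plain bit operations on the m68k status register: T1 (0x8000) traces every instruction, T0 (0x4000) traces only on change of flow, and TRACE_BITS masks both off. A trivial sketch of the masking done by user_enable_single_step(), user_enable_block_step() and singlestep_disable(); the starting SR value is made up.

#include <stdio.h>

#define TRACE_BITS 0xC000
#define T1_BIT     0x8000   /* trace every instruction */
#define T0_BIT     0x4000   /* trace on change of flow */

int main(void)
{
	unsigned int sr = 0x2004;                        /* made-up SR value */
	unsigned int single = (sr & ~TRACE_BITS) | T1_BIT;
	unsigned int block  = (sr & ~TRACE_BITS) | T0_BIT;
	unsigned int off    = sr & ~TRACE_BITS;

	printf("sr=%#x single-step=%#x block-step=%#x disabled=%#x\n",
	       sr, single, block, off);
	return 0;
}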
160
161long arch_ptrace(struct task_struct *child, long request,
162 unsigned long addr, unsigned long data)
163{
164 unsigned long tmp;
165 int i, ret = 0;
166 int regno = addr >> 2; /* temporary hack. */
167 unsigned long __user *datap = (unsigned long __user *) data;
168
169 switch (request) {
170 /* read the word at location addr in the USER area. */
171 case PTRACE_PEEKUSR:
172 if (addr & 3)
173 goto out_eio;
174
175 if (regno >= 0 && regno < 19) {
176 tmp = get_reg(child, regno);
177 } else if (regno >= 21 && regno < 49) {
178 tmp = child->thread.fp[regno - 21];
179 /* Convert internal fpu reg representation
180 * into long double format
181 */
182 if (FPU_IS_EMU && (regno < 45) && !(regno % 3))
183 tmp = ((tmp & 0xffff0000) << 15) |
184 ((tmp & 0x0000ffff) << 16);
185#ifndef CONFIG_MMU
186 } else if (regno == 49) {
187 tmp = child->mm->start_code;
188 } else if (regno == 50) {
189 tmp = child->mm->start_data;
190 } else if (regno == 51) {
191 tmp = child->mm->end_code;
192#endif
193 } else
194 goto out_eio;
195 ret = put_user(tmp, datap);
196 break;
197
198 case PTRACE_POKEUSR:
199 /* write the word at location addr in the USER area */
200 if (addr & 3)
201 goto out_eio;
202
203 if (regno == PT_SR) {
204 data &= SR_MASK;
205 data |= get_reg(child, PT_SR) & ~SR_MASK;
206 }
207 if (regno >= 0 && regno < 19) {
208 if (put_reg(child, regno, data))
209 goto out_eio;
210 } else if (regno >= 21 && regno < 48) {
211 /* Convert long double format
212 * into internal fpu reg representation
213 */
214 if (FPU_IS_EMU && (regno < 45) && !(regno % 3)) {
215 data <<= 15;
216 data = (data & 0xffff0000) |
217 ((data & 0x0000ffff) >> 1);
218 }
219 child->thread.fp[regno - 21] = data;
220 } else
221 goto out_eio;
222 break;
223
224 case PTRACE_GETREGS: /* Get all gp regs from the child. */
225 for (i = 0; i < 19; i++) {
226 tmp = get_reg(child, i);
227 ret = put_user(tmp, datap);
228 if (ret)
229 break;
230 datap++;
231 }
232 break;
233
234 case PTRACE_SETREGS: /* Set all gp regs in the child. */
235 for (i = 0; i < 19; i++) {
236 ret = get_user(tmp, datap);
237 if (ret)
238 break;
239 if (i == PT_SR) {
240 tmp &= SR_MASK;
241 tmp |= get_reg(child, PT_SR) & ~SR_MASK;
242 }
243 put_reg(child, i, tmp);
244 datap++;
245 }
246 break;
247
248 case PTRACE_GETFPREGS: /* Get the child FPU state. */
249 if (copy_to_user(datap, &child->thread.fp,
250 sizeof(struct user_m68kfp_struct)))
251 ret = -EFAULT;
252 break;
253
254 case PTRACE_SETFPREGS: /* Set the child FPU state. */
255 if (copy_from_user(&child->thread.fp, datap,
256 sizeof(struct user_m68kfp_struct)))
257 ret = -EFAULT;
258 break;
259
260 case PTRACE_GET_THREAD_AREA:
261 ret = put_user(task_thread_info(child)->tp_value, datap);
262 break;
263
264 default:
265 ret = ptrace_request(child, request, addr, data);
266 break;
267 }
268
269 return ret;
270out_eio:
271 return -EIO;
272}
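
From the tracer's side, the PTRACE_PEEKUSR case above is reached with a word-aligned offset, i.e. addr = 4 * register number, which arch_ptrace() turns back into regno = addr >> 2. A hedged, m68k-specific usage sketch with the standard ptrace(2) interface; PT_PC = 18 is assumed to match the m68k <asm/ptrace.h> numbering implied by the regoff[] table.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

#define PT_PC 18   /* assumed m68k register index for the program counter */

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {                       /* child: ask to be traced */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);               /* stop so the parent can peek */
		_exit(0);
	}

	waitpid(pid, NULL, 0);                /* wait for the SIGSTOP stop */

	errno = 0;
	long pc = ptrace(PTRACE_PEEKUSER, pid, (void *)(4 * PT_PC), NULL);
	if (pc == -1 && errno)
		perror("PTRACE_PEEKUSER");
	else
		printf("child pc = %#lx\n", pc);

	ptrace(PTRACE_CONT, pid, NULL, NULL);
	waitpid(pid, NULL, 0);
	return 0;
}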
273
274asmlinkage void syscall_trace(void)
275{
276 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
277 ? 0x80 : 0));
278 /*
279 * this isn't the same as continuing with a signal, but it will do
280 * for normal use. strace only continues with a signal if the
281 * stopping signal is not SIGTRAP. -brl
282 */
283 if (current->exit_code) {
284 send_sig(current->exit_code, current, 1);
285 current->exit_code = 0;
286 }
287}
288
289#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)
290asmlinkage int syscall_trace_enter(void)
291{
292 int ret = 0;
293
294 if (test_thread_flag(TIF_SYSCALL_TRACE))
295 ret = tracehook_report_syscall_entry(task_pt_regs(current));
296 return ret;
297}
298
299asmlinkage void syscall_trace_leave(void)
300{
301 if (test_thread_flag(TIF_SYSCALL_TRACE))
302 tracehook_report_syscall_exit(task_pt_regs(current), 0);
303}
304#endif /* CONFIG_COLDFIRE */
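
syscall_trace() above reports a syscall stop to the tracer as SIGTRAP, or as SIGTRAP | 0x80 once the tracer has asked for PTRACE_O_TRACESYSGOOD, which is how tools such as strace tell syscall stops apart from genuine SIGTRAPs. A small, hedged tracer loop using the standard ptrace(2) options (nothing here is m68k-specific):

#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		getpid();                     /* a couple of syscalls to observe */
		_exit(0);
	}

	int status;
	waitpid(pid, &status, 0);
	ptrace(PTRACE_SETOPTIONS, pid, NULL, (void *)PTRACE_O_TRACESYSGOOD);

	while (1) {
		ptrace(PTRACE_SYSCALL, pid, NULL, NULL);   /* run to next syscall stop */
		waitpid(pid, &status, 0);
		if (WIFEXITED(status))
			break;
		if (WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80))
			printf("syscall entry or exit stop\n");
	}
	return 0;
}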
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index d872ce4807c..c3b45061dd0 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -221,8 +221,7 @@ void __init setup_arch(char **cmdline_p)
221#endif 221#endif
222 222
223 /* The bootinfo is located right after the kernel bss */ 223 /* The bootinfo is located right after the kernel bss */
224 if (!CPU_IS_COLDFIRE) 224 m68k_parse_bootinfo((const struct bi_record *)_end);
225 m68k_parse_bootinfo((const struct bi_record *)_end);
226 225
227 if (CPU_IS_040) 226 if (CPU_IS_040)
228 m68k_is040or060 = 4; 227 m68k_is040or060 = 4;
@@ -236,7 +235,7 @@ void __init setup_arch(char **cmdline_p)
236 * with them, we should add a test to check_bugs() below] */ 235 * with them, we should add a test to check_bugs() below] */
237#ifndef CONFIG_M68KFPU_EMU_ONLY 236#ifndef CONFIG_M68KFPU_EMU_ONLY
238 /* clear the fpu if we have one */ 237 /* clear the fpu if we have one */
239 if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060|FPU_COLDFIRE)) { 238 if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060)) {
240 volatile int zero = 0; 239 volatile int zero = 0;
241 asm volatile ("frestore %0" : : "m" (zero)); 240 asm volatile ("frestore %0" : : "m" (zero));
242 } 241 }
@@ -259,10 +258,6 @@ void __init setup_arch(char **cmdline_p)
259 init_mm.end_data = (unsigned long)_edata; 258 init_mm.end_data = (unsigned long)_edata;
260 init_mm.brk = (unsigned long)_end; 259 init_mm.brk = (unsigned long)_end;
261 260
262#if defined(CONFIG_BOOTPARAM)
263 strncpy(m68k_command_line, CONFIG_BOOTPARAM_STRING, CL_SIZE);
264 m68k_command_line[CL_SIZE - 1] = 0;
265#endif /* CONFIG_BOOTPARAM */
266 *cmdline_p = m68k_command_line; 261 *cmdline_p = m68k_command_line;
267 memcpy(boot_command_line, *cmdline_p, CL_SIZE); 262 memcpy(boot_command_line, *cmdline_p, CL_SIZE);
268 263
@@ -328,11 +323,6 @@ void __init setup_arch(char **cmdline_p)
328 config_sun3x(); 323 config_sun3x();
329 break; 324 break;
330#endif 325#endif
331#ifdef CONFIG_COLDFIRE
332 case MACH_M54XX:
333 config_BSP(NULL, 0);
334 break;
335#endif
336 default: 326 default:
337 panic("No configuration setup"); 327 panic("No configuration setup");
338 } 328 }
@@ -394,7 +384,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
394#define LOOP_CYCLES_68030 (8) 384#define LOOP_CYCLES_68030 (8)
395#define LOOP_CYCLES_68040 (3) 385#define LOOP_CYCLES_68040 (3)
396#define LOOP_CYCLES_68060 (1) 386#define LOOP_CYCLES_68060 (1)
397#define LOOP_CYCLES_COLDFIRE (2)
398 387
399 if (CPU_IS_020) { 388 if (CPU_IS_020) {
400 cpu = "68020"; 389 cpu = "68020";
@@ -408,9 +397,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
408 } else if (CPU_IS_060) { 397 } else if (CPU_IS_060) {
409 cpu = "68060"; 398 cpu = "68060";
410 clockfactor = LOOP_CYCLES_68060; 399 clockfactor = LOOP_CYCLES_68060;
411 } else if (CPU_IS_COLDFIRE) {
412 cpu = "ColdFire";
413 clockfactor = LOOP_CYCLES_COLDFIRE;
414 } else { 400 } else {
415 cpu = "680x0"; 401 cpu = "680x0";
416 clockfactor = 0; 402 clockfactor = 0;
@@ -429,8 +415,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
429 fpu = "68060"; 415 fpu = "68060";
430 else if (m68k_fputype & FPU_SUNFPA) 416 else if (m68k_fputype & FPU_SUNFPA)
431 fpu = "Sun FPA"; 417 fpu = "Sun FPA";
432 else if (m68k_fputype & FPU_COLDFIRE)
433 fpu = "ColdFire";
434 else 418 else
435 fpu = "none"; 419 fpu = "none";
436#endif 420#endif
@@ -447,8 +431,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
447 mmu = "Sun-3"; 431 mmu = "Sun-3";
448 else if (m68k_mmutype & MMU_APOLLO) 432 else if (m68k_mmutype & MMU_APOLLO)
449 mmu = "Apollo"; 433 mmu = "Apollo";
450 else if (m68k_mmutype & MMU_COLDFIRE)
451 mmu = "ColdFire";
452 else 434 else
453 mmu = "unknown"; 435 mmu = "unknown";
454 436
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index 71fb29938db..16b2de7f510 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -31,13 +31,11 @@
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/initrd.h> 32#include <linux/initrd.h>
33#include <linux/root_dev.h> 33#include <linux/root_dev.h>
34#include <linux/rtc.h>
35 34
36#include <asm/setup.h> 35#include <asm/setup.h>
37#include <asm/irq.h> 36#include <asm/irq.h>
38#include <asm/machdep.h> 37#include <asm/machdep.h>
39#include <asm/pgtable.h> 38#include <asm/pgtable.h>
40#include <asm/sections.h>
41 39
42unsigned long memory_start; 40unsigned long memory_start;
43unsigned long memory_end; 41unsigned long memory_end;
@@ -48,9 +46,8 @@ EXPORT_SYMBOL(memory_end);
48char __initdata command_line[COMMAND_LINE_SIZE]; 46char __initdata command_line[COMMAND_LINE_SIZE];
49 47
50/* machine dependent timer functions */ 48/* machine dependent timer functions */
51void (*mach_sched_init)(irq_handler_t handler) __initdata = NULL; 49void (*mach_gettod)(int*, int*, int*, int*, int*, int*);
52int (*mach_set_clock_mmss)(unsigned long); 50int (*mach_set_clock_mmss)(unsigned long);
53int (*mach_hwclk) (int, struct rtc_time*);
54 51
55/* machine dependent reboot functions */ 52/* machine dependent reboot functions */
56void (*mach_reset)(void); 53void (*mach_reset)(void);
@@ -83,6 +80,9 @@ void (*mach_power_off)(void);
83#define CPU_INSTR_PER_JIFFY 16 80#define CPU_INSTR_PER_JIFFY 16
84#endif 81#endif
85 82
83extern int _stext, _etext, _sdata, _edata, _sbss, _ebss, _end;
84extern int _ramstart, _ramend;
85
86#if defined(CONFIG_UBOOT) 86#if defined(CONFIG_UBOOT)
87/* 87/*
88 * parse_uboot_commandline 88 * parse_uboot_commandline
@@ -218,10 +218,13 @@ void __init setup_arch(char **cmdline_p)
218 printk(KERN_INFO "Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc. (Jate Sujjavanich)\n"); 218 printk(KERN_INFO "Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc. (Jate Sujjavanich)\n");
219#endif 219#endif
220 220
221 pr_debug("KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p BSS=0x%p-0x%p\n", 221 pr_debug("KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x "
222 _stext, _etext, _sdata, _edata, __bss_start, __bss_stop); 222 "BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext,
223 pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ", 223 (int) &_sdata, (int) &_edata,
224 __bss_stop, memory_start, memory_start, memory_end); 224 (int) &_sbss, (int) &_ebss);
225 pr_debug("MEMORY -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x\n ",
226 (int) &_ebss, (int) memory_start,
227 (int) memory_start, (int) memory_end);
225 228
226 /* Keep a copy of command line */ 229 /* Keep a copy of command line */
227 *cmdline_p = &command_line[0]; 230 *cmdline_p = &command_line[0];
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index 9a396cda314..2e25713e2ea 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -1,1185 +1,5 @@
1/*
2 * linux/arch/m68k/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file COPYING in the main directory of this archive
8 * for more details.
9 */
10
11/*
12 * Linux/m68k support by Hamish Macdonald
13 *
14 * 68060 fixes by Jesper Skov
15 *
16 * 1997-12-01 Modified for POSIX.1b signals by Andreas Schwab
17 *
18 * mathemu support by Roman Zippel
19 * (Note: fpstate in the signal context is completely ignored for the emulator
20 * and the internal floating point format is put on stack)
21 */
22
23/*
24 * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
25 * Atari :-) Current limitation: Only one sigstack can be active at one time.
26 * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
27 * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
28 * signal handlers!
29 */
30
31#include <linux/sched.h>
32#include <linux/mm.h>
33#include <linux/kernel.h>
34#include <linux/signal.h>
35#include <linux/syscalls.h>
36#include <linux/errno.h>
37#include <linux/wait.h>
38#include <linux/ptrace.h>
39#include <linux/unistd.h>
40#include <linux/stddef.h>
41#include <linux/highuid.h>
42#include <linux/personality.h>
43#include <linux/tty.h>
44#include <linux/binfmts.h>
45#include <linux/module.h>
46#include <linux/tracehook.h>
47
48#include <asm/setup.h>
49#include <asm/uaccess.h>
50#include <asm/pgtable.h>
51#include <asm/traps.h>
52#include <asm/ucontext.h>
53
54#ifdef CONFIG_MMU 1#ifdef CONFIG_MMU
55 2#include "signal_mm.c"
56/*
57 * Handle the slight differences in classic 68k and ColdFire trap frames.
58 */
59#ifdef CONFIG_COLDFIRE
60#define FORMAT 4
61#define FMT4SIZE 0
62#else 3#else
63#define FORMAT 0 4#include "signal_no.c"
64#define FMT4SIZE sizeof(((struct frame *)0)->un.fmt4)
65#endif 5#endif
66
67static const int frame_size_change[16] = {
68 [1] = -1, /* sizeof(((struct frame *)0)->un.fmt1), */
69 [2] = sizeof(((struct frame *)0)->un.fmt2),
70 [3] = sizeof(((struct frame *)0)->un.fmt3),
71 [4] = FMT4SIZE,
72 [5] = -1, /* sizeof(((struct frame *)0)->un.fmt5), */
73 [6] = -1, /* sizeof(((struct frame *)0)->un.fmt6), */
74 [7] = sizeof(((struct frame *)0)->un.fmt7),
75 [8] = -1, /* sizeof(((struct frame *)0)->un.fmt8), */
76 [9] = sizeof(((struct frame *)0)->un.fmt9),
77 [10] = sizeof(((struct frame *)0)->un.fmta),
78 [11] = sizeof(((struct frame *)0)->un.fmtb),
79 [12] = -1, /* sizeof(((struct frame *)0)->un.fmtc), */
80 [13] = -1, /* sizeof(((struct frame *)0)->un.fmtd), */
81 [14] = -1, /* sizeof(((struct frame *)0)->un.fmte), */
82 [15] = -1, /* sizeof(((struct frame *)0)->un.fmtf), */
83};
84
85static inline int frame_extra_sizes(int f)
86{
87 return frame_size_change[f];
88}
89
90int handle_kernel_fault(struct pt_regs *regs)
91{
92 const struct exception_table_entry *fixup;
93 struct pt_regs *tregs;
94
95 /* Are we prepared to handle this kernel fault? */
96 fixup = search_exception_tables(regs->pc);
97 if (!fixup)
98 return 0;
99
100 /* Create a new four word stack frame, discarding the old one. */
101 regs->stkadj = frame_extra_sizes(regs->format);
102 tregs = (struct pt_regs *)((long)regs + regs->stkadj);
103 tregs->vector = regs->vector;
104 tregs->format = FORMAT;
105 tregs->pc = fixup->fixup;
106 tregs->sr = regs->sr;
107
108 return 1;
109}
110
111void ptrace_signal_deliver(void)
112{
113 struct pt_regs *regs = signal_pt_regs();
114 if (regs->orig_d0 < 0)
115 return;
116 switch (regs->d0) {
117 case -ERESTARTNOHAND:
118 case -ERESTARTSYS:
119 case -ERESTARTNOINTR:
120 regs->d0 = regs->orig_d0;
121 regs->orig_d0 = -1;
122 regs->pc -= 2;
123 break;
124 }
125}
126
127static inline void push_cache (unsigned long vaddr)
128{
129 /*
130 * Using the old cache_push_v() was really a big waste.
131 *
132 * What we are trying to do is to flush 8 bytes to ram.
133 * Flushing 2 cache lines of 16 bytes is much cheaper than
134 * flushing 1 or 2 pages, as previously done in
135 * cache_push_v().
136 * Jes
137 */
138 if (CPU_IS_040) {
139 unsigned long temp;
140
141 __asm__ __volatile__ (".chip 68040\n\t"
142 "nop\n\t"
143 "ptestr (%1)\n\t"
144 "movec %%mmusr,%0\n\t"
145 ".chip 68k"
146 : "=r" (temp)
147 : "a" (vaddr));
148
149 temp &= PAGE_MASK;
150 temp |= vaddr & ~PAGE_MASK;
151
152 __asm__ __volatile__ (".chip 68040\n\t"
153 "nop\n\t"
154 "cpushl %%bc,(%0)\n\t"
155 ".chip 68k"
156 : : "a" (temp));
157 }
158 else if (CPU_IS_060) {
159 unsigned long temp;
160 __asm__ __volatile__ (".chip 68060\n\t"
161 "plpar (%0)\n\t"
162 ".chip 68k"
163 : "=a" (temp)
164 : "0" (vaddr));
165 __asm__ __volatile__ (".chip 68060\n\t"
166 "cpushl %%bc,(%0)\n\t"
167 ".chip 68k"
168 : : "a" (temp));
169 } else if (!CPU_IS_COLDFIRE) {
170 /*
171 * 68030/68020 have no writeback cache;
172 * still need to clear icache.
173 * Note that vaddr is guaranteed to be long word aligned.
174 */
175 unsigned long temp;
176 asm volatile ("movec %%cacr,%0" : "=r" (temp));
177 temp += 4;
178 asm volatile ("movec %0,%%caar\n\t"
179 "movec %1,%%cacr"
180 : : "r" (vaddr), "r" (temp));
181 asm volatile ("movec %0,%%caar\n\t"
182 "movec %1,%%cacr"
183 : : "r" (vaddr + 4), "r" (temp));
184 }
185}
186
187static inline void adjustformat(struct pt_regs *regs)
188{
189}
190
191static inline void save_a5_state(struct sigcontext *sc, struct pt_regs *regs)
192{
193}
194
195#else /* CONFIG_MMU */
196
197void ret_from_user_signal(void);
198void ret_from_user_rt_signal(void);
199
200static inline int frame_extra_sizes(int f)
201{
202 /* No frame size adjustments required on non-MMU CPUs */
203 return 0;
204}
205
206static inline void adjustformat(struct pt_regs *regs)
207{
208 ((struct switch_stack *)regs - 1)->a5 = current->mm->start_data;
209 /*
210 * set format byte to make stack appear modulo 4, which it will
211 * be when doing the rte
212 */
213 regs->format = 0x4;
214}
215
216static inline void save_a5_state(struct sigcontext *sc, struct pt_regs *regs)
217{
218 sc->sc_a5 = ((struct switch_stack *)regs - 1)->a5;
219}
220
221static inline void push_cache(unsigned long vaddr)
222{
223}
224
225#endif /* CONFIG_MMU */
226
227/*
228 * Atomically swap in the new signal mask, and wait for a signal.
229 */
230asmlinkage int
231sys_sigsuspend(int unused0, int unused1, old_sigset_t mask)
232{
233 sigset_t blocked;
234 siginitset(&blocked, mask);
235 return sigsuspend(&blocked);
236}
237
238asmlinkage int
239sys_sigaction(int sig, const struct old_sigaction __user *act,
240 struct old_sigaction __user *oact)
241{
242 struct k_sigaction new_ka, old_ka;
243 int ret;
244
245 if (act) {
246 old_sigset_t mask;
247 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
248 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
249 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
250 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
251 __get_user(mask, &act->sa_mask))
252 return -EFAULT;
253 siginitset(&new_ka.sa.sa_mask, mask);
254 }
255
256 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
257
258 if (!ret && oact) {
259 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
260 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
261 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
262 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
263 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
264 return -EFAULT;
265 }
266
267 return ret;
268}
269
270asmlinkage int
271sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
272{
273 return do_sigaltstack(uss, uoss, rdusp());
274}
275
276
277/*
278 * Do a signal return; undo the signal stack.
279 *
280 * Keep the return code on the stack quadword aligned!
281 * That makes the cache flush below easier.
282 */
283
284struct sigframe
285{
286 char __user *pretcode;
287 int sig;
288 int code;
289 struct sigcontext __user *psc;
290 char retcode[8];
291 unsigned long extramask[_NSIG_WORDS-1];
292 struct sigcontext sc;
293};
294
295struct rt_sigframe
296{
297 char __user *pretcode;
298 int sig;
299 struct siginfo __user *pinfo;
300 void __user *puc;
301 char retcode[8];
302 struct siginfo info;
303 struct ucontext uc;
304};
305
306#define FPCONTEXT_SIZE 216
307#define uc_fpstate uc_filler[0]
308#define uc_formatvec uc_filler[FPCONTEXT_SIZE/4]
309#define uc_extra uc_filler[FPCONTEXT_SIZE/4+1]
310
311#ifdef CONFIG_FPU
312
313static unsigned char fpu_version; /* version number of fpu, set by setup_frame */
314
315static inline int restore_fpu_state(struct sigcontext *sc)
316{
317 int err = 1;
318
319 if (FPU_IS_EMU) {
320 /* restore registers */
321 memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
322 memcpy(current->thread.fp, sc->sc_fpregs, 24);
323 return 0;
324 }
325
326 if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
327 /* Verify the frame format. */
328 if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
329 (sc->sc_fpstate[0] != fpu_version))
330 goto out;
331 if (CPU_IS_020_OR_030) {
332 if (m68k_fputype & FPU_68881 &&
333 !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4))
334 goto out;
335 if (m68k_fputype & FPU_68882 &&
336 !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4))
337 goto out;
338 } else if (CPU_IS_040) {
339 if (!(sc->sc_fpstate[1] == 0x00 ||
340 sc->sc_fpstate[1] == 0x28 ||
341 sc->sc_fpstate[1] == 0x60))
342 goto out;
343 } else if (CPU_IS_060) {
344 if (!(sc->sc_fpstate[3] == 0x00 ||
345 sc->sc_fpstate[3] == 0x60 ||
346 sc->sc_fpstate[3] == 0xe0))
347 goto out;
348 } else if (CPU_IS_COLDFIRE) {
349 if (!(sc->sc_fpstate[0] == 0x00 ||
350 sc->sc_fpstate[0] == 0x05 ||
351 sc->sc_fpstate[0] == 0xe5))
352 goto out;
353 } else
354 goto out;
355
356 if (CPU_IS_COLDFIRE) {
357 __asm__ volatile ("fmovemd %0,%%fp0-%%fp1\n\t"
358 "fmovel %1,%%fpcr\n\t"
359 "fmovel %2,%%fpsr\n\t"
360 "fmovel %3,%%fpiar"
361 : /* no outputs */
362 : "m" (sc->sc_fpregs[0]),
363 "m" (sc->sc_fpcntl[0]),
364 "m" (sc->sc_fpcntl[1]),
365 "m" (sc->sc_fpcntl[2]));
366 } else {
367 __asm__ volatile (".chip 68k/68881\n\t"
368 "fmovemx %0,%%fp0-%%fp1\n\t"
369 "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
370 ".chip 68k"
371 : /* no outputs */
372 : "m" (*sc->sc_fpregs),
373 "m" (*sc->sc_fpcntl));
374 }
375 }
376
377 if (CPU_IS_COLDFIRE) {
378 __asm__ volatile ("frestore %0" : : "m" (*sc->sc_fpstate));
379 } else {
380 __asm__ volatile (".chip 68k/68881\n\t"
381 "frestore %0\n\t"
382 ".chip 68k"
383 : : "m" (*sc->sc_fpstate));
384 }
385 err = 0;
386
387out:
388 return err;
389}
390
391static inline int rt_restore_fpu_state(struct ucontext __user *uc)
392{
393 unsigned char fpstate[FPCONTEXT_SIZE];
394 int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
395 fpregset_t fpregs;
396 int err = 1;
397
398 if (FPU_IS_EMU) {
399 /* restore fpu control register */
400 if (__copy_from_user(current->thread.fpcntl,
401 uc->uc_mcontext.fpregs.f_fpcntl, 12))
402 goto out;
403 /* restore all other fpu registers */
404 if (__copy_from_user(current->thread.fp,
405 uc->uc_mcontext.fpregs.f_fpregs, 96))
406 goto out;
407 return 0;
408 }
409
410 if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
411 goto out;
412 if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
413 if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
414 context_size = fpstate[1];
415 /* Verify the frame format. */
416 if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
417 (fpstate[0] != fpu_version))
418 goto out;
419 if (CPU_IS_020_OR_030) {
420 if (m68k_fputype & FPU_68881 &&
421 !(context_size == 0x18 || context_size == 0xb4))
422 goto out;
423 if (m68k_fputype & FPU_68882 &&
424 !(context_size == 0x38 || context_size == 0xd4))
425 goto out;
426 } else if (CPU_IS_040) {
427 if (!(context_size == 0x00 ||
428 context_size == 0x28 ||
429 context_size == 0x60))
430 goto out;
431 } else if (CPU_IS_060) {
432 if (!(fpstate[3] == 0x00 ||
433 fpstate[3] == 0x60 ||
434 fpstate[3] == 0xe0))
435 goto out;
436 } else if (CPU_IS_COLDFIRE) {
437 if (!(fpstate[3] == 0x00 ||
438 fpstate[3] == 0x05 ||
439 fpstate[3] == 0xe5))
440 goto out;
441 } else
442 goto out;
443 if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
444 sizeof(fpregs)))
445 goto out;
446
447 if (CPU_IS_COLDFIRE) {
448 __asm__ volatile ("fmovemd %0,%%fp0-%%fp7\n\t"
449 "fmovel %1,%%fpcr\n\t"
450 "fmovel %2,%%fpsr\n\t"
451 "fmovel %3,%%fpiar"
452 : /* no outputs */
453 : "m" (fpregs.f_fpregs[0]),
454 "m" (fpregs.f_fpcntl[0]),
455 "m" (fpregs.f_fpcntl[1]),
456 "m" (fpregs.f_fpcntl[2]));
457 } else {
458 __asm__ volatile (".chip 68k/68881\n\t"
459 "fmovemx %0,%%fp0-%%fp7\n\t"
460 "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
461 ".chip 68k"
462 : /* no outputs */
463 : "m" (*fpregs.f_fpregs),
464 "m" (*fpregs.f_fpcntl));
465 }
466 }
467 if (context_size &&
468 __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
469 context_size))
470 goto out;
471
472 if (CPU_IS_COLDFIRE) {
473 __asm__ volatile ("frestore %0" : : "m" (*fpstate));
474 } else {
475 __asm__ volatile (".chip 68k/68881\n\t"
476 "frestore %0\n\t"
477 ".chip 68k"
478 : : "m" (*fpstate));
479 }
480 err = 0;
481
482out:
483 return err;
484}
485
486/*
487 * Set up a signal frame.
488 */
489static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
490{
491 if (FPU_IS_EMU) {
492 /* save registers */
493 memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
494 memcpy(sc->sc_fpregs, current->thread.fp, 24);
495 return;
496 }
497
498 if (CPU_IS_COLDFIRE) {
499 __asm__ volatile ("fsave %0"
500 : : "m" (*sc->sc_fpstate) : "memory");
501 } else {
502 __asm__ volatile (".chip 68k/68881\n\t"
503 "fsave %0\n\t"
504 ".chip 68k"
505 : : "m" (*sc->sc_fpstate) : "memory");
506 }
507
508 if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
509 fpu_version = sc->sc_fpstate[0];
510 if (CPU_IS_020_OR_030 &&
511 regs->vector >= (VEC_FPBRUC * 4) &&
512 regs->vector <= (VEC_FPNAN * 4)) {
513 /* Clear pending exception in 68882 idle frame */
514 if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
515 sc->sc_fpstate[0x38] |= 1 << 3;
516 }
517
518 if (CPU_IS_COLDFIRE) {
519 __asm__ volatile ("fmovemd %%fp0-%%fp1,%0\n\t"
520 "fmovel %%fpcr,%1\n\t"
521 "fmovel %%fpsr,%2\n\t"
522 "fmovel %%fpiar,%3"
523 : "=m" (sc->sc_fpregs[0]),
524 "=m" (sc->sc_fpcntl[0]),
525 "=m" (sc->sc_fpcntl[1]),
526 "=m" (sc->sc_fpcntl[2])
527 : /* no inputs */
528 : "memory");
529 } else {
530 __asm__ volatile (".chip 68k/68881\n\t"
531 "fmovemx %%fp0-%%fp1,%0\n\t"
532 "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
533 ".chip 68k"
534 : "=m" (*sc->sc_fpregs),
535 "=m" (*sc->sc_fpcntl)
536 : /* no inputs */
537 : "memory");
538 }
539 }
540}
541
542static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
543{
544 unsigned char fpstate[FPCONTEXT_SIZE];
545 int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
546 int err = 0;
547
548 if (FPU_IS_EMU) {
549 /* save fpu control register */
550 err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
551 current->thread.fpcntl, 12);
552 /* save all other fpu registers */
553 err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
554 current->thread.fp, 96);
555 return err;
556 }
557
558 if (CPU_IS_COLDFIRE) {
559 __asm__ volatile ("fsave %0" : : "m" (*fpstate) : "memory");
560 } else {
561 __asm__ volatile (".chip 68k/68881\n\t"
562 "fsave %0\n\t"
563 ".chip 68k"
564 : : "m" (*fpstate) : "memory");
565 }
566
567 err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
568 if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
569 fpregset_t fpregs;
570 if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
571 context_size = fpstate[1];
572 fpu_version = fpstate[0];
573 if (CPU_IS_020_OR_030 &&
574 regs->vector >= (VEC_FPBRUC * 4) &&
575 regs->vector <= (VEC_FPNAN * 4)) {
576 /* Clear pending exception in 68882 idle frame */
577 if (*(unsigned short *) fpstate == 0x1f38)
578 fpstate[0x38] |= 1 << 3;
579 }
580 if (CPU_IS_COLDFIRE) {
581 __asm__ volatile ("fmovemd %%fp0-%%fp7,%0\n\t"
582 "fmovel %%fpcr,%1\n\t"
583 "fmovel %%fpsr,%2\n\t"
584 "fmovel %%fpiar,%3"
585 : "=m" (fpregs.f_fpregs[0]),
586 "=m" (fpregs.f_fpcntl[0]),
587 "=m" (fpregs.f_fpcntl[1]),
588 "=m" (fpregs.f_fpcntl[2])
589 : /* no inputs */
590 : "memory");
591 } else {
592 __asm__ volatile (".chip 68k/68881\n\t"
593 "fmovemx %%fp0-%%fp7,%0\n\t"
594 "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
595 ".chip 68k"
596 : "=m" (*fpregs.f_fpregs),
597 "=m" (*fpregs.f_fpcntl)
598 : /* no inputs */
599 : "memory");
600 }
601 err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
602 sizeof(fpregs));
603 }
604 if (context_size)
605 err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4,
606 context_size);
607 return err;
608}
609
610#else /* CONFIG_FPU */
611
612/*
613 * For the case with no FPU configured these all do nothing.
614 */
615static inline int restore_fpu_state(struct sigcontext *sc)
616{
617 return 0;
618}
619
620static inline int rt_restore_fpu_state(struct ucontext __user *uc)
621{
622 return 0;
623}
624
625static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
626{
627}
628
629static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
630{
631 return 0;
632}
633
634#endif /* CONFIG_FPU */
635
636static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
637 void __user *fp)
638{
639 int fsize = frame_extra_sizes(formatvec >> 12);
640 if (fsize < 0) {
641 /*
642 * user process trying to return with weird frame format
643 */
644#ifdef DEBUG
645 printk("user process returning with weird frame format\n");
646#endif
647 return 1;
648 }
649 if (!fsize) {
650 regs->format = formatvec >> 12;
651 regs->vector = formatvec & 0xfff;
652 } else {
653 struct switch_stack *sw = (struct switch_stack *)regs - 1;
654 unsigned long buf[fsize / 2]; /* yes, twice as much */
655
656 /* that'll make sure that expansion won't crap over data */
657 if (copy_from_user(buf + fsize / 4, fp, fsize))
658 return 1;
659
660 /* point of no return */
661 regs->format = formatvec >> 12;
662 regs->vector = formatvec & 0xfff;
663#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
664 __asm__ __volatile__ (
665#ifdef CONFIG_COLDFIRE
666 " movel %0,%/sp\n\t"
667 " bra ret_from_signal\n"
668#else
669 " movel %0,%/a0\n\t"
670 " subl %1,%/a0\n\t" /* make room on stack */
671 " movel %/a0,%/sp\n\t" /* set stack pointer */
672 /* move switch_stack and pt_regs */
673 "1: movel %0@+,%/a0@+\n\t"
674 " dbra %2,1b\n\t"
675 " lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
676 " lsrl #2,%1\n\t"
677 " subql #1,%1\n\t"
678 /* copy to the gap we'd made */
679 "2: movel %4@+,%/a0@+\n\t"
680 " dbra %1,2b\n\t"
681 " bral ret_from_signal\n"
682#endif
683 : /* no outputs, it doesn't ever return */
684 : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
685 "n" (frame_offset), "a" (buf + fsize/4)
686 : "a0");
687#undef frame_offset
688 }
689 return 0;
690}
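
mangle_kernel_stack() and setup_sigcontext() above treat formatvec as a packed word: the hardware exception-frame format number lives in the top nibble and the vector offset in the low 12 bits, and frame_extra_sizes(formatvec >> 12) then gives the number of extra bytes that frame format carries. A one-screen sketch of that packing; the sample value is invented.

#include <stdio.h>

int main(void)
{
	unsigned int formatvec = (2u << 12) | 0x008;   /* made-up format 2 frame */
	unsigned int format = formatvec >> 12;
	unsigned int vector = formatvec & 0xfff;

	printf("formatvec=%#06x -> format=%u vector=%#x\n",
	       formatvec, format, vector);
	return 0;
}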
691
692static inline int
693restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp)
694{
695 int formatvec;
696 struct sigcontext context;
697 int err = 0;
698
699 /* Always make any pending restarted system calls return -EINTR */
700 current_thread_info()->restart_block.fn = do_no_restart_syscall;
701
702 /* get previous context */
703 if (copy_from_user(&context, usc, sizeof(context)))
704 goto badframe;
705
706 /* restore passed registers */
707 regs->d0 = context.sc_d0;
708 regs->d1 = context.sc_d1;
709 regs->a0 = context.sc_a0;
710 regs->a1 = context.sc_a1;
711 regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
712 regs->pc = context.sc_pc;
713 regs->orig_d0 = -1; /* disable syscall checks */
714 wrusp(context.sc_usp);
715 formatvec = context.sc_formatvec;
716
717 err = restore_fpu_state(&context);
718
719 if (err || mangle_kernel_stack(regs, formatvec, fp))
720 goto badframe;
721
722 return 0;
723
724badframe:
725 return 1;
726}
727
728static inline int
729rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
730 struct ucontext __user *uc)
731{
732 int temp;
733 greg_t __user *gregs = uc->uc_mcontext.gregs;
734 unsigned long usp;
735 int err;
736
737 /* Always make any pending restarted system calls return -EINTR */
738 current_thread_info()->restart_block.fn = do_no_restart_syscall;
739
740 err = __get_user(temp, &uc->uc_mcontext.version);
741 if (temp != MCONTEXT_VERSION)
742 goto badframe;
743 /* restore passed registers */
744 err |= __get_user(regs->d0, &gregs[0]);
745 err |= __get_user(regs->d1, &gregs[1]);
746 err |= __get_user(regs->d2, &gregs[2]);
747 err |= __get_user(regs->d3, &gregs[3]);
748 err |= __get_user(regs->d4, &gregs[4]);
749 err |= __get_user(regs->d5, &gregs[5]);
750 err |= __get_user(sw->d6, &gregs[6]);
751 err |= __get_user(sw->d7, &gregs[7]);
752 err |= __get_user(regs->a0, &gregs[8]);
753 err |= __get_user(regs->a1, &gregs[9]);
754 err |= __get_user(regs->a2, &gregs[10]);
755 err |= __get_user(sw->a3, &gregs[11]);
756 err |= __get_user(sw->a4, &gregs[12]);
757 err |= __get_user(sw->a5, &gregs[13]);
758 err |= __get_user(sw->a6, &gregs[14]);
759 err |= __get_user(usp, &gregs[15]);
760 wrusp(usp);
761 err |= __get_user(regs->pc, &gregs[16]);
762 err |= __get_user(temp, &gregs[17]);
763 regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
764 regs->orig_d0 = -1; /* disable syscall checks */
765 err |= __get_user(temp, &uc->uc_formatvec);
766
767 err |= rt_restore_fpu_state(uc);
768
769 if (err || do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT)
770 goto badframe;
771
772 if (mangle_kernel_stack(regs, temp, &uc->uc_extra))
773 goto badframe;
774
775 return 0;
776
777badframe:
778 return 1;
779}
780
781asmlinkage int do_sigreturn(unsigned long __unused)
782{
783 struct switch_stack *sw = (struct switch_stack *) &__unused;
784 struct pt_regs *regs = (struct pt_regs *) (sw + 1);
785 unsigned long usp = rdusp();
786 struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
787 sigset_t set;
788
789 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
790 goto badframe;
791 if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
792 (_NSIG_WORDS > 1 &&
793 __copy_from_user(&set.sig[1], &frame->extramask,
794 sizeof(frame->extramask))))
795 goto badframe;
796
797 set_current_blocked(&set);
798
799 if (restore_sigcontext(regs, &frame->sc, frame + 1))
800 goto badframe;
801 return regs->d0;
802
803badframe:
804 force_sig(SIGSEGV, current);
805 return 0;
806}
807
808asmlinkage int do_rt_sigreturn(unsigned long __unused)
809{
810 struct switch_stack *sw = (struct switch_stack *) &__unused;
811 struct pt_regs *regs = (struct pt_regs *) (sw + 1);
812 unsigned long usp = rdusp();
813 struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
814 sigset_t set;
815
816 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
817 goto badframe;
818 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
819 goto badframe;
820
821 set_current_blocked(&set);
822
823 if (rt_restore_ucontext(regs, sw, &frame->uc))
824 goto badframe;
825 return regs->d0;
826
827badframe:
828 force_sig(SIGSEGV, current);
829 return 0;
830}
831
832static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
833 unsigned long mask)
834{
835 sc->sc_mask = mask;
836 sc->sc_usp = rdusp();
837 sc->sc_d0 = regs->d0;
838 sc->sc_d1 = regs->d1;
839 sc->sc_a0 = regs->a0;
840 sc->sc_a1 = regs->a1;
841 sc->sc_sr = regs->sr;
842 sc->sc_pc = regs->pc;
843 sc->sc_formatvec = regs->format << 12 | regs->vector;
844 save_a5_state(sc, regs);
845 save_fpu_state(sc, regs);
846}
847
848static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
849{
850 struct switch_stack *sw = (struct switch_stack *)regs - 1;
851 greg_t __user *gregs = uc->uc_mcontext.gregs;
852 int err = 0;
853
854 err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
855 err |= __put_user(regs->d0, &gregs[0]);
856 err |= __put_user(regs->d1, &gregs[1]);
857 err |= __put_user(regs->d2, &gregs[2]);
858 err |= __put_user(regs->d3, &gregs[3]);
859 err |= __put_user(regs->d4, &gregs[4]);
860 err |= __put_user(regs->d5, &gregs[5]);
861 err |= __put_user(sw->d6, &gregs[6]);
862 err |= __put_user(sw->d7, &gregs[7]);
863 err |= __put_user(regs->a0, &gregs[8]);
864 err |= __put_user(regs->a1, &gregs[9]);
865 err |= __put_user(regs->a2, &gregs[10]);
866 err |= __put_user(sw->a3, &gregs[11]);
867 err |= __put_user(sw->a4, &gregs[12]);
868 err |= __put_user(sw->a5, &gregs[13]);
869 err |= __put_user(sw->a6, &gregs[14]);
870 err |= __put_user(rdusp(), &gregs[15]);
871 err |= __put_user(regs->pc, &gregs[16]);
872 err |= __put_user(regs->sr, &gregs[17]);
873 err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
874 err |= rt_save_fpu_state(uc, regs);
875 return err;
876}
877
878static inline void __user *
879get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
880{
881 unsigned long usp;
882
883 /* Default to using normal stack. */
884 usp = rdusp();
885
886 /* This is the X/Open sanctioned signal stack switching. */
887 if (ka->sa.sa_flags & SA_ONSTACK) {
888 if (!sas_ss_flags(usp))
889 usp = current->sas_ss_sp + current->sas_ss_size;
890 }
891 return (void __user *)((usp - frame_size) & -8UL);
892}
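
get_sigframe() above is the kernel half of the SA_ONSTACK contract: if the handler was registered with SA_ONSTACK and the task is not already on its alternate stack, the frame is built at sas_ss_sp + sas_ss_size (stacks grow downwards) and then rounded down to an 8-byte boundary. A small user-space illustration with the standard sigaltstack()/sigaction() API; only the signal number and stack-size choices are arbitrary.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>

static void handler(int sig, siginfo_t *info, void *ctx)
{
	int probe;
	/* The address of a local shows which stack the frame was built on. */
	printf("handling signal %d, handler stack near %p\n", sig, (void *)&probe);
}

int main(void)
{
	stack_t ss;
	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	sigaltstack(&ss, NULL);

	struct sigaction sa;
	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_ONSTACK | SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	printf("alternate stack at %p\n", ss.ss_sp);
	raise(SIGUSR1);
	return 0;
}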
893
894static int setup_frame (int sig, struct k_sigaction *ka,
895 sigset_t *set, struct pt_regs *regs)
896{
897 struct sigframe __user *frame;
898 int fsize = frame_extra_sizes(regs->format);
899 struct sigcontext context;
900 int err = 0;
901
902 if (fsize < 0) {
903#ifdef DEBUG
904 printk ("setup_frame: Unknown frame format %#x\n",
905 regs->format);
906#endif
907 goto give_sigsegv;
908 }
909
910 frame = get_sigframe(ka, regs, sizeof(*frame) + fsize);
911
912 if (fsize)
913 err |= copy_to_user (frame + 1, regs + 1, fsize);
914
915 err |= __put_user((current_thread_info()->exec_domain
916 && current_thread_info()->exec_domain->signal_invmap
917 && sig < 32
918 ? current_thread_info()->exec_domain->signal_invmap[sig]
919 : sig),
920 &frame->sig);
921
922 err |= __put_user(regs->vector, &frame->code);
923 err |= __put_user(&frame->sc, &frame->psc);
924
925 if (_NSIG_WORDS > 1)
926 err |= copy_to_user(frame->extramask, &set->sig[1],
927 sizeof(frame->extramask));
928
929 setup_sigcontext(&context, regs, set->sig[0]);
930 err |= copy_to_user (&frame->sc, &context, sizeof(context));
931
932 /* Set up to return from userspace. */
933#ifdef CONFIG_MMU
934 err |= __put_user(frame->retcode, &frame->pretcode);
935 /* moveq #,d0; trap #0 */
936 err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
937 (long __user *)(frame->retcode));
938#else
939 err |= __put_user((void *) ret_from_user_signal, &frame->pretcode);
940#endif
941
942 if (err)
943 goto give_sigsegv;
944
945 push_cache ((unsigned long) &frame->retcode);
946
947 /*
948 * Set up registers for signal handler. All the state we are about
949 * to destroy is successfully copied to sigframe.
950 */
951 wrusp ((unsigned long) frame);
952 regs->pc = (unsigned long) ka->sa.sa_handler;
953 adjustformat(regs);
954
955 /*
956 * This is subtle; if we build more than one sigframe, all but the
957 * first one will see frame format 0 and have fsize == 0, so we won't
958 * screw stkadj.
959 */
960 if (fsize)
961 regs->stkadj = fsize;
962
963 /* Prepare to skip over the extra stuff in the exception frame. */
964 if (regs->stkadj) {
965 struct pt_regs *tregs =
966 (struct pt_regs *)((ulong)regs + regs->stkadj);
967#ifdef DEBUG
968 printk("Performing stackadjust=%04x\n", regs->stkadj);
969#endif
970 /* This must be copied with decreasing addresses to
971 handle overlaps. */
972 tregs->vector = 0;
973 tregs->format = 0;
974 tregs->pc = regs->pc;
975 tregs->sr = regs->sr;
976 }
977 return 0;
978
979give_sigsegv:
980 force_sigsegv(sig, current);
981 return err;
982}
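
The retcode trampoline stored above is two m68k instructions packed into a single 32-bit store: 0x70xx is "moveq #xx,%d0" and 0x4e40 is "trap #0", so 0x70004e40 + (__NR_sigreturn << 16) simply drops the syscall number into the moveq immediate. A short sketch of that encoding, assuming the m68k __NR_sigreturn value of 119:

#include <stdio.h>

int main(void)
{
	unsigned int nr_sigreturn = 119;                 /* assumed m68k value */
	unsigned int word = 0x70004e40u + (nr_sigreturn << 16);

	printf("trampoline = %#010x\n", word);           /* 0x70774e40 */
	printf("  %#06x  moveq #%u,%%d0\n", word >> 16, nr_sigreturn);
	printf("  %#06x  trap #0\n", word & 0xffff);
	return 0;
}

The rt frame below instead emits moveq #(__NR_rt_sigreturn ^ 0xff),%d0 followed by notb %d0, since the rt_sigreturn number does not fit moveq's sign-extended 8-bit immediate.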
983
984static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
985 sigset_t *set, struct pt_regs *regs)
986{
987 struct rt_sigframe __user *frame;
988 int fsize = frame_extra_sizes(regs->format);
989 int err = 0;
990
991 if (fsize < 0) {
992#ifdef DEBUG
993 printk ("setup_frame: Unknown frame format %#x\n",
994 regs->format);
995#endif
996 goto give_sigsegv;
997 }
998
999 frame = get_sigframe(ka, regs, sizeof(*frame));
1000
1001 if (fsize)
1002 err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);
1003
1004 err |= __put_user((current_thread_info()->exec_domain
1005 && current_thread_info()->exec_domain->signal_invmap
1006 && sig < 32
1007 ? current_thread_info()->exec_domain->signal_invmap[sig]
1008 : sig),
1009 &frame->sig);
1010 err |= __put_user(&frame->info, &frame->pinfo);
1011 err |= __put_user(&frame->uc, &frame->puc);
1012 err |= copy_siginfo_to_user(&frame->info, info);
1013
1014 /* Create the ucontext. */
1015 err |= __put_user(0, &frame->uc.uc_flags);
1016 err |= __put_user(NULL, &frame->uc.uc_link);
1017 err |= __put_user((void __user *)current->sas_ss_sp,
1018 &frame->uc.uc_stack.ss_sp);
1019 err |= __put_user(sas_ss_flags(rdusp()),
1020 &frame->uc.uc_stack.ss_flags);
1021 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
1022 err |= rt_setup_ucontext(&frame->uc, regs);
1023 err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));
1024
1025 /* Set up to return from userspace. */
1026#ifdef CONFIG_MMU
1027 err |= __put_user(frame->retcode, &frame->pretcode);
1028#ifdef __mcoldfire__
1029 /* movel #__NR_rt_sigreturn,d0; trap #0 */
1030 err |= __put_user(0x203c0000, (long __user *)(frame->retcode + 0));
1031 err |= __put_user(0x00004e40 + (__NR_rt_sigreturn << 16),
1032 (long __user *)(frame->retcode + 4));
1033#else
1034 /* moveq #,d0; notb d0; trap #0 */
1035 err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
1036 (long __user *)(frame->retcode + 0));
1037 err |= __put_user(0x4e40, (short __user *)(frame->retcode + 4));
1038#endif
1039#else
1040 err |= __put_user((void *) ret_from_user_rt_signal, &frame->pretcode);
1041#endif /* CONFIG_MMU */
1042
1043 if (err)
1044 goto give_sigsegv;
1045
1046 push_cache ((unsigned long) &frame->retcode);
1047
1048 /*
1049 * Set up registers for signal handler. All the state we are about
1050 * to destroy is successfully copied to sigframe.
1051 */
1052 wrusp ((unsigned long) frame);
1053 regs->pc = (unsigned long) ka->sa.sa_handler;
1054 adjustformat(regs);
1055
1056 /*
1057 * This is subtle; if we build more than one sigframe, all but the
1058 * first one will see frame format 0 and have fsize == 0, so we won't
1059 * screw stkadj.
1060 */
1061 if (fsize)
1062 regs->stkadj = fsize;
1063
1064 /* Prepare to skip over the extra stuff in the exception frame. */
1065 if (regs->stkadj) {
1066 struct pt_regs *tregs =
1067 (struct pt_regs *)((ulong)regs + regs->stkadj);
1068#ifdef DEBUG
1069 printk("Performing stackadjust=%04x\n", regs->stkadj);
1070#endif
1071 /* This must be copied with decreasing addresses to
1072 handle overlaps. */
1073 tregs->vector = 0;
1074 tregs->format = 0;
1075 tregs->pc = regs->pc;
1076 tregs->sr = regs->sr;
1077 }
1078 return 0;
1079
1080give_sigsegv:
1081 force_sigsegv(sig, current);
1082 return err;
1083}
1084
1085static inline void
1086handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
1087{
1088 switch (regs->d0) {
1089 case -ERESTARTNOHAND:
1090 if (!has_handler)
1091 goto do_restart;
1092 regs->d0 = -EINTR;
1093 break;
1094
1095 case -ERESTART_RESTARTBLOCK:
1096 if (!has_handler) {
1097 regs->d0 = __NR_restart_syscall;
1098 regs->pc -= 2;
1099 break;
1100 }
1101 regs->d0 = -EINTR;
1102 break;
1103
1104 case -ERESTARTSYS:
1105 if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
1106 regs->d0 = -EINTR;
1107 break;
1108 }
1109 /* fallthrough */
1110 case -ERESTARTNOINTR:
1111 do_restart:
1112 regs->d0 = regs->orig_d0;
1113 regs->pc -= 2;
1114 break;
1115 }
1116}
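
handle_restart() above maps the kernel-internal -ERESTART* return values to either -EINTR or a transparent restart of the interrupted syscall, the latter by rewinding %pc by 2, the size of the trap instruction. A compact sketch of the same decision table; the ERESTART* numbers are mirrored from include/linux/errno.h and the enum is invented for the example.

#include <stdio.h>

#define ERESTARTSYS           512   /* values mirrored from include/linux/errno.h */
#define ERESTARTNOINTR        513
#define ERESTARTNOHAND        514
#define ERESTART_RESTARTBLOCK 516

enum action { NONE, RETURN_EINTR, RESTART, RESTART_BLOCK };

static enum action decide(long d0, int has_handler, int sa_restart)
{
	switch (d0) {
	case -ERESTARTNOHAND:
		return has_handler ? RETURN_EINTR : RESTART;
	case -ERESTART_RESTARTBLOCK:
		return has_handler ? RETURN_EINTR : RESTART_BLOCK;
	case -ERESTARTSYS:
		if (has_handler && !sa_restart)
			return RETURN_EINTR;
		/* fall through */
	case -ERESTARTNOINTR:
		return RESTART;          /* d0 = orig_d0, pc -= 2 in the real code */
	default:
		return NONE;
	}
}

int main(void)
{
	printf("ERESTARTSYS, handler with SA_RESTART    -> %d (2 = restart)\n",
	       decide(-ERESTARTSYS, 1, 1));
	printf("ERESTARTSYS, handler without SA_RESTART -> %d (1 = -EINTR)\n",
	       decide(-ERESTARTSYS, 1, 0));
	return 0;
}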
1117
1118/*
1119 * OK, we're invoking a handler
1120 */
1121static void
1122handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
1123 struct pt_regs *regs)
1124{
1125 sigset_t *oldset = sigmask_to_save();
1126 int err;
1127 /* are we from a system call? */
1128 if (regs->orig_d0 >= 0)
1129 /* If so, check system call restarting.. */
1130 handle_restart(regs, ka, 1);
1131
1132 /* set up the stack frame */
1133 if (ka->sa.sa_flags & SA_SIGINFO)
1134 err = setup_rt_frame(sig, ka, info, oldset, regs);
1135 else
1136 err = setup_frame(sig, ka, oldset, regs);
1137
1138 if (err)
1139 return;
1140
1141 signal_delivered(sig, info, ka, regs, 0);
1142
1143 if (test_thread_flag(TIF_DELAYED_TRACE)) {
1144 regs->sr &= ~0x8000;
1145 send_sig(SIGTRAP, current, 1);
1146 }
1147}
1148
1149/*
1150 * Note that 'init' is a special process: it doesn't get signals it doesn't
1151 * want to handle. Thus you cannot kill init with a SIGKILL, even by
1152 * mistake.
1153 */
1154static void do_signal(struct pt_regs *regs)
1155{
1156 siginfo_t info;
1157 struct k_sigaction ka;
1158 int signr;
1159
1160 current->thread.esp0 = (unsigned long) regs;
1161
1162 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
1163 if (signr > 0) {
1164 /* Whee! Actually deliver the signal. */
1165 handle_signal(signr, &ka, &info, regs);
1166 return;
1167 }
1168
1169 /* Did we come from a system call? */
1170 if (regs->orig_d0 >= 0)
1171 /* Restart the system call - no handlers present */
1172 handle_restart(regs, NULL, 0);
1173
1174 /* If there's no signal to deliver, we just restore the saved mask. */
1175 restore_saved_sigmask();
1176}
1177
1178void do_notify_resume(struct pt_regs *regs)
1179{
1180 if (test_thread_flag(TIF_SIGPENDING))
1181 do_signal(regs);
1182
1183 if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
1184 tracehook_notify_resume(regs);
1185}
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index 3a480b3df0d..8623f8dc16f 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -479,13 +479,9 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
479 goto bad_access; 479 goto bad_access;
480 } 480 }
481 481
482 /* 482 mem_value = *mem;
483 * No need to check for EFAULT; we know that the page is
484 * present and writable.
485 */
486 __get_user(mem_value, mem);
487 if (mem_value == oldval) 483 if (mem_value == oldval)
488 __put_user(newval, mem); 484 *mem = newval;
489 485
490 pte_unmap_unlock(pte, ptl); 486 pte_unmap_unlock(pte, ptl);
491 up_read(&mm->mmap_sem); 487 up_read(&mm->mmap_sem);
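
sys_atomic_cmpxchg_32() gives user space an atomic 32-bit compare-and-exchange on m68k cores that lack a usable CAS instruction; the hunk above swaps the __get_user()/__put_user() pair for a direct dereference of the word that has just been looked up and locked. For reference, the semantics it emulates are those of an ordinary CAS, sketched here with the GCC/Clang __atomic builtin:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mem = 5;
	uint32_t expected = 5;

	/* Succeeds: mem == expected, so mem becomes 9. */
	int ok = __atomic_compare_exchange_n(&mem, &expected, 9, 0,
	                                     __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	printf("first cas: ok=%d mem=%u\n", ok, mem);

	/* Fails: mem is now 9, not 5; 'expected' is updated to the old value. */
	expected = 5;
	ok = __atomic_compare_exchange_n(&mem, &expected, 7, 0,
	                                 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	printf("second cas: ok=%d mem=%u expected=%u\n", ok, mem, expected);
	return 0;
}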
@@ -549,6 +545,23 @@ asmlinkage int sys_getpagesize(void)
549 return PAGE_SIZE; 545 return PAGE_SIZE;
550} 546}
551 547
548/*
549 * Do a system call from kernel instead of calling sys_execve so we
550 * end up with proper pt_regs.
551 */
552int kernel_execve(const char *filename,
553 const char *const argv[],
554 const char *const envp[])
555{
556 register long __res asm ("%d0") = __NR_execve;
557 register long __a asm ("%d1") = (long)(filename);
558 register long __b asm ("%d2") = (long)(argv);
559 register long __c asm ("%d3") = (long)(envp);
560 asm volatile ("trap #0" : "+d" (__res)
561 : "d" (__a), "d" (__b), "d" (__c));
562 return __res;
563}
564
552asmlinkage unsigned long sys_get_thread_area(void) 565asmlinkage unsigned long sys_get_thread_area(void)
553{ 566{
554 return current_thread_info()->tp_value; 567 return current_thread_info()->tp_value;
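
kernel_execve() above spells out the m68k system-call convention: number in %d0, arguments in %d1..%d3, "trap #0", result returned in %d0. A heavily hedged, m68k-only sketch of the same convention from user space; __NR_getpid is assumed to be 20 as in the m68k unistd.h, and this will not build or run anywhere else.

#include <stdio.h>

static long m68k_syscall0(long nr)
{
	register long res asm ("%d0") = nr;   /* syscall number and result in %d0 */

	asm volatile ("trap #0" : "+d" (res) : : "memory");
	return res;
}

int main(void)
{
	printf("getpid via trap #0 = %ld\n", m68k_syscall0(20 /* assumed __NR_getpid */));
	return 0;
}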
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index c30da5b3f2d..c468f2edaa8 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -22,7 +22,7 @@ ALIGN
22ENTRY(sys_call_table) 22ENTRY(sys_call_table)
23 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ 23 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
24 .long sys_exit 24 .long sys_exit
25 .long __sys_fork 25 .long sys_fork
26 .long sys_read 26 .long sys_read
27 .long sys_write 27 .long sys_write
28 .long sys_open /* 5 */ 28 .long sys_open /* 5 */
@@ -140,7 +140,7 @@ ENTRY(sys_call_table)
140 .long sys_ipc 140 .long sys_ipc
141 .long sys_fsync 141 .long sys_fsync
142 .long sys_sigreturn 142 .long sys_sigreturn
143 .long __sys_clone /* 120 */ 143 .long sys_clone /* 120 */
144 .long sys_setdomainname 144 .long sys_setdomainname
145 .long sys_newuname 145 .long sys_newuname
146 .long sys_cacheflush /* modify_ldt for i386 */ 146 .long sys_cacheflush /* modify_ldt for i386 */
@@ -210,7 +210,7 @@ ENTRY(sys_call_table)
210 .long sys_sendfile 210 .long sys_sendfile
211 .long sys_ni_syscall /* streams1 */ 211 .long sys_ni_syscall /* streams1 */
212 .long sys_ni_syscall /* streams2 */ 212 .long sys_ni_syscall /* streams2 */
213 .long __sys_vfork /* 190 */ 213 .long sys_vfork /* 190 */
214 .long sys_getrlimit 214 .long sys_getrlimit
215 .long sys_mmap2 215 .long sys_mmap2
216 .long sys_truncate64 216 .long sys_truncate64
@@ -365,7 +365,4 @@ ENTRY(sys_call_table)
365 .long sys_clock_adjtime 365 .long sys_clock_adjtime
366 .long sys_syncfs 366 .long sys_syncfs
367 .long sys_setns 367 .long sys_setns
368 .long sys_process_vm_readv /* 345 */
369 .long sys_process_vm_writev
370 .long sys_kcmp
371 368
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index 5d0bcaad2e5..a5cf40c26de 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -1,108 +1,5 @@
1/* 1#ifdef CONFIG_MMU
2 * linux/arch/m68k/kernel/time.c 2#include "time_mm.c"
3 * 3#else
4 * Copyright (C) 1991, 1992, 1995 Linus Torvalds 4#include "time_no.c"
5 * 5#endif
6 * This file contains the m68k-specific time handling details.
7 * Most of the stuff is located in the machine specific files.
8 *
9 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
10 * "A Kernel Model for Precision Timekeeping" by Dave Mills
11 */
12
13#include <linux/errno.h>
14#include <linux/module.h>
15#include <linux/sched.h>
16#include <linux/kernel.h>
17#include <linux/param.h>
18#include <linux/string.h>
19#include <linux/mm.h>
20#include <linux/rtc.h>
21#include <linux/platform_device.h>
22
23#include <asm/machdep.h>
24#include <asm/io.h>
25#include <asm/irq_regs.h>
26
27#include <linux/time.h>
28#include <linux/timex.h>
29#include <linux/profile.h>
30
31/*
32 * timer_interrupt() needs to keep up the real-time clock,
33 * as well as call the "xtime_update()" routine every clocktick
34 */
35static irqreturn_t timer_interrupt(int irq, void *dummy)
36{
37 xtime_update(1);
38 update_process_times(user_mode(get_irq_regs()));
39 profile_tick(CPU_PROFILING);
40
41#ifdef CONFIG_HEARTBEAT
42 /* use power LED as a heartbeat instead -- much more useful
43 for debugging -- based on the version for PReP by Cort */
44 /* acts like an actual heartbeat -- i.e. thump-thump-pause... */
45 if (mach_heartbeat) {
46 static unsigned cnt = 0, period = 0, dist = 0;
47
48 if (cnt == 0 || cnt == dist)
49 mach_heartbeat( 1 );
50 else if (cnt == 7 || cnt == dist+7)
51 mach_heartbeat( 0 );
52
53 if (++cnt > period) {
54 cnt = 0;
55 /* The hyperbolic function below modifies the heartbeat period
56 * length depending on the current (5 min) load. It goes
57 * through the points f(0)=126, f(1)=86, f(5)=51,
58 * f(inf)->30. */
59 period = ((672<<FSHIFT)/(5*avenrun[0]+(7<<FSHIFT))) + 30;
60 dist = period / 4;
61 }
62 }
63#endif /* CONFIG_HEARTBEAT */
64 return IRQ_HANDLED;
65}
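
The heartbeat period above is fixed-point arithmetic on avenrun[0], the 1-minute load average scaled by 1 << FSHIFT, and working the formula through reproduces exactly the f(0)=126, f(1)=86, f(5)=51 points quoted in the comment. A short check, assuming the kernel's FSHIFT value of 11:

#include <stdio.h>

#define FSHIFT 11   /* assumed, matches the kernel's load-average fixed-point shift */

int main(void)
{
	int loads[] = { 0, 1, 5 };

	for (int i = 0; i < 3; i++) {
		long avenrun = (long)loads[i] << FSHIFT;        /* load in fixed point */
		long period = ((672L << FSHIFT) / (5 * avenrun + (7L << FSHIFT))) + 30;

		printf("load %d -> period %ld ticks\n", loads[i], period);
	}
	return 0;
}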
66
67void read_persistent_clock(struct timespec *ts)
68{
69 struct rtc_time time;
70 ts->tv_sec = 0;
71 ts->tv_nsec = 0;
72
73 if (mach_hwclk) {
74 mach_hwclk(0, &time);
75
76 if ((time.tm_year += 1900) < 1970)
77 time.tm_year += 100;
78 ts->tv_sec = mktime(time.tm_year, time.tm_mon, time.tm_mday,
79 time.tm_hour, time.tm_min, time.tm_sec);
80 }
81}
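
The year fixup in read_persistent_clock() above copes with RTCs that only report a two-digit year: rtc_time counts years from 1900, so anything that still lands before 1970 after the +1900 adjustment is assumed to mean 20xx and bumped by a century. A minimal sketch of that normalisation; the sample values are invented.

#include <stdio.h>

static int normalize_year(int tm_year)
{
	int year = tm_year + 1900;     /* struct rtc_time counts years from 1900 */

	if (year < 1970)
		year += 100;           /* two-digit RTC year: assume 20xx */
	return year;
}

int main(void)
{
	printf("tm_year 112 -> %d\n", normalize_year(112));   /* 2012 */
	printf("tm_year  12 -> %d\n", normalize_year(12));    /* 1912 -> 2012 */
	return 0;
}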
82
83void __init time_init(void)
84{
85 mach_sched_init(timer_interrupt);
86}
87
88#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
89
90u32 arch_gettimeoffset(void)
91{
92 return mach_gettimeoffset() * 1000;
93}
94
95static int __init rtc_init(void)
96{
97 struct platform_device *pdev;
98
99 if (!mach_hwclk)
100 return -ENODEV;
101
102 pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
103 return PTR_RET(pdev);
104}
105
106module_init(rtc_init);
107
108#endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index cbc624af449..c98add3f5f0 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -1,1208 +1,5 @@
1/* 1#ifdef CONFIG_MMU
2 * linux/arch/m68k/kernel/traps.c 2#include "traps_mm.c"
3 *
4 * Copyright (C) 1993, 1994 by Hamish Macdonald
5 *
6 * 68040 fixes by Michael Rausch
7 * 68040 fixes by Martin Apel
8 * 68040 fixes and writeback by Richard Zidlicky
9 * 68060 fixes by Roman Hodek
10 * 68060 fixes by Jesper Skov
11 *
12 * This file is subject to the terms and conditions of the GNU General Public
13 * License. See the file COPYING in the main directory of this archive
14 * for more details.
15 */
16
17/*
18 * Sets up all exception vectors
19 */
20
21#include <linux/sched.h>
22#include <linux/signal.h>
23#include <linux/kernel.h>
24#include <linux/mm.h>
25#include <linux/module.h>
26#include <linux/user.h>
27#include <linux/string.h>
28#include <linux/linkage.h>
29#include <linux/init.h>
30#include <linux/ptrace.h>
31#include <linux/kallsyms.h>
32
33#include <asm/setup.h>
34#include <asm/fpu.h>
35#include <asm/uaccess.h>
36#include <asm/traps.h>
37#include <asm/pgalloc.h>
38#include <asm/machdep.h>
39#include <asm/siginfo.h>
40
41
42static const char *vec_names[] = {
43 [VEC_RESETSP] = "RESET SP",
44 [VEC_RESETPC] = "RESET PC",
45 [VEC_BUSERR] = "BUS ERROR",
46 [VEC_ADDRERR] = "ADDRESS ERROR",
47 [VEC_ILLEGAL] = "ILLEGAL INSTRUCTION",
48 [VEC_ZERODIV] = "ZERO DIVIDE",
49 [VEC_CHK] = "CHK",
50 [VEC_TRAP] = "TRAPcc",
51 [VEC_PRIV] = "PRIVILEGE VIOLATION",
52 [VEC_TRACE] = "TRACE",
53 [VEC_LINE10] = "LINE 1010",
54 [VEC_LINE11] = "LINE 1111",
55 [VEC_RESV12] = "UNASSIGNED RESERVED 12",
56 [VEC_COPROC] = "COPROCESSOR PROTOCOL VIOLATION",
57 [VEC_FORMAT] = "FORMAT ERROR",
58 [VEC_UNINT] = "UNINITIALIZED INTERRUPT",
59 [VEC_RESV16] = "UNASSIGNED RESERVED 16",
60 [VEC_RESV17] = "UNASSIGNED RESERVED 17",
61 [VEC_RESV18] = "UNASSIGNED RESERVED 18",
62 [VEC_RESV19] = "UNASSIGNED RESERVED 19",
63 [VEC_RESV20] = "UNASSIGNED RESERVED 20",
64 [VEC_RESV21] = "UNASSIGNED RESERVED 21",
65 [VEC_RESV22] = "UNASSIGNED RESERVED 22",
66 [VEC_RESV23] = "UNASSIGNED RESERVED 23",
67 [VEC_SPUR] = "SPURIOUS INTERRUPT",
68 [VEC_INT1] = "LEVEL 1 INT",
69 [VEC_INT2] = "LEVEL 2 INT",
70 [VEC_INT3] = "LEVEL 3 INT",
71 [VEC_INT4] = "LEVEL 4 INT",
72 [VEC_INT5] = "LEVEL 5 INT",
73 [VEC_INT6] = "LEVEL 6 INT",
74 [VEC_INT7] = "LEVEL 7 INT",
75 [VEC_SYS] = "SYSCALL",
76 [VEC_TRAP1] = "TRAP #1",
77 [VEC_TRAP2] = "TRAP #2",
78 [VEC_TRAP3] = "TRAP #3",
79 [VEC_TRAP4] = "TRAP #4",
80 [VEC_TRAP5] = "TRAP #5",
81 [VEC_TRAP6] = "TRAP #6",
82 [VEC_TRAP7] = "TRAP #7",
83 [VEC_TRAP8] = "TRAP #8",
84 [VEC_TRAP9] = "TRAP #9",
85 [VEC_TRAP10] = "TRAP #10",
86 [VEC_TRAP11] = "TRAP #11",
87 [VEC_TRAP12] = "TRAP #12",
88 [VEC_TRAP13] = "TRAP #13",
89 [VEC_TRAP14] = "TRAP #14",
90 [VEC_TRAP15] = "TRAP #15",
91 [VEC_FPBRUC] = "FPCP BSUN",
92 [VEC_FPIR] = "FPCP INEXACT",
93 [VEC_FPDIVZ] = "FPCP DIV BY 0",
94 [VEC_FPUNDER] = "FPCP UNDERFLOW",
95 [VEC_FPOE] = "FPCP OPERAND ERROR",
96 [VEC_FPOVER] = "FPCP OVERFLOW",
97 [VEC_FPNAN] = "FPCP SNAN",
98 [VEC_FPUNSUP] = "FPCP UNSUPPORTED OPERATION",
99 [VEC_MMUCFG] = "MMU CONFIGURATION ERROR",
100 [VEC_MMUILL] = "MMU ILLEGAL OPERATION ERROR",
101 [VEC_MMUACC] = "MMU ACCESS LEVEL VIOLATION ERROR",
102 [VEC_RESV59] = "UNASSIGNED RESERVED 59",
103 [VEC_UNIMPEA] = "UNASSIGNED RESERVED 60",
104 [VEC_UNIMPII] = "UNASSIGNED RESERVED 61",
105 [VEC_RESV62] = "UNASSIGNED RESERVED 62",
106 [VEC_RESV63] = "UNASSIGNED RESERVED 63",
107};
108
109static const char *space_names[] = {
110 [0] = "Space 0",
111 [USER_DATA] = "User Data",
112 [USER_PROGRAM] = "User Program",
113#ifndef CONFIG_SUN3
114 [3] = "Space 3",
115#else 3#else
116 [FC_CONTROL] = "Control", 4#include "traps_no.c"
117#endif
118 [4] = "Space 4",
119 [SUPER_DATA] = "Super Data",
120 [SUPER_PROGRAM] = "Super Program",
121 [CPU_SPACE] = "CPU"
122};
123
124void die_if_kernel(char *,struct pt_regs *,int);
125asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
126 unsigned long error_code);
127int send_fault_sig(struct pt_regs *regs);
128
129asmlinkage void trap_c(struct frame *fp);
130
131#if defined (CONFIG_M68060)
132static inline void access_error060 (struct frame *fp)
133{
134 unsigned long fslw = fp->un.fmt4.pc; /* is really FSLW for access error */
135
136#ifdef DEBUG
137 printk("fslw=%#lx, fa=%#lx\n", fslw, fp->un.fmt4.effaddr);
138#endif
139
140 if (fslw & MMU060_BPE) {
141 /* branch prediction error -> clear branch cache */
142 __asm__ __volatile__ ("movec %/cacr,%/d0\n\t"
143 "orl #0x00400000,%/d0\n\t"
144 "movec %/d0,%/cacr"
145 : : : "d0" );
146 /* return if there's no other error */
147 if (!(fslw & MMU060_ERR_BITS) && !(fslw & MMU060_SEE))
148 return;
149 }
150
151 if (fslw & (MMU060_DESC_ERR | MMU060_WP | MMU060_SP)) {
152 unsigned long errorcode;
153 unsigned long addr = fp->un.fmt4.effaddr;
154
155 if (fslw & MMU060_MA)
156 addr = (addr + PAGE_SIZE - 1) & PAGE_MASK;
157
158 errorcode = 1;
159 if (fslw & MMU060_DESC_ERR) {
160 __flush_tlb040_one(addr);
161 errorcode = 0;
162 }
163 if (fslw & MMU060_W)
164 errorcode |= 2;
165#ifdef DEBUG
 166			printk("errorcode = %lu\n", errorcode);
167#endif
168 do_page_fault(&fp->ptregs, addr, errorcode);
169 } else if (fslw & (MMU060_SEE)){
170 /* Software Emulation Error.
171 * fault during mem_read/mem_write in ifpsp060/os.S
172 */
173 send_fault_sig(&fp->ptregs);
174 } else if (!(fslw & (MMU060_RE|MMU060_WE)) ||
175 send_fault_sig(&fp->ptregs) > 0) {
176 printk("pc=%#lx, fa=%#lx\n", fp->ptregs.pc, fp->un.fmt4.effaddr);
177 printk( "68060 access error, fslw=%lx\n", fslw );
178 trap_c( fp );
179 }
180}
181#endif /* CONFIG_M68060 */
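
The 68060 path above folds the FSLW into the two-bit error code expected by do_page_fault(): bit 0 distinguishes a missing page from a protection fault, bit 1 marks a write. A stand-alone sketch of that folding follows; the FSLW_* masks are illustrative placeholders, not the real MMU060_* definitions from <asm/traps.h>.

#include <stdio.h>

#define FSLW_DESC_ERR	0x01u	/* placeholder: descriptor/ATC miss */
#define FSLW_WRITE	0x02u	/* placeholder: fault was a write */

static unsigned int fslw_to_errorcode(unsigned long fslw)
{
	unsigned int errorcode = 1;		/* assume protection fault */
	if (fslw & FSLW_DESC_ERR)
		errorcode = 0;			/* no page: descriptor error */
	if (fslw & FSLW_WRITE)
		errorcode |= 2;			/* bit 1 marks a write access */
	return errorcode;
}

int main(void)
{
	printf("read miss       -> %u\n", fslw_to_errorcode(FSLW_DESC_ERR));
	printf("write protection-> %u\n", fslw_to_errorcode(FSLW_WRITE));
	return 0;
}
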
182
183#if defined (CONFIG_M68040)
184static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
185{
186 unsigned long mmusr;
187 mm_segment_t old_fs = get_fs();
188
189 set_fs(MAKE_MM_SEG(wbs));
190
191 if (iswrite)
192 asm volatile (".chip 68040; ptestw (%0); .chip 68k" : : "a" (addr));
193 else
194 asm volatile (".chip 68040; ptestr (%0); .chip 68k" : : "a" (addr));
195
196 asm volatile (".chip 68040; movec %%mmusr,%0; .chip 68k" : "=r" (mmusr));
197
198 set_fs(old_fs);
199
200 return mmusr;
201}
202
203static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
204 unsigned long wbd)
205{
206 int res = 0;
207 mm_segment_t old_fs = get_fs();
208
209 /* set_fs can not be moved, otherwise put_user() may oops */
210 set_fs(MAKE_MM_SEG(wbs));
211
212 switch (wbs & WBSIZ_040) {
213 case BA_SIZE_BYTE:
214 res = put_user(wbd & 0xff, (char __user *)wba);
215 break;
216 case BA_SIZE_WORD:
217 res = put_user(wbd & 0xffff, (short __user *)wba);
218 break;
219 case BA_SIZE_LONG:
220 res = put_user(wbd, (int __user *)wba);
221 break;
222 }
223
224 /* set_fs can not be moved, otherwise put_user() may oops */
225 set_fs(old_fs);
226
227
228#ifdef DEBUG
229 printk("do_040writeback1, res=%d\n",res);
230#endif
231
232 return res;
233}
234
 235/* After an exception during a writeback, the stack frame corresponding
 236 * to that exception is discarded; set a few bits in the old frame
 237 * to simulate what it should look like.
 238 */
239static inline void fix_xframe040(struct frame *fp, unsigned long wba, unsigned short wbs)
240{
241 fp->un.fmt7.faddr = wba;
242 fp->un.fmt7.ssw = wbs & 0xff;
243 if (wba != current->thread.faddr)
244 fp->un.fmt7.ssw |= MA_040;
245}
246
247static inline void do_040writebacks(struct frame *fp)
248{
249 int res = 0;
250#if 0
251 if (fp->un.fmt7.wb1s & WBV_040)
252 printk("access_error040: cannot handle 1st writeback. oops.\n");
253#endif
254
255 if ((fp->un.fmt7.wb2s & WBV_040) &&
256 !(fp->un.fmt7.wb2s & WBTT_040)) {
257 res = do_040writeback1(fp->un.fmt7.wb2s, fp->un.fmt7.wb2a,
258 fp->un.fmt7.wb2d);
259 if (res)
260 fix_xframe040(fp, fp->un.fmt7.wb2a, fp->un.fmt7.wb2s);
261 else
262 fp->un.fmt7.wb2s = 0;
263 }
264
265 /* do the 2nd wb only if the first one was successful (except for a kernel wb) */
266 if (fp->un.fmt7.wb3s & WBV_040 && (!res || fp->un.fmt7.wb3s & 4)) {
267 res = do_040writeback1(fp->un.fmt7.wb3s, fp->un.fmt7.wb3a,
268 fp->un.fmt7.wb3d);
269 if (res)
270 {
271 fix_xframe040(fp, fp->un.fmt7.wb3a, fp->un.fmt7.wb3s);
272
273 fp->un.fmt7.wb2s = fp->un.fmt7.wb3s;
274 fp->un.fmt7.wb3s &= (~WBV_040);
275 fp->un.fmt7.wb2a = fp->un.fmt7.wb3a;
276 fp->un.fmt7.wb2d = fp->un.fmt7.wb3d;
277 }
278 else
279 fp->un.fmt7.wb3s = 0;
280 }
281
282 if (res)
283 send_fault_sig(&fp->ptregs);
284}
285
286/*
 287 * Called from sigreturn(); we must ensure userspace code didn't
 288 * manipulate the exception frame to circumvent protection, then complete
 289 * the pending writebacks.
 290 * We just clear TM2 to turn it into a userspace access.
291 */
292asmlinkage void berr_040cleanup(struct frame *fp)
293{
294 fp->un.fmt7.wb2s &= ~4;
295 fp->un.fmt7.wb3s &= ~4;
296
297 do_040writebacks(fp);
298}
299
300static inline void access_error040(struct frame *fp)
301{
302 unsigned short ssw = fp->un.fmt7.ssw;
303 unsigned long mmusr;
304
305#ifdef DEBUG
306 printk("ssw=%#x, fa=%#lx\n", ssw, fp->un.fmt7.faddr);
307 printk("wb1s=%#x, wb2s=%#x, wb3s=%#x\n", fp->un.fmt7.wb1s,
308 fp->un.fmt7.wb2s, fp->un.fmt7.wb3s);
309 printk ("wb2a=%lx, wb3a=%lx, wb2d=%lx, wb3d=%lx\n",
310 fp->un.fmt7.wb2a, fp->un.fmt7.wb3a,
311 fp->un.fmt7.wb2d, fp->un.fmt7.wb3d);
312#endif
313
314 if (ssw & ATC_040) {
315 unsigned long addr = fp->un.fmt7.faddr;
316 unsigned long errorcode;
317
318 /*
319 * The MMU status has to be determined AFTER the address
320 * has been corrected if there was a misaligned access (MA).
321 */
322 if (ssw & MA_040)
323 addr = (addr + 7) & -8;
324
325 /* MMU error, get the MMUSR info for this access */
326 mmusr = probe040(!(ssw & RW_040), addr, ssw);
327#ifdef DEBUG
328 printk("mmusr = %lx\n", mmusr);
329#endif
330 errorcode = 1;
331 if (!(mmusr & MMU_R_040)) {
332 /* clear the invalid atc entry */
333 __flush_tlb040_one(addr);
334 errorcode = 0;
335 }
336
 337		/* despite what the documentation seems to say, RMW
 338		 * accesses always have both the LK and RW bits set */
339 if (!(ssw & RW_040) || (ssw & LK_040))
340 errorcode |= 2;
341
342 if (do_page_fault(&fp->ptregs, addr, errorcode)) {
343#ifdef DEBUG
344 printk("do_page_fault() !=0\n");
345#endif
346 if (user_mode(&fp->ptregs)){
347 /* delay writebacks after signal delivery */
348#ifdef DEBUG
349 printk(".. was usermode - return\n");
350#endif
351 return;
352 }
353 /* disable writeback into user space from kernel
354 * (if do_page_fault didn't fix the mapping,
355 * the writeback won't do good)
356 */
357disable_wb:
358#ifdef DEBUG
359 printk(".. disabling wb2\n");
360#endif
361 if (fp->un.fmt7.wb2a == fp->un.fmt7.faddr)
362 fp->un.fmt7.wb2s &= ~WBV_040;
363 if (fp->un.fmt7.wb3a == fp->un.fmt7.faddr)
364 fp->un.fmt7.wb3s &= ~WBV_040;
365 }
366 } else {
367 /* In case of a bus error we either kill the process or expect
368 * the kernel to catch the fault, which then is also responsible
369 * for cleaning up the mess.
370 */
371 current->thread.signo = SIGBUS;
372 current->thread.faddr = fp->un.fmt7.faddr;
373 if (send_fault_sig(&fp->ptregs) >= 0)
374 printk("68040 bus error (ssw=%x, faddr=%lx)\n", ssw,
375 fp->un.fmt7.faddr);
376 goto disable_wb;
377 }
378
379 do_040writebacks(fp);
380}
381#endif /* CONFIG_M68040 */
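
do_040writeback1() above replays a pending store at the width encoded in the writeback status word. The sketch below shows the same size dispatch in portable C; WBSIZ_MASK and the SIZE_* values are demo placeholders for the real WBSIZ_040/BA_SIZE_* macros, and plain memcpy() stands in for put_user().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WBSIZ_MASK	0x3u
#define SIZE_BYTE	0x1u
#define SIZE_WORD	0x2u
#define SIZE_LONG	0x3u

static void replay_writeback(unsigned int wbs, unsigned char *wba, uint32_t wbd)
{
	switch (wbs & WBSIZ_MASK) {
	case SIZE_BYTE: { uint8_t  v = wbd & 0xff;   memcpy(wba, &v, sizeof v); break; }
	case SIZE_WORD: { uint16_t v = wbd & 0xffff; memcpy(wba, &v, sizeof v); break; }
	case SIZE_LONG: { uint32_t v = wbd;          memcpy(wba, &v, sizeof v); break; }
	}
}

int main(void)
{
	unsigned char target[4] = { 0 };

	replay_writeback(SIZE_WORD, target, 0x12345678);
	printf("target bytes: %02x %02x %02x %02x\n",
	       target[0], target[1], target[2], target[3]);
	return 0;
}
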
382
383#if defined(CONFIG_SUN3)
384#include <asm/sun3mmu.h>
385
386extern int mmu_emu_handle_fault (unsigned long, int, int);
387
388/* sun3 version of bus_error030 */
389
390static inline void bus_error030 (struct frame *fp)
391{
392 unsigned char buserr_type = sun3_get_buserr ();
393 unsigned long addr, errorcode;
394 unsigned short ssw = fp->un.fmtb.ssw;
395 extern unsigned long _sun3_map_test_start, _sun3_map_test_end;
396
397#ifdef DEBUG
398 if (ssw & (FC | FB))
399 printk ("Instruction fault at %#010lx\n",
400 ssw & FC ?
401 fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
402 :
403 fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
404 if (ssw & DF)
405 printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
406 ssw & RW ? "read" : "write",
407 fp->un.fmtb.daddr,
408 space_names[ssw & DFC], fp->ptregs.pc);
409#endif
410
411 /*
412 * Check if this page should be demand-mapped. This needs to go before
413 * the testing for a bad kernel-space access (demand-mapping applies
414 * to kernel accesses too).
415 */
416
417 if ((ssw & DF)
418 && (buserr_type & (SUN3_BUSERR_PROTERR | SUN3_BUSERR_INVALID))) {
419 if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 0))
420 return;
421 }
422
423 /* Check for kernel-space pagefault (BAD). */
424 if (fp->ptregs.sr & PS_S) {
425 /* kernel fault must be a data fault to user space */
426 if (! ((ssw & DF) && ((ssw & DFC) == USER_DATA))) {
427 // try checking the kernel mappings before surrender
428 if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 1))
429 return;
430 /* instruction fault or kernel data fault! */
431 if (ssw & (FC | FB))
432 printk ("Instruction fault at %#010lx\n",
433 fp->ptregs.pc);
434 if (ssw & DF) {
435 /* was this fault incurred testing bus mappings? */
436 if((fp->ptregs.pc >= (unsigned long)&_sun3_map_test_start) &&
437 (fp->ptregs.pc <= (unsigned long)&_sun3_map_test_end)) {
438 send_fault_sig(&fp->ptregs);
439 return;
440 }
441
442 printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
443 ssw & RW ? "read" : "write",
444 fp->un.fmtb.daddr,
445 space_names[ssw & DFC], fp->ptregs.pc);
446 }
447 printk ("BAD KERNEL BUSERR\n");
448
449 die_if_kernel("Oops", &fp->ptregs,0);
450 force_sig(SIGKILL, current);
451 return;
452 }
453 } else {
454 /* user fault */
455 if (!(ssw & (FC | FB)) && !(ssw & DF))
456 /* not an instruction fault or data fault! BAD */
457 panic ("USER BUSERR w/o instruction or data fault");
458 }
459
460
461 /* First handle the data fault, if any. */
462 if (ssw & DF) {
463 addr = fp->un.fmtb.daddr;
464
465// errorcode bit 0: 0 -> no page 1 -> protection fault
466// errorcode bit 1: 0 -> read fault 1 -> write fault
467
468// (buserr_type & SUN3_BUSERR_PROTERR) -> protection fault
469// (buserr_type & SUN3_BUSERR_INVALID) -> invalid page fault
470
471 if (buserr_type & SUN3_BUSERR_PROTERR)
472 errorcode = 0x01;
473 else if (buserr_type & SUN3_BUSERR_INVALID)
474 errorcode = 0x00;
475 else {
476#ifdef DEBUG
477 printk ("*** unexpected busfault type=%#04x\n", buserr_type);
478 printk ("invalid %s access at %#lx from pc %#lx\n",
479 !(ssw & RW) ? "write" : "read", addr,
480 fp->ptregs.pc);
481#endif
482 die_if_kernel ("Oops", &fp->ptregs, buserr_type);
483 force_sig (SIGBUS, current);
484 return;
485 }
486
487//todo: wtf is RM bit? --m
488 if (!(ssw & RW) || ssw & RM)
489 errorcode |= 0x02;
490
491 /* Handle page fault. */
492 do_page_fault (&fp->ptregs, addr, errorcode);
493
494 /* Retry the data fault now. */
495 return;
496 }
497
498 /* Now handle the instruction fault. */
499
500 /* Get the fault address. */
501 if (fp->ptregs.format == 0xA)
502 addr = fp->ptregs.pc + 4;
503 else
504 addr = fp->un.fmtb.baddr;
505 if (ssw & FC)
506 addr -= 2;
507
508 if (buserr_type & SUN3_BUSERR_INVALID) {
509 if (!mmu_emu_handle_fault(addr, 1, 0))
510 do_page_fault (&fp->ptregs, addr, 0);
511 } else {
512#ifdef DEBUG
513 printk ("protection fault on insn access (segv).\n");
514#endif
515 force_sig (SIGSEGV, current);
516 }
517}
518#else
519#if defined(CPU_M68020_OR_M68030)
520static inline void bus_error030 (struct frame *fp)
521{
522 volatile unsigned short temp;
523 unsigned short mmusr;
524 unsigned long addr, errorcode;
525 unsigned short ssw = fp->un.fmtb.ssw;
526#ifdef DEBUG
527 unsigned long desc;
528
529 printk ("pid = %x ", current->pid);
530 printk ("SSW=%#06x ", ssw);
531
532 if (ssw & (FC | FB))
533 printk ("Instruction fault at %#010lx\n",
534 ssw & FC ?
535 fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
536 :
537 fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
538 if (ssw & DF)
539 printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
540 ssw & RW ? "read" : "write",
541 fp->un.fmtb.daddr,
542 space_names[ssw & DFC], fp->ptregs.pc);
543#endif
544
545 /* ++andreas: If a data fault and an instruction fault happen
546 at the same time map in both pages. */
547
548 /* First handle the data fault, if any. */
549 if (ssw & DF) {
550 addr = fp->un.fmtb.daddr;
551
552#ifdef DEBUG
553 asm volatile ("ptestr %3,%2@,#7,%0\n\t"
554 "pmove %%psr,%1"
555 : "=a&" (desc), "=m" (temp)
556 : "a" (addr), "d" (ssw));
557#else
558 asm volatile ("ptestr %2,%1@,#7\n\t"
559 "pmove %%psr,%0"
560 : "=m" (temp) : "a" (addr), "d" (ssw));
561#endif
562 mmusr = temp;
563
564#ifdef DEBUG
565 printk("mmusr is %#x for addr %#lx in task %p\n",
566 mmusr, addr, current);
567 printk("descriptor address is %#lx, contents %#lx\n",
568 __va(desc), *(unsigned long *)__va(desc));
569#endif
570
571 errorcode = (mmusr & MMU_I) ? 0 : 1;
572 if (!(ssw & RW) || (ssw & RM))
573 errorcode |= 2;
574
575 if (mmusr & (MMU_I | MMU_WP)) {
576 if (ssw & 4) {
577 printk("Data %s fault at %#010lx in %s (pc=%#lx)\n",
578 ssw & RW ? "read" : "write",
579 fp->un.fmtb.daddr,
580 space_names[ssw & DFC], fp->ptregs.pc);
581 goto buserr;
582 }
583 /* Don't try to do anything further if an exception was
584 handled. */
585 if (do_page_fault (&fp->ptregs, addr, errorcode) < 0)
586 return;
587 } else if (!(mmusr & MMU_I)) {
588 /* probably a 020 cas fault */
589 if (!(ssw & RM) && send_fault_sig(&fp->ptregs) > 0)
590 printk("unexpected bus error (%#x,%#x)\n", ssw, mmusr);
591 } else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
592 printk("invalid %s access at %#lx from pc %#lx\n",
593 !(ssw & RW) ? "write" : "read", addr,
594 fp->ptregs.pc);
595 die_if_kernel("Oops",&fp->ptregs,mmusr);
596 force_sig(SIGSEGV, current);
597 return;
598 } else {
599#if 0
600 static volatile long tlong;
601#endif
602
603 printk("weird %s access at %#lx from pc %#lx (ssw is %#x)\n",
604 !(ssw & RW) ? "write" : "read", addr,
605 fp->ptregs.pc, ssw);
606 asm volatile ("ptestr #1,%1@,#0\n\t"
607 "pmove %%psr,%0"
608 : "=m" (temp)
609 : "a" (addr));
610 mmusr = temp;
611
612 printk ("level 0 mmusr is %#x\n", mmusr);
613#if 0
614 asm volatile ("pmove %%tt0,%0"
615 : "=m" (tlong));
616 printk("tt0 is %#lx, ", tlong);
617 asm volatile ("pmove %%tt1,%0"
618 : "=m" (tlong));
619 printk("tt1 is %#lx\n", tlong);
620#endif
621#ifdef DEBUG
622 printk("Unknown SIGSEGV - 1\n");
623#endif
624 die_if_kernel("Oops",&fp->ptregs,mmusr);
625 force_sig(SIGSEGV, current);
626 return;
627 }
628
629 /* setup an ATC entry for the access about to be retried */
630 if (!(ssw & RW) || (ssw & RM))
631 asm volatile ("ploadw %1,%0@" : /* no outputs */
632 : "a" (addr), "d" (ssw));
633 else
634 asm volatile ("ploadr %1,%0@" : /* no outputs */
635 : "a" (addr), "d" (ssw));
636 }
637
638 /* Now handle the instruction fault. */
639
640 if (!(ssw & (FC|FB)))
641 return;
642
643 if (fp->ptregs.sr & PS_S) {
644 printk("Instruction fault at %#010lx\n",
645 fp->ptregs.pc);
646 buserr:
647 printk ("BAD KERNEL BUSERR\n");
648 die_if_kernel("Oops",&fp->ptregs,0);
649 force_sig(SIGKILL, current);
650 return;
651 }
652
653 /* get the fault address */
654 if (fp->ptregs.format == 10)
655 addr = fp->ptregs.pc + 4;
656 else
657 addr = fp->un.fmtb.baddr;
658 if (ssw & FC)
659 addr -= 2;
660
661 if ((ssw & DF) && ((addr ^ fp->un.fmtb.daddr) & PAGE_MASK) == 0)
662 /* Insn fault on same page as data fault. But we
663 should still create the ATC entry. */
664 goto create_atc_entry;
665
666#ifdef DEBUG
667 asm volatile ("ptestr #1,%2@,#7,%0\n\t"
668 "pmove %%psr,%1"
669 : "=a&" (desc), "=m" (temp)
670 : "a" (addr));
671#else
672 asm volatile ("ptestr #1,%1@,#7\n\t"
673 "pmove %%psr,%0"
674 : "=m" (temp) : "a" (addr));
675#endif
676 mmusr = temp;
677
678#ifdef DEBUG
679 printk ("mmusr is %#x for addr %#lx in task %p\n",
680 mmusr, addr, current);
681 printk ("descriptor address is %#lx, contents %#lx\n",
682 __va(desc), *(unsigned long *)__va(desc));
683#endif
684
685 if (mmusr & MMU_I)
686 do_page_fault (&fp->ptregs, addr, 0);
687 else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
688 printk ("invalid insn access at %#lx from pc %#lx\n",
689 addr, fp->ptregs.pc);
690#ifdef DEBUG
691 printk("Unknown SIGSEGV - 2\n");
692#endif
693 die_if_kernel("Oops",&fp->ptregs,mmusr);
694 force_sig(SIGSEGV, current);
695 return;
696 }
697
698create_atc_entry:
699 /* setup an ATC entry for the access about to be retried */
700 asm volatile ("ploadr #2,%0@" : /* no outputs */
701 : "a" (addr));
702}
703#endif /* CPU_M68020_OR_M68030 */
704#endif /* !CONFIG_SUN3 */
705
706#if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU)
707#include <asm/mcfmmu.h>
708
709/*
710 * The following table converts the FS encoding of a ColdFire
711 * exception stack frame into the error_code value needed by
 712 * do_page_fault().
 713 */
714static const unsigned char fs_err_code[] = {
715 0, /* 0000 */
716 0, /* 0001 */
717 0, /* 0010 */
718 0, /* 0011 */
719 1, /* 0100 */
720 0, /* 0101 */
721 0, /* 0110 */
722 0, /* 0111 */
723 2, /* 1000 */
724 3, /* 1001 */
725 2, /* 1010 */
726 0, /* 1011 */
727 1, /* 1100 */
728 1, /* 1101 */
729 0, /* 1110 */
730 0 /* 1111 */
731};
732
733static inline void access_errorcf(unsigned int fs, struct frame *fp)
734{
735 unsigned long mmusr, addr;
736 unsigned int err_code;
737 int need_page_fault;
738
739 mmusr = mmu_read(MMUSR);
740 addr = mmu_read(MMUAR);
741
742 /*
743 * error_code:
744 * bit 0 == 0 means no page found, 1 means protection fault
745 * bit 1 == 0 means read, 1 means write
746 */
747 switch (fs) {
748 case 5: /* 0101 TLB opword X miss */
749 need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 0);
750 addr = fp->ptregs.pc;
751 break;
752 case 6: /* 0110 TLB extension word X miss */
753 need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 1);
754 addr = fp->ptregs.pc + sizeof(long);
755 break;
756 case 10: /* 1010 TLB W miss */
757 need_page_fault = cf_tlb_miss(&fp->ptregs, 1, 1, 0);
758 break;
759 case 14: /* 1110 TLB R miss */
760 need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 1, 0);
761 break;
762 default:
763 /* 0000 Normal */
764 /* 0001 Reserved */
765 /* 0010 Interrupt during debug service routine */
766 /* 0011 Reserved */
767 /* 0100 X Protection */
768 /* 0111 IFP in emulator mode */
769 /* 1000 W Protection*/
770 /* 1001 Write error*/
771 /* 1011 Reserved*/
772 /* 1100 R Protection*/
773 /* 1101 R Protection*/
774 /* 1111 OEP in emulator mode*/
775 need_page_fault = 1;
776 break;
777 }
778
779 if (need_page_fault) {
780 err_code = fs_err_code[fs];
781 if ((fs == 13) && (mmusr & MMUSR_WF)) /* rd-mod-wr access */
782 err_code |= 2; /* bit1 - write, bit0 - protection */
783 do_page_fault(&fp->ptregs, addr, err_code);
784 }
785}
786#endif /* CONFIG_COLDFIRE CONFIG_MMU */
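
On ColdFire, buserr_c() below reconstructs the fault-status (FS) nibble from the stacked vector word: FS[1:0] sit in bits 1:0 and FS[3:2] in bits 11:10. Here is a stand-alone sketch of that extraction plus the table lookup, reusing the fs_err_code[] values from above; the example vector value is made up for the demo.

#include <stdio.h>

static const unsigned char fs_err_code[16] = {
	0, 0, 0, 0, 1, 0, 0, 0, 2, 3, 2, 0, 1, 1, 0, 0
};

static unsigned int vector_to_fs(unsigned int vector)
{
	return (vector & 0x3) | ((vector & 0xc00) >> 8);
}

int main(void)
{
	unsigned int vector = 0x802;	/* hypothetical stacked value */
	unsigned int fs = vector_to_fs(vector);

	/* 0x802 decodes to FS=10, a TLB write miss in the table above */
	printf("fs=%u err_code=%u\n", fs, fs_err_code[fs]);
	return 0;
}
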
787
788asmlinkage void buserr_c(struct frame *fp)
789{
790 /* Only set esp0 if coming from user mode */
791 if (user_mode(&fp->ptregs))
792 current->thread.esp0 = (unsigned long) fp;
793
794#ifdef DEBUG
795 printk ("*** Bus Error *** Format is %x\n", fp->ptregs.format);
796#endif
797
798#if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU)
799 if (CPU_IS_COLDFIRE) {
800 unsigned int fs;
801 fs = (fp->ptregs.vector & 0x3) |
802 ((fp->ptregs.vector & 0xc00) >> 8);
803 switch (fs) {
804 case 0x5:
805 case 0x6:
806 case 0x7:
807 case 0x9:
808 case 0xa:
809 case 0xd:
810 case 0xe:
811 case 0xf:
812 access_errorcf(fs, fp);
813 return;
814 default:
815 break;
816 }
817 }
818#endif /* CONFIG_COLDFIRE && CONFIG_MMU */
819
820 switch (fp->ptregs.format) {
821#if defined (CONFIG_M68060)
822 case 4: /* 68060 access error */
823 access_error060 (fp);
824 break;
825#endif
826#if defined (CONFIG_M68040)
827 case 0x7: /* 68040 access error */
828 access_error040 (fp);
829 break;
830#endif
831#if defined (CPU_M68020_OR_M68030)
832 case 0xa:
833 case 0xb:
834 bus_error030 (fp);
835 break;
836#endif
837 default:
838 die_if_kernel("bad frame format",&fp->ptregs,0);
839#ifdef DEBUG
840 printk("Unknown SIGSEGV - 4\n");
841#endif
842 force_sig(SIGSEGV, current);
843 }
844}
845
846
847static int kstack_depth_to_print = 48;
848
849void show_trace(unsigned long *stack)
850{
851 unsigned long *endstack;
852 unsigned long addr;
853 int i;
854
855 printk("Call Trace:");
856 addr = (unsigned long)stack + THREAD_SIZE - 1;
857 endstack = (unsigned long *)(addr & -THREAD_SIZE);
858 i = 0;
859 while (stack + 1 <= endstack) {
860 addr = *stack++;
861 /*
862 * If the address is either in the text segment of the
863 * kernel, or in the region which contains vmalloc'ed
864 * memory, it *may* be the address of a calling
865 * routine; if so, print it so that someone tracing
866 * down the cause of the crash will be able to figure
867 * out the call path that was taken.
868 */
869 if (__kernel_text_address(addr)) {
870#ifndef CONFIG_KALLSYMS
871 if (i % 5 == 0)
872 printk("\n ");
873#endif
874 printk(" [<%08lx>] %pS\n", addr, (void *)addr);
875 i++;
876 }
877 }
878 printk("\n");
879}
880
881void show_registers(struct pt_regs *regs)
882{
883 struct frame *fp = (struct frame *)regs;
884 mm_segment_t old_fs = get_fs();
885 u16 c, *cp;
886 unsigned long addr;
887 int i;
888
889 print_modules();
890 printk("PC: [<%08lx>] %pS\n", regs->pc, (void *)regs->pc);
891 printk("SR: %04x SP: %p a2: %08lx\n", regs->sr, regs, regs->a2);
892 printk("d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n",
893 regs->d0, regs->d1, regs->d2, regs->d3);
894 printk("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
895 regs->d4, regs->d5, regs->a0, regs->a1);
896
897 printk("Process %s (pid: %d, task=%p)\n",
898 current->comm, task_pid_nr(current), current);
899 addr = (unsigned long)&fp->un;
900 printk("Frame format=%X ", regs->format);
901 switch (regs->format) {
902 case 0x2:
903 printk("instr addr=%08lx\n", fp->un.fmt2.iaddr);
904 addr += sizeof(fp->un.fmt2);
905 break;
906 case 0x3:
907 printk("eff addr=%08lx\n", fp->un.fmt3.effaddr);
908 addr += sizeof(fp->un.fmt3);
909 break;
910 case 0x4:
911 printk((CPU_IS_060 ? "fault addr=%08lx fslw=%08lx\n"
912 : "eff addr=%08lx pc=%08lx\n"),
913 fp->un.fmt4.effaddr, fp->un.fmt4.pc);
914 addr += sizeof(fp->un.fmt4);
915 break;
916 case 0x7:
917 printk("eff addr=%08lx ssw=%04x faddr=%08lx\n",
918 fp->un.fmt7.effaddr, fp->un.fmt7.ssw, fp->un.fmt7.faddr);
919 printk("wb 1 stat/addr/data: %04x %08lx %08lx\n",
920 fp->un.fmt7.wb1s, fp->un.fmt7.wb1a, fp->un.fmt7.wb1dpd0);
921 printk("wb 2 stat/addr/data: %04x %08lx %08lx\n",
922 fp->un.fmt7.wb2s, fp->un.fmt7.wb2a, fp->un.fmt7.wb2d);
923 printk("wb 3 stat/addr/data: %04x %08lx %08lx\n",
924 fp->un.fmt7.wb3s, fp->un.fmt7.wb3a, fp->un.fmt7.wb3d);
925 printk("push data: %08lx %08lx %08lx %08lx\n",
926 fp->un.fmt7.wb1dpd0, fp->un.fmt7.pd1, fp->un.fmt7.pd2,
927 fp->un.fmt7.pd3);
928 addr += sizeof(fp->un.fmt7);
929 break;
930 case 0x9:
931 printk("instr addr=%08lx\n", fp->un.fmt9.iaddr);
932 addr += sizeof(fp->un.fmt9);
933 break;
934 case 0xa:
935 printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
936 fp->un.fmta.ssw, fp->un.fmta.isc, fp->un.fmta.isb,
937 fp->un.fmta.daddr, fp->un.fmta.dobuf);
938 addr += sizeof(fp->un.fmta);
939 break;
940 case 0xb:
941 printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
942 fp->un.fmtb.ssw, fp->un.fmtb.isc, fp->un.fmtb.isb,
943 fp->un.fmtb.daddr, fp->un.fmtb.dobuf);
944 printk("baddr=%08lx dibuf=%08lx ver=%x\n",
945 fp->un.fmtb.baddr, fp->un.fmtb.dibuf, fp->un.fmtb.ver);
946 addr += sizeof(fp->un.fmtb);
947 break;
948 default:
949 printk("\n");
950 }
951 show_stack(NULL, (unsigned long *)addr);
952
953 printk("Code:");
954 set_fs(KERNEL_DS);
955 cp = (u16 *)regs->pc;
956 for (i = -8; i < 16; i++) {
957 if (get_user(c, cp + i) && i >= 0) {
958 printk(" Bad PC value.");
959 break;
960 }
961 printk(i ? " %04x" : " <%04x>", c);
962 }
963 set_fs(old_fs);
964 printk ("\n");
965}
966
967void show_stack(struct task_struct *task, unsigned long *stack)
968{
969 unsigned long *p;
970 unsigned long *endstack;
971 int i;
972
973 if (!stack) {
974 if (task)
975 stack = (unsigned long *)task->thread.esp0;
976 else
977 stack = (unsigned long *)&stack;
978 }
979 endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE);
980
981 printk("Stack from %08lx:", (unsigned long)stack);
982 p = stack;
983 for (i = 0; i < kstack_depth_to_print; i++) {
984 if (p + 1 > endstack)
985 break;
986 if (i % 8 == 0)
987 printk("\n ");
988 printk(" %08lx", *p++);
989 }
990 printk("\n");
991 show_trace(stack);
992}
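
show_trace() and show_stack() above bound the walk by rounding an in-stack address up to the end of the THREAD_SIZE-aligned stack area, which works because kernel stacks are THREAD_SIZE-aligned. A small sketch of that arithmetic; THREAD_SIZE and the sample address are arbitrary demo values.

#include <stdio.h>

#define THREAD_SIZE 8192UL	/* demo value; must be a power of two */

static unsigned long stack_end(unsigned long sp)
{
	/* classic round-up-to-multiple: (x + N - 1) & -N */
	return (sp + THREAD_SIZE - 1) & -THREAD_SIZE;
}

int main(void)
{
	unsigned long sp = 0x10037f0UL;	/* some address inside the stack */
	printf("sp=%#lx endstack=%#lx\n", sp, stack_end(sp));
	return 0;
}
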
993
994/*
995 * The architecture-independent backtrace generator
996 */
997void dump_stack(void)
998{
999 unsigned long stack;
1000
1001 show_trace(&stack);
1002}
1003
1004EXPORT_SYMBOL(dump_stack);
1005
1006/*
1007 * The vector number returned in the frame pointer may also contain
1008 * the "fs" (Fault Status) bits on ColdFire. These occupy the bottom
1009 * 2 bits and the upper 2 bits, so we need to mask out the real vector
1010 * number before using it in comparisons. You don't need to do this on
1011 * real 68k parts, but it won't hurt either.
1012 */
1013
1014void bad_super_trap (struct frame *fp)
1015{
1016 int vector = (fp->ptregs.vector >> 2) & 0xff;
1017
1018 console_verbose();
1019 if (vector < ARRAY_SIZE(vec_names))
1020 printk ("*** %s *** FORMAT=%X\n",
1021 vec_names[vector],
1022 fp->ptregs.format);
1023 else
1024 printk ("*** Exception %d *** FORMAT=%X\n",
1025 vector, fp->ptregs.format);
1026 if (vector == VEC_ADDRERR && CPU_IS_020_OR_030) {
1027 unsigned short ssw = fp->un.fmtb.ssw;
1028
1029 printk ("SSW=%#06x ", ssw);
1030
1031 if (ssw & RC)
1032 printk ("Pipe stage C instruction fault at %#010lx\n",
1033 (fp->ptregs.format) == 0xA ?
1034 fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2);
1035 if (ssw & RB)
1036 printk ("Pipe stage B instruction fault at %#010lx\n",
1037 (fp->ptregs.format) == 0xA ?
1038 fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
1039 if (ssw & DF)
1040 printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
1041 ssw & RW ? "read" : "write",
1042 fp->un.fmtb.daddr, space_names[ssw & DFC],
1043 fp->ptregs.pc);
1044 }
1045 printk ("Current process id is %d\n", task_pid_nr(current));
1046 die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0);
1047}
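
Both bad_super_trap() above and trap_c() below recover the vector number with (vector >> 2) & 0xff: the hardware stacks the byte offset of the entry within the vector table, and each slot is four bytes wide. A stand-alone sketch of that conversion:

#include <stdio.h>

static unsigned int frame_vector_to_index(unsigned int stacked_vector)
{
	return (stacked_vector >> 2) & 0xff;
}

int main(void)
{
	/* vector offset 0x08 is the bus error entry (vector 2) */
	printf("stacked 0x08 -> vector %u\n", frame_vector_to_index(0x08));
	return 0;
}
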
1048
1049asmlinkage void trap_c(struct frame *fp)
1050{
1051 int sig;
1052 int vector = (fp->ptregs.vector >> 2) & 0xff;
1053 siginfo_t info;
1054
1055 if (fp->ptregs.sr & PS_S) {
1056 if (vector == VEC_TRACE) {
1057 /* traced a trapping instruction on a 68020/30,
1058 * real exception will be executed afterwards.
1059 */
1060 } else if (!handle_kernel_fault(&fp->ptregs))
1061 bad_super_trap(fp);
1062 return;
1063 }
1064
1065 /* send the appropriate signal to the user program */
1066 switch (vector) {
1067 case VEC_ADDRERR:
1068 info.si_code = BUS_ADRALN;
1069 sig = SIGBUS;
1070 break;
1071 case VEC_ILLEGAL:
1072 case VEC_LINE10:
1073 case VEC_LINE11:
1074 info.si_code = ILL_ILLOPC;
1075 sig = SIGILL;
1076 break;
1077 case VEC_PRIV:
1078 info.si_code = ILL_PRVOPC;
1079 sig = SIGILL;
1080 break;
1081 case VEC_COPROC:
1082 info.si_code = ILL_COPROC;
1083 sig = SIGILL;
1084 break;
1085 case VEC_TRAP1:
1086 case VEC_TRAP2:
1087 case VEC_TRAP3:
1088 case VEC_TRAP4:
1089 case VEC_TRAP5:
1090 case VEC_TRAP6:
1091 case VEC_TRAP7:
1092 case VEC_TRAP8:
1093 case VEC_TRAP9:
1094 case VEC_TRAP10:
1095 case VEC_TRAP11:
1096 case VEC_TRAP12:
1097 case VEC_TRAP13:
1098 case VEC_TRAP14:
1099 info.si_code = ILL_ILLTRP;
1100 sig = SIGILL;
1101 break;
1102 case VEC_FPBRUC:
1103 case VEC_FPOE:
1104 case VEC_FPNAN:
1105 info.si_code = FPE_FLTINV;
1106 sig = SIGFPE;
1107 break;
1108 case VEC_FPIR:
1109 info.si_code = FPE_FLTRES;
1110 sig = SIGFPE;
1111 break;
1112 case VEC_FPDIVZ:
1113 info.si_code = FPE_FLTDIV;
1114 sig = SIGFPE;
1115 break;
1116 case VEC_FPUNDER:
1117 info.si_code = FPE_FLTUND;
1118 sig = SIGFPE;
1119 break;
1120 case VEC_FPOVER:
1121 info.si_code = FPE_FLTOVF;
1122 sig = SIGFPE;
1123 break;
1124 case VEC_ZERODIV:
1125 info.si_code = FPE_INTDIV;
1126 sig = SIGFPE;
1127 break;
1128 case VEC_CHK:
1129 case VEC_TRAP:
1130 info.si_code = FPE_INTOVF;
1131 sig = SIGFPE;
1132 break;
1133 case VEC_TRACE: /* ptrace single step */
1134 info.si_code = TRAP_TRACE;
1135 sig = SIGTRAP;
1136 break;
1137 case VEC_TRAP15: /* breakpoint */
1138 info.si_code = TRAP_BRKPT;
1139 sig = SIGTRAP;
1140 break;
1141 default:
1142 info.si_code = ILL_ILLOPC;
1143 sig = SIGILL;
1144 break;
1145 }
1146 info.si_signo = sig;
1147 info.si_errno = 0;
1148 switch (fp->ptregs.format) {
1149 default:
1150 info.si_addr = (void *) fp->ptregs.pc;
1151 break;
1152 case 2:
1153 info.si_addr = (void *) fp->un.fmt2.iaddr;
1154 break;
1155 case 7:
1156 info.si_addr = (void *) fp->un.fmt7.effaddr;
1157 break;
1158 case 9:
1159 info.si_addr = (void *) fp->un.fmt9.iaddr;
1160 break;
1161 case 10:
1162 info.si_addr = (void *) fp->un.fmta.daddr;
1163 break;
1164 case 11:
1165 info.si_addr = (void *) fp->un.fmtb.daddr;
1166 break;
1167 }
1168 force_sig_info (sig, &info, current);
1169}
1170
1171void die_if_kernel (char *str, struct pt_regs *fp, int nr)
1172{
1173 if (!(fp->sr & PS_S))
1174 return;
1175
1176 console_verbose();
1177 printk("%s: %08x\n",str,nr);
1178 show_registers(fp);
1179 add_taint(TAINT_DIE);
1180 do_exit(SIGSEGV);
1181}
1182
1183asmlinkage void set_esp0(unsigned long ssp)
1184{
1185 current->thread.esp0 = ssp;
1186}
1187
1188/*
1189 * This function is called if an error occurs while accessing
1190 * user-space from the fpsp040 code.
1191 */
1192asmlinkage void fpsp040_die(void)
1193{
1194 do_exit(SIGSEGV);
1195}
1196
1197#ifdef CONFIG_M68KFPU_EMU
1198asmlinkage void fpemu_signal(int signal, int code, void *addr)
1199{
1200 siginfo_t info;
1201
1202 info.si_signo = signal;
1203 info.si_errno = 0;
1204 info.si_code = code;
1205 info.si_addr = addr;
1206 force_sig_info(signal, &info, current);
1207}
1208#endif 5#endif
diff --git a/arch/m68k/kernel/vectors.c b/arch/m68k/kernel/vectors.c
deleted file mode 100644
index 322c977bb9e..00000000000
--- a/arch/m68k/kernel/vectors.c
+++ /dev/null
@@ -1,144 +0,0 @@
1/*
2 * vectors.c
3 *
4 * Copyright (C) 1993, 1994 by Hamish Macdonald
5 *
6 * 68040 fixes by Michael Rausch
7 * 68040 fixes by Martin Apel
8 * 68040 fixes and writeback by Richard Zidlicky
9 * 68060 fixes by Roman Hodek
10 * 68060 fixes by Jesper Skov
11 *
12 * This file is subject to the terms and conditions of the GNU General Public
13 * License. See the file COPYING in the main directory of this archive
14 * for more details.
15 */
16
17/*
18 * Sets up all exception vectors
19 */
20#include <linux/sched.h>
21#include <linux/kernel.h>
22#include <linux/linkage.h>
23#include <linux/init.h>
24#include <linux/kallsyms.h>
25
26#include <asm/setup.h>
27#include <asm/fpu.h>
28#include <asm/traps.h>
29
30/* assembler routines */
31asmlinkage void system_call(void);
32asmlinkage void buserr(void);
33asmlinkage void trap(void);
34asmlinkage void nmihandler(void);
35#ifdef CONFIG_M68KFPU_EMU
36asmlinkage void fpu_emu(void);
37#endif
38
39e_vector vectors[256];
40
41/* nmi handler for the Amiga */
42asm(".text\n"
43 __ALIGN_STR "\n"
44 "nmihandler: rte");
45
46/*
47 * this must be called very early as the kernel might
 48 * use some instructions that are emulated on the 060
49 * and so we're prepared for early probe attempts (e.g. nf_init).
50 */
51void __init base_trap_init(void)
52{
53 if (MACH_IS_SUN3X) {
54 extern e_vector *sun3x_prom_vbr;
55
56 __asm__ volatile ("movec %%vbr, %0" : "=r" (sun3x_prom_vbr));
57 }
58
59 /* setup the exception vector table */
60 __asm__ volatile ("movec %0,%%vbr" : : "r" ((void*)vectors));
61
62 if (CPU_IS_060) {
63 /* set up ISP entry points */
64 asmlinkage void unimp_vec(void) asm ("_060_isp_unimp");
65
66 vectors[VEC_UNIMPII] = unimp_vec;
67 }
68
69 vectors[VEC_BUSERR] = buserr;
70 vectors[VEC_ILLEGAL] = trap;
71 vectors[VEC_SYS] = system_call;
72}
73
74void __init trap_init (void)
75{
76 int i;
77
78 for (i = VEC_SPUR; i <= VEC_INT7; i++)
79 vectors[i] = bad_inthandler;
80
81 for (i = 0; i < VEC_USER; i++)
82 if (!vectors[i])
83 vectors[i] = trap;
84
85 for (i = VEC_USER; i < 256; i++)
86 vectors[i] = bad_inthandler;
87
88#ifdef CONFIG_M68KFPU_EMU
89 if (FPU_IS_EMU)
90 vectors[VEC_LINE11] = fpu_emu;
91#endif
92
93 if (CPU_IS_040 && !FPU_IS_EMU) {
94 /* set up FPSP entry points */
95 asmlinkage void dz_vec(void) asm ("dz");
96 asmlinkage void inex_vec(void) asm ("inex");
97 asmlinkage void ovfl_vec(void) asm ("ovfl");
98 asmlinkage void unfl_vec(void) asm ("unfl");
99 asmlinkage void snan_vec(void) asm ("snan");
100 asmlinkage void operr_vec(void) asm ("operr");
101 asmlinkage void bsun_vec(void) asm ("bsun");
102 asmlinkage void fline_vec(void) asm ("fline");
103 asmlinkage void unsupp_vec(void) asm ("unsupp");
104
105 vectors[VEC_FPDIVZ] = dz_vec;
106 vectors[VEC_FPIR] = inex_vec;
107 vectors[VEC_FPOVER] = ovfl_vec;
108 vectors[VEC_FPUNDER] = unfl_vec;
109 vectors[VEC_FPNAN] = snan_vec;
110 vectors[VEC_FPOE] = operr_vec;
111 vectors[VEC_FPBRUC] = bsun_vec;
112 vectors[VEC_LINE11] = fline_vec;
113 vectors[VEC_FPUNSUP] = unsupp_vec;
114 }
115
116 if (CPU_IS_060 && !FPU_IS_EMU) {
117 /* set up IFPSP entry points */
118 asmlinkage void snan_vec6(void) asm ("_060_fpsp_snan");
119 asmlinkage void operr_vec6(void) asm ("_060_fpsp_operr");
120 asmlinkage void ovfl_vec6(void) asm ("_060_fpsp_ovfl");
121 asmlinkage void unfl_vec6(void) asm ("_060_fpsp_unfl");
122 asmlinkage void dz_vec6(void) asm ("_060_fpsp_dz");
123 asmlinkage void inex_vec6(void) asm ("_060_fpsp_inex");
124 asmlinkage void fline_vec6(void) asm ("_060_fpsp_fline");
125 asmlinkage void unsupp_vec6(void) asm ("_060_fpsp_unsupp");
126 asmlinkage void effadd_vec6(void) asm ("_060_fpsp_effadd");
127
128 vectors[VEC_FPNAN] = snan_vec6;
129 vectors[VEC_FPOE] = operr_vec6;
130 vectors[VEC_FPOVER] = ovfl_vec6;
131 vectors[VEC_FPUNDER] = unfl_vec6;
132 vectors[VEC_FPDIVZ] = dz_vec6;
133 vectors[VEC_FPIR] = inex_vec6;
134 vectors[VEC_LINE11] = fline_vec6;
135 vectors[VEC_FPUNSUP] = unsupp_vec6;
136 vectors[VEC_UNIMPEA] = effadd_vec6;
137 }
138
139 /* if running on an amiga, make the NMI interrupt do nothing */
140 if (MACH_IS_AMIGA) {
141 vectors[VEC_INT7] = nmihandler;
142 }
143}
144
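
trap_init() above follows a common dispatch-table pattern: fill the slots with a catch-all handler first, then install the specific entries. A compact user-space sketch of the same idea (simplified: the real code distinguishes bad_inthandler from trap and skips entries already set by base_trap_init); the handler names and the reduced e_vector type are invented for the demo. On the real hardware the table is additionally loaded into the VBR with movec, as base_trap_init() shows.

#include <stdio.h>

typedef void (*e_vector)(void);

static void default_trap(void)    { puts("unexpected trap"); }
static void bus_error_entry(void) { puts("bus error"); }

#define VEC_BUSERR	2
#define NR_VECTORS	256

static e_vector vectors[NR_VECTORS];

int main(void)
{
	for (int i = 0; i < NR_VECTORS; i++)
		vectors[i] = default_trap;		/* fill with the catch-all */
	vectors[VEC_BUSERR] = bus_error_entry;		/* then install specific ones */

	vectors[VEC_BUSERR]();
	return 0;
}
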
diff --git a/arch/m68k/kernel/vmlinux.lds.S b/arch/m68k/kernel/vmlinux.lds.S
index 69ec7963887..030dabf0bc5 100644
--- a/arch/m68k/kernel/vmlinux.lds.S
+++ b/arch/m68k/kernel/vmlinux.lds.S
@@ -1,14 +1,5 @@
1#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE) 1#ifdef CONFIG_MMU
2PHDRS 2#include "vmlinux.lds_mm.S"
3{
4 text PT_LOAD FILEHDR PHDRS FLAGS (7);
5 data PT_LOAD FLAGS (7);
6}
7#ifdef CONFIG_SUN3
8#include "vmlinux-sun3.lds"
9#else 3#else
10#include "vmlinux-std.lds" 4#include "vmlinux.lds_no.S"
11#endif
12#else
13#include "vmlinux-nommu.lds"
14#endif 5#endif