author | Greg Ungerer <gerg@uclinux.org> | 2012-03-13 23:22:25 -0400
---|---|---
committer | Greg Ungerer <gerg@uclinux.org> | 2012-05-20 07:21:08 -0400
commit | 0c22fafd312cd8658ca23f6067a20aec0781c02a (patch) |
tree | 147230e8fef212a4f80cc0beeb3ad0321129540a /arch/m68k/kernel |
parent | 36be50515fe2aef61533b516fa2576a2c7fe7664 (diff) |
m68k: merge the MMU and non-MMU signal.c code
The MMU (signal_mm.c) and non-MMU (signal_no.c) versions of the m68k
architecture signal handling code are very similar. Most of their code is
the same.
Merge the two back into a single signal.c, and move some of the code around
inside the file to minimize the number of #ifdefs required. Specifically
we can group the CONFIG_FPU and the CONFIG_MMU code together. We end up needing
a few other "#ifdef CONFIG_MMU" as well, but not too many.
Signed-off-by: Greg Ungerer <gerg@uclinux.org>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Diffstat (limited to 'arch/m68k/kernel')
-rw-r--r-- | arch/m68k/kernel/signal.c | 1201 |
-rw-r--r-- | arch/m68k/kernel/signal_mm.c | 1115 |
-rw-r--r-- | arch/m68k/kernel/signal_no.c | 765 |
3 files changed, 1199 insertions, 1882 deletions
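
As an orientation aid before the diff itself, the outline below sketches how the merged signal.c groups its code. It is an illustrative summary of the patch that follows, not an excerpt from it: function bodies, includes and most declarations are omitted, and the comments only paraphrase what each block contains.

```c
/* Rough layout of the merged arch/m68k/kernel/signal.c (sketch only) */

#ifdef CONFIG_MMU
/* Classic 68k/ColdFire exception-frame handling: frame_extra_sizes(),
 * handle_kernel_fault(), ptrace_signal_deliver(), push_cache(); the
 * adjustformat() and save_a5_state() helpers are empty stubs here. */
#else
/* Non-MMU builds: frame_extra_sizes() returns 0, push_cache() is a
 * no-op, and adjustformat()/save_a5_state() do the real work. */
#endif

/* Code shared by both builds: sys_sigsuspend(), sys_sigaction(),
 * sys_sigaltstack(), and the sigframe/rt_sigframe layouts. */

#ifdef CONFIG_FPU
/* Real FPU context save/restore for the signal frame. */
#else
/* No-FPU builds: the same helpers compile to empty inlines. */
#endif

/* Remaining common code: mangle_kernel_stack(), restore_sigcontext(),
 * do_sigreturn(), setup_frame(), setup_rt_frame(), handle_signal(),
 * do_signal(); a couple of small #ifdef CONFIG_MMU blocks remain in
 * setup_frame()/setup_rt_frame() for the return trampoline. */
```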
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index 2e25713e2ead..1747c7030a33 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -1,5 +1,1202 @@
1 | /* | ||
2 | * linux/arch/m68k/kernel/signal.c | ||
3 | * | ||
4 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file COPYING in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | |||
11 | /* | ||
12 | * Linux/m68k support by Hamish Macdonald | ||
13 | * | ||
14 | * 68060 fixes by Jesper Skov | ||
15 | * | ||
16 | * 1997-12-01 Modified for POSIX.1b signals by Andreas Schwab | ||
17 | * | ||
18 | * mathemu support by Roman Zippel | ||
19 | * (Note: fpstate in the signal context is completely ignored for the emulator | ||
20 | * and the internal floating point format is put on stack) | ||
21 | */ | ||
22 | |||
23 | /* | ||
24 | * ++roman (07/09/96): implemented signal stacks (specially for tosemu on | ||
25 | * Atari :-) Current limitation: Only one sigstack can be active at one time. | ||
26 | * If a second signal with SA_ONSTACK set arrives while working on a sigstack, | ||
27 | * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested | ||
28 | * signal handlers! | ||
29 | */ | ||
30 | |||
31 | #include <linux/sched.h> | ||
32 | #include <linux/mm.h> | ||
33 | #include <linux/kernel.h> | ||
34 | #include <linux/signal.h> | ||
35 | #include <linux/syscalls.h> | ||
36 | #include <linux/errno.h> | ||
37 | #include <linux/wait.h> | ||
38 | #include <linux/ptrace.h> | ||
39 | #include <linux/unistd.h> | ||
40 | #include <linux/stddef.h> | ||
41 | #include <linux/highuid.h> | ||
42 | #include <linux/personality.h> | ||
43 | #include <linux/tty.h> | ||
44 | #include <linux/binfmts.h> | ||
45 | #include <linux/module.h> | ||
46 | |||
47 | #include <asm/setup.h> | ||
48 | #include <asm/uaccess.h> | ||
49 | #include <asm/pgtable.h> | ||
50 | #include <asm/traps.h> | ||
51 | #include <asm/ucontext.h> | ||
52 | |||
53 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | ||
54 | |||
1 | #ifdef CONFIG_MMU | 55 | #ifdef CONFIG_MMU |
2 | #include "signal_mm.c" | 56 | |
57 | /* | ||
58 | * Handle the slight differences in classic 68k and ColdFire trap frames. | ||
59 | */ | ||
60 | #ifdef CONFIG_COLDFIRE | ||
61 | #define FORMAT 4 | ||
62 | #define FMT4SIZE 0 | ||
3 | #else | 63 | #else |
4 | #include "signal_no.c" | 64 | #define FORMAT 0 |
65 | #define FMT4SIZE sizeof(((struct frame *)0)->un.fmt4) | ||
5 | #endif | 66 | #endif |
67 | |||
68 | static const int frame_size_change[16] = { | ||
69 | [1] = -1, /* sizeof(((struct frame *)0)->un.fmt1), */ | ||
70 | [2] = sizeof(((struct frame *)0)->un.fmt2), | ||
71 | [3] = sizeof(((struct frame *)0)->un.fmt3), | ||
72 | [4] = FMT4SIZE, | ||
73 | [5] = -1, /* sizeof(((struct frame *)0)->un.fmt5), */ | ||
74 | [6] = -1, /* sizeof(((struct frame *)0)->un.fmt6), */ | ||
75 | [7] = sizeof(((struct frame *)0)->un.fmt7), | ||
76 | [8] = -1, /* sizeof(((struct frame *)0)->un.fmt8), */ | ||
77 | [9] = sizeof(((struct frame *)0)->un.fmt9), | ||
78 | [10] = sizeof(((struct frame *)0)->un.fmta), | ||
79 | [11] = sizeof(((struct frame *)0)->un.fmtb), | ||
80 | [12] = -1, /* sizeof(((struct frame *)0)->un.fmtc), */ | ||
81 | [13] = -1, /* sizeof(((struct frame *)0)->un.fmtd), */ | ||
82 | [14] = -1, /* sizeof(((struct frame *)0)->un.fmte), */ | ||
83 | [15] = -1, /* sizeof(((struct frame *)0)->un.fmtf), */ | ||
84 | }; | ||
85 | |||
86 | static inline int frame_extra_sizes(int f) | ||
87 | { | ||
88 | return frame_size_change[f]; | ||
89 | } | ||
90 | |||
91 | int handle_kernel_fault(struct pt_regs *regs) | ||
92 | { | ||
93 | const struct exception_table_entry *fixup; | ||
94 | struct pt_regs *tregs; | ||
95 | |||
96 | /* Are we prepared to handle this kernel fault? */ | ||
97 | fixup = search_exception_tables(regs->pc); | ||
98 | if (!fixup) | ||
99 | return 0; | ||
100 | |||
101 | /* Create a new four word stack frame, discarding the old one. */ | ||
102 | regs->stkadj = frame_extra_sizes(regs->format); | ||
103 | tregs = (struct pt_regs *)((long)regs + regs->stkadj); | ||
104 | tregs->vector = regs->vector; | ||
105 | tregs->format = FORMAT; | ||
106 | tregs->pc = fixup->fixup; | ||
107 | tregs->sr = regs->sr; | ||
108 | |||
109 | return 1; | ||
110 | } | ||
111 | |||
112 | void ptrace_signal_deliver(struct pt_regs *regs, void *cookie) | ||
113 | { | ||
114 | if (regs->orig_d0 < 0) | ||
115 | return; | ||
116 | switch (regs->d0) { | ||
117 | case -ERESTARTNOHAND: | ||
118 | case -ERESTARTSYS: | ||
119 | case -ERESTARTNOINTR: | ||
120 | regs->d0 = regs->orig_d0; | ||
121 | regs->orig_d0 = -1; | ||
122 | regs->pc -= 2; | ||
123 | break; | ||
124 | } | ||
125 | } | ||
126 | |||
127 | static inline void push_cache (unsigned long vaddr) | ||
128 | { | ||
129 | /* | ||
130 | * Using the old cache_push_v() was really a big waste. | ||
131 | * | ||
132 | * What we are trying to do is to flush 8 bytes to ram. | ||
133 | * Flushing 2 cache lines of 16 bytes is much cheaper than | ||
134 | * flushing 1 or 2 pages, as previously done in | ||
135 | * cache_push_v(). | ||
136 | * Jes | ||
137 | */ | ||
138 | if (CPU_IS_040) { | ||
139 | unsigned long temp; | ||
140 | |||
141 | __asm__ __volatile__ (".chip 68040\n\t" | ||
142 | "nop\n\t" | ||
143 | "ptestr (%1)\n\t" | ||
144 | "movec %%mmusr,%0\n\t" | ||
145 | ".chip 68k" | ||
146 | : "=r" (temp) | ||
147 | : "a" (vaddr)); | ||
148 | |||
149 | temp &= PAGE_MASK; | ||
150 | temp |= vaddr & ~PAGE_MASK; | ||
151 | |||
152 | __asm__ __volatile__ (".chip 68040\n\t" | ||
153 | "nop\n\t" | ||
154 | "cpushl %%bc,(%0)\n\t" | ||
155 | ".chip 68k" | ||
156 | : : "a" (temp)); | ||
157 | } | ||
158 | else if (CPU_IS_060) { | ||
159 | unsigned long temp; | ||
160 | __asm__ __volatile__ (".chip 68060\n\t" | ||
161 | "plpar (%0)\n\t" | ||
162 | ".chip 68k" | ||
163 | : "=a" (temp) | ||
164 | : "0" (vaddr)); | ||
165 | __asm__ __volatile__ (".chip 68060\n\t" | ||
166 | "cpushl %%bc,(%0)\n\t" | ||
167 | ".chip 68k" | ||
168 | : : "a" (temp)); | ||
169 | } else if (!CPU_IS_COLDFIRE) { | ||
170 | /* | ||
171 | * 68030/68020 have no writeback cache; | ||
172 | * still need to clear icache. | ||
173 | * Note that vaddr is guaranteed to be long word aligned. | ||
174 | */ | ||
175 | unsigned long temp; | ||
176 | asm volatile ("movec %%cacr,%0" : "=r" (temp)); | ||
177 | temp += 4; | ||
178 | asm volatile ("movec %0,%%caar\n\t" | ||
179 | "movec %1,%%cacr" | ||
180 | : : "r" (vaddr), "r" (temp)); | ||
181 | asm volatile ("movec %0,%%caar\n\t" | ||
182 | "movec %1,%%cacr" | ||
183 | : : "r" (vaddr + 4), "r" (temp)); | ||
184 | } | ||
185 | } | ||
186 | |||
187 | static inline void adjustformat(struct pt_regs *regs) | ||
188 | { | ||
189 | } | ||
190 | |||
191 | static inline void save_a5_state(struct sigcontext *sc, struct pt_regs *regs) | ||
192 | { | ||
193 | } | ||
194 | |||
195 | #else /* CONFIG_MMU */ | ||
196 | |||
197 | void ret_from_user_signal(void); | ||
198 | void ret_from_user_rt_signal(void); | ||
199 | |||
200 | static inline int frame_extra_sizes(int f) | ||
201 | { | ||
202 | /* No frame size adjustments required on non-MMU CPUs */ | ||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | static inline void adjustformat(struct pt_regs *regs) | ||
207 | { | ||
208 | ((struct switch_stack *)regs - 1)->a5 = current->mm->start_data; | ||
209 | /* | ||
210 | * set format byte to make stack appear modulo 4, which it will | ||
211 | * be when doing the rte | ||
212 | */ | ||
213 | regs->format = 0x4; | ||
214 | } | ||
215 | |||
216 | static inline void save_a5_state(struct sigcontext *sc, struct pt_regs *regs) | ||
217 | { | ||
218 | sc->sc_a5 = ((struct switch_stack *)regs - 1)->a5; | ||
219 | } | ||
220 | |||
221 | static inline void push_cache(unsigned long vaddr) | ||
222 | { | ||
223 | } | ||
224 | |||
225 | #endif /* CONFIG_MMU */ | ||
226 | |||
227 | /* | ||
228 | * Atomically swap in the new signal mask, and wait for a signal. | ||
229 | */ | ||
230 | asmlinkage int | ||
231 | sys_sigsuspend(int unused0, int unused1, old_sigset_t mask) | ||
232 | { | ||
233 | mask &= _BLOCKABLE; | ||
234 | spin_lock_irq(¤t->sighand->siglock); | ||
235 | current->saved_sigmask = current->blocked; | ||
236 | siginitset(¤t->blocked, mask); | ||
237 | recalc_sigpending(); | ||
238 | spin_unlock_irq(¤t->sighand->siglock); | ||
239 | |||
240 | current->state = TASK_INTERRUPTIBLE; | ||
241 | schedule(); | ||
242 | set_restore_sigmask(); | ||
243 | |||
244 | return -ERESTARTNOHAND; | ||
245 | } | ||
246 | |||
247 | asmlinkage int | ||
248 | sys_sigaction(int sig, const struct old_sigaction __user *act, | ||
249 | struct old_sigaction __user *oact) | ||
250 | { | ||
251 | struct k_sigaction new_ka, old_ka; | ||
252 | int ret; | ||
253 | |||
254 | if (act) { | ||
255 | old_sigset_t mask; | ||
256 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || | ||
257 | __get_user(new_ka.sa.sa_handler, &act->sa_handler) || | ||
258 | __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || | ||
259 | __get_user(new_ka.sa.sa_flags, &act->sa_flags) || | ||
260 | __get_user(mask, &act->sa_mask)) | ||
261 | return -EFAULT; | ||
262 | siginitset(&new_ka.sa.sa_mask, mask); | ||
263 | } | ||
264 | |||
265 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | ||
266 | |||
267 | if (!ret && oact) { | ||
268 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || | ||
269 | __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || | ||
270 | __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || | ||
271 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || | ||
272 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) | ||
273 | return -EFAULT; | ||
274 | } | ||
275 | |||
276 | return ret; | ||
277 | } | ||
278 | |||
279 | asmlinkage int | ||
280 | sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss) | ||
281 | { | ||
282 | return do_sigaltstack(uss, uoss, rdusp()); | ||
283 | } | ||
284 | |||
285 | |||
286 | /* | ||
287 | * Do a signal return; undo the signal stack. | ||
288 | * | ||
289 | * Keep the return code on the stack quadword aligned! | ||
290 | * That makes the cache flush below easier. | ||
291 | */ | ||
292 | |||
293 | struct sigframe | ||
294 | { | ||
295 | char __user *pretcode; | ||
296 | int sig; | ||
297 | int code; | ||
298 | struct sigcontext __user *psc; | ||
299 | char retcode[8]; | ||
300 | unsigned long extramask[_NSIG_WORDS-1]; | ||
301 | struct sigcontext sc; | ||
302 | }; | ||
303 | |||
304 | struct rt_sigframe | ||
305 | { | ||
306 | char __user *pretcode; | ||
307 | int sig; | ||
308 | struct siginfo __user *pinfo; | ||
309 | void __user *puc; | ||
310 | char retcode[8]; | ||
311 | struct siginfo info; | ||
312 | struct ucontext uc; | ||
313 | }; | ||
314 | |||
315 | #define FPCONTEXT_SIZE 216 | ||
316 | #define uc_fpstate uc_filler[0] | ||
317 | #define uc_formatvec uc_filler[FPCONTEXT_SIZE/4] | ||
318 | #define uc_extra uc_filler[FPCONTEXT_SIZE/4+1] | ||
319 | |||
320 | #ifdef CONFIG_FPU | ||
321 | |||
322 | static unsigned char fpu_version; /* version number of fpu, set by setup_frame */ | ||
323 | |||
324 | static inline int restore_fpu_state(struct sigcontext *sc) | ||
325 | { | ||
326 | int err = 1; | ||
327 | |||
328 | if (FPU_IS_EMU) { | ||
329 | /* restore registers */ | ||
330 | memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12); | ||
331 | memcpy(current->thread.fp, sc->sc_fpregs, 24); | ||
332 | return 0; | ||
333 | } | ||
334 | |||
335 | if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) { | ||
336 | /* Verify the frame format. */ | ||
337 | if (!(CPU_IS_060 || CPU_IS_COLDFIRE) && | ||
338 | (sc->sc_fpstate[0] != fpu_version)) | ||
339 | goto out; | ||
340 | if (CPU_IS_020_OR_030) { | ||
341 | if (m68k_fputype & FPU_68881 && | ||
342 | !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4)) | ||
343 | goto out; | ||
344 | if (m68k_fputype & FPU_68882 && | ||
345 | !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4)) | ||
346 | goto out; | ||
347 | } else if (CPU_IS_040) { | ||
348 | if (!(sc->sc_fpstate[1] == 0x00 || | ||
349 | sc->sc_fpstate[1] == 0x28 || | ||
350 | sc->sc_fpstate[1] == 0x60)) | ||
351 | goto out; | ||
352 | } else if (CPU_IS_060) { | ||
353 | if (!(sc->sc_fpstate[3] == 0x00 || | ||
354 | sc->sc_fpstate[3] == 0x60 || | ||
355 | sc->sc_fpstate[3] == 0xe0)) | ||
356 | goto out; | ||
357 | } else if (CPU_IS_COLDFIRE) { | ||
358 | if (!(sc->sc_fpstate[0] == 0x00 || | ||
359 | sc->sc_fpstate[0] == 0x05 || | ||
360 | sc->sc_fpstate[0] == 0xe5)) | ||
361 | goto out; | ||
362 | } else | ||
363 | goto out; | ||
364 | |||
365 | if (CPU_IS_COLDFIRE) { | ||
366 | __asm__ volatile ("fmovemd %0,%%fp0-%%fp1\n\t" | ||
367 | "fmovel %1,%%fpcr\n\t" | ||
368 | "fmovel %2,%%fpsr\n\t" | ||
369 | "fmovel %3,%%fpiar" | ||
370 | : /* no outputs */ | ||
371 | : "m" (sc->sc_fpregs[0]), | ||
372 | "m" (sc->sc_fpcntl[0]), | ||
373 | "m" (sc->sc_fpcntl[1]), | ||
374 | "m" (sc->sc_fpcntl[2])); | ||
375 | } else { | ||
376 | __asm__ volatile (".chip 68k/68881\n\t" | ||
377 | "fmovemx %0,%%fp0-%%fp1\n\t" | ||
378 | "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t" | ||
379 | ".chip 68k" | ||
380 | : /* no outputs */ | ||
381 | : "m" (*sc->sc_fpregs), | ||
382 | "m" (*sc->sc_fpcntl)); | ||
383 | } | ||
384 | } | ||
385 | |||
386 | if (CPU_IS_COLDFIRE) { | ||
387 | __asm__ volatile ("frestore %0" : : "m" (*sc->sc_fpstate)); | ||
388 | } else { | ||
389 | __asm__ volatile (".chip 68k/68881\n\t" | ||
390 | "frestore %0\n\t" | ||
391 | ".chip 68k" | ||
392 | : : "m" (*sc->sc_fpstate)); | ||
393 | } | ||
394 | err = 0; | ||
395 | |||
396 | out: | ||
397 | return err; | ||
398 | } | ||
399 | |||
400 | static inline int rt_restore_fpu_state(struct ucontext __user *uc) | ||
401 | { | ||
402 | unsigned char fpstate[FPCONTEXT_SIZE]; | ||
403 | int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0); | ||
404 | fpregset_t fpregs; | ||
405 | int err = 1; | ||
406 | |||
407 | if (FPU_IS_EMU) { | ||
408 | /* restore fpu control register */ | ||
409 | if (__copy_from_user(current->thread.fpcntl, | ||
410 | uc->uc_mcontext.fpregs.f_fpcntl, 12)) | ||
411 | goto out; | ||
412 | /* restore all other fpu register */ | ||
413 | if (__copy_from_user(current->thread.fp, | ||
414 | uc->uc_mcontext.fpregs.f_fpregs, 96)) | ||
415 | goto out; | ||
416 | return 0; | ||
417 | } | ||
418 | |||
419 | if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate)) | ||
420 | goto out; | ||
421 | if (CPU_IS_060 ? fpstate[2] : fpstate[0]) { | ||
422 | if (!(CPU_IS_060 || CPU_IS_COLDFIRE)) | ||
423 | context_size = fpstate[1]; | ||
424 | /* Verify the frame format. */ | ||
425 | if (!(CPU_IS_060 || CPU_IS_COLDFIRE) && | ||
426 | (fpstate[0] != fpu_version)) | ||
427 | goto out; | ||
428 | if (CPU_IS_020_OR_030) { | ||
429 | if (m68k_fputype & FPU_68881 && | ||
430 | !(context_size == 0x18 || context_size == 0xb4)) | ||
431 | goto out; | ||
432 | if (m68k_fputype & FPU_68882 && | ||
433 | !(context_size == 0x38 || context_size == 0xd4)) | ||
434 | goto out; | ||
435 | } else if (CPU_IS_040) { | ||
436 | if (!(context_size == 0x00 || | ||
437 | context_size == 0x28 || | ||
438 | context_size == 0x60)) | ||
439 | goto out; | ||
440 | } else if (CPU_IS_060) { | ||
441 | if (!(fpstate[3] == 0x00 || | ||
442 | fpstate[3] == 0x60 || | ||
443 | fpstate[3] == 0xe0)) | ||
444 | goto out; | ||
445 | } else if (CPU_IS_COLDFIRE) { | ||
446 | if (!(fpstate[3] == 0x00 || | ||
447 | fpstate[3] == 0x05 || | ||
448 | fpstate[3] == 0xe5)) | ||
449 | goto out; | ||
450 | } else | ||
451 | goto out; | ||
452 | if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs, | ||
453 | sizeof(fpregs))) | ||
454 | goto out; | ||
455 | |||
456 | if (CPU_IS_COLDFIRE) { | ||
457 | __asm__ volatile ("fmovemd %0,%%fp0-%%fp7\n\t" | ||
458 | "fmovel %1,%%fpcr\n\t" | ||
459 | "fmovel %2,%%fpsr\n\t" | ||
460 | "fmovel %3,%%fpiar" | ||
461 | : /* no outputs */ | ||
462 | : "m" (fpregs.f_fpregs[0]), | ||
463 | "m" (fpregs.f_fpcntl[0]), | ||
464 | "m" (fpregs.f_fpcntl[1]), | ||
465 | "m" (fpregs.f_fpcntl[2])); | ||
466 | } else { | ||
467 | __asm__ volatile (".chip 68k/68881\n\t" | ||
468 | "fmovemx %0,%%fp0-%%fp7\n\t" | ||
469 | "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t" | ||
470 | ".chip 68k" | ||
471 | : /* no outputs */ | ||
472 | : "m" (*fpregs.f_fpregs), | ||
473 | "m" (*fpregs.f_fpcntl)); | ||
474 | } | ||
475 | } | ||
476 | if (context_size && | ||
477 | __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1, | ||
478 | context_size)) | ||
479 | goto out; | ||
480 | |||
481 | if (CPU_IS_COLDFIRE) { | ||
482 | __asm__ volatile ("frestore %0" : : "m" (*fpstate)); | ||
483 | } else { | ||
484 | __asm__ volatile (".chip 68k/68881\n\t" | ||
485 | "frestore %0\n\t" | ||
486 | ".chip 68k" | ||
487 | : : "m" (*fpstate)); | ||
488 | } | ||
489 | err = 0; | ||
490 | |||
491 | out: | ||
492 | return err; | ||
493 | } | ||
494 | |||
495 | /* | ||
496 | * Set up a signal frame. | ||
497 | */ | ||
498 | static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs) | ||
499 | { | ||
500 | if (FPU_IS_EMU) { | ||
501 | /* save registers */ | ||
502 | memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12); | ||
503 | memcpy(sc->sc_fpregs, current->thread.fp, 24); | ||
504 | return; | ||
505 | } | ||
506 | |||
507 | if (CPU_IS_COLDFIRE) { | ||
508 | __asm__ volatile ("fsave %0" | ||
509 | : : "m" (*sc->sc_fpstate) : "memory"); | ||
510 | } else { | ||
511 | __asm__ volatile (".chip 68k/68881\n\t" | ||
512 | "fsave %0\n\t" | ||
513 | ".chip 68k" | ||
514 | : : "m" (*sc->sc_fpstate) : "memory"); | ||
515 | } | ||
516 | |||
517 | if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) { | ||
518 | fpu_version = sc->sc_fpstate[0]; | ||
519 | if (CPU_IS_020_OR_030 && | ||
520 | regs->vector >= (VEC_FPBRUC * 4) && | ||
521 | regs->vector <= (VEC_FPNAN * 4)) { | ||
522 | /* Clear pending exception in 68882 idle frame */ | ||
523 | if (*(unsigned short *) sc->sc_fpstate == 0x1f38) | ||
524 | sc->sc_fpstate[0x38] |= 1 << 3; | ||
525 | } | ||
526 | |||
527 | if (CPU_IS_COLDFIRE) { | ||
528 | __asm__ volatile ("fmovemd %%fp0-%%fp1,%0\n\t" | ||
529 | "fmovel %%fpcr,%1\n\t" | ||
530 | "fmovel %%fpsr,%2\n\t" | ||
531 | "fmovel %%fpiar,%3" | ||
532 | : "=m" (sc->sc_fpregs[0]), | ||
533 | "=m" (sc->sc_fpcntl[0]), | ||
534 | "=m" (sc->sc_fpcntl[1]), | ||
535 | "=m" (sc->sc_fpcntl[2]) | ||
536 | : /* no inputs */ | ||
537 | : "memory"); | ||
538 | } else { | ||
539 | __asm__ volatile (".chip 68k/68881\n\t" | ||
540 | "fmovemx %%fp0-%%fp1,%0\n\t" | ||
541 | "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t" | ||
542 | ".chip 68k" | ||
543 | : "=m" (*sc->sc_fpregs), | ||
544 | "=m" (*sc->sc_fpcntl) | ||
545 | : /* no inputs */ | ||
546 | : "memory"); | ||
547 | } | ||
548 | } | ||
549 | } | ||
550 | |||
551 | static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs) | ||
552 | { | ||
553 | unsigned char fpstate[FPCONTEXT_SIZE]; | ||
554 | int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0); | ||
555 | int err = 0; | ||
556 | |||
557 | if (FPU_IS_EMU) { | ||
558 | /* save fpu control register */ | ||
559 | err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl, | ||
560 | current->thread.fpcntl, 12); | ||
561 | /* save all other fpu register */ | ||
562 | err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs, | ||
563 | current->thread.fp, 96); | ||
564 | return err; | ||
565 | } | ||
566 | |||
567 | if (CPU_IS_COLDFIRE) { | ||
568 | __asm__ volatile ("fsave %0" : : "m" (*fpstate) : "memory"); | ||
569 | } else { | ||
570 | __asm__ volatile (".chip 68k/68881\n\t" | ||
571 | "fsave %0\n\t" | ||
572 | ".chip 68k" | ||
573 | : : "m" (*fpstate) : "memory"); | ||
574 | } | ||
575 | |||
576 | err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate); | ||
577 | if (CPU_IS_060 ? fpstate[2] : fpstate[0]) { | ||
578 | fpregset_t fpregs; | ||
579 | if (!(CPU_IS_060 || CPU_IS_COLDFIRE)) | ||
580 | context_size = fpstate[1]; | ||
581 | fpu_version = fpstate[0]; | ||
582 | if (CPU_IS_020_OR_030 && | ||
583 | regs->vector >= (VEC_FPBRUC * 4) && | ||
584 | regs->vector <= (VEC_FPNAN * 4)) { | ||
585 | /* Clear pending exception in 68882 idle frame */ | ||
586 | if (*(unsigned short *) fpstate == 0x1f38) | ||
587 | fpstate[0x38] |= 1 << 3; | ||
588 | } | ||
589 | if (CPU_IS_COLDFIRE) { | ||
590 | __asm__ volatile ("fmovemd %%fp0-%%fp7,%0\n\t" | ||
591 | "fmovel %%fpcr,%1\n\t" | ||
592 | "fmovel %%fpsr,%2\n\t" | ||
593 | "fmovel %%fpiar,%3" | ||
594 | : "=m" (fpregs.f_fpregs[0]), | ||
595 | "=m" (fpregs.f_fpcntl[0]), | ||
596 | "=m" (fpregs.f_fpcntl[1]), | ||
597 | "=m" (fpregs.f_fpcntl[2]) | ||
598 | : /* no inputs */ | ||
599 | : "memory"); | ||
600 | } else { | ||
601 | __asm__ volatile (".chip 68k/68881\n\t" | ||
602 | "fmovemx %%fp0-%%fp7,%0\n\t" | ||
603 | "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t" | ||
604 | ".chip 68k" | ||
605 | : "=m" (*fpregs.f_fpregs), | ||
606 | "=m" (*fpregs.f_fpcntl) | ||
607 | : /* no inputs */ | ||
608 | : "memory"); | ||
609 | } | ||
610 | err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs, | ||
611 | sizeof(fpregs)); | ||
612 | } | ||
613 | if (context_size) | ||
614 | err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4, | ||
615 | context_size); | ||
616 | return err; | ||
617 | } | ||
618 | |||
619 | #else /* CONFIG_FPU */ | ||
620 | |||
621 | /* | ||
622 | * For the case with no FPU configured these all do nothing. | ||
623 | */ | ||
624 | static inline int restore_fpu_state(struct sigcontext *sc) | ||
625 | { | ||
626 | return 0; | ||
627 | } | ||
628 | |||
629 | static inline int rt_restore_fpu_state(struct ucontext __user *uc) | ||
630 | { | ||
631 | return 0; | ||
632 | } | ||
633 | |||
634 | static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs) | ||
635 | { | ||
636 | } | ||
637 | |||
638 | static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs) | ||
639 | { | ||
640 | return 0; | ||
641 | } | ||
642 | |||
643 | #endif /* CONFIG_FPU */ | ||
644 | |||
645 | static int mangle_kernel_stack(struct pt_regs *regs, int formatvec, | ||
646 | void __user *fp) | ||
647 | { | ||
648 | int fsize = frame_extra_sizes(formatvec >> 12); | ||
649 | if (fsize < 0) { | ||
650 | /* | ||
651 | * user process trying to return with weird frame format | ||
652 | */ | ||
653 | #ifdef DEBUG | ||
654 | printk("user process returning with weird frame format\n"); | ||
655 | #endif | ||
656 | return 1; | ||
657 | } | ||
658 | if (!fsize) { | ||
659 | regs->format = formatvec >> 12; | ||
660 | regs->vector = formatvec & 0xfff; | ||
661 | } else { | ||
662 | struct switch_stack *sw = (struct switch_stack *)regs - 1; | ||
663 | unsigned long buf[fsize / 2]; /* yes, twice as much */ | ||
664 | |||
665 | /* that'll make sure that expansion won't crap over data */ | ||
666 | if (copy_from_user(buf + fsize / 4, fp, fsize)) | ||
667 | return 1; | ||
668 | |||
669 | /* point of no return */ | ||
670 | regs->format = formatvec >> 12; | ||
671 | regs->vector = formatvec & 0xfff; | ||
672 | #define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack)) | ||
673 | __asm__ __volatile__ ( | ||
674 | #ifdef CONFIG_COLDFIRE | ||
675 | " movel %0,%/sp\n\t" | ||
676 | " bra ret_from_signal\n" | ||
677 | #else | ||
678 | " movel %0,%/a0\n\t" | ||
679 | " subl %1,%/a0\n\t" /* make room on stack */ | ||
680 | " movel %/a0,%/sp\n\t" /* set stack pointer */ | ||
681 | /* move switch_stack and pt_regs */ | ||
682 | "1: movel %0@+,%/a0@+\n\t" | ||
683 | " dbra %2,1b\n\t" | ||
684 | " lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */ | ||
685 | " lsrl #2,%1\n\t" | ||
686 | " subql #1,%1\n\t" | ||
687 | /* copy to the gap we'd made */ | ||
688 | "2: movel %4@+,%/a0@+\n\t" | ||
689 | " dbra %1,2b\n\t" | ||
690 | " bral ret_from_signal\n" | ||
691 | #endif | ||
692 | : /* no outputs, it doesn't ever return */ | ||
693 | : "a" (sw), "d" (fsize), "d" (frame_offset/4-1), | ||
694 | "n" (frame_offset), "a" (buf + fsize/4) | ||
695 | : "a0"); | ||
696 | #undef frame_offset | ||
697 | } | ||
698 | return 0; | ||
699 | } | ||
700 | |||
701 | static inline int | ||
702 | restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp) | ||
703 | { | ||
704 | int formatvec; | ||
705 | struct sigcontext context; | ||
706 | int err = 0; | ||
707 | |||
708 | /* Always make any pending restarted system calls return -EINTR */ | ||
709 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
710 | |||
711 | /* get previous context */ | ||
712 | if (copy_from_user(&context, usc, sizeof(context))) | ||
713 | goto badframe; | ||
714 | |||
715 | /* restore passed registers */ | ||
716 | regs->d0 = context.sc_d0; | ||
717 | regs->d1 = context.sc_d1; | ||
718 | regs->a0 = context.sc_a0; | ||
719 | regs->a1 = context.sc_a1; | ||
720 | regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff); | ||
721 | regs->pc = context.sc_pc; | ||
722 | regs->orig_d0 = -1; /* disable syscall checks */ | ||
723 | wrusp(context.sc_usp); | ||
724 | formatvec = context.sc_formatvec; | ||
725 | |||
726 | err = restore_fpu_state(&context); | ||
727 | |||
728 | if (err || mangle_kernel_stack(regs, formatvec, fp)) | ||
729 | goto badframe; | ||
730 | |||
731 | return 0; | ||
732 | |||
733 | badframe: | ||
734 | return 1; | ||
735 | } | ||
736 | |||
737 | static inline int | ||
738 | rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw, | ||
739 | struct ucontext __user *uc) | ||
740 | { | ||
741 | int temp; | ||
742 | greg_t __user *gregs = uc->uc_mcontext.gregs; | ||
743 | unsigned long usp; | ||
744 | int err; | ||
745 | |||
746 | /* Always make any pending restarted system calls return -EINTR */ | ||
747 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
748 | |||
749 | err = __get_user(temp, &uc->uc_mcontext.version); | ||
750 | if (temp != MCONTEXT_VERSION) | ||
751 | goto badframe; | ||
752 | /* restore passed registers */ | ||
753 | err |= __get_user(regs->d0, &gregs[0]); | ||
754 | err |= __get_user(regs->d1, &gregs[1]); | ||
755 | err |= __get_user(regs->d2, &gregs[2]); | ||
756 | err |= __get_user(regs->d3, &gregs[3]); | ||
757 | err |= __get_user(regs->d4, &gregs[4]); | ||
758 | err |= __get_user(regs->d5, &gregs[5]); | ||
759 | err |= __get_user(sw->d6, &gregs[6]); | ||
760 | err |= __get_user(sw->d7, &gregs[7]); | ||
761 | err |= __get_user(regs->a0, &gregs[8]); | ||
762 | err |= __get_user(regs->a1, &gregs[9]); | ||
763 | err |= __get_user(regs->a2, &gregs[10]); | ||
764 | err |= __get_user(sw->a3, &gregs[11]); | ||
765 | err |= __get_user(sw->a4, &gregs[12]); | ||
766 | err |= __get_user(sw->a5, &gregs[13]); | ||
767 | err |= __get_user(sw->a6, &gregs[14]); | ||
768 | err |= __get_user(usp, &gregs[15]); | ||
769 | wrusp(usp); | ||
770 | err |= __get_user(regs->pc, &gregs[16]); | ||
771 | err |= __get_user(temp, &gregs[17]); | ||
772 | regs->sr = (regs->sr & 0xff00) | (temp & 0xff); | ||
773 | regs->orig_d0 = -1; /* disable syscall checks */ | ||
774 | err |= __get_user(temp, &uc->uc_formatvec); | ||
775 | |||
776 | err |= rt_restore_fpu_state(uc); | ||
777 | |||
778 | if (err || do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT) | ||
779 | goto badframe; | ||
780 | |||
781 | if (mangle_kernel_stack(regs, temp, &uc->uc_extra)) | ||
782 | goto badframe; | ||
783 | |||
784 | return 0; | ||
785 | |||
786 | badframe: | ||
787 | return 1; | ||
788 | } | ||
789 | |||
790 | asmlinkage int do_sigreturn(unsigned long __unused) | ||
791 | { | ||
792 | struct switch_stack *sw = (struct switch_stack *) &__unused; | ||
793 | struct pt_regs *regs = (struct pt_regs *) (sw + 1); | ||
794 | unsigned long usp = rdusp(); | ||
795 | struct sigframe __user *frame = (struct sigframe __user *)(usp - 4); | ||
796 | sigset_t set; | ||
797 | |||
798 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | ||
799 | goto badframe; | ||
800 | if (__get_user(set.sig[0], &frame->sc.sc_mask) || | ||
801 | (_NSIG_WORDS > 1 && | ||
802 | __copy_from_user(&set.sig[1], &frame->extramask, | ||
803 | sizeof(frame->extramask)))) | ||
804 | goto badframe; | ||
805 | |||
806 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
807 | current->blocked = set; | ||
808 | recalc_sigpending(); | ||
809 | |||
810 | if (restore_sigcontext(regs, &frame->sc, frame + 1)) | ||
811 | goto badframe; | ||
812 | return regs->d0; | ||
813 | |||
814 | badframe: | ||
815 | force_sig(SIGSEGV, current); | ||
816 | return 0; | ||
817 | } | ||
818 | |||
819 | asmlinkage int do_rt_sigreturn(unsigned long __unused) | ||
820 | { | ||
821 | struct switch_stack *sw = (struct switch_stack *) &__unused; | ||
822 | struct pt_regs *regs = (struct pt_regs *) (sw + 1); | ||
823 | unsigned long usp = rdusp(); | ||
824 | struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4); | ||
825 | sigset_t set; | ||
826 | |||
827 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | ||
828 | goto badframe; | ||
829 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) | ||
830 | goto badframe; | ||
831 | |||
832 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
833 | current->blocked = set; | ||
834 | recalc_sigpending(); | ||
835 | |||
836 | if (rt_restore_ucontext(regs, sw, &frame->uc)) | ||
837 | goto badframe; | ||
838 | return regs->d0; | ||
839 | |||
840 | badframe: | ||
841 | force_sig(SIGSEGV, current); | ||
842 | return 0; | ||
843 | } | ||
844 | |||
845 | static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs, | ||
846 | unsigned long mask) | ||
847 | { | ||
848 | sc->sc_mask = mask; | ||
849 | sc->sc_usp = rdusp(); | ||
850 | sc->sc_d0 = regs->d0; | ||
851 | sc->sc_d1 = regs->d1; | ||
852 | sc->sc_a0 = regs->a0; | ||
853 | sc->sc_a1 = regs->a1; | ||
854 | sc->sc_sr = regs->sr; | ||
855 | sc->sc_pc = regs->pc; | ||
856 | sc->sc_formatvec = regs->format << 12 | regs->vector; | ||
857 | save_a5_state(sc, regs); | ||
858 | save_fpu_state(sc, regs); | ||
859 | } | ||
860 | |||
861 | static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs) | ||
862 | { | ||
863 | struct switch_stack *sw = (struct switch_stack *)regs - 1; | ||
864 | greg_t __user *gregs = uc->uc_mcontext.gregs; | ||
865 | int err = 0; | ||
866 | |||
867 | err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version); | ||
868 | err |= __put_user(regs->d0, &gregs[0]); | ||
869 | err |= __put_user(regs->d1, &gregs[1]); | ||
870 | err |= __put_user(regs->d2, &gregs[2]); | ||
871 | err |= __put_user(regs->d3, &gregs[3]); | ||
872 | err |= __put_user(regs->d4, &gregs[4]); | ||
873 | err |= __put_user(regs->d5, &gregs[5]); | ||
874 | err |= __put_user(sw->d6, &gregs[6]); | ||
875 | err |= __put_user(sw->d7, &gregs[7]); | ||
876 | err |= __put_user(regs->a0, &gregs[8]); | ||
877 | err |= __put_user(regs->a1, &gregs[9]); | ||
878 | err |= __put_user(regs->a2, &gregs[10]); | ||
879 | err |= __put_user(sw->a3, &gregs[11]); | ||
880 | err |= __put_user(sw->a4, &gregs[12]); | ||
881 | err |= __put_user(sw->a5, &gregs[13]); | ||
882 | err |= __put_user(sw->a6, &gregs[14]); | ||
883 | err |= __put_user(rdusp(), &gregs[15]); | ||
884 | err |= __put_user(regs->pc, &gregs[16]); | ||
885 | err |= __put_user(regs->sr, &gregs[17]); | ||
886 | err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec); | ||
887 | err |= rt_save_fpu_state(uc, regs); | ||
888 | return err; | ||
889 | } | ||
890 | |||
891 | static inline void __user * | ||
892 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) | ||
893 | { | ||
894 | unsigned long usp; | ||
895 | |||
896 | /* Default to using normal stack. */ | ||
897 | usp = rdusp(); | ||
898 | |||
899 | /* This is the X/Open sanctioned signal stack switching. */ | ||
900 | if (ka->sa.sa_flags & SA_ONSTACK) { | ||
901 | if (!sas_ss_flags(usp)) | ||
902 | usp = current->sas_ss_sp + current->sas_ss_size; | ||
903 | } | ||
904 | return (void __user *)((usp - frame_size) & -8UL); | ||
905 | } | ||
906 | |||
907 | static int setup_frame (int sig, struct k_sigaction *ka, | ||
908 | sigset_t *set, struct pt_regs *regs) | ||
909 | { | ||
910 | struct sigframe __user *frame; | ||
911 | int fsize = frame_extra_sizes(regs->format); | ||
912 | struct sigcontext context; | ||
913 | int err = 0; | ||
914 | |||
915 | if (fsize < 0) { | ||
916 | #ifdef DEBUG | ||
917 | printk ("setup_frame: Unknown frame format %#x\n", | ||
918 | regs->format); | ||
919 | #endif | ||
920 | goto give_sigsegv; | ||
921 | } | ||
922 | |||
923 | frame = get_sigframe(ka, regs, sizeof(*frame) + fsize); | ||
924 | |||
925 | if (fsize) | ||
926 | err |= copy_to_user (frame + 1, regs + 1, fsize); | ||
927 | |||
928 | err |= __put_user((current_thread_info()->exec_domain | ||
929 | && current_thread_info()->exec_domain->signal_invmap | ||
930 | && sig < 32 | ||
931 | ? current_thread_info()->exec_domain->signal_invmap[sig] | ||
932 | : sig), | ||
933 | &frame->sig); | ||
934 | |||
935 | err |= __put_user(regs->vector, &frame->code); | ||
936 | err |= __put_user(&frame->sc, &frame->psc); | ||
937 | |||
938 | if (_NSIG_WORDS > 1) | ||
939 | err |= copy_to_user(frame->extramask, &set->sig[1], | ||
940 | sizeof(frame->extramask)); | ||
941 | |||
942 | setup_sigcontext(&context, regs, set->sig[0]); | ||
943 | err |= copy_to_user (&frame->sc, &context, sizeof(context)); | ||
944 | |||
945 | /* Set up to return from userspace. */ | ||
946 | #ifdef CONFIG_MMU | ||
947 | err |= __put_user(frame->retcode, &frame->pretcode); | ||
948 | /* moveq #,d0; trap #0 */ | ||
949 | err |= __put_user(0x70004e40 + (__NR_sigreturn << 16), | ||
950 | (long __user *)(frame->retcode)); | ||
951 | #else | ||
952 | err |= __put_user((void *) ret_from_user_signal, &frame->pretcode); | ||
953 | #endif | ||
954 | |||
955 | if (err) | ||
956 | goto give_sigsegv; | ||
957 | |||
958 | push_cache ((unsigned long) &frame->retcode); | ||
959 | |||
960 | /* | ||
961 | * Set up registers for signal handler. All the state we are about | ||
962 | * to destroy is successfully copied to sigframe. | ||
963 | */ | ||
964 | wrusp ((unsigned long) frame); | ||
965 | regs->pc = (unsigned long) ka->sa.sa_handler; | ||
966 | adjustformat(regs); | ||
967 | |||
968 | /* | ||
969 | * This is subtle; if we build more than one sigframe, all but the | ||
970 | * first one will see frame format 0 and have fsize == 0, so we won't | ||
971 | * screw stkadj. | ||
972 | */ | ||
973 | if (fsize) | ||
974 | regs->stkadj = fsize; | ||
975 | |||
976 | /* Prepare to skip over the extra stuff in the exception frame. */ | ||
977 | if (regs->stkadj) { | ||
978 | struct pt_regs *tregs = | ||
979 | (struct pt_regs *)((ulong)regs + regs->stkadj); | ||
980 | #ifdef DEBUG | ||
981 | printk("Performing stackadjust=%04x\n", regs->stkadj); | ||
982 | #endif | ||
983 | /* This must be copied with decreasing addresses to | ||
984 | handle overlaps. */ | ||
985 | tregs->vector = 0; | ||
986 | tregs->format = 0; | ||
987 | tregs->pc = regs->pc; | ||
988 | tregs->sr = regs->sr; | ||
989 | } | ||
990 | return 0; | ||
991 | |||
992 | give_sigsegv: | ||
993 | force_sigsegv(sig, current); | ||
994 | return err; | ||
995 | } | ||
996 | |||
997 | static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info, | ||
998 | sigset_t *set, struct pt_regs *regs) | ||
999 | { | ||
1000 | struct rt_sigframe __user *frame; | ||
1001 | int fsize = frame_extra_sizes(regs->format); | ||
1002 | int err = 0; | ||
1003 | |||
1004 | if (fsize < 0) { | ||
1005 | #ifdef DEBUG | ||
1006 | printk ("setup_frame: Unknown frame format %#x\n", | ||
1007 | regs->format); | ||
1008 | #endif | ||
1009 | goto give_sigsegv; | ||
1010 | } | ||
1011 | |||
1012 | frame = get_sigframe(ka, regs, sizeof(*frame)); | ||
1013 | |||
1014 | if (fsize) | ||
1015 | err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize); | ||
1016 | |||
1017 | err |= __put_user((current_thread_info()->exec_domain | ||
1018 | && current_thread_info()->exec_domain->signal_invmap | ||
1019 | && sig < 32 | ||
1020 | ? current_thread_info()->exec_domain->signal_invmap[sig] | ||
1021 | : sig), | ||
1022 | &frame->sig); | ||
1023 | err |= __put_user(&frame->info, &frame->pinfo); | ||
1024 | err |= __put_user(&frame->uc, &frame->puc); | ||
1025 | err |= copy_siginfo_to_user(&frame->info, info); | ||
1026 | |||
1027 | /* Create the ucontext. */ | ||
1028 | err |= __put_user(0, &frame->uc.uc_flags); | ||
1029 | err |= __put_user(NULL, &frame->uc.uc_link); | ||
1030 | err |= __put_user((void __user *)current->sas_ss_sp, | ||
1031 | &frame->uc.uc_stack.ss_sp); | ||
1032 | err |= __put_user(sas_ss_flags(rdusp()), | ||
1033 | &frame->uc.uc_stack.ss_flags); | ||
1034 | err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); | ||
1035 | err |= rt_setup_ucontext(&frame->uc, regs); | ||
1036 | err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set)); | ||
1037 | |||
1038 | /* Set up to return from userspace. */ | ||
1039 | #ifdef CONFIG_MMU | ||
1040 | err |= __put_user(frame->retcode, &frame->pretcode); | ||
1041 | #ifdef __mcoldfire__ | ||
1042 | /* movel #__NR_rt_sigreturn,d0; trap #0 */ | ||
1043 | err |= __put_user(0x203c0000, (long __user *)(frame->retcode + 0)); | ||
1044 | err |= __put_user(0x00004e40 + (__NR_rt_sigreturn << 16), | ||
1045 | (long __user *)(frame->retcode + 4)); | ||
1046 | #else | ||
1047 | /* moveq #,d0; notb d0; trap #0 */ | ||
1048 | err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16), | ||
1049 | (long __user *)(frame->retcode + 0)); | ||
1050 | err |= __put_user(0x4e40, (short __user *)(frame->retcode + 4)); | ||
1051 | #endif | ||
1052 | #else | ||
1053 | err |= __put_user((void *) ret_from_user_rt_signal, &frame->pretcode); | ||
1054 | #endif /* CONFIG_MMU */ | ||
1055 | |||
1056 | if (err) | ||
1057 | goto give_sigsegv; | ||
1058 | |||
1059 | push_cache ((unsigned long) &frame->retcode); | ||
1060 | |||
1061 | /* | ||
1062 | * Set up registers for signal handler. All the state we are about | ||
1063 | * to destroy is successfully copied to sigframe. | ||
1064 | */ | ||
1065 | wrusp ((unsigned long) frame); | ||
1066 | regs->pc = (unsigned long) ka->sa.sa_handler; | ||
1067 | adjustformat(regs); | ||
1068 | |||
1069 | /* | ||
1070 | * This is subtle; if we build more than one sigframe, all but the | ||
1071 | * first one will see frame format 0 and have fsize == 0, so we won't | ||
1072 | * screw stkadj. | ||
1073 | */ | ||
1074 | if (fsize) | ||
1075 | regs->stkadj = fsize; | ||
1076 | |||
1077 | /* Prepare to skip over the extra stuff in the exception frame. */ | ||
1078 | if (regs->stkadj) { | ||
1079 | struct pt_regs *tregs = | ||
1080 | (struct pt_regs *)((ulong)regs + regs->stkadj); | ||
1081 | #ifdef DEBUG | ||
1082 | printk("Performing stackadjust=%04x\n", regs->stkadj); | ||
1083 | #endif | ||
1084 | /* This must be copied with decreasing addresses to | ||
1085 | handle overlaps. */ | ||
1086 | tregs->vector = 0; | ||
1087 | tregs->format = 0; | ||
1088 | tregs->pc = regs->pc; | ||
1089 | tregs->sr = regs->sr; | ||
1090 | } | ||
1091 | return 0; | ||
1092 | |||
1093 | give_sigsegv: | ||
1094 | force_sigsegv(sig, current); | ||
1095 | return err; | ||
1096 | } | ||
1097 | |||
1098 | static inline void | ||
1099 | handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler) | ||
1100 | { | ||
1101 | switch (regs->d0) { | ||
1102 | case -ERESTARTNOHAND: | ||
1103 | if (!has_handler) | ||
1104 | goto do_restart; | ||
1105 | regs->d0 = -EINTR; | ||
1106 | break; | ||
1107 | |||
1108 | case -ERESTART_RESTARTBLOCK: | ||
1109 | if (!has_handler) { | ||
1110 | regs->d0 = __NR_restart_syscall; | ||
1111 | regs->pc -= 2; | ||
1112 | break; | ||
1113 | } | ||
1114 | regs->d0 = -EINTR; | ||
1115 | break; | ||
1116 | |||
1117 | case -ERESTARTSYS: | ||
1118 | if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) { | ||
1119 | regs->d0 = -EINTR; | ||
1120 | break; | ||
1121 | } | ||
1122 | /* fallthrough */ | ||
1123 | case -ERESTARTNOINTR: | ||
1124 | do_restart: | ||
1125 | regs->d0 = regs->orig_d0; | ||
1126 | regs->pc -= 2; | ||
1127 | break; | ||
1128 | } | ||
1129 | } | ||
1130 | |||
1131 | /* | ||
1132 | * OK, we're invoking a handler | ||
1133 | */ | ||
1134 | static void | ||
1135 | handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info, | ||
1136 | sigset_t *oldset, struct pt_regs *regs) | ||
1137 | { | ||
1138 | int err; | ||
1139 | /* are we from a system call? */ | ||
1140 | if (regs->orig_d0 >= 0) | ||
1141 | /* If so, check system call restarting.. */ | ||
1142 | handle_restart(regs, ka, 1); | ||
1143 | |||
1144 | /* set up the stack frame */ | ||
1145 | if (ka->sa.sa_flags & SA_SIGINFO) | ||
1146 | err = setup_rt_frame(sig, ka, info, oldset, regs); | ||
1147 | else | ||
1148 | err = setup_frame(sig, ka, oldset, regs); | ||
1149 | |||
1150 | if (err) | ||
1151 | return; | ||
1152 | |||
1153 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); | ||
1154 | if (!(ka->sa.sa_flags & SA_NODEFER)) | ||
1155 | sigaddset(¤t->blocked,sig); | ||
1156 | recalc_sigpending(); | ||
1157 | |||
1158 | if (test_thread_flag(TIF_DELAYED_TRACE)) { | ||
1159 | regs->sr &= ~0x8000; | ||
1160 | send_sig(SIGTRAP, current, 1); | ||
1161 | } | ||
1162 | |||
1163 | clear_thread_flag(TIF_RESTORE_SIGMASK); | ||
1164 | } | ||
1165 | |||
1166 | /* | ||
1167 | * Note that 'init' is a special process: it doesn't get signals it doesn't | ||
1168 | * want to handle. Thus you cannot kill init even with a SIGKILL even by | ||
1169 | * mistake. | ||
1170 | */ | ||
1171 | asmlinkage void do_signal(struct pt_regs *regs) | ||
1172 | { | ||
1173 | siginfo_t info; | ||
1174 | struct k_sigaction ka; | ||
1175 | int signr; | ||
1176 | sigset_t *oldset; | ||
1177 | |||
1178 | current->thread.esp0 = (unsigned long) regs; | ||
1179 | |||
1180 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | ||
1181 | oldset = ¤t->saved_sigmask; | ||
1182 | else | ||
1183 | oldset = ¤t->blocked; | ||
1184 | |||
1185 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | ||
1186 | if (signr > 0) { | ||
1187 | /* Whee! Actually deliver the signal. */ | ||
1188 | handle_signal(signr, &ka, &info, oldset, regs); | ||
1189 | return; | ||
1190 | } | ||
1191 | |||
1192 | /* Did we come from a system call? */ | ||
1193 | if (regs->orig_d0 >= 0) | ||
1194 | /* Restart the system call - no handlers present */ | ||
1195 | handle_restart(regs, NULL, 0); | ||
1196 | |||
1197 | /* If there's no signal to deliver, we just restore the saved mask. */ | ||
1198 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) { | ||
1199 | clear_thread_flag(TIF_RESTORE_SIGMASK); | ||
1200 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); | ||
1201 | } | ||
1202 | } | ||
diff --git a/arch/m68k/kernel/signal_mm.c b/arch/m68k/kernel/signal_mm.c
deleted file mode 100644
index cb856f9da655..000000000000
--- a/arch/m68k/kernel/signal_mm.c
+++ /dev/null
@@ -1,1115 +0,0 @@
1 | /* | ||
2 | * linux/arch/m68k/kernel/signal.c | ||
3 | * | ||
4 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file COPYING in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | |||
11 | /* | ||
12 | * Linux/m68k support by Hamish Macdonald | ||
13 | * | ||
14 | * 68060 fixes by Jesper Skov | ||
15 | * | ||
16 | * 1997-12-01 Modified for POSIX.1b signals by Andreas Schwab | ||
17 | * | ||
18 | * mathemu support by Roman Zippel | ||
19 | * (Note: fpstate in the signal context is completely ignored for the emulator | ||
20 | * and the internal floating point format is put on stack) | ||
21 | */ | ||
22 | |||
23 | /* | ||
24 | * ++roman (07/09/96): implemented signal stacks (specially for tosemu on | ||
25 | * Atari :-) Current limitation: Only one sigstack can be active at one time. | ||
26 | * If a second signal with SA_ONSTACK set arrives while working on a sigstack, | ||
27 | * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested | ||
28 | * signal handlers! | ||
29 | */ | ||
30 | |||
31 | #include <linux/sched.h> | ||
32 | #include <linux/mm.h> | ||
33 | #include <linux/kernel.h> | ||
34 | #include <linux/signal.h> | ||
35 | #include <linux/syscalls.h> | ||
36 | #include <linux/errno.h> | ||
37 | #include <linux/wait.h> | ||
38 | #include <linux/ptrace.h> | ||
39 | #include <linux/unistd.h> | ||
40 | #include <linux/stddef.h> | ||
41 | #include <linux/highuid.h> | ||
42 | #include <linux/personality.h> | ||
43 | #include <linux/tty.h> | ||
44 | #include <linux/binfmts.h> | ||
45 | #include <linux/module.h> | ||
46 | |||
47 | #include <asm/setup.h> | ||
48 | #include <asm/uaccess.h> | ||
49 | #include <asm/pgtable.h> | ||
50 | #include <asm/traps.h> | ||
51 | #include <asm/ucontext.h> | ||
52 | |||
53 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | ||
54 | |||
55 | static const int frame_extra_sizes[16] = { | ||
56 | [1] = -1, /* sizeof(((struct frame *)0)->un.fmt1), */ | ||
57 | [2] = sizeof(((struct frame *)0)->un.fmt2), | ||
58 | [3] = sizeof(((struct frame *)0)->un.fmt3), | ||
59 | #ifdef CONFIG_COLDFIRE | ||
60 | [4] = 0, | ||
61 | #else | ||
62 | [4] = sizeof(((struct frame *)0)->un.fmt4), | ||
63 | #endif | ||
64 | [5] = -1, /* sizeof(((struct frame *)0)->un.fmt5), */ | ||
65 | [6] = -1, /* sizeof(((struct frame *)0)->un.fmt6), */ | ||
66 | [7] = sizeof(((struct frame *)0)->un.fmt7), | ||
67 | [8] = -1, /* sizeof(((struct frame *)0)->un.fmt8), */ | ||
68 | [9] = sizeof(((struct frame *)0)->un.fmt9), | ||
69 | [10] = sizeof(((struct frame *)0)->un.fmta), | ||
70 | [11] = sizeof(((struct frame *)0)->un.fmtb), | ||
71 | [12] = -1, /* sizeof(((struct frame *)0)->un.fmtc), */ | ||
72 | [13] = -1, /* sizeof(((struct frame *)0)->un.fmtd), */ | ||
73 | [14] = -1, /* sizeof(((struct frame *)0)->un.fmte), */ | ||
74 | [15] = -1, /* sizeof(((struct frame *)0)->un.fmtf), */ | ||
75 | }; | ||
76 | |||
77 | int handle_kernel_fault(struct pt_regs *regs) | ||
78 | { | ||
79 | const struct exception_table_entry *fixup; | ||
80 | struct pt_regs *tregs; | ||
81 | |||
82 | /* Are we prepared to handle this kernel fault? */ | ||
83 | fixup = search_exception_tables(regs->pc); | ||
84 | if (!fixup) | ||
85 | return 0; | ||
86 | |||
87 | /* Create a new four word stack frame, discarding the old one. */ | ||
88 | regs->stkadj = frame_extra_sizes[regs->format]; | ||
89 | tregs = (struct pt_regs *)((long)regs + regs->stkadj); | ||
90 | tregs->vector = regs->vector; | ||
91 | #ifdef CONFIG_COLDFIRE | ||
92 | tregs->format = 4; | ||
93 | #else | ||
94 | tregs->format = 0; | ||
95 | #endif | ||
96 | tregs->pc = fixup->fixup; | ||
97 | tregs->sr = regs->sr; | ||
98 | |||
99 | return 1; | ||
100 | } | ||
101 | |||
102 | /* | ||
103 | * Atomically swap in the new signal mask, and wait for a signal. | ||
104 | */ | ||
105 | asmlinkage int | ||
106 | sys_sigsuspend(int unused0, int unused1, old_sigset_t mask) | ||
107 | { | ||
108 | mask &= _BLOCKABLE; | ||
109 | spin_lock_irq(¤t->sighand->siglock); | ||
110 | current->saved_sigmask = current->blocked; | ||
111 | siginitset(¤t->blocked, mask); | ||
112 | recalc_sigpending(); | ||
113 | spin_unlock_irq(¤t->sighand->siglock); | ||
114 | |||
115 | current->state = TASK_INTERRUPTIBLE; | ||
116 | schedule(); | ||
117 | set_restore_sigmask(); | ||
118 | |||
119 | return -ERESTARTNOHAND; | ||
120 | } | ||
121 | |||
122 | asmlinkage int | ||
123 | sys_sigaction(int sig, const struct old_sigaction __user *act, | ||
124 | struct old_sigaction __user *oact) | ||
125 | { | ||
126 | struct k_sigaction new_ka, old_ka; | ||
127 | int ret; | ||
128 | |||
129 | if (act) { | ||
130 | old_sigset_t mask; | ||
131 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || | ||
132 | __get_user(new_ka.sa.sa_handler, &act->sa_handler) || | ||
133 | __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || | ||
134 | __get_user(new_ka.sa.sa_flags, &act->sa_flags) || | ||
135 | __get_user(mask, &act->sa_mask)) | ||
136 | return -EFAULT; | ||
137 | siginitset(&new_ka.sa.sa_mask, mask); | ||
138 | } | ||
139 | |||
140 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | ||
141 | |||
142 | if (!ret && oact) { | ||
143 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || | ||
144 | __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || | ||
145 | __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || | ||
146 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || | ||
147 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) | ||
148 | return -EFAULT; | ||
149 | } | ||
150 | |||
151 | return ret; | ||
152 | } | ||
153 | |||
154 | asmlinkage int | ||
155 | sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss) | ||
156 | { | ||
157 | return do_sigaltstack(uss, uoss, rdusp()); | ||
158 | } | ||
159 | |||
160 | |||
161 | /* | ||
162 | * Do a signal return; undo the signal stack. | ||
163 | * | ||
164 | * Keep the return code on the stack quadword aligned! | ||
165 | * That makes the cache flush below easier. | ||
166 | */ | ||
167 | |||
168 | struct sigframe | ||
169 | { | ||
170 | char __user *pretcode; | ||
171 | int sig; | ||
172 | int code; | ||
173 | struct sigcontext __user *psc; | ||
174 | char retcode[8]; | ||
175 | unsigned long extramask[_NSIG_WORDS-1]; | ||
176 | struct sigcontext sc; | ||
177 | }; | ||
178 | |||
179 | struct rt_sigframe | ||
180 | { | ||
181 | char __user *pretcode; | ||
182 | int sig; | ||
183 | struct siginfo __user *pinfo; | ||
184 | void __user *puc; | ||
185 | char retcode[8]; | ||
186 | struct siginfo info; | ||
187 | struct ucontext uc; | ||
188 | }; | ||
189 | |||
190 | |||
191 | static unsigned char fpu_version; /* version number of fpu, set by setup_frame */ | ||
192 | |||
193 | static inline int restore_fpu_state(struct sigcontext *sc) | ||
194 | { | ||
195 | int err = 1; | ||
196 | |||
197 | if (FPU_IS_EMU) { | ||
198 | /* restore registers */ | ||
199 | memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12); | ||
200 | memcpy(current->thread.fp, sc->sc_fpregs, 24); | ||
201 | return 0; | ||
202 | } | ||
203 | |||
204 | if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) { | ||
205 | /* Verify the frame format. */ | ||
206 | if (!(CPU_IS_060 || CPU_IS_COLDFIRE) && | ||
207 | (sc->sc_fpstate[0] != fpu_version)) | ||
208 | goto out; | ||
209 | if (CPU_IS_020_OR_030) { | ||
210 | if (m68k_fputype & FPU_68881 && | ||
211 | !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4)) | ||
212 | goto out; | ||
213 | if (m68k_fputype & FPU_68882 && | ||
214 | !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4)) | ||
215 | goto out; | ||
216 | } else if (CPU_IS_040) { | ||
217 | if (!(sc->sc_fpstate[1] == 0x00 || | ||
218 | sc->sc_fpstate[1] == 0x28 || | ||
219 | sc->sc_fpstate[1] == 0x60)) | ||
220 | goto out; | ||
221 | } else if (CPU_IS_060) { | ||
222 | if (!(sc->sc_fpstate[3] == 0x00 || | ||
223 | sc->sc_fpstate[3] == 0x60 || | ||
224 | sc->sc_fpstate[3] == 0xe0)) | ||
225 | goto out; | ||
226 | } else if (CPU_IS_COLDFIRE) { | ||
227 | if (!(sc->sc_fpstate[0] == 0x00 || | ||
228 | sc->sc_fpstate[0] == 0x05 || | ||
229 | sc->sc_fpstate[0] == 0xe5)) | ||
230 | goto out; | ||
231 | } else | ||
232 | goto out; | ||
233 | |||
234 | if (CPU_IS_COLDFIRE) { | ||
235 | __asm__ volatile ("fmovemd %0,%%fp0-%%fp1\n\t" | ||
236 | "fmovel %1,%%fpcr\n\t" | ||
237 | "fmovel %2,%%fpsr\n\t" | ||
238 | "fmovel %3,%%fpiar" | ||
239 | : /* no outputs */ | ||
240 | : "m" (sc->sc_fpregs[0]), | ||
241 | "m" (sc->sc_fpcntl[0]), | ||
242 | "m" (sc->sc_fpcntl[1]), | ||
243 | "m" (sc->sc_fpcntl[2])); | ||
244 | } else { | ||
245 | __asm__ volatile (".chip 68k/68881\n\t" | ||
246 | "fmovemx %0,%%fp0-%%fp1\n\t" | ||
247 | "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t" | ||
248 | ".chip 68k" | ||
249 | : /* no outputs */ | ||
250 | : "m" (*sc->sc_fpregs), | ||
251 | "m" (*sc->sc_fpcntl)); | ||
252 | } | ||
253 | } | ||
254 | |||
255 | if (CPU_IS_COLDFIRE) { | ||
256 | __asm__ volatile ("frestore %0" : : "m" (*sc->sc_fpstate)); | ||
257 | } else { | ||
258 | __asm__ volatile (".chip 68k/68881\n\t" | ||
259 | "frestore %0\n\t" | ||
260 | ".chip 68k" | ||
261 | : : "m" (*sc->sc_fpstate)); | ||
262 | } | ||
263 | err = 0; | ||
264 | |||
265 | out: | ||
266 | return err; | ||
267 | } | ||
268 | |||
269 | #define FPCONTEXT_SIZE 216 | ||
270 | #define uc_fpstate uc_filler[0] | ||
271 | #define uc_formatvec uc_filler[FPCONTEXT_SIZE/4] | ||
272 | #define uc_extra uc_filler[FPCONTEXT_SIZE/4+1] | ||
273 | |||
274 | static inline int rt_restore_fpu_state(struct ucontext __user *uc) | ||
275 | { | ||
276 | unsigned char fpstate[FPCONTEXT_SIZE]; | ||
277 | int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0); | ||
278 | fpregset_t fpregs; | ||
279 | int err = 1; | ||
280 | |||
281 | if (FPU_IS_EMU) { | ||
282 | /* restore fpu control register */ | ||
283 | if (__copy_from_user(current->thread.fpcntl, | ||
284 | uc->uc_mcontext.fpregs.f_fpcntl, 12)) | ||
285 | goto out; | ||
286 | /* restore all other fpu register */ | ||
287 | if (__copy_from_user(current->thread.fp, | ||
288 | uc->uc_mcontext.fpregs.f_fpregs, 96)) | ||
289 | goto out; | ||
290 | return 0; | ||
291 | } | ||
292 | |||
293 | if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate)) | ||
294 | goto out; | ||
295 | if (CPU_IS_060 ? fpstate[2] : fpstate[0]) { | ||
296 | if (!(CPU_IS_060 || CPU_IS_COLDFIRE)) | ||
297 | context_size = fpstate[1]; | ||
298 | /* Verify the frame format. */ | ||
299 | if (!(CPU_IS_060 || CPU_IS_COLDFIRE) && | ||
300 | (fpstate[0] != fpu_version)) | ||
301 | goto out; | ||
302 | if (CPU_IS_020_OR_030) { | ||
303 | if (m68k_fputype & FPU_68881 && | ||
304 | !(context_size == 0x18 || context_size == 0xb4)) | ||
305 | goto out; | ||
306 | if (m68k_fputype & FPU_68882 && | ||
307 | !(context_size == 0x38 || context_size == 0xd4)) | ||
308 | goto out; | ||
309 | } else if (CPU_IS_040) { | ||
310 | if (!(context_size == 0x00 || | ||
311 | context_size == 0x28 || | ||
312 | context_size == 0x60)) | ||
313 | goto out; | ||
314 | } else if (CPU_IS_060) { | ||
315 | if (!(fpstate[3] == 0x00 || | ||
316 | fpstate[3] == 0x60 || | ||
317 | fpstate[3] == 0xe0)) | ||
318 | goto out; | ||
319 | } else if (CPU_IS_COLDFIRE) { | ||
320 | if (!(fpstate[3] == 0x00 || | ||
321 | fpstate[3] == 0x05 || | ||
322 | fpstate[3] == 0xe5)) | ||
323 | goto out; | ||
324 | } else | ||
325 | goto out; | ||
326 | if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs, | ||
327 | sizeof(fpregs))) | ||
328 | goto out; | ||
329 | |||
330 | if (CPU_IS_COLDFIRE) { | ||
331 | __asm__ volatile ("fmovemd %0,%%fp0-%%fp7\n\t" | ||
332 | "fmovel %1,%%fpcr\n\t" | ||
333 | "fmovel %2,%%fpsr\n\t" | ||
334 | "fmovel %3,%%fpiar" | ||
335 | : /* no outputs */ | ||
336 | : "m" (fpregs.f_fpregs[0]), | ||
337 | "m" (fpregs.f_fpcntl[0]), | ||
338 | "m" (fpregs.f_fpcntl[1]), | ||
339 | "m" (fpregs.f_fpcntl[2])); | ||
340 | } else { | ||
341 | __asm__ volatile (".chip 68k/68881\n\t" | ||
342 | "fmovemx %0,%%fp0-%%fp7\n\t" | ||
343 | "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t" | ||
344 | ".chip 68k" | ||
345 | : /* no outputs */ | ||
346 | : "m" (*fpregs.f_fpregs), | ||
347 | "m" (*fpregs.f_fpcntl)); | ||
348 | } | ||
349 | } | ||
350 | if (context_size && | ||
351 | __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1, | ||
352 | context_size)) | ||
353 | goto out; | ||
354 | |||
355 | if (CPU_IS_COLDFIRE) { | ||
356 | __asm__ volatile ("frestore %0" : : "m" (*fpstate)); | ||
357 | } else { | ||
358 | __asm__ volatile (".chip 68k/68881\n\t" | ||
359 | "frestore %0\n\t" | ||
360 | ".chip 68k" | ||
361 | : : "m" (*fpstate)); | ||
362 | } | ||
363 | err = 0; | ||
364 | |||
365 | out: | ||
366 | return err; | ||
367 | } | ||
368 | |||
369 | static int mangle_kernel_stack(struct pt_regs *regs, int formatvec, | ||
370 | void __user *fp) | ||
371 | { | ||
372 | int fsize = frame_extra_sizes[formatvec >> 12]; | ||
373 | if (fsize < 0) { | ||
374 | /* | ||
375 | * user process trying to return with weird frame format | ||
376 | */ | ||
377 | #ifdef DEBUG | ||
378 | printk("user process returning with weird frame format\n"); | ||
379 | #endif | ||
380 | return 1; | ||
381 | } | ||
382 | if (!fsize) { | ||
383 | regs->format = formatvec >> 12; | ||
384 | regs->vector = formatvec & 0xfff; | ||
385 | } else { | ||
386 | struct switch_stack *sw = (struct switch_stack *)regs - 1; | ||
387 | unsigned long buf[fsize / 2]; /* yes, twice as much */ | ||
388 | |||
389 | /* that'll make sure that expansion won't crap over data */ | ||
390 | if (copy_from_user(buf + fsize / 4, fp, fsize)) | ||
391 | return 1; | ||
392 | |||
393 | /* point of no return */ | ||
394 | regs->format = formatvec >> 12; | ||
395 | regs->vector = formatvec & 0xfff; | ||
396 | #define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack)) | ||
397 | __asm__ __volatile__ ( | ||
398 | #ifdef CONFIG_COLDFIRE | ||
399 | " movel %0,%/sp\n\t" | ||
400 | " bra ret_from_signal\n" | ||
401 | #else | ||
402 | " movel %0,%/a0\n\t" | ||
403 | " subl %1,%/a0\n\t" /* make room on stack */ | ||
404 | " movel %/a0,%/sp\n\t" /* set stack pointer */ | ||
405 | /* move switch_stack and pt_regs */ | ||
406 | "1: movel %0@+,%/a0@+\n\t" | ||
407 | " dbra %2,1b\n\t" | ||
408 | " lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */ | ||
409 | " lsrl #2,%1\n\t" | ||
410 | " subql #1,%1\n\t" | ||
411 | /* copy to the gap we'd made */ | ||
412 | "2: movel %4@+,%/a0@+\n\t" | ||
413 | " dbra %1,2b\n\t" | ||
414 | " bral ret_from_signal\n" | ||
415 | #endif | ||
416 | : /* no outputs, it doesn't ever return */ | ||
417 | : "a" (sw), "d" (fsize), "d" (frame_offset/4-1), | ||
418 | "n" (frame_offset), "a" (buf + fsize/4) | ||
419 | : "a0"); | ||
420 | #undef frame_offset | ||
421 | } | ||
422 | return 0; | ||
423 | } | ||
424 | |||
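The lookup driving mangle_kernel_stack() above is terse enough that a worked example helps: the top nibble of formatvec is the hardware exception-frame format, and frame_extra_sizes[] (defined earlier in this file) gives how many extra bytes of that frame were spilled into the user sigframe and now have to be re-inserted below pt_regs. A minimal host-side sketch of the indexing, using a made-up size table rather than the real frame_extra_sizes[]:

    #include <stdio.h>

    /* Illustrative only: the format nibble in formatvec selects how many extra
     * exception-frame bytes must be put back on the kernel stack.  The sizes
     * below are made-up placeholders, not the real frame_extra_sizes[] table.
     */
    static const int mock_extra_sizes[16] = {
            [0] = 0, [1] = -1, [2] = 4, [4] = 8,    /* -1 == unknown format */
    };

    int main(void)
    {
            int formatvec = (0x2 << 12) | 0x008;    /* hypothetical format 2, vector 0x008 */

            printf("format=%d vector=%#x extra=%d bytes\n",
                   formatvec >> 12, formatvec & 0xfff,
                   mock_extra_sizes[formatvec >> 12]);
            return 0;
    }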
425 | static inline int | ||
426 | restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp) | ||
427 | { | ||
428 | int formatvec; | ||
429 | struct sigcontext context; | ||
430 | int err; | ||
431 | |||
432 | /* Always make any pending restarted system calls return -EINTR */ | ||
433 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
434 | |||
435 | /* get previous context */ | ||
436 | if (copy_from_user(&context, usc, sizeof(context))) | ||
437 | goto badframe; | ||
438 | |||
439 | /* restore passed registers */ | ||
440 | regs->d0 = context.sc_d0; | ||
441 | regs->d1 = context.sc_d1; | ||
442 | regs->a0 = context.sc_a0; | ||
443 | regs->a1 = context.sc_a1; | ||
444 | regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff); | ||
445 | regs->pc = context.sc_pc; | ||
446 | regs->orig_d0 = -1; /* disable syscall checks */ | ||
447 | wrusp(context.sc_usp); | ||
448 | formatvec = context.sc_formatvec; | ||
449 | |||
450 | err = restore_fpu_state(&context); | ||
451 | |||
452 | if (err || mangle_kernel_stack(regs, formatvec, fp)) | ||
453 | goto badframe; | ||
454 | |||
455 | return 0; | ||
456 | |||
457 | badframe: | ||
458 | return 1; | ||
459 | } | ||
460 | |||
461 | static inline int | ||
462 | rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw, | ||
463 | struct ucontext __user *uc) | ||
464 | { | ||
465 | int temp; | ||
466 | greg_t __user *gregs = uc->uc_mcontext.gregs; | ||
467 | unsigned long usp; | ||
468 | int err; | ||
469 | |||
470 | /* Always make any pending restarted system calls return -EINTR */ | ||
471 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
472 | |||
473 | err = __get_user(temp, &uc->uc_mcontext.version); | ||
474 | if (temp != MCONTEXT_VERSION) | ||
475 | goto badframe; | ||
476 | /* restore passed registers */ | ||
477 | err |= __get_user(regs->d0, &gregs[0]); | ||
478 | err |= __get_user(regs->d1, &gregs[1]); | ||
479 | err |= __get_user(regs->d2, &gregs[2]); | ||
480 | err |= __get_user(regs->d3, &gregs[3]); | ||
481 | err |= __get_user(regs->d4, &gregs[4]); | ||
482 | err |= __get_user(regs->d5, &gregs[5]); | ||
483 | err |= __get_user(sw->d6, &gregs[6]); | ||
484 | err |= __get_user(sw->d7, &gregs[7]); | ||
485 | err |= __get_user(regs->a0, &gregs[8]); | ||
486 | err |= __get_user(regs->a1, &gregs[9]); | ||
487 | err |= __get_user(regs->a2, &gregs[10]); | ||
488 | err |= __get_user(sw->a3, &gregs[11]); | ||
489 | err |= __get_user(sw->a4, &gregs[12]); | ||
490 | err |= __get_user(sw->a5, &gregs[13]); | ||
491 | err |= __get_user(sw->a6, &gregs[14]); | ||
492 | err |= __get_user(usp, &gregs[15]); | ||
493 | wrusp(usp); | ||
494 | err |= __get_user(regs->pc, &gregs[16]); | ||
495 | err |= __get_user(temp, &gregs[17]); | ||
496 | regs->sr = (regs->sr & 0xff00) | (temp & 0xff); | ||
497 | regs->orig_d0 = -1; /* disable syscall checks */ | ||
498 | err |= __get_user(temp, &uc->uc_formatvec); | ||
499 | |||
500 | err |= rt_restore_fpu_state(uc); | ||
501 | |||
502 | if (err || do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT) | ||
503 | goto badframe; | ||
504 | |||
505 | if (mangle_kernel_stack(regs, temp, &uc->uc_extra)) | ||
506 | goto badframe; | ||
507 | |||
508 | return 0; | ||
509 | |||
510 | badframe: | ||
511 | return 1; | ||
512 | } | ||
513 | |||
514 | asmlinkage int do_sigreturn(unsigned long __unused) | ||
515 | { | ||
516 | struct switch_stack *sw = (struct switch_stack *) &__unused; | ||
517 | struct pt_regs *regs = (struct pt_regs *) (sw + 1); | ||
518 | unsigned long usp = rdusp(); | ||
519 | struct sigframe __user *frame = (struct sigframe __user *)(usp - 4); | ||
520 | sigset_t set; | ||
521 | |||
522 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | ||
523 | goto badframe; | ||
524 | if (__get_user(set.sig[0], &frame->sc.sc_mask) || | ||
525 | (_NSIG_WORDS > 1 && | ||
526 | __copy_from_user(&set.sig[1], &frame->extramask, | ||
527 | sizeof(frame->extramask)))) | ||
528 | goto badframe; | ||
529 | |||
530 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
531 | current->blocked = set; | ||
532 | recalc_sigpending(); | ||
533 | |||
534 | if (restore_sigcontext(regs, &frame->sc, frame + 1)) | ||
535 | goto badframe; | ||
536 | return regs->d0; | ||
537 | |||
538 | badframe: | ||
539 | force_sig(SIGSEGV, current); | ||
540 | return 0; | ||
541 | } | ||
542 | |||
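Both sigreturn entry points locate their frame at usp - 4 because the frame begins with pretcode: the handler is entered with the user stack pointer on the frame itself, and its rts pops that one long word before the trampoline traps back into the kernel. A small sketch of the arithmetic with made-up addresses (on m68k a long word is 4 bytes):

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative only: made-up addresses showing why do_sigreturn() and
     * do_rt_sigreturn() compute the frame address as usp - 4.
     */
    int main(void)
    {
            uint32_t frame = 0xef90;                   /* hypothetical struct sigframe address */
            uint32_t usp_in_handler = frame;           /* handler entered with sp at pretcode */
            uint32_t usp_at_trap = usp_in_handler + 4; /* rts popped pretcode */

            printf("recovered frame = %#x\n", (unsigned)(usp_at_trap - 4));
            return 0;
    }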
543 | asmlinkage int do_rt_sigreturn(unsigned long __unused) | ||
544 | { | ||
545 | struct switch_stack *sw = (struct switch_stack *) &__unused; | ||
546 | struct pt_regs *regs = (struct pt_regs *) (sw + 1); | ||
547 | unsigned long usp = rdusp(); | ||
548 | struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4); | ||
549 | sigset_t set; | ||
550 | |||
551 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | ||
552 | goto badframe; | ||
553 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) | ||
554 | goto badframe; | ||
555 | |||
556 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
557 | current->blocked = set; | ||
558 | recalc_sigpending(); | ||
559 | |||
560 | if (rt_restore_ucontext(regs, sw, &frame->uc)) | ||
561 | goto badframe; | ||
562 | return regs->d0; | ||
563 | |||
564 | badframe: | ||
565 | force_sig(SIGSEGV, current); | ||
566 | return 0; | ||
567 | } | ||
568 | |||
569 | /* | ||
570 | * Set up a signal frame. | ||
571 | */ | ||
572 | |||
573 | static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs) | ||
574 | { | ||
575 | if (FPU_IS_EMU) { | ||
576 | /* save registers */ | ||
577 | memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12); | ||
578 | memcpy(sc->sc_fpregs, current->thread.fp, 24); | ||
579 | return; | ||
580 | } | ||
581 | |||
582 | if (CPU_IS_COLDFIRE) { | ||
583 | __asm__ volatile ("fsave %0" | ||
584 | : : "m" (*sc->sc_fpstate) : "memory"); | ||
585 | } else { | ||
586 | __asm__ volatile (".chip 68k/68881\n\t" | ||
587 | "fsave %0\n\t" | ||
588 | ".chip 68k" | ||
589 | : : "m" (*sc->sc_fpstate) : "memory"); | ||
590 | } | ||
591 | |||
592 | if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) { | ||
593 | fpu_version = sc->sc_fpstate[0]; | ||
594 | if (CPU_IS_020_OR_030 && | ||
595 | regs->vector >= (VEC_FPBRUC * 4) && | ||
596 | regs->vector <= (VEC_FPNAN * 4)) { | ||
597 | /* Clear pending exception in 68882 idle frame */ | ||
598 | if (*(unsigned short *) sc->sc_fpstate == 0x1f38) | ||
599 | sc->sc_fpstate[0x38] |= 1 << 3; | ||
600 | } | ||
601 | |||
602 | if (CPU_IS_COLDFIRE) { | ||
603 | __asm__ volatile ("fmovemd %%fp0-%%fp1,%0\n\t" | ||
604 | "fmovel %%fpcr,%1\n\t" | ||
605 | "fmovel %%fpsr,%2\n\t" | ||
606 | "fmovel %%fpiar,%3" | ||
607 | : "=m" (sc->sc_fpregs[0]), | ||
608 | "=m" (sc->sc_fpcntl[0]), | ||
609 | "=m" (sc->sc_fpcntl[1]), | ||
610 | "=m" (sc->sc_fpcntl[2]) | ||
611 | : /* no inputs */ | ||
612 | : "memory"); | ||
613 | } else { | ||
614 | __asm__ volatile (".chip 68k/68881\n\t" | ||
615 | "fmovemx %%fp0-%%fp1,%0\n\t" | ||
616 | "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t" | ||
617 | ".chip 68k" | ||
618 | : "=m" (*sc->sc_fpregs), | ||
619 | "=m" (*sc->sc_fpcntl) | ||
620 | : /* no inputs */ | ||
621 | : "memory"); | ||
622 | } | ||
623 | } | ||
624 | } | ||
625 | |||
626 | static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs) | ||
627 | { | ||
628 | unsigned char fpstate[FPCONTEXT_SIZE]; | ||
629 | int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0); | ||
630 | int err = 0; | ||
631 | |||
632 | if (FPU_IS_EMU) { | ||
633 | /* save fpu control register */ | ||
634 | err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl, | ||
635 | current->thread.fpcntl, 12); | ||
636 | /* save all other fpu register */ | ||
637 | err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs, | ||
638 | current->thread.fp, 96); | ||
639 | return err; | ||
640 | } | ||
641 | |||
642 | if (CPU_IS_COLDFIRE) { | ||
643 | __asm__ volatile ("fsave %0" : : "m" (*fpstate) : "memory"); | ||
644 | } else { | ||
645 | __asm__ volatile (".chip 68k/68881\n\t" | ||
646 | "fsave %0\n\t" | ||
647 | ".chip 68k" | ||
648 | : : "m" (*fpstate) : "memory"); | ||
649 | } | ||
650 | |||
651 | err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate); | ||
652 | if (CPU_IS_060 ? fpstate[2] : fpstate[0]) { | ||
653 | fpregset_t fpregs; | ||
654 | if (!(CPU_IS_060 || CPU_IS_COLDFIRE)) | ||
655 | context_size = fpstate[1]; | ||
656 | fpu_version = fpstate[0]; | ||
657 | if (CPU_IS_020_OR_030 && | ||
658 | regs->vector >= (VEC_FPBRUC * 4) && | ||
659 | regs->vector <= (VEC_FPNAN * 4)) { | ||
660 | /* Clear pending exception in 68882 idle frame */ | ||
661 | if (*(unsigned short *) fpstate == 0x1f38) | ||
662 | fpstate[0x38] |= 1 << 3; | ||
663 | } | ||
664 | if (CPU_IS_COLDFIRE) { | ||
665 | __asm__ volatile ("fmovemd %%fp0-%%fp7,%0\n\t" | ||
666 | "fmovel %%fpcr,%1\n\t" | ||
667 | "fmovel %%fpsr,%2\n\t" | ||
668 | "fmovel %%fpiar,%3" | ||
669 | : "=m" (fpregs.f_fpregs[0]), | ||
670 | "=m" (fpregs.f_fpcntl[0]), | ||
671 | "=m" (fpregs.f_fpcntl[1]), | ||
672 | "=m" (fpregs.f_fpcntl[2]) | ||
673 | : /* no inputs */ | ||
674 | : "memory"); | ||
675 | } else { | ||
676 | __asm__ volatile (".chip 68k/68881\n\t" | ||
677 | "fmovemx %%fp0-%%fp7,%0\n\t" | ||
678 | "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t" | ||
679 | ".chip 68k" | ||
680 | : "=m" (*fpregs.f_fpregs), | ||
681 | "=m" (*fpregs.f_fpcntl) | ||
682 | : /* no inputs */ | ||
683 | : "memory"); | ||
684 | } | ||
685 | err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs, | ||
686 | sizeof(fpregs)); | ||
687 | } | ||
688 | if (context_size) | ||
689 | err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4, | ||
690 | context_size); | ||
691 | return err; | ||
692 | } | ||
693 | |||
694 | static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs, | ||
695 | unsigned long mask) | ||
696 | { | ||
697 | sc->sc_mask = mask; | ||
698 | sc->sc_usp = rdusp(); | ||
699 | sc->sc_d0 = regs->d0; | ||
700 | sc->sc_d1 = regs->d1; | ||
701 | sc->sc_a0 = regs->a0; | ||
702 | sc->sc_a1 = regs->a1; | ||
703 | sc->sc_sr = regs->sr; | ||
704 | sc->sc_pc = regs->pc; | ||
705 | sc->sc_formatvec = regs->format << 12 | regs->vector; | ||
706 | save_fpu_state(sc, regs); | ||
707 | } | ||
708 | |||
709 | static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs) | ||
710 | { | ||
711 | struct switch_stack *sw = (struct switch_stack *)regs - 1; | ||
712 | greg_t __user *gregs = uc->uc_mcontext.gregs; | ||
713 | int err = 0; | ||
714 | |||
715 | err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version); | ||
716 | err |= __put_user(regs->d0, &gregs[0]); | ||
717 | err |= __put_user(regs->d1, &gregs[1]); | ||
718 | err |= __put_user(regs->d2, &gregs[2]); | ||
719 | err |= __put_user(regs->d3, &gregs[3]); | ||
720 | err |= __put_user(regs->d4, &gregs[4]); | ||
721 | err |= __put_user(regs->d5, &gregs[5]); | ||
722 | err |= __put_user(sw->d6, &gregs[6]); | ||
723 | err |= __put_user(sw->d7, &gregs[7]); | ||
724 | err |= __put_user(regs->a0, &gregs[8]); | ||
725 | err |= __put_user(regs->a1, &gregs[9]); | ||
726 | err |= __put_user(regs->a2, &gregs[10]); | ||
727 | err |= __put_user(sw->a3, &gregs[11]); | ||
728 | err |= __put_user(sw->a4, &gregs[12]); | ||
729 | err |= __put_user(sw->a5, &gregs[13]); | ||
730 | err |= __put_user(sw->a6, &gregs[14]); | ||
731 | err |= __put_user(rdusp(), &gregs[15]); | ||
732 | err |= __put_user(regs->pc, &gregs[16]); | ||
733 | err |= __put_user(regs->sr, &gregs[17]); | ||
734 | err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec); | ||
735 | err |= rt_save_fpu_state(uc, regs); | ||
736 | return err; | ||
737 | } | ||
738 | |||
739 | static inline void push_cache (unsigned long vaddr) | ||
740 | { | ||
741 | /* | ||
742 | * Using the old cache_push_v() was really a big waste. | ||
743 | * | ||
744 | * What we are trying to do is to flush 8 bytes to ram. | ||
745 | * Flushing 2 cache lines of 16 bytes is much cheaper than | ||
746 | * flushing 1 or 2 pages, as previously done in | ||
747 | * cache_push_v(). | ||
748 | * Jes | ||
749 | */ | ||
750 | if (CPU_IS_040) { | ||
751 | unsigned long temp; | ||
752 | |||
753 | __asm__ __volatile__ (".chip 68040\n\t" | ||
754 | "nop\n\t" | ||
755 | "ptestr (%1)\n\t" | ||
756 | "movec %%mmusr,%0\n\t" | ||
757 | ".chip 68k" | ||
758 | : "=r" (temp) | ||
759 | : "a" (vaddr)); | ||
760 | |||
761 | temp &= PAGE_MASK; | ||
762 | temp |= vaddr & ~PAGE_MASK; | ||
763 | |||
764 | __asm__ __volatile__ (".chip 68040\n\t" | ||
765 | "nop\n\t" | ||
766 | "cpushl %%bc,(%0)\n\t" | ||
767 | ".chip 68k" | ||
768 | : : "a" (temp)); | ||
769 | } | ||
770 | else if (CPU_IS_060) { | ||
771 | unsigned long temp; | ||
772 | __asm__ __volatile__ (".chip 68060\n\t" | ||
773 | "plpar (%0)\n\t" | ||
774 | ".chip 68k" | ||
775 | : "=a" (temp) | ||
776 | : "0" (vaddr)); | ||
777 | __asm__ __volatile__ (".chip 68060\n\t" | ||
778 | "cpushl %%bc,(%0)\n\t" | ||
779 | ".chip 68k" | ||
780 | : : "a" (temp)); | ||
781 | } else if (!CPU_IS_COLDFIRE) { | ||
782 | /* | ||
783 | * 68030/68020 have no writeback cache; | ||
784 | * still need to clear icache. | ||
785 | * Note that vaddr is guaranteed to be long word aligned. | ||
786 | */ | ||
787 | unsigned long temp; | ||
788 | asm volatile ("movec %%cacr,%0" : "=r" (temp)); | ||
789 | temp += 4; | ||
790 | asm volatile ("movec %0,%%caar\n\t" | ||
791 | "movec %1,%%cacr" | ||
792 | : : "r" (vaddr), "r" (temp)); | ||
793 | asm volatile ("movec %0,%%caar\n\t" | ||
794 | "movec %1,%%cacr" | ||
795 | : : "r" (vaddr + 4), "r" (temp)); | ||
796 | } | ||
797 | } | ||
798 | |||
799 | static inline void __user * | ||
800 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) | ||
801 | { | ||
802 | unsigned long usp; | ||
803 | |||
804 | /* Default to using normal stack. */ | ||
805 | usp = rdusp(); | ||
806 | |||
807 | /* This is the X/Open sanctioned signal stack switching. */ | ||
808 | if (ka->sa.sa_flags & SA_ONSTACK) { | ||
809 | if (!sas_ss_flags(usp)) | ||
810 | usp = current->sas_ss_sp + current->sas_ss_size; | ||
811 | } | ||
812 | return (void __user *)((usp - frame_size) & -8UL); | ||
813 | } | ||
814 | |||
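The & -8UL at the end of get_sigframe() rounds the frame start down to an 8-byte boundary, which keeps the retcode inside it quadword aligned for the cache flush later on. A tiny sketch with hypothetical numbers:

    #include <stdio.h>

    /* Illustrative only: the rounding done by get_sigframe(), with made-up
     * values for the user stack pointer and frame size.
     */
    int main(void)
    {
            unsigned long usp = 0xeff0;     /* hypothetical user stack pointer */
            unsigned long frame_size = 92;  /* hypothetical sizeof(struct sigframe) */

            printf("frame at %#lx\n", (usp - frame_size) & -8UL);  /* prints 0xef90 */
            return 0;
    }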
815 | static int setup_frame (int sig, struct k_sigaction *ka, | ||
816 | sigset_t *set, struct pt_regs *regs) | ||
817 | { | ||
818 | struct sigframe __user *frame; | ||
819 | int fsize = frame_extra_sizes[regs->format]; | ||
820 | struct sigcontext context; | ||
821 | int err = 0; | ||
822 | |||
823 | if (fsize < 0) { | ||
824 | #ifdef DEBUG | ||
825 | printk ("setup_frame: Unknown frame format %#x\n", | ||
826 | regs->format); | ||
827 | #endif | ||
828 | goto give_sigsegv; | ||
829 | } | ||
830 | |||
831 | frame = get_sigframe(ka, regs, sizeof(*frame) + fsize); | ||
832 | |||
833 | if (fsize) | ||
834 | err |= copy_to_user (frame + 1, regs + 1, fsize); | ||
835 | |||
836 | err |= __put_user((current_thread_info()->exec_domain | ||
837 | && current_thread_info()->exec_domain->signal_invmap | ||
838 | && sig < 32 | ||
839 | ? current_thread_info()->exec_domain->signal_invmap[sig] | ||
840 | : sig), | ||
841 | &frame->sig); | ||
842 | |||
843 | err |= __put_user(regs->vector, &frame->code); | ||
844 | err |= __put_user(&frame->sc, &frame->psc); | ||
845 | |||
846 | if (_NSIG_WORDS > 1) | ||
847 | err |= copy_to_user(frame->extramask, &set->sig[1], | ||
848 | sizeof(frame->extramask)); | ||
849 | |||
850 | setup_sigcontext(&context, regs, set->sig[0]); | ||
851 | err |= copy_to_user (&frame->sc, &context, sizeof(context)); | ||
852 | |||
853 | /* Set up to return from userspace. */ | ||
854 | err |= __put_user(frame->retcode, &frame->pretcode); | ||
855 | /* moveq #,d0; trap #0 */ | ||
856 | err |= __put_user(0x70004e40 + (__NR_sigreturn << 16), | ||
857 | (long __user *)(frame->retcode)); | ||
858 | |||
859 | if (err) | ||
860 | goto give_sigsegv; | ||
861 | |||
862 | push_cache ((unsigned long) &frame->retcode); | ||
863 | |||
864 | /* | ||
865 | * Set up registers for signal handler. All the state we are about | ||
866 | * to destroy is successfully copied to sigframe. | ||
867 | */ | ||
868 | wrusp ((unsigned long) frame); | ||
869 | regs->pc = (unsigned long) ka->sa.sa_handler; | ||
870 | |||
871 | /* | ||
872 | * This is subtle; if we build more than one sigframe, all but the | ||
873 | * first one will see frame format 0 and have fsize == 0, so we won't | ||
874 | * screw stkadj. | ||
875 | */ | ||
876 | if (fsize) | ||
877 | regs->stkadj = fsize; | ||
878 | |||
879 | /* Prepare to skip over the extra stuff in the exception frame. */ | ||
880 | if (regs->stkadj) { | ||
881 | struct pt_regs *tregs = | ||
882 | (struct pt_regs *)((ulong)regs + regs->stkadj); | ||
883 | #ifdef DEBUG | ||
884 | printk("Performing stackadjust=%04x\n", regs->stkadj); | ||
885 | #endif | ||
886 | /* This must be copied with decreasing addresses to | ||
887 | handle overlaps. */ | ||
888 | tregs->vector = 0; | ||
889 | tregs->format = 0; | ||
890 | tregs->pc = regs->pc; | ||
891 | tregs->sr = regs->sr; | ||
892 | } | ||
893 | return 0; | ||
894 | |||
895 | give_sigsegv: | ||
896 | force_sigsegv(sig, current); | ||
897 | return err; | ||
898 | } | ||
899 | |||
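The long word written into frame->retcode above packs two 16-bit opcodes: 0x7000 + n is moveq #n,%d0 and 0x4e40 is trap #0. A host-side sketch of the decode, assuming __NR_sigreturn is 119 as on m68k:

    #include <stdio.h>

    /* Illustrative only: decode the trampoline constant built by setup_frame().
     * On big-endian m68k the long 0x70004e40 + (nr << 16) stores the 16-bit
     * words 0x7000 + nr ("moveq #nr,%d0") followed by 0x4e40 ("trap #0").
     */
    int main(void)
    {
            unsigned long nr = 119;  /* assumed value of __NR_sigreturn */
            unsigned long insn = 0x70004e40 + (nr << 16);

            printf("word0 = %#06lx  (moveq #%lu,%%d0)\n", insn >> 16, nr);
            printf("word1 = %#06lx  (trap #0)\n", insn & 0xffff);
            return 0;
    }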
900 | static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info, | ||
901 | sigset_t *set, struct pt_regs *regs) | ||
902 | { | ||
903 | struct rt_sigframe __user *frame; | ||
904 | int fsize = frame_extra_sizes[regs->format]; | ||
905 | int err = 0; | ||
906 | |||
907 | if (fsize < 0) { | ||
908 | #ifdef DEBUG | ||
909 | printk ("setup_rt_frame: Unknown frame format %#x\n", | ||
910 | regs->format); | ||
911 | #endif | ||
912 | goto give_sigsegv; | ||
913 | } | ||
914 | |||
915 | frame = get_sigframe(ka, regs, sizeof(*frame)); | ||
916 | |||
917 | if (fsize) | ||
918 | err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize); | ||
919 | |||
920 | err |= __put_user((current_thread_info()->exec_domain | ||
921 | && current_thread_info()->exec_domain->signal_invmap | ||
922 | && sig < 32 | ||
923 | ? current_thread_info()->exec_domain->signal_invmap[sig] | ||
924 | : sig), | ||
925 | &frame->sig); | ||
926 | err |= __put_user(&frame->info, &frame->pinfo); | ||
927 | err |= __put_user(&frame->uc, &frame->puc); | ||
928 | err |= copy_siginfo_to_user(&frame->info, info); | ||
929 | |||
930 | /* Create the ucontext. */ | ||
931 | err |= __put_user(0, &frame->uc.uc_flags); | ||
932 | err |= __put_user(NULL, &frame->uc.uc_link); | ||
933 | err |= __put_user((void __user *)current->sas_ss_sp, | ||
934 | &frame->uc.uc_stack.ss_sp); | ||
935 | err |= __put_user(sas_ss_flags(rdusp()), | ||
936 | &frame->uc.uc_stack.ss_flags); | ||
937 | err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); | ||
938 | err |= rt_setup_ucontext(&frame->uc, regs); | ||
939 | err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set)); | ||
940 | |||
941 | /* Set up to return from userspace. */ | ||
942 | err |= __put_user(frame->retcode, &frame->pretcode); | ||
943 | #ifdef __mcoldfire__ | ||
944 | /* movel #__NR_rt_sigreturn,d0; trap #0 */ | ||
945 | err |= __put_user(0x203c0000, (long __user *)(frame->retcode + 0)); | ||
946 | err |= __put_user(0x00004e40 + (__NR_rt_sigreturn << 16), | ||
947 | (long __user *)(frame->retcode + 4)); | ||
948 | #else | ||
949 | /* moveq #,d0; notb d0; trap #0 */ | ||
950 | err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16), | ||
951 | (long __user *)(frame->retcode + 0)); | ||
952 | err |= __put_user(0x4e40, (short __user *)(frame->retcode + 4)); | ||
953 | #endif | ||
954 | |||
955 | if (err) | ||
956 | goto give_sigsegv; | ||
957 | |||
958 | push_cache ((unsigned long) &frame->retcode); | ||
959 | |||
960 | /* | ||
961 | * Set up registers for signal handler. All the state we are about | ||
962 | * to destroy is successfully copied to sigframe. | ||
963 | */ | ||
964 | wrusp ((unsigned long) frame); | ||
965 | regs->pc = (unsigned long) ka->sa.sa_handler; | ||
966 | |||
967 | /* | ||
968 | * This is subtle; if we build more than one sigframe, all but the | ||
969 | * first one will see frame format 0 and have fsize == 0, so we won't | ||
970 | * screw stkadj. | ||
971 | */ | ||
972 | if (fsize) | ||
973 | regs->stkadj = fsize; | ||
974 | |||
975 | /* Prepare to skip over the extra stuff in the exception frame. */ | ||
976 | if (regs->stkadj) { | ||
977 | struct pt_regs *tregs = | ||
978 | (struct pt_regs *)((ulong)regs + regs->stkadj); | ||
979 | #ifdef DEBUG | ||
980 | printk("Performing stackadjust=%04x\n", regs->stkadj); | ||
981 | #endif | ||
982 | /* This must be copied with decreasing addresses to | ||
983 | handle overlaps. */ | ||
984 | tregs->vector = 0; | ||
985 | tregs->format = 0; | ||
986 | tregs->pc = regs->pc; | ||
987 | tregs->sr = regs->sr; | ||
988 | } | ||
989 | return 0; | ||
990 | |||
991 | give_sigsegv: | ||
992 | force_sigsegv(sig, current); | ||
993 | return err; | ||
994 | } | ||
995 | |||
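The two rt trampolines differ because moveq only takes a sign-extended 8-bit immediate: __NR_rt_sigreturn does not fit, so the classic 68k sequence loads its complement and flips the low byte with not.b, while ColdFire simply uses a 32-bit movel immediate. A sketch of the classic sequence's effect, assuming __NR_rt_sigreturn is 173:

    #include <stdio.h>

    /* Illustrative only: emulate "moveq #(__NR_rt_sigreturn ^ 0xff),%d0; not.b %d0"
     * to show that d0 ends up holding the syscall number (assumed to be 173).
     */
    int main(void)
    {
            int nr = 173;
            unsigned char imm = nr ^ 0xff;        /* what moveq actually loads */
            long d0 = (signed char)imm;           /* moveq sign-extends to 32 bits */

            d0 = (d0 & ~0xffL) | (~d0 & 0xff);    /* not.b %d0: flip low byte only */
            printf("moveq #%#x; not.b %%d0 -> d0 = %ld\n", imm, d0);
            return 0;
    }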
996 | static inline void | ||
997 | handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler) | ||
998 | { | ||
999 | switch (regs->d0) { | ||
1000 | case -ERESTARTNOHAND: | ||
1001 | if (!has_handler) | ||
1002 | goto do_restart; | ||
1003 | regs->d0 = -EINTR; | ||
1004 | break; | ||
1005 | |||
1006 | case -ERESTART_RESTARTBLOCK: | ||
1007 | if (!has_handler) { | ||
1008 | regs->d0 = __NR_restart_syscall; | ||
1009 | regs->pc -= 2; | ||
1010 | break; | ||
1011 | } | ||
1012 | regs->d0 = -EINTR; | ||
1013 | break; | ||
1014 | |||
1015 | case -ERESTARTSYS: | ||
1016 | if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) { | ||
1017 | regs->d0 = -EINTR; | ||
1018 | break; | ||
1019 | } | ||
1020 | /* fallthrough */ | ||
1021 | case -ERESTARTNOINTR: | ||
1022 | do_restart: | ||
1023 | regs->d0 = regs->orig_d0; | ||
1024 | regs->pc -= 2; | ||
1025 | break; | ||
1026 | } | ||
1027 | } | ||
1028 | |||
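handle_restart() winds the program counter back by 2 because the trap instruction that entered the kernel is two bytes long; reloading d0 from orig_d0 (the saved syscall number) then makes the return to user space re-issue the same call. A sketch of the restart arm with made-up values:

    #include <stdio.h>

    /* Illustrative only: mock registers walking through the do_restart path.
     * -512 is -ERESTARTSYS; 4 stands in for a hypothetical syscall number.
     */
    struct mock_regs { long d0, orig_d0, pc; };

    int main(void)
    {
            struct mock_regs regs = { .d0 = -512, .orig_d0 = 4, .pc = 0x1002 };

            regs.d0 = regs.orig_d0;  /* put the syscall number back */
            regs.pc -= 2;            /* back over the 2-byte trap instruction */
            printf("restart at pc=%#lx with d0=%ld\n", regs.pc, regs.d0);
            return 0;
    }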
1029 | void ptrace_signal_deliver(struct pt_regs *regs, void *cookie) | ||
1030 | { | ||
1031 | if (regs->orig_d0 < 0) | ||
1032 | return; | ||
1033 | switch (regs->d0) { | ||
1034 | case -ERESTARTNOHAND: | ||
1035 | case -ERESTARTSYS: | ||
1036 | case -ERESTARTNOINTR: | ||
1037 | regs->d0 = regs->orig_d0; | ||
1038 | regs->orig_d0 = -1; | ||
1039 | regs->pc -= 2; | ||
1040 | break; | ||
1041 | } | ||
1042 | } | ||
1043 | |||
1044 | /* | ||
1045 | * OK, we're invoking a handler | ||
1046 | */ | ||
1047 | static void | ||
1048 | handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info, | ||
1049 | sigset_t *oldset, struct pt_regs *regs) | ||
1050 | { | ||
1051 | int err; | ||
1052 | /* are we from a system call? */ | ||
1053 | if (regs->orig_d0 >= 0) | ||
1054 | /* If so, check system call restarting.. */ | ||
1055 | handle_restart(regs, ka, 1); | ||
1056 | |||
1057 | /* set up the stack frame */ | ||
1058 | if (ka->sa.sa_flags & SA_SIGINFO) | ||
1059 | err = setup_rt_frame(sig, ka, info, oldset, regs); | ||
1060 | else | ||
1061 | err = setup_frame(sig, ka, oldset, regs); | ||
1062 | |||
1063 | if (err) | ||
1064 | return; | ||
1065 | |||
1066 | sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); | ||
1067 | if (!(ka->sa.sa_flags & SA_NODEFER)) | ||
1068 | sigaddset(&current->blocked,sig); | ||
1069 | recalc_sigpending(); | ||
1070 | |||
1071 | if (test_thread_flag(TIF_DELAYED_TRACE)) { | ||
1072 | regs->sr &= ~0x8000; | ||
1073 | send_sig(SIGTRAP, current, 1); | ||
1074 | } | ||
1075 | |||
1076 | clear_thread_flag(TIF_RESTORE_SIGMASK); | ||
1077 | } | ||
1078 | |||
1079 | /* | ||
1080 | * Note that 'init' is a special process: it doesn't get signals it doesn't | ||
1081 | * want to handle. Thus you cannot kill init even with a SIGKILL even by | ||
1082 | * mistake. | ||
1083 | */ | ||
1084 | asmlinkage void do_signal(struct pt_regs *regs) | ||
1085 | { | ||
1086 | siginfo_t info; | ||
1087 | struct k_sigaction ka; | ||
1088 | int signr; | ||
1089 | sigset_t *oldset; | ||
1090 | |||
1091 | current->thread.esp0 = (unsigned long) regs; | ||
1092 | |||
1093 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | ||
1094 | oldset = &current->saved_sigmask; | ||
1095 | else | ||
1096 | oldset = &current->blocked; | ||
1097 | |||
1098 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | ||
1099 | if (signr > 0) { | ||
1100 | /* Whee! Actually deliver the signal. */ | ||
1101 | handle_signal(signr, &ka, &info, oldset, regs); | ||
1102 | return; | ||
1103 | } | ||
1104 | |||
1105 | /* Did we come from a system call? */ | ||
1106 | if (regs->orig_d0 >= 0) | ||
1107 | /* Restart the system call - no handlers present */ | ||
1108 | handle_restart(regs, NULL, 0); | ||
1109 | |||
1110 | /* If there's no signal to deliver, we just restore the saved mask. */ | ||
1111 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) { | ||
1112 | clear_thread_flag(TIF_RESTORE_SIGMASK); | ||
1113 | sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); | ||
1114 | } | ||
1115 | } | ||
diff --git a/arch/m68k/kernel/signal_no.c b/arch/m68k/kernel/signal_no.c deleted file mode 100644 index 36a81bb6835a..000000000000 --- a/arch/m68k/kernel/signal_no.c +++ /dev/null | |||
@@ -1,765 +0,0 @@ | |||
1 | /* | ||
2 | * linux/arch/m68knommu/kernel/signal.c | ||
3 | * | ||
4 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file COPYING in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | |||
11 | /* | ||
12 | * Linux/m68k support by Hamish Macdonald | ||
13 | * | ||
14 | * 68060 fixes by Jesper Skov | ||
15 | * | ||
16 | * 1997-12-01 Modified for POSIX.1b signals by Andreas Schwab | ||
17 | * | ||
18 | * mathemu support by Roman Zippel | ||
19 | * (Note: fpstate in the signal context is completely ignored for the emulator | ||
20 | * and the internal floating point format is put on stack) | ||
21 | */ | ||
22 | |||
23 | /* | ||
24 | * ++roman (07/09/96): implemented signal stacks (specially for tosemu on | ||
25 | * Atari :-) Current limitation: Only one sigstack can be active at one time. | ||
26 | * If a second signal with SA_ONSTACK set arrives while working on a sigstack, | ||
27 | * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested | ||
28 | * signal handlers! | ||
29 | */ | ||
30 | |||
31 | #include <linux/sched.h> | ||
32 | #include <linux/mm.h> | ||
33 | #include <linux/kernel.h> | ||
34 | #include <linux/signal.h> | ||
35 | #include <linux/syscalls.h> | ||
36 | #include <linux/errno.h> | ||
37 | #include <linux/wait.h> | ||
38 | #include <linux/ptrace.h> | ||
39 | #include <linux/unistd.h> | ||
40 | #include <linux/stddef.h> | ||
41 | #include <linux/highuid.h> | ||
42 | #include <linux/tty.h> | ||
43 | #include <linux/personality.h> | ||
44 | #include <linux/binfmts.h> | ||
45 | |||
46 | #include <asm/setup.h> | ||
47 | #include <asm/uaccess.h> | ||
48 | #include <asm/pgtable.h> | ||
49 | #include <asm/traps.h> | ||
50 | #include <asm/ucontext.h> | ||
51 | |||
52 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | ||
53 | |||
54 | void ret_from_user_signal(void); | ||
55 | void ret_from_user_rt_signal(void); | ||
56 | |||
57 | /* | ||
58 | * Atomically swap in the new signal mask, and wait for a signal. | ||
59 | */ | ||
60 | asmlinkage int | ||
61 | sys_sigsuspend(int unused0, int unused1, old_sigset_t mask) | ||
62 | { | ||
63 | mask &= _BLOCKABLE; | ||
64 | spin_lock_irq(&current->sighand->siglock); | ||
65 | current->saved_sigmask = current->blocked; | ||
66 | siginitset(&current->blocked, mask); | ||
67 | recalc_sigpending(); | ||
68 | spin_unlock_irq(&current->sighand->siglock); | ||
69 | |||
70 | current->state = TASK_INTERRUPTIBLE; | ||
71 | schedule(); | ||
72 | set_restore_sigmask(); | ||
73 | |||
74 | return -ERESTARTNOHAND; | ||
75 | } | ||
76 | |||
77 | asmlinkage int | ||
78 | sys_sigaction(int sig, const struct old_sigaction __user *act, | ||
79 | struct old_sigaction __user *oact) | ||
80 | { | ||
81 | struct k_sigaction new_ka, old_ka; | ||
82 | int ret; | ||
83 | |||
84 | if (act) { | ||
85 | old_sigset_t mask; | ||
86 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || | ||
87 | __get_user(new_ka.sa.sa_handler, &act->sa_handler) || | ||
88 | __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || | ||
89 | __get_user(new_ka.sa.sa_flags, &act->sa_flags) || | ||
90 | __get_user(mask, &act->sa_mask)) | ||
91 | return -EFAULT; | ||
92 | siginitset(&new_ka.sa.sa_mask, mask); | ||
93 | } | ||
94 | |||
95 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | ||
96 | |||
97 | if (!ret && oact) { | ||
98 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || | ||
99 | __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || | ||
100 | __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || | ||
101 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || | ||
102 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) | ||
103 | return -EFAULT; | ||
104 | } | ||
105 | |||
106 | return ret; | ||
107 | } | ||
108 | |||
109 | asmlinkage int | ||
110 | sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss) | ||
111 | { | ||
112 | return do_sigaltstack(uss, uoss, rdusp()); | ||
113 | } | ||
114 | |||
115 | |||
116 | /* | ||
117 | * Do a signal return; undo the signal stack. | ||
118 | * | ||
119 | * Keep the return code on the stack quadword aligned! | ||
120 | * That makes the cache flush below easier. | ||
121 | */ | ||
122 | |||
123 | struct sigframe | ||
124 | { | ||
125 | char __user *pretcode; | ||
126 | int sig; | ||
127 | int code; | ||
128 | struct sigcontext __user *psc; | ||
129 | char retcode[8]; | ||
130 | unsigned long extramask[_NSIG_WORDS-1]; | ||
131 | struct sigcontext sc; | ||
132 | }; | ||
133 | |||
134 | struct rt_sigframe | ||
135 | { | ||
136 | char __user *pretcode; | ||
137 | int sig; | ||
138 | struct siginfo __user *pinfo; | ||
139 | void __user *puc; | ||
140 | char retcode[8]; | ||
141 | struct siginfo info; | ||
142 | struct ucontext uc; | ||
143 | }; | ||
144 | |||
145 | #ifdef CONFIG_FPU | ||
146 | |||
147 | static unsigned char fpu_version = 0; /* version number of fpu, set by setup_frame */ | ||
148 | |||
149 | static inline int restore_fpu_state(struct sigcontext *sc) | ||
150 | { | ||
151 | int err = 1; | ||
152 | |||
153 | if (FPU_IS_EMU) { | ||
154 | /* restore registers */ | ||
155 | memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12); | ||
156 | memcpy(current->thread.fp, sc->sc_fpregs, 24); | ||
157 | return 0; | ||
158 | } | ||
159 | |||
160 | if (sc->sc_fpstate[0]) { | ||
161 | /* Verify the frame format. */ | ||
162 | if (sc->sc_fpstate[0] != fpu_version) | ||
163 | goto out; | ||
164 | |||
165 | __asm__ volatile (".chip 68k/68881\n\t" | ||
166 | "fmovemx %0,%%fp0-%%fp1\n\t" | ||
167 | "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t" | ||
168 | ".chip 68k" | ||
169 | : /* no outputs */ | ||
170 | : "m" (*sc->sc_fpregs), "m" (*sc->sc_fpcntl)); | ||
171 | } | ||
172 | __asm__ volatile (".chip 68k/68881\n\t" | ||
173 | "frestore %0\n\t" | ||
174 | ".chip 68k" : : "m" (*sc->sc_fpstate)); | ||
175 | err = 0; | ||
176 | |||
177 | out: | ||
178 | return err; | ||
179 | } | ||
180 | |||
181 | #define FPCONTEXT_SIZE 216 | ||
182 | #define uc_fpstate uc_filler[0] | ||
183 | #define uc_formatvec uc_filler[FPCONTEXT_SIZE/4] | ||
184 | #define uc_extra uc_filler[FPCONTEXT_SIZE/4+1] | ||
185 | |||
186 | static inline int rt_restore_fpu_state(struct ucontext __user *uc) | ||
187 | { | ||
188 | unsigned char fpstate[FPCONTEXT_SIZE]; | ||
189 | int context_size = 0; | ||
190 | fpregset_t fpregs; | ||
191 | int err = 1; | ||
192 | |||
193 | if (FPU_IS_EMU) { | ||
194 | /* restore fpu control register */ | ||
195 | if (__copy_from_user(current->thread.fpcntl, | ||
196 | uc->uc_mcontext.fpregs.f_fpcntl, 12)) | ||
197 | goto out; | ||
198 | /* restore all other fpu register */ | ||
199 | if (__copy_from_user(current->thread.fp, | ||
200 | uc->uc_mcontext.fpregs.f_fpregs, 96)) | ||
201 | goto out; | ||
202 | return 0; | ||
203 | } | ||
204 | |||
205 | if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate)) | ||
206 | goto out; | ||
207 | if (fpstate[0]) { | ||
208 | context_size = fpstate[1]; | ||
209 | |||
210 | /* Verify the frame format. */ | ||
211 | if (fpstate[0] != fpu_version) | ||
212 | goto out; | ||
213 | if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs, | ||
214 | sizeof(fpregs))) | ||
215 | goto out; | ||
216 | __asm__ volatile (".chip 68k/68881\n\t" | ||
217 | "fmovemx %0,%%fp0-%%fp7\n\t" | ||
218 | "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t" | ||
219 | ".chip 68k" | ||
220 | : /* no outputs */ | ||
221 | : "m" (*fpregs.f_fpregs), | ||
222 | "m" (*fpregs.f_fpcntl)); | ||
223 | } | ||
224 | if (context_size && | ||
225 | __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1, | ||
226 | context_size)) | ||
227 | goto out; | ||
228 | __asm__ volatile (".chip 68k/68881\n\t" | ||
229 | "frestore %0\n\t" | ||
230 | ".chip 68k" : : "m" (*fpstate)); | ||
231 | err = 0; | ||
232 | |||
233 | out: | ||
234 | return err; | ||
235 | } | ||
236 | |||
237 | #endif | ||
238 | |||
239 | static inline int | ||
240 | restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp, | ||
241 | int *pd0) | ||
242 | { | ||
243 | int formatvec; | ||
244 | struct sigcontext context; | ||
245 | int err = 0; | ||
246 | |||
247 | /* Always make any pending restarted system calls return -EINTR */ | ||
248 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
249 | |||
250 | /* get previous context */ | ||
251 | if (copy_from_user(&context, usc, sizeof(context))) | ||
252 | goto badframe; | ||
253 | |||
254 | /* restore passed registers */ | ||
255 | regs->d1 = context.sc_d1; | ||
256 | regs->a0 = context.sc_a0; | ||
257 | regs->a1 = context.sc_a1; | ||
258 | ((struct switch_stack *)regs - 1)->a5 = context.sc_a5; | ||
259 | regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff); | ||
260 | regs->pc = context.sc_pc; | ||
261 | regs->orig_d0 = -1; /* disable syscall checks */ | ||
262 | wrusp(context.sc_usp); | ||
263 | formatvec = context.sc_formatvec; | ||
264 | regs->format = formatvec >> 12; | ||
265 | regs->vector = formatvec & 0xfff; | ||
266 | |||
267 | #ifdef CONFIG_FPU | ||
268 | err = restore_fpu_state(&context); | ||
269 | #endif | ||
270 | |||
271 | *pd0 = context.sc_d0; | ||
272 | return err; | ||
273 | |||
274 | badframe: | ||
275 | return 1; | ||
276 | } | ||
277 | |||
278 | static inline int | ||
279 | rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw, | ||
280 | struct ucontext __user *uc, int *pd0) | ||
281 | { | ||
282 | int temp; | ||
283 | greg_t __user *gregs = uc->uc_mcontext.gregs; | ||
284 | unsigned long usp; | ||
285 | int err; | ||
286 | |||
287 | /* Always make any pending restarted system calls return -EINTR */ | ||
288 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
289 | |||
290 | err = __get_user(temp, &uc->uc_mcontext.version); | ||
291 | if (temp != MCONTEXT_VERSION) | ||
292 | goto badframe; | ||
293 | /* restore passed registers */ | ||
294 | err |= __get_user(regs->d0, &gregs[0]); | ||
295 | err |= __get_user(regs->d1, &gregs[1]); | ||
296 | err |= __get_user(regs->d2, &gregs[2]); | ||
297 | err |= __get_user(regs->d3, &gregs[3]); | ||
298 | err |= __get_user(regs->d4, &gregs[4]); | ||
299 | err |= __get_user(regs->d5, &gregs[5]); | ||
300 | err |= __get_user(sw->d6, &gregs[6]); | ||
301 | err |= __get_user(sw->d7, &gregs[7]); | ||
302 | err |= __get_user(regs->a0, &gregs[8]); | ||
303 | err |= __get_user(regs->a1, &gregs[9]); | ||
304 | err |= __get_user(regs->a2, &gregs[10]); | ||
305 | err |= __get_user(sw->a3, &gregs[11]); | ||
306 | err |= __get_user(sw->a4, &gregs[12]); | ||
307 | err |= __get_user(sw->a5, &gregs[13]); | ||
308 | err |= __get_user(sw->a6, &gregs[14]); | ||
309 | err |= __get_user(usp, &gregs[15]); | ||
310 | wrusp(usp); | ||
311 | err |= __get_user(regs->pc, &gregs[16]); | ||
312 | err |= __get_user(temp, &gregs[17]); | ||
313 | regs->sr = (regs->sr & 0xff00) | (temp & 0xff); | ||
314 | regs->orig_d0 = -1; /* disable syscall checks */ | ||
315 | regs->format = temp >> 12; | ||
316 | regs->vector = temp & 0xfff; | ||
317 | |||
318 | if (do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT) | ||
319 | goto badframe; | ||
320 | |||
321 | *pd0 = regs->d0; | ||
322 | return err; | ||
323 | |||
324 | badframe: | ||
325 | return 1; | ||
326 | } | ||
327 | |||
328 | asmlinkage int do_sigreturn(unsigned long __unused) | ||
329 | { | ||
330 | struct switch_stack *sw = (struct switch_stack *) &__unused; | ||
331 | struct pt_regs *regs = (struct pt_regs *) (sw + 1); | ||
332 | unsigned long usp = rdusp(); | ||
333 | struct sigframe __user *frame = (struct sigframe __user *)(usp - 4); | ||
334 | sigset_t set; | ||
335 | int d0; | ||
336 | |||
337 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | ||
338 | goto badframe; | ||
339 | if (__get_user(set.sig[0], &frame->sc.sc_mask) || | ||
340 | (_NSIG_WORDS > 1 && | ||
341 | __copy_from_user(&set.sig[1], &frame->extramask, | ||
342 | sizeof(frame->extramask)))) | ||
343 | goto badframe; | ||
344 | |||
345 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
346 | spin_lock_irq(&current->sighand->siglock); | ||
347 | current->blocked = set; | ||
348 | recalc_sigpending(); | ||
349 | spin_unlock_irq(&current->sighand->siglock); | ||
350 | |||
351 | if (restore_sigcontext(regs, &frame->sc, frame + 1, &d0)) | ||
352 | goto badframe; | ||
353 | return d0; | ||
354 | |||
355 | badframe: | ||
356 | force_sig(SIGSEGV, current); | ||
357 | return 0; | ||
358 | } | ||
359 | |||
360 | asmlinkage int do_rt_sigreturn(unsigned long __unused) | ||
361 | { | ||
362 | struct switch_stack *sw = (struct switch_stack *) &__unused; | ||
363 | struct pt_regs *regs = (struct pt_regs *) (sw + 1); | ||
364 | unsigned long usp = rdusp(); | ||
365 | struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4); | ||
366 | sigset_t set; | ||
367 | int d0; | ||
368 | |||
369 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | ||
370 | goto badframe; | ||
371 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) | ||
372 | goto badframe; | ||
373 | |||
374 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
375 | spin_lock_irq(&current->sighand->siglock); | ||
376 | current->blocked = set; | ||
377 | recalc_sigpending(); | ||
378 | spin_unlock_irq(&current->sighand->siglock); | ||
379 | |||
380 | if (rt_restore_ucontext(regs, sw, &frame->uc, &d0)) | ||
381 | goto badframe; | ||
382 | return d0; | ||
383 | |||
384 | badframe: | ||
385 | force_sig(SIGSEGV, current); | ||
386 | return 0; | ||
387 | } | ||
388 | |||
389 | #ifdef CONFIG_FPU | ||
390 | /* | ||
391 | * Set up a signal frame. | ||
392 | */ | ||
393 | |||
394 | static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs) | ||
395 | { | ||
396 | if (FPU_IS_EMU) { | ||
397 | /* save registers */ | ||
398 | memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12); | ||
399 | memcpy(sc->sc_fpregs, current->thread.fp, 24); | ||
400 | return; | ||
401 | } | ||
402 | |||
403 | __asm__ volatile (".chip 68k/68881\n\t" | ||
404 | "fsave %0\n\t" | ||
405 | ".chip 68k" | ||
406 | : : "m" (*sc->sc_fpstate) : "memory"); | ||
407 | |||
408 | if (sc->sc_fpstate[0]) { | ||
409 | fpu_version = sc->sc_fpstate[0]; | ||
410 | __asm__ volatile (".chip 68k/68881\n\t" | ||
411 | "fmovemx %%fp0-%%fp1,%0\n\t" | ||
412 | "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t" | ||
413 | ".chip 68k" | ||
414 | : "=m" (*sc->sc_fpregs), | ||
415 | "=m" (*sc->sc_fpcntl) | ||
416 | : /* no inputs */ | ||
417 | : "memory"); | ||
418 | } | ||
419 | } | ||
420 | |||
421 | static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs) | ||
422 | { | ||
423 | unsigned char fpstate[FPCONTEXT_SIZE]; | ||
424 | int context_size = 0; | ||
425 | int err = 0; | ||
426 | |||
427 | if (FPU_IS_EMU) { | ||
428 | /* save fpu control register */ | ||
429 | err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl, | ||
430 | current->thread.fpcntl, 12); | ||
431 | /* save all other fpu register */ | ||
432 | err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs, | ||
433 | current->thread.fp, 96); | ||
434 | return err; | ||
435 | } | ||
436 | |||
437 | __asm__ volatile (".chip 68k/68881\n\t" | ||
438 | "fsave %0\n\t" | ||
439 | ".chip 68k" | ||
440 | : : "m" (*fpstate) : "memory"); | ||
441 | |||
442 | err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate); | ||
443 | if (fpstate[0]) { | ||
444 | fpregset_t fpregs; | ||
445 | context_size = fpstate[1]; | ||
446 | fpu_version = fpstate[0]; | ||
447 | __asm__ volatile (".chip 68k/68881\n\t" | ||
448 | "fmovemx %%fp0-%%fp7,%0\n\t" | ||
449 | "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t" | ||
450 | ".chip 68k" | ||
451 | : "=m" (*fpregs.f_fpregs), | ||
452 | "=m" (*fpregs.f_fpcntl) | ||
453 | : /* no inputs */ | ||
454 | : "memory"); | ||
455 | err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs, | ||
456 | sizeof(fpregs)); | ||
457 | } | ||
458 | if (context_size) | ||
459 | err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4, | ||
460 | context_size); | ||
461 | return err; | ||
462 | } | ||
463 | |||
464 | #endif | ||
465 | |||
466 | static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs, | ||
467 | unsigned long mask) | ||
468 | { | ||
469 | sc->sc_mask = mask; | ||
470 | sc->sc_usp = rdusp(); | ||
471 | sc->sc_d0 = regs->d0; | ||
472 | sc->sc_d1 = regs->d1; | ||
473 | sc->sc_a0 = regs->a0; | ||
474 | sc->sc_a1 = regs->a1; | ||
475 | sc->sc_a5 = ((struct switch_stack *)regs - 1)->a5; | ||
476 | sc->sc_sr = regs->sr; | ||
477 | sc->sc_pc = regs->pc; | ||
478 | sc->sc_formatvec = regs->format << 12 | regs->vector; | ||
479 | #ifdef CONFIG_FPU | ||
480 | save_fpu_state(sc, regs); | ||
481 | #endif | ||
482 | } | ||
483 | |||
484 | static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs) | ||
485 | { | ||
486 | struct switch_stack *sw = (struct switch_stack *)regs - 1; | ||
487 | greg_t __user *gregs = uc->uc_mcontext.gregs; | ||
488 | int err = 0; | ||
489 | |||
490 | err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version); | ||
491 | err |= __put_user(regs->d0, &gregs[0]); | ||
492 | err |= __put_user(regs->d1, &gregs[1]); | ||
493 | err |= __put_user(regs->d2, &gregs[2]); | ||
494 | err |= __put_user(regs->d3, &gregs[3]); | ||
495 | err |= __put_user(regs->d4, &gregs[4]); | ||
496 | err |= __put_user(regs->d5, &gregs[5]); | ||
497 | err |= __put_user(sw->d6, &gregs[6]); | ||
498 | err |= __put_user(sw->d7, &gregs[7]); | ||
499 | err |= __put_user(regs->a0, &gregs[8]); | ||
500 | err |= __put_user(regs->a1, &gregs[9]); | ||
501 | err |= __put_user(regs->a2, &gregs[10]); | ||
502 | err |= __put_user(sw->a3, &gregs[11]); | ||
503 | err |= __put_user(sw->a4, &gregs[12]); | ||
504 | err |= __put_user(sw->a5, &gregs[13]); | ||
505 | err |= __put_user(sw->a6, &gregs[14]); | ||
506 | err |= __put_user(rdusp(), &gregs[15]); | ||
507 | err |= __put_user(regs->pc, &gregs[16]); | ||
508 | err |= __put_user(regs->sr, &gregs[17]); | ||
509 | #ifdef CONFIG_FPU | ||
510 | err |= rt_save_fpu_state(uc, regs); | ||
511 | #endif | ||
512 | return err; | ||
513 | } | ||
514 | |||
515 | static inline void __user * | ||
516 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) | ||
517 | { | ||
518 | unsigned long usp; | ||
519 | |||
520 | /* Default to using normal stack. */ | ||
521 | usp = rdusp(); | ||
522 | |||
523 | /* This is the X/Open sanctioned signal stack switching. */ | ||
524 | if (ka->sa.sa_flags & SA_ONSTACK) { | ||
525 | if (!sas_ss_flags(usp)) | ||
526 | usp = current->sas_ss_sp + current->sas_ss_size; | ||
527 | } | ||
528 | return (void __user *)((usp - frame_size) & -8UL); | ||
529 | } | ||
530 | |||
531 | static int setup_frame (int sig, struct k_sigaction *ka, | ||
532 | sigset_t *set, struct pt_regs *regs) | ||
533 | { | ||
534 | struct sigframe __user *frame; | ||
535 | struct sigcontext context; | ||
536 | int err = 0; | ||
537 | |||
538 | frame = get_sigframe(ka, regs, sizeof(*frame)); | ||
539 | |||
540 | err |= __put_user((current_thread_info()->exec_domain | ||
541 | && current_thread_info()->exec_domain->signal_invmap | ||
542 | && sig < 32 | ||
543 | ? current_thread_info()->exec_domain->signal_invmap[sig] | ||
544 | : sig), | ||
545 | &frame->sig); | ||
546 | |||
547 | err |= __put_user(regs->vector, &frame->code); | ||
548 | err |= __put_user(&frame->sc, &frame->psc); | ||
549 | |||
550 | if (_NSIG_WORDS > 1) | ||
551 | err |= copy_to_user(frame->extramask, &set->sig[1], | ||
552 | sizeof(frame->extramask)); | ||
553 | |||
554 | setup_sigcontext(&context, regs, set->sig[0]); | ||
555 | err |= copy_to_user (&frame->sc, &context, sizeof(context)); | ||
556 | |||
557 | /* Set up to return from userspace. */ | ||
558 | err |= __put_user((void *) ret_from_user_signal, &frame->pretcode); | ||
559 | |||
560 | if (err) | ||
561 | goto give_sigsegv; | ||
562 | |||
563 | /* Set up registers for signal handler */ | ||
564 | wrusp ((unsigned long) frame); | ||
565 | regs->pc = (unsigned long) ka->sa.sa_handler; | ||
566 | ((struct switch_stack *)regs - 1)->a5 = current->mm->start_data; | ||
567 | regs->format = 0x4; /*set format byte to make stack appear modulo 4 | ||
568 | which it will be when doing the rte */ | ||
569 | |||
570 | adjust_stack: | ||
571 | /* Prepare to skip over the extra stuff in the exception frame. */ | ||
572 | if (regs->stkadj) { | ||
573 | struct pt_regs *tregs = | ||
574 | (struct pt_regs *)((ulong)regs + regs->stkadj); | ||
575 | #if defined(DEBUG) | ||
576 | printk(KERN_DEBUG "Performing stackadjust=%04x\n", regs->stkadj); | ||
577 | #endif | ||
578 | /* This must be copied with decreasing addresses to | ||
579 | handle overlaps. */ | ||
580 | tregs->vector = 0; | ||
581 | tregs->format = 0; | ||
582 | tregs->pc = regs->pc; | ||
583 | tregs->sr = regs->sr; | ||
584 | } | ||
585 | return err; | ||
586 | |||
587 | give_sigsegv: | ||
588 | force_sigsegv(sig, current); | ||
589 | goto adjust_stack; | ||
590 | } | ||
591 | |||
592 | static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info, | ||
593 | sigset_t *set, struct pt_regs *regs) | ||
594 | { | ||
595 | struct rt_sigframe __user *frame; | ||
596 | int err = 0; | ||
597 | |||
598 | frame = get_sigframe(ka, regs, sizeof(*frame)); | ||
599 | |||
600 | err |= __put_user((current_thread_info()->exec_domain | ||
601 | && current_thread_info()->exec_domain->signal_invmap | ||
602 | && sig < 32 | ||
603 | ? current_thread_info()->exec_domain->signal_invmap[sig] | ||
604 | : sig), | ||
605 | &frame->sig); | ||
606 | err |= __put_user(&frame->info, &frame->pinfo); | ||
607 | err |= __put_user(&frame->uc, &frame->puc); | ||
608 | err |= copy_siginfo_to_user(&frame->info, info); | ||
609 | |||
610 | /* Create the ucontext. */ | ||
611 | err |= __put_user(0, &frame->uc.uc_flags); | ||
612 | err |= __put_user(NULL, &frame->uc.uc_link); | ||
613 | err |= __put_user((void __user *)current->sas_ss_sp, | ||
614 | &frame->uc.uc_stack.ss_sp); | ||
615 | err |= __put_user(sas_ss_flags(rdusp()), | ||
616 | &frame->uc.uc_stack.ss_flags); | ||
617 | err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); | ||
618 | err |= rt_setup_ucontext(&frame->uc, regs); | ||
619 | err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set)); | ||
620 | |||
621 | /* Set up to return from userspace. */ | ||
622 | err |= __put_user((void *) ret_from_user_rt_signal, &frame->pretcode); | ||
623 | |||
624 | if (err) | ||
625 | goto give_sigsegv; | ||
626 | |||
627 | /* Set up registers for signal handler */ | ||
628 | wrusp ((unsigned long) frame); | ||
629 | regs->pc = (unsigned long) ka->sa.sa_handler; | ||
630 | ((struct switch_stack *)regs - 1)->a5 = current->mm->start_data; | ||
631 | regs->format = 0x4; /*set format byte to make stack appear modulo 4 | ||
632 | which it will be when doing the rte */ | ||
633 | |||
634 | adjust_stack: | ||
635 | /* Prepare to skip over the extra stuff in the exception frame. */ | ||
636 | if (regs->stkadj) { | ||
637 | struct pt_regs *tregs = | ||
638 | (struct pt_regs *)((ulong)regs + regs->stkadj); | ||
639 | #if defined(DEBUG) | ||
640 | printk(KERN_DEBUG "Performing stackadjust=%04x\n", regs->stkadj); | ||
641 | #endif | ||
642 | /* This must be copied with decreasing addresses to | ||
643 | handle overlaps. */ | ||
644 | tregs->vector = 0; | ||
645 | tregs->format = 0; | ||
646 | tregs->pc = regs->pc; | ||
647 | tregs->sr = regs->sr; | ||
648 | } | ||
649 | return err; | ||
650 | |||
651 | give_sigsegv: | ||
652 | force_sigsegv(sig, current); | ||
653 | goto adjust_stack; | ||
654 | } | ||
655 | |||
656 | static inline void | ||
657 | handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler) | ||
658 | { | ||
659 | switch (regs->d0) { | ||
660 | case -ERESTARTNOHAND: | ||
661 | if (!has_handler) | ||
662 | goto do_restart; | ||
663 | regs->d0 = -EINTR; | ||
664 | break; | ||
665 | |||
666 | case -ERESTART_RESTARTBLOCK: | ||
667 | if (!has_handler) { | ||
668 | regs->d0 = __NR_restart_syscall; | ||
669 | regs->pc -= 2; | ||
670 | break; | ||
671 | } | ||
672 | regs->d0 = -EINTR; | ||
673 | break; | ||
674 | |||
675 | case -ERESTARTSYS: | ||
676 | if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) { | ||
677 | regs->d0 = -EINTR; | ||
678 | break; | ||
679 | } | ||
680 | /* fallthrough */ | ||
681 | case -ERESTARTNOINTR: | ||
682 | do_restart: | ||
683 | regs->d0 = regs->orig_d0; | ||
684 | regs->pc -= 2; | ||
685 | break; | ||
686 | } | ||
687 | } | ||
688 | |||
689 | /* | ||
690 | * OK, we're invoking a handler | ||
691 | */ | ||
692 | static void | ||
693 | handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info, | ||
694 | sigset_t *oldset, struct pt_regs *regs) | ||
695 | { | ||
696 | int err; | ||
697 | /* are we from a system call? */ | ||
698 | if (regs->orig_d0 >= 0) | ||
699 | /* If so, check system call restarting.. */ | ||
700 | handle_restart(regs, ka, 1); | ||
701 | |||
702 | /* set up the stack frame */ | ||
703 | if (ka->sa.sa_flags & SA_SIGINFO) | ||
704 | err = setup_rt_frame(sig, ka, info, oldset, regs); | ||
705 | else | ||
706 | err = setup_frame(sig, ka, oldset, regs); | ||
707 | |||
708 | if (err) | ||
709 | return; | ||
710 | |||
711 | spin_lock_irq(&current->sighand->siglock); | ||
712 | sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); | ||
713 | if (!(ka->sa.sa_flags & SA_NODEFER)) | ||
714 | sigaddset(&current->blocked,sig); | ||
715 | recalc_sigpending(); | ||
716 | spin_unlock_irq(&current->sighand->siglock); | ||
717 | |||
718 | clear_thread_flag(TIF_RESTORE_SIGMASK); | ||
719 | } | ||
720 | |||
721 | /* | ||
722 | * Note that 'init' is a special process: it doesn't get signals it doesn't | ||
723 | * want to handle. Thus you cannot kill init even with a SIGKILL even by | ||
724 | * mistake. | ||
725 | */ | ||
726 | asmlinkage void do_signal(struct pt_regs *regs) | ||
727 | { | ||
728 | struct k_sigaction ka; | ||
729 | siginfo_t info; | ||
730 | int signr; | ||
731 | sigset_t *oldset; | ||
732 | |||
733 | /* | ||
734 | * We want the common case to go fast, which | ||
735 | * is why we may in certain cases get here from | ||
736 | * kernel mode. Just return without doing anything | ||
737 | * if so. | ||
738 | */ | ||
739 | if (!user_mode(regs)) | ||
740 | return; | ||
741 | |||
742 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | ||
743 | oldset = &current->saved_sigmask; | ||
744 | else | ||
745 | oldset = &current->blocked; | ||
746 | |||
747 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | ||
748 | if (signr > 0) { | ||
749 | /* Whee! Actually deliver the signal. */ | ||
750 | handle_signal(signr, &ka, &info, oldset, regs); | ||
751 | return; | ||
752 | } | ||
753 | |||
754 | /* Did we come from a system call? */ | ||
755 | if (regs->orig_d0 >= 0) { | ||
756 | /* Restart the system call - no handlers present */ | ||
757 | handle_restart(regs, NULL, 0); | ||
758 | } | ||
759 | |||
760 | /* If there's no signal to deliver, we just restore the saved mask. */ | ||
761 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) { | ||
762 | clear_thread_flag(TIF_RESTORE_SIGMASK); | ||
763 | sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); | ||
764 | } | ||
765 | } | ||