Diffstat (limited to 'arch/ppc')
-rw-r--r--  arch/ppc/kernel/Makefile    4
-rw-r--r--  arch/ppc/kernel/signal.c  771
2 files changed, 2 insertions(+), 773 deletions(-)
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile
index 87d3be4af820..c178397c50af 100644
--- a/arch/ppc/kernel/Makefile
+++ b/arch/ppc/kernel/Makefile
@@ -13,7 +13,7 @@ extra-$(CONFIG_POWER4)	+= idle_power4.o
 extra-y += vmlinux.lds
 
 obj-y := entry.o traps.o irq.o idle.o time.o misc.o \
-		process.o signal.o align.o \
+		process.o align.o \
 		syscalls.o setup.o \
 		ppc_htab.o perfmon.o
 obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
@@ -38,7 +38,7 @@ endif
 
 else
 obj-y := irq.o idle.o time.o \
-		signal.o align.o perfmon.o
+		align.o perfmon.o
 obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
 obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
 obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/ppc/kernel/signal.c b/arch/ppc/kernel/signal.c
deleted file mode 100644
index 2244bf91e593..000000000000
--- a/arch/ppc/kernel/signal.c
+++ /dev/null
@@ -1,771 +0,0 @@
-/*
- * arch/ppc/kernel/signal.c
- *
- * PowerPC version
- * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- * Derived from "arch/i386/kernel/signal.c"
- * Copyright (C) 1991, 1992 Linus Torvalds
- * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/ptrace.h>
-#include <linux/unistd.h>
-#include <linux/stddef.h>
-#include <linux/elf.h>
-#include <linux/tty.h>
-#include <linux/binfmts.h>
-#include <linux/suspend.h>
-#include <asm/ucontext.h>
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/cacheflush.h>
-
-#undef DEBUG_SIG
-
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-extern void sigreturn_exit(struct pt_regs *);
-
-#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
-
-int do_signal(sigset_t *oldset, struct pt_regs *regs);
-
-/*
- * Atomically swap in the new signal mask, and wait for a signal.
- */
-int
-sys_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7,
-	       struct pt_regs *regs)
-{
-	sigset_t saveset;
-
-	mask &= _BLOCKABLE;
-	spin_lock_irq(&current->sighand->siglock);
-	saveset = current->blocked;
-	siginitset(&current->blocked, mask);
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
-	regs->result = -EINTR;
-	regs->gpr[3] = EINTR;
-	regs->ccr |= 0x10000000;
-	while (1) {
-		current->state = TASK_INTERRUPTIBLE;
-		schedule();
-		if (do_signal(&saveset, regs))
-			sigreturn_exit(regs);
-	}
-}
-
-int
-sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, int p3, int p4,
-		  int p6, int p7, struct pt_regs *regs)
-{
-	sigset_t saveset, newset;
-
-	/* XXX: Don't preclude handling different sized sigset_t's. */
-	if (sigsetsize != sizeof(sigset_t))
-		return -EINVAL;
-
-	if (copy_from_user(&newset, unewset, sizeof(newset)))
-		return -EFAULT;
-	sigdelsetmask(&newset, ~_BLOCKABLE);
-
-	spin_lock_irq(&current->sighand->siglock);
-	saveset = current->blocked;
-	current->blocked = newset;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
-	regs->result = -EINTR;
-	regs->gpr[3] = EINTR;
-	regs->ccr |= 0x10000000;
-	while (1) {
-		current->state = TASK_INTERRUPTIBLE;
-		schedule();
-		if (do_signal(&saveset, regs))
-			sigreturn_exit(regs);
-	}
-}
-
-
-int
-sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, int r5,
-		int r6, int r7, int r8, struct pt_regs *regs)
-{
-	return do_sigaltstack(uss, uoss, regs->gpr[1]);
-}
-
-int
-sys_sigaction(int sig, const struct old_sigaction __user *act,
-	      struct old_sigaction __user *oact)
-{
-	struct k_sigaction new_ka, old_ka;
-	int ret;
-
-	if (act) {
-		old_sigset_t mask;
-		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
-		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
-			return -EFAULT;
-		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
-		__get_user(mask, &act->sa_mask);
-		siginitset(&new_ka.sa.sa_mask, mask);
-	}
-
-	ret = do_sigaction(sig, (act? &new_ka: NULL), (oact? &old_ka: NULL));
-
-	if (!ret && oact) {
-		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
-		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
-			return -EFAULT;
-		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
-	}
-
-	return ret;
-}
-
-/*
- * When we have signals to deliver, we set up on the
- * user stack, going down from the original stack pointer:
- *	a sigregs struct
- *	a sigcontext struct
- *	a gap of __SIGNAL_FRAMESIZE bytes
- *
- * Each of these things must be a multiple of 16 bytes in size.
- *
- */
-struct sigregs {
-	struct mcontext mctx;		/* all the register values */
-	/* Programs using the rs6000/xcoff abi can save up to 19 gp regs
-	   and 18 fp regs below sp before decrementing it. */
-	int abigap[56];
-};
-
-/* We use the mc_pad field for the signal return trampoline. */
-#define tramp mc_pad
-
-/*
- * When we have rt signals to deliver, we set up on the
- * user stack, going down from the original stack pointer:
- *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
- *	a gap of __SIGNAL_FRAMESIZE+16 bytes
- * (the +16 is to get the siginfo and ucontext in the same
- * positions as in older kernels).
- *
- * Each of these things must be a multiple of 16 bytes in size.
- *
- */
-struct rt_sigframe
-{
-	struct siginfo info;
-	struct ucontext uc;
-	/* Programs using the rs6000/xcoff abi can save up to 19 gp regs
-	   and 18 fp regs below sp before decrementing it. */
-	int abigap[56];
-};
-
-/*
- * Save the current user registers on the user stack.
- * We only save the altivec/spe registers if the process has used
- * altivec/spe instructions at some point.
- */
-static int
-save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, int sigret)
-{
-	/* save general and floating-point registers */
-	CHECK_FULL_REGS(regs);
-	preempt_disable();
-	if (regs->msr & MSR_FP)
-		giveup_fpu(current);
-#ifdef CONFIG_ALTIVEC
-	if (current->thread.used_vr && (regs->msr & MSR_VEC))
-		giveup_altivec(current);
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-	if (current->thread.used_spe && (regs->msr & MSR_SPE))
-		giveup_spe(current);
-#endif /* CONFIG_ALTIVEC */
-	preempt_enable();
-
-	if (__copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE)
-	    || __copy_to_user(&frame->mc_fregs, current->thread.fpr,
-			      ELF_NFPREG * sizeof(double)))
-		return 1;
-
-	current->thread.fpscr = 0;	/* turn off all fp exceptions */
-
-#ifdef CONFIG_ALTIVEC
-	/* save altivec registers */
-	if (current->thread.used_vr) {
-		if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
-				   ELF_NVRREG * sizeof(vector128)))
-			return 1;
-		/* set MSR_VEC in the saved MSR value to indicate that
-		   frame->mc_vregs contains valid data */
-		if (__put_user(regs->msr | MSR_VEC, &frame->mc_gregs[PT_MSR]))
-			return 1;
-	}
-	/* else assert((regs->msr & MSR_VEC) == 0) */
-
-	/* We always copy to/from vrsave, it's 0 if we don't have or don't
-	 * use altivec. Since VSCR only contains 32 bits saved in the least
-	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
-	 * most significant bits of that same vector. --BenH
-	 */
-	if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
-		return 1;
-#endif /* CONFIG_ALTIVEC */
-
-#ifdef CONFIG_SPE
-	/* save spe registers */
-	if (current->thread.used_spe) {
-		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
-				   ELF_NEVRREG * sizeof(u32)))
-			return 1;
-		/* set MSR_SPE in the saved MSR value to indicate that
-		   frame->mc_vregs contains valid data */
-		if (__put_user(regs->msr | MSR_SPE, &frame->mc_gregs[PT_MSR]))
-			return 1;
-	}
-	/* else assert((regs->msr & MSR_SPE) == 0) */
-
-	/* We always copy to/from spefscr */
-	if (__put_user(current->thread.spefscr, (u32 *)&frame->mc_vregs + ELF_NEVRREG))
-		return 1;
-#endif /* CONFIG_SPE */
-
-	if (sigret) {
-		/* Set up the sigreturn trampoline: li r0,sigret; sc */
-		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
-		    || __put_user(0x44000002UL, &frame->tramp[1]))
-			return 1;
-		flush_icache_range((unsigned long) &frame->tramp[0],
-				   (unsigned long) &frame->tramp[2]);
-	}
-
-	return 0;
-}
-
-/*
- * Restore the current user register values from the user stack,
- * (except for MSR).
- */
-static int
-restore_user_regs(struct pt_regs *regs, struct mcontext __user *sr, int sig)
-{
-	unsigned long save_r2 = 0;
-#if defined(CONFIG_ALTIVEC) || defined(CONFIG_SPE)
-	unsigned long msr;
-#endif
-
-	/* backup/restore the TLS as we don't want it to be modified */
-	if (!sig)
-		save_r2 = regs->gpr[2];
-	/* copy up to but not including MSR */
-	if (__copy_from_user(regs, &sr->mc_gregs, PT_MSR * sizeof(elf_greg_t)))
-		return 1;
-	/* copy from orig_r3 (the word after the MSR) up to the end */
-	if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
-			     GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
-		return 1;
-	if (!sig)
-		regs->gpr[2] = save_r2;
-
-	/* force the process to reload the FP registers from
-	   current->thread when it next does FP instructions */
-	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
-	if (__copy_from_user(current->thread.fpr, &sr->mc_fregs,
-			     sizeof(sr->mc_fregs)))
-		return 1;
-
-#ifdef CONFIG_ALTIVEC
-	/* force the process to reload the altivec registers from
-	   current->thread when it next does altivec instructions */
-	regs->msr &= ~MSR_VEC;
-	if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_VEC) != 0) {
-		/* restore altivec registers from the stack */
-		if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
-				     sizeof(sr->mc_vregs)))
-			return 1;
-	} else if (current->thread.used_vr)
-		memset(&current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
-
-	/* Always get VRSAVE back */
-	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
-		return 1;
-#endif /* CONFIG_ALTIVEC */
-
-#ifdef CONFIG_SPE
-	/* force the process to reload the spe registers from
-	   current->thread when it next does spe instructions */
-	regs->msr &= ~MSR_SPE;
-	if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_SPE) != 0) {
-		/* restore spe registers from the stack */
-		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
-				     ELF_NEVRREG * sizeof(u32)))
-			return 1;
-	} else if (current->thread.used_spe)
-		memset(&current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
-
-	/* Always get SPEFSCR back */
-	if (__get_user(current->thread.spefscr, (u32 *)&sr->mc_vregs + ELF_NEVRREG))
-		return 1;
-#endif /* CONFIG_SPE */
-
-#ifndef CONFIG_SMP
-	preempt_disable();
-	if (last_task_used_math == current)
-		last_task_used_math = NULL;
-	if (last_task_used_altivec == current)
-		last_task_used_altivec = NULL;
-	if (last_task_used_spe == current)
-		last_task_used_spe = NULL;
-	preempt_enable();
-#endif
-	return 0;
-}
-
-/*
- * Restore the user process's signal mask
- */
-static void
-restore_sigmask(sigset_t *set)
-{
-	sigdelsetmask(set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = *set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-}
-
-/*
- * Set up a signal frame for a "real-time" signal handler
- * (one which gets siginfo).
- */
-static void
-handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
-		 siginfo_t *info, sigset_t *oldset, struct pt_regs * regs,
-		 unsigned long newsp)
-{
-	struct rt_sigframe __user *rt_sf;
-	struct mcontext __user *frame;
-	unsigned long origsp = newsp;
-
-	/* Set up Signal Frame */
-	/* Put a Real Time Context onto stack */
-	newsp -= sizeof(*rt_sf);
-	rt_sf = (struct rt_sigframe __user *) newsp;
-
-	/* create a stack frame for the caller of the handler */
-	newsp -= __SIGNAL_FRAMESIZE + 16;
-
-	if (!access_ok(VERIFY_WRITE, (void __user *) newsp, origsp - newsp))
-		goto badframe;
-
-	/* Put the siginfo & fill in most of the ucontext */
-	if (copy_siginfo_to_user(&rt_sf->info, info)
-	    || __put_user(0, &rt_sf->uc.uc_flags)
-	    || __put_user(0, &rt_sf->uc.uc_link)
-	    || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
-	    || __put_user(sas_ss_flags(regs->gpr[1]),
-			  &rt_sf->uc.uc_stack.ss_flags)
-	    || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
-	    || __put_user(&rt_sf->uc.uc_mcontext, &rt_sf->uc.uc_regs)
-	    || __copy_to_user(&rt_sf->uc.uc_sigmask, oldset, sizeof(*oldset)))
-		goto badframe;
-
-	/* Save user registers on the stack */
-	frame = &rt_sf->uc.uc_mcontext;
-	if (save_user_regs(regs, frame, __NR_rt_sigreturn))
-		goto badframe;
-
-	if (put_user(regs->gpr[1], (unsigned long __user *)newsp))
-		goto badframe;
-	regs->gpr[1] = newsp;
-	regs->gpr[3] = sig;
-	regs->gpr[4] = (unsigned long) &rt_sf->info;
-	regs->gpr[5] = (unsigned long) &rt_sf->uc;
-	regs->gpr[6] = (unsigned long) rt_sf;
-	regs->nip = (unsigned long) ka->sa.sa_handler;
-	regs->link = (unsigned long) frame->tramp;
-	regs->trap = 0;
-
-	return;
-
-badframe:
-#ifdef DEBUG_SIG
-	printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
-	       regs, frame, newsp);
-#endif
-	force_sigsegv(sig, current);
-}
-
-static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
-{
-	sigset_t set;
-	struct mcontext __user *mcp;
-
-	if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(set))
-	    || __get_user(mcp, &ucp->uc_regs))
-		return -EFAULT;
-	restore_sigmask(&set);
-	if (restore_user_regs(regs, mcp, sig))
-		return -EFAULT;
-
-	return 0;
-}
-
-int sys_swapcontext(struct ucontext __user *old_ctx,
-		    struct ucontext __user *new_ctx,
-		    int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
-{
-	unsigned char tmp;
-
-	/* Context size is for future use. Right now, we only make sure
-	 * we are passed something we understand
-	 */
-	if (ctx_size < sizeof(struct ucontext))
-		return -EINVAL;
-
-	if (old_ctx != NULL) {
-		if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx))
-		    || save_user_regs(regs, &old_ctx->uc_mcontext, 0)
-		    || __copy_to_user(&old_ctx->uc_sigmask,
-				      &current->blocked, sizeof(sigset_t))
-		    || __put_user(&old_ctx->uc_mcontext, &old_ctx->uc_regs))
-			return -EFAULT;
-	}
-	if (new_ctx == NULL)
-		return 0;
-	if (!access_ok(VERIFY_READ, new_ctx, sizeof(*new_ctx))
-	    || __get_user(tmp, (u8 __user *) new_ctx)
-	    || __get_user(tmp, (u8 __user *) (new_ctx + 1) - 1))
-		return -EFAULT;
-
-	/*
-	 * If we get a fault copying the context into the kernel's
-	 * image of the user's registers, we can't just return -EFAULT
-	 * because the user's registers will be corrupted. For instance
-	 * the NIP value may have been updated but not some of the
-	 * other registers. Given that we have done the access_ok
-	 * and successfully read the first and last bytes of the region
-	 * above, this should only happen in an out-of-memory situation
-	 * or if another thread unmaps the region containing the context.
-	 * We kill the task with a SIGSEGV in this situation.
-	 */
-	if (do_setcontext(new_ctx, regs, 0))
-		do_exit(SIGSEGV);
-	sigreturn_exit(regs);
-	/* doesn't actually return back to here */
-	return 0;
-}
-
-int sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
-		     struct pt_regs *regs)
-{
-	struct rt_sigframe __user *rt_sf;
-
-	/* Always make any pending restarted system calls return -EINTR */
-	current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
-	rt_sf = (struct rt_sigframe __user *)
-		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
-	if (!access_ok(VERIFY_READ, rt_sf, sizeof(struct rt_sigframe)))
-		goto bad;
-	if (do_setcontext(&rt_sf->uc, regs, 1))
-		goto bad;
-
-	/*
-	 * It's not clear whether or why it is desirable to save the
-	 * sigaltstack setting on signal delivery and restore it on
-	 * signal return. But other architectures do this and we have
-	 * always done it up until now so it is probably better not to
-	 * change it. -- paulus
-	 */
-	do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);
-
-	sigreturn_exit(regs);		/* doesn't return here */
-	return 0;
-
-bad:
-	force_sig(SIGSEGV, current);
-	return 0;
-}
-
-int sys_debug_setcontext(struct ucontext __user *ctx,
-			 int ndbg, struct sig_dbg_op __user *dbg,
-			 int r6, int r7, int r8,
-			 struct pt_regs *regs)
-{
-	struct sig_dbg_op op;
-	int i;
-	unsigned long new_msr = regs->msr;
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
-	unsigned long new_dbcr0 = current->thread.dbcr0;
-#endif
-
-	for (i=0; i<ndbg; i++) {
-		if (__copy_from_user(&op, dbg, sizeof(op)))
-			return -EFAULT;
-		switch (op.dbg_type) {
-		case SIG_DBG_SINGLE_STEPPING:
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
-			if (op.dbg_value) {
-				new_msr |= MSR_DE;
-				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
-			} else {
-				new_msr &= ~MSR_DE;
-				new_dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
-			}
-#else
-			if (op.dbg_value)
-				new_msr |= MSR_SE;
-			else
-				new_msr &= ~MSR_SE;
-#endif
-			break;
-		case SIG_DBG_BRANCH_TRACING:
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
-			return -EINVAL;
-#else
-			if (op.dbg_value)
-				new_msr |= MSR_BE;
-			else
-				new_msr &= ~MSR_BE;
-#endif
-			break;
-
-		default:
-			return -EINVAL;
-		}
-	}
-
-	/* We wait until here to actually install the values in the
-	   registers so if we fail in the above loop, it will not
-	   affect the contents of these registers. After this point,
-	   failure is a problem, anyway, and it's very unlikely unless
-	   the user is really doing something wrong. */
-	regs->msr = new_msr;
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
-	current->thread.dbcr0 = new_dbcr0;
-#endif
-
-	/*
-	 * If we get a fault copying the context into the kernel's
-	 * image of the user's registers, we can't just return -EFAULT
-	 * because the user's registers will be corrupted. For instance
-	 * the NIP value may have been updated but not some of the
-	 * other registers. Given that we have done the access_ok
-	 * and successfully read the first and last bytes of the region
-	 * above, this should only happen in an out-of-memory situation
-	 * or if another thread unmaps the region containing the context.
-	 * We kill the task with a SIGSEGV in this situation.
-	 */
-	if (do_setcontext(ctx, regs, 1)) {
-		force_sig(SIGSEGV, current);
-		goto out;
-	}
-
-	/*
-	 * It's not clear whether or why it is desirable to save the
-	 * sigaltstack setting on signal delivery and restore it on
-	 * signal return. But other architectures do this and we have
-	 * always done it up until now so it is probably better not to
-	 * change it. -- paulus
-	 */
-	do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]);
-
-	sigreturn_exit(regs);
-	/* doesn't actually return back to here */
-
- out:
-	return 0;
-}
-
-/*
- * OK, we're invoking a handler
- */
-static void
-handle_signal(unsigned long sig, struct k_sigaction *ka,
-	      siginfo_t *info, sigset_t *oldset, struct pt_regs * regs,
-	      unsigned long newsp)
-{
-	struct sigcontext __user *sc;
-	struct sigregs __user *frame;
-	unsigned long origsp = newsp;
-
-	/* Set up Signal Frame */
-	newsp -= sizeof(struct sigregs);
-	frame = (struct sigregs __user *) newsp;
-
-	/* Put a sigcontext on the stack */
-	newsp -= sizeof(*sc);
-	sc = (struct sigcontext __user *) newsp;
-
-	/* create a stack frame for the caller of the handler */
-	newsp -= __SIGNAL_FRAMESIZE;
-
-	if (!access_ok(VERIFY_WRITE, (void __user *) newsp, origsp - newsp))
-		goto badframe;
-
-#if _NSIG != 64
-#error "Please adjust handle_signal()"
-#endif
-	if (__put_user((unsigned long) ka->sa.sa_handler, &sc->handler)
-	    || __put_user(oldset->sig[0], &sc->oldmask)
-	    || __put_user(oldset->sig[1], &sc->_unused[3])
-	    || __put_user((struct pt_regs __user *)frame, &sc->regs)
-	    || __put_user(sig, &sc->signal))
-		goto badframe;
-
-	if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
-		goto badframe;
-
-	if (put_user(regs->gpr[1], (unsigned long __user *)newsp))
-		goto badframe;
-	regs->gpr[1] = newsp;
-	regs->gpr[3] = sig;
-	regs->gpr[4] = (unsigned long) sc;
-	regs->nip = (unsigned long) ka->sa.sa_handler;
-	regs->link = (unsigned long) frame->mctx.tramp;
-	regs->trap = 0;
-
-	return;
-
-badframe:
-#ifdef DEBUG_SIG
-	printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
-	       regs, frame, newsp);
-#endif
-	force_sigsegv(sig, current);
-}
-
-/*
- * Do a signal return; undo the signal stack.
- */
-int sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
-		  struct pt_regs *regs)
-{
-	struct sigcontext __user *sc;
-	struct sigcontext sigctx;
-	struct mcontext __user *sr;
-	sigset_t set;
-
-	/* Always make any pending restarted system calls return -EINTR */
-	current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
-	sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
-	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
-		goto badframe;
-
-	set.sig[0] = sigctx.oldmask;
-	set.sig[1] = sigctx._unused[3];
-	restore_sigmask(&set);
-
-	sr = (struct mcontext __user *) sigctx.regs;
-	if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
-	    || restore_user_regs(regs, sr, 1))
-		goto badframe;
-
-	sigreturn_exit(regs);		/* doesn't return */
-	return 0;
-
-badframe:
-	force_sig(SIGSEGV, current);
-	return 0;
-}
-
-/*
- * Note that 'init' is a special process: it doesn't get signals it doesn't
- * want to handle. Thus you cannot kill init even with a SIGKILL even by
- * mistake.
- */
-int do_signal(sigset_t *oldset, struct pt_regs *regs)
-{
-	siginfo_t info;
-	struct k_sigaction ka;
-	unsigned long frame, newsp;
-	int signr, ret;
-
-	if (try_to_freeze()) {
-		signr = 0;
-		if (!signal_pending(current))
-			goto no_signal;
-	}
-
-	if (!oldset)
-		oldset = &current->blocked;
-
-	newsp = frame = 0;
-
-	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
- no_signal:
-	if (TRAP(regs) == 0x0C00		/* System Call! */
-	    && regs->ccr & 0x10000000		/* error signalled */
-	    && ((ret = regs->gpr[3]) == ERESTARTSYS
-		|| ret == ERESTARTNOHAND || ret == ERESTARTNOINTR
-		|| ret == ERESTART_RESTARTBLOCK)) {
-
-		if (signr > 0
-		    && (ret == ERESTARTNOHAND || ret == ERESTART_RESTARTBLOCK
-			|| (ret == ERESTARTSYS
-			    && !(ka.sa.sa_flags & SA_RESTART)))) {
-			/* make the system call return an EINTR error */
-			regs->result = -EINTR;
-			regs->gpr[3] = EINTR;
-			/* note that the cr0.SO bit is already set */
-		} else {
-			regs->nip -= 4;	/* Back up & retry system call */
-			regs->result = 0;
-			regs->trap = 0;
-			if (ret == ERESTART_RESTARTBLOCK)
-				regs->gpr[0] = __NR_restart_syscall;
-			else
-				regs->gpr[3] = regs->orig_gpr3;
-		}
-	}
-
-	if (signr == 0)
-		return 0;		/* no signals delivered */
-
-	if ((ka.sa.sa_flags & SA_ONSTACK) && current->sas_ss_size
-	    && !on_sig_stack(regs->gpr[1]))
-		newsp = current->sas_ss_sp + current->sas_ss_size;
-	else
-		newsp = regs->gpr[1];
-	newsp &= ~0xfUL;
-
-	/* Whee! Actually deliver the signal. */
-	if (ka.sa.sa_flags & SA_SIGINFO)
-		handle_rt_signal(signr, &ka, &info, oldset, regs, newsp);
-	else
-		handle_signal(signr, &ka, &info, oldset, regs, newsp);
-
-	spin_lock_irq(&current->sighand->siglock);
-	sigorsets(&current->blocked,&current->blocked,&ka.sa.sa_mask);
-	if (!(ka.sa.sa_flags & SA_NODEFER))
-		sigaddset(&current->blocked, signr);
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
-	return 1;
-}
-