author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/ia64/kernel/signal.c
Linux-2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/ia64/kernel/signal.c')
-rw-r--r--	arch/ia64/kernel/signal.c	691
1 files changed, 691 insertions, 0 deletions
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
new file mode 100644
index 000000000000..6891d86937d9
--- /dev/null
+++ b/arch/ia64/kernel/signal.c
@@ -0,0 +1,691 @@
/*
 * Architecture-specific signal handling support.
 *
 * Copyright (C) 1999-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Derived from i386 and Alpha versions.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/unistd.h>
#include <linux/wait.h>

#include <asm/ia32.h>
#include <asm/intrinsics.h>
#include <asm/uaccess.h>
#include <asm/rse.h>
#include <asm/sigcontext.h>

#include "sigframe.h"

#define DEBUG_SIG	0
#define STACK_ALIGN	16	/* minimal alignment for stack pointer */
#define _BLOCKABLE	(~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

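/*
 * Copy a sigset_t to/from user space: a multi-word sigset_t needs a full
 * __copy_{to,from}_user(), while the common single-word case gets by with a
 * single __put_user()/__get_user().
 */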
#if _NSIG_WORDS > 1
# define PUT_SIGSET(k,u)	__copy_to_user((u)->sig, (k)->sig, sizeof(sigset_t))
# define GET_SIGSET(k,u)	__copy_from_user((k)->sig, (u)->sig, sizeof(sigset_t))
#else
# define PUT_SIGSET(k,u)	__put_user((k)->sig[0], &(u)->sig[0])
# define GET_SIGSET(k,u)	__get_user((k)->sig[0], &(u)->sig[0])
#endif

long
ia64_rt_sigsuspend (sigset_t __user *uset, size_t sigsetsize, struct sigscratch *scr)
{
	sigset_t oldset, set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (!access_ok(VERIFY_READ, uset, sigsetsize))
		return -EFAULT;

	if (GET_SIGSET(&set, uset))
		return -EFAULT;

	sigdelsetmask(&set, ~_BLOCKABLE);

	spin_lock_irq(&current->sighand->siglock);
	{
		oldset = current->blocked;
		current->blocked = set;
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);

	/*
	 * The return below usually returns to the signal handler.  We need to
	 * pre-set the correct error code here to ensure that the right values
	 * get saved in sigcontext by ia64_do_signal.
	 */
	scr->pt.r8 = EINTR;
	scr->pt.r10 = -1;

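	/*
	 * Sleep in TASK_INTERRUPTIBLE until ia64_do_signal() reports that a
	 * handler frame has been set up; only then does sigsuspend() return.
	 */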
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		if (ia64_do_signal(&oldset, scr, 1))
			return -EINTR;
	}
}

asmlinkage long
sys_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, long arg2,
		 long arg3, long arg4, long arg5, long arg6, long arg7,
		 struct pt_regs regs)
{
	return do_sigaltstack(uss, uoss, regs.r12);
}

static long
restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
{
	unsigned long ip, flags, nat, um, cfm;
	long err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/* restore scratch state that always gets updated during signal delivery: */
	err = __get_user(flags, &sc->sc_flags);
	err |= __get_user(nat, &sc->sc_nat);
	err |= __get_user(ip, &sc->sc_ip);			/* instruction pointer */
	err |= __get_user(cfm, &sc->sc_cfm);
	err |= __get_user(um, &sc->sc_um);			/* user mask */
	err |= __get_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
	err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat);
	err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);
	err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
	err |= __get_user(scr->pt.pr, &sc->sc_pr);		/* predicates */
	err |= __get_user(scr->pt.b0, &sc->sc_br[0]);		/* b0 (rp) */
	err |= __get_user(scr->pt.b6, &sc->sc_br[6]);		/* b6 */
	err |= __copy_from_user(&scr->pt.r1, &sc->sc_gr[1], 8);		/* r1 */
	err |= __copy_from_user(&scr->pt.r8, &sc->sc_gr[8], 4*8);	/* r8-r11 */
	err |= __copy_from_user(&scr->pt.r12, &sc->sc_gr[12], 2*8);	/* r12-r13 */
	err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8);	/* r15 */

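	/* bit 63 of cr.ifs is its "valid" bit (see the syscall test in setup_sigcontext()) */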
	scr->pt.cr_ifs = cfm | (1UL << 63);

	/* establish new instruction pointer: */
	scr->pt.cr_iip = ip & ~0x3UL;
	ia64_psr(&scr->pt)->ri = ip & 0x3;
	scr->pt.cr_ipsr = (scr->pt.cr_ipsr & ~IA64_PSR_UM) | (um & IA64_PSR_UM);

	scr->scratch_unat = ia64_put_scratch_nat_bits(&scr->pt, nat);

	if (!(flags & IA64_SC_FLAG_IN_SYSCALL)) {
		/* Restore most scratch-state only when not in syscall. */
		err |= __get_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);		/* ar.ccv */
		err |= __get_user(scr->pt.b7, &sc->sc_br[7]);			/* b7 */
		err |= __get_user(scr->pt.r14, &sc->sc_gr[14]);			/* r14 */
		err |= __copy_from_user(&scr->pt.ar_csd, &sc->sc_ar25, 2*8);	/* ar.csd & ar.ssd */
		err |= __copy_from_user(&scr->pt.r2, &sc->sc_gr[2], 2*8);	/* r2-r3 */
		err |= __copy_from_user(&scr->pt.r16, &sc->sc_gr[16], 16*8);	/* r16-r31 */
	}

	if ((flags & IA64_SC_FLAG_FPH_VALID) != 0) {
		struct ia64_psr *psr = ia64_psr(&scr->pt);

		__copy_from_user(current->thread.fph, &sc->sc_fr[32], 96*16);
		psr->mfh = 0;	/* drop signal handler's fph contents... */
		if (psr->dfh)
			ia64_drop_fpu(current);
		else {
			/* We already own the local fph, otherwise psr->dfh wouldn't be 0.  */
			__ia64_load_fpu(current->thread.fph);
			ia64_set_local_fpu_owner(current);
		}
	}
	return err;
}

int
copy_siginfo_to_user (siginfo_t __user *to, siginfo_t *from)
{
	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0) {
		if (__copy_to_user(to, from, sizeof(siginfo_t)))
			return -EFAULT;
		return 0;
	} else {
		int err;

		/*
		 * If you change siginfo_t structure, please be sure this code is fixed
		 * accordingly.  It should never copy any pad contained in the structure
		 * to avoid security leaks, but must copy the generic 3 ints plus the
		 * relevant union member.
		 */
		err = __put_user(from->si_signo, &to->si_signo);
		err |= __put_user(from->si_errno, &to->si_errno);
		err |= __put_user((short)from->si_code, &to->si_code);
		switch (from->si_code >> 16) {
		      case __SI_FAULT >> 16:
			err |= __put_user(from->si_flags, &to->si_flags);
			err |= __put_user(from->si_isr, &to->si_isr);
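			/* fall through: fault siginfo also carries si_addr and si_imm */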
		      case __SI_POLL >> 16:
			err |= __put_user(from->si_addr, &to->si_addr);
			err |= __put_user(from->si_imm, &to->si_imm);
			break;
		      case __SI_TIMER >> 16:
			err |= __put_user(from->si_tid, &to->si_tid);
			err |= __put_user(from->si_overrun, &to->si_overrun);
			err |= __put_user(from->si_ptr, &to->si_ptr);
			break;
		      case __SI_RT >> 16:	/* Not generated by the kernel as of now.  */
		      case __SI_MESGQ >> 16:
			err |= __put_user(from->si_uid, &to->si_uid);
			err |= __put_user(from->si_pid, &to->si_pid);
			err |= __put_user(from->si_ptr, &to->si_ptr);
			break;
		      case __SI_CHLD >> 16:
			err |= __put_user(from->si_utime, &to->si_utime);
			err |= __put_user(from->si_stime, &to->si_stime);
			err |= __put_user(from->si_status, &to->si_status);
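			/* fall through: CHLD siginfo also carries si_pid and si_uid */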
		      default:
			err |= __put_user(from->si_uid, &to->si_uid);
			err |= __put_user(from->si_pid, &to->si_pid);
			break;
		}
		return err;
	}
}

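/*
 * The sigframe is found at sp + 16: setup_frame() below sets the new stack
 * pointer to the frame address minus 16, presumably the scratch area that the
 * ia64 software conventions reserve below the stack pointer.
 */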
long
ia64_rt_sigreturn (struct sigscratch *scr)
{
	extern char ia64_strace_leave_kernel, ia64_leave_kernel;
	struct sigcontext __user *sc;
	struct siginfo si;
	sigset_t set;
	long retval;

	sc = &((struct sigframe __user *) (scr->pt.r12 + 16))->sc;

	/*
	 * When we return to the previously executing context, r8 and r10 have already
	 * been setup the way we want them.  Indeed, if the signal wasn't delivered while
	 * in a system call, we must not touch r8 or r10 as otherwise user-level state
	 * could be corrupted.
	 */
	retval = (long) &ia64_leave_kernel;
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		/*
		 * strace expects to be notified after sigreturn returns even though the
		 * context to which we return may not be in the middle of a syscall.
		 * Thus, the return-value that strace displays for sigreturn is
		 * meaningless.
		 */
		retval = (long) &ia64_strace_leave_kernel;

	if (!access_ok(VERIFY_READ, sc, sizeof(*sc)))
		goto give_sigsegv;

	if (GET_SIGSET(&set, &sc->sc_mask))
		goto give_sigsegv;

	sigdelsetmask(&set, ~_BLOCKABLE);

	spin_lock_irq(&current->sighand->siglock);
	{
		current->blocked = set;
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(sc, scr))
		goto give_sigsegv;

#if DEBUG_SIG
	printk("SIG return (%s:%d): sp=%lx ip=%lx\n",
	       current->comm, current->pid, scr->pt.r12, scr->pt.cr_iip);
#endif
	/*
	 * It is more difficult to avoid calling this function than to
	 * call it and ignore errors.
	 */
	do_sigaltstack(&sc->sc_stack, NULL, scr->pt.r12);
	return retval;

  give_sigsegv:
	si.si_signo = SIGSEGV;
	si.si_errno = 0;
	si.si_code = SI_KERNEL;
	si.si_pid = current->pid;
	si.si_uid = current->uid;
	si.si_addr = sc;
	force_sig_info(SIGSEGV, &si, current);
	return retval;
}

/*
 * This does just the minimum required setup of sigcontext.
 * Specifically, it only installs data that is either not knowable at
 * the user-level or that gets modified before execution in the
 * trampoline starts.  Everything else is done at the user-level.
 */
static long
setup_sigcontext (struct sigcontext __user *sc, sigset_t *mask, struct sigscratch *scr)
{
	unsigned long flags = 0, ifs, cfm, nat;
	long err;

	ifs = scr->pt.cr_ifs;

	if (on_sig_stack((unsigned long) sc))
		flags |= IA64_SC_FLAG_ONSTACK;
	if ((ifs & (1UL << 63)) == 0)
		/* if cr_ifs doesn't have the valid bit set, we got here through a syscall */
		flags |= IA64_SC_FLAG_IN_SYSCALL;
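	/* the current frame marker takes up the low 38 bits of cr.ifs */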
	cfm = ifs & ((1UL << 38) - 1);
	ia64_flush_fph(current);
	if ((current->thread.flags & IA64_THREAD_FPH_VALID)) {
		flags |= IA64_SC_FLAG_FPH_VALID;
		__copy_to_user(&sc->sc_fr[32], current->thread.fph, 96*16);
	}

	nat = ia64_get_scratch_nat_bits(&scr->pt, scr->scratch_unat);

	err  = __put_user(flags, &sc->sc_flags);
	err |= __put_user(nat, &sc->sc_nat);
	err |= PUT_SIGSET(mask, &sc->sc_mask);
	err |= __put_user(cfm, &sc->sc_cfm);
	err |= __put_user(scr->pt.cr_ipsr & IA64_PSR_UM, &sc->sc_um);
	err |= __put_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
	err |= __put_user(scr->pt.ar_unat, &sc->sc_ar_unat);		/* ar.unat */
	err |= __put_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);		/* ar.fpsr */
	err |= __put_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
	err |= __put_user(scr->pt.pr, &sc->sc_pr);			/* predicates */
	err |= __put_user(scr->pt.b0, &sc->sc_br[0]);			/* b0 (rp) */
	err |= __put_user(scr->pt.b6, &sc->sc_br[6]);			/* b6 */
	err |= __copy_to_user(&sc->sc_gr[1], &scr->pt.r1, 8);		/* r1 */
	err |= __copy_to_user(&sc->sc_gr[8], &scr->pt.r8, 4*8);		/* r8-r11 */
	err |= __copy_to_user(&sc->sc_gr[12], &scr->pt.r12, 2*8);	/* r12-r13 */
	err |= __copy_to_user(&sc->sc_gr[15], &scr->pt.r15, 8);		/* r15 */
	err |= __put_user(scr->pt.cr_iip + ia64_psr(&scr->pt)->ri, &sc->sc_ip);

	if (flags & IA64_SC_FLAG_IN_SYSCALL) {
		/* Clear scratch registers if the signal interrupted a system call. */
		err |= __put_user(0, &sc->sc_ar_ccv);				/* ar.ccv */
		err |= __put_user(0, &sc->sc_br[7]);				/* b7 */
		err |= __put_user(0, &sc->sc_gr[14]);				/* r14 */
		err |= __clear_user(&sc->sc_ar25, 2*8);				/* ar.csd & ar.ssd */
		err |= __clear_user(&sc->sc_gr[2], 2*8);			/* r2-r3 */
		err |= __clear_user(&sc->sc_gr[16], 16*8);			/* r16-r31 */
	} else {
		/* Copy scratch regs to sigcontext if the signal didn't interrupt a syscall. */
		err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);		/* ar.ccv */
		err |= __put_user(scr->pt.b7, &sc->sc_br[7]);			/* b7 */
		err |= __put_user(scr->pt.r14, &sc->sc_gr[14]);			/* r14 */
		err |= __copy_to_user(&sc->sc_ar25, &scr->pt.ar_csd, 2*8);	/* ar.csd & ar.ssd */
		err |= __copy_to_user(&sc->sc_gr[2], &scr->pt.r2, 2*8);		/* r2-r3 */
		err |= __copy_to_user(&sc->sc_gr[16], &scr->pt.r16, 16*8);	/* r16-r31 */
	}
	return err;
}

/*
 * Check whether the register-backing store is already on the signal stack.
 */
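/*
 * The single unsigned comparison below folds both bounds checks into one: it
 * is true exactly when sas_ss_sp <= bsp < sas_ss_sp + sas_ss_size.
 */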
static inline int
rbs_on_sig_stack (unsigned long bsp)
{
	return (bsp - current->sas_ss_sp < current->sas_ss_size);
}

static long
force_sigsegv_info (int sig, void __user *addr)
{
	unsigned long flags;
	struct siginfo si;

	if (sig == SIGSEGV) {
		/*
		 * Acquiring siglock around the sa_handler-update is almost
		 * certainly overkill, but this isn't a performance-critical
		 * path and I'd rather play it safe here than have to debug
		 * a nasty race if and when something changes in
		 * kernel/signal.c that would make it no longer safe to
		 * modify sa_handler without holding the lock.
		 */
		spin_lock_irqsave(&current->sighand->siglock, flags);
		current->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}
	si.si_signo = SIGSEGV;
	si.si_errno = 0;
	si.si_code = SI_KERNEL;
	si.si_pid = current->pid;
	si.si_uid = current->uid;
	si.si_addr = addr;
	force_sig_info(SIGSEGV, &si, current);
	return 0;
}

static long
setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
	     struct sigscratch *scr)
{
	extern char __kernel_sigtramp[];
	unsigned long tramp_addr, new_rbs = 0;
	struct sigframe __user *frame;
	long err;

	frame = (void __user *) scr->pt.r12;
	tramp_addr = (unsigned long) __kernel_sigtramp;
	if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags((unsigned long) frame) == 0) {
		frame = (void __user *) ((current->sas_ss_sp + current->sas_ss_size)
					 & ~(STACK_ALIGN - 1));
		/*
		 * We need to check for the register stack being on the signal stack
		 * separately, because it's switched separately (memory stack is switched
		 * in the kernel, register stack is switched in the signal trampoline).
		 */
		if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
			new_rbs = (current->sas_ss_sp + sizeof(long) - 1) & ~(sizeof(long) - 1);
	}
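	/* carve the frame, rounded up to STACK_ALIGN, off the (possibly switched) stack */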
	frame = (void __user *) frame - ((sizeof(*frame) + STACK_ALIGN - 1) & ~(STACK_ALIGN - 1));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return force_sigsegv_info(sig, frame);

	err  = __put_user(sig, &frame->arg0);
	err |= __put_user(&frame->info, &frame->arg1);
	err |= __put_user(&frame->sc, &frame->arg2);
	err |= __put_user(new_rbs, &frame->sc.sc_rbs_base);
	err |= __put_user(0, &frame->sc.sc_loadrs);	/* initialize to zero */
	err |= __put_user(ka->sa.sa_handler, &frame->handler);

	err |= copy_siginfo_to_user(&frame->info, info);

	err |= __put_user(current->sas_ss_sp, &frame->sc.sc_stack.ss_sp);
	err |= __put_user(current->sas_ss_size, &frame->sc.sc_stack.ss_size);
	err |= __put_user(sas_ss_flags(scr->pt.r12), &frame->sc.sc_stack.ss_flags);
	err |= setup_sigcontext(&frame->sc, set, scr);

	if (unlikely(err))
		return force_sigsegv_info(sig, frame);

	scr->pt.r12 = (unsigned long) frame - 16;	/* new stack pointer */
	scr->pt.ar_fpsr = FPSR_DEFAULT;			/* reset fpsr for signal handler */
	scr->pt.cr_iip = tramp_addr;
	ia64_psr(&scr->pt)->ri = 0;			/* start executing in first slot */
	ia64_psr(&scr->pt)->be = 0;			/* force little-endian byte-order */
	/*
	 * Force the interruption function mask to zero.  This has no effect when a
	 * system-call got interrupted by a signal (since, in that case, scr->pt_cr_ifs is
	 * ignored), but it has the desirable effect of making it possible to deliver a
	 * signal with an incomplete register frame (which happens when a mandatory RSE
	 * load faults).  Furthermore, it has no negative effect on getting the user's
	 * dirty partition preserved, because that's governed by scr->pt.loadrs.
	 */
	scr->pt.cr_ifs = (1UL << 63);

	/*
	 * Note: this affects only the NaT bits of the scratch regs (the ones saved in
	 * pt_regs), which is exactly what we want.
	 */
	scr->scratch_unat = 0;	/* ensure the NaT bit of r12 is clear */

#if DEBUG_SIG
	printk("SIG deliver (%s:%d): sig=%d sp=%lx ip=%lx handler=%p\n",
	       current->comm, current->pid, sig, scr->pt.r12, frame->sc.sc_ip, frame->handler);
#endif
	return 1;
}

static long
handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset,
	       struct sigscratch *scr)
{
	if (IS_IA32_PROCESS(&scr->pt)) {
		/* send signal to IA-32 process */
		if (!ia32_setup_frame1(sig, ka, info, oldset, &scr->pt))
			return 0;
	} else
		/* send signal to IA-64 process */
		if (!setup_frame(sig, ka, info, oldset, scr))
			return 0;

	if (!(ka->sa.sa_flags & SA_NODEFER)) {
		spin_lock_irq(&current->sighand->siglock);
		{
			sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
			sigaddset(&current->blocked, sig);
			recalc_sigpending();
		}
		spin_unlock_irq(&current->sighand->siglock);
	}
	return 1;
}

/*
 * Note that `init' is a special process: it doesn't get signals it doesn't want to
 * handle.  Thus you cannot kill init with SIGKILL, even by mistake.
 */
long
ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
{
	struct k_sigaction ka;
	siginfo_t info;
	long restart = in_syscall;
	long errno = scr->pt.r8;
#	define ERR_CODE(c)	(IS_IA32_PROCESS(&scr->pt) ? -(c) : (c))
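	/*
	 * ERR_CODE() reflects the two error conventions in play here: IA-32
	 * processes expect a negative errno in the return register, whereas
	 * native ia64 processes get a positive errno with r10 set to -1.
	 */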

	/*
	 * In the ia64_leave_kernel code path, we want the common case to go fast, which
	 * is why we may in certain cases get here from kernel mode.  Just return without
	 * doing anything if so.
	 */
	if (!user_mode(&scr->pt))
		return 0;

	if (!oldset)
		oldset = &current->blocked;

	/*
	 * This only loops in the rare cases of handle_signal() failing, in which case we
	 * need to push through a forced SIGSEGV.
	 */
	while (1) {
		int signr = get_signal_to_deliver(&info, &ka, &scr->pt, NULL);

		/*
		 * get_signal_to_deliver() may have run a debugger (via notify_parent())
		 * and the debugger may have modified the state (e.g., to arrange for an
		 * inferior call), thus it's important to check for restarting _after_
		 * get_signal_to_deliver().
		 */
		if (IS_IA32_PROCESS(&scr->pt)) {
			if (in_syscall) {
				if (errno >= 0)
					restart = 0;
				else
					errno = -errno;
			}
		} else if ((long) scr->pt.r10 != -1)
			/*
			 * A system call has to be restarted only if one of the error codes
			 * ERESTARTNOHAND, ERESTARTSYS, or ERESTARTNOINTR is returned.  If r10
			 * isn't -1 then r8 doesn't hold an error code and we don't need to
			 * restart the syscall, so we can clear the "restart" flag here.
			 */
			restart = 0;

		if (signr <= 0)
			break;

		if (unlikely(restart)) {
			switch (errno) {
			      case ERESTART_RESTARTBLOCK:
			      case ERESTARTNOHAND:
				scr->pt.r8 = ERR_CODE(EINTR);
				/* note: scr->pt.r10 is already -1 */
				break;

			      case ERESTARTSYS:
				if ((ka.sa.sa_flags & SA_RESTART) == 0) {
					scr->pt.r8 = ERR_CODE(EINTR);
					/* note: scr->pt.r10 is already -1 */
					break;
				}
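				/* fall through: SA_RESTART is set, so restart the syscall */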
			      case ERESTARTNOINTR:
				if (IS_IA32_PROCESS(&scr->pt)) {
					scr->pt.r8 = scr->pt.r1;
					scr->pt.cr_iip -= 2;
				} else
					ia64_decrement_ip(&scr->pt);
				restart = 0;	/* don't restart twice if handle_signal() fails... */
			}
		}

		/*
		 * Whee!  Actually deliver the signal.  If the delivery failed, we need to
		 * continue to iterate in this loop so we can deliver the SIGSEGV...
		 */
		if (handle_signal(signr, &ka, &info, oldset, scr))
			return 1;
	}

	/* Did we come from a system call? */
	if (restart) {
		/* Restart the system call - no handlers present */
		if (errno == ERESTARTNOHAND || errno == ERESTARTSYS || errno == ERESTARTNOINTR
		    || errno == ERESTART_RESTARTBLOCK)
		{
			if (IS_IA32_PROCESS(&scr->pt)) {
				scr->pt.r8 = scr->pt.r1;
				scr->pt.cr_iip -= 2;
				if (errno == ERESTART_RESTARTBLOCK)
					scr->pt.r8 = 0;	/* x86 version of __NR_restart_syscall */
			} else {
				/*
				 * Note: the syscall number is in r15 which is saved in
				 * pt_regs so all we need to do here is adjust ip so that
				 * the "break" instruction gets re-executed.
				 */
				ia64_decrement_ip(&scr->pt);
				if (errno == ERESTART_RESTARTBLOCK)
					scr->pt.r15 = __NR_restart_syscall;
			}
		}
	}
	return 0;
}

/* Set a delayed signal that was detected in MCA/INIT/NMI/PMI context where it
 * could not be delivered.  It is important that the target process is not
 * allowed to do any more work in user space.  Possible cases for the target
 * process:
 *
 * - It is sleeping and will wake up soon.  Store the data in the current task,
 *   the signal will be sent when the current task returns from the next
 *   interrupt.
 *
 * - It is running in user context.  Store the data in the current task, the
 *   signal will be sent when the current task returns from the next interrupt.
 *
 * - It is running in kernel context on this or another cpu and will return to
 *   user context.  Store the data in the target task, the signal will be sent
 *   to itself when the target task returns to user space.
 *
 * - It is running in kernel context on this cpu and will sleep before
 *   returning to user context.  Because this is also the current task, the
 *   signal will not get delivered and the task could sleep indefinitely.
 *   Store the data in the idle task for this cpu, the signal will be sent
 *   after the idle task processes its next interrupt.
 *
 * To cover all cases, store the data in the target task, the current task and
 * the idle task on this cpu.  Whatever happens, the signal will be delivered
 * to the target task before it can do any useful user space work.  Multiple
 * deliveries have no unwanted side effects.
 *
 * Note: This code is executed in MCA/INIT/NMI/PMI context, with interrupts
 * disabled.  It must not take any locks nor use kernel structures or services
 * that require locks.
 */

/* To ensure that we get the right pid, check its start time.  To avoid extra
 * include files in thread_info.h, convert the task start_time to unsigned long,
 * giving us a cycle time of > 580 years.
 */
static inline unsigned long
start_time_ul(const struct task_struct *t)
{
	return t->start_time.tv_sec * NSEC_PER_SEC + t->start_time.tv_nsec;
}

void
set_sigdelayed(pid_t pid, int signo, int code, void __user *addr)
{
	struct task_struct *t;
	unsigned long start_time = 0;
	int i;

	for (i = 1; i <= 3; ++i) {
		switch (i) {
		case 1:
			t = find_task_by_pid(pid);
			if (t)
				start_time = start_time_ul(t);
			break;
		case 2:
			t = current;
			break;
		default:
			t = idle_task(smp_processor_id());
			break;
		}

		if (!t)
			return;
		t->thread_info->sigdelayed.signo = signo;
		t->thread_info->sigdelayed.code = code;
		t->thread_info->sigdelayed.addr = addr;
		t->thread_info->sigdelayed.start_time = start_time;
		t->thread_info->sigdelayed.pid = pid;
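		/* make the sigdelayed data visible before the flag that announces it: */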
		wmb();
		set_tsk_thread_flag(t, TIF_SIGDELAYED);
	}
}

/* Called from entry.S when it detects TIF_SIGDELAYED, a delayed signal that
 * was detected in MCA/INIT/NMI/PMI context where it could not be delivered.
 */

void
do_sigdelayed(void)
{
	struct siginfo siginfo;
	pid_t pid;
	struct task_struct *t;

	clear_thread_flag(TIF_SIGDELAYED);
	memset(&siginfo, 0, sizeof(siginfo));
	siginfo.si_signo = current_thread_info()->sigdelayed.signo;
	siginfo.si_code = current_thread_info()->sigdelayed.code;
	siginfo.si_addr = current_thread_info()->sigdelayed.addr;
	pid = current_thread_info()->sigdelayed.pid;
	t = find_task_by_pid(pid);
	if (!t)
		return;
	if (current_thread_info()->sigdelayed.start_time != start_time_ul(t))
		return;
	force_sig_info(siginfo.si_signo, &siginfo, t);
}