| author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
| commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch) | |
| tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d | /arch/ia64/kernel/ptrace.c |
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/ia64/kernel/ptrace.c')
| -rw-r--r-- | arch/ia64/kernel/ptrace.c | 1627 |
1 file changed, 1627 insertions(+), 0 deletions(-)
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
new file mode 100644
index 000000000000..55789fcd7210
--- /dev/null
+++ b/arch/ia64/kernel/ptrace.c
@@ -0,0 +1,1627 @@
1 | /* | ||
2 | * Kernel support for the ptrace() and syscall tracing interfaces. | ||
3 | * | ||
4 | * Copyright (C) 1999-2005 Hewlett-Packard Co | ||
5 | * David Mosberger-Tang <davidm@hpl.hp.com> | ||
6 | * | ||
7 | * Derived from the x86 and Alpha versions. | ||
8 | */ | ||
9 | #include <linux/config.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/errno.h> | ||
15 | #include <linux/ptrace.h> | ||
16 | #include <linux/smp_lock.h> | ||
17 | #include <linux/user.h> | ||
18 | #include <linux/security.h> | ||
19 | #include <linux/audit.h> | ||
20 | |||
21 | #include <asm/pgtable.h> | ||
22 | #include <asm/processor.h> | ||
23 | #include <asm/ptrace_offsets.h> | ||
24 | #include <asm/rse.h> | ||
25 | #include <asm/system.h> | ||
26 | #include <asm/uaccess.h> | ||
27 | #include <asm/unwind.h> | ||
28 | #ifdef CONFIG_PERFMON | ||
29 | #include <asm/perfmon.h> | ||
30 | #endif | ||
31 | |||
32 | #include "entry.h" | ||
33 | |||
34 | /* | ||
35 | * Bits in the PSR that we allow ptrace() to change: | ||
36 | * be, up, ac, mfl, mfh (the user mask; five bits total) | ||
37 | * db (debug breakpoint fault; one bit) | ||
38 | * id (instruction debug fault disable; one bit) | ||
39 | * dd (data debug fault disable; one bit) | ||
40 | * ri (restart instruction; two bits) | ||
41 | * is (instruction set; one bit) | ||
42 | */ | ||
43 | #define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS \ | ||
44 | | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI) | ||
45 | |||
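As a tracer-side illustration of this mask (a hedged sketch, not part of the kernel source): a debugger can flip one of the writable user-mask bits through the PT_CR_IPSR user-area offset, and writes to any bit outside IPSR_MASK are silently dropped by access_uarea() later in this file. The helper name is mine; psr.ac sitting at bit 3 of the PSR is an assumption taken from the IA-64 architecture manual.

```c
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace_offsets.h>	/* PT_CR_IPSR */

/* Set psr.ac in a ptrace-stopped child so its unaligned accesses fault.
 * Only bits covered by IPSR_MASK take effect; all others are preserved
 * by the kernel.  Error handling omitted for brevity. */
static long enable_psr_ac(pid_t pid)
{
	long psr = ptrace(PTRACE_PEEKUSER, pid, PT_CR_IPSR, 0);

	return ptrace(PTRACE_POKEUSER, pid, PT_CR_IPSR, psr | (1UL << 3));
}
```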
46 | #define MASK(nbits) ((1UL << (nbits)) - 1) /* mask with NBITS bits set */ | ||
47 | #define PFM_MASK MASK(38) | ||
48 | |||
49 | #define PTRACE_DEBUG 0 | ||
50 | |||
51 | #if PTRACE_DEBUG | ||
52 | # define dprintk(format...) printk(format) | ||
53 | # define inline | ||
54 | #else | ||
55 | # define dprintk(format...) | ||
56 | #endif | ||
57 | |||
58 | /* Return TRUE if PT was created due to kernel-entry via a system-call. */ | ||
59 | |||
60 | static inline int | ||
61 | in_syscall (struct pt_regs *pt) | ||
62 | { | ||
63 | return (long) pt->cr_ifs >= 0; | ||
64 | } | ||
65 | |||
66 | /* | ||
67 | * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT | ||
68 | * bitset where bit i is set iff the NaT bit of register i is set. | ||
69 | */ | ||
70 | unsigned long | ||
71 | ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat) | ||
72 | { | ||
73 | # define GET_BITS(first, last, unat) \ | ||
74 | ({ \ | ||
75 | unsigned long bit = ia64_unat_pos(&pt->r##first); \ | ||
76 | unsigned long nbits = (last - first + 1); \ | ||
77 | unsigned long mask = MASK(nbits) << first; \ | ||
78 | unsigned long dist; \ | ||
79 | if (bit < first) \ | ||
80 | dist = 64 + bit - first; \ | ||
81 | else \ | ||
82 | dist = bit - first; \ | ||
83 | ia64_rotr(unat, dist) & mask; \ | ||
84 | }) | ||
85 | unsigned long val; | ||
86 | |||
87 | /* | ||
88 | * Registers that are stored consecutively in struct pt_regs | ||
89 | * can be handled in parallel. If the register order in | ||
90 | * struct pt_regs changes, this code MUST be updated. | ||
91 | */ | ||
92 | val = GET_BITS( 1, 1, scratch_unat); | ||
93 | val |= GET_BITS( 2, 3, scratch_unat); | ||
94 | val |= GET_BITS(12, 13, scratch_unat); | ||
95 | val |= GET_BITS(14, 14, scratch_unat); | ||
96 | val |= GET_BITS(15, 15, scratch_unat); | ||
97 | val |= GET_BITS( 8, 11, scratch_unat); | ||
98 | val |= GET_BITS(16, 31, scratch_unat); | ||
99 | return val; | ||
100 | |||
101 | # undef GET_BITS | ||
102 | } | ||
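The GET_BITS() rotation is easiest to see with concrete numbers. Below is a stand-alone sketch under stated assumptions: a hypothetical layout where the UNaT bit of r16 lives at bit 3 of scratch_unat, and a plain-C rotate standing in for the ia64_rotr() intrinsic.

```c
#include <stdio.h>

#define MASK(nbits)	((1UL << (nbits)) - 1)

/* plain-C stand-in for the ia64_rotr() intrinsic used by GET_BITS() */
static unsigned long rotr(unsigned long x, unsigned long n)
{
	return (x >> n) | (x << (64 - n));
}

int main(void)
{
	/* Hypothetical layout: r16's UNaT bit sits at bit 3 of
	 * scratch_unat.  Since 3 < 16, GET_BITS() rotates right by
	 * dist = 64 + 3 - 16 = 51, moving bit 3 to bit position 16,
	 * then masks off everything outside the r16-r31 window. */
	unsigned long scratch_unat = 1UL << 3;
	unsigned long val = rotr(scratch_unat, 51) & (MASK(16) << 16);

	printf("%#lx\n", val);	/* prints 0x10000: the NaT bit of r16 */
	return 0;
}
```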
103 | |||
104 | /* | ||
105 | * Set the NaT bits for the scratch registers according to NAT and | ||
106 | * return the resulting unat (assuming the scratch registers are | ||
107 | * stored in PT). | ||
108 | */ | ||
109 | unsigned long | ||
110 | ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat) | ||
111 | { | ||
112 | # define PUT_BITS(first, last, nat) \ | ||
113 | ({ \ | ||
114 | unsigned long bit = ia64_unat_pos(&pt->r##first); \ | ||
115 | unsigned long nbits = (last - first + 1); \ | ||
116 | unsigned long mask = MASK(nbits) << first; \ | ||
117 | long dist; \ | ||
118 | if (bit < first) \ | ||
119 | dist = 64 + bit - first; \ | ||
120 | else \ | ||
121 | dist = bit - first; \ | ||
122 | ia64_rotl(nat & mask, dist); \ | ||
123 | }) | ||
124 | unsigned long scratch_unat; | ||
125 | |||
126 | /* | ||
127 | * Registers that are stored consecutively in struct pt_regs | ||
128 | * can be handled in parallel. If the register order in | ||
129 | * struct pt_regs changes, this code MUST be updated. | ||
130 | */ | ||
131 | scratch_unat = PUT_BITS( 1, 1, nat); | ||
132 | scratch_unat |= PUT_BITS( 2, 3, nat); | ||
133 | scratch_unat |= PUT_BITS(12, 13, nat); | ||
134 | scratch_unat |= PUT_BITS(14, 14, nat); | ||
135 | scratch_unat |= PUT_BITS(15, 15, nat); | ||
136 | scratch_unat |= PUT_BITS( 8, 11, nat); | ||
137 | scratch_unat |= PUT_BITS(16, 31, nat); | ||
138 | |||
139 | return scratch_unat; | ||
140 | |||
141 | # undef PUT_BITS | ||
142 | } | ||
143 | |||
144 | #define IA64_MLX_TEMPLATE 0x2 | ||
145 | #define IA64_MOVL_OPCODE 6 | ||
146 | |||
147 | void | ||
148 | ia64_increment_ip (struct pt_regs *regs) | ||
149 | { | ||
150 | unsigned long w0, ri = ia64_psr(regs)->ri + 1; | ||
151 | |||
152 | if (ri > 2) { | ||
153 | ri = 0; | ||
154 | regs->cr_iip += 16; | ||
155 | } else if (ri == 2) { | ||
156 | get_user(w0, (char __user *) regs->cr_iip + 0); | ||
157 | if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) { | ||
158 | /* | ||
159 | * rfi'ing to slot 2 of an MLX bundle causes | ||
160 | * an illegal operation fault. We don't want | ||
161 | * that to happen... | ||
162 | */ | ||
163 | ri = 0; | ||
164 | regs->cr_iip += 16; | ||
165 | } | ||
166 | } | ||
167 | ia64_psr(regs)->ri = ri; | ||
168 | } | ||
169 | |||
170 | void | ||
171 | ia64_decrement_ip (struct pt_regs *regs) | ||
172 | { | ||
173 | unsigned long w0, ri = ia64_psr(regs)->ri - 1; | ||
174 | |||
175 | if (ia64_psr(regs)->ri == 0) { | ||
176 | regs->cr_iip -= 16; | ||
177 | ri = 2; | ||
178 | get_user(w0, (char __user *) regs->cr_iip + 0); | ||
179 | if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) { | ||
180 | /* | ||
181 | * rfi'ing to slot 2 of an MLX bundle causes | ||
182 | * an illegal operation fault. We don't want | ||
183 | * that to happen... | ||
184 | */ | ||
185 | ri = 1; | ||
186 | } | ||
187 | } | ||
188 | ia64_psr(regs)->ri = ri; | ||
189 | } | ||
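Both helpers implement the same (cr.iip, psr.ri) arithmetic: an IA-64 bundle is 16 bytes wide with three instruction slots, and slot 2 of an MLX bundle holds the immediate half of a movl rather than an executable instruction. A condensed sketch of the forward case (hypothetical helper, not kernel code):

```c
/* Advance a (bundle address, slot) pair by one instruction.  Slot 2 of
 * an MLX bundle is skipped, mirroring the special case handled above. */
static void ip_advance(unsigned long *iip, unsigned long *ri, int is_mlx)
{
	if (++*ri > 2 || (*ri == 2 && is_mlx)) {
		*ri = 0;	/* wrap to slot 0 of the next bundle */
		*iip += 16;
	}
}
```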
190 | |||
191 | /* | ||
192 | * This routine is used to read the rnat bits that are stored on the | ||
193 | * kernel backing store. Since, in general, the alignments of the user | ||
194 | * and kernel backing stores differ, this is not completely trivial. In | ||
195 | * essence, we need to construct the user RNAT based on up to two | ||
196 | * kernel RNAT values and/or the RNAT value saved in the child's | ||
197 | * pt_regs. | ||
198 | * | ||
199 | * user rbs | ||
200 | * | ||
201 | * +--------+ <-- lowest address | ||
202 | * | slot62 | | ||
203 | * +--------+ | ||
204 | * | rnat | 0x....1f8 | ||
205 | * +--------+ | ||
206 | * | slot00 | \ | ||
207 | * +--------+ | | ||
208 | * | slot01 | > child_regs->ar_rnat | ||
209 | * +--------+ | | ||
210 | * | slot02 | / kernel rbs | ||
211 | * +--------+ +--------+ | ||
212 | * <- child_regs->ar_bspstore | slot61 | <-- krbs | ||
213 | * +- - - - + +--------+ | ||
214 | * | slot62 | | ||
215 | * +- - - - + +--------+ | ||
216 | * | rnat | | ||
217 | * +- - - - + +--------+ | ||
218 | * vrnat | slot00 | | ||
219 | * +- - - - + +--------+ | ||
220 | * = = | ||
221 | * +--------+ | ||
222 | * | slot00 | \ | ||
223 | * +--------+ | | ||
224 | * | slot01 | > child_stack->ar_rnat | ||
225 | * +--------+ | | ||
226 | * | slot02 | / | ||
227 | * +--------+ | ||
228 | * <--- child_stack->ar_bspstore | ||
229 | * | ||
230 | * The way to think of this code is as follows: bit 0 in the user rnat | ||
231 | * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat | ||
232 | * value. The kernel rnat value holding this bit is stored in | ||
233 | * variable rnat0. rnat1 is loaded with the kernel rnat value that | ||
234 | * forms the upper bits of the user rnat value. | ||
235 | * | ||
236 | * Boundary cases: | ||
237 | * | ||
238 | * o when reading the rnat "below" the first rnat slot on the kernel | ||
239 | * backing store, rnat0/rnat1 are set to 0 and the low order bits are | ||
240 | * merged in from pt->ar_rnat. | ||
241 | * | ||
242 | * o when reading the rnat "above" the last rnat slot on the kernel | ||
243 | * backing store, rnat0/rnat1 get their values from sw->ar_rnat. | ||
244 | */ | ||
245 | static unsigned long | ||
246 | get_rnat (struct task_struct *task, struct switch_stack *sw, | ||
247 | unsigned long *krbs, unsigned long *urnat_addr, | ||
248 | unsigned long *urbs_end) | ||
249 | { | ||
250 | unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr; | ||
251 | unsigned long umask = 0, mask, m; | ||
252 | unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift; | ||
253 | long num_regs, nbits; | ||
254 | struct pt_regs *pt; | ||
255 | |||
256 | pt = ia64_task_regs(task); | ||
257 | kbsp = (unsigned long *) sw->ar_bspstore; | ||
258 | ubspstore = (unsigned long *) pt->ar_bspstore; | ||
259 | |||
260 | if (urbs_end < urnat_addr) | ||
261 | nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end); | ||
262 | else | ||
263 | nbits = 63; | ||
264 | mask = MASK(nbits); | ||
265 | /* | ||
266 | * First, figure out which bit number slot 0 in user-land maps | ||
267 | * to in the kernel rnat. Do this by figuring out how many | ||
268 | * register slots we're beyond the user's backingstore and | ||
269 | * then computing the equivalent address in kernel space. | ||
270 | */ | ||
271 | num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1); | ||
272 | slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs); | ||
273 | shift = ia64_rse_slot_num(slot0_kaddr); | ||
274 | rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr); | ||
275 | rnat0_kaddr = rnat1_kaddr - 64; | ||
276 | |||
277 | if (ubspstore + 63 > urnat_addr) { | ||
278 | /* some bits need to be merged in from pt->ar_rnat */ | ||
279 | umask = MASK(ia64_rse_slot_num(ubspstore)) & mask; | ||
280 | urnat = (pt->ar_rnat & umask); | ||
281 | mask &= ~umask; | ||
282 | if (!mask) | ||
283 | return urnat; | ||
284 | } | ||
285 | |||
286 | m = mask << shift; | ||
287 | if (rnat0_kaddr >= kbsp) | ||
288 | rnat0 = sw->ar_rnat; | ||
289 | else if (rnat0_kaddr > krbs) | ||
290 | rnat0 = *rnat0_kaddr; | ||
291 | urnat |= (rnat0 & m) >> shift; | ||
292 | |||
293 | m = mask >> (63 - shift); | ||
294 | if (rnat1_kaddr >= kbsp) | ||
295 | rnat1 = sw->ar_rnat; | ||
296 | else if (rnat1_kaddr > krbs) | ||
297 | rnat1 = *rnat1_kaddr; | ||
298 | urnat |= (rnat1 & m) << (63 - shift); | ||
299 | return urnat; | ||
300 | } | ||
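The two masked-shift steps at the end are the heart of the routine. Restated in isolation (a distillation of the code above, with a hypothetical helper name): user NaT bit 0 corresponds to kernel bit `shift`, so the low (63 - shift) user bits come from rnat0 and the remainder from rnat1; bit 63 of an RNaT slot is architecturally unused, hence the 63-bit arithmetic.

```c
/* Splice one user RNaT word out of two adjacent kernel RNaT words. */
static unsigned long splice_rnat(unsigned long rnat0, unsigned long rnat1,
				 unsigned long shift, unsigned long mask)
{
	unsigned long urnat;

	urnat  = (rnat0 & (mask << shift)) >> shift;
	urnat |= (rnat1 & (mask >> (63 - shift))) << (63 - shift);
	return urnat;
}
```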
301 | |||
302 | /* | ||
303 | * The reverse of get_rnat. | ||
304 | */ | ||
305 | static void | ||
306 | put_rnat (struct task_struct *task, struct switch_stack *sw, | ||
307 | unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat, | ||
308 | unsigned long *urbs_end) | ||
309 | { | ||
310 | unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m; | ||
311 | unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift; | ||
312 | long num_regs, nbits; | ||
313 | struct pt_regs *pt; | ||
314 | unsigned long cfm, *urbs_kargs; | ||
315 | |||
316 | pt = ia64_task_regs(task); | ||
317 | kbsp = (unsigned long *) sw->ar_bspstore; | ||
318 | ubspstore = (unsigned long *) pt->ar_bspstore; | ||
319 | |||
320 | urbs_kargs = urbs_end; | ||
321 | if (in_syscall(pt)) { | ||
322 | /* | ||
323 | * If entered via syscall, don't allow user to set rnat bits | ||
324 | * for syscall args. | ||
325 | */ | ||
326 | cfm = pt->cr_ifs; | ||
327 | urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f)); | ||
328 | } | ||
329 | |||
330 | if (urbs_kargs >= urnat_addr) | ||
331 | nbits = 63; | ||
332 | else { | ||
333 | if ((urnat_addr - 63) >= urbs_kargs) | ||
334 | return; | ||
335 | nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs); | ||
336 | } | ||
337 | mask = MASK(nbits); | ||
338 | |||
339 | /* | ||
340 | * First, figure out which bit number slot 0 in user-land maps | ||
341 | * to in the kernel rnat. Do this by figuring out how many | ||
342 | * register slots we're beyond the user's backingstore and | ||
343 | * then computing the equivalent address in kernel space. | ||
344 | */ | ||
345 | num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1); | ||
346 | slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs); | ||
347 | shift = ia64_rse_slot_num(slot0_kaddr); | ||
348 | rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr); | ||
349 | rnat0_kaddr = rnat1_kaddr - 64; | ||
350 | |||
351 | if (ubspstore + 63 > urnat_addr) { | ||
352 | /* some bits need to be placed in pt->ar_rnat: */ | ||
353 | umask = MASK(ia64_rse_slot_num(ubspstore)) & mask; | ||
354 | pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask); | ||
355 | mask &= ~umask; | ||
356 | if (!mask) | ||
357 | return; | ||
358 | } | ||
359 | /* | ||
360 | * Note: Section 11.1 of the EAS guarantees that bit 63 of an | ||
361 | * rnat slot is ignored, so we don't have to clear it here. | ||
362 | */ | ||
363 | rnat0 = (urnat << shift); | ||
364 | m = mask << shift; | ||
365 | if (rnat0_kaddr >= kbsp) | ||
366 | sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m); | ||
367 | else if (rnat0_kaddr > krbs) | ||
368 | *rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m)); | ||
369 | |||
370 | rnat1 = (urnat >> (63 - shift)); | ||
371 | m = mask >> (63 - shift); | ||
372 | if (rnat1_kaddr >= kbsp) | ||
373 | sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m); | ||
374 | else if (rnat1_kaddr > krbs) | ||
375 | *rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m)); | ||
376 | } | ||
377 | |||
378 | static inline int | ||
379 | on_kernel_rbs (unsigned long addr, unsigned long bspstore, | ||
380 | unsigned long urbs_end) | ||
381 | { | ||
382 | unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *) | ||
383 | urbs_end); | ||
384 | return (addr >= bspstore && addr <= (unsigned long) rnat_addr); | ||
385 | } | ||
386 | |||
387 | /* | ||
388 | * Read a word from the user-level backing store of task CHILD. ADDR | ||
389 | * is the user-level address to read the word from, VAL a pointer to | ||
390 | * the return value, and USER_BSP gives the end of the user-level | ||
391 | * backing store (i.e., it's the address that would be in ar.bsp after | ||
392 | * the user executed a "cover" instruction). | ||
393 | * | ||
394 | * This routine takes care of accessing the kernel register backing | ||
395 | * store for those registers that got spilled there. It also takes | ||
396 | * care of calculating the appropriate RNaT collection words. | ||
397 | */ | ||
398 | long | ||
399 | ia64_peek (struct task_struct *child, struct switch_stack *child_stack, | ||
400 | unsigned long user_rbs_end, unsigned long addr, long *val) | ||
401 | { | ||
402 | unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr; | ||
403 | struct pt_regs *child_regs; | ||
404 | size_t copied; | ||
405 | long ret; | ||
406 | |||
407 | urbs_end = (unsigned long *) user_rbs_end; | ||
408 | laddr = (unsigned long *) addr; | ||
409 | child_regs = ia64_task_regs(child); | ||
410 | bspstore = (unsigned long *) child_regs->ar_bspstore; | ||
411 | krbs = (unsigned long *) child + IA64_RBS_OFFSET/8; | ||
412 | if (on_kernel_rbs(addr, (unsigned long) bspstore, | ||
413 | (unsigned long) urbs_end)) | ||
414 | { | ||
415 | /* | ||
416 | * Attempt to read the RBS in an area that's actually | ||
417 | * on the kernel RBS => read the corresponding bits in | ||
418 | * the kernel RBS. | ||
419 | */ | ||
420 | rnat_addr = ia64_rse_rnat_addr(laddr); | ||
421 | ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end); | ||
422 | |||
423 | if (laddr == rnat_addr) { | ||
424 | /* return NaT collection word itself */ | ||
425 | *val = ret; | ||
426 | return 0; | ||
427 | } | ||
428 | |||
429 | if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) { | ||
430 | /* | ||
431 | * It is implementation dependent whether the | ||
432 | * data portion of a NaT value gets saved on a | ||
433 | * st8.spill or RSE spill (e.g., see EAS 2.6, | ||
434 | * 4.4.4.6 Register Spill and Fill). To get | ||
435 | * consistent behavior across all possible | ||
436 | * IA-64 implementations, we return zero in | ||
437 | * this case. | ||
438 | */ | ||
439 | *val = 0; | ||
440 | return 0; | ||
441 | } | ||
442 | |||
443 | if (laddr < urbs_end) { | ||
444 | /* | ||
445 | * The desired word is on the kernel RBS and | ||
446 | * is not a NaT. | ||
447 | */ | ||
448 | regnum = ia64_rse_num_regs(bspstore, laddr); | ||
449 | *val = *ia64_rse_skip_regs(krbs, regnum); | ||
450 | return 0; | ||
451 | } | ||
452 | } | ||
453 | copied = access_process_vm(child, addr, &ret, sizeof(ret), 0); | ||
454 | if (copied != sizeof(ret)) | ||
455 | return -EIO; | ||
456 | *val = ret; | ||
457 | return 0; | ||
458 | } | ||
459 | |||
460 | long | ||
461 | ia64_poke (struct task_struct *child, struct switch_stack *child_stack, | ||
462 | unsigned long user_rbs_end, unsigned long addr, long val) | ||
463 | { | ||
464 | unsigned long *bspstore, *krbs, regnum, *laddr; | ||
465 | unsigned long *urbs_end = (unsigned long *) user_rbs_end; | ||
466 | struct pt_regs *child_regs; | ||
467 | |||
468 | laddr = (unsigned long *) addr; | ||
469 | child_regs = ia64_task_regs(child); | ||
470 | bspstore = (unsigned long *) child_regs->ar_bspstore; | ||
471 | krbs = (unsigned long *) child + IA64_RBS_OFFSET/8; | ||
472 | if (on_kernel_rbs(addr, (unsigned long) bspstore, | ||
473 | (unsigned long) urbs_end)) | ||
474 | { | ||
475 | /* | ||
476 | * Attempt to write the RBS in an area that's actually | ||
477 | * on the kernel RBS => write the corresponding bits | ||
478 | * in the kernel RBS. | ||
479 | */ | ||
480 | if (ia64_rse_is_rnat_slot(laddr)) | ||
481 | put_rnat(child, child_stack, krbs, laddr, val, | ||
482 | urbs_end); | ||
483 | else { | ||
484 | if (laddr < urbs_end) { | ||
485 | regnum = ia64_rse_num_regs(bspstore, laddr); | ||
486 | *ia64_rse_skip_regs(krbs, regnum) = val; | ||
487 | } | ||
488 | } | ||
489 | } else if (access_process_vm(child, addr, &val, sizeof(val), 1) | ||
490 | != sizeof(val)) | ||
491 | return -EIO; | ||
492 | return 0; | ||
493 | } | ||
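From the tracer's side these two entry points are reached through PTRACE_PEEKDATA/POKEDATA on backing-store addresses. A hedged user-space sketch (helper name mine, error handling omitted) that reads the most recently spilled stacked register of a stopped child:

```c
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace_offsets.h>	/* PT_AR_BSP */

static long peek_top_of_rbs(pid_t pid)
{
	/* PT_AR_BSP reports the user backing-store end (see the comment
	 * in access_uarea() below); the word just under it is the last
	 * stacked register, unless that slot is an RNaT collection.
	 * ia64_peek() transparently serves words still on the kernel RBS. */
	unsigned long addr = ptrace(PTRACE_PEEKUSER, pid, PT_AR_BSP, 0) - 8;

	if ((addr & 0x1f8) == 0x1f8)	/* RNaT slots sit at ...1f8 */
		addr -= 8;
	return ptrace(PTRACE_PEEKDATA, pid, addr, 0);
}
```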
494 | |||
495 | /* | ||
496 | * Calculate the address of the end of the user-level register backing | ||
497 | * store. This is the address that would have been stored in ar.bsp | ||
498 | * if the user had executed a "cover" instruction right before | ||
499 | * entering the kernel. If CFMP is not NULL, it is used to return the | ||
500 | * "current frame mask" that was active at the time the kernel was | ||
501 | * entered. | ||
502 | */ | ||
503 | unsigned long | ||
504 | ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt, | ||
505 | unsigned long *cfmp) | ||
506 | { | ||
507 | unsigned long *krbs, *bspstore, cfm = pt->cr_ifs; | ||
508 | long ndirty; | ||
509 | |||
510 | krbs = (unsigned long *) child + IA64_RBS_OFFSET/8; | ||
511 | bspstore = (unsigned long *) pt->ar_bspstore; | ||
512 | ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19)); | ||
513 | |||
514 | if (in_syscall(pt)) | ||
515 | ndirty += (cfm & 0x7f); | ||
516 | else | ||
517 | cfm &= ~(1UL << 63); /* clear valid bit */ | ||
518 | |||
519 | if (cfmp) | ||
520 | *cfmp = cfm; | ||
521 | return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty); | ||
522 | } | ||
523 | |||
524 | /* | ||
525 | * Synchronize (i.e., write) the RSE backing store living in kernel | ||
526 | * space to the VM of the CHILD task. SW and PT are the pointers to | ||
527 | * the switch_stack and pt_regs structures, respectively. | ||
528 | * USER_RBS_END is the user-level address at which the backing store | ||
529 | * ends. | ||
530 | */ | ||
531 | long | ||
532 | ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw, | ||
533 | unsigned long user_rbs_start, unsigned long user_rbs_end) | ||
534 | { | ||
535 | unsigned long addr, val; | ||
536 | long ret; | ||
537 | |||
538 | /* now copy word for word from kernel rbs to user rbs: */ | ||
539 | for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) { | ||
540 | ret = ia64_peek(child, sw, user_rbs_end, addr, &val); | ||
541 | if (ret < 0) | ||
542 | return ret; | ||
543 | if (access_process_vm(child, addr, &val, sizeof(val), 1) | ||
544 | != sizeof(val)) | ||
545 | return -EIO; | ||
546 | } | ||
547 | return 0; | ||
548 | } | ||
549 | |||
550 | static inline int | ||
551 | thread_matches (struct task_struct *thread, unsigned long addr) | ||
552 | { | ||
553 | unsigned long thread_rbs_end; | ||
554 | struct pt_regs *thread_regs; | ||
555 | |||
556 | if (ptrace_check_attach(thread, 0) < 0) | ||
557 | /* | ||
558 | * If the thread is not in an attachable state, we'll | ||
559 | * ignore it. The net effect is that if ADDR happens | ||
560 | * to overlap with the portion of the thread's | ||
561 | * register backing store that is currently residing | ||
562 | * on the thread's kernel stack, then ptrace() may end | ||
563 | * up accessing a stale value. But if the thread | ||
564 | * isn't stopped, that's a problem anyhow, so we're | ||
565 | * doing as well as we can... | ||
566 | */ | ||
567 | return 0; | ||
568 | |||
569 | thread_regs = ia64_task_regs(thread); | ||
570 | thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL); | ||
571 | if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end)) | ||
572 | return 0; | ||
573 | |||
574 | return 1; /* looks like we've got a winner */ | ||
575 | } | ||
576 | |||
577 | /* | ||
578 | * GDB apparently wants to be able to read the register backing store | ||
579 | * of any thread when attached to a given process. If we are peeking | ||
580 | * or poking an address that happens to reside in the kernel backing | ||
581 | * store of another thread, we need to attach to that thread, because | ||
582 | * otherwise we end up accessing stale data. | ||
583 | * | ||
584 | * The tasklist_lock must be read-locked before calling this routine! | ||
585 | */ | ||
586 | static struct task_struct * | ||
587 | find_thread_for_addr (struct task_struct *child, unsigned long addr) | ||
588 | { | ||
589 | struct task_struct *g, *p; | ||
590 | struct mm_struct *mm; | ||
591 | int mm_users; | ||
592 | |||
593 | if (!(mm = get_task_mm(child))) | ||
594 | return child; | ||
595 | |||
596 | /* -1 because of our get_task_mm(): */ | ||
597 | mm_users = atomic_read(&mm->mm_users) - 1; | ||
598 | if (mm_users <= 1) | ||
599 | goto out; /* not multi-threaded */ | ||
600 | |||
601 | /* | ||
602 | * First, traverse the child's thread-list. Good for scalability with | ||
603 | * NPTL-threads. | ||
604 | */ | ||
605 | p = child; | ||
606 | do { | ||
607 | if (thread_matches(p, addr)) { | ||
608 | child = p; | ||
609 | goto out; | ||
610 | } | ||
611 | if (mm_users-- <= 1) | ||
612 | goto out; | ||
613 | } while ((p = next_thread(p)) != child); | ||
614 | |||
615 | do_each_thread(g, p) { | ||
616 | if (p->mm != mm) | ||
617 | continue; | ||
618 | |||
619 | if (thread_matches(p, addr)) { | ||
620 | child = p; | ||
621 | goto out; | ||
622 | } | ||
623 | } while_each_thread(g, p); | ||
624 | out: | ||
625 | mmput(mm); | ||
626 | return child; | ||
627 | } | ||
628 | |||
629 | /* | ||
630 | * Write f32-f127 back to task->thread.fph if it has been modified. | ||
631 | */ | ||
632 | inline void | ||
633 | ia64_flush_fph (struct task_struct *task) | ||
634 | { | ||
635 | struct ia64_psr *psr = ia64_psr(ia64_task_regs(task)); | ||
636 | |||
637 | if (ia64_is_local_fpu_owner(task) && psr->mfh) { | ||
638 | psr->mfh = 0; | ||
639 | task->thread.flags |= IA64_THREAD_FPH_VALID; | ||
640 | ia64_save_fpu(&task->thread.fph[0]); | ||
641 | } | ||
642 | } | ||
643 | |||
644 | /* | ||
645 | * Sync the fph state of the task so that it can be manipulated | ||
646 | * through thread.fph. If necessary, f32-f127 are written back to | ||
647 | * thread.fph or, if the fph state hasn't been used before, thread.fph | ||
648 | * is cleared to zeroes. Also, access to f32-f127 is disabled to | ||
649 | * ensure that the task picks up the state from thread.fph when it | ||
650 | * executes again. | ||
651 | */ | ||
652 | void | ||
653 | ia64_sync_fph (struct task_struct *task) | ||
654 | { | ||
655 | struct ia64_psr *psr = ia64_psr(ia64_task_regs(task)); | ||
656 | |||
657 | ia64_flush_fph(task); | ||
658 | if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) { | ||
659 | task->thread.flags |= IA64_THREAD_FPH_VALID; | ||
660 | memset(&task->thread.fph, 0, sizeof(task->thread.fph)); | ||
661 | } | ||
662 | ia64_drop_fpu(task); | ||
663 | psr->dfh = 1; | ||
664 | } | ||
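From ptrace's point of view the lazy fph handling is invisible: reading any of f32-f127 goes through the fph branch of access_uarea() below, which calls ia64_flush_fph() first, so the value read reflects the child's live FP-high state. A sketch (PT_F32 is the real user-area offset; each FP register occupies 16 bytes, read here as two 8-byte halves; helper name mine):

```c
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace_offsets.h>	/* PT_F32 */

/* Read both 64-bit halves of a stopped child's f32. */
static void peek_f32(pid_t pid, unsigned long half[2])
{
	half[0] = ptrace(PTRACE_PEEKUSER, pid, PT_F32, 0);
	half[1] = ptrace(PTRACE_PEEKUSER, pid, PT_F32 + 8, 0);
}
```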
665 | |||
666 | static int | ||
667 | access_fr (struct unw_frame_info *info, int regnum, int hi, | ||
668 | unsigned long *data, int write_access) | ||
669 | { | ||
670 | struct ia64_fpreg fpval; | ||
671 | int ret; | ||
672 | |||
673 | ret = unw_get_fr(info, regnum, &fpval); | ||
674 | if (ret < 0) | ||
675 | return ret; | ||
676 | |||
677 | if (write_access) { | ||
678 | fpval.u.bits[hi] = *data; | ||
679 | ret = unw_set_fr(info, regnum, fpval); | ||
680 | } else | ||
681 | *data = fpval.u.bits[hi]; | ||
682 | return ret; | ||
683 | } | ||
684 | |||
685 | /* | ||
686 | * Change the machine-state of CHILD such that it will return via the normal | ||
687 | * kernel exit-path, rather than the syscall-exit path. | ||
688 | */ | ||
689 | static void | ||
690 | convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt, | ||
691 | unsigned long cfm) | ||
692 | { | ||
693 | struct unw_frame_info info, prev_info; | ||
694 | unsigned long ip, pr; | ||
695 | |||
696 | unw_init_from_blocked_task(&info, child); | ||
697 | while (1) { | ||
698 | prev_info = info; | ||
699 | if (unw_unwind(&info) < 0) | ||
700 | return; | ||
701 | if (unw_get_rp(&info, &ip) < 0) | ||
702 | return; | ||
703 | if (ip < FIXADDR_USER_END) | ||
704 | break; | ||
705 | } | ||
706 | |||
707 | unw_get_pr(&prev_info, &pr); | ||
708 | pr &= ~(1UL << PRED_SYSCALL); | ||
709 | pr |= (1UL << PRED_NON_SYSCALL); | ||
710 | unw_set_pr(&prev_info, pr); | ||
711 | |||
712 | pt->cr_ifs = (1UL << 63) | cfm; | ||
713 | } | ||
714 | |||
715 | static int | ||
716 | access_nat_bits (struct task_struct *child, struct pt_regs *pt, | ||
717 | struct unw_frame_info *info, | ||
718 | unsigned long *data, int write_access) | ||
719 | { | ||
720 | unsigned long regnum, nat_bits, scratch_unat, dummy = 0; | ||
721 | char nat = 0; | ||
722 | |||
723 | if (write_access) { | ||
724 | nat_bits = *data; | ||
725 | scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits); | ||
726 | if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) { | ||
727 | dprintk("ptrace: failed to set ar.unat\n"); | ||
728 | return -1; | ||
729 | } | ||
730 | for (regnum = 4; regnum <= 7; ++regnum) { | ||
731 | unw_get_gr(info, regnum, &dummy, &nat); | ||
732 | unw_set_gr(info, regnum, dummy, | ||
733 | (nat_bits >> regnum) & 1); | ||
734 | } | ||
735 | } else { | ||
736 | if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) { | ||
737 | dprintk("ptrace: failed to read ar.unat\n"); | ||
738 | return -1; | ||
739 | } | ||
740 | nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat); | ||
741 | for (regnum = 4; regnum <= 7; ++regnum) { | ||
742 | unw_get_gr(info, regnum, &dummy, &nat); | ||
743 | nat_bits |= (nat != 0) << regnum; | ||
744 | } | ||
745 | *data = nat_bits; | ||
746 | } | ||
747 | return 0; | ||
748 | } | ||
749 | |||
750 | static int | ||
751 | access_uarea (struct task_struct *child, unsigned long addr, | ||
752 | unsigned long *data, int write_access) | ||
753 | { | ||
754 | unsigned long *ptr, regnum, urbs_end, rnat_addr, cfm; | ||
755 | struct switch_stack *sw; | ||
756 | struct pt_regs *pt; | ||
757 | # define pt_reg_addr(pt, reg) ((void *) \ | ||
758 | ((unsigned long) (pt) \ | ||
759 | + offsetof(struct pt_regs, reg))) | ||
760 | |||
761 | |||
762 | pt = ia64_task_regs(child); | ||
763 | sw = (struct switch_stack *) (child->thread.ksp + 16); | ||
764 | |||
765 | if ((addr & 0x7) != 0) { | ||
766 | dprintk("ptrace: unaligned register address 0x%lx\n", addr); | ||
767 | return -1; | ||
768 | } | ||
769 | |||
770 | if (addr < PT_F127 + 16) { | ||
771 | /* accessing fph */ | ||
772 | if (write_access) | ||
773 | ia64_sync_fph(child); | ||
774 | else | ||
775 | ia64_flush_fph(child); | ||
776 | ptr = (unsigned long *) | ||
777 | ((unsigned long) &child->thread.fph + addr); | ||
778 | } else if ((addr >= PT_F10) && (addr < PT_F11 + 16)) { | ||
779 | /* scratch registers untouched by kernel (saved in pt_regs) */ | ||
780 | ptr = pt_reg_addr(pt, f10) + (addr - PT_F10); | ||
781 | } else if (addr >= PT_F12 && addr < PT_F15 + 16) { | ||
782 | /* | ||
783 | * Scratch registers untouched by kernel (saved in | ||
784 | * switch_stack). | ||
785 | */ | ||
786 | ptr = (unsigned long *) ((long) sw | ||
787 | + (addr - PT_NAT_BITS - 32)); | ||
788 | } else if (addr < PT_AR_LC + 8) { | ||
789 | /* preserved state: */ | ||
790 | struct unw_frame_info info; | ||
791 | char nat = 0; | ||
792 | int ret; | ||
793 | |||
794 | unw_init_from_blocked_task(&info, child); | ||
795 | if (unw_unwind_to_user(&info) < 0) | ||
796 | return -1; | ||
797 | |||
798 | switch (addr) { | ||
799 | case PT_NAT_BITS: | ||
800 | return access_nat_bits(child, pt, &info, | ||
801 | data, write_access); | ||
802 | |||
803 | case PT_R4: case PT_R5: case PT_R6: case PT_R7: | ||
804 | if (write_access) { | ||
805 | /* read NaT bit first: */ | ||
806 | unsigned long dummy; | ||
807 | |||
808 | ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4, | ||
809 | &dummy, &nat); | ||
810 | if (ret < 0) | ||
811 | return ret; | ||
812 | } | ||
813 | return unw_access_gr(&info, (addr - PT_R4)/8 + 4, data, | ||
814 | &nat, write_access); | ||
815 | |||
816 | case PT_B1: case PT_B2: case PT_B3: | ||
817 | case PT_B4: case PT_B5: | ||
818 | return unw_access_br(&info, (addr - PT_B1)/8 + 1, data, | ||
819 | write_access); | ||
820 | |||
821 | case PT_AR_EC: | ||
822 | return unw_access_ar(&info, UNW_AR_EC, data, | ||
823 | write_access); | ||
824 | |||
825 | case PT_AR_LC: | ||
826 | return unw_access_ar(&info, UNW_AR_LC, data, | ||
827 | write_access); | ||
828 | |||
829 | default: | ||
830 | if (addr >= PT_F2 && addr < PT_F5 + 16) | ||
831 | return access_fr(&info, (addr - PT_F2)/16 + 2, | ||
832 | (addr & 8) != 0, data, | ||
833 | write_access); | ||
834 | else if (addr >= PT_F16 && addr < PT_F31 + 16) | ||
835 | return access_fr(&info, | ||
836 | (addr - PT_F16)/16 + 16, | ||
837 | (addr & 8) != 0, | ||
838 | data, write_access); | ||
839 | else { | ||
840 | dprintk("ptrace: rejecting access to register " | ||
841 | "address 0x%lx\n", addr); | ||
842 | return -1; | ||
843 | } | ||
844 | } | ||
845 | } else if (addr < PT_F9+16) { | ||
846 | /* scratch state */ | ||
847 | switch (addr) { | ||
848 | case PT_AR_BSP: | ||
849 | /* | ||
850 | * By convention, we use PT_AR_BSP to refer to | ||
851 | * the end of the user-level backing store. | ||
852 | * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof) | ||
853 | * to get the real value of ar.bsp at the time | ||
854 | * the kernel was entered. | ||
855 | * | ||
856 | * Furthermore, when changing the contents of | ||
857 | * PT_AR_BSP (or PT_CFM) we MUST copy any | ||
858 | * users-level stacked registers that are | ||
859 | * stored on the kernel stack back to | ||
860 | * user-space because otherwise, we might end | ||
861 | * up clobbering kernel stacked registers. | ||
862 | * Also, if this happens while the task is | ||
863 | * blocked in a system call, we convert the | ||
864 | * state such that the non-system-call exit | ||
865 | * path is used. This ensures that the proper | ||
866 | * state will be picked up when resuming | ||
867 | * execution. However, it *also* means that | ||
868 | * once we write PT_AR_BSP/PT_CFM, it won't be | ||
869 | * possible to modify the syscall arguments of | ||
870 | * the pending system call any longer. This | ||
871 | * shouldn't be an issue because modifying | ||
872 | * PT_AR_BSP/PT_CFM generally implies that | ||
873 | * we're either abandoning the pending system | ||
874 | * call or that we defer its re-execution | ||
875 | * (e.g., due to GDB doing an inferior | ||
876 | * function call). | ||
877 | */ | ||
878 | urbs_end = ia64_get_user_rbs_end(child, pt, &cfm); | ||
879 | if (write_access) { | ||
880 | if (*data != urbs_end) { | ||
881 | if (ia64_sync_user_rbs(child, sw, | ||
882 | pt->ar_bspstore, | ||
883 | urbs_end) < 0) | ||
884 | return -1; | ||
885 | if (in_syscall(pt)) | ||
886 | convert_to_non_syscall(child, | ||
887 | pt, | ||
888 | cfm); | ||
889 | /* | ||
890 | * Simulate user-level write | ||
891 | * of ar.bsp: | ||
892 | */ | ||
893 | pt->loadrs = 0; | ||
894 | pt->ar_bspstore = *data; | ||
895 | } | ||
896 | } else | ||
897 | *data = urbs_end; | ||
898 | return 0; | ||
899 | |||
900 | case PT_CFM: | ||
901 | urbs_end = ia64_get_user_rbs_end(child, pt, &cfm); | ||
902 | if (write_access) { | ||
903 | if (((cfm ^ *data) & PFM_MASK) != 0) { | ||
904 | if (ia64_sync_user_rbs(child, sw, | ||
905 | pt->ar_bspstore, | ||
906 | urbs_end) < 0) | ||
907 | return -1; | ||
908 | if (in_syscall(pt)) | ||
909 | convert_to_non_syscall(child, | ||
910 | pt, | ||
911 | cfm); | ||
912 | pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK) | ||
913 | | (*data & PFM_MASK)); | ||
914 | } | ||
915 | } else | ||
916 | *data = cfm; | ||
917 | return 0; | ||
918 | |||
919 | case PT_CR_IPSR: | ||
920 | if (write_access) | ||
921 | pt->cr_ipsr = ((*data & IPSR_MASK) | ||
922 | | (pt->cr_ipsr & ~IPSR_MASK)); | ||
923 | else | ||
924 | *data = (pt->cr_ipsr & IPSR_MASK); | ||
925 | return 0; | ||
926 | |||
927 | case PT_AR_RNAT: | ||
928 | urbs_end = ia64_get_user_rbs_end(child, pt, NULL); | ||
929 | rnat_addr = (long) ia64_rse_rnat_addr((long *) | ||
930 | urbs_end); | ||
931 | if (write_access) | ||
932 | return ia64_poke(child, sw, urbs_end, | ||
933 | rnat_addr, *data); | ||
934 | else | ||
935 | return ia64_peek(child, sw, urbs_end, | ||
936 | rnat_addr, data); | ||
937 | |||
938 | case PT_R1: | ||
939 | ptr = pt_reg_addr(pt, r1); | ||
940 | break; | ||
941 | case PT_R2: case PT_R3: | ||
942 | ptr = pt_reg_addr(pt, r2) + (addr - PT_R2); | ||
943 | break; | ||
944 | case PT_R8: case PT_R9: case PT_R10: case PT_R11: | ||
945 | ptr = pt_reg_addr(pt, r8) + (addr - PT_R8); | ||
946 | break; | ||
947 | case PT_R12: case PT_R13: | ||
948 | ptr = pt_reg_addr(pt, r12) + (addr - PT_R12); | ||
949 | break; | ||
950 | case PT_R14: | ||
951 | ptr = pt_reg_addr(pt, r14); | ||
952 | break; | ||
953 | case PT_R15: | ||
954 | ptr = pt_reg_addr(pt, r15); | ||
955 | break; | ||
956 | case PT_R16: case PT_R17: case PT_R18: case PT_R19: | ||
957 | case PT_R20: case PT_R21: case PT_R22: case PT_R23: | ||
958 | case PT_R24: case PT_R25: case PT_R26: case PT_R27: | ||
959 | case PT_R28: case PT_R29: case PT_R30: case PT_R31: | ||
960 | ptr = pt_reg_addr(pt, r16) + (addr - PT_R16); | ||
961 | break; | ||
962 | case PT_B0: | ||
963 | ptr = pt_reg_addr(pt, b0); | ||
964 | break; | ||
965 | case PT_B6: | ||
966 | ptr = pt_reg_addr(pt, b6); | ||
967 | break; | ||
968 | case PT_B7: | ||
969 | ptr = pt_reg_addr(pt, b7); | ||
970 | break; | ||
971 | case PT_F6: case PT_F6+8: case PT_F7: case PT_F7+8: | ||
972 | case PT_F8: case PT_F8+8: case PT_F9: case PT_F9+8: | ||
973 | ptr = pt_reg_addr(pt, f6) + (addr - PT_F6); | ||
974 | break; | ||
975 | case PT_AR_BSPSTORE: | ||
976 | ptr = pt_reg_addr(pt, ar_bspstore); | ||
977 | break; | ||
978 | case PT_AR_RSC: | ||
979 | ptr = pt_reg_addr(pt, ar_rsc); | ||
980 | break; | ||
981 | case PT_AR_UNAT: | ||
982 | ptr = pt_reg_addr(pt, ar_unat); | ||
983 | break; | ||
984 | case PT_AR_PFS: | ||
985 | ptr = pt_reg_addr(pt, ar_pfs); | ||
986 | break; | ||
987 | case PT_AR_CCV: | ||
988 | ptr = pt_reg_addr(pt, ar_ccv); | ||
989 | break; | ||
990 | case PT_AR_FPSR: | ||
991 | ptr = pt_reg_addr(pt, ar_fpsr); | ||
992 | break; | ||
993 | case PT_CR_IIP: | ||
994 | ptr = pt_reg_addr(pt, cr_iip); | ||
995 | break; | ||
996 | case PT_PR: | ||
997 | ptr = pt_reg_addr(pt, pr); | ||
998 | break; | ||
999 | /* scratch register */ | ||
1000 | |||
1001 | default: | ||
1002 | /* disallow accessing anything else... */ | ||
1003 | dprintk("ptrace: rejecting access to register " | ||
1004 | "address 0x%lx\n", addr); | ||
1005 | return -1; | ||
1006 | } | ||
1007 | } else if (addr <= PT_AR_SSD) { | ||
1008 | ptr = pt_reg_addr(pt, ar_csd) + (addr - PT_AR_CSD); | ||
1009 | } else { | ||
1010 | /* access debug registers */ | ||
1011 | |||
1012 | if (addr >= PT_IBR) { | ||
1013 | regnum = (addr - PT_IBR) >> 3; | ||
1014 | ptr = &child->thread.ibr[0]; | ||
1015 | } else { | ||
1016 | regnum = (addr - PT_DBR) >> 3; | ||
1017 | ptr = &child->thread.dbr[0]; | ||
1018 | } | ||
1019 | |||
1020 | if (regnum >= 8) { | ||
1021 | dprintk("ptrace: rejecting access to register " | ||
1022 | "address 0x%lx\n", addr); | ||
1023 | return -1; | ||
1024 | } | ||
1025 | #ifdef CONFIG_PERFMON | ||
1026 | /* | ||
1027 | * Check if debug registers are used by perfmon. This | ||
1028 | * test must be done once we know that we can do the | ||
1029 | * operation, i.e. the arguments are all valid, but | ||
1030 | * before we start modifying the state. | ||
1031 | * | ||
1032 | * Perfmon needs to keep a count of how many processes | ||
1033 | * are trying to modify the debug registers for system | ||
1034 | * wide monitoring sessions. | ||
1035 | * | ||
1036 | * We also include read access here, because they may | ||
1037 | * cause the PMU-installed debug register state | ||
1038 | * (dbr[], ibr[]) to be reset. The two arrays are also | ||
1039 | * used by perfmon, but we do not use | ||
1040 | * IA64_THREAD_DBG_VALID. The registers are restored | ||
1041 | * by the PMU context switch code. | ||
1042 | */ | ||
1043 | if (pfm_use_debug_registers(child)) return -1; | ||
1044 | #endif | ||
1045 | |||
1046 | if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) { | ||
1047 | child->thread.flags |= IA64_THREAD_DBG_VALID; | ||
1048 | memset(child->thread.dbr, 0, | ||
1049 | sizeof(child->thread.dbr)); | ||
1050 | memset(child->thread.ibr, 0, | ||
1051 | sizeof(child->thread.ibr)); | ||
1052 | } | ||
1053 | |||
1054 | ptr += regnum; | ||
1055 | |||
1056 | if ((regnum & 1) && write_access) { | ||
1057 | /* don't let the user set kernel-level breakpoints: */ | ||
1058 | *ptr = *data & ~(7UL << 56); | ||
1059 | return 0; | ||
1060 | } | ||
1061 | } | ||
1062 | if (write_access) | ||
1063 | *ptr = *data; | ||
1064 | else | ||
1065 | *data = *ptr; | ||
1066 | return 0; | ||
1067 | } | ||
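The debug-register window at the end of this function is how a tracer installs hardware breakpoints. A hedged sketch: the dbr registers come in address/control pairs (even index = address, odd index = mask/control), and as the code above shows, the kernel strips the privilege-level bits (7UL << 56) from the odd registers so only user-level breakpoints can be armed. The control-word value and helper name below are purely illustrative.

```c
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace_offsets.h>	/* PT_DBR */

/* Arm data-breakpoint pair 0 on `addr' in a ptrace-stopped child.
 * `ctrl' encodes the mask and read/write enables (target-specific). */
static void set_data_breakpoint(pid_t pid, unsigned long addr,
				unsigned long ctrl)
{
	ptrace(PTRACE_POKEUSER, pid, PT_DBR + 0 * 8, addr);	/* dbr[0] */
	ptrace(PTRACE_POKEUSER, pid, PT_DBR + 1 * 8, ctrl);	/* dbr[1] */
}
```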
1068 | |||
1069 | static long | ||
1070 | ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr) | ||
1071 | { | ||
1072 | unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val; | ||
1073 | struct unw_frame_info info; | ||
1074 | struct ia64_fpreg fpval; | ||
1075 | struct switch_stack *sw; | ||
1076 | struct pt_regs *pt; | ||
1077 | long ret, retval = 0; | ||
1078 | char nat = 0; | ||
1079 | int i; | ||
1080 | |||
1081 | if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs))) | ||
1082 | return -EIO; | ||
1083 | |||
1084 | pt = ia64_task_regs(child); | ||
1085 | sw = (struct switch_stack *) (child->thread.ksp + 16); | ||
1086 | unw_init_from_blocked_task(&info, child); | ||
1087 | if (unw_unwind_to_user(&info) < 0) { | ||
1088 | return -EIO; | ||
1089 | } | ||
1090 | |||
1091 | if (((unsigned long) ppr & 0x7) != 0) { | ||
1092 | dprintk("ptrace: unaligned register address %p\n", ppr); | ||
1093 | return -EIO; | ||
1094 | } | ||
1095 | |||
1096 | if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0 | ||
1097 | || access_uarea(child, PT_AR_EC, &ec, 0) < 0 | ||
1098 | || access_uarea(child, PT_AR_LC, &lc, 0) < 0 | ||
1099 | || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0 | ||
1100 | || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0 | ||
1101 | || access_uarea(child, PT_CFM, &cfm, 0) < 0 | ||
1102 | || access_uarea(child, PT_NAT_BITS, &nat_bits, 0) < 0) | ||
1103 | return -EIO; | ||
1104 | |||
1105 | /* control regs */ | ||
1106 | |||
1107 | retval |= __put_user(pt->cr_iip, &ppr->cr_iip); | ||
1108 | retval |= __put_user(psr, &ppr->cr_ipsr); | ||
1109 | |||
1110 | /* app regs */ | ||
1111 | |||
1112 | retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]); | ||
1113 | retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]); | ||
1114 | retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]); | ||
1115 | retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]); | ||
1116 | retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]); | ||
1117 | retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]); | ||
1118 | |||
1119 | retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]); | ||
1120 | retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]); | ||
1121 | retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]); | ||
1122 | retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]); | ||
1123 | retval |= __put_user(cfm, &ppr->cfm); | ||
1124 | |||
1125 | /* gr1-gr3 */ | ||
1126 | |||
1127 | retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long)); | ||
1128 | retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) *2); | ||
1129 | |||
1130 | /* gr4-gr7 */ | ||
1131 | |||
1132 | for (i = 4; i < 8; i++) { | ||
1133 | if (unw_access_gr(&info, i, &val, &nat, 0) < 0) | ||
1134 | return -EIO; | ||
1135 | retval |= __put_user(val, &ppr->gr[i]); | ||
1136 | } | ||
1137 | |||
1138 | /* gr8-gr11 */ | ||
1139 | |||
1140 | retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4); | ||
1141 | |||
1142 | /* gr12-gr15 */ | ||
1143 | |||
1144 | retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2); | ||
1145 | retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long)); | ||
1146 | retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long)); | ||
1147 | |||
1148 | /* gr16-gr31 */ | ||
1149 | |||
1150 | retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16); | ||
1151 | |||
1152 | /* b0 */ | ||
1153 | |||
1154 | retval |= __put_user(pt->b0, &ppr->br[0]); | ||
1155 | |||
1156 | /* b1-b5 */ | ||
1157 | |||
1158 | for (i = 1; i < 6; i++) { | ||
1159 | if (unw_access_br(&info, i, &val, 0) < 0) | ||
1160 | return -EIO; | ||
1161 | retval |= __put_user(val, &ppr->br[i]); | ||
1162 | } | ||
1163 | |||
1164 | /* b6-b7 */ | ||
1165 | |||
1166 | retval |= __put_user(pt->b6, &ppr->br[6]); | ||
1167 | retval |= __put_user(pt->b7, &ppr->br[7]); | ||
1168 | |||
1169 | /* fr2-fr5 */ | ||
1170 | |||
1171 | for (i = 2; i < 6; i++) { | ||
1172 | if (unw_get_fr(&info, i, &fpval) < 0) | ||
1173 | return -EIO; | ||
1174 | retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval)); | ||
1175 | } | ||
1176 | |||
1177 | /* fr6-fr11 */ | ||
1178 | |||
1179 | retval |= __copy_to_user(&ppr->fr[6], &pt->f6, | ||
1180 | sizeof(struct ia64_fpreg) * 6); | ||
1181 | |||
1182 | /* fp scratch regs(12-15) */ | ||
1183 | |||
1184 | retval |= __copy_to_user(&ppr->fr[12], &sw->f12, | ||
1185 | sizeof(struct ia64_fpreg) * 4); | ||
1186 | |||
1187 | /* fr16-fr31 */ | ||
1188 | |||
1189 | for (i = 16; i < 32; i++) { | ||
1190 | if (unw_get_fr(&info, i, &fpval) < 0) | ||
1191 | return -EIO; | ||
1192 | retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval)); | ||
1193 | } | ||
1194 | |||
1195 | /* fph */ | ||
1196 | |||
1197 | ia64_flush_fph(child); | ||
1198 | retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph, | ||
1199 | sizeof(ppr->fr[32]) * 96); | ||
1200 | |||
1201 | /* preds */ | ||
1202 | |||
1203 | retval |= __put_user(pt->pr, &ppr->pr); | ||
1204 | |||
1205 | /* nat bits */ | ||
1206 | |||
1207 | retval |= __put_user(nat_bits, &ppr->nat); | ||
1208 | |||
1209 | ret = retval ? -EIO : 0; | ||
1210 | return ret; | ||
1211 | } | ||
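The whole register file can be fetched in one call. A minimal sketch of the user-space side (a hedged example, not from this patch): struct pt_all_user_regs comes from <asm/ptrace.h>, the pointer travels in the data argument, and addr is unused.

```c
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>		/* struct pt_all_user_regs */

static void dump_regs(pid_t pid)
{
	struct pt_all_user_regs regs;

	if (ptrace(PTRACE_GETREGS, pid, 0, (unsigned long) &regs) == 0)
		printf("iip=%#lx r8=%#lx pr=%#lx\n",
		       regs.cr_iip, regs.gr[8], regs.pr);
}
```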
1212 | |||
1213 | static long | ||
1214 | ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr) | ||
1215 | { | ||
1216 | unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val = 0; | ||
1217 | struct unw_frame_info info; | ||
1218 | struct switch_stack *sw; | ||
1219 | struct ia64_fpreg fpval; | ||
1220 | struct pt_regs *pt; | ||
1221 | long ret, retval = 0; | ||
1222 | int i; | ||
1223 | |||
1224 | memset(&fpval, 0, sizeof(fpval)); | ||
1225 | |||
1226 | if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs))) | ||
1227 | return -EIO; | ||
1228 | |||
1229 | pt = ia64_task_regs(child); | ||
1230 | sw = (struct switch_stack *) (child->thread.ksp + 16); | ||
1231 | unw_init_from_blocked_task(&info, child); | ||
1232 | if (unw_unwind_to_user(&info) < 0) { | ||
1233 | return -EIO; | ||
1234 | } | ||
1235 | |||
1236 | if (((unsigned long) ppr & 0x7) != 0) { | ||
1237 | dprintk("ptrace: unaligned register address %p\n", ppr); | ||
1238 | return -EIO; | ||
1239 | } | ||
1240 | |||
1241 | /* control regs */ | ||
1242 | |||
1243 | retval |= __get_user(pt->cr_iip, &ppr->cr_iip); | ||
1244 | retval |= __get_user(psr, &ppr->cr_ipsr); | ||
1245 | |||
1246 | /* app regs */ | ||
1247 | |||
1248 | retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]); | ||
1249 | retval |= __get_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]); | ||
1250 | retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]); | ||
1251 | retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]); | ||
1252 | retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]); | ||
1253 | retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]); | ||
1254 | |||
1255 | retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]); | ||
1256 | retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]); | ||
1257 | retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]); | ||
1258 | retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]); | ||
1259 | retval |= __get_user(cfm, &ppr->cfm); | ||
1260 | |||
1261 | /* gr1-gr3 */ | ||
1262 | |||
1263 | retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long)); | ||
1264 | retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2); | ||
1265 | |||
1266 | /* gr4-gr7 */ | ||
1267 | |||
1268 | for (i = 4; i < 8; i++) { | ||
1269 | retval |= __get_user(val, &ppr->gr[i]); | ||
1270 | /* NaT bit will be set via PT_NAT_BITS: */ | ||
1271 | if (unw_set_gr(&info, i, val, 0) < 0) | ||
1272 | return -EIO; | ||
1273 | } | ||
1274 | |||
1275 | /* gr8-gr11 */ | ||
1276 | |||
1277 | retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4); | ||
1278 | |||
1279 | /* gr12-gr15 */ | ||
1280 | |||
1281 | retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2); | ||
1282 | retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long)); | ||
1283 | retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long)); | ||
1284 | |||
1285 | /* gr16-gr31 */ | ||
1286 | |||
1287 | retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16); | ||
1288 | |||
1289 | /* b0 */ | ||
1290 | |||
1291 | retval |= __get_user(pt->b0, &ppr->br[0]); | ||
1292 | |||
1293 | /* b1-b5 */ | ||
1294 | |||
1295 | for (i = 1; i < 6; i++) { | ||
1296 | retval |= __get_user(val, &ppr->br[i]); | ||
1297 | unw_set_br(&info, i, val); | ||
1298 | } | ||
1299 | |||
1300 | /* b6-b7 */ | ||
1301 | |||
1302 | retval |= __get_user(pt->b6, &ppr->br[6]); | ||
1303 | retval |= __get_user(pt->b7, &ppr->br[7]); | ||
1304 | |||
1305 | /* fr2-fr5 */ | ||
1306 | |||
1307 | for (i = 2; i < 6; i++) { | ||
1308 | retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval)); | ||
1309 | if (unw_set_fr(&info, i, fpval) < 0) | ||
1310 | return -EIO; | ||
1311 | } | ||
1312 | |||
1313 | /* fr6-fr11 */ | ||
1314 | |||
1315 | retval |= __copy_from_user(&pt->f6, &ppr->fr[6], | ||
1316 | sizeof(ppr->fr[6]) * 6); | ||
1317 | |||
1318 | /* fp scratch regs(12-15) */ | ||
1319 | |||
1320 | retval |= __copy_from_user(&sw->f12, &ppr->fr[12], | ||
1321 | sizeof(ppr->fr[12]) * 4); | ||
1322 | |||
1323 | /* fr16-fr31 */ | ||
1324 | |||
1325 | for (i = 16; i < 32; i++) { | ||
1326 | retval |= __copy_from_user(&fpval, &ppr->fr[i], | ||
1327 | sizeof(fpval)); | ||
1328 | if (unw_set_fr(&info, i, fpval) < 0) | ||
1329 | return -EIO; | ||
1330 | } | ||
1331 | |||
1332 | /* fph */ | ||
1333 | |||
1334 | ia64_sync_fph(child); | ||
1335 | retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32], | ||
1336 | sizeof(ppr->fr[32]) * 96); | ||
1337 | |||
1338 | /* preds */ | ||
1339 | |||
1340 | retval |= __get_user(pt->pr, &ppr->pr); | ||
1341 | |||
1342 | /* nat bits */ | ||
1343 | |||
1344 | retval |= __get_user(nat_bits, &ppr->nat); | ||
1345 | |||
1346 | retval |= access_uarea(child, PT_CR_IPSR, &psr, 1); | ||
1347 | retval |= access_uarea(child, PT_AR_EC, &ec, 1); | ||
1348 | retval |= access_uarea(child, PT_AR_LC, &lc, 1); | ||
1349 | retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1); | ||
1350 | retval |= access_uarea(child, PT_AR_BSP, &bsp, 1); | ||
1351 | retval |= access_uarea(child, PT_CFM, &cfm, 1); | ||
1352 | retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1); | ||
1353 | |||
1354 | ret = retval ? -EIO : 0; | ||
1355 | return ret; | ||
1356 | } | ||
1357 | |||
1358 | /* | ||
1359 | * Called by kernel/ptrace.c when detaching. | ||
1360 | * | ||
1361 | * Make sure the single step bit is not set. | ||
1362 | */ | ||
1363 | void | ||
1364 | ptrace_disable (struct task_struct *child) | ||
1365 | { | ||
1366 | struct ia64_psr *child_psr = ia64_psr(ia64_task_regs(child)); | ||
1367 | |||
1368 | /* make sure the single step/taken-branch trap bits are not set: */ | ||
1369 | child_psr->ss = 0; | ||
1370 | child_psr->tb = 0; | ||
1371 | } | ||
1372 | |||
1373 | asmlinkage long | ||
1374 | sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data) | ||
1375 | { | ||
1376 | struct pt_regs *pt; | ||
1377 | unsigned long urbs_end, peek_or_poke; | ||
1378 | struct task_struct *child; | ||
1379 | struct switch_stack *sw; | ||
1380 | long ret; | ||
1381 | |||
1382 | lock_kernel(); | ||
1383 | ret = -EPERM; | ||
1384 | if (request == PTRACE_TRACEME) { | ||
1385 | /* are we already being traced? */ | ||
1386 | if (current->ptrace & PT_PTRACED) | ||
1387 | goto out; | ||
1388 | ret = security_ptrace(current->parent, current); | ||
1389 | if (ret) | ||
1390 | goto out; | ||
1391 | current->ptrace |= PT_PTRACED; | ||
1392 | ret = 0; | ||
1393 | goto out; | ||
1394 | } | ||
1395 | |||
1396 | peek_or_poke = (request == PTRACE_PEEKTEXT | ||
1397 | || request == PTRACE_PEEKDATA | ||
1398 | || request == PTRACE_POKETEXT | ||
1399 | || request == PTRACE_POKEDATA); | ||
1400 | ret = -ESRCH; | ||
1401 | read_lock(&tasklist_lock); | ||
1402 | { | ||
1403 | child = find_task_by_pid(pid); | ||
1404 | if (child) { | ||
1405 | if (peek_or_poke) | ||
1406 | child = find_thread_for_addr(child, addr); | ||
1407 | get_task_struct(child); | ||
1408 | } | ||
1409 | } | ||
1410 | read_unlock(&tasklist_lock); | ||
1411 | if (!child) | ||
1412 | goto out; | ||
1413 | ret = -EPERM; | ||
1414 | if (pid == 1) /* no messing around with init! */ | ||
1415 | goto out_tsk; | ||
1416 | |||
1417 | if (request == PTRACE_ATTACH) { | ||
1418 | ret = ptrace_attach(child); | ||
1419 | goto out_tsk; | ||
1420 | } | ||
1421 | |||
1422 | ret = ptrace_check_attach(child, request == PTRACE_KILL); | ||
1423 | if (ret < 0) | ||
1424 | goto out_tsk; | ||
1425 | |||
1426 | pt = ia64_task_regs(child); | ||
1427 | sw = (struct switch_stack *) (child->thread.ksp + 16); | ||
1428 | |||
1429 | switch (request) { | ||
1430 | case PTRACE_PEEKTEXT: | ||
1431 | case PTRACE_PEEKDATA: | ||
1432 | /* read word at location addr */ | ||
1433 | urbs_end = ia64_get_user_rbs_end(child, pt, NULL); | ||
1434 | ret = ia64_peek(child, sw, urbs_end, addr, &data); | ||
1435 | if (ret == 0) { | ||
1436 | ret = data; | ||
1437 | /* ensure "ret" is not mistaken as an error code: */ | ||
1438 | force_successful_syscall_return(); | ||
1439 | } | ||
1440 | goto out_tsk; | ||
1441 | |||
1442 | case PTRACE_POKETEXT: | ||
1443 | case PTRACE_POKEDATA: | ||
1444 | /* write the word at location addr */ | ||
1445 | urbs_end = ia64_get_user_rbs_end(child, pt, NULL); | ||
1446 | ret = ia64_poke(child, sw, urbs_end, addr, data); | ||
1447 | goto out_tsk; | ||
1448 | |||
1449 | case PTRACE_PEEKUSR: | ||
1450 | /* read the word at addr in the USER area */ | ||
1451 | if (access_uarea(child, addr, &data, 0) < 0) { | ||
1452 | ret = -EIO; | ||
1453 | goto out_tsk; | ||
1454 | } | ||
1455 | ret = data; | ||
1456 | /* ensure "ret" is not mistaken as an error code */ | ||
1457 | force_successful_syscall_return(); | ||
1458 | goto out_tsk; | ||
1459 | |||
1460 | case PTRACE_POKEUSR: | ||
1461 | /* write the word at addr in the USER area */ | ||
1462 | if (access_uarea(child, addr, &data, 1) < 0) { | ||
1463 | ret = -EIO; | ||
1464 | goto out_tsk; | ||
1465 | } | ||
1466 | ret = 0; | ||
1467 | goto out_tsk; | ||
1468 | |||
1469 | case PTRACE_OLD_GETSIGINFO: | ||
1470 | /* for backwards-compatibility */ | ||
1471 | ret = ptrace_request(child, PTRACE_GETSIGINFO, addr, data); | ||
1472 | goto out_tsk; | ||
1473 | |||
1474 | case PTRACE_OLD_SETSIGINFO: | ||
1475 | /* for backwards-compatibility */ | ||
1476 | ret = ptrace_request(child, PTRACE_SETSIGINFO, addr, data); | ||
1477 | goto out_tsk; | ||
1478 | |||
1479 | case PTRACE_SYSCALL: | ||
1480 | /* continue and stop at next (return from) syscall */ | ||
1481 | case PTRACE_CONT: | ||
1482 | /* restart after signal. */ | ||
1483 | ret = -EIO; | ||
1484 | if (data > _NSIG) | ||
1485 | goto out_tsk; | ||
1486 | if (request == PTRACE_SYSCALL) | ||
1487 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
1488 | else | ||
1489 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
1490 | child->exit_code = data; | ||
1491 | |||
1492 | /* | ||
1493 | * Make sure the single step/taken-branch trap bits | ||
1494 | * are not set: | ||
1495 | */ | ||
1496 | ia64_psr(pt)->ss = 0; | ||
1497 | ia64_psr(pt)->tb = 0; | ||
1498 | |||
1499 | wake_up_process(child); | ||
1500 | ret = 0; | ||
1501 | goto out_tsk; | ||
1502 | |||
1503 | case PTRACE_KILL: | ||
1504 | /* | ||
1505 | * Make the child exit. Best I can do is send it a | ||
1506 | * sigkill. Perhaps it should be put in the status | ||
1507 | * that it wants to exit. | ||
1508 | */ | ||
1509 | if (child->exit_state == EXIT_ZOMBIE) | ||
1510 | /* already dead */ | ||
1511 | goto out_tsk; | ||
1512 | child->exit_code = SIGKILL; | ||
1513 | |||
1514 | ptrace_disable(child); | ||
1515 | wake_up_process(child); | ||
1516 | ret = 0; | ||
1517 | goto out_tsk; | ||
1518 | |||
1519 | case PTRACE_SINGLESTEP: | ||
1520 | /* let child execute for one instruction */ | ||
1521 | case PTRACE_SINGLEBLOCK: | ||
1522 | ret = -EIO; | ||
1523 | if (data > _NSIG) | ||
1524 | goto out_tsk; | ||
1525 | |||
1526 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
1527 | if (request == PTRACE_SINGLESTEP) { | ||
1528 | ia64_psr(pt)->ss = 1; | ||
1529 | } else { | ||
1530 | ia64_psr(pt)->tb = 1; | ||
1531 | } | ||
1532 | child->exit_code = data; | ||
1533 | |||
1534 | /* give it a chance to run. */ | ||
1535 | wake_up_process(child); | ||
1536 | ret = 0; | ||
1537 | goto out_tsk; | ||
1538 | |||
1539 | case PTRACE_DETACH: | ||
1540 | /* detach a process that was attached. */ | ||
1541 | ret = ptrace_detach(child, data); | ||
1542 | goto out_tsk; | ||
1543 | |||
1544 | case PTRACE_GETREGS: | ||
1545 | ret = ptrace_getregs(child, | ||
1546 | (struct pt_all_user_regs __user *) data); | ||
1547 | goto out_tsk; | ||
1548 | |||
1549 | case PTRACE_SETREGS: | ||
1550 | ret = ptrace_setregs(child, | ||
1551 | (struct pt_all_user_regs __user *) data); | ||
1552 | goto out_tsk; | ||
1553 | |||
1554 | default: | ||
1555 | ret = ptrace_request(child, request, addr, data); | ||
1556 | goto out_tsk; | ||
1557 | } | ||
1558 | out_tsk: | ||
1559 | put_task_struct(child); | ||
1560 | out: | ||
1561 | unlock_kernel(); | ||
1562 | return ret; | ||
1563 | } | ||
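Putting the pieces together, a minimal tracer using the requests handled above might look like this (a hedged sketch with error handling trimmed to the essentials; PTRACE_SINGLEBLOCK, which sets psr.tb instead of psr.ss, is driven the same way):

```c
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();
	int status;

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, 0, 0);
		execl("/bin/true", "true", (char *) NULL);
		_exit(1);
	}
	waitpid(pid, &status, 0);	/* child stops at exec's SIGTRAP */

	/* single-step (psr.ss) until the child exits */
	while (WIFSTOPPED(status)) {
		if (ptrace(PTRACE_SINGLESTEP, pid, 0, 0) < 0)
			break;
		waitpid(pid, &status, 0);
	}
	return 0;
}
```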
1564 | |||
1565 | |||
1566 | void | ||
1567 | syscall_trace (void) | ||
1568 | { | ||
1569 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) | ||
1570 | return; | ||
1571 | if (!(current->ptrace & PT_PTRACED)) | ||
1572 | return; | ||
1573 | /* | ||
1574 | * The 0x80 provides a way for the tracing parent to | ||
1575 | * distinguish between a syscall stop and SIGTRAP delivery. | ||
1576 | */ | ||
1577 | ptrace_notify(SIGTRAP | ||
1578 | | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); | ||
1579 | |||
1580 | /* | ||
1581 | * This isn't the same as continuing with a signal, but it | ||
1582 | * will do for normal use. strace only continues with a | ||
1583 | * signal if the stopping signal is not SIGTRAP. -brl | ||
1584 | */ | ||
1585 | if (current->exit_code) { | ||
1586 | send_sig(current->exit_code, current, 1); | ||
1587 | current->exit_code = 0; | ||
1588 | } | ||
1589 | } | ||
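On the tracer's side the 0x80 convention decodes as follows (a sketch; it assumes PTRACE_O_TRACESYSGOOD was enabled via PTRACE_SETOPTIONS, and the helper name is mine):

```c
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

/* Nonzero iff `status' is a syscall stop rather than a genuine SIGTRAP;
 * requires PTRACE_O_TRACESYSGOOD to have been set on the child. */
static int is_syscall_stop(int status)
{
	return WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80);
}
```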
1590 | |||
1591 | /* "asmlinkage" so the input arguments are preserved... */ | ||
1592 | |||
1593 | asmlinkage void | ||
1594 | syscall_trace_enter (long arg0, long arg1, long arg2, long arg3, | ||
1595 | long arg4, long arg5, long arg6, long arg7, | ||
1596 | struct pt_regs regs) | ||
1597 | { | ||
1598 | long syscall; | ||
1599 | |||
1600 | if (unlikely(current->audit_context)) { | ||
1601 | if (IS_IA32_PROCESS(®s)) | ||
1602 | syscall = regs.r1; | ||
1603 | else | ||
1604 | syscall = regs.r15; | ||
1605 | |||
1606 | audit_syscall_entry(current, syscall, arg0, arg1, arg2, arg3); | ||
1607 | } | ||
1608 | |||
1609 | if (test_thread_flag(TIF_SYSCALL_TRACE) | ||
1610 | && (current->ptrace & PT_PTRACED)) | ||
1611 | syscall_trace(); | ||
1612 | } | ||
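At such a stop, the syscall number itself can be read back through the user area decoded earlier: ia64 passes it in r15 (r1 for IA-32 processes, as the check above shows). A hedged sketch with an illustrative helper name:

```c
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace_offsets.h>	/* PT_R15 */

/* Read the pending syscall number of a syscall-stopped ia64 child. */
static long peek_syscall_nr(pid_t pid)
{
	return ptrace(PTRACE_PEEKUSER, pid, PT_R15, 0);
}
```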
1613 | |||
1614 | /* "asmlinkage" so the input arguments are preserved... */ | ||
1615 | |||
1616 | asmlinkage void | ||
1617 | syscall_trace_leave (long arg0, long arg1, long arg2, long arg3, | ||
1618 | long arg4, long arg5, long arg6, long arg7, | ||
1619 | struct pt_regs regs) | ||
1620 | { | ||
1621 | if (unlikely(current->audit_context)) | ||
1622 | audit_syscall_exit(current, regs.r8); | ||
1623 | |||
1624 | if (test_thread_flag(TIF_SYSCALL_TRACE) | ||
1625 | && (current->ptrace & PT_PTRACED)) | ||
1626 | syscall_trace(); | ||
1627 | } | ||