Diffstat (limited to 'arch/m32r/kernel/ptrace.c')
-rw-r--r--   arch/m32r/kernel/ptrace.c   708
1 files changed, 0 insertions, 708 deletions
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
deleted file mode 100644
index d702a5ca0f92..000000000000
--- a/arch/m32r/kernel/ptrace.c
+++ /dev/null
@@ -1,708 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m32r/kernel/ptrace.c
 *
 * Copyright (C) 2002 Hirokazu Takata, Takeo Takahashi
 * Copyright (C) 2004 Hirokazu Takata, Kei Sakamoto
 *
 * Original x86 implementation:
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 *
 * Some code taken from sh version:
 * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
 * Some code taken from arm version:
 * Copyright (C) 2000 Russell King
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/string.h>
#include <linux/signal.h>

#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>

/*
 * This routine will get a word off of the process kernel stack.
 */
static inline unsigned long int
get_stack_long(struct task_struct *task, int offset)
{
        unsigned long *stack;

        stack = (unsigned long *)task_pt_regs(task);

        return stack[offset];
}

/*
 * This routine will put a word on the process kernel stack.
 */
static inline int
put_stack_long(struct task_struct *task, int offset, unsigned long data)
{
        unsigned long *stack;

        stack = (unsigned long *)task_pt_regs(task);
        stack[offset] = data;

        return 0;
}

static int reg_offset[] = {
        PT_R0, PT_R1, PT_R2, PT_R3, PT_R4, PT_R5, PT_R6, PT_R7,
        PT_R8, PT_R9, PT_R10, PT_R11, PT_R12, PT_FP, PT_LR, PT_SPU,
};

/*
 * Read the word at offset "off" in the "struct user". We
 * actually access the pt_regs stored on the kernel stack.
 */
static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
                            unsigned long __user *data)
{
        unsigned long tmp;
#ifndef NO_FPU
        struct user * dummy = NULL;
#endif

        if ((off & 3) || off > sizeof(struct user) - 3)
                return -EIO;

        off >>= 2;
        switch (off) {
        case PT_EVB:
                __asm__ __volatile__ (
                        "mvfc %0, cr5 \n\t"
                        : "=r" (tmp)
                );
                break;
        case PT_CBR: {
                        unsigned long psw;
                        psw = get_stack_long(tsk, PT_PSW);
                        tmp = ((psw >> 8) & 1);
                }
                break;
        case PT_PSW: {
                        unsigned long psw, bbpsw;
                        psw = get_stack_long(tsk, PT_PSW);
                        bbpsw = get_stack_long(tsk, PT_BBPSW);
                        tmp = ((psw >> 8) & 0xff) | ((bbpsw & 0xff) << 8);
                }
                break;
        case PT_PC:
                tmp = get_stack_long(tsk, PT_BPC);
                break;
        case PT_BPC:
                off = PT_BBPC;
                /* fall through */
        default:
                if (off < (sizeof(struct pt_regs) >> 2))
                        tmp = get_stack_long(tsk, off);
#ifndef NO_FPU
                else if (off >= (long)(&dummy->fpu >> 2) &&
                         off < (long)(&dummy->u_fpvalid >> 2)) {
                        if (!tsk_used_math(tsk)) {
                                if (off == (long)(&dummy->fpu.fpscr >> 2))
                                        tmp = FPSCR_INIT;
                                else
                                        tmp = 0;
                        } else
                                tmp = ((long *)(&tsk->thread.fpu >> 2))
                                        [off - (long)&dummy->fpu];
                } else if (off == (long)(&dummy->u_fpvalid >> 2))
                        tmp = !!tsk_used_math(tsk);
#endif /* not NO_FPU */
                else
                        tmp = 0;
        }

        return put_user(tmp, data);
}

static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
                             unsigned long data)
{
        int ret = -EIO;
#ifndef NO_FPU
        struct user * dummy = NULL;
#endif

        if ((off & 3) || off > sizeof(struct user) - 3)
                return -EIO;

        off >>= 2;
        switch (off) {
        case PT_EVB:
        case PT_BPC:
        case PT_SPI:
                /* We don't allow modifying evb, bpc, or spi. */
                ret = 0;
                break;
        case PT_PSW:
        case PT_CBR: {
                        /* We allow modifying only cbr in psw */
                        unsigned long psw;
                        psw = get_stack_long(tsk, PT_PSW);
                        psw = (psw & ~0x100) | ((data & 1) << 8);
                        ret = put_stack_long(tsk, PT_PSW, psw);
                }
                break;
        case PT_PC:
                off = PT_BPC;
                data &= ~1;
                /* fall through */
        default:
                if (off < (sizeof(struct pt_regs) >> 2))
                        ret = put_stack_long(tsk, off, data);
#ifndef NO_FPU
                else if (off >= (long)(&dummy->fpu >> 2) &&
                         off < (long)(&dummy->u_fpvalid >> 2)) {
                        set_stopped_child_used_math(tsk);
                        ((long *)&tsk->thread.fpu)
                                [off - (long)&dummy->fpu] = data;
                        ret = 0;
                } else if (off == (long)(&dummy->u_fpvalid >> 2)) {
                        conditional_stopped_child_used_math(data, tsk);
                        ret = 0;
                }
#endif /* not NO_FPU */
                break;
        }

        return ret;
}

/*
 * Get all user integer registers.
 */
static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
{
        struct pt_regs *regs = task_pt_regs(tsk);

        return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
}

/*
 * Set all user integer registers.
 */
static int ptrace_setregs(struct task_struct *tsk, void __user *uregs)
{
        struct pt_regs newregs;
        int ret;

        ret = -EFAULT;
        if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
                struct pt_regs *regs = task_pt_regs(tsk);
                *regs = newregs;
                ret = 0;
        }

        return ret;
}


static inline int
check_condition_bit(struct task_struct *child)
{
        return (int)((get_stack_long(child, PT_PSW) >> 8) & 1);
}

static int
check_condition_src(unsigned long op, unsigned long regno1,
                    unsigned long regno2, struct task_struct *child)
{
        unsigned long reg1, reg2;

        reg2 = get_stack_long(child, reg_offset[regno2]);

        switch (op) {
        case 0x0: /* BEQ */
                reg1 = get_stack_long(child, reg_offset[regno1]);
                return reg1 == reg2;
        case 0x1: /* BNE */
                reg1 = get_stack_long(child, reg_offset[regno1]);
                return reg1 != reg2;
        case 0x8: /* BEQZ */
                return reg2 == 0;
        case 0x9: /* BNEZ */
                return reg2 != 0;
        case 0xa: /* BLTZ */
                return (int)reg2 < 0;
        case 0xb: /* BGEZ */
                return (int)reg2 >= 0;
        case 0xc: /* BLEZ */
                return (int)reg2 <= 0;
        case 0xd: /* BGTZ */
                return (int)reg2 > 0;
        default:
                /* never reached */
                return 0;
        }
}

static void
compute_next_pc_for_16bit_insn(unsigned long insn, unsigned long pc,
                               unsigned long *next_pc,
                               struct task_struct *child)
{
        unsigned long op, op2, op3;
        unsigned long disp;
        unsigned long regno;
        int parallel = 0;

        if (insn & 0x00008000)
                parallel = 1;
        if (pc & 3)
                insn &= 0x7fff; /* right slot */
        else
                insn >>= 16;    /* left slot */

        op = (insn >> 12) & 0xf;
        op2 = (insn >> 8) & 0xf;
        op3 = (insn >> 4) & 0xf;

        if (op == 0x7) {
                switch (op2) {
                case 0xd: /* BNC */
                case 0x9: /* BNCL */
                        if (!check_condition_bit(child)) {
                                disp = (long)(insn << 24) >> 22;
                                *next_pc = (pc & ~0x3) + disp;
                                return;
                        }
                        break;
                case 0x8: /* BCL */
                case 0xc: /* BC */
                        if (check_condition_bit(child)) {
                                disp = (long)(insn << 24) >> 22;
                                *next_pc = (pc & ~0x3) + disp;
                                return;
                        }
                        break;
                case 0xe: /* BL */
                case 0xf: /* BRA */
                        disp = (long)(insn << 24) >> 22;
                        *next_pc = (pc & ~0x3) + disp;
                        return;
                        break;
                }
        } else if (op == 0x1) {
                switch (op2) {
                case 0x0:
                        if (op3 == 0xf) { /* TRAP */
#if 1
                                /* pass through */
#else
                                /* kernel space is not allowed as next_pc */
                                unsigned long evb;
                                unsigned long trapno;
                                trapno = insn & 0xf;
                                __asm__ __volatile__ (
                                        "mvfc %0, cr5\n"
                                        :"=r"(evb)
                                        :
                                );
                                *next_pc = evb + (trapno << 2);
                                return;
#endif
                        } else if (op3 == 0xd) { /* RTE */
                                *next_pc = get_stack_long(child, PT_BPC);
                                return;
                        }
                        break;
                case 0xc: /* JC */
                        if (op3 == 0xc && check_condition_bit(child)) {
                                regno = insn & 0xf;
                                *next_pc = get_stack_long(child,
                                                          reg_offset[regno]);
                                return;
                        }
                        break;
                case 0xd: /* JNC */
                        if (op3 == 0xc && !check_condition_bit(child)) {
                                regno = insn & 0xf;
                                *next_pc = get_stack_long(child,
                                                          reg_offset[regno]);
                                return;
                        }
                        break;
                case 0xe: /* JL */
                case 0xf: /* JMP */
                        if (op3 == 0xc) { /* JMP */
                                regno = insn & 0xf;
                                *next_pc = get_stack_long(child,
                                                          reg_offset[regno]);
                                return;
                        }
                        break;
                }
        }
        if (parallel)
                *next_pc = pc + 4;
        else
                *next_pc = pc + 2;
}

static void
compute_next_pc_for_32bit_insn(unsigned long insn, unsigned long pc,
                               unsigned long *next_pc,
                               struct task_struct *child)
{
        unsigned long op;
        unsigned long op2;
        unsigned long disp;
        unsigned long regno1, regno2;

        op = (insn >> 28) & 0xf;
        if (op == 0xf) { /* branch 24-bit relative */
                op2 = (insn >> 24) & 0xf;
                switch (op2) {
                case 0xd: /* BNC */
                case 0x9: /* BNCL */
                        if (!check_condition_bit(child)) {
                                disp = (long)(insn << 8) >> 6;
                                *next_pc = (pc & ~0x3) + disp;
                                return;
                        }
                        break;
                case 0x8: /* BCL */
                case 0xc: /* BC */
                        if (check_condition_bit(child)) {
                                disp = (long)(insn << 8) >> 6;
                                *next_pc = (pc & ~0x3) + disp;
                                return;
                        }
                        break;
                case 0xe: /* BL */
                case 0xf: /* BRA */
                        disp = (long)(insn << 8) >> 6;
                        *next_pc = (pc & ~0x3) + disp;
                        return;
                }
        } else if (op == 0xb) { /* branch 16-bit relative */
                op2 = (insn >> 20) & 0xf;
                switch (op2) {
                case 0x0: /* BEQ */
                case 0x1: /* BNE */
                case 0x8: /* BEQZ */
                case 0x9: /* BNEZ */
                case 0xa: /* BLTZ */
                case 0xb: /* BGEZ */
                case 0xc: /* BLEZ */
                case 0xd: /* BGTZ */
                        regno1 = ((insn >> 24) & 0xf);
                        regno2 = ((insn >> 16) & 0xf);
                        if (check_condition_src(op2, regno1, regno2, child)) {
                                disp = (long)(insn << 16) >> 14;
                                *next_pc = (pc & ~0x3) + disp;
                                return;
                        }
                        break;
                }
        }
        *next_pc = pc + 4;
}

static inline void
compute_next_pc(unsigned long insn, unsigned long pc,
                unsigned long *next_pc, struct task_struct *child)
{
        if (insn & 0x80000000)
                compute_next_pc_for_32bit_insn(insn, pc, next_pc, child);
        else
                compute_next_pc_for_16bit_insn(insn, pc, next_pc, child);
}

static int
register_debug_trap(struct task_struct *child, unsigned long next_pc,
                    unsigned long next_insn, unsigned long *code)
{
        struct debug_trap *p = &child->thread.debug_trap;
        unsigned long addr = next_pc & ~3;

        if (p->nr_trap == MAX_TRAPS) {
                printk("kernel BUG at %s %d: p->nr_trap = %d\n",
                       __FILE__, __LINE__, p->nr_trap);
                return -1;
        }
        p->addr[p->nr_trap] = addr;
        p->insn[p->nr_trap] = next_insn;
        p->nr_trap++;
        if (next_pc & 3) {
                *code = (next_insn & 0xffff0000) | 0x10f1;
                /* xxx --> TRAP1 */
        } else {
                if ((next_insn & 0x80000000) || (next_insn & 0x8000)) {
                        *code = 0x10f17000;
                        /* TRAP1 --> NOP */
                } else {
                        *code = (next_insn & 0xffff) | 0x10f10000;
                        /* TRAP1 --> xxx */
                }
        }
        return 0;
}

static int
unregister_debug_trap(struct task_struct *child, unsigned long addr,
                      unsigned long *code)
{
        struct debug_trap *p = &child->thread.debug_trap;
        int i;

        /* Search debug trap entry. */
        for (i = 0; i < p->nr_trap; i++) {
                if (p->addr[i] == addr)
                        break;
        }
        if (i >= p->nr_trap) {
                /* The trap may have been requested by the debugger.
                 * ptrace should do nothing in this case.
                 */
                return 0;
        }

        /* Recover original instruction code. */
        *code = p->insn[i];

        /* Shift debug trap entries. */
        while (i < p->nr_trap - 1) {
                p->insn[i] = p->insn[i + 1];
                p->addr[i] = p->addr[i + 1];
                i++;
        }
        p->nr_trap--;
        return 1;
}

static void
unregister_all_debug_traps(struct task_struct *child)
{
        struct debug_trap *p = &child->thread.debug_trap;
        int i;

        for (i = 0; i < p->nr_trap; i++)
                access_process_vm(child, p->addr[i], &p->insn[i], sizeof(p->insn[i]),
                                  FOLL_FORCE | FOLL_WRITE);
        p->nr_trap = 0;
}

static inline void
invalidate_cache(void)
{
#if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP)

        _flush_cache_copyback_all();

#else /* ! CONFIG_CHIP_M32700 */

        /* Invalidate cache */
        __asm__ __volatile__ (
                "ldi r0, #-1 \n\t"
                "ldi r1, #0 \n\t"
                "stb r1, @r0 ; cache off \n\t"
                "; \n\t"
                "ldi r0, #-2 \n\t"
                "ldi r1, #1 \n\t"
                "stb r1, @r0 ; cache invalidate \n\t"
                ".fillinsn \n"
                "0: \n\t"
                "ldb r1, @r0 ; invalidate check \n\t"
                "bnez r1, 0b \n\t"
                "; \n\t"
                "ldi r0, #-1 \n\t"
                "ldi r1, #1 \n\t"
                "stb r1, @r0 ; cache on \n\t"
                : : : "r0", "r1", "memory"
        );
        /* FIXME: copying-back d-cache and invalidating i-cache are needed.
         */
#endif /* CONFIG_CHIP_M32700 */
}

/* Embed a debug trap (TRAP1) code */
static int
embed_debug_trap(struct task_struct *child, unsigned long next_pc)
{
        unsigned long next_insn, code;
        unsigned long addr = next_pc & ~3;

        if (access_process_vm(child, addr, &next_insn, sizeof(next_insn),
                              FOLL_FORCE)
            != sizeof(next_insn)) {
                return -1; /* error */
        }

        /* Set a trap code. */
        if (register_debug_trap(child, next_pc, next_insn, &code)) {
                return -1; /* error */
        }
        if (access_process_vm(child, addr, &code, sizeof(code),
                              FOLL_FORCE | FOLL_WRITE)
            != sizeof(code)) {
                return -1; /* error */
        }
        return 0; /* success */
}

void
withdraw_debug_trap(struct pt_regs *regs)
{
        unsigned long addr;
        unsigned long code;

        addr = (regs->bpc - 2) & ~3;
        regs->bpc -= 2;
        if (unregister_debug_trap(current, addr, &code)) {
                access_process_vm(current, addr, &code, sizeof(code),
                                  FOLL_FORCE | FOLL_WRITE);
                invalidate_cache();
        }
}

void
init_debug_traps(struct task_struct *child)
{
        struct debug_trap *p = &child->thread.debug_trap;
        int i;
        p->nr_trap = 0;
        for (i = 0; i < MAX_TRAPS; i++) {
                p->addr[i] = 0;
                p->insn[i] = 0;
        }
}

void user_enable_single_step(struct task_struct *child)
{
        unsigned long next_pc;
        unsigned long pc, insn;

        clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

        /* Compute next pc. */
        pc = get_stack_long(child, PT_BPC);

        if (access_process_vm(child, pc&~3, &insn, sizeof(insn),
                              FOLL_FORCE)
            != sizeof(insn))
                return;

        compute_next_pc(insn, pc, &next_pc, child);
        if (next_pc & 0x80000000)
                return;

        if (embed_debug_trap(child, next_pc))
                return;

        invalidate_cache();
}

void user_disable_single_step(struct task_struct *child)
{
        unregister_all_debug_traps(child);
        invalidate_cache();
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc. are not set.
 */
void ptrace_disable(struct task_struct *child)
{
        /* nothing to do.. */
}

long
arch_ptrace(struct task_struct *child, long request,
            unsigned long addr, unsigned long data)
{
        int ret;
        unsigned long __user *datap = (unsigned long __user *) data;

        switch (request) {
        /*
         * read word at location "addr" in the child process.
         */
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                ret = generic_ptrace_peekdata(child, addr, data);
                break;

        /*
         * read the word at location addr in the USER area.
         */
        case PTRACE_PEEKUSR:
                ret = ptrace_read_user(child, addr, datap);
                break;

        /*
         * write the word at location addr.
         */
        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
                ret = generic_ptrace_pokedata(child, addr, data);
                if (ret == 0 && request == PTRACE_POKETEXT)
                        invalidate_cache();
                break;

        /*
         * write the word at location addr in the USER area.
         */
        case PTRACE_POKEUSR:
                ret = ptrace_write_user(child, addr, data);
                break;

        case PTRACE_GETREGS:
                ret = ptrace_getregs(child, datap);
                break;

        case PTRACE_SETREGS:
                ret = ptrace_setregs(child, datap);
                break;

        default:
                ret = ptrace_request(child, request, addr, data);
                break;
        }

        return ret;
}

/* notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
void do_syscall_trace(void)
{
        if (!test_thread_flag(TIF_SYSCALL_TRACE))
                return;
        if (!(current->ptrace & PT_PTRACED))
                return;
        /* the 0x80 provides a way for the tracing parent to distinguish
           between a syscall stop and SIGTRAP delivery */
        ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
                                 ? 0x80 : 0));

        /*
         * this isn't the same as continuing with a signal, but it will do
         * for normal use. strace only continues with a signal if the
         * stopping signal is not SIGTRAP. -brl
         */
        if (current->exit_code) {
                send_sig(current->exit_code, current, 1);
                current->exit_code = 0;
        }
}