#ifndef _LINUX_PTRACE_H
#define _LINUX_PTRACE_H
/*
 * ptrace.h
 *
 * Structs and defines to help the user use the ptrace system call,
 * including the defines needed to get at the registers.
 */
#define PTRACE_TRACEME     0
#define PTRACE_PEEKTEXT    1
#define PTRACE_PEEKDATA    2
#define PTRACE_PEEKUSR     3
#define PTRACE_POKETEXT    4
#define PTRACE_POKEDATA    5
#define PTRACE_POKEUSR     6
#define PTRACE_CONT        7
#define PTRACE_KILL        8
#define PTRACE_SINGLESTEP  9
#define PTRACE_ATTACH      16
#define PTRACE_DETACH      17
#define PTRACE_SYSCALL     24
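/*
 * Illustrative sketch, not part of this header: a minimal user-space
 * tracer built from the requests above.  The child asks to be traced
 * with PTRACE_TRACEME and stops with SIGTRAP when it execs; the parent
 * reads one word of its memory and lets it run again.  "some_addr" is
 * a placeholder address; error handling is omitted for brevity.
 *
 *        pid_t child = fork();
 *        if (child == 0) {
 *                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *                execl("/bin/true", "true", NULL);
 *        } else {
 *                int status;
 *                waitpid(child, &status, 0);        // child is now stopped
 *                long word = ptrace(PTRACE_PEEKDATA, child, some_addr, NULL);
 *                ptrace(PTRACE_CONT, child, NULL, NULL);        // resume it
 *        }
 */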
/* 0x4200-0x4300 are reserved for architecture-independent additions. */
#define PTRACE_SETOPTIONS   0x4200
#define PTRACE_GETEVENTMSG  0x4201
#define PTRACE_GETSIGINFO   0x4202
#define PTRACE_SETSIGINFO   0x4203
/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD   0x00000001
#define PTRACE_O_TRACEFORK      0x00000002
#define PTRACE_O_TRACEVFORK     0x00000004
#define PTRACE_O_TRACECLONE     0x00000008
#define PTRACE_O_TRACEEXEC      0x00000010
#define PTRACE_O_TRACEVFORKDONE 0x00000020
#define PTRACE_O_TRACEEXIT      0x00000040
#define PTRACE_O_MASK           0x0000007f
/* Wait extended result codes for the above trace options. */
#define PTRACE_EVENT_FORK       1
#define PTRACE_EVENT_VFORK      2
#define PTRACE_EVENT_CLONE      3
#define PTRACE_EVENT_EXEC       4
#define PTRACE_EVENT_VFORK_DONE 5
#define PTRACE_EVENT_EXIT       6
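/*
 * Illustrative sketch, not part of this header: pairing the options with
 * the event codes above.  Once PTRACE_O_TRACEFORK is set, a fork by the
 * tracee stops it with SIGTRAP and the event code appears in bits 16-23
 * of the wait status; PTRACE_GETEVENTMSG then retrieves the new child's
 * pid.
 *
 *        ptrace(PTRACE_SETOPTIONS, child, NULL, (void *)PTRACE_O_TRACEFORK);
 *        ptrace(PTRACE_CONT, child, NULL, NULL);
 *        waitpid(child, &status, 0);
 *        if (WIFSTOPPED(status) && (status >> 16) == PTRACE_EVENT_FORK) {
 *                unsigned long new_pid;
 *                ptrace(PTRACE_GETEVENTMSG, child, NULL, &new_pid);
 *        }
 */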
#include <asm/ptrace.h>
#ifdef __KERNEL__
/*
 * Ptrace flags
 *
 * The ownership rules for task->ptrace, which holds the ptrace
 * flags, are simple.  When a task is running it owns its own
 * task->ptrace flags.  When the task is stopped, the ptracer
 * owns task->ptrace.
 */
#define PT_PTRACED          0x00000001
#define PT_DTRACE           0x00000002  /* delayed trace (used on m68k, i386) */
#define PT_TRACESYSGOOD     0x00000004
#define PT_PTRACE_CAP       0x00000008  /* ptracer can follow suid-exec */
#define PT_TRACE_FORK       0x00000010
#define PT_TRACE_VFORK      0x00000020
#define PT_TRACE_CLONE      0x00000040
#define PT_TRACE_EXEC       0x00000080
#define PT_TRACE_VFORK_DONE 0x00000100
#define PT_TRACE_EXIT       0x00000200
#define PT_ATTACHED         0x00000400  /* parent != real_parent */
#define PT_TRACE_MASK       0x000003f4
/* single stepping state bits (used on ARM and PA-RISC) */
#define PT_SINGLESTEP_BIT   31
#define PT_SINGLESTEP       (1 << PT_SINGLESTEP_BIT)
#define PT_BLOCKSTEP_BIT    30
#define PT_BLOCKSTEP        (1 << PT_BLOCKSTEP_BIT)
#include <linux/compiler.h> /* For unlikely. */
#include <linux/sched.h> /* For struct task_struct. */
extern long arch_ptrace(struct task_struct *child, long request, long addr, long data);
extern struct task_struct *ptrace_get_task_struct(pid_t pid);
extern int ptrace_traceme(void);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src,
                           char __user *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char __user *src,
                            unsigned long dst, int len);
extern int ptrace_attach(struct task_struct *tsk);
extern int ptrace_detach(struct task_struct *, unsigned int);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_check_attach(struct task_struct *task, int kill);
extern int ptrace_request(struct task_struct *child, long request, long addr, long data);
extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
                          struct task_struct *new_parent);
extern void __ptrace_unlink(struct task_struct *child);
extern void ptrace_untrace(struct task_struct *child);
extern int ptrace_may_attach(struct task_struct *task);
extern int __ptrace_may_attach(struct task_struct *task);
static inline void ptrace_link(struct task_struct *child,
                               struct task_struct *new_parent)
{
        if (unlikely(child->ptrace))
                __ptrace_link(child, new_parent);
}

static inline void ptrace_unlink(struct task_struct *child)
{
        if (unlikely(child->ptrace))
                __ptrace_unlink(child);
}
int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data);
int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data);
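/*
 * For reference, a sketch of what the generic peekdata helper does in
 * kernel/ptrace.c: it copies one word out of the tracee's address space
 * and stores it at the user address passed in @data (pokedata is the
 * mirror image, writing one word into the tracee):
 *
 *        unsigned long tmp;
 *        if (access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0) != sizeof(tmp))
 *                return -EIO;
 *        return put_user(tmp, (unsigned long __user *)data);
 */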
#ifndef force_successful_syscall_return
/*
 * System call handlers that, upon successful completion, need to return a
 * negative value should call force_successful_syscall_return() right before
 * returning.  On architectures where the syscall convention provides for a
 * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
 * others), this macro can be used to ensure that the error flag will not
 * get set.  On architectures which do not support a separate error flag,
 * the macro is a no-op and the spurious error condition needs to be
 * filtered out by some other means (e.g., at user level, by passing an
 * extra argument to the syscall handler, or something along those lines).
 */
#define force_successful_syscall_return() do { } while (0)
#endif
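/*
 * Illustrative sketch with a hypothetical syscall (sys_example_cookie
 * and do_lookup_cookie are not real): a handler whose successful result
 * is legitimately negative calls the macro so the arch does not flag
 * the return value as an error:
 *
 *        asmlinkage long sys_example_cookie(int arg)
 *        {
 *                long cookie = do_lookup_cookie(arg);    // may be negative
 *                if (IS_ERR_VALUE(cookie))
 *                        return cookie;                  // genuine error
 *                force_successful_syscall_return();
 *                return cookie;                          // negative, but success
 *        }
 */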
/*
* <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__.
*
* These do-nothing inlines are used when the arch does not
* implement single-step. The kerneldoc comments are here
* to document the interface for all arch definitions.
*/
#ifndef arch_has_single_step
/**
* arch_has_single_step - does this CPU support user-mode single-step?
*
* If this is defined, then there must be function declarations or
* inlines for user_enable_single_step() and user_disable_single_step().
* arch_has_single_step() should evaluate to nonzero iff the machine
* supports instruction single-step for user mode.
* It can be a constant or it can test a CPU feature bit.
*/
#define arch_has_single_step() (0)
/**
* user_enable_single_step - single-step in user-mode task
* @task: either current or a task stopped in %TASK_TRACED
*
* This can only be called when arch_has_single_step() has returned nonzero.
* Set @task so that when it returns to user mode, it will trap after the
* next single instruction executes. If arch_has_block_step() is defined,
* this must clear the effects of user_enable_block_step() too.
*/
static inline void user_enable_single_step(struct task_struct *task)
{
        BUG();          /* This can never be called. */
}
/**
* user_disable_single_step - cancel user-mode single-step
* @task: either current or a task stopped in %TASK_TRACED
*
* Clear @task of the effects of user_enable_single_step() and
* user_enable_block_step(). This can be called whether or not either
* of those was ever called on @task, and even if arch_has_single_step()
* returned zero.
*/
static inline void user_disable_single_step(struct task_struct *task)
{
}
#endif /* arch_has_single_step */
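/*
 * Illustrative sketch for a hypothetical architecture: an arch that can
 * hardware-single-step would define these in its <asm/ptrace.h> under
 * #ifdef __KERNEL__, overriding the fallbacks above.  TRAP_FLAG stands
 * in for whatever arch-specific bit requests a per-instruction trap; a
 * real port must also cope with context switches and signal delivery.
 *
 *        #define arch_has_single_step()  (1)
 *
 *        static inline void user_enable_single_step(struct task_struct *task)
 *        {
 *                task_pt_regs(task)->flags |= TRAP_FLAG;
 *        }
 *
 *        static inline void user_disable_single_step(struct task_struct *task)
 *        {
 *                task_pt_regs(task)->flags &= ~TRAP_FLAG;
 *        }
 */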
#ifndef arch_has_block_step
/**
* arch_has_block_step - does this CPU support user-mode block-step?
*
* If this is defined, then there must be a function declaration or inline
* for user_enable_block_step(), and arch_has_single_step() must be defined
* too. arch_has_block_step() should evaluate to nonzero iff the machine
* supports step-until-branch for user mode. It can be a constant or it
* can test a CPU feature bit.
*/
#define arch_has_block_step() (0)
/**
* user_enable_block_step - step until branch in user-mode task
* @task: either current or a task stopped in %TASK_TRACED
*
* This can only be called when arch_has_block_step() has returned nonzero,
* and will never be called when single-instruction stepping is being used.
* Set @task so that when it returns to user mode, it will trap after the
* next branch or trap taken.
*/
static inline void user_enable_block_step(struct task_struct *task)
{
        BUG();          /* This can never be called. */
}
#endif /* arch_has_block_step */
#endif /* __KERNEL__ */
#endif /* _LINUX_PTRACE_H */