path: root/include/asm-x86/ptrace.h
Diffstat (limited to 'include/asm-x86/ptrace.h')
-rw-r--r--  include/asm-x86/ptrace.h  220
1 file changed, 156 insertions(+), 64 deletions(-)
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h
index 213c97300cb3..d9e04b46a440 100644
--- a/include/asm-x86/ptrace.h
+++ b/include/asm-x86/ptrace.h
@@ -4,12 +4,15 @@
 #include <linux/compiler.h> /* For __user */
 #include <asm/ptrace-abi.h>
 
+
 #ifndef __ASSEMBLY__
 
 #ifdef __i386__
 /* this struct defines the way the registers are stored on the
    stack during a system call. */
 
+#ifndef __KERNEL__
+
 struct pt_regs {
	long ebx;
	long ecx;
@@ -21,7 +24,7 @@ struct pt_regs {
 	int xds;
 	int xes;
 	int xfs;
-	/* int xgs; */
+	/* int gs; */
 	long orig_eax;
 	long eip;
 	int xcs;
@@ -30,44 +33,37 @@ struct pt_regs {
 	int xss;
 };
 
-#ifdef __KERNEL__
+#else /* __KERNEL__ */
+
+struct pt_regs {
+	long bx;
+	long cx;
+	long dx;
+	long si;
+	long di;
+	long bp;
+	long ax;
+	int ds;
+	int es;
+	int fs;
+	/* int gs; */
+	long orig_ax;
+	long ip;
+	int cs;
+	long flags;
+	long sp;
+	int ss;
+};
 
 #include <asm/vm86.h>
 #include <asm/segment.h>
 
-struct task_struct;
-extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
-
-/*
- * user_mode_vm(regs) determines whether a register set came from user mode.
- * This is true if V8086 mode was enabled OR if the register set was from
- * protected mode with RPL-3 CS value. This tricky test checks that with
- * one comparison. Many places in the kernel can bypass this full check
- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
- */
-static inline int user_mode(struct pt_regs *regs)
-{
-	return (regs->xcs & SEGMENT_RPL_MASK) == USER_RPL;
-}
-static inline int user_mode_vm(struct pt_regs *regs)
-{
-	return ((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= USER_RPL;
-}
-static inline int v8086_mode(struct pt_regs *regs)
-{
-	return (regs->eflags & VM_MASK);
-}
-
-#define instruction_pointer(regs) ((regs)->eip)
-#define frame_pointer(regs) ((regs)->ebp)
-#define stack_pointer(regs) ((regs)->esp)
-#define regs_return_value(regs) ((regs)->eax)
-
-extern unsigned long profile_pc(struct pt_regs *regs);
 #endif /* __KERNEL__ */
 
 #else /* __i386__ */
 
+#ifndef __KERNEL__
+
 struct pt_regs {
 	unsigned long r15;
 	unsigned long r14;
@@ -96,47 +92,143 @@ struct pt_regs {
 /* top of stack page */
 };
 
+#else /* __KERNEL__ */
+
+struct pt_regs {
+	unsigned long r15;
+	unsigned long r14;
+	unsigned long r13;
+	unsigned long r12;
+	unsigned long bp;
+	unsigned long bx;
+/* arguments: non interrupts/non tracing syscalls only save upto here*/
+	unsigned long r11;
+	unsigned long r10;
+	unsigned long r9;
+	unsigned long r8;
+	unsigned long ax;
+	unsigned long cx;
+	unsigned long dx;
+	unsigned long si;
+	unsigned long di;
+	unsigned long orig_ax;
+/* end of arguments */
+/* cpu exception frame or undefined */
+	unsigned long ip;
+	unsigned long cs;
+	unsigned long flags;
+	unsigned long sp;
+	unsigned long ss;
+/* top of stack page */
+};
+
+#endif /* __KERNEL__ */
+#endif /* !__i386__ */
+
 #ifdef __KERNEL__
 
-#define user_mode(regs) (!!((regs)->cs & 3))
-#define user_mode_vm(regs) user_mode(regs)
-#define instruction_pointer(regs) ((regs)->rip)
-#define frame_pointer(regs) ((regs)->rbp)
-#define stack_pointer(regs) ((regs)->rsp)
-#define regs_return_value(regs) ((regs)->rax)
+/* the DS BTS struct is used for ptrace as well */
+#include <asm/ds.h>
+
+struct task_struct;
+
+extern void ptrace_bts_take_timestamp(struct task_struct *, enum bts_qualifier);
 
 extern unsigned long profile_pc(struct pt_regs *regs);
 
+extern unsigned long
+convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
+
+#ifdef CONFIG_X86_32
+extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
+#else
 void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
+#endif
 
-struct task_struct;
+#define regs_return_value(regs) ((regs)->ax)
+
+/*
+ * user_mode_vm(regs) determines whether a register set came from user mode.
+ * This is true if V8086 mode was enabled OR if the register set was from
+ * protected mode with RPL-3 CS value. This tricky test checks that with
+ * one comparison. Many places in the kernel can bypass this full check
+ * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
+ */
+static inline int user_mode(struct pt_regs *regs)
+{
+#ifdef CONFIG_X86_32
+	return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
+#else
+	return !!(regs->cs & 3);
+#endif
+}
+
+static inline int user_mode_vm(struct pt_regs *regs)
+{
+#ifdef CONFIG_X86_32
+	return ((regs->cs & SEGMENT_RPL_MASK) |
+		(regs->flags & VM_MASK)) >= USER_RPL;
+#else
+	return user_mode(regs);
+#endif
+}
+
+static inline int v8086_mode(struct pt_regs *regs)
+{
+#ifdef CONFIG_X86_32
+	return (regs->flags & VM_MASK);
+#else
+	return 0;	/* No V86 mode support in long mode */
+#endif
+}
+
+/*
+ * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
+ * when it traps. So regs will be the current sp.
+ *
+ * This is valid only for kernel mode traps.
+ */
+static inline unsigned long kernel_trap_sp(struct pt_regs *regs)
+{
+#ifdef CONFIG_X86_32
+	return (unsigned long)regs;
+#else
+	return regs->sp;
+#endif
+}
+
+static inline unsigned long instruction_pointer(struct pt_regs *regs)
+{
+	return regs->ip;
+}
+
+static inline unsigned long frame_pointer(struct pt_regs *regs)
+{
+	return regs->bp;
+}
+
+/*
+ * These are defined as per linux/ptrace.h, which see.
+ */
+#define arch_has_single_step() (1)
+extern void user_enable_single_step(struct task_struct *);
+extern void user_disable_single_step(struct task_struct *);
+
+extern void user_enable_block_step(struct task_struct *);
+#ifdef CONFIG_X86_DEBUGCTLMSR
+#define arch_has_block_step() (1)
+#else
+#define arch_has_block_step() (boot_cpu_data.x86 >= 6)
+#endif
+
+struct user_desc;
+extern int do_get_thread_area(struct task_struct *p, int idx,
+			      struct user_desc __user *info);
+extern int do_set_thread_area(struct task_struct *p, int idx,
+			      struct user_desc __user *info, int can_allocate);
 
-extern unsigned long
-convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs);
-
-enum {
-	EF_CF = 0x00000001,
-	EF_PF = 0x00000004,
-	EF_AF = 0x00000010,
-	EF_ZF = 0x00000040,
-	EF_SF = 0x00000080,
-	EF_TF = 0x00000100,
-	EF_IE = 0x00000200,
-	EF_DF = 0x00000400,
-	EF_OF = 0x00000800,
-	EF_IOPL = 0x00003000,
-	EF_IOPL_RING0 = 0x00000000,
-	EF_IOPL_RING1 = 0x00001000,
-	EF_IOPL_RING2 = 0x00002000,
-	EF_NT = 0x00004000, /* nested task */
-	EF_RF = 0x00010000, /* resume */
-	EF_VM = 0x00020000, /* virtual mode */
-	EF_AC = 0x00040000, /* alignment */
-	EF_VIF = 0x00080000, /* virtual interrupt */
-	EF_VIP = 0x00100000, /* virtual interrupt pending */
-	EF_ID = 0x00200000, /* id */
-};
 #endif /* __KERNEL__ */
-#endif /* !__i386__ */
+
 #endif /* !__ASSEMBLY__ */
 
 #endif
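
For illustration only, not part of the patch: the user_mode_vm() comment in the new code describes folding the V86-mode check and the RPL check into a single comparison. The standalone sketch below reproduces that test outside the kernel, with the relevant x86 constants hard-coded (SEGMENT_RPL_MASK is 0x3, USER_RPL is 0x3, and the EFLAGS VM bit is 1 << 17 = 0x20000) and a made-up struct fake_regs standing in for struct pt_regs.

/*
 * Illustrative sketch of the single-comparison user_mode_vm() test.
 * Constants mirror the x86 definitions; fake_regs is a stand-in type.
 */
#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3      /* low two bits of CS = requested privilege level */
#define USER_RPL         0x3      /* ring 3 */
#define VM_MASK          0x20000  /* EFLAGS.VM, set in virtual-8086 mode */

struct fake_regs {
	unsigned long cs;
	unsigned long flags;
};

static int user_mode_vm_sketch(const struct fake_regs *regs)
{
	/*
	 * (cs & 3) is 0..3; OR-ing in EFLAGS.VM pushes the value to at
	 * least 0x20000 whenever V86 mode is on, so one ">= USER_RPL"
	 * comparison covers both "RPL == 3" and "V86 mode enabled".
	 */
	return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & VM_MASK)) >= USER_RPL;
}

int main(void)
{
	struct fake_regs kernel = { .cs = 0x10, .flags = 0x200 };           /* ring 0, VM clear */
	struct fake_regs user   = { .cs = 0x73, .flags = 0x200 };           /* ring 3 */
	struct fake_regs v86    = { .cs = 0x10, .flags = 0x200 | VM_MASK }; /* V86 mode */

	/* prints: kernel=0 user=1 v86=1 */
	printf("kernel=%d user=%d v86=%d\n",
	       user_mode_vm_sketch(&kernel),
	       user_mode_vm_sketch(&user),
	       user_mode_vm_sketch(&v86));
	return 0;
}

The same trick is why the 32-bit user_mode() variant, which only masks with SEGMENT_RPL_MASK, is valid only once V8086 mode has already been ruled out, exactly as the comment in the patch states.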