about summary refs log tree commit diff stats
path: root/arch/x86/kernel/ptrace.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/kernel/ptrace.c')
-rw-r--r--  arch/x86/kernel/ptrace.c | 28
1 files changed, 28 insertions, 0 deletions
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index b00b33a18390..2484e331a64d 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -166,6 +166,34 @@ static inline bool invalid_selector(u16 value)
166 166
167#define FLAG_MASK FLAG_MASK_32 167#define FLAG_MASK FLAG_MASK_32
168 168
169/*
170 * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
171 * when it traps. The previous stack will be directly underneath the saved
172 * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
173 *
174 * Now, if the stack is empty, '&regs->sp' is out of range. In this
175 * case we try to take the previous stack. To always return a non-null
176 * stack pointer we fall back to regs as stack if no previous stack
177 * exists.
178 *
179 * This is valid only for kernel mode traps.
180 */
181unsigned long kernel_stack_pointer(struct pt_regs *regs)
182{
183 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
184 unsigned long sp = (unsigned long)&regs->sp;
185 struct thread_info *tinfo;
186
187 if (context == (sp & ~(THREAD_SIZE - 1)))
188 return sp;
189
190 tinfo = (struct thread_info *)context;
191 if (tinfo->previous_esp)
192 return tinfo->previous_esp;
193
194 return (unsigned long)regs;
195}
196
169static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno) 197static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
170{ 198{
171 BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0); 199 BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);