Diffstat (limited to 'arch/sh/kernel/ptrace_32.c')
-rw-r--r--	arch/sh/kernel/ptrace_32.c	82
1 file changed, 57 insertions, 25 deletions
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index 9be35f348093..c625cdab76dd 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -2,7 +2,7 @@
  * SuperH process tracing
  *
  * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
- * Copyright (C) 2002 - 2008 Paul Mundt
+ * Copyright (C) 2002 - 2009 Paul Mundt
  *
  * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
  *
@@ -26,6 +26,7 @@
 #include <linux/tracehook.h>
 #include <linux/elf.h>
 #include <linux/regset.h>
+#include <linux/hw_breakpoint.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -63,33 +64,64 @@ static inline int put_stack_long(struct task_struct *task, int offset,
 	return 0;
 }
 
+void ptrace_triggered(struct perf_event *bp, int nmi,
+		      struct perf_sample_data *data, struct pt_regs *regs)
+{
+	struct perf_event_attr attr;
+
+	/*
+	 * Disable the breakpoint request here since ptrace has defined a
+	 * one-shot behaviour for breakpoint exceptions.
+	 */
+	attr = bp->attr;
+	attr.disabled = true;
+	modify_user_hw_breakpoint(bp, &attr);
+}
+
+static int set_single_step(struct task_struct *tsk, unsigned long addr)
+{
+	struct thread_struct *thread = &tsk->thread;
+	struct perf_event *bp;
+	struct perf_event_attr attr;
+
+	bp = thread->ptrace_bps[0];
+	if (!bp) {
+		hw_breakpoint_init(&attr);
+
+		attr.bp_addr = addr;
+		attr.bp_len = HW_BREAKPOINT_LEN_2;
+		attr.bp_type = HW_BREAKPOINT_R;
+
+		bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
+		if (IS_ERR(bp))
+			return PTR_ERR(bp);
+
+		thread->ptrace_bps[0] = bp;
+	} else {
+		int err;
+
+		attr = bp->attr;
+		attr.bp_addr = addr;
+		err = modify_user_hw_breakpoint(bp, &attr);
+		if (unlikely(err))
+			return err;
+	}
+
+	return 0;
+}
+
 void user_enable_single_step(struct task_struct *child)
 {
-	/* Next scheduling will set up UBC */
-	if (child->thread.ubc_pc == 0)
-		ubc_usercnt += 1;
-
-	child->thread.ubc_pc = get_stack_long(child,
-				offsetof(struct pt_regs, pc));
+	unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));
 
 	set_tsk_thread_flag(child, TIF_SINGLESTEP);
+
+	set_single_step(child, pc);
 }
 
 void user_disable_single_step(struct task_struct *child)
 {
 	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
-
-	/*
-	 * Ensure the UBC is not programmed at the next context switch.
-	 *
-	 * Normally this is not needed but there are sequences such as
-	 * singlestep, signal delivery, and continue that leave the
-	 * ubc_pc non-zero leading to spurious SIGTRAPs.
-	 */
-	if (child->thread.ubc_pc != 0) {
-		ubc_usercnt -= 1;
-		child->thread.ubc_pc = 0;
-	}
 }
 
 /*
@@ -163,10 +195,10 @@ int fpregs_get(struct task_struct *target,
 
 	if ((boot_cpu_data.flags & CPU_HAS_FPU))
 		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-					   &target->thread.fpu.hard, 0, -1);
+					   &target->thread.xstate->hardfpu, 0, -1);
 
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-				   &target->thread.fpu.soft, 0, -1);
+				   &target->thread.xstate->softfpu, 0, -1);
 }
 
 static int fpregs_set(struct task_struct *target,
@@ -184,10 +216,10 @@ static int fpregs_set(struct task_struct *target,
 
 	if ((boot_cpu_data.flags & CPU_HAS_FPU))
 		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-					  &target->thread.fpu.hard, 0, -1);
+					  &target->thread.xstate->hardfpu, 0, -1);
 
 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				  &target->thread.fpu.soft, 0, -1);
+				  &target->thread.xstate->softfpu, 0, -1);
 }
 
 static int fpregs_active(struct task_struct *target,
@@ -333,7 +365,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 				else
 					tmp = 0;
 			} else
-				tmp = ((long *)&child->thread.fpu)
+				tmp = ((long *)child->thread.xstate)
 					[(addr - (long)&dummy->fpu) >> 2];
 		} else if (addr == (long) &dummy->u_fpvalid)
 			tmp = !!tsk_used_math(child);
@@ -362,7 +394,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		else if (addr >= (long) &dummy->fpu &&
 			 addr < (long) &dummy->u_fpvalid) {
 			set_stopped_child_used_math(child);
-			((long *)&child->thread.fpu)
+			((long *)child->thread.xstate)
 				[(addr - (long)&dummy->fpu) >> 2] = data;
 			ret = 0;
 		} else if (addr == (long) &dummy->u_fpvalid) {
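
For context, the userspace-visible behaviour this commit preserves is the ptrace single-step protocol: each PTRACE_SINGLESTEP request arms a one-shot breakpoint on the next instruction, which ptrace_triggered() above disables again before SIGTRAP reaches the tracer. The following is a minimal sketch of a generic Linux tracer exercising that path, not part of the commit; the traced program (/bin/true) and the step counter are arbitrary demo choices.

/*
 * Illustrative sketch only: single-step a child process to completion.
 * Uses only the standard ptrace(2)/waitpid(2) interfaces.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		/* Child: request tracing, then exec the target program. */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execl("/bin/true", "true", NULL);
		_exit(1);
	}

	int status;
	long steps = 0;

	/* Parent: wait for the post-exec stop, then step until exit. */
	waitpid(child, &status, 0);
	while (WIFSTOPPED(status)) {
		/*
		 * Each request arms a one-shot breakpoint on the next
		 * instruction; the kernel disables it again before
		 * delivering SIGTRAP back to us.
		 */
		if (ptrace(PTRACE_SINGLESTEP, child, NULL, NULL) < 0)
			break;
		waitpid(child, &status, 0);
		steps++;
	}

	printf("traced %ld single-steps\n", steps);
	return 0;
}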