diff options
author | Paul Mundt <lethal@linux-sh.org> | 2009-11-09 02:27:40 -0500 |
---|---|---|
committer | Paul Mundt <lethal@linux-sh.org> | 2009-12-08 01:02:27 -0500 |
commit | 09a072947791088b88ae15111cf68fc5aaaf758d (patch) | |
tree | 510728ca3d3906a352cfc673e7f3e38e471165b4 /arch/sh/kernel | |
parent | 6ec22f9b037fc0c2e00ddb7023fad279c365324d (diff) |
sh: hw-breakpoints: Add preliminary support for SH-4A UBC.
This adds preliminary support for the SH-4A UBC to the hw-breakpoints API.
Presently only a single channel is implemented, and the ptrace interface
still needs to be converted. This is the first step to cleaning up the
long-standing UBC mess, making the UBC more generally accessible, and
finally making it SMP safe.
An additional abstraction will be layered on top of this as with the perf
events code to permit the various CPU families to wire up support for
their own specific UBCs, as many variations exist.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/kernel')
-rw-r--r-- | arch/sh/kernel/Makefile | 1 | ||||
-rw-r--r-- | arch/sh/kernel/cpu/sh3/ex.S | 2 | ||||
-rw-r--r-- | arch/sh/kernel/hw_breakpoint.c | 416 | ||||
-rw-r--r-- | arch/sh/kernel/process_32.c | 94 | ||||
-rw-r--r-- | arch/sh/kernel/ptrace_32.c | 19 |
5 files changed, 424 insertions, 108 deletions
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile index a2d0a40f3848..649daadd4519 100644 --- a/arch/sh/kernel/Makefile +++ b/arch/sh/kernel/Makefile | |||
@@ -36,6 +36,7 @@ obj-$(CONFIG_DUMP_CODE) += disassemble.o | |||
36 | obj-$(CONFIG_HIBERNATION) += swsusp.o | 36 | obj-$(CONFIG_HIBERNATION) += swsusp.o |
37 | obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o | 37 | obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o |
38 | 38 | ||
39 | obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o | ||
39 | obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o | 40 | obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o |
40 | 41 | ||
41 | EXTRA_CFLAGS += -Werror | 42 | EXTRA_CFLAGS += -Werror |
diff --git a/arch/sh/kernel/cpu/sh3/ex.S b/arch/sh/kernel/cpu/sh3/ex.S index 46610c35c232..99b4d020179a 100644 --- a/arch/sh/kernel/cpu/sh3/ex.S +++ b/arch/sh/kernel/cpu/sh3/ex.S | |||
@@ -49,7 +49,7 @@ ENTRY(exception_handling_table) | |||
49 | .long exception_error ! reserved_instruction (filled by trap_init) /* 180 */ | 49 | .long exception_error ! reserved_instruction (filled by trap_init) /* 180 */ |
50 | .long exception_error ! illegal_slot_instruction (filled by trap_init) /*1A0*/ | 50 | .long exception_error ! illegal_slot_instruction (filled by trap_init) /*1A0*/ |
51 | .long nmi_trap_handler /* 1C0 */ ! Allow trap to debugger | 51 | .long nmi_trap_handler /* 1C0 */ ! Allow trap to debugger |
52 | .long break_point_trap /* 1E0 */ | 52 | .long breakpoint_trap_handler /* 1E0 */ |
53 | 53 | ||
54 | /* | 54 | /* |
55 | * Pad the remainder of the table out, exceptions residing in far | 55 | * Pad the remainder of the table out, exceptions residing in far |
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c new file mode 100644 index 000000000000..ff3cb3d7df8f --- /dev/null +++ b/arch/sh/kernel/hw_breakpoint.c | |||
@@ -0,0 +1,416 @@ | |||
1 | /* | ||
2 | * arch/sh/kernel/hw_breakpoint.c | ||
3 | * | ||
4 | * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC. | ||
5 | * | ||
6 | * Copyright (C) 2009 Paul Mundt | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/perf_event.h> | ||
14 | #include <linux/hw_breakpoint.h> | ||
15 | #include <linux/percpu.h> | ||
16 | #include <linux/kallsyms.h> | ||
17 | #include <linux/notifier.h> | ||
18 | #include <linux/kprobes.h> | ||
19 | #include <linux/kdebug.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <asm/hw_breakpoint.h> | ||
22 | #include <asm/mmu_context.h> | ||
23 | |||
/*
 * Shadow copy of what has been programmed into one UBC channel, kept so
 * the handler can re-arm the channel after servicing a hit.
 */
struct ubc_context {
	unsigned long pc;	/* break address written to UBC_CAR0 */
	unsigned long state;	/* len | type bits written to UBC_CBR0 */
};

/* Per cpu ubc channel state */
static DEFINE_PER_CPU(struct ubc_context, ubc_ctx[HBP_NUM]);

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register, for each cpu
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);
37 | |||
/*
 * Boot-time reset of UBC channel 0: clear the channel's mask, control
 * and common-break configuration registers, then program the reset
 * register.  (UBC_CRR_BIE | UBC_CRR_PCB presumably selects break-
 * exception delivery -- confirm against the SH-4A UBC documentation.)
 */
static int __init ubc_init(void)
{
	__raw_writel(0, UBC_CAMR0);
	__raw_writel(0, UBC_CBR0);
	__raw_writel(0, UBC_CBCR);

	__raw_writel(UBC_CRR_BIE | UBC_CRR_PCB, UBC_CRR0);

	/* dummy read for write posting */
	(void)__raw_readl(UBC_CRR0);

	return 0;
}
arch_initcall(ubc_init);
52 | |||
53 | /* | ||
54 | * Install a perf counter breakpoint. | ||
55 | * | ||
56 | * We seek a free UBC channel and use it for this breakpoint. | ||
57 | * | ||
58 | * Atomic: we hold the counter->ctx->lock and we only handle variables | ||
59 | * and registers local to this cpu. | ||
60 | */ | ||
61 | int arch_install_hw_breakpoint(struct perf_event *bp) | ||
62 | { | ||
63 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | ||
64 | struct ubc_context *ubc_ctx; | ||
65 | int i; | ||
66 | |||
67 | for (i = 0; i < HBP_NUM; i++) { | ||
68 | struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); | ||
69 | |||
70 | if (!*slot) { | ||
71 | *slot = bp; | ||
72 | break; | ||
73 | } | ||
74 | } | ||
75 | |||
76 | if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot")) | ||
77 | return -EBUSY; | ||
78 | |||
79 | ubc_ctx = &__get_cpu_var(ubc_ctx[i]); | ||
80 | |||
81 | ubc_ctx->pc = info->address; | ||
82 | ubc_ctx->state = info->len | info->type; | ||
83 | |||
84 | __raw_writel(UBC_CBR_CE | ubc_ctx->state, UBC_CBR0); | ||
85 | __raw_writel(ubc_ctx->pc, UBC_CAR0); | ||
86 | |||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | /* | ||
91 | * Uninstall the breakpoint contained in the given counter. | ||
92 | * | ||
93 | * First we search the debug address register it uses and then we disable | ||
94 | * it. | ||
95 | * | ||
96 | * Atomic: we hold the counter->ctx->lock and we only handle variables | ||
97 | * and registers local to this cpu. | ||
98 | */ | ||
99 | void arch_uninstall_hw_breakpoint(struct perf_event *bp) | ||
100 | { | ||
101 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | ||
102 | struct ubc_context *ubc_ctx; | ||
103 | int i; | ||
104 | |||
105 | for (i = 0; i < HBP_NUM; i++) { | ||
106 | struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); | ||
107 | |||
108 | if (*slot == bp) { | ||
109 | *slot = NULL; | ||
110 | break; | ||
111 | } | ||
112 | } | ||
113 | |||
114 | if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot")) | ||
115 | return; | ||
116 | |||
117 | ubc_ctx = &__get_cpu_var(ubc_ctx[i]); | ||
118 | ubc_ctx->pc = 0; | ||
119 | ubc_ctx->state &= ~(info->len | info->type); | ||
120 | |||
121 | __raw_writel(ubc_ctx->pc, UBC_CBR0); | ||
122 | __raw_writel(ubc_ctx->state, UBC_CAR0); | ||
123 | } | ||
124 | |||
125 | static int get_hbp_len(u16 hbp_len) | ||
126 | { | ||
127 | unsigned int len_in_bytes = 0; | ||
128 | |||
129 | switch (hbp_len) { | ||
130 | case SH_BREAKPOINT_LEN_1: | ||
131 | len_in_bytes = 1; | ||
132 | break; | ||
133 | case SH_BREAKPOINT_LEN_2: | ||
134 | len_in_bytes = 2; | ||
135 | break; | ||
136 | case SH_BREAKPOINT_LEN_4: | ||
137 | len_in_bytes = 4; | ||
138 | break; | ||
139 | case SH_BREAKPOINT_LEN_8: | ||
140 | len_in_bytes = 8; | ||
141 | break; | ||
142 | } | ||
143 | return len_in_bytes; | ||
144 | } | ||
145 | |||
146 | /* | ||
147 | * Check for virtual address in user space. | ||
148 | */ | ||
149 | int arch_check_va_in_userspace(unsigned long va, u16 hbp_len) | ||
150 | { | ||
151 | unsigned int len; | ||
152 | |||
153 | len = get_hbp_len(hbp_len); | ||
154 | |||
155 | return (va <= TASK_SIZE - len); | ||
156 | } | ||
157 | |||
158 | /* | ||
159 | * Check for virtual address in kernel space. | ||
160 | */ | ||
161 | static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len) | ||
162 | { | ||
163 | unsigned int len; | ||
164 | |||
165 | len = get_hbp_len(hbp_len); | ||
166 | |||
167 | return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); | ||
168 | } | ||
169 | |||
170 | /* | ||
171 | * Store a breakpoint's encoded address, length, and type. | ||
172 | */ | ||
173 | static int arch_store_info(struct perf_event *bp) | ||
174 | { | ||
175 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | ||
176 | |||
177 | /* | ||
178 | * User-space requests will always have the address field populated | ||
179 | * For kernel-addresses, either the address or symbol name can be | ||
180 | * specified. | ||
181 | */ | ||
182 | if (info->name) | ||
183 | info->address = (unsigned long)kallsyms_lookup_name(info->name); | ||
184 | if (info->address) { | ||
185 | info->asid = get_asid(); | ||
186 | return 0; | ||
187 | } | ||
188 | |||
189 | return -EINVAL; | ||
190 | } | ||
191 | |||
192 | int arch_bp_generic_fields(int sh_len, int sh_type, | ||
193 | int *gen_len, int *gen_type) | ||
194 | { | ||
195 | /* Len */ | ||
196 | switch (sh_len) { | ||
197 | case SH_BREAKPOINT_LEN_1: | ||
198 | *gen_len = HW_BREAKPOINT_LEN_1; | ||
199 | break; | ||
200 | case SH_BREAKPOINT_LEN_2: | ||
201 | *gen_len = HW_BREAKPOINT_LEN_2; | ||
202 | break; | ||
203 | case SH_BREAKPOINT_LEN_4: | ||
204 | *gen_len = HW_BREAKPOINT_LEN_4; | ||
205 | break; | ||
206 | case SH_BREAKPOINT_LEN_8: | ||
207 | *gen_len = HW_BREAKPOINT_LEN_8; | ||
208 | break; | ||
209 | default: | ||
210 | return -EINVAL; | ||
211 | } | ||
212 | |||
213 | /* Type */ | ||
214 | switch (sh_type) { | ||
215 | case SH_BREAKPOINT_READ: | ||
216 | *gen_type = HW_BREAKPOINT_R; | ||
217 | case SH_BREAKPOINT_WRITE: | ||
218 | *gen_type = HW_BREAKPOINT_W; | ||
219 | break; | ||
220 | case SH_BREAKPOINT_RW: | ||
221 | *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R; | ||
222 | break; | ||
223 | default: | ||
224 | return -EINVAL; | ||
225 | } | ||
226 | |||
227 | return 0; | ||
228 | } | ||
229 | |||
/*
 * Fill in the arch-specific breakpoint info from the generic perf
 * attributes: copy the address and map the generic len/type encodings
 * to their SH_BREAKPOINT_* equivalents (the inverse direction of
 * arch_bp_generic_fields()).  Returns -EINVAL for an unsupported
 * length or type.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	info->address = bp->attr.bp_addr;

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->len = SH_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->len = SH_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->len = SH_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->len = SH_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_R:
		info->type = SH_BREAKPOINT_READ;
		break;
	case HW_BREAKPOINT_W:
		info->type = SH_BREAKPOINT_WRITE;
		break;
	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
		info->type = SH_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
271 | |||
/*
 * Validate the arch-specific HW Breakpoint register settings.
 *
 * Builds the arch info from the perf attributes, checks address
 * alignment against the requested length, and verifies the address
 * falls in user space (when @tsk is set) or kernel space (when it is
 * NULL).  Returns 0 on success or a negative error code.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp,
				  struct task_struct *tsk)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned int align;
	int ret;

	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	ret = -EINVAL;

	/* Derive the low-order alignment mask implied by the length. */
	switch (info->len) {
	case SH_BREAKPOINT_LEN_1:
		align = 0;
		break;
	case SH_BREAKPOINT_LEN_2:
		align = 1;
		break;
	case SH_BREAKPOINT_LEN_4:
		align = 3;
		break;
	case SH_BREAKPOINT_LEN_8:
		align = 7;
		break;
	default:
		return ret;
	}

	/*
	 * NOTE(review): if bp->callback is NULL, ret stays -EINVAL and the
	 * function fails below -- presumably every breakpoint counter
	 * carries a callback at this point; confirm against the generic
	 * hw_breakpoint registration path.
	 */
	if (bp->callback)
		ret = arch_store_info(bp);

	if (ret < 0)
		return ret;

	/*
	 * Check that the low-order bits of the address are appropriate
	 * for the alignment implied by len.
	 */
	if (info->address & align)
		return -EINVAL;

	/* Check that the virtual address is in the proper range */
	if (tsk) {
		if (!arch_check_va_in_userspace(info->address, info->len))
			return -EFAULT;
	} else {
		if (!arch_check_va_in_kernelspace(info->address, info->len))
			return -EFAULT;
	}

	return 0;
}
329 | |||
330 | /* | ||
331 | * Release the user breakpoints used by ptrace | ||
332 | */ | ||
333 | void flush_ptrace_hw_breakpoint(struct task_struct *tsk) | ||
334 | { | ||
335 | int i; | ||
336 | struct thread_struct *t = &tsk->thread; | ||
337 | |||
338 | for (i = 0; i < HBP_NUM; i++) { | ||
339 | unregister_hw_breakpoint(t->ptrace_bps[i]); | ||
340 | t->ptrace_bps[i] = NULL; | ||
341 | } | ||
342 | } | ||
343 | |||
/*
 * Core breakpoint hit handler, called from the die notifier chain.
 *
 * Disables the UBC channel, invokes the perf callback for each
 * installed per-cpu breakpoint, then re-arms the channel from the last
 * counter's arch info.
 *
 * NOTE(review): rc starts as NOTIFY_STOP and is switched to NOTIFY_DONE
 * when a breakpoint IS found -- the usual convention is the opposite
 * (STOP when handled); confirm the intended notifier semantics.
 */
static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int cpu, i, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned long val;

	/* Disable the channel before servicing, preserving other bits. */
	val = __raw_readl(UBC_CBR0);
	__raw_writel(val & ~UBC_CBR_CE, UBC_CBR0);

	cpu = get_cpu();
	for (i = 0; i < HBP_NUM; i++) {
		/*
		 * The counter may be concurrently released but that can only
		 * occur from a call_rcu() path. We can then safely fetch
		 * the breakpoint, use its callback, touch its counter
		 * while we are in an rcu_read_lock() path.
		 */
		rcu_read_lock();

		bp = per_cpu(bp_per_reg[i], cpu);
		if (bp) {
			rc = NOTIFY_DONE;
		} else {
			/* First empty slot: no further counters installed. */
			rcu_read_unlock();
			break;
		}

		(bp->callback)(bp, args->regs);

		rcu_read_unlock();
	}

	/* Re-arm the channel from the last breakpoint we serviced. */
	if (bp) {
		struct arch_hw_breakpoint *info = counter_arch_bp(bp);

		__raw_writel(UBC_CBR_CE | info->len | info->type, UBC_CBR0);
		__raw_writel(info->address, UBC_CAR0);
	}

	put_cpu();

	return rc;
}
387 | |||
/*
 * Low-level trap entry for the UBC break vector (wired up as
 * breakpoint_trap_handler in the exception table).  TRAP_HANDLER_DECL
 * provides the regs binding; we simply forward the event to the die
 * notifier chain, which reaches hw_breakpoint_exceptions_notify().
 */
BUILD_TRAP_HANDLER(breakpoint)
{
	unsigned long ex = lookup_exception_vector();
	TRAP_HANDLER_DECL;

	notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
}
395 | |||
396 | /* | ||
397 | * Handle debug exception notifications. | ||
398 | */ | ||
399 | int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused, | ||
400 | unsigned long val, void *data) | ||
401 | { | ||
402 | if (val != DIE_BREAKPOINT) | ||
403 | return NOTIFY_DONE; | ||
404 | |||
405 | return hw_breakpoint_handler(data); | ||
406 | } | ||
407 | |||
/* Required by the perf hw_breakpoint PMU interface; not yet implemented. */
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}

/* Required by the perf hw_breakpoint PMU interface; not yet implemented. */
void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
{
	/* TODO */
}
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index 0673c4746be3..4a2c866f9773 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/fs.h> | 25 | #include <linux/fs.h> |
26 | #include <linux/ftrace.h> | 26 | #include <linux/ftrace.h> |
27 | #include <linux/preempt.h> | 27 | #include <linux/preempt.h> |
28 | #include <linux/hw_breakpoint.h> | ||
28 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
29 | #include <asm/mmu_context.h> | 30 | #include <asm/mmu_context.h> |
30 | #include <asm/pgalloc.h> | 31 | #include <asm/pgalloc.h> |
@@ -34,8 +35,6 @@ | |||
34 | #include <asm/syscalls.h> | 35 | #include <asm/syscalls.h> |
35 | #include <asm/watchdog.h> | 36 | #include <asm/watchdog.h> |
36 | 37 | ||
37 | int ubc_usercnt = 0; | ||
38 | |||
39 | #ifdef CONFIG_32BIT | 38 | #ifdef CONFIG_32BIT |
40 | static void watchdog_trigger_immediate(void) | 39 | static void watchdog_trigger_immediate(void) |
41 | { | 40 | { |
@@ -148,16 +147,15 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) | |||
148 | */ | 147 | */ |
149 | void exit_thread(void) | 148 | void exit_thread(void) |
150 | { | 149 | { |
151 | if (current->thread.ubc_pc) { | ||
152 | current->thread.ubc_pc = 0; | ||
153 | ubc_usercnt -= 1; | ||
154 | } | ||
155 | } | 150 | } |
156 | 151 | ||
157 | void flush_thread(void) | 152 | void flush_thread(void) |
158 | { | 153 | { |
159 | #if defined(CONFIG_SH_FPU) | ||
160 | struct task_struct *tsk = current; | 154 | struct task_struct *tsk = current; |
155 | |||
156 | flush_ptrace_hw_breakpoint(tsk); | ||
157 | |||
158 | #if defined(CONFIG_SH_FPU) | ||
161 | /* Forget lazy FPU state */ | 159 | /* Forget lazy FPU state */ |
162 | clear_fpu(tsk, task_pt_regs(tsk)); | 160 | clear_fpu(tsk, task_pt_regs(tsk)); |
163 | clear_used_math(); | 161 | clear_used_math(); |
@@ -195,9 +193,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
195 | { | 193 | { |
196 | struct thread_info *ti = task_thread_info(p); | 194 | struct thread_info *ti = task_thread_info(p); |
197 | struct pt_regs *childregs; | 195 | struct pt_regs *childregs; |
198 | #if defined(CONFIG_SH_FPU) || defined(CONFIG_SH_DSP) | ||
199 | struct task_struct *tsk = current; | 196 | struct task_struct *tsk = current; |
200 | #endif | ||
201 | 197 | ||
202 | #if defined(CONFIG_SH_FPU) | 198 | #if defined(CONFIG_SH_FPU) |
203 | unlazy_fpu(tsk, regs); | 199 | unlazy_fpu(tsk, regs); |
@@ -234,53 +230,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
234 | p->thread.sp = (unsigned long) childregs; | 230 | p->thread.sp = (unsigned long) childregs; |
235 | p->thread.pc = (unsigned long) ret_from_fork; | 231 | p->thread.pc = (unsigned long) ret_from_fork; |
236 | 232 | ||
237 | p->thread.ubc_pc = 0; | 233 | memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); |
238 | 234 | ||
239 | return 0; | 235 | return 0; |
240 | } | 236 | } |
241 | 237 | ||
242 | /* Tracing by user break controller. */ | ||
243 | static void ubc_set_tracing(int asid, unsigned long pc) | ||
244 | { | ||
245 | #if defined(CONFIG_CPU_SH4A) | ||
246 | unsigned long val; | ||
247 | |||
248 | val = (UBC_CBR_ID_INST | UBC_CBR_RW_READ | UBC_CBR_CE); | ||
249 | val |= (UBC_CBR_AIE | UBC_CBR_AIV_SET(asid)); | ||
250 | |||
251 | ctrl_outl(val, UBC_CBR0); | ||
252 | ctrl_outl(pc, UBC_CAR0); | ||
253 | ctrl_outl(0x0, UBC_CAMR0); | ||
254 | ctrl_outl(0x0, UBC_CBCR); | ||
255 | |||
256 | val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE); | ||
257 | ctrl_outl(val, UBC_CRR0); | ||
258 | |||
259 | /* Read UBC register that we wrote last, for checking update */ | ||
260 | val = ctrl_inl(UBC_CRR0); | ||
261 | |||
262 | #else /* CONFIG_CPU_SH4A */ | ||
263 | ctrl_outl(pc, UBC_BARA); | ||
264 | |||
265 | #ifdef CONFIG_MMU | ||
266 | ctrl_outb(asid, UBC_BASRA); | ||
267 | #endif | ||
268 | |||
269 | ctrl_outl(0, UBC_BAMRA); | ||
270 | |||
271 | if (current_cpu_data.type == CPU_SH7729 || | ||
272 | current_cpu_data.type == CPU_SH7710 || | ||
273 | current_cpu_data.type == CPU_SH7712 || | ||
274 | current_cpu_data.type == CPU_SH7203){ | ||
275 | ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA); | ||
276 | ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR); | ||
277 | } else { | ||
278 | ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA); | ||
279 | ctrl_outw(BRCR_PCBA, UBC_BRCR); | ||
280 | } | ||
281 | #endif /* CONFIG_CPU_SH4A */ | ||
282 | } | ||
283 | |||
284 | /* | 238 | /* |
285 | * switch_to(x,y) should switch tasks from x to y. | 239 | * switch_to(x,y) should switch tasks from x to y. |
286 | * | 240 | * |
@@ -302,25 +256,6 @@ __switch_to(struct task_struct *prev, struct task_struct *next) | |||
302 | : "r" (task_thread_info(next))); | 256 | : "r" (task_thread_info(next))); |
303 | #endif | 257 | #endif |
304 | 258 | ||
305 | /* If no tasks are using the UBC, we're done */ | ||
306 | if (ubc_usercnt == 0) | ||
307 | /* If no tasks are using the UBC, we're done */; | ||
308 | else if (next->thread.ubc_pc && next->mm) { | ||
309 | int asid = 0; | ||
310 | #ifdef CONFIG_MMU | ||
311 | asid |= cpu_asid(smp_processor_id(), next->mm); | ||
312 | #endif | ||
313 | ubc_set_tracing(asid, next->thread.ubc_pc); | ||
314 | } else { | ||
315 | #if defined(CONFIG_CPU_SH4A) | ||
316 | ctrl_outl(UBC_CBR_INIT, UBC_CBR0); | ||
317 | ctrl_outl(UBC_CRR_INIT, UBC_CRR0); | ||
318 | #else | ||
319 | ctrl_outw(0, UBC_BBRA); | ||
320 | ctrl_outw(0, UBC_BBRB); | ||
321 | #endif | ||
322 | } | ||
323 | |||
324 | return prev; | 259 | return prev; |
325 | } | 260 | } |
326 | 261 | ||
@@ -412,20 +347,3 @@ unsigned long get_wchan(struct task_struct *p) | |||
412 | 347 | ||
413 | return pc; | 348 | return pc; |
414 | } | 349 | } |
415 | |||
416 | asmlinkage void break_point_trap(void) | ||
417 | { | ||
418 | /* Clear tracing. */ | ||
419 | #if defined(CONFIG_CPU_SH4A) | ||
420 | ctrl_outl(UBC_CBR_INIT, UBC_CBR0); | ||
421 | ctrl_outl(UBC_CRR_INIT, UBC_CRR0); | ||
422 | #else | ||
423 | ctrl_outw(0, UBC_BBRA); | ||
424 | ctrl_outw(0, UBC_BBRB); | ||
425 | ctrl_outl(0, UBC_BRCR); | ||
426 | #endif | ||
427 | current->thread.ubc_pc = 0; | ||
428 | ubc_usercnt -= 1; | ||
429 | |||
430 | force_sig(SIGTRAP, current); | ||
431 | } | ||
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index 9be35f348093..bdb10446cbac 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c | |||
@@ -65,31 +65,12 @@ static inline int put_stack_long(struct task_struct *task, int offset, | |||
65 | 65 | ||
66 | void user_enable_single_step(struct task_struct *child) | 66 | void user_enable_single_step(struct task_struct *child) |
67 | { | 67 | { |
68 | /* Next scheduling will set up UBC */ | ||
69 | if (child->thread.ubc_pc == 0) | ||
70 | ubc_usercnt += 1; | ||
71 | |||
72 | child->thread.ubc_pc = get_stack_long(child, | ||
73 | offsetof(struct pt_regs, pc)); | ||
74 | |||
75 | set_tsk_thread_flag(child, TIF_SINGLESTEP); | 68 | set_tsk_thread_flag(child, TIF_SINGLESTEP); |
76 | } | 69 | } |
77 | 70 | ||
78 | void user_disable_single_step(struct task_struct *child) | 71 | void user_disable_single_step(struct task_struct *child) |
79 | { | 72 | { |
80 | clear_tsk_thread_flag(child, TIF_SINGLESTEP); | 73 | clear_tsk_thread_flag(child, TIF_SINGLESTEP); |
81 | |||
82 | /* | ||
83 | * Ensure the UBC is not programmed at the next context switch. | ||
84 | * | ||
85 | * Normally this is not needed but there are sequences such as | ||
86 | * singlestep, signal delivery, and continue that leave the | ||
87 | * ubc_pc non-zero leading to spurious SIGTRAPs. | ||
88 | */ | ||
89 | if (child->thread.ubc_pc != 0) { | ||
90 | ubc_usercnt -= 1; | ||
91 | child->thread.ubc_pc = 0; | ||
92 | } | ||
93 | } | 74 | } |
94 | 75 | ||
95 | /* | 76 | /* |