path: root/arch/sh/kernel/hw_breakpoint.c
Diffstat (limited to 'arch/sh/kernel/hw_breakpoint.c')
-rw-r--r--	arch/sh/kernel/hw_breakpoint.c	463
1 file changed, 463 insertions(+), 0 deletions(-)
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
new file mode 100644
index 000000000000..e2f1753d275c
--- /dev/null
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -0,0 +1,463 @@
/*
 * arch/sh/kernel/hw_breakpoint.c
 *
 * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
 *
 * Copyright (C) 2009 - 2010 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <asm/hw_breakpoint.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>

/*
 * Stores the breakpoint currently in use by each breakpoint address
 * register on each CPU.
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);

/*
 * A dummy placeholder for early accesses until the CPUs get a chance to
 * register their UBCs later in the boot process.
 */
static struct sh_ubc ubc_dummy = { .num_events = 0 };

static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy;

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free UBC channel and use it for this breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	for (i = 0; i < sh_ubc->num_events; i++) {
		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
		return -EBUSY;

	clk_enable(sh_ubc->clk);
	sh_ubc->enable(info, i);

	return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	for (i = 0; i < sh_ubc->num_events; i++) {
		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
		return;

	sh_ubc->disable(info, i);
	clk_disable(sh_ubc->clk);
}

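/*
 * Translate the arch-specific length encoding into a byte count.
 */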
static int get_hbp_len(u16 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case SH_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case SH_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case SH_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case SH_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}
	return len_in_bytes;
}

/*
 * Check for virtual address in user space.
 */
int arch_check_va_in_userspace(unsigned long va, u16 hbp_len)
{
	unsigned int len;

	len = get_hbp_len(hbp_len);

	return (va <= TASK_SIZE - len);
}

/*
 * Check for virtual address in kernel space.
 */
static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
{
	unsigned int len;

	len = get_hbp_len(hbp_len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

/*
 * Store a breakpoint's encoded address, length, and type.
 */
static int arch_store_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	/*
	 * User-space requests will always have the address field populated.
	 * For kernel addresses, either the address or the symbol name can be
	 * specified.
	 */
	if (info->name)
		info->address = (unsigned long)kallsyms_lookup_name(info->name);
	if (info->address)
		return 0;

	return -EINVAL;
}

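/*
 * Translate the arch-specific breakpoint length and type encodings into
 * the generic values used by the core hw_breakpoint/perf layer.
 */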
int arch_bp_generic_fields(int sh_len, int sh_type,
			   int *gen_len, int *gen_type)
{
	/* Len */
	switch (sh_len) {
	case SH_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case SH_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case SH_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case SH_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Type */
	switch (sh_type) {
	case SH_BREAKPOINT_READ:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case SH_BREAKPOINT_WRITE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case SH_BREAKPOINT_RW:
		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

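/*
 * Decode the generic perf attributes (bp_addr, bp_len, bp_type) into the
 * arch-specific form consumed by the UBC.
 */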
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	info->address = bp->attr.bp_addr;

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->len = SH_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->len = SH_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->len = SH_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->len = SH_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_R:
		info->type = SH_BREAKPOINT_READ;
		break;
	case HW_BREAKPOINT_W:
		info->type = SH_BREAKPOINT_WRITE;
		break;
	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
		info->type = SH_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp,
				  struct task_struct *tsk)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned int align;
	int ret;

	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	ret = -EINVAL;

	switch (info->len) {
	case SH_BREAKPOINT_LEN_1:
		align = 0;
		break;
	case SH_BREAKPOINT_LEN_2:
		align = 1;
		break;
	case SH_BREAKPOINT_LEN_4:
		align = 3;
		break;
	case SH_BREAKPOINT_LEN_8:
		align = 7;
		break;
	default:
		return ret;
	}

	ret = arch_store_info(bp);

	if (ret < 0)
		return ret;

	/*
	 * Check that the low-order bits of the address are appropriate
	 * for the alignment implied by len.
	 */
	if (info->address & align)
		return -EINVAL;

	/* Check that the virtual address is in the proper range */
	if (tsk) {
		if (!arch_check_va_in_userspace(info->address, info->len))
			return -EFAULT;
	} else {
		if (!arch_check_va_in_kernelspace(info->address, info->len))
			return -EFAULT;
	}

	return 0;
}

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < sh_ubc->num_events; i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}
}

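/*
 * Main UBC hit handler, invoked via the die notifier chain. It disables
 * the channels while it runs and re-enables the surviving ones on exit.
 */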
static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int cpu, i, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned int cmf, resume_mask;

	/*
	 * Do an early return if none of the channels triggered.
	 */
	cmf = sh_ubc->triggered_mask();
	if (unlikely(!cmf))
		return NOTIFY_DONE;

	/*
	 * By default, resume all of the active channels.
	 */
	resume_mask = sh_ubc->active_mask();

	/*
	 * Disable breakpoints during exception handling.
	 */
	sh_ubc->disable_all();

	cpu = get_cpu();
	for (i = 0; i < sh_ubc->num_events; i++) {
		unsigned long event_mask = (1 << i);

		if (likely(!(cmf & event_mask)))
			continue;

		/*
		 * The counter may be concurrently released, but that can only
		 * occur from a call_rcu() path. We can therefore safely fetch
		 * the breakpoint, use its callback, and touch its counter
		 * while we are inside an rcu_read_lock() section.
		 */
		rcu_read_lock();

		bp = per_cpu(bp_per_reg[i], cpu);
		if (bp)
			rc = NOTIFY_DONE;

		/*
		 * Reset the condition match flag to denote completion of
		 * exception handling.
		 */
		sh_ubc->clear_triggered_mask(event_mask);

		/*
		 * bp can be NULL due to concurrent perf counter
		 * removal.
		 */
		if (!bp) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Don't restore the channel if the breakpoint is from
		 * ptrace, as it always operates in one-shot mode.
		 */
		if (bp->overflow_handler == ptrace_triggered)
			resume_mask &= ~(1 << i);

		perf_bp_event(bp, args->regs);

		/* Deliver the signal to userspace */
		if (arch_check_va_in_userspace(bp->attr.bp_addr,
					       bp->attr.bp_len)) {
			siginfo_t info;

			info.si_signo = args->signr;
			info.si_errno = notifier_to_errno(rc);
			info.si_code = TRAP_HWBKPT;

			force_sig_info(args->signr, &info, current);
		}

		rcu_read_unlock();
	}

	if (cmf == 0)
		rc = NOTIFY_DONE;

	sh_ubc->enable_all(resume_mask);

	put_cpu();

	return rc;
}

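/*
 * Low-level debug trap entry; forwards the event to the die notifier
 * chain, where hw_breakpoint_exceptions_notify() picks it up.
 */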
BUILD_TRAP_HANDLER(breakpoint)
{
	unsigned long ex = lookup_exception_vector();
	TRAP_HANDLER_DECL;

	notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
}

/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
					      unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val != DIE_BREAKPOINT)
		return NOTIFY_DONE;

	/*
	 * If the breakpoint hasn't been triggered by the UBC, it's
	 * probably from a debugger, so don't do anything more here.
	 *
	 * This also permits the UBC interface clock to remain off for
	 * non-UBC breakpoints, as we don't need to check the triggered
	 * or active channel masks.
	 */
	if (args->trapnr != sh_ubc->trap_nr)
		return NOTIFY_DONE;

	return hw_breakpoint_handler(data);
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}

void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
{
	/* TODO */
}

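/*
 * Called by a CPU family's UBC support code during boot to replace the
 * early ubc_dummy placeholder with the real descriptor.
 */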
int register_sh_ubc(struct sh_ubc *ubc)
{
	/* Bail if it's already assigned */
	if (sh_ubc != &ubc_dummy)
		return -EBUSY;
	sh_ubc = ubc;

	pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name);

	WARN_ON(ubc->num_events > HBP_NUM);

	return 0;
}
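
As a usage reference (not part of this commit): a CPU family provides the callbacks this file invokes and hands its descriptor to register_sh_ubc() during boot. The sketch below is a hypothetical illustration; the "example" names, trap vector, and clock name are assumptions, and the callback signatures are inferred from the calls made above rather than taken from any real SH support file.

/* Hypothetical sketch of a CPU family's UBC registration. */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <asm/hw_breakpoint.h>

/* Callback prototypes, inferred from how hw_breakpoint.c invokes them;
 * the register-level bodies are omitted here. */
void example_ubc_enable(struct arch_hw_breakpoint *info, int idx);
void example_ubc_disable(struct arch_hw_breakpoint *info, int idx);
void example_ubc_enable_all(unsigned long mask);
void example_ubc_disable_all(void);
unsigned long example_ubc_active_mask(void);
unsigned long example_ubc_triggered_mask(void);
void example_ubc_clear_triggered_mask(unsigned long mask);

static struct sh_ubc example_ubc = {
	.name			= "example",
	.num_events		= 2,		/* must not exceed HBP_NUM */
	.trap_nr		= 0x1e0,	/* assumed debug trap vector */
	.enable			= example_ubc_enable,
	.disable		= example_ubc_disable,
	.enable_all		= example_ubc_enable_all,
	.disable_all		= example_ubc_disable_all,
	.active_mask		= example_ubc_active_mask,
	.triggered_mask		= example_ubc_triggered_mask,
	.clear_triggered_mask	= example_ubc_clear_triggered_mask,
};

static int __init example_ubc_init(void)
{
	/* "ubc0" is an assumed clock name; clk_enable()/clk_disable() in
	 * hw_breakpoint.c expect a valid struct clk here. */
	example_ubc.clk = clk_get(NULL, "ubc0");
	if (IS_ERR(example_ubc.clk))
		return PTR_ERR(example_ubc.clk);

	return register_sh_ubc(&example_ubc);
}
arch_initcall(example_ubc_init);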