author		Paul Mundt <lethal@linux-sh.org>	2010-01-05 05:06:45 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2010-01-05 05:06:45 -0500
commit		4352fc1b12fae4c753a063a2f162ddf9277af774 (patch)
tree		686ca79f2e1c4dbe65e51cac2b14a8234069b0a0 /arch/sh/kernel
parent		c4761815ab49feca904776dec464046bc7138d3a (diff)
sh: Abstracted SH-4A UBC support on hw-breakpoint core.
This is the next big chunk of hw_breakpoint support. This decouples the SH-4A support from the core and moves it out into its own stub, following many of the conventions established with the perf events layering.

In addition to extending SH-4A support to encapsulate the remainder of the UBC channels, clock framework support for handling the UBC interface clock is added as well, allowing for dynamic clock gating.

This also fixes up a regression introduced by the SIGTRAP handling that broke the ksym_tracer, to the extent that the current support now works well with ksym_tracer, ptrace, and kgdb alike. The kprobes singlestep code will follow in turn.

With this in place, the remaining UBC variants (SH-2A and SH-4) can now be trivially plugged in.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
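The patch programs against a struct sh_ubc ops interface declared in arch/sh/include/asm/hw_breakpoint.h, which falls outside the arch/sh/kernel path this page is limited to. As a reading aid, here is a sketch of that interface reconstructed purely from the fields and callbacks the diff below uses; exact types and field order in the real header may differ:

/* Sketch only -- reconstructed from usage in this diff, not copied
 * from the real header. */
struct sh_ubc {
	const char	*name;
	unsigned int	num_events;
	unsigned int	trap_nr;
	void		(*enable)(struct arch_hw_breakpoint *info, int idx);
	void		(*disable)(struct arch_hw_breakpoint *info, int idx);
	void		(*enable_all)(unsigned long mask);
	void		(*disable_all)(void);
	unsigned long	(*active_mask)(void);
	unsigned long	(*triggered_mask)(void);
	void		(*clear_triggered_mask)(unsigned long mask);
	struct clk	*clk;	/* optional interface clock; may be NULL */
};

int register_sh_ubc(struct sh_ubc *ubc);

All of these hooks are exercised by hw_breakpoint.c below; the clk pointer is the optional UBC interface clock used for the dynamic gating the message mentions.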
Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--	arch/sh/kernel/cpu/sh4a/Makefile	|   9
-rw-r--r--	arch/sh/kernel/cpu/sh4a/ubc.c		| 133
-rw-r--r--	arch/sh/kernel/hw_breakpoint.c		| 164
3 files changed, 234 insertions(+), 72 deletions(-)
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile
index 33bab477d2e2..b144e8af89dc 100644
--- a/arch/sh/kernel/cpu/sh4a/Makefile
+++ b/arch/sh/kernel/cpu/sh4a/Makefile
@@ -41,7 +41,8 @@ pinmux-$(CONFIG_CPU_SUBTYPE_SH7757) := pinmux-sh7757.o
 pinmux-$(CONFIG_CPU_SUBTYPE_SH7785)	:= pinmux-sh7785.o
 pinmux-$(CONFIG_CPU_SUBTYPE_SH7786)	:= pinmux-sh7786.o
 
 obj-y				+= $(clock-y)
 obj-$(CONFIG_SMP)		+= $(smp-y)
 obj-$(CONFIG_GENERIC_GPIO)	+= $(pinmux-y)
 obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= ubc.o
diff --git a/arch/sh/kernel/cpu/sh4a/ubc.c b/arch/sh/kernel/cpu/sh4a/ubc.c
new file mode 100644
index 000000000000..efb2745bcb36
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/ubc.c
@@ -0,0 +1,133 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/ubc.c
+ *
+ * On-chip UBC support for SH-4A CPUs.
+ *
+ * Copyright (C) 2009 - 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <asm/hw_breakpoint.h>
+
+#define UBC_CBR(idx)	(0xff200000 + (0x20 * idx))
+#define UBC_CRR(idx)	(0xff200004 + (0x20 * idx))
+#define UBC_CAR(idx)	(0xff200008 + (0x20 * idx))
+#define UBC_CAMR(idx)	(0xff20000c + (0x20 * idx))
+
+#define UBC_CCMFR	0xff200600
+#define UBC_CBCR	0xff200620
+
+/* CRR */
+#define UBC_CRR_PCB	(1 << 1)
+#define UBC_CRR_BIE	(1 << 0)
+
+/* CBR */
+#define UBC_CBR_CE	(1 << 0)
+
+static struct sh_ubc sh4a_ubc;
+
+static void sh4a_ubc_enable(struct arch_hw_breakpoint *info, int idx)
+{
+	__raw_writel(UBC_CBR_CE | info->len | info->type, UBC_CBR(idx));
+	__raw_writel(info->address, UBC_CAR(idx));
+}
+
+static void sh4a_ubc_disable(struct arch_hw_breakpoint *info, int idx)
+{
+	__raw_writel(0, UBC_CBR(idx));
+	__raw_writel(0, UBC_CAR(idx));
+}
+
+static void sh4a_ubc_enable_all(unsigned long mask)
+{
+	int i;
+
+	for (i = 0; i < sh4a_ubc.num_events; i++)
+		if (mask & (1 << i))
+			__raw_writel(__raw_readl(UBC_CBR(i)) | UBC_CBR_CE,
+				     UBC_CBR(i));
+}
+
+static void sh4a_ubc_disable_all(void)
+{
+	int i;
+
+	for (i = 0; i < sh4a_ubc.num_events; i++)
+		__raw_writel(__raw_readl(UBC_CBR(i)) & ~UBC_CBR_CE,
+			     UBC_CBR(i));
+}
+
+static unsigned long sh4a_ubc_active_mask(void)
+{
+	unsigned long active = 0;
+	int i;
+
+	for (i = 0; i < sh4a_ubc.num_events; i++)
+		if (__raw_readl(UBC_CBR(i)) & UBC_CBR_CE)
+			active |= (1 << i);
+
+	return active;
+}
+
+static unsigned long sh4a_ubc_triggered_mask(void)
+{
+	return __raw_readl(UBC_CCMFR);
+}
+
+static void sh4a_ubc_clear_triggered_mask(unsigned long mask)
+{
+	__raw_writel(__raw_readl(UBC_CCMFR) & ~mask, UBC_CCMFR);
+}
+
+static struct sh_ubc sh4a_ubc = {
+	.name			= "SH-4A",
+	.num_events		= 2,
+	.trap_nr		= 0x1e0,
+	.enable			= sh4a_ubc_enable,
+	.disable		= sh4a_ubc_disable,
+	.enable_all		= sh4a_ubc_enable_all,
+	.disable_all		= sh4a_ubc_disable_all,
+	.active_mask		= sh4a_ubc_active_mask,
+	.triggered_mask		= sh4a_ubc_triggered_mask,
+	.clear_triggered_mask	= sh4a_ubc_clear_triggered_mask,
+};
+
+static int __init sh4a_ubc_init(void)
+{
+	struct clk *ubc_iclk = clk_get(NULL, "ubc0");
+	int i;
+
+	/*
+	 * The UBC MSTP bit is optional, as not all platforms will have
+	 * it. Just ignore it if we can't find it.
+	 */
+	if (IS_ERR(ubc_iclk))
+		ubc_iclk = NULL;
+
+	clk_enable(ubc_iclk);
+
+	__raw_writel(0, UBC_CBCR);
+
+	for (i = 0; i < sh4a_ubc.num_events; i++) {
+		__raw_writel(0, UBC_CAMR(i));
+		__raw_writel(0, UBC_CBR(i));
+
+		__raw_writel(UBC_CRR_BIE | UBC_CRR_PCB, UBC_CRR(i));
+
+		/* dummy read for write posting */
+		(void)__raw_readl(UBC_CRR(i));
+	}
+
+	clk_disable(ubc_iclk);
+
+	sh4a_ubc.clk = ubc_iclk;
+
+	return register_sh_ubc(&sh4a_ubc);
+}
+arch_initcall(sh4a_ubc_init);
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
index c515a3ecf562..e2f1753d275c 100644
--- a/arch/sh/kernel/hw_breakpoint.c
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -3,7 +3,7 @@
  *
  * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
  *
- * Copyright (C) 2009 Paul Mundt
+ * Copyright (C) 2009 - 2010 Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
@@ -18,38 +18,24 @@
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 #include <linux/io.h>
+#include <linux/clk.h>
 #include <asm/hw_breakpoint.h>
 #include <asm/mmu_context.h>
 #include <asm/ptrace.h>
 
-struct ubc_context {
-	unsigned long pc;
-	unsigned long state;
-};
-
-/* Per cpu ubc channel state */
-static DEFINE_PER_CPU(struct ubc_context, ubc_ctx[HBP_NUM]);
-
 /*
  * Stores the breakpoints currently in use on each breakpoint address
  * register for each cpus
  */
 static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);
 
-static int __init ubc_init(void)
-{
-	__raw_writel(0, UBC_CAMR0);
-	__raw_writel(0, UBC_CBR0);
-	__raw_writel(0, UBC_CBCR);
-
-	__raw_writel(UBC_CRR_BIE | UBC_CRR_PCB, UBC_CRR0);
-
-	/* dummy read for write posting */
-	(void)__raw_readl(UBC_CRR0);
-
-	return 0;
-}
-arch_initcall(ubc_init);
+/*
+ * A dummy placeholder for early accesses until the CPUs get a chance to
+ * register their UBCs later in the boot process.
+ */
+static struct sh_ubc ubc_dummy = { .num_events = 0 };
+
+static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy;
 
 /*
  * Install a perf counter breakpoint.
@@ -62,10 +48,9 @@ arch_initcall(ubc_init);
 int arch_install_hw_breakpoint(struct perf_event *bp)
 {
 	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-	struct ubc_context *ubc_ctx;
 	int i;
 
-	for (i = 0; i < HBP_NUM; i++) {
+	for (i = 0; i < sh_ubc->num_events; i++) {
 		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
 
 		if (!*slot) {
@@ -74,16 +59,11 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 		}
 	}
 
-	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
+	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
 		return -EBUSY;
 
-	ubc_ctx = &__get_cpu_var(ubc_ctx[i]);
-
-	ubc_ctx->pc = info->address;
-	ubc_ctx->state = info->len | info->type;
-
-	__raw_writel(UBC_CBR_CE | ubc_ctx->state, UBC_CBR0);
-	__raw_writel(ubc_ctx->pc, UBC_CAR0);
+	clk_enable(sh_ubc->clk);
+	sh_ubc->enable(info, i);
 
 	return 0;
 }
@@ -100,10 +80,9 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 {
 	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-	struct ubc_context *ubc_ctx;
 	int i;
 
-	for (i = 0; i < HBP_NUM; i++) {
+	for (i = 0; i < sh_ubc->num_events; i++) {
 		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
 
 		if (*slot == bp) {
@@ -112,15 +91,11 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 		}
 	}
 
-	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
+	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
 		return;
 
-	ubc_ctx = &__get_cpu_var(ubc_ctx[i]);
-	ubc_ctx->pc = 0;
-	ubc_ctx->state &= ~(info->len | info->type);
-
-	__raw_writel(ubc_ctx->pc, UBC_CBR0);
-	__raw_writel(ubc_ctx->state, UBC_CAR0);
+	sh_ubc->disable(info, i);
+	clk_disable(sh_ubc->clk);
 }
 
 static int get_hbp_len(u16 hbp_len)
@@ -182,10 +157,8 @@ static int arch_store_info(struct perf_event *bp)
 	 */
 	if (info->name)
 		info->address = (unsigned long)kallsyms_lookup_name(info->name);
-	if (info->address) {
-		info->asid = get_asid();
+	if (info->address)
 		return 0;
-	}
 
 	return -EINVAL;
 }
@@ -335,7 +308,7 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
 	int i;
 	struct thread_struct *t = &tsk->thread;
 
-	for (i = 0; i < HBP_NUM; i++) {
+	for (i = 0; i < sh_ubc->num_events; i++) {
 		unregister_hw_breakpoint(t->ptrace_bps[i]);
 		t->ptrace_bps[i] = NULL;
 	}
@@ -345,13 +318,32 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
 {
 	int cpu, i, rc = NOTIFY_STOP;
 	struct perf_event *bp;
-	unsigned long val;
+	unsigned int cmf, resume_mask;
+
+	/*
+	 * Do an early return if none of the channels triggered.
+	 */
+	cmf = sh_ubc->triggered_mask();
+	if (unlikely(!cmf))
+		return NOTIFY_DONE;
+
+	/*
+	 * By default, resume all of the active channels.
+	 */
+	resume_mask = sh_ubc->active_mask();
 
-	val = __raw_readl(UBC_CBR0);
-	__raw_writel(val & ~UBC_CBR_CE, UBC_CBR0);
+	/*
+	 * Disable breakpoints during exception handling.
+	 */
+	sh_ubc->disable_all();
 
 	cpu = get_cpu();
-	for (i = 0; i < HBP_NUM; i++) {
+	for (i = 0; i < sh_ubc->num_events; i++) {
+		unsigned long event_mask = (1 << i);
+
+		if (likely(!(cmf & event_mask)))
+			continue;
+
 		/*
 		 * The counter may be concurrently released but that can only
 		 * occur from a call_rcu() path. We can then safely fetch
@@ -361,24 +353,52 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
 		rcu_read_lock();
 
 		bp = per_cpu(bp_per_reg[i], cpu);
-		if (bp) {
+		if (bp)
 			rc = NOTIFY_DONE;
-		} else {
+
+		/*
+		 * Reset the condition match flag to denote completion of
+		 * exception handling.
+		 */
+		sh_ubc->clear_triggered_mask(event_mask);
+
+		/*
+		 * bp can be NULL due to concurrent perf counter
+		 * removing.
+		 */
+		if (!bp) {
 			rcu_read_unlock();
 			break;
 		}
 
+		/*
+		 * Don't restore the channel if the breakpoint is from
+		 * ptrace, as it always operates in one-shot mode.
+		 */
+		if (bp->overflow_handler == ptrace_triggered)
+			resume_mask &= ~(1 << i);
+
 		perf_bp_event(bp, args->regs);
 
+		/* Deliver the signal to userspace */
+		if (arch_check_va_in_userspace(bp->attr.bp_addr,
+					       bp->attr.bp_len)) {
+			siginfo_t info;
+
+			info.si_signo = args->signr;
+			info.si_errno = notifier_to_errno(rc);
+			info.si_code = TRAP_HWBKPT;
+
+			force_sig_info(args->signr, &info, current);
+		}
+
 		rcu_read_unlock();
 	}
 
-	if (bp && bp->overflow_handler != ptrace_triggered) {
-		struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
-		__raw_writel(UBC_CBR_CE | info->len | info->type, UBC_CBR0);
-		__raw_writel(info->address, UBC_CAR0);
-	}
+	if (cmf == 0)
+		rc = NOTIFY_DONE;
+
+	sh_ubc->enable_all(resume_mask);
 
 	put_cpu();
 
@@ -388,19 +408,9 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
 BUILD_TRAP_HANDLER(breakpoint)
 {
 	unsigned long ex = lookup_exception_vector();
-	siginfo_t info;
-	int err;
 	TRAP_HANDLER_DECL;
 
-	err = notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
-	if (err == NOTIFY_STOP)
-		return;
-
-	/* Deliver the signal to userspace */
-	info.si_signo = SIGTRAP;
-	info.si_errno = 0;
-	info.si_code = TRAP_HWBKPT;
-	force_sig_info(SIGTRAP, &info, current);
+	notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
 }
 
 /*
@@ -417,8 +427,12 @@ int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
 	/*
 	 * If the breakpoint hasn't been triggered by the UBC, it's
 	 * probably from a debugger, so don't do anything more here.
+	 *
+	 * This also permits the UBC interface clock to remain off for
+	 * non-UBC breakpoints, as we don't need to check the triggered
+	 * or active channel masks.
 	 */
-	if (args->trapnr != 0x1e0)
+	if (args->trapnr != sh_ubc->trap_nr)
 		return NOTIFY_DONE;
 
 	return hw_breakpoint_handler(data);
@@ -433,3 +447,17 @@ void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
 {
 	/* TODO */
 }
+
+int register_sh_ubc(struct sh_ubc *ubc)
+{
+	/* Bail if it's already assigned */
+	if (sh_ubc != &ubc_dummy)
+		return -EBUSY;
+	sh_ubc = ubc;
+
+	pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name);
+
+	WARN_ON(ubc->num_events > HBP_NUM);
+
+	return 0;
+}
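
For a sense of how this backend gets exercised end to end, here is a minimal kernel-side consumer sketch, modeled on samples/hw_breakpoint/data_breakpoint.c from the same kernel era. The watched symbol and messages are illustrative, and the overflow-handler signature matches 2.6.33-era kernels rather than any later API:

/*
 * Sketch of a hw_breakpoint consumer module. Everything here is
 * illustrative; the "pid_max" symbol is just an example target.
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

static struct perf_event **sample_hbp;

/* Invoked on each hit, via the perf_bp_event() call in the handler above */
static void sample_hbp_handler(struct perf_event *bp, int nmi,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	printk(KERN_INFO "pid_max was accessed\n");
	dump_stack();
}

static int __init hw_break_module_init(void)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = kallsyms_lookup_name("pid_max");	/* illustrative */
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;

	sample_hbp = register_wide_hw_breakpoint(&attr, sample_hbp_handler);
	if (IS_ERR(sample_hbp))
		return PTR_ERR(sample_hbp);

	return 0;
}

static void __exit hw_break_module_exit(void)
{
	unregister_wide_hw_breakpoint(sample_hbp);
}

module_init(hw_break_module_init);
module_exit(hw_break_module_exit);
MODULE_LICENSE("GPL");

On SH, the register_wide_hw_breakpoint() call above lands in arch_install_hw_breakpoint(), which enables the interface clock and drives whichever sh_ubc ops were registered via register_sh_ubc().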