Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--  arch/sh/kernel/Makefile            1
-rw-r--r--  arch/sh/kernel/cpu/init.c         20
-rw-r--r--  arch/sh/kernel/cpu/sh3/ex.S        2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/Makefile   9
-rw-r--r--  arch/sh/kernel/cpu/sh4a/ubc.c    133
-rw-r--r--  arch/sh/kernel/debugtraps.S        1
-rw-r--r--  arch/sh/kernel/hw_breakpoint.c   463
-rw-r--r--  arch/sh/kernel/kgdb.c             46
-rw-r--r--  arch/sh/kernel/process_32.c       96
-rw-r--r--  arch/sh/kernel/ptrace_32.c        70
-rw-r--r--  arch/sh/kernel/traps_32.c          2
11 files changed, 711 insertions, 132 deletions
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index 379053c008f7..56704a6d723a 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_HIBERNATION) += swsusp.o
 obj-$(CONFIG_DWARF_UNWINDER)	+= dwarf.o
 obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o perf_callchain.o
 
+obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)	+= localtimer.o
 
 EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 2e23422280a7..a5bb0550bbf3 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -24,9 +24,6 @@
 #include <asm/elf.h>
 #include <asm/io.h>
 #include <asm/smp.h>
-#ifdef CONFIG_SUPERH32
-#include <asm/ubc.h>
-#endif
 
 #ifdef CONFIG_SH_FPU
 #define cpu_has_fpu	1
@@ -284,20 +281,19 @@ static inline void __init dsp_init(void) { }
 /**
  * sh_cpu_init
  *
- * This is our initial entry point for each CPU, and is invoked on the boot
- * CPU prior to calling start_kernel(). For SMP, a combination of this and
- * start_secondary() will bring up each processor to a ready state prior
- * to hand forking the idle loop.
+ * This is our initial entry point for each CPU, and is invoked on the
+ * boot CPU prior to calling start_kernel(). For SMP, a combination of
+ * this and start_secondary() will bring up each processor to a ready
+ * state prior to hand forking the idle loop.
  *
- * We do all of the basic processor init here, including setting up the
- * caches, FPU, DSP, kicking the UBC, etc. By the time start_kernel() is
- * hit (and subsequently platform_setup()) things like determining the
- * CPU subtype and initial configuration will all be done.
+ * We do all of the basic processor init here, including setting up
+ * the caches, FPU, DSP, etc. By the time start_kernel() is hit (and
+ * subsequently platform_setup()) things like determining the CPU
+ * subtype and initial configuration will all be done.
  *
  * Each processor family is still responsible for doing its own probing
  * and cache configuration in detect_cpu_and_cache_system().
  */
-
 asmlinkage void __init sh_cpu_init(void)
 {
 	current_thread_info()->cpu = hard_smp_processor_id();
diff --git a/arch/sh/kernel/cpu/sh3/ex.S b/arch/sh/kernel/cpu/sh3/ex.S
index 46610c35c232..99b4d020179a 100644
--- a/arch/sh/kernel/cpu/sh3/ex.S
+++ b/arch/sh/kernel/cpu/sh3/ex.S
@@ -49,7 +49,7 @@ ENTRY(exception_handling_table)
 	.long	exception_error	! reserved_instruction (filled by trap_init) /* 180 */
 	.long	exception_error	! illegal_slot_instruction (filled by trap_init) /*1A0*/
 	.long	nmi_trap_handler	/* 1C0 */	! Allow trap to debugger
-	.long	break_point_trap	/* 1E0 */
+	.long	breakpoint_trap_handler	/* 1E0 */
 
 	/*
 	 * Pad the remainder of the table out, exceptions residing in far
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile
index 33bab477d2e2..b144e8af89dc 100644
--- a/arch/sh/kernel/cpu/sh4a/Makefile
+++ b/arch/sh/kernel/cpu/sh4a/Makefile
@@ -41,7 +41,8 @@ pinmux-$(CONFIG_CPU_SUBTYPE_SH7757) := pinmux-sh7757.o
 pinmux-$(CONFIG_CPU_SUBTYPE_SH7785)	:= pinmux-sh7785.o
 pinmux-$(CONFIG_CPU_SUBTYPE_SH7786)	:= pinmux-sh7786.o
 
 obj-y				+= $(clock-y)
 obj-$(CONFIG_SMP)		+= $(smp-y)
 obj-$(CONFIG_GENERIC_GPIO)	+= $(pinmux-y)
 obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= ubc.o
diff --git a/arch/sh/kernel/cpu/sh4a/ubc.c b/arch/sh/kernel/cpu/sh4a/ubc.c
new file mode 100644
index 000000000000..efb2745bcb36
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/ubc.c
@@ -0,0 +1,133 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/ubc.c
+ *
+ * On-chip UBC support for SH-4A CPUs.
+ *
+ * Copyright (C) 2009 - 2010  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <asm/hw_breakpoint.h>
+
+#define UBC_CBR(idx)	(0xff200000 + (0x20 * idx))
+#define UBC_CRR(idx)	(0xff200004 + (0x20 * idx))
+#define UBC_CAR(idx)	(0xff200008 + (0x20 * idx))
+#define UBC_CAMR(idx)	(0xff20000c + (0x20 * idx))
+
+#define UBC_CCMFR	0xff200600
+#define UBC_CBCR	0xff200620
+
+/* CRR */
+#define UBC_CRR_PCB	(1 << 1)
+#define UBC_CRR_BIE	(1 << 0)
+
+/* CBR */
+#define UBC_CBR_CE	(1 << 0)
+
+static struct sh_ubc sh4a_ubc;
+
+static void sh4a_ubc_enable(struct arch_hw_breakpoint *info, int idx)
+{
+	__raw_writel(UBC_CBR_CE | info->len | info->type, UBC_CBR(idx));
+	__raw_writel(info->address, UBC_CAR(idx));
+}
+
+static void sh4a_ubc_disable(struct arch_hw_breakpoint *info, int idx)
+{
+	__raw_writel(0, UBC_CBR(idx));
+	__raw_writel(0, UBC_CAR(idx));
+}
+
+static void sh4a_ubc_enable_all(unsigned long mask)
+{
+	int i;
+
+	for (i = 0; i < sh4a_ubc.num_events; i++)
+		if (mask & (1 << i))
+			__raw_writel(__raw_readl(UBC_CBR(i)) | UBC_CBR_CE,
+				     UBC_CBR(i));
+}
+
+static void sh4a_ubc_disable_all(void)
+{
+	int i;
+
+	for (i = 0; i < sh4a_ubc.num_events; i++)
+		__raw_writel(__raw_readl(UBC_CBR(i)) & ~UBC_CBR_CE,
+			     UBC_CBR(i));
+}
+
+static unsigned long sh4a_ubc_active_mask(void)
+{
+	unsigned long active = 0;
+	int i;
+
+	for (i = 0; i < sh4a_ubc.num_events; i++)
+		if (__raw_readl(UBC_CBR(i)) & UBC_CBR_CE)
+			active |= (1 << i);
+
+	return active;
+}
+
+static unsigned long sh4a_ubc_triggered_mask(void)
+{
+	return __raw_readl(UBC_CCMFR);
+}
+
+static void sh4a_ubc_clear_triggered_mask(unsigned long mask)
+{
+	__raw_writel(__raw_readl(UBC_CCMFR) & ~mask, UBC_CCMFR);
+}
+
+static struct sh_ubc sh4a_ubc = {
+	.name			= "SH-4A",
+	.num_events		= 2,
+	.trap_nr		= 0x1e0,
+	.enable			= sh4a_ubc_enable,
+	.disable		= sh4a_ubc_disable,
+	.enable_all		= sh4a_ubc_enable_all,
+	.disable_all		= sh4a_ubc_disable_all,
+	.active_mask		= sh4a_ubc_active_mask,
+	.triggered_mask		= sh4a_ubc_triggered_mask,
+	.clear_triggered_mask	= sh4a_ubc_clear_triggered_mask,
+};
+
+static int __init sh4a_ubc_init(void)
+{
+	struct clk *ubc_iclk = clk_get(NULL, "ubc0");
+	int i;
+
+	/*
+	 * The UBC MSTP bit is optional, as not all platforms will have
+	 * it. Just ignore it if we can't find it.
+	 */
+	if (IS_ERR(ubc_iclk))
+		ubc_iclk = NULL;
+
+	clk_enable(ubc_iclk);
+
+	__raw_writel(0, UBC_CBCR);
+
+	for (i = 0; i < sh4a_ubc.num_events; i++) {
+		__raw_writel(0, UBC_CAMR(i));
+		__raw_writel(0, UBC_CBR(i));
+
+		__raw_writel(UBC_CRR_BIE | UBC_CRR_PCB, UBC_CRR(i));
+
+		/* dummy read for write posting */
+		(void)__raw_readl(UBC_CRR(i));
+	}
+
+	clk_disable(ubc_iclk);
+
+	sh4a_ubc.clk = ubc_iclk;
+
+	return register_sh_ubc(&sh4a_ubc);
+}
+arch_initcall(sh4a_ubc_init);
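
The ops vector that ubc.c fills in is everything the generic hw_breakpoint core (added below) asks of a UBC implementation. As a sketch inferred from the callers in this patch — the authoritative definition lives in arch/sh/include/asm/hw_breakpoint.h, which is outside this diffstat — the interface looks roughly like:

	/* Sketch only: reconstructed from how the fields are used in
	 * this patch, not copied from the header. */
	struct sh_ubc {
		const char	*name;
		unsigned int	num_events;	/* channels this UBC provides */
		unsigned int	trap_nr;	/* exception vector, e.g. 0x1e0 */
		void		(*enable)(struct arch_hw_breakpoint *, int);
		void		(*disable)(struct arch_hw_breakpoint *, int);
		void		(*enable_all)(unsigned long);
		void		(*disable_all)(void);
		unsigned long	(*active_mask)(void);
		unsigned long	(*triggered_mask)(void);
		void		(*clear_triggered_mask)(unsigned long);
		struct clk	*clk;	/* optional interface clock */
	};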
diff --git a/arch/sh/kernel/debugtraps.S b/arch/sh/kernel/debugtraps.S
index 591741383ee6..7a1b46fec0f4 100644
--- a/arch/sh/kernel/debugtraps.S
+++ b/arch/sh/kernel/debugtraps.S
@@ -13,7 +13,6 @@
 #include <linux/linkage.h>
 
 #if !defined(CONFIG_KGDB)
-#define breakpoint_trap_handler	debug_trap_handler
 #define singlestep_trap_handler	debug_trap_handler
 #endif
 
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
new file mode 100644
index 000000000000..e2f1753d275c
--- /dev/null
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -0,0 +1,463 @@
+/*
+ * arch/sh/kernel/hw_breakpoint.c
+ *
+ * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
+ *
+ * Copyright (C) 2009 - 2010  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/perf_event.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/percpu.h>
+#include <linux/kallsyms.h>
+#include <linux/notifier.h>
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <asm/hw_breakpoint.h>
+#include <asm/mmu_context.h>
+#include <asm/ptrace.h>
+
+/*
+ * Stores the breakpoints currently in use on each breakpoint address
+ * register on each CPU.
+ */
+static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);
+
+/*
+ * A dummy placeholder for early accesses until the CPUs get a chance to
+ * register their UBCs later in the boot process.
+ */
+static struct sh_ubc ubc_dummy = { .num_events = 0 };
+
+static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy;
+
+/*
+ * Install a perf counter breakpoint.
+ *
+ * We seek a free UBC channel and use it for this breakpoint.
+ *
+ * Atomic: we hold the counter->ctx->lock and we only handle variables
+ * and registers local to this cpu.
+ */
+int arch_install_hw_breakpoint(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	int i;
+
+	for (i = 0; i < sh_ubc->num_events; i++) {
+		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
+
+		if (!*slot) {
+			*slot = bp;
+			break;
+		}
+	}
+
+	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
+		return -EBUSY;
+
+	clk_enable(sh_ubc->clk);
+	sh_ubc->enable(info, i);
+
+	return 0;
+}
+
+/*
+ * Uninstall the breakpoint contained in the given counter.
+ *
+ * First we search the debug address register it uses and then we disable
+ * it.
+ *
+ * Atomic: we hold the counter->ctx->lock and we only handle variables
+ * and registers local to this cpu.
+ */
+void arch_uninstall_hw_breakpoint(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	int i;
+
+	for (i = 0; i < sh_ubc->num_events; i++) {
+		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
+
+		if (*slot == bp) {
+			*slot = NULL;
+			break;
+		}
+	}
+
+	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
+		return;
+
+	sh_ubc->disable(info, i);
+	clk_disable(sh_ubc->clk);
+}
+
+static int get_hbp_len(u16 hbp_len)
+{
+	unsigned int len_in_bytes = 0;
+
+	switch (hbp_len) {
+	case SH_BREAKPOINT_LEN_1:
+		len_in_bytes = 1;
+		break;
+	case SH_BREAKPOINT_LEN_2:
+		len_in_bytes = 2;
+		break;
+	case SH_BREAKPOINT_LEN_4:
+		len_in_bytes = 4;
+		break;
+	case SH_BREAKPOINT_LEN_8:
+		len_in_bytes = 8;
+		break;
+	}
+	return len_in_bytes;
+}
+
+/*
+ * Check for virtual address in user space.
+ */
+int arch_check_va_in_userspace(unsigned long va, u16 hbp_len)
+{
+	unsigned int len;
+
+	len = get_hbp_len(hbp_len);
+
+	return (va <= TASK_SIZE - len);
+}
+
+/*
+ * Check for virtual address in kernel space.
+ */
+static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
+{
+	unsigned int len;
+
+	len = get_hbp_len(hbp_len);
+
+	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
+}
+
+/*
+ * Store a breakpoint's encoded address, length, and type.
+ */
+static int arch_store_info(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+	/*
+	 * User-space requests will always have the address field
+	 * populated. For kernel addresses, either the address or the
+	 * symbol name can be specified.
+	 */
+	if (info->name)
+		info->address = (unsigned long)kallsyms_lookup_name(info->name);
+	if (info->address)
+		return 0;
+
+	return -EINVAL;
+}
+
+int arch_bp_generic_fields(int sh_len, int sh_type,
+			   int *gen_len, int *gen_type)
+{
+	/* Len */
+	switch (sh_len) {
+	case SH_BREAKPOINT_LEN_1:
+		*gen_len = HW_BREAKPOINT_LEN_1;
+		break;
+	case SH_BREAKPOINT_LEN_2:
+		*gen_len = HW_BREAKPOINT_LEN_2;
+		break;
+	case SH_BREAKPOINT_LEN_4:
+		*gen_len = HW_BREAKPOINT_LEN_4;
+		break;
+	case SH_BREAKPOINT_LEN_8:
+		*gen_len = HW_BREAKPOINT_LEN_8;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Type */
+	switch (sh_type) {
+	case SH_BREAKPOINT_READ:
+		*gen_type = HW_BREAKPOINT_R;
+	case SH_BREAKPOINT_WRITE:
+		*gen_type = HW_BREAKPOINT_W;
+		break;
+	case SH_BREAKPOINT_RW:
+		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int arch_build_bp_info(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+	info->address = bp->attr.bp_addr;
+
+	/* Len */
+	switch (bp->attr.bp_len) {
+	case HW_BREAKPOINT_LEN_1:
+		info->len = SH_BREAKPOINT_LEN_1;
+		break;
+	case HW_BREAKPOINT_LEN_2:
+		info->len = SH_BREAKPOINT_LEN_2;
+		break;
+	case HW_BREAKPOINT_LEN_4:
+		info->len = SH_BREAKPOINT_LEN_4;
+		break;
+	case HW_BREAKPOINT_LEN_8:
+		info->len = SH_BREAKPOINT_LEN_8;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Type */
+	switch (bp->attr.bp_type) {
+	case HW_BREAKPOINT_R:
+		info->type = SH_BREAKPOINT_READ;
+		break;
+	case HW_BREAKPOINT_W:
+		info->type = SH_BREAKPOINT_WRITE;
+		break;
+	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
+		info->type = SH_BREAKPOINT_RW;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Validate the arch-specific HW Breakpoint register settings
+ */
+int arch_validate_hwbkpt_settings(struct perf_event *bp,
+				  struct task_struct *tsk)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	unsigned int align;
+	int ret;
+
+	ret = arch_build_bp_info(bp);
+	if (ret)
+		return ret;
+
+	ret = -EINVAL;
+
+	switch (info->len) {
+	case SH_BREAKPOINT_LEN_1:
+		align = 0;
+		break;
+	case SH_BREAKPOINT_LEN_2:
+		align = 1;
+		break;
+	case SH_BREAKPOINT_LEN_4:
+		align = 3;
+		break;
+	case SH_BREAKPOINT_LEN_8:
+		align = 7;
+		break;
+	default:
+		return ret;
+	}
+
+	ret = arch_store_info(bp);
+
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Check that the low-order bits of the address are appropriate
+	 * for the alignment implied by len.
+	 */
+	if (info->address & align)
+		return -EINVAL;
+
+	/* Check that the virtual address is in the proper range */
+	if (tsk) {
+		if (!arch_check_va_in_userspace(info->address, info->len))
+			return -EFAULT;
+	} else {
+		if (!arch_check_va_in_kernelspace(info->address, info->len))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * Release the user breakpoints used by ptrace
+ */
+void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+	int i;
+	struct thread_struct *t = &tsk->thread;
+
+	for (i = 0; i < sh_ubc->num_events; i++) {
+		unregister_hw_breakpoint(t->ptrace_bps[i]);
+		t->ptrace_bps[i] = NULL;
+	}
+}
+
+static int __kprobes hw_breakpoint_handler(struct die_args *args)
+{
+	int cpu, i, rc = NOTIFY_STOP;
+	struct perf_event *bp;
+	unsigned int cmf, resume_mask;
+
+	/*
+	 * Do an early return if none of the channels triggered.
+	 */
+	cmf = sh_ubc->triggered_mask();
+	if (unlikely(!cmf))
+		return NOTIFY_DONE;
+
+	/*
+	 * By default, resume all of the active channels.
+	 */
+	resume_mask = sh_ubc->active_mask();
+
+	/*
+	 * Disable breakpoints during exception handling.
+	 */
+	sh_ubc->disable_all();
+
+	cpu = get_cpu();
+	for (i = 0; i < sh_ubc->num_events; i++) {
+		unsigned long event_mask = (1 << i);
+
+		if (likely(!(cmf & event_mask)))
+			continue;
+
+		/*
+		 * The counter may be concurrently released but that can only
+		 * occur from a call_rcu() path. We can then safely fetch
+		 * the breakpoint, use its callback, touch its counter
+		 * while we are in an rcu_read_lock() path.
+		 */
+		rcu_read_lock();
+
+		bp = per_cpu(bp_per_reg[i], cpu);
+		if (bp)
+			rc = NOTIFY_DONE;
+
+		/*
+		 * Reset the condition match flag to denote completion of
+		 * exception handling.
+		 */
+		sh_ubc->clear_triggered_mask(event_mask);
+
+		/*
+		 * bp can be NULL due to a concurrent perf counter
+		 * removal.
+		 */
+		if (!bp) {
+			rcu_read_unlock();
+			break;
+		}
+
+		/*
+		 * Don't restore the channel if the breakpoint is from
+		 * ptrace, as it always operates in one-shot mode.
+		 */
+		if (bp->overflow_handler == ptrace_triggered)
+			resume_mask &= ~(1 << i);
+
+		perf_bp_event(bp, args->regs);
+
+		/* Deliver the signal to userspace */
+		if (arch_check_va_in_userspace(bp->attr.bp_addr,
+					       bp->attr.bp_len)) {
+			siginfo_t info;
+
+			info.si_signo = args->signr;
+			info.si_errno = notifier_to_errno(rc);
+			info.si_code = TRAP_HWBKPT;
+
+			force_sig_info(args->signr, &info, current);
+		}
+
+		rcu_read_unlock();
+	}
+
+	if (cmf == 0)
+		rc = NOTIFY_DONE;
+
+	sh_ubc->enable_all(resume_mask);
+
+	put_cpu();
+
+	return rc;
+}
+
+BUILD_TRAP_HANDLER(breakpoint)
+{
+	unsigned long ex = lookup_exception_vector();
+	TRAP_HANDLER_DECL;
+
+	notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
+}
+
+/*
+ * Handle debug exception notifications.
+ */
+int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+					      unsigned long val, void *data)
+{
+	struct die_args *args = data;
+
+	if (val != DIE_BREAKPOINT)
+		return NOTIFY_DONE;
+
+	/*
+	 * If the breakpoint hasn't been triggered by the UBC, it's
+	 * probably from a debugger, so don't do anything more here.
+	 *
+	 * This also permits the UBC interface clock to remain off for
+	 * non-UBC breakpoints, as we don't need to check the triggered
+	 * or active channel masks.
+	 */
+	if (args->trapnr != sh_ubc->trap_nr)
+		return NOTIFY_DONE;
+
+	return hw_breakpoint_handler(data);
+}
+
+void hw_breakpoint_pmu_read(struct perf_event *bp)
+{
+	/* TODO */
+}
+
+void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
+{
+	/* TODO */
+}
+
+int register_sh_ubc(struct sh_ubc *ubc)
+{
+	/* Bail if it's already assigned */
+	if (sh_ubc != &ubc_dummy)
+		return -EBUSY;
+	sh_ubc = ubc;
+
+	pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name);
+
+	WARN_ON(ubc->num_events > HBP_NUM);
+
+	return 0;
+}
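
With register_sh_ubc() in place, kernel code can request watchpoints through the generic API this file implements. A minimal consumer, modeled loosely on samples/hw_breakpoint/data_breakpoint.c from the same kernel era (the watched symbol and the message are illustrative, and error handling is kept to the essentials):

	#include <linux/module.h>
	#include <linux/kallsyms.h>
	#include <linux/perf_event.h>
	#include <linux/hw_breakpoint.h>

	static struct perf_event **wp;	/* one event per CPU */

	static void wp_handler(struct perf_event *bp, int nmi,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
	{
		printk(KERN_INFO "pid_max was written to\n");
	}

	static int __init wp_init(void)
	{
		struct perf_event_attr attr;

		hw_breakpoint_init(&attr);
		attr.bp_addr = kallsyms_lookup_name("pid_max");
		attr.bp_len = HW_BREAKPOINT_LEN_4;
		attr.bp_type = HW_BREAKPOINT_W;

		/* Places the watchpoint on every online CPU. */
		wp = register_wide_hw_breakpoint(&attr, wp_handler);
		return IS_ERR(wp) ? PTR_ERR(wp) : 0;
	}

	static void __exit wp_exit(void)
	{
		unregister_wide_hw_breakpoint(wp);
	}

	module_init(wp_init);
	module_exit(wp_exit);
	MODULE_LICENSE("GPL");

On this SH backend, arch_validate_hwbkpt_settings() rejects the request unless the address matches the alignment implied by bp_len, and arch_install_hw_breakpoint() returns -EBUSY once both UBC channels are in use.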
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
index 3e532d0d4a5c..70c69659b846 100644
--- a/arch/sh/kernel/kgdb.c
+++ b/arch/sh/kernel/kgdb.c
@@ -1,7 +1,7 @@
 /*
  * SuperH KGDB support
  *
- * Copyright (C) 2008  Paul Mundt
+ * Copyright (C) 2008 - 2009  Paul Mundt
  *
  * Single stepping taken from the old stub by Henry Bell and Jeremy Siegel.
  *
@@ -251,24 +251,60 @@ BUILD_TRAP_HANDLER(singlestep)
 	local_irq_restore(flags);
 }
 
+static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+{
+	int ret;
+
+	switch (cmd) {
+	case DIE_BREAKPOINT:
+		/*
+		 * This means a user thread is single stepping
+		 * a system call which should be ignored
+		 */
+		if (test_thread_flag(TIF_SINGLESTEP))
+			return NOTIFY_DONE;
+
+		ret = kgdb_handle_exception(args->trapnr & 0xff, args->signr,
+					    args->err, args->regs);
+		if (ret)
+			return NOTIFY_DONE;
+
+		break;
+	}
 
-BUILD_TRAP_HANDLER(breakpoint)
+	return NOTIFY_STOP;
+}
+
+static int
+kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
 {
 	unsigned long flags;
-	TRAP_HANDLER_DECL;
+	int ret;
 
 	local_irq_save(flags);
-	kgdb_handle_exception(vec >> 2, SIGTRAP, 0, regs);
+	ret = __kgdb_notify(ptr, cmd);
 	local_irq_restore(flags);
+
+	return ret;
 }
 
+static struct notifier_block kgdb_notifier = {
+	.notifier_call	= kgdb_notify,
+
+	/*
+	 * Lowest-prio notifier priority, we want to be notified last:
+	 */
+	.priority	= -INT_MAX,
+};
+
 int kgdb_arch_init(void)
 {
-	return 0;
+	return register_die_notifier(&kgdb_notifier);
 }
 
 void kgdb_arch_exit(void)
 {
+	unregister_die_notifier(&kgdb_notifier);
 }
 
 struct kgdb_arch arch_kgdb_ops = {
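
The -INT_MAX priority matters here: hw_breakpoint_exceptions_notify() registers at default priority and returns NOTIFY_STOP for UBC-claimed events, so kgdb now only sees breakpoints that nothing earlier on the die chain has consumed. A hypothetical client that wants a look at DIE_BREAKPOINT before kgdb simply registers at the (higher) default priority:

	#include <linux/kdebug.h>
	#include <linux/notifier.h>

	/* Illustrative sketch; not part of this patch. */
	static int my_die_notify(struct notifier_block *self,
				 unsigned long val, void *data)
	{
		if (val != DIE_BREAKPOINT)
			return NOTIFY_DONE;

		/* inspect struct die_args via data here ... */

		return NOTIFY_DONE;	/* pass it along to kgdb */
	}

	static struct notifier_block my_die_nb = {
		.notifier_call	= my_die_notify,
		/* default .priority of 0 sorts ahead of kgdb's -INT_MAX */
	};

	static int __init my_init(void)
	{
		return register_die_notifier(&my_die_nb);
	}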
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 03de6573aa76..856010f9ebc9 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -25,17 +25,15 @@
 #include <linux/fs.h>
 #include <linux/ftrace.h>
 #include <linux/preempt.h>
+#include <linux/hw_breakpoint.h>
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
 #include <asm/system.h>
-#include <asm/ubc.h>
 #include <asm/fpu.h>
 #include <asm/syscalls.h>
 #include <asm/watchdog.h>
 
-int ubc_usercnt = 0;
-
 #ifdef CONFIG_32BIT
 static void watchdog_trigger_immediate(void)
 {
@@ -166,16 +164,15 @@ EXPORT_SYMBOL(start_thread);
  */
 void exit_thread(void)
 {
-	if (current->thread.ubc_pc) {
-		current->thread.ubc_pc = 0;
-		ubc_usercnt -= 1;
-	}
 }
 
 void flush_thread(void)
 {
-#if defined(CONFIG_SH_FPU)
 	struct task_struct *tsk = current;
+
+	flush_ptrace_hw_breakpoint(tsk);
+
+#if defined(CONFIG_SH_FPU)
 	/* Forget lazy FPU state */
 	clear_fpu(tsk, task_pt_regs(tsk));
 	clear_used_math();
@@ -223,11 +220,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 {
 	struct thread_info *ti = task_thread_info(p);
 	struct pt_regs *childregs;
+
 #if defined(CONFIG_SH_DSP)
 	struct task_struct *tsk = current;
-#endif
 
-#if defined(CONFIG_SH_DSP)
 	if (is_dsp_enabled(tsk)) {
 		/* We can use the __save_dsp or just copy the struct:
 		 * __save_dsp(p);
@@ -258,53 +254,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	p->thread.sp = (unsigned long) childregs;
 	p->thread.pc = (unsigned long) ret_from_fork;
 
-	p->thread.ubc_pc = 0;
+	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
 
 	return 0;
 }
 
-/* Tracing by user break controller. */
-static void ubc_set_tracing(int asid, unsigned long pc)
-{
-#if defined(CONFIG_CPU_SH4A)
-	unsigned long val;
-
-	val = (UBC_CBR_ID_INST | UBC_CBR_RW_READ | UBC_CBR_CE);
-	val |= (UBC_CBR_AIE | UBC_CBR_AIV_SET(asid));
-
-	ctrl_outl(val, UBC_CBR0);
-	ctrl_outl(pc, UBC_CAR0);
-	ctrl_outl(0x0, UBC_CAMR0);
-	ctrl_outl(0x0, UBC_CBCR);
-
-	val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE);
-	ctrl_outl(val, UBC_CRR0);
-
-	/* Read UBC register that we wrote last, for checking update */
-	val = ctrl_inl(UBC_CRR0);
-
-#else	/* CONFIG_CPU_SH4A */
-	ctrl_outl(pc, UBC_BARA);
-
-#ifdef CONFIG_MMU
-	ctrl_outb(asid, UBC_BASRA);
-#endif
-
-	ctrl_outl(0, UBC_BAMRA);
-
-	if (current_cpu_data.type == CPU_SH7729 ||
-	    current_cpu_data.type == CPU_SH7710 ||
-	    current_cpu_data.type == CPU_SH7712 ||
-	    current_cpu_data.type == CPU_SH7203){
-		ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
-		ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
-	} else {
-		ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
-		ctrl_outw(BRCR_PCBA, UBC_BRCR);
-	}
-#endif /* CONFIG_CPU_SH4A */
-}
-
 /*
  * switch_to(x,y) should switch tasks from x to y.
  *
@@ -330,25 +284,6 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
330 : "r" (task_thread_info(next))); 284 : "r" (task_thread_info(next)));
331#endif 285#endif
332 286
333 /* If no tasks are using the UBC, we're done */
334 if (ubc_usercnt == 0)
335 /* If no tasks are using the UBC, we're done */;
336 else if (next->thread.ubc_pc && next->mm) {
337 int asid = 0;
338#ifdef CONFIG_MMU
339 asid |= cpu_asid(smp_processor_id(), next->mm);
340#endif
341 ubc_set_tracing(asid, next->thread.ubc_pc);
342 } else {
343#if defined(CONFIG_CPU_SH4A)
344 ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
345 ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
346#else
347 ctrl_outw(0, UBC_BBRA);
348 ctrl_outw(0, UBC_BBRB);
349#endif
350 }
351
352 /* 287 /*
353 * If the task has used fpu the last 5 timeslices, just do a full 288 * If the task has used fpu the last 5 timeslices, just do a full
354 * restore of the math state immediately to avoid the trap; the 289 * restore of the math state immediately to avoid the trap; the
@@ -448,20 +383,3 @@ unsigned long get_wchan(struct task_struct *p)
 
 	return pc;
 }
-
-asmlinkage void break_point_trap(void)
-{
-	/* Clear tracing.  */
-#if defined(CONFIG_CPU_SH4A)
-	ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
-	ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
-#else
-	ctrl_outw(0, UBC_BBRA);
-	ctrl_outw(0, UBC_BBRB);
-	ctrl_outl(0, UBC_BRCR);
-#endif
-	current->thread.ubc_pc = 0;
-	ubc_usercnt -= 1;
-
-	force_sig(SIGTRAP, current);
-}
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index be9b5dcb4021..c625cdab76dd 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -2,7 +2,7 @@
  * SuperH process tracing
  *
  * Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
- * Copyright (C) 2002 - 2008  Paul Mundt
+ * Copyright (C) 2002 - 2009  Paul Mundt
  *
  * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
  *
@@ -26,6 +26,7 @@
 #include <linux/tracehook.h>
 #include <linux/elf.h>
 #include <linux/regset.h>
+#include <linux/hw_breakpoint.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -63,33 +64,64 @@ static inline int put_stack_long(struct task_struct *task, int offset,
 	return 0;
 }
 
-void user_enable_single_step(struct task_struct *child)
+void ptrace_triggered(struct perf_event *bp, int nmi,
+		      struct perf_sample_data *data, struct pt_regs *regs)
 {
-	/* Next scheduling will set up UBC */
-	if (child->thread.ubc_pc == 0)
-		ubc_usercnt += 1;
+	struct perf_event_attr attr;
+
+	/*
+	 * Disable the breakpoint request here since ptrace has defined a
+	 * one-shot behaviour for breakpoint exceptions.
+	 */
+	attr = bp->attr;
+	attr.disabled = true;
+	modify_user_hw_breakpoint(bp, &attr);
+}
+
+static int set_single_step(struct task_struct *tsk, unsigned long addr)
+{
+	struct thread_struct *thread = &tsk->thread;
+	struct perf_event *bp;
+	struct perf_event_attr attr;
+
+	bp = thread->ptrace_bps[0];
+	if (!bp) {
+		hw_breakpoint_init(&attr);
+
+		attr.bp_addr = addr;
+		attr.bp_len = HW_BREAKPOINT_LEN_2;
+		attr.bp_type = HW_BREAKPOINT_R;
+
+		bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
+		if (IS_ERR(bp))
+			return PTR_ERR(bp);
+
+		thread->ptrace_bps[0] = bp;
+	} else {
+		int err;
+
+		attr = bp->attr;
+		attr.bp_addr = addr;
+		err = modify_user_hw_breakpoint(bp, &attr);
+		if (unlikely(err))
+			return err;
+	}
+
+	return 0;
+}
 
-	child->thread.ubc_pc = get_stack_long(child,
-				offsetof(struct pt_regs, pc));
+void user_enable_single_step(struct task_struct *child)
+{
+	unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));
 
 	set_tsk_thread_flag(child, TIF_SINGLESTEP);
+
+	set_single_step(child, pc);
 }
 
 void user_disable_single_step(struct task_struct *child)
 {
 	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
-
-	/*
-	 * Ensure the UBC is not programmed at the next context switch.
-	 *
-	 * Normally this is not needed but there are sequences such as
-	 * singlestep, signal delivery, and continue that leave the
-	 * ubc_pc non-zero leading to spurious SIGTRAPs.
-	 */
-	if (child->thread.ubc_pc != 0) {
-		ubc_usercnt -= 1;
-		child->thread.ubc_pc = 0;
-	}
 }
 
 /*
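
From a tracer's point of view nothing changes: PTRACE_SINGLESTEP still stops the child after each instruction, only the mechanism is now a one-shot UBC breakpoint rather than the old ubc_pc bookkeeping. A minimal userspace exerciser (illustrative; most error handling elided):

	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		int i, status;
		pid_t pid = fork();

		if (pid == 0) {		/* child: request tracing, then exec */
			ptrace(PTRACE_TRACEME, 0, NULL, NULL);
			execl("/bin/true", "true", (char *)NULL);
			_exit(1);
		}

		waitpid(pid, &status, 0);	/* child stops at exec */

		/* Each step lands in user_enable_single_step() above. */
		for (i = 0; i < 10 && WIFSTOPPED(status); i++) {
			ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL);
			waitpid(pid, &status, 0);
		}

		if (WIFSTOPPED(status)) {	/* let the child finish */
			ptrace(PTRACE_CONT, pid, NULL, NULL);
			waitpid(pid, &status, 0);
		}
		return 0;
	}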
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 204def6ecb6a..9c090cb68878 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -846,7 +846,7 @@ void __init trap_init(void)
 #endif
 
 #ifdef TRAP_UBC
-	set_exception_table_vec(TRAP_UBC, break_point_trap);
+	set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler);
 #endif
 
 	/* Save off the BIOS VBR, if there is one */