-rw-r--r--  arch/sh/Kconfig                      |   1
-rw-r--r--  arch/sh/include/asm/Kbuild           |   4
-rw-r--r--  arch/sh/include/asm/hw_breakpoint.h  |  53
-rw-r--r--  arch/sh/include/asm/kdebug.h         |   2
-rw-r--r--  arch/sh/include/asm/processor_32.h   |   8
-rw-r--r--  arch/sh/include/asm/system.h         |   2
-rw-r--r--  arch/sh/include/asm/thread_info.h    |   2
-rw-r--r--  arch/sh/include/asm/ubc.h            |   8
-rw-r--r--  arch/sh/kernel/Makefile              |   1
-rw-r--r--  arch/sh/kernel/cpu/sh3/ex.S          |   2
-rw-r--r--  arch/sh/kernel/hw_breakpoint.c       | 416
-rw-r--r--  arch/sh/kernel/process_32.c          |  94
-rw-r--r--  arch/sh/kernel/ptrace_32.c           |  19
13 files changed, 495 insertions(+), 117 deletions(-)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 88cdeb9f72d9..d563884833e9 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -39,6 +39,7 @@ config SUPERH32
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_ARCH_KGDB
+	select HAVE_HW_BREAKPOINT if CPU_SH4A
 	select ARCH_HIBERNATION_POSSIBLE if MMU
 
 config SUPERH64
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index e121c30f797d..46cb93477bcb 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -1,6 +1,8 @@
 include include/asm-generic/Kbuild.asm
 
-header-y += cachectl.h cpu-features.h
+header-y += cachectl.h
+header-y += cpu-features.h
+header-y += hw_breakpoint.h
 
 unifdef-y += unistd_32.h
 unifdef-y += unistd_64.h
diff --git a/arch/sh/include/asm/hw_breakpoint.h b/arch/sh/include/asm/hw_breakpoint.h
new file mode 100644
index 000000000000..0f4a00f60058
--- /dev/null
+++ b/arch/sh/include/asm/hw_breakpoint.h
@@ -0,0 +1,53 @@
+#ifndef __ASM_SH_HW_BREAKPOINT_H
+#define __ASM_SH_HW_BREAKPOINT_H
+
+#include <linux/kdebug.h>
+#include <linux/types.h>
+#include <asm/ubc.h>
+
+#ifdef __KERNEL__
+#define __ARCH_HW_BREAKPOINT_H
+
+struct arch_hw_breakpoint {
+	char		*name;	/* Contains name of the symbol to set bkpt */
+	unsigned long	address;
+	unsigned long	asid;
+	u16		len;
+	u16		type;
+};
+
+enum {
+	SH_BREAKPOINT_READ	= (1 << 1),
+	SH_BREAKPOINT_WRITE	= (1 << 2),
+	SH_BREAKPOINT_RW	= SH_BREAKPOINT_READ | SH_BREAKPOINT_WRITE,
+
+	SH_BREAKPOINT_LEN_1	= (1 << 12),
+	SH_BREAKPOINT_LEN_2	= (1 << 13),
+	SH_BREAKPOINT_LEN_4	= SH_BREAKPOINT_LEN_1 | SH_BREAKPOINT_LEN_2,
+	SH_BREAKPOINT_LEN_8	= (1 << 14),
+};
+
+/* Total number of available UBC channels */
+#define HBP_NUM		1	/* XXX */
+
+struct perf_event;
+struct task_struct;
+struct pmu;
+
+extern int arch_check_va_in_userspace(unsigned long va, u16 hbp_len);
+extern int arch_validate_hwbkpt_settings(struct perf_event *bp,
+					 struct task_struct *tsk);
+extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+					   unsigned long val, void *data);
+
+int arch_install_hw_breakpoint(struct perf_event *bp);
+void arch_uninstall_hw_breakpoint(struct perf_event *bp);
+void hw_breakpoint_pmu_read(struct perf_event *bp);
+void hw_breakpoint_pmu_unthrottle(struct perf_event *bp);
+
+extern void arch_fill_perf_breakpoint(struct perf_event *bp);
+
+extern struct pmu perf_ops_bp;
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_SH_HW_BREAKPOINT_H */
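For context, a consumer on the generic perf side describes a breakpoint with the generic HW_BREAKPOINT_* constants, and the arch hooks declared above translate that description into the SH_BREAKPOINT_* encoding before a UBC channel is programmed. The snippet below is a minimal sketch of that generic-side description, assuming the perf_event_attr breakpoint fields of this development window; the helper name and the choice of a 4-byte write watchpoint are illustrative and not part of this patch.

	#include <linux/hw_breakpoint.h>
	#include <linux/perf_event.h>
	#include <linux/string.h>

	/* Sketch: describe a 4-byte write breakpoint in generic terms. */
	static void sample_fill_bp_attr(struct perf_event_attr *attr, unsigned long addr)
	{
		memset(attr, 0, sizeof(*attr));
		attr->type    = PERF_TYPE_BREAKPOINT;
		attr->size    = sizeof(*attr);
		attr->bp_addr = addr;			/* address to watch */
		attr->bp_len  = HW_BREAKPOINT_LEN_4;	/* mapped to SH_BREAKPOINT_LEN_4 */
		attr->bp_type = HW_BREAKPOINT_W;	/* mapped to SH_BREAKPOINT_WRITE */
	}

The generic registration helpers then call down into arch_validate_hwbkpt_settings() and, once the event is scheduled in, arch_install_hw_breakpoint() from arch/sh/kernel/hw_breakpoint.c below.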
diff --git a/arch/sh/include/asm/kdebug.h b/arch/sh/include/asm/kdebug.h
index 985219f9759e..5f6d2e9ccb7c 100644
--- a/arch/sh/include/asm/kdebug.h
+++ b/arch/sh/include/asm/kdebug.h
@@ -6,6 +6,8 @@ enum die_val {
 	DIE_TRAP,
 	DIE_NMI,
 	DIE_OOPS,
+	DIE_BREAKPOINT,
+	DIE_SSTEP,
 };
 
 #endif /* __ASM_SH_KDEBUG_H */
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index 9a8714945dc9..f4b54040dbc3 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -14,6 +14,7 @@
 #include <asm/page.h>
 #include <asm/types.h>
 #include <asm/ptrace.h>
+#include <asm/ubc.h>
 
 /*
  * Default implementation of macro that returns current
@@ -99,8 +100,8 @@ struct thread_struct {
 	unsigned long sp;
 	unsigned long pc;
 
-	/* Hardware debugging registers */
-	unsigned long ubc_pc;
+	/* Save middle states of ptrace breakpoints */
+	struct perf_event *ptrace_bps[NR_UBC_CHANNELS];
 
 	/* floating point info */
 	union sh_fpu_union fpu;
@@ -111,9 +112,6 @@ struct thread_struct {
 #endif
 };
 
-/* Count of active tasks with UBC settings */
-extern int ubc_usercnt;
-
 #define INIT_THREAD  {						\
 	.sp = sizeof(init_stack) + (long) &init_stack,		\
 }
diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h
index b5c5acdc8c0e..1014da8b3ed3 100644
--- a/arch/sh/include/asm/system.h
+++ b/arch/sh/include/asm/system.h
@@ -144,8 +144,6 @@ void per_cpu_trap_init(void);
 void default_idle(void);
 void cpu_idle_wait(void);
 
-asmlinkage void break_point_trap(void);
-
 #ifdef CONFIG_SUPERH32
 #define BUILD_TRAP_HANDLER(name)					\
 asmlinkage void name##_trap_handler(unsigned long r4, unsigned long r5, \
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index bdeb9d46d17d..8ab9145bf50b 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -117,6 +117,7 @@ extern void free_thread_info(struct thread_info *ti);
 #define TIF_SECCOMP		6	/* secure computing */
 #define TIF_NOTIFY_RESUME	7	/* callback before returning to user */
 #define TIF_SYSCALL_TRACEPOINT	8	/* for ftrace syscall instrumentation */
+#define TIF_DEBUG		9	/* uses UBC */
 #define TIF_USEDFPU		16	/* FPU was used by this task this quantum (SMP) */
 #define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		18
@@ -131,6 +132,7 @@ extern void free_thread_info(struct thread_info *ti);
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
+#define _TIF_DEBUG		(1 << TIF_DEBUG)
 #define _TIF_USEDFPU		(1 << TIF_USEDFPU)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_FREEZE		(1 << TIF_FREEZE)
diff --git a/arch/sh/include/asm/ubc.h b/arch/sh/include/asm/ubc.h
index 4ca4b7717371..dd7878197b6e 100644
--- a/arch/sh/include/asm/ubc.h
+++ b/arch/sh/include/asm/ubc.h
@@ -10,8 +10,8 @@
  */
 #ifndef __ASM_SH_UBC_H
 #define __ASM_SH_UBC_H
-#ifdef __KERNEL__
 
+#ifdef __KERNEL__
 #include <cpu/ubc.h>
 
 /* User Break Controller */
@@ -60,6 +60,12 @@
 #define BRCR_UBDE	(1 << 0)
 #endif
 
+/*
+ * All SH parts have 2 UBC channels. I defy any hardware designer to
+ * invalidate this assertion.
+ */
+#define NR_UBC_CHANNELS	2
+
 #ifndef __ASSEMBLY__
 /* arch/sh/kernel/cpu/ubc.S */
 extern void ubc_sleep(void);
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index a2d0a40f3848..649daadd4519 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_DUMP_CODE)		+= disassemble.o
 obj-$(CONFIG_HIBERNATION)		+= swsusp.o
 obj-$(CONFIG_DWARF_UNWINDER)		+= dwarf.o
 
+obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)	+= localtimer.o
 
 EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/cpu/sh3/ex.S b/arch/sh/kernel/cpu/sh3/ex.S
index 46610c35c232..99b4d020179a 100644
--- a/arch/sh/kernel/cpu/sh3/ex.S
+++ b/arch/sh/kernel/cpu/sh3/ex.S
@@ -49,7 +49,7 @@ ENTRY(exception_handling_table)
 	.long	exception_error		! reserved_instruction (filled by trap_init) /* 180 */
 	.long	exception_error		! illegal_slot_instruction (filled by trap_init) /*1A0*/
 	.long	nmi_trap_handler	/* 1C0 */	! Allow trap to debugger
-	.long	break_point_trap	/* 1E0 */
+	.long	breakpoint_trap_handler	/* 1E0 */
 
 	/*
 	 * Pad the remainder of the table out, exceptions residing in far
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
new file mode 100644
index 000000000000..ff3cb3d7df8f
--- /dev/null
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -0,0 +1,416 @@
+/*
+ * arch/sh/kernel/hw_breakpoint.c
+ *
+ * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
+ *
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/perf_event.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/percpu.h>
+#include <linux/kallsyms.h>
+#include <linux/notifier.h>
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <linux/io.h>
+#include <asm/hw_breakpoint.h>
+#include <asm/mmu_context.h>
+
+struct ubc_context {
+	unsigned long pc;
+	unsigned long state;
+};
+
+/* Per cpu ubc channel state */
+static DEFINE_PER_CPU(struct ubc_context, ubc_ctx[HBP_NUM]);
+
+/*
+ * Stores the breakpoints currently in use on each breakpoint address
+ * register for each cpus
+ */
+static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);
+
+static int __init ubc_init(void)
+{
+	__raw_writel(0, UBC_CAMR0);
+	__raw_writel(0, UBC_CBR0);
+	__raw_writel(0, UBC_CBCR);
+
+	__raw_writel(UBC_CRR_BIE | UBC_CRR_PCB, UBC_CRR0);
+
+	/* dummy read for write posting */
+	(void)__raw_readl(UBC_CRR0);
+
+	return 0;
+}
+arch_initcall(ubc_init);
+
+/*
+ * Install a perf counter breakpoint.
+ *
+ * We seek a free UBC channel and use it for this breakpoint.
+ *
+ * Atomic: we hold the counter->ctx->lock and we only handle variables
+ * and registers local to this cpu.
+ */
+int arch_install_hw_breakpoint(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	struct ubc_context *ubc_ctx;
+	int i;
+
+	for (i = 0; i < HBP_NUM; i++) {
+		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
+
+		if (!*slot) {
+			*slot = bp;
+			break;
+		}
+	}
+
+	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
+		return -EBUSY;
+
+	ubc_ctx = &__get_cpu_var(ubc_ctx[i]);
+
+	ubc_ctx->pc = info->address;
+	ubc_ctx->state = info->len | info->type;
+
+	__raw_writel(UBC_CBR_CE | ubc_ctx->state, UBC_CBR0);
+	__raw_writel(ubc_ctx->pc, UBC_CAR0);
+
+	return 0;
+}
+
+/*
+ * Uninstall the breakpoint contained in the given counter.
+ *
+ * First we search the debug address register it uses and then we disable
+ * it.
+ *
+ * Atomic: we hold the counter->ctx->lock and we only handle variables
+ * and registers local to this cpu.
+ */
+void arch_uninstall_hw_breakpoint(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	struct ubc_context *ubc_ctx;
+	int i;
+
+	for (i = 0; i < HBP_NUM; i++) {
+		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
+
+		if (*slot == bp) {
+			*slot = NULL;
+			break;
+		}
+	}
+
+	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
+		return;
+
+	ubc_ctx = &__get_cpu_var(ubc_ctx[i]);
+	ubc_ctx->pc = 0;
+	ubc_ctx->state &= ~(info->len | info->type);
+
+	__raw_writel(ubc_ctx->state, UBC_CBR0);
+	__raw_writel(ubc_ctx->pc, UBC_CAR0);
+}
+
+static int get_hbp_len(u16 hbp_len)
+{
+	unsigned int len_in_bytes = 0;
+
+	switch (hbp_len) {
+	case SH_BREAKPOINT_LEN_1:
+		len_in_bytes = 1;
+		break;
+	case SH_BREAKPOINT_LEN_2:
+		len_in_bytes = 2;
+		break;
+	case SH_BREAKPOINT_LEN_4:
+		len_in_bytes = 4;
+		break;
+	case SH_BREAKPOINT_LEN_8:
+		len_in_bytes = 8;
+		break;
+	}
+	return len_in_bytes;
+}
+
+/*
+ * Check for virtual address in user space.
+ */
+int arch_check_va_in_userspace(unsigned long va, u16 hbp_len)
+{
+	unsigned int len;
+
+	len = get_hbp_len(hbp_len);
+
+	return (va <= TASK_SIZE - len);
+}
+
+/*
+ * Check for virtual address in kernel space.
+ */
+static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
+{
+	unsigned int len;
+
+	len = get_hbp_len(hbp_len);
+
+	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
+}
+
+/*
+ * Store a breakpoint's encoded address, length, and type.
+ */
+static int arch_store_info(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+	/*
+	 * User-space requests will always have the address field populated
+	 * For kernel-addresses, either the address or symbol name can be
+	 * specified.
+	 */
+	if (info->name)
+		info->address = (unsigned long)kallsyms_lookup_name(info->name);
+	if (info->address) {
+		info->asid = get_asid();
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+int arch_bp_generic_fields(int sh_len, int sh_type,
+			   int *gen_len, int *gen_type)
+{
+	/* Len */
+	switch (sh_len) {
+	case SH_BREAKPOINT_LEN_1:
+		*gen_len = HW_BREAKPOINT_LEN_1;
+		break;
+	case SH_BREAKPOINT_LEN_2:
+		*gen_len = HW_BREAKPOINT_LEN_2;
+		break;
+	case SH_BREAKPOINT_LEN_4:
+		*gen_len = HW_BREAKPOINT_LEN_4;
+		break;
+	case SH_BREAKPOINT_LEN_8:
+		*gen_len = HW_BREAKPOINT_LEN_8;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Type */
+	switch (sh_type) {
+	case SH_BREAKPOINT_READ:
+		*gen_type = HW_BREAKPOINT_R;
+		break;
+	case SH_BREAKPOINT_WRITE:
+		*gen_type = HW_BREAKPOINT_W;
+		break;
+	case SH_BREAKPOINT_RW:
+		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int arch_build_bp_info(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+	info->address = bp->attr.bp_addr;
+
+	/* Len */
+	switch (bp->attr.bp_len) {
+	case HW_BREAKPOINT_LEN_1:
+		info->len = SH_BREAKPOINT_LEN_1;
+		break;
+	case HW_BREAKPOINT_LEN_2:
+		info->len = SH_BREAKPOINT_LEN_2;
+		break;
+	case HW_BREAKPOINT_LEN_4:
+		info->len = SH_BREAKPOINT_LEN_4;
+		break;
+	case HW_BREAKPOINT_LEN_8:
+		info->len = SH_BREAKPOINT_LEN_8;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Type */
+	switch (bp->attr.bp_type) {
+	case HW_BREAKPOINT_R:
+		info->type = SH_BREAKPOINT_READ;
+		break;
+	case HW_BREAKPOINT_W:
+		info->type = SH_BREAKPOINT_WRITE;
+		break;
+	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
+		info->type = SH_BREAKPOINT_RW;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Validate the arch-specific HW Breakpoint register settings
+ */
+int arch_validate_hwbkpt_settings(struct perf_event *bp,
+				  struct task_struct *tsk)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	unsigned int align;
+	int ret;
+
+	ret = arch_build_bp_info(bp);
+	if (ret)
+		return ret;
+
+	ret = -EINVAL;
+
+	switch (info->len) {
+	case SH_BREAKPOINT_LEN_1:
+		align = 0;
+		break;
+	case SH_BREAKPOINT_LEN_2:
+		align = 1;
+		break;
+	case SH_BREAKPOINT_LEN_4:
+		align = 3;
+		break;
+	case SH_BREAKPOINT_LEN_8:
+		align = 7;
+		break;
+	default:
+		return ret;
+	}
+
+	if (bp->callback)
+		ret = arch_store_info(bp);
+
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Check that the low-order bits of the address are appropriate
+	 * for the alignment implied by len.
+	 */
+	if (info->address & align)
+		return -EINVAL;
+
+	/* Check that the virtual address is in the proper range */
+	if (tsk) {
+		if (!arch_check_va_in_userspace(info->address, info->len))
+			return -EFAULT;
+	} else {
+		if (!arch_check_va_in_kernelspace(info->address, info->len))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * Release the user breakpoints used by ptrace
+ */
+void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+	int i;
+	struct thread_struct *t = &tsk->thread;
+
+	for (i = 0; i < HBP_NUM; i++) {
+		unregister_hw_breakpoint(t->ptrace_bps[i]);
+		t->ptrace_bps[i] = NULL;
+	}
+}
+
+static int __kprobes hw_breakpoint_handler(struct die_args *args)
+{
+	int cpu, i, rc = NOTIFY_STOP;
+	struct perf_event *bp;
+	unsigned long val;
+
+	val = __raw_readl(UBC_CBR0);
+	__raw_writel(val & ~UBC_CBR_CE, UBC_CBR0);
+
+	cpu = get_cpu();
+	for (i = 0; i < HBP_NUM; i++) {
+		/*
+		 * The counter may be concurrently released but that can only
+		 * occur from a call_rcu() path. We can then safely fetch
+		 * the breakpoint, use its callback, touch its counter
+		 * while we are in an rcu_read_lock() path.
+		 */
+		rcu_read_lock();
+
+		bp = per_cpu(bp_per_reg[i], cpu);
+		if (bp) {
+			rc = NOTIFY_DONE;
+		} else {
+			rcu_read_unlock();
+			break;
+		}
+
+		(bp->callback)(bp, args->regs);
+
+		rcu_read_unlock();
+	}
+
+	if (bp) {
+		struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+		__raw_writel(UBC_CBR_CE | info->len | info->type, UBC_CBR0);
+		__raw_writel(info->address, UBC_CAR0);
+	}
+
+	put_cpu();
+
+	return rc;
+}
+
+BUILD_TRAP_HANDLER(breakpoint)
+{
+	unsigned long ex = lookup_exception_vector();
+	TRAP_HANDLER_DECL;
+
+	notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
+}
+
+/*
+ * Handle debug exception notifications.
+ */
+int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+					      unsigned long val, void *data)
+{
+	if (val != DIE_BREAKPOINT)
+		return NOTIFY_DONE;
+
+	return hw_breakpoint_handler(data);
+}
+
+void hw_breakpoint_pmu_read(struct perf_event *bp)
+{
+	/* TODO */
+}
+
+void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
+{
+	/* TODO */
+}
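For context, the DIE_BREAKPOINT notification raised by the trap handler above reaches hw_breakpoint_exceptions_notify() through the kernel's die-notifier chain; in the actual tree that registration is done by the generic hw-breakpoint core rather than by this file. The sketch below only illustrates the wiring, and the priority value is an assumption.

	#include <linux/kdebug.h>
	#include <linux/notifier.h>

	/* Sketch: hook hw_breakpoint_exceptions_notify() into the die chain. */
	static struct notifier_block sample_hw_breakpoint_nb = {
		.notifier_call	= hw_breakpoint_exceptions_notify,
		.priority	= 0x7fffffff,	/* assumed: run ahead of other die handlers */
	};

	static int __init sample_register_hw_breakpoint_notifier(void)
	{
		return register_die_notifier(&sample_hw_breakpoint_nb);
	}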
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 0673c4746be3..4a2c866f9773 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -25,6 +25,7 @@
 #include <linux/fs.h>
 #include <linux/ftrace.h>
 #include <linux/preempt.h>
+#include <linux/hw_breakpoint.h>
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
@@ -34,8 +35,6 @@
 #include <asm/syscalls.h>
 #include <asm/watchdog.h>
 
-int ubc_usercnt = 0;
-
 #ifdef CONFIG_32BIT
 static void watchdog_trigger_immediate(void)
 {
@@ -148,16 +147,15 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
  */
 void exit_thread(void)
 {
-	if (current->thread.ubc_pc) {
-		current->thread.ubc_pc = 0;
-		ubc_usercnt -= 1;
-	}
 }
 
 void flush_thread(void)
 {
-#if defined(CONFIG_SH_FPU)
 	struct task_struct *tsk = current;
+
+	flush_ptrace_hw_breakpoint(tsk);
+
+#if defined(CONFIG_SH_FPU)
 	/* Forget lazy FPU state */
 	clear_fpu(tsk, task_pt_regs(tsk));
 	clear_used_math();
@@ -195,9 +193,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 {
 	struct thread_info *ti = task_thread_info(p);
 	struct pt_regs *childregs;
-#if defined(CONFIG_SH_FPU) || defined(CONFIG_SH_DSP)
 	struct task_struct *tsk = current;
-#endif
 
 #if defined(CONFIG_SH_FPU)
 	unlazy_fpu(tsk, regs);
@@ -234,53 +230,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	p->thread.sp = (unsigned long) childregs;
 	p->thread.pc = (unsigned long) ret_from_fork;
 
-	p->thread.ubc_pc = 0;
+	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
 
 	return 0;
 }
 
-/* Tracing by user break controller. */
-static void ubc_set_tracing(int asid, unsigned long pc)
-{
-#if defined(CONFIG_CPU_SH4A)
-	unsigned long val;
-
-	val = (UBC_CBR_ID_INST | UBC_CBR_RW_READ | UBC_CBR_CE);
-	val |= (UBC_CBR_AIE | UBC_CBR_AIV_SET(asid));
-
-	ctrl_outl(val, UBC_CBR0);
-	ctrl_outl(pc, UBC_CAR0);
-	ctrl_outl(0x0, UBC_CAMR0);
-	ctrl_outl(0x0, UBC_CBCR);
-
-	val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE);
-	ctrl_outl(val, UBC_CRR0);
-
-	/* Read UBC register that we wrote last, for checking update */
-	val = ctrl_inl(UBC_CRR0);
-
-#else	/* CONFIG_CPU_SH4A */
-	ctrl_outl(pc, UBC_BARA);
-
-#ifdef CONFIG_MMU
-	ctrl_outb(asid, UBC_BASRA);
-#endif
-
-	ctrl_outl(0, UBC_BAMRA);
-
-	if (current_cpu_data.type == CPU_SH7729 ||
-	    current_cpu_data.type == CPU_SH7710 ||
-	    current_cpu_data.type == CPU_SH7712 ||
-	    current_cpu_data.type == CPU_SH7203){
-		ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
-		ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
-	} else {
-		ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
-		ctrl_outw(BRCR_PCBA, UBC_BRCR);
-	}
-#endif	/* CONFIG_CPU_SH4A */
-}
-
 /*
  * switch_to(x,y) should switch tasks from x to y.
  *
@@ -302,25 +256,6 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 		     : "r" (task_thread_info(next)));
 #endif
 
-	/* If no tasks are using the UBC, we're done */
-	if (ubc_usercnt == 0)
-		/* If no tasks are using the UBC, we're done */;
-	else if (next->thread.ubc_pc && next->mm) {
-		int asid = 0;
-#ifdef CONFIG_MMU
-		asid |= cpu_asid(smp_processor_id(), next->mm);
-#endif
-		ubc_set_tracing(asid, next->thread.ubc_pc);
-	} else {
-#if defined(CONFIG_CPU_SH4A)
-		ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
-		ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
-#else
-		ctrl_outw(0, UBC_BBRA);
-		ctrl_outw(0, UBC_BBRB);
-#endif
-	}
-
 	return prev;
 }
 
@@ -412,20 +347,3 @@ unsigned long get_wchan(struct task_struct *p)
 
 	return pc;
 }
-
-asmlinkage void break_point_trap(void)
-{
-	/* Clear tracing. */
-#if defined(CONFIG_CPU_SH4A)
-	ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
-	ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
-#else
-	ctrl_outw(0, UBC_BBRA);
-	ctrl_outw(0, UBC_BBRB);
-	ctrl_outl(0, UBC_BRCR);
-#endif
-	current->thread.ubc_pc = 0;
-	ubc_usercnt -= 1;
-
-	force_sig(SIGTRAP, current);
-}
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index 9be35f348093..bdb10446cbac 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -65,31 +65,12 @@ static inline int put_stack_long(struct task_struct *task, int offset,
 
 void user_enable_single_step(struct task_struct *child)
 {
-	/* Next scheduling will set up UBC */
-	if (child->thread.ubc_pc == 0)
-		ubc_usercnt += 1;
-
-	child->thread.ubc_pc = get_stack_long(child,
-				offsetof(struct pt_regs, pc));
-
 	set_tsk_thread_flag(child, TIF_SINGLESTEP);
 }
 
 void user_disable_single_step(struct task_struct *child)
 {
 	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
-
-	/*
-	 * Ensure the UBC is not programmed at the next context switch.
-	 *
-	 * Normally this is not needed but there are sequences such as
-	 * singlestep, signal delivery, and continue that leave the
-	 * ubc_pc non-zero leading to spurious SIGTRAPs.
-	 */
-	if (child->thread.ubc_pc != 0) {
-		ubc_usercnt -= 1;
-		child->thread.ubc_pc = 0;
-	}
 }
 
 /*