Diffstat (limited to 'kernel')
 kernel/exit.c                 |   5
 kernel/hw_breakpoint.c        | 424
 kernel/perf_event.c           |  53
 kernel/trace/trace.h          |   5
 kernel/trace/trace_entries.h  |   6
 kernel/trace/trace_ksym.c     | 126
 kernel/trace/trace_selftest.c |   3
 7 files changed, 293 insertions(+), 329 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index e61891f80123..266f8920628a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -49,6 +49,7 @@
 #include <linux/init_task.h>
 #include <linux/perf_event.h>
 #include <trace/events/sched.h>
+#include <linux/hw_breakpoint.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -980,6 +981,10 @@ NORET_TYPE void do_exit(long code)
 	proc_exit_connector(tsk);
 
 	/*
+	 * FIXME: do that only when needed, using sched_exit tracepoint
+	 */
+	flush_ptrace_hw_breakpoint(tsk);
+	/*
 	 * Flush inherited counters to the parent - before the parent
 	 * gets woken up by child-exit notifications.
 	 */
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index c1f64e65a9f3..08f6d0163201 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -15,6 +15,7 @@
  *
  * Copyright (C) 2007 Alan Stern
  * Copyright (C) IBM Corporation, 2009
+ * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
  */
 
 /*
@@ -35,334 +36,242 @@
 #include <linux/init.h>
 #include <linux/smp.h>
 
-#include <asm/hw_breakpoint.h>
+#include <linux/hw_breakpoint.h>
+
 #include <asm/processor.h>
 
 #ifdef CONFIG_X86
 #include <asm/debugreg.h>
 #endif
-/*
- * Spinlock that protects all (un)register operations over kernel/user-space
- * breakpoint requests
- */
-static DEFINE_SPINLOCK(hw_breakpoint_lock);
-
-/* Array of kernel-space breakpoint structures */
-struct hw_breakpoint *hbp_kernel[HBP_NUM];
-
-/*
- * Per-processor copy of hbp_kernel[]. Used only when hbp_kernel is being
- * modified but we need the older copy to handle any hbp exceptions. It will
- * sync with hbp_kernel[] value after updation is done through IPIs.
- */
-DEFINE_PER_CPU(struct hw_breakpoint*, this_hbp_kernel[HBP_NUM]);
-
-/*
- * Kernel breakpoints grow downwards, starting from HBP_NUM
- * 'hbp_kernel_pos' denotes lowest numbered breakpoint register occupied for
- * kernel-space request. We will initialise it here and not in an __init
- * routine because load_debug_registers(), which uses this variable can be
- * called very early during CPU initialisation.
- */
-unsigned int hbp_kernel_pos = HBP_NUM;
 
-/*
- * An array containing refcount of threads using a given bkpt register
- * Accesses are synchronised by acquiring hw_breakpoint_lock
- */
-unsigned int hbp_user_refcount[HBP_NUM];
+static atomic_t bp_slot;
 
-/*
- * Load the debug registers during startup of a CPU.
- */
-void load_debug_registers(void)
+int reserve_bp_slot(struct perf_event *bp)
 {
-	unsigned long flags;
-	struct task_struct *tsk = current;
-
-	spin_lock_bh(&hw_breakpoint_lock);
-
-	/* Prevent IPIs for new kernel breakpoint updates */
-	local_irq_save(flags);
-	arch_update_kernel_hw_breakpoint(NULL);
-	local_irq_restore(flags);
-
-	if (test_tsk_thread_flag(tsk, TIF_DEBUG))
-		arch_install_thread_hw_breakpoint(tsk);
-
-	spin_unlock_bh(&hw_breakpoint_lock);
-}
+	if (atomic_inc_return(&bp_slot) == HBP_NUM) {
+		atomic_dec(&bp_slot);
 
-/*
- * Erase all the hardware breakpoint info associated with a thread.
- *
- * If tsk != current then tsk must not be usable (for example, a
- * child being cleaned up from a failed fork).
- */
-void flush_thread_hw_breakpoint(struct task_struct *tsk)
-{
-	int i;
-	struct thread_struct *thread = &(tsk->thread);
-
-	spin_lock_bh(&hw_breakpoint_lock);
-
-	/* The thread no longer has any breakpoints associated with it */
-	clear_tsk_thread_flag(tsk, TIF_DEBUG);
-	for (i = 0; i < HBP_NUM; i++) {
-		if (thread->hbp[i]) {
-			hbp_user_refcount[i]--;
-			kfree(thread->hbp[i]);
-			thread->hbp[i] = NULL;
-		}
+		return -ENOSPC;
 	}
 
-	arch_flush_thread_hw_breakpoint(tsk);
-
-	/* Actually uninstall the breakpoints if necessary */
-	if (tsk == current)
-		arch_uninstall_thread_hw_breakpoint();
-	spin_unlock_bh(&hw_breakpoint_lock);
+	return 0;
 }
 
-/*
- * Copy the hardware breakpoint info from a thread to its cloned child.
- */
-int copy_thread_hw_breakpoint(struct task_struct *tsk,
-		struct task_struct *child, unsigned long clone_flags)
+void release_bp_slot(struct perf_event *bp)
 {
-	/*
-	 * We will assume that breakpoint settings are not inherited
-	 * and the child starts out with no debug registers set.
-	 * But what about CLONE_PTRACE?
-	 */
-	clear_tsk_thread_flag(child, TIF_DEBUG);
-
-	/* We will call flush routine since the debugregs are not inherited */
-	arch_flush_thread_hw_breakpoint(child);
-
-	return 0;
+	atomic_dec(&bp_slot);
 }
 
-static int __register_user_hw_breakpoint(int pos, struct task_struct *tsk,
-					struct hw_breakpoint *bp)
+int __register_perf_hw_breakpoint(struct perf_event *bp)
 {
-	struct thread_struct *thread = &(tsk->thread);
-	int rc;
+	int ret;
 
-	/* Do not overcommit. Fail if kernel has used the hbp registers */
-	if (pos >= hbp_kernel_pos)
-		return -ENOSPC;
+	ret = reserve_bp_slot(bp);
+	if (ret)
+		return ret;
 
-	rc = arch_validate_hwbkpt_settings(bp, tsk);
-	if (rc)
-		return rc;
+	if (!bp->attr.disabled)
+		ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
 
-	thread->hbp[pos] = bp;
-	hbp_user_refcount[pos]++;
+	return ret;
+}
 
-	arch_update_user_hw_breakpoint(pos, tsk);
-	/*
-	 * Does it need to be installed right now?
-	 * Otherwise it will get installed the next time tsk runs
-	 */
-	if (tsk == current)
-		arch_install_thread_hw_breakpoint(tsk);
+int register_perf_hw_breakpoint(struct perf_event *bp)
+{
+	bp->callback = perf_bp_event;
 
-	return rc;
+	return __register_perf_hw_breakpoint(bp);
 }
 
 /*
- * Modify the address of a hbp register already in use by the task
- * Do not invoke this in-lieu of a __unregister_user_hw_breakpoint()
+ * Register a breakpoint bound to a task and a given cpu.
+ * If cpu is -1, the breakpoint is active for the task in every cpu
+ * If the task is -1, the breakpoint is active for every tasks in the given
+ * cpu.
  */
-static int __modify_user_hw_breakpoint(int pos, struct task_struct *tsk,
-					struct hw_breakpoint *bp)
+static struct perf_event *
+register_user_hw_breakpoint_cpu(unsigned long addr,
+				int len,
+				int type,
+				perf_callback_t triggered,
+				pid_t pid,
+				int cpu,
+				bool active)
 {
-	struct thread_struct *thread = &(tsk->thread);
-
-	if ((pos >= hbp_kernel_pos) || (arch_validate_hwbkpt_settings(bp, tsk)))
-		return -EINVAL;
-
-	if (thread->hbp[pos] == NULL)
-		return -EINVAL;
-
-	thread->hbp[pos] = bp;
+	struct perf_event_attr *attr;
+	struct perf_event *bp;
+
+	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+	if (!attr)
+		return ERR_PTR(-ENOMEM);
+
+	attr->type = PERF_TYPE_BREAKPOINT;
+	attr->size = sizeof(*attr);
+	attr->bp_addr = addr;
+	attr->bp_len = len;
+	attr->bp_type = type;
 	/*
-	 * 'pos' must be that of a hbp register already used by 'tsk'
-	 * Otherwise arch_modify_user_hw_breakpoint() will fail
+	 * Such breakpoints are used by debuggers to trigger signals when
+	 * we hit the excepted memory op. We can't miss such events, they
+	 * must be pinned.
 	 */
-	arch_update_user_hw_breakpoint(pos, tsk);
+	attr->pinned = 1;
 
-	if (tsk == current)
-		arch_install_thread_hw_breakpoint(tsk);
+	if (!active)
+		attr->disabled = 1;
 
-	return 0;
-}
-
-static void __unregister_user_hw_breakpoint(int pos, struct task_struct *tsk)
-{
-	hbp_user_refcount[pos]--;
-	tsk->thread.hbp[pos] = NULL;
+	bp = perf_event_create_kernel_counter(attr, cpu, pid, triggered);
+	kfree(attr);
 
-	arch_update_user_hw_breakpoint(pos, tsk);
-
-	if (tsk == current)
-		arch_install_thread_hw_breakpoint(tsk);
+	return bp;
 }
 
 /**
  * register_user_hw_breakpoint - register a hardware breakpoint for user space
+ * @addr: is the memory address that triggers the breakpoint
+ * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
+ * @type: the type of the access to the memory (read/write/exec)
+ * @triggered: callback to trigger when we hit the breakpoint
  * @tsk: pointer to 'task_struct' of the process to which the address belongs
- * @bp: the breakpoint structure to register
- *
- * @bp.info->name or @bp.info->address, @bp.info->len, @bp.info->type and
- * @bp->triggered must be set properly before invocation
+ * @active: should we activate it while registering it
  *
  */
-int register_user_hw_breakpoint(struct task_struct *tsk,
-				struct hw_breakpoint *bp)
+struct perf_event *
+register_user_hw_breakpoint(unsigned long addr,
+			    int len,
+			    int type,
+			    perf_callback_t triggered,
+			    struct task_struct *tsk,
+			    bool active)
 {
-	struct thread_struct *thread = &(tsk->thread);
-	int i, rc = -ENOSPC;
-
-	spin_lock_bh(&hw_breakpoint_lock);
-
-	for (i = 0; i < hbp_kernel_pos; i++) {
-		if (!thread->hbp[i]) {
-			rc = __register_user_hw_breakpoint(i, tsk, bp);
-			break;
-		}
-	}
-	if (!rc)
-		set_tsk_thread_flag(tsk, TIF_DEBUG);
-
-	spin_unlock_bh(&hw_breakpoint_lock);
-	return rc;
+	return register_user_hw_breakpoint_cpu(addr, len, type, triggered,
+					       tsk->pid, -1, active);
 }
 EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
 
 /**
  * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
+ * @bp: the breakpoint structure to modify
+ * @addr: is the memory address that triggers the breakpoint
+ * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
+ * @type: the type of the access to the memory (read/write/exec)
+ * @triggered: callback to trigger when we hit the breakpoint
  * @tsk: pointer to 'task_struct' of the process to which the address belongs
- * @bp: the breakpoint structure to unregister
- *
+ * @active: should we activate it while registering it
  */
-int modify_user_hw_breakpoint(struct task_struct *tsk, struct hw_breakpoint *bp)
+struct perf_event *
+modify_user_hw_breakpoint(struct perf_event *bp,
+			  unsigned long addr,
+			  int len,
+			  int type,
+			  perf_callback_t triggered,
+			  struct task_struct *tsk,
+			  bool active)
 {
-	struct thread_struct *thread = &(tsk->thread);
-	int i, ret = -ENOENT;
+	/*
+	 * FIXME: do it without unregistering
+	 * - We don't want to lose our slot
+	 * - If the new bp is incorrect, don't lose the older one
+	 */
+	unregister_hw_breakpoint(bp);
 
-	spin_lock_bh(&hw_breakpoint_lock);
-	for (i = 0; i < hbp_kernel_pos; i++) {
-		if (bp == thread->hbp[i]) {
-			ret = __modify_user_hw_breakpoint(i, tsk, bp);
-			break;
-		}
-	}
-	spin_unlock_bh(&hw_breakpoint_lock);
-	return ret;
+	return register_user_hw_breakpoint(addr, len, type, triggered,
+					   tsk, active);
 }
 EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
 
 /**
- * unregister_user_hw_breakpoint - unregister a user-space hardware breakpoint
- * @tsk: pointer to 'task_struct' of the process to which the address belongs
+ * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
  * @bp: the breakpoint structure to unregister
- *
  */
-void unregister_user_hw_breakpoint(struct task_struct *tsk,
-				   struct hw_breakpoint *bp)
+void unregister_hw_breakpoint(struct perf_event *bp)
 {
-	struct thread_struct *thread = &(tsk->thread);
-	int i, pos = -1, hbp_counter = 0;
-
-	spin_lock_bh(&hw_breakpoint_lock);
-	for (i = 0; i < hbp_kernel_pos; i++) {
-		if (thread->hbp[i])
-			hbp_counter++;
-		if (bp == thread->hbp[i])
-			pos = i;
-	}
-	if (pos >= 0) {
-		__unregister_user_hw_breakpoint(pos, tsk);
-		hbp_counter--;
-	}
-	if (!hbp_counter)
-		clear_tsk_thread_flag(tsk, TIF_DEBUG);
-
-	spin_unlock_bh(&hw_breakpoint_lock);
+	if (!bp)
+		return;
+	perf_event_release_kernel(bp);
+}
+EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
+
+static struct perf_event *
+register_kernel_hw_breakpoint_cpu(unsigned long addr,
+				  int len,
+				  int type,
+				  perf_callback_t triggered,
+				  int cpu,
+				  bool active)
+{
+	return register_user_hw_breakpoint_cpu(addr, len, type, triggered,
+					       -1, cpu, active);
 }
-EXPORT_SYMBOL_GPL(unregister_user_hw_breakpoint);
 
 /**
- * register_kernel_hw_breakpoint - register a hardware breakpoint for kernel space
- * @bp: the breakpoint structure to register
- *
- * @bp.info->name or @bp.info->address, @bp.info->len, @bp.info->type and
- * @bp->triggered must be set properly before invocation
+ * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
+ * @addr: is the memory address that triggers the breakpoint
+ * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
+ * @type: the type of the access to the memory (read/write/exec)
+ * @triggered: callback to trigger when we hit the breakpoint
+ * @active: should we activate it while registering it
  *
+ * @return a set of per_cpu pointers to perf events
  */
-int register_kernel_hw_breakpoint(struct hw_breakpoint *bp)
+struct perf_event **
+register_wide_hw_breakpoint(unsigned long addr,
+			    int len,
+			    int type,
+			    perf_callback_t triggered,
+			    bool active)
 {
-	int rc;
+	struct perf_event **cpu_events, **pevent, *bp;
+	long err;
+	int cpu;
+
+	cpu_events = alloc_percpu(typeof(*cpu_events));
+	if (!cpu_events)
+		return ERR_PTR(-ENOMEM);
 
-	rc = arch_validate_hwbkpt_settings(bp, NULL);
-	if (rc)
-		return rc;
+	for_each_possible_cpu(cpu) {
+		pevent = per_cpu_ptr(cpu_events, cpu);
+		bp = register_kernel_hw_breakpoint_cpu(addr, len, type,
+					triggered, cpu, active);
 
-	spin_lock_bh(&hw_breakpoint_lock);
+		*pevent = bp;
 
-	rc = -ENOSPC;
-	/* Check if we are over-committing */
-	if ((hbp_kernel_pos > 0) && (!hbp_user_refcount[hbp_kernel_pos-1])) {
-		hbp_kernel_pos--;
-		hbp_kernel[hbp_kernel_pos] = bp;
-		on_each_cpu(arch_update_kernel_hw_breakpoint, NULL, 1);
-		rc = 0;
+		if (IS_ERR(bp) || !bp) {
+			err = PTR_ERR(bp);
+			goto fail;
+		}
 	}
 
-	spin_unlock_bh(&hw_breakpoint_lock);
-	return rc;
+	return cpu_events;
+
+fail:
+	for_each_possible_cpu(cpu) {
+		pevent = per_cpu_ptr(cpu_events, cpu);
+		if (IS_ERR(*pevent) || !*pevent)
+			break;
+		unregister_hw_breakpoint(*pevent);
+	}
+	free_percpu(cpu_events);
+	/* return the error if any */
+	return ERR_PTR(err);
 }
-EXPORT_SYMBOL_GPL(register_kernel_hw_breakpoint);
 
 /**
- * unregister_kernel_hw_breakpoint - unregister a HW breakpoint for kernel space
- * @bp: the breakpoint structure to unregister
- *
- * Uninstalls and unregisters @bp.
+ * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
+ * @cpu_events: the per cpu set of events to unregister
  */
-void unregister_kernel_hw_breakpoint(struct hw_breakpoint *bp)
+void unregister_wide_hw_breakpoint(struct perf_event **cpu_events)
 {
-	int i, j;
-
-	spin_lock_bh(&hw_breakpoint_lock);
-
-	/* Find the 'bp' in our list of breakpoints for kernel */
-	for (i = hbp_kernel_pos; i < HBP_NUM; i++)
-		if (bp == hbp_kernel[i])
-			break;
+	int cpu;
+	struct perf_event **pevent;
 
-	/* Check if we did not find a match for 'bp'. If so return early */
-	if (i == HBP_NUM) {
-		spin_unlock_bh(&hw_breakpoint_lock);
-		return;
+	for_each_possible_cpu(cpu) {
+		pevent = per_cpu_ptr(cpu_events, cpu);
+		unregister_hw_breakpoint(*pevent);
 	}
-
-	/*
-	 * We'll shift the breakpoints one-level above to compact if
-	 * unregistration creates a hole
-	 */
-	for (j = i; j > hbp_kernel_pos; j--)
-		hbp_kernel[j] = hbp_kernel[j-1];
-
-	hbp_kernel[hbp_kernel_pos] = NULL;
-	on_each_cpu(arch_update_kernel_hw_breakpoint, NULL, 1);
-	hbp_kernel_pos++;
-
-	spin_unlock_bh(&hw_breakpoint_lock);
+	free_percpu(cpu_events);
 }
-EXPORT_SYMBOL_GPL(unregister_kernel_hw_breakpoint);
+
 
 static struct notifier_block hw_breakpoint_exceptions_nb = {
 	.notifier_call = hw_breakpoint_exceptions_notify,
@@ -374,5 +283,12 @@ static int __init init_hw_breakpoint(void)
 {
 	return register_die_notifier(&hw_breakpoint_exceptions_nb);
 }
-
 core_initcall(init_hw_breakpoint);
+
+
+struct pmu perf_ops_bp = {
+	.enable		= arch_install_hw_breakpoint,
+	.disable	= arch_uninstall_hw_breakpoint,
+	.read		= hw_breakpoint_pmu_read,
+	.unthrottle	= hw_breakpoint_pmu_unthrottle
+};
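
A minimal sketch (not part of the patch) of how a kernel-module author might consume the wide-breakpoint API introduced above. The module name, watched symbol, and handler body are hypothetical; the function signatures, handler prototype, and HW_BREAKPOINT_* constants follow the diff:

/*
 * Illustrative only: watch writes to a kernel variable using
 * register_wide_hw_breakpoint() as defined in this patch.
 */
#include <linux/module.h>
#include <linux/err.h>
#include <linux/kallsyms.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

static struct perf_event **wp;

/* Matches the perf_callback_t shape used by ksym_hbp_handler below */
static void wp_handler(struct perf_event *bp, void *data)
{
	/* Runs on the CPU that triggered the watchpoint */
	printk(KERN_INFO "write to watched symbol\n");
}

static int __init wp_init(void)
{
	/* "pid_max" is just an example target symbol */
	unsigned long addr = kallsyms_lookup_name("pid_max");

	wp = register_wide_hw_breakpoint(addr, HW_BREAKPOINT_LEN_4,
					 HW_BREAKPOINT_W, wp_handler, true);
	return IS_ERR(wp) ? PTR_ERR(wp) : 0;
}

static void __exit wp_exit(void)
{
	unregister_wide_hw_breakpoint(wp);
}

module_init(wp_init);
module_exit(wp_exit);
MODULE_LICENSE("GPL");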
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 5087125e2a00..98dc56b2ebe4 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -29,6 +29,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/perf_event.h>
 #include <linux/ftrace_event.h>
+#include <linux/hw_breakpoint.h>
 
 #include <asm/irq_regs.h>
 
@@ -4229,6 +4230,51 @@ static void perf_event_free_filter(struct perf_event *event)
 
 #endif /* CONFIG_EVENT_PROFILE */
 
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+static void bp_perf_event_destroy(struct perf_event *event)
+{
+	release_bp_slot(event);
+}
+
+static const struct pmu *bp_perf_event_init(struct perf_event *bp)
+{
+	int err;
+	/*
+	 * The breakpoint is already filled if we haven't created the counter
+	 * through perf syscall
+	 * FIXME: manage to get trigerred to NULL if it comes from syscalls
+	 */
+	if (!bp->callback)
+		err = register_perf_hw_breakpoint(bp);
+	else
+		err = __register_perf_hw_breakpoint(bp);
+	if (err)
+		return ERR_PTR(err);
+
+	bp->destroy = bp_perf_event_destroy;
+
+	return &perf_ops_bp;
+}
+
+void perf_bp_event(struct perf_event *bp, void *regs)
+{
+	/* TODO */
+}
+#else
+static void bp_perf_event_destroy(struct perf_event *event)
+{
+}
+
+static const struct pmu *bp_perf_event_init(struct perf_event *bp)
+{
+	return NULL;
+}
+
+void perf_bp_event(struct perf_event *bp, void *regs)
+{
+}
+#endif
+
 atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 static void sw_perf_event_destroy(struct perf_event *event)
@@ -4375,6 +4421,11 @@ perf_event_alloc(struct perf_event_attr *attr,
 		pmu = tp_perf_event_init(event);
 		break;
 
+	case PERF_TYPE_BREAKPOINT:
+		pmu = bp_perf_event_init(event);
+		break;
+
+
 	default:
 		break;
 	}
@@ -4686,7 +4737,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 
 	ctx = find_get_context(pid, cpu);
 	if (IS_ERR(ctx))
-		return NULL ;
+		return NULL;
 
 	event = perf_event_alloc(attr, cpu, ctx, NULL,
 				 NULL, callback, GFP_KERNEL);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 91c3d0e9a5a1..d72f06ff263f 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -11,14 +11,11 @@
 #include <linux/ftrace.h>
 #include <trace/boot.h>
 #include <linux/kmemtrace.h>
+#include <linux/hw_breakpoint.h>
 
 #include <linux/trace_seq.h>
 #include <linux/ftrace_event.h>
 
-#ifdef CONFIG_KSYM_TRACER
-#include <asm/hw_breakpoint.h>
-#endif
-
 enum trace_type {
 	__TRACE_FIRST_TYPE = 0,
 
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index e19747d4f860..c16a08f399df 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -372,11 +372,11 @@ FTRACE_ENTRY(ksym_trace, ksym_trace_entry,
 	F_STRUCT(
 		__field( unsigned long, ip )
 		__field( unsigned char, type )
-		__array( char , ksym_name, KSYM_NAME_LEN )
 		__array( char , cmd, TASK_COMM_LEN )
+		__field( unsigned long, addr )
 	),
 
-	F_printk("ip: %pF type: %d ksym_name: %s cmd: %s",
+	F_printk("ip: %pF type: %d ksym_name: %pS cmd: %s",
 		 (void *)__entry->ip, (unsigned int)__entry->type,
-		 __entry->ksym_name, __entry->cmd)
+		 (void *)__entry->addr, __entry->cmd)
 );
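
The fixed-size ksym_name buffer can be dropped above because printk's %pS format specifier resolves a raw kernel address to its symbol name at print time, so storing the unsigned long address is enough. A fragment illustrating the idea (variable choice hypothetical):

	/* %pS turns an address back into "symbol+offset/size" when printed */
	unsigned long addr = (unsigned long)&jiffies;	/* any kernel symbol */
	printk(KERN_INFO "breakpoint hit at %pS\n", (void *)addr);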
diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c
index 6d5609c67378..fea83eeeef09 100644
--- a/kernel/trace/trace_ksym.c
+++ b/kernel/trace/trace_ksym.c
@@ -29,7 +29,11 @@
 #include "trace_stat.h"
 #include "trace.h"
 
-/* For now, let us restrict the no. of symbols traced simultaneously to number
+#include <linux/hw_breakpoint.h>
+#include <asm/hw_breakpoint.h>
+
+/*
+ * For now, let us restrict the no. of symbols traced simultaneously to number
  * of available hardware breakpoint registers.
  */
 #define KSYM_TRACER_MAX HBP_NUM
@@ -37,8 +41,10 @@
 #define KSYM_TRACER_OP_LEN 3 /* rw- */
 
 struct trace_ksym {
-	struct hw_breakpoint	*ksym_hbp;
+	struct perf_event	**ksym_hbp;
 	unsigned long		ksym_addr;
+	int			type;
+	int			len;
 #ifdef CONFIG_PROFILE_KSYM_TRACER
 	unsigned long		counter;
 #endif
@@ -75,10 +81,11 @@ void ksym_collect_stats(unsigned long hbp_hit_addr)
 }
 #endif /* CONFIG_PROFILE_KSYM_TRACER */
 
-void ksym_hbp_handler(struct hw_breakpoint *hbp, struct pt_regs *regs)
+void ksym_hbp_handler(struct perf_event *hbp, void *data)
 {
 	struct ring_buffer_event *event;
 	struct ksym_trace_entry *entry;
+	struct pt_regs *regs = data;
 	struct ring_buffer *buffer;
 	int pc;
 
@@ -96,12 +103,12 @@ void ksym_hbp_handler(struct hw_breakpoint *hbp, struct pt_regs *regs)
 
 	entry = ring_buffer_event_data(event);
 	entry->ip = instruction_pointer(regs);
-	entry->type = hbp->info.type;
-	strlcpy(entry->ksym_name, hbp->info.name, KSYM_SYMBOL_LEN);
+	entry->type = hw_breakpoint_type(hbp);
+	entry->addr = hw_breakpoint_addr(hbp);
 	strlcpy(entry->cmd, current->comm, TASK_COMM_LEN);
 
 #ifdef CONFIG_PROFILE_KSYM_TRACER
-	ksym_collect_stats(hbp->info.address);
+	ksym_collect_stats(hw_breakpoint_addr(hbp));
 #endif /* CONFIG_PROFILE_KSYM_TRACER */
 
 	trace_buffer_unlock_commit(buffer, event, 0, pc);
@@ -120,31 +127,21 @@ static int ksym_trace_get_access_type(char *str)
 	int access = 0;
 
 	if (str[0] == 'r')
-		access += 4;
-	else if (str[0] != '-')
-		return -EINVAL;
+		access |= HW_BREAKPOINT_R;
 
 	if (str[1] == 'w')
-		access += 2;
-	else if (str[1] != '-')
-		return -EINVAL;
+		access |= HW_BREAKPOINT_W;
 
-	if (str[2] != '-')
-		return -EINVAL;
+	if (str[2] == 'x')
+		access |= HW_BREAKPOINT_X;
 
 	switch (access) {
-	case 6:
-		access = HW_BREAKPOINT_RW;
-		break;
-	case 4:
-		access = -EINVAL;
-		break;
-	case 2:
-		access = HW_BREAKPOINT_WRITE;
-		break;
+	case HW_BREAKPOINT_W:
+	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
+		return access;
+	default:
+		return -EINVAL;
 	}
-
-	return access;
 }
 
 /*
@@ -194,36 +191,33 @@ int process_new_ksym_entry(char *ksymname, int op, unsigned long addr)
 	if (!entry)
 		return -ENOMEM;
 
-	entry->ksym_hbp = kzalloc(sizeof(struct hw_breakpoint), GFP_KERNEL);
-	if (!entry->ksym_hbp)
-		goto err;
+	entry->type = op;
+	entry->ksym_addr = addr;
+	entry->len = HW_BREAKPOINT_LEN_4;
 
-	entry->ksym_hbp->info.name = kstrdup(ksymname, GFP_KERNEL);
-	if (!entry->ksym_hbp->info.name)
-		goto err;
-
-	entry->ksym_hbp->info.type = op;
-	entry->ksym_addr = entry->ksym_hbp->info.address = addr;
-#ifdef CONFIG_X86
-	entry->ksym_hbp->info.len = HW_BREAKPOINT_LEN_4;
-#endif
-	entry->ksym_hbp->triggered = (void *)ksym_hbp_handler;
+	ret = -EAGAIN;
+	entry->ksym_hbp = register_wide_hw_breakpoint(entry->ksym_addr,
+					entry->len, entry->type,
+					ksym_hbp_handler, true);
+	if (IS_ERR(entry->ksym_hbp)) {
+		entry->ksym_hbp = NULL;
+		ret = PTR_ERR(entry->ksym_hbp);
+	}
 
-	ret = register_kernel_hw_breakpoint(entry->ksym_hbp);
-	if (ret < 0) {
+	if (!entry->ksym_hbp) {
 		printk(KERN_INFO "ksym_tracer request failed. Try again"
 			" later!!\n");
-		ret = -EAGAIN;
 		goto err;
 	}
+
 	hlist_add_head_rcu(&(entry->ksym_hlist), &ksym_filter_head);
 	ksym_filter_entry_count++;
+
 	return 0;
+
 err:
-	if (entry->ksym_hbp)
-		kfree(entry->ksym_hbp->info.name);
-	kfree(entry->ksym_hbp);
 	kfree(entry);
+
 	return ret;
 }
 
@@ -244,10 +238,10 @@ static ssize_t ksym_trace_filter_read(struct file *filp, char __user *ubuf,
 	mutex_lock(&ksym_tracer_mutex);
 
 	hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) {
-		ret = trace_seq_printf(s, "%s:", entry->ksym_hbp->info.name);
-		if (entry->ksym_hbp->info.type == HW_BREAKPOINT_WRITE)
+		ret = trace_seq_printf(s, "%pS:", (void *)entry->ksym_addr);
+		if (entry->type == HW_BREAKPOINT_W)
 			ret = trace_seq_puts(s, "-w-\n");
-		else if (entry->ksym_hbp->info.type == HW_BREAKPOINT_RW)
+		else if (entry->type == (HW_BREAKPOINT_W | HW_BREAKPOINT_R))
 			ret = trace_seq_puts(s, "rw-\n");
 		WARN_ON_ONCE(!ret);
 	}
@@ -269,12 +263,10 @@ static void __ksym_trace_reset(void)
 	mutex_lock(&ksym_tracer_mutex);
 	hlist_for_each_entry_safe(entry, node, node1, &ksym_filter_head,
 								ksym_hlist) {
-		unregister_kernel_hw_breakpoint(entry->ksym_hbp);
+		unregister_wide_hw_breakpoint(entry->ksym_hbp);
 		ksym_filter_entry_count--;
 		hlist_del_rcu(&(entry->ksym_hlist));
 		synchronize_rcu();
-		kfree(entry->ksym_hbp->info.name);
-		kfree(entry->ksym_hbp);
 		kfree(entry);
 	}
 	mutex_unlock(&ksym_tracer_mutex);
@@ -327,7 +319,7 @@ static ssize_t ksym_trace_filter_write(struct file *file,
 	hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) {
 		if (entry->ksym_addr == ksym_addr) {
 			/* Check for malformed request: (6) */
-			if (entry->ksym_hbp->info.type != op)
+			if (entry->type != op)
 				changed = 1;
 			else
 				goto out;
@@ -335,18 +327,21 @@ static ssize_t ksym_trace_filter_write(struct file *file,
 		}
 	}
 	if (changed) {
-		unregister_kernel_hw_breakpoint(entry->ksym_hbp);
-		entry->ksym_hbp->info.type = op;
+		unregister_wide_hw_breakpoint(entry->ksym_hbp);
+		entry->type = op;
 		if (op > 0) {
-			ret = register_kernel_hw_breakpoint(entry->ksym_hbp);
-			if (ret == 0)
+			entry->ksym_hbp =
+				register_wide_hw_breakpoint(entry->ksym_addr,
+					entry->len, entry->type,
+					ksym_hbp_handler, true);
+			if (IS_ERR(entry->ksym_hbp))
+				entry->ksym_hbp = NULL;
+			if (!entry->ksym_hbp)
 				goto out;
 		}
 		ksym_filter_entry_count--;
 		hlist_del_rcu(&(entry->ksym_hlist));
 		synchronize_rcu();
-		kfree(entry->ksym_hbp->info.name);
-		kfree(entry->ksym_hbp);
 		kfree(entry);
 		ret = 0;
 		goto out;
@@ -413,16 +408,16 @@ static enum print_line_t ksym_trace_output(struct trace_iterator *iter)
 
 	trace_assign_type(field, entry);
 
-	ret = trace_seq_printf(s, "%11s-%-5d [%03d] %-30s ", field->cmd,
-				entry->pid, iter->cpu, field->ksym_name);
+	ret = trace_seq_printf(s, "%11s-%-5d [%03d] %pS", field->cmd,
+				entry->pid, iter->cpu, (char *)field->addr);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
 	switch (field->type) {
-	case HW_BREAKPOINT_WRITE:
+	case HW_BREAKPOINT_W:
 		ret = trace_seq_printf(s, " W ");
 		break;
-	case HW_BREAKPOINT_RW:
+	case HW_BREAKPOINT_R | HW_BREAKPOINT_W:
 		ret = trace_seq_printf(s, " RW ");
 		break;
 	default:
@@ -490,14 +485,13 @@ static int ksym_tracer_stat_show(struct seq_file *m, void *v)
 
 	entry = hlist_entry(stat, struct trace_ksym, ksym_hlist);
 
-	if (entry->ksym_hbp)
-		access_type = entry->ksym_hbp->info.type;
+	access_type = entry->type;
 
 	switch (access_type) {
-	case HW_BREAKPOINT_WRITE:
+	case HW_BREAKPOINT_W:
 		seq_puts(m, " W ");
 		break;
-	case HW_BREAKPOINT_RW:
+	case HW_BREAKPOINT_R | HW_BREAKPOINT_W:
 		seq_puts(m, " RW ");
 		break;
 	default:
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 7179c12e4f0f..27c5072c2e6b 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -828,7 +828,8 @@ trace_selftest_startup_ksym(struct tracer *trace, struct trace_array *tr)
 
 	ksym_selftest_dummy = 0;
 	/* Register the read-write tracing request */
-	ret = process_new_ksym_entry(KSYM_SELFTEST_ENTRY, HW_BREAKPOINT_RW,
+	ret = process_new_ksym_entry(KSYM_SELFTEST_ENTRY,
+			HW_BREAKPOINT_R | HW_BREAKPOINT_W,
 			(unsigned long)(&ksym_selftest_dummy));
 
 	if (ret < 0) {