Diffstat (limited to 'arch/x86/kernel/hw_breakpoint.c')
 -rw-r--r--  arch/x86/kernel/hw_breakpoint.c  545
 1 files changed, 545 insertions, 0 deletions

diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
new file mode 100644
index 000000000000..752daebe91c6
--- /dev/null
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -0,0 +1,545 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) 2009 IBM Corporation
 * Copyright (C) 2009 Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 */

#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/irqflags.h>
#include <linux/notifier.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/smp.h>

#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/debugreg.h>

/* Per cpu debug control register value */
DEFINE_PER_CPU(unsigned long, dr7);
EXPORT_PER_CPU_SYMBOL(dr7);

/* Per cpu debug address registers values */
static DEFINE_PER_CPU(unsigned long, cpu_debugreg[HBP_NUM]);

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for each CPU
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);


/*
 * Encode the length, type, Exact, and Enable bits for a particular breakpoint
 * as stored in debug register 7.
 */
unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type)
{
	unsigned long bp_info;

	bp_info = (len | type) & 0xf;
	bp_info <<= (DR_CONTROL_SHIFT + drnum * DR_CONTROL_SIZE);
	bp_info |= (DR_GLOBAL_ENABLE << (drnum * DR_ENABLE_SIZE)) |
				DR_GLOBAL_SLOWDOWN;
	return bp_info;
}

/*
 * Decode the length and type bits for a particular breakpoint as
 * stored in debug register 7.  Return the "enabled" status.
 */
int decode_dr7(unsigned long dr7, int bpnum, unsigned *len, unsigned *type)
{
	int bp_info = dr7 >> (DR_CONTROL_SHIFT + bpnum * DR_CONTROL_SIZE);

	*len = (bp_info & 0xc) | 0x40;
	*type = (bp_info & 0x3) | 0x80;

	return (dr7 >> (bpnum * DR_ENABLE_SIZE)) & 0x3;
}
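
/*
 * Editor's illustrative sketch (not part of the original commit): how the
 * two helpers above round-trip.  encode_dr7() packs the low nibble of
 * (len | type) into the per-slot control field and sets the global-enable
 * bit for that slot; decode_dr7() maps the raw control bits back to the
 * arch encodings by OR-ing in the 0x40/0x80 bases.  Only symbols already
 * used in this file are assumed here.
 */
static void __maybe_unused dr7_encode_decode_example(void)
{
	unsigned len, type;
	/* A 4-byte write breakpoint installed in debug register slot 2 */
	unsigned long dr7 = encode_dr7(2, X86_BREAKPOINT_LEN_4,
				       X86_BREAKPOINT_WRITE);

	/* Slot 2 decodes as enabled, and re-encoding yields the same dr7 */
	WARN_ON(!decode_dr7(dr7, 2, &len, &type));
	WARN_ON(encode_dr7(2, len, type) != dr7);
}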

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free debug address register and use it for this
 * breakpoint.  Finally we enable it in the debug control register.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned long *dr7;
	int i;

	for (i = 0; i < HBP_NUM; i++) {
		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
		return -EBUSY;

	set_debugreg(info->address, i);
	__get_cpu_var(cpu_debugreg[i]) = info->address;

	dr7 = &__get_cpu_var(dr7);
	*dr7 |= encode_dr7(i, info->len, info->type);

	set_debugreg(*dr7, 7);

	return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search for the debug address register it uses and then we
 * disable it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned long *dr7;
	int i;

	for (i = 0; i < HBP_NUM; i++) {
		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
		return;

	dr7 = &__get_cpu_var(dr7);
	*dr7 &= ~encode_dr7(i, info->len, info->type);

	set_debugreg(*dr7, 7);
}

static int get_hbp_len(u8 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case X86_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case X86_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case X86_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
#endif
	}
	return len_in_bytes;
}

/*
 * Check for virtual address in user space.
 */
int arch_check_va_in_userspace(unsigned long va, u8 hbp_len)
{
	unsigned int len;

	len = get_hbp_len(hbp_len);

	return (va <= TASK_SIZE - len);
}

/*
 * Check for virtual address in kernel space.
 */
static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
{
	unsigned int len;

	len = get_hbp_len(hbp_len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
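
/*
 * Editor's illustrative sketch (not part of the original commit): how the
 * two range checks above split the address space at TASK_SIZE.  A 4-byte
 * breakpoint whose last byte still falls below TASK_SIZE is user-space;
 * anything starting at or above TASK_SIZE is kernel-space.
 */
static void __maybe_unused hbp_range_check_example(void)
{
	/* Highest legal 4-byte user breakpoint: [TASK_SIZE - 4, TASK_SIZE) */
	WARN_ON(!arch_check_va_in_userspace(TASK_SIZE - 4, X86_BREAKPOINT_LEN_4));
	/* The first kernel address fails the user check but passes the kernel one */
	WARN_ON(arch_check_va_in_userspace(TASK_SIZE, X86_BREAKPOINT_LEN_4));
	WARN_ON(!arch_check_va_in_kernelspace(TASK_SIZE, X86_BREAKPOINT_LEN_4));
}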

/*
 * Store a breakpoint's encoded address, length, and type.
 */
static int arch_store_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	/*
	 * For kernel addresses, either the address or symbol name can be
	 * specified.
	 */
	if (info->name)
		info->address = (unsigned long)
				kallsyms_lookup_name(info->name);
	if (info->address)
		return 0;

	return -EINVAL;
}

int arch_bp_generic_fields(int x86_len, int x86_type,
			   int *gen_len, int *gen_type)
{
	/* Len */
	switch (x86_len) {
	case X86_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case X86_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case X86_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
#ifdef CONFIG_X86_64
	case X86_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
#endif
	default:
		return -EINVAL;
	}

	/* Type */
	switch (x86_type) {
	case X86_BREAKPOINT_EXECUTE:
		*gen_type = HW_BREAKPOINT_X;
		break;
	case X86_BREAKPOINT_WRITE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case X86_BREAKPOINT_RW:
		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
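
/*
 * Editor's illustrative sketch (not part of the original commit): a
 * typical use of arch_bp_generic_fields(), translating the x86 encoding
 * of a 2-byte read/write watchpoint into the generic HW_BREAKPOINT_*
 * values understood by the arch-independent hw-breakpoint layer.
 */
static void __maybe_unused bp_generic_fields_example(void)
{
	int gen_len, gen_type;

	if (!arch_bp_generic_fields(X86_BREAKPOINT_LEN_2, X86_BREAKPOINT_RW,
				    &gen_len, &gen_type)) {
		/*
		 * Here gen_len == HW_BREAKPOINT_LEN_2 and
		 * gen_type == (HW_BREAKPOINT_W | HW_BREAKPOINT_R).
		 */
	}
}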

static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	info->address = bp->attr.bp_addr;

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->len = X86_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->len = X86_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->len = X86_BREAKPOINT_LEN_4;
		break;
#ifdef CONFIG_X86_64
	case HW_BREAKPOINT_LEN_8:
		info->len = X86_BREAKPOINT_LEN_8;
		break;
#endif
	default:
		return -EINVAL;
	}

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_W:
		info->type = X86_BREAKPOINT_WRITE;
		break;
	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
		info->type = X86_BREAKPOINT_RW;
		break;
	case HW_BREAKPOINT_X:
		info->type = X86_BREAKPOINT_EXECUTE;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
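
/*
 * Editor's illustrative sketch (not part of the original commit): the
 * attribute fields arch_build_bp_info() consumes.  A caller describing a
 * 4-byte write watchpoint fills bp_addr/bp_len/bp_type in the generic
 * perf ABI; the core then hands the resulting perf_event to the arch code,
 * which translates it as above.  The "addr" parameter is a hypothetical
 * example address; how the attribute is submitted to the core is out of
 * scope here.
 */
static void __maybe_unused bp_attr_example(unsigned long addr)
{
	struct perf_event_attr attr = {
		.bp_addr	= addr,
		.bp_len		= HW_BREAKPOINT_LEN_4,
		.bp_type	= HW_BREAKPOINT_W,
	};

	(void)attr;	/* would be submitted to the hw-breakpoint/perf core */
}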
/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp,
				  struct task_struct *tsk)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned int align;
	int ret;

	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	ret = -EINVAL;

	if (info->type == X86_BREAKPOINT_EXECUTE)
		/*
		 * Ptrace-refactoring code
		 * For now, we'll allow instruction breakpoint only for user-space
		 * addresses
		 */
		if ((!arch_check_va_in_userspace(info->address, info->len)) &&
			info->len != X86_BREAKPOINT_EXECUTE)
			return ret;

	switch (info->len) {
	case X86_BREAKPOINT_LEN_1:
		align = 0;
		break;
	case X86_BREAKPOINT_LEN_2:
		align = 1;
		break;
	case X86_BREAKPOINT_LEN_4:
		align = 3;
		break;
#ifdef CONFIG_X86_64
	case X86_BREAKPOINT_LEN_8:
		align = 7;
		break;
#endif
	default:
		return ret;
	}

	if (bp->callback)
		ret = arch_store_info(bp);

	if (ret < 0)
		return ret;
	/*
	 * Check that the low-order bits of the address are appropriate
	 * for the alignment implied by len.
	 */
	if (info->address & align)
		return -EINVAL;

	/* Check that the virtual address is in the proper range */
	if (tsk) {
		if (!arch_check_va_in_userspace(info->address, info->len))
			return -EFAULT;
	} else {
		if (!arch_check_va_in_kernelspace(info->address, info->len))
			return -EFAULT;
	}

	return 0;
}

/*
 * Dump the debug register contents to the user.
 * We can't dump our per-cpu values because they
 * may contain cpu-wide breakpoints, something that
 * doesn't belong to the current task.
 *
 * TODO: include non-ptrace user breakpoints (perf)
 */
void aout_dump_debugregs(struct user *dump)
{
	int i;
	int dr7 = 0;
	struct perf_event *bp;
	struct arch_hw_breakpoint *info;
	struct thread_struct *thread = &current->thread;

	for (i = 0; i < HBP_NUM; i++) {
		bp = thread->ptrace_bps[i];

		if (bp && !bp->attr.disabled) {
			dump->u_debugreg[i] = bp->attr.bp_addr;
			info = counter_arch_bp(bp);
			dr7 |= encode_dr7(i, info->len, info->type);
		} else {
			dump->u_debugreg[i] = 0;
		}
	}

	dump->u_debugreg[4] = 0;
	dump->u_debugreg[5] = 0;
	dump->u_debugreg[6] = current->thread.debugreg6;

	dump->u_debugreg[7] = dr7;
}
EXPORT_SYMBOL_GPL(aout_dump_debugregs);

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < HBP_NUM; i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}
}

void hw_breakpoint_restore(void)
{
	set_debugreg(__get_cpu_var(cpu_debugreg[0]), 0);
	set_debugreg(__get_cpu_var(cpu_debugreg[1]), 1);
	set_debugreg(__get_cpu_var(cpu_debugreg[2]), 2);
	set_debugreg(__get_cpu_var(cpu_debugreg[3]), 3);
	set_debugreg(current->thread.debugreg6, 6);
	set_debugreg(__get_cpu_var(dr7), 7);
}
EXPORT_SYMBOL_GPL(hw_breakpoint_restore);

/*
 * Handle debug exception notifications.
 *
 * Return value is either NOTIFY_STOP or NOTIFY_DONE as explained below.
 *
 * NOTIFY_DONE is returned if one of the following conditions is true.
 * i) When the causative address is from user-space and the exception
 * is a valid one, i.e. not triggered as a result of lazy debug register
 * switching
 * ii) When there are more bits than trap<n> set in DR6 register (such
 * as BD, BS or BT) indicating that more than one debug condition is
 * met and requires some more action in do_debug().
 *
 * NOTIFY_STOP is returned for all other cases.
 */
static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int i, cpu, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned long dr7, dr6;
	unsigned long *dr6_p;

	/* The DR6 value is pointed to by args->err */
	dr6_p = (unsigned long *)ERR_PTR(args->err);
	dr6 = *dr6_p;

	/* Do an early return if no trap bits are set in DR6 */
	if ((dr6 & DR_TRAP_BITS) == 0)
		return NOTIFY_DONE;

	get_debugreg(dr7, 7);
	/* Disable breakpoints during exception handling */
	set_debugreg(0UL, 7);
	/*
	 * Assert that local interrupts are disabled
	 * Reset the DRn bits in the virtualized register value.
	 * The ptrace trigger routine will add in whatever is needed.
	 */
	current->thread.debugreg6 &= ~DR_TRAP_BITS;
	cpu = get_cpu();

	/* Handle all the breakpoints that were triggered */
	for (i = 0; i < HBP_NUM; ++i) {
		if (likely(!(dr6 & (DR_TRAP0 << i))))
			continue;

		/*
		 * The counter may be concurrently released but that can only
		 * occur from a call_rcu() path. We can then safely fetch
		 * the breakpoint, use its callback, touch its counter
		 * while we are in an rcu_read_lock() path.
		 */
		rcu_read_lock();

		bp = per_cpu(bp_per_reg[i], cpu);
		if (bp)
			rc = NOTIFY_DONE;
		/*
		 * Reset the 'i'th TRAP bit in dr6 to denote completion of
		 * exception handling
		 */
		(*dr6_p) &= ~(DR_TRAP0 << i);
		/*
		 * bp can be NULL due to lazy debug register switching
		 * or due to concurrent perf counter removal.
		 */
		if (!bp) {
			rcu_read_unlock();
			break;
		}

		(bp->callback)(bp, args->regs);

		rcu_read_unlock();
	}
	if (dr6 & (~DR_TRAP_BITS))
		rc = NOTIFY_DONE;

	set_debugreg(dr7, 7);
	put_cpu();

	return rc;
}
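
/*
 * Editor's illustrative sketch (not part of the original commit): the DR6
 * bookkeeping used by the handler above.  Each debug address register
 * slot i reports its hit through bit (DR_TRAP0 << i) of DR6, which is
 * exactly what the loop tests and then clears.
 */
static int __maybe_unused hbp_first_triggered_slot(unsigned long dr6)
{
	int i;

	for (i = 0; i < HBP_NUM; i++) {
		if (dr6 & (DR_TRAP0 << i))
			return i;	/* slot whose breakpoint condition was met */
	}

	return -1;			/* no breakpoint trap bit is set */
}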

/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_exceptions_notify(
		struct notifier_block *unused, unsigned long val, void *data)
{
	if (val != DIE_DEBUG)
		return NOTIFY_DONE;

	return hw_breakpoint_handler(data);
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}

void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
{
	/* TODO */
}