path: root/arch/x86/kernel/hw_breakpoint.c
author		Frederic Weisbecker <fweisbec@gmail.com>	2009-09-09 13:22:48 -0400
committer	Frederic Weisbecker <fweisbec@gmail.com>	2009-11-08 09:34:42 -0500
commit		24f1e32c60c45c89a997c73395b69c8af6f0a84e (patch)
tree		4f30f16e18cb4abbcf96b3b331e6a3f01bfa26e6 /arch/x86/kernel/hw_breakpoint.c
parent		2da3e160cb3d226d87b907fab26850d838ed8d7c (diff)
hw-breakpoints: Rewrite the hw-breakpoints layer on top of perf events

This patch rebases the implementation of the breakpoints API on top of
perf event instances. Each breakpoint is now a perf event that handles
the register scheduling, thread/cpu attachment, etc.

The new layering is now made as follows:

       ptrace       kgdb      ftrace   perf syscall
          \          |          /         /
           \         |         /         /
                                         /
            Core breakpoint API         /
                                       /
                     |                /
                     |               /

              Breakpoints perf events

                     |
                     |

               Breakpoints PMU ---- Debug Register constraints handling
                                    (Part of core breakpoint API)
                     |
                     |

             Hardware debug registers

Reasons for this rewrite:

- Use the centralized/optimized pmu registers scheduling,
  implying an easier arch integration
- More powerful register handling: perf attributes (pinned/flexible
  events, exclusive/non-exclusive, tunable period, etc...)

Impact:

- New perf ABI: the hardware breakpoints counters
- Ptrace breakpoint setting remains tricky and still needs some per
  thread breakpoints references.

Todo (in order):

- Support breakpoints perf counter events for perf tools (ie: implement
  perf_bpcounter_event())
- Support from perf tools

Changes in v2:

- Follow the perf "event" rename
- The ptrace regression has been fixed (ptrace breakpoint perf events
  weren't released when a task ended)
- Drop the struct hw_breakpoint and store generic fields in
  perf_event_attr.
- Separate core and arch specific headers, drop
  asm-generic/hw_breakpoint.h and create linux/hw_breakpoint.h
- Use new generic len/type for breakpoints
- Handle the off case: when the breakpoint API is not supported by an arch

Changes in v3:

- Fix broken CONFIG_KVM, we need to propagate the breakpoint API changes
  to kvm when we exit the guest and restore the bp registers to the host.

Changes in v4:

- Drop the hw_breakpoint_restore() stub as it is only used by KVM
- EXPORT_SYMBOL_GPL hw_breakpoint_restore() as KVM can be built as a
  module
- Restore the breakpoints unconditionally on kvm guest exit:
  TIF_DEBUG_THREAD no longer covers every case of running breakpoints
  and vcpu->arch.switch_db_regs might not always be set when the guest
  used debug registers. (Waiting for a reliable optimization)

Changes in v5:

- Split the asm-generic/hw_breakpoint.h move to linux/hw_breakpoint.h
  into a separate patch
- Optimize the breakpoints restoring while switching from kvm guest to
  host. We only want to restore the state if there are active
  breakpoints on the host, otherwise we don't care about messed-up
  address registers.
- Add asm/hw_breakpoint.h to Kbuild
- Fix bad breakpoint type in trace_selftest.c

Changes in v6:

- Fix wrong header inclusion in trace.h (triggered a build error with
  CONFIG_FTRACE_SELFTEST)

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Prasad <prasad@linux.vnet.ibm.com>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Jan Kiszka <jan.kiszka@web.de>
Cc: Jiri Slaby <jirislaby@gmail.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Avi Kivity <avi@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Paul Mundt <lethal@linux-sh.org>
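To make the new split of responsibilities concrete, here is an illustrative sketch (not part of the patch) of roughly how the core breakpoint layer is expected to drive the x86 hooks reworked in the diff below. The wrapper function and its name are invented for this example; arch_validate_hwbkpt_settings(), arch_install_hw_breakpoint(), arch_uninstall_hw_breakpoint() and bp->callback are the ones visible in the diff.

#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/sched.h>

static int x86_bp_lifecycle_sketch(struct perf_event *bp, struct task_struct *tsk)
{
	int ret;

	/* Translate bp->attr.bp_{addr,len,type} into arch_hw_breakpoint
	 * fields and check alignment and address range. */
	ret = arch_validate_hwbkpt_settings(bp, tsk);
	if (ret)
		return ret;

	/* Claim a free per-cpu debug address register and enable it in DR7
	 * (perf calls this under the counter's ctx->lock). */
	ret = arch_install_hw_breakpoint(bp);
	if (ret)
		return ret;

	/*
	 * ... the breakpoint fires: hw_breakpoint_handler() looks the event
	 * up in bp_per_reg[] and invokes bp->callback(bp, regs) ...
	 */

	/* Release the debug register slot and clear its bits in DR7. */
	arch_uninstall_hw_breakpoint(bp);

	return 0;
}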
Diffstat (limited to 'arch/x86/kernel/hw_breakpoint.c')
-rw-r--r--	arch/x86/kernel/hw_breakpoint.c	391
1 file changed, 255 insertions(+), 136 deletions(-)
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 9316a9de4de..e622620790b 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -15,6 +15,7 @@
  *
  * Copyright (C) 2007 Alan Stern
  * Copyright (C) 2009 IBM Corporation
+ * Copyright (C) 2009 Frederic Weisbecker <fweisbec@gmail.com>
  */
 
 /*
@@ -22,6 +23,8 @@
  * using the CPU's debug registers.
  */
 
+#include <linux/perf_event.h>
+#include <linux/hw_breakpoint.h>
 #include <linux/irqflags.h>
 #include <linux/notifier.h>
 #include <linux/kallsyms.h>
@@ -38,26 +41,24 @@
 #include <asm/processor.h>
 #include <asm/debugreg.h>
 
-/* Unmasked kernel DR7 value */
-static unsigned long kdr7;
+/* Per cpu debug control register value */
+DEFINE_PER_CPU(unsigned long, dr7);
+
+/* Per cpu debug address registers values */
+static DEFINE_PER_CPU(unsigned long, cpu_debugreg[HBP_NUM]);
 
 /*
- * Masks for the bits corresponding to registers DR0 - DR3 in DR7 register.
- * Used to clear and verify the status of bits corresponding to DR0 - DR3
+ * Stores the breakpoints currently in use on each breakpoint address
+ * register for each cpus
  */
-static const unsigned long dr7_masks[HBP_NUM] = {
-	0x000f0003,	/* LEN0, R/W0, G0, L0 */
-	0x00f0000c,	/* LEN1, R/W1, G1, L1 */
-	0x0f000030,	/* LEN2, R/W2, G2, L2 */
-	0xf00000c0	/* LEN3, R/W3, G3, L3 */
-};
+static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);
 
 
 /*
  * Encode the length, type, Exact, and Enable bits for a particular breakpoint
  * as stored in debug register 7.
  */
-static unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type)
+unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type)
 {
 	unsigned long bp_info;
 
@@ -68,64 +69,89 @@ static unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type)
 	return bp_info;
 }
 
-void arch_update_kernel_hw_breakpoint(void *unused)
+/*
+ * Decode the length and type bits for a particular breakpoint as
+ * stored in debug register 7. Return the "enabled" status.
+ */
+int decode_dr7(unsigned long dr7, int bpnum, unsigned *len, unsigned *type)
 {
-	struct hw_breakpoint *bp;
-	int i, cpu = get_cpu();
-	unsigned long temp_kdr7 = 0;
-
-	/* Don't allow debug exceptions while we update the registers */
-	set_debugreg(0UL, 7);
+	int bp_info = dr7 >> (DR_CONTROL_SHIFT + bpnum * DR_CONTROL_SIZE);
 
-	for (i = hbp_kernel_pos; i < HBP_NUM; i++) {
-		per_cpu(this_hbp_kernel[i], cpu) = bp = hbp_kernel[i];
-		if (bp) {
-			temp_kdr7 |= encode_dr7(i, bp->info.len, bp->info.type);
-			set_debugreg(bp->info.address, i);
-		}
-	}
+	*len = (bp_info & 0xc) | 0x40;
+	*type = (bp_info & 0x3) | 0x80;
 
-	/* No need to set DR6. Update the debug registers with kernel-space
-	 * breakpoint values from kdr7 and user-space requests from the
-	 * current process
-	 */
-	kdr7 = temp_kdr7;
-	set_debugreg(kdr7 | current->thread.debugreg7, 7);
-	put_cpu();
+	return (dr7 >> (bpnum * DR_ENABLE_SIZE)) & 0x3;
 }
 
 /*
- * Install the thread breakpoints in their debug registers.
+ * Install a perf counter breakpoint.
+ *
+ * We seek a free debug address register and use it for this
+ * breakpoint. Eventually we enable it in the debug control register.
+ *
+ * Atomic: we hold the counter->ctx->lock and we only handle variables
+ * and registers local to this cpu.
  */
-void arch_install_thread_hw_breakpoint(struct task_struct *tsk)
+int arch_install_hw_breakpoint(struct perf_event *bp)
 {
-	struct thread_struct *thread = &(tsk->thread);
-
-	switch (hbp_kernel_pos) {
-	case 4:
-		set_debugreg(thread->debugreg[3], 3);
-	case 3:
-		set_debugreg(thread->debugreg[2], 2);
-	case 2:
-		set_debugreg(thread->debugreg[1], 1);
-	case 1:
-		set_debugreg(thread->debugreg[0], 0);
-	default:
-		break;
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	unsigned long *dr7;
+	int i;
+
+	for (i = 0; i < HBP_NUM; i++) {
+		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
+
+		if (!*slot) {
+			*slot = bp;
+			break;
+		}
 	}
 
-	/* No need to set DR6 */
-	set_debugreg((kdr7 | thread->debugreg7), 7);
+	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
+		return -EBUSY;
+
+	set_debugreg(info->address, i);
+	__get_cpu_var(cpu_debugreg[i]) = info->address;
+
+	dr7 = &__get_cpu_var(dr7);
+	*dr7 |= encode_dr7(i, info->len, info->type);
+
+	set_debugreg(*dr7, 7);
+
+	return 0;
 }
 
 /*
- * Install the debug register values for just the kernel, no thread.
+ * Uninstall the breakpoint contained in the given counter.
+ *
+ * First we search the debug address register it uses and then we disable
+ * it.
+ *
+ * Atomic: we hold the counter->ctx->lock and we only handle variables
+ * and registers local to this cpu.
  */
-void arch_uninstall_thread_hw_breakpoint(void)
+void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 {
-	/* Clear the user-space portion of debugreg7 by setting only kdr7 */
-	set_debugreg(kdr7, 7);
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	unsigned long *dr7;
+	int i;
+
+	for (i = 0; i < HBP_NUM; i++) {
+		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
+
+		if (*slot == bp) {
+			*slot = NULL;
+			break;
+		}
+	}
+
+	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
+		return;
 
+	dr7 = &__get_cpu_var(dr7);
+	*dr7 &= ~encode_dr7(i, info->len, info->type);
+
+	set_debugreg(*dr7, 7);
 }
 
 static int get_hbp_len(u8 hbp_len)
@@ -133,17 +159,17 @@ static int get_hbp_len(u8 hbp_len)
 	unsigned int len_in_bytes = 0;
 
 	switch (hbp_len) {
-	case HW_BREAKPOINT_LEN_1:
+	case X86_BREAKPOINT_LEN_1:
 		len_in_bytes = 1;
 		break;
-	case HW_BREAKPOINT_LEN_2:
+	case X86_BREAKPOINT_LEN_2:
 		len_in_bytes = 2;
 		break;
-	case HW_BREAKPOINT_LEN_4:
+	case X86_BREAKPOINT_LEN_4:
 		len_in_bytes = 4;
 		break;
 #ifdef CONFIG_X86_64
-	case HW_BREAKPOINT_LEN_8:
+	case X86_BREAKPOINT_LEN_8:
 		len_in_bytes = 8;
 		break;
 #endif
@@ -178,67 +204,146 @@ static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
 /*
  * Store a breakpoint's encoded address, length, and type.
  */
-static int arch_store_info(struct hw_breakpoint *bp, struct task_struct *tsk)
+static int arch_store_info(struct perf_event *bp)
 {
-	/*
-	 * User-space requests will always have the address field populated
-	 * Symbol names from user-space are rejected
-	 */
-	if (tsk && bp->info.name)
-		return -EINVAL;
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
 	/*
 	 * For kernel-addresses, either the address or symbol name can be
 	 * specified.
 	 */
-	if (bp->info.name)
-		bp->info.address = (unsigned long)
-				   kallsyms_lookup_name(bp->info.name);
-	if (bp->info.address)
+	if (info->name)
+		info->address = (unsigned long)
+				kallsyms_lookup_name(info->name);
+	if (info->address)
 		return 0;
+
 	return -EINVAL;
 }
 
-/*
- * Validate the arch-specific HW Breakpoint register settings
- */
-int arch_validate_hwbkpt_settings(struct hw_breakpoint *bp,
-				  struct task_struct *tsk)
+int arch_bp_generic_fields(int x86_len, int x86_type,
+			   int *gen_len, int *gen_type)
 {
-	unsigned int align;
-	int ret = -EINVAL;
+	/* Len */
+	switch (x86_len) {
+	case X86_BREAKPOINT_LEN_1:
+		*gen_len = HW_BREAKPOINT_LEN_1;
+		break;
+	case X86_BREAKPOINT_LEN_2:
+		*gen_len = HW_BREAKPOINT_LEN_2;
+		break;
+	case X86_BREAKPOINT_LEN_4:
+		*gen_len = HW_BREAKPOINT_LEN_4;
+		break;
+#ifdef CONFIG_X86_64
+	case X86_BREAKPOINT_LEN_8:
+		*gen_len = HW_BREAKPOINT_LEN_8;
+		break;
+#endif
+	default:
+		return -EINVAL;
+	}
 
-	switch (bp->info.type) {
-	/*
-	 * Ptrace-refactoring code
-	 * For now, we'll allow instruction breakpoint only for user-space
-	 * addresses
-	 */
-	case HW_BREAKPOINT_EXECUTE:
-		if ((!arch_check_va_in_userspace(bp->info.address,
-							bp->info.len)) &&
-			bp->info.len != HW_BREAKPOINT_LEN_EXECUTE)
-			return ret;
+	/* Type */
+	switch (x86_type) {
+	case X86_BREAKPOINT_EXECUTE:
+		*gen_type = HW_BREAKPOINT_X;
 		break;
-	case HW_BREAKPOINT_WRITE:
+	case X86_BREAKPOINT_WRITE:
+		*gen_type = HW_BREAKPOINT_W;
 		break;
-	case HW_BREAKPOINT_RW:
+	case X86_BREAKPOINT_RW:
+		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 		break;
 	default:
-		return ret;
+		return -EINVAL;
 	}
 
-	switch (bp->info.len) {
+	return 0;
+}
+
+
+static int arch_build_bp_info(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+	info->address = bp->attr.bp_addr;
+
+	/* Len */
+	switch (bp->attr.bp_len) {
 	case HW_BREAKPOINT_LEN_1:
-		align = 0;
+		info->len = X86_BREAKPOINT_LEN_1;
 		break;
 	case HW_BREAKPOINT_LEN_2:
-		align = 1;
+		info->len = X86_BREAKPOINT_LEN_2;
 		break;
 	case HW_BREAKPOINT_LEN_4:
-		align = 3;
+		info->len = X86_BREAKPOINT_LEN_4;
 		break;
 #ifdef CONFIG_X86_64
 	case HW_BREAKPOINT_LEN_8:
+		info->len = X86_BREAKPOINT_LEN_8;
+		break;
+#endif
+	default:
+		return -EINVAL;
+	}
+
+	/* Type */
+	switch (bp->attr.bp_type) {
+	case HW_BREAKPOINT_W:
+		info->type = X86_BREAKPOINT_WRITE;
+		break;
+	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
+		info->type = X86_BREAKPOINT_RW;
+		break;
+	case HW_BREAKPOINT_X:
+		info->type = X86_BREAKPOINT_EXECUTE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+/*
+ * Validate the arch-specific HW Breakpoint register settings
+ */
+int arch_validate_hwbkpt_settings(struct perf_event *bp,
+				  struct task_struct *tsk)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	unsigned int align;
+	int ret;
+
+
+	ret = arch_build_bp_info(bp);
+	if (ret)
+		return ret;
+
+	ret = -EINVAL;
+
+	if (info->type == X86_BREAKPOINT_EXECUTE)
+		/*
+		 * Ptrace-refactoring code
+		 * For now, we'll allow instruction breakpoint only for user-space
+		 * addresses
+		 */
+		if ((!arch_check_va_in_userspace(info->address, info->len)) &&
+			info->len != X86_BREAKPOINT_EXECUTE)
+			return ret;
+
+	switch (info->len) {
+	case X86_BREAKPOINT_LEN_1:
+		align = 0;
+		break;
+	case X86_BREAKPOINT_LEN_2:
+		align = 1;
+		break;
+	case X86_BREAKPOINT_LEN_4:
+		align = 3;
+		break;
+#ifdef CONFIG_X86_64
+	case X86_BREAKPOINT_LEN_8:
 		align = 7;
 		break;
 #endif
@@ -246,8 +351,8 @@ int arch_validate_hwbkpt_settings(struct hw_breakpoint *bp,
 		return ret;
 	}
 
-	if (bp->triggered)
-		ret = arch_store_info(bp, tsk);
+	if (bp->callback)
+		ret = arch_store_info(bp);
 
 	if (ret < 0)
 		return ret;
@@ -255,44 +360,47 @@ int arch_validate_hwbkpt_settings(struct hw_breakpoint *bp,
 	 * Check that the low-order bits of the address are appropriate
 	 * for the alignment implied by len.
 	 */
-	if (bp->info.address & align)
+	if (info->address & align)
 		return -EINVAL;
 
 	/* Check that the virtual address is in the proper range */
 	if (tsk) {
-		if (!arch_check_va_in_userspace(bp->info.address, bp->info.len))
+		if (!arch_check_va_in_userspace(info->address, info->len))
 			return -EFAULT;
 	} else {
-		if (!arch_check_va_in_kernelspace(bp->info.address,
-								bp->info.len))
+		if (!arch_check_va_in_kernelspace(info->address, info->len))
 			return -EFAULT;
 	}
+
 	return 0;
 }
 
-void arch_update_user_hw_breakpoint(int pos, struct task_struct *tsk)
+/*
+ * Release the user breakpoints used by ptrace
+ */
+void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
 {
-	struct thread_struct *thread = &(tsk->thread);
-	struct hw_breakpoint *bp = thread->hbp[pos];
+	int i;
+	struct thread_struct *t = &tsk->thread;
 
-	thread->debugreg7 &= ~dr7_masks[pos];
-	if (bp) {
-		thread->debugreg[pos] = bp->info.address;
-		thread->debugreg7 |= encode_dr7(pos, bp->info.len,
-				bp->info.type);
-	} else
-		thread->debugreg[pos] = 0;
+	for (i = 0; i < HBP_NUM; i++) {
+		unregister_hw_breakpoint(t->ptrace_bps[i]);
+		t->ptrace_bps[i] = NULL;
+	}
 }
 
-void arch_flush_thread_hw_breakpoint(struct task_struct *tsk)
+#ifdef CONFIG_KVM
+void hw_breakpoint_restore(void)
 {
-	int i;
-	struct thread_struct *thread = &(tsk->thread);
-
-	thread->debugreg7 = 0;
-	for (i = 0; i < HBP_NUM; i++)
-		thread->debugreg[i] = 0;
+	set_debugreg(__get_cpu_var(cpu_debugreg[0]), 0);
+	set_debugreg(__get_cpu_var(cpu_debugreg[1]), 1);
+	set_debugreg(__get_cpu_var(cpu_debugreg[2]), 2);
+	set_debugreg(__get_cpu_var(cpu_debugreg[3]), 3);
+	set_debugreg(current->thread.debugreg6, 6);
+	set_debugreg(__get_cpu_var(dr7), 7);
 }
+EXPORT_SYMBOL_GPL(hw_breakpoint_restore);
+#endif
 
 /*
  * Handle debug exception notifications.
@@ -313,7 +421,7 @@ void arch_flush_thread_hw_breakpoint(struct task_struct *tsk)
 static int __kprobes hw_breakpoint_handler(struct die_args *args)
 {
 	int i, cpu, rc = NOTIFY_STOP;
-	struct hw_breakpoint *bp;
+	struct perf_event *bp;
 	unsigned long dr7, dr6;
 	unsigned long *dr6_p;
 
@@ -325,10 +433,6 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
 	if ((dr6 & DR_TRAP_BITS) == 0)
 		return NOTIFY_DONE;
 
-	/* Lazy debug register switching */
-	if (!test_tsk_thread_flag(current, TIF_DEBUG))
-		arch_uninstall_thread_hw_breakpoint();
-
 	get_debugreg(dr7, 7);
 	/* Disable breakpoints during exception handling */
 	set_debugreg(0UL, 7);
@@ -344,17 +448,18 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
 	for (i = 0; i < HBP_NUM; ++i) {
 		if (likely(!(dr6 & (DR_TRAP0 << i))))
 			continue;
+
 		/*
-		 * Find the corresponding hw_breakpoint structure and
-		 * invoke its triggered callback.
+		 * The counter may be concurrently released but that can only
+		 * occur from a call_rcu() path. We can then safely fetch
+		 * the breakpoint, use its callback, touch its counter
+		 * while we are in an rcu_read_lock() path.
 		 */
-		if (i >= hbp_kernel_pos)
-			bp = per_cpu(this_hbp_kernel[i], cpu);
-		else {
-			bp = current->thread.hbp[i];
-			if (bp)
-				rc = NOTIFY_DONE;
-		}
+		rcu_read_lock();
+
+		bp = per_cpu(bp_per_reg[i], cpu);
+		if (bp)
+			rc = NOTIFY_DONE;
 		/*
 		 * Reset the 'i'th TRAP bit in dr6 to denote completion of
 		 * exception handling
@@ -362,19 +467,23 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
 		(*dr6_p) &= ~(DR_TRAP0 << i);
 		/*
 		 * bp can be NULL due to lazy debug register switching
-		 * or due to the delay between updates of hbp_kernel_pos
-		 * and this_hbp_kernel.
+		 * or due to concurrent perf counter removing.
 		 */
-		if (!bp)
-			continue;
+		if (!bp) {
+			rcu_read_unlock();
+			break;
+		}
+
+		(bp->callback)(bp, args->regs);
 
-		(bp->triggered)(bp, args->regs);
+		rcu_read_unlock();
 	}
 	if (dr6 & (~DR_TRAP_BITS))
 		rc = NOTIFY_DONE;
 
 	set_debugreg(dr7, 7);
 	put_cpu();
+
 	return rc;
 }
 
@@ -389,3 +498,13 @@ int __kprobes hw_breakpoint_exceptions_notify(
 
 	return hw_breakpoint_handler(data);
 }
+
+void hw_breakpoint_pmu_read(struct perf_event *bp)
+{
+	/* TODO */
+}
+
+void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
+{
+	/* TODO */
+}
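As a closing note on the CONFIG_KVM hunk above: per the v4/v5 changelog, hw_breakpoint_restore() is exported so KVM can reload the host debug registers on guest exit, and only when the host actually has breakpoints armed. A minimal sketch of that call site, assuming a hw_breakpoint_active()-style helper that tests the per-cpu dr7 value (the kvm/x86.c side is not part of this file's diff):

	/* On return from the guest, in the vcpu run path (illustrative;
	 * the exact location and the helper name are assumptions): */
	if (hw_breakpoint_active())
		hw_breakpoint_restore();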