author      Frederic Weisbecker <fweisbec@gmail.com>   2009-12-09 03:25:48 -0500
committer   Ingo Molnar <mingo@elte.hu>                2009-12-09 03:48:20 -0500
commit      44234adcdce38f83c56e05f808ce656175b4beeb
tree        caff2ca7bbf4bf7c0b12652caf739bcc6db5f4d3 /kernel
parent      c937fe20cb6d9e24c6ad5f9f0c64d64c78411057
hw-breakpoints: Modify breakpoints without unregistering them
Currently, when ptrace needs to modify a breakpoint, e.g. to disable
it or to change its address, type, or length, it calls
modify_user_hw_breakpoint(). The latter performs the heavy and racy
task of unregistering the old breakpoint and registering a new one.
This is racy because, between the unregister and the register,
someone else might steal the reserved breakpoint slot from under us.
That is undesirable: the breakpoint is only supposed to be modified,
often in the middle of a debugging workflow, and we don't want to
lose our slot halfway through.
So instead of unregistering and re-registering the breakpoint, just
disable it while we modify its fields, and re-enable it afterwards
if necessary.
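
In short, the new modify_user_hw_breakpoint() follows this shape (a
simplified sketch of the patch below, with the validation and
error-rollback paths elided):

        perf_event_disable(bp);                 /* quiesce the event, but keep its slot */

        bp->attr.bp_addr = attr->bp_addr;       /* update the fields in place */
        bp->attr.bp_type = attr->bp_type;
        bp->attr.bp_len  = attr->bp_len;

        if (!attr->disabled)
                perf_event_enable(bp);          /* rearm with the new settings */

Because the event is never unregistered, its slot reservation is never
released, so no other task can take it while the fields are being
rewritten.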
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Prasad <prasad@linux.vnet.ibm.com>
LKML-Reference: <1260347148-5519-1-git-send-regression-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
 kernel/hw_breakpoint.c | 42 ++++++++++++++++++++++++++++++++----------
 kernel/perf_event.c    |  4 ++--
 2 files changed, 34 insertions(+), 12 deletions(-)
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 03a0773ac2b2..366eedf949c0 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -320,18 +320,40 @@ EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
  * @triggered: callback to trigger when we hit the breakpoint
  * @tsk: pointer to 'task_struct' of the process to which the address belongs
  */
-struct perf_event *
-modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
+int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
 {
-        /*
-         * FIXME: do it without unregistering
-         * - We don't want to lose our slot
-         * - If the new bp is incorrect, don't lose the older one
-         */
-        unregister_hw_breakpoint(bp);
+        u64 old_addr = bp->attr.bp_addr;
+        int old_type = bp->attr.bp_type;
+        int old_len = bp->attr.bp_len;
+        int err = 0;
+
+        perf_event_disable(bp);
+
+        bp->attr.bp_addr = attr->bp_addr;
+        bp->attr.bp_type = attr->bp_type;
+        bp->attr.bp_len = attr->bp_len;
+
+        if (attr->disabled)
+                goto end;
 
-        return perf_event_create_kernel_counter(attr, -1, bp->ctx->task->pid,
-                                                bp->overflow_handler);
+        err = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
+        if (!err)
+                perf_event_enable(bp);
+
+        if (err) {
+                bp->attr.bp_addr = old_addr;
+                bp->attr.bp_type = old_type;
+                bp->attr.bp_len = old_len;
+                if (!bp->attr.disabled)
+                        perf_event_enable(bp);
+
+                return err;
+        }
+
+end:
+        bp->attr.disabled = attr->disabled;
+
+        return 0;
 }
 EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
 
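Note that the return type changes from struct perf_event * to int:
callers now get an error code back instead of a pointer to a freshly
registered event. A hypothetical caller (illustrative only; the real
ptrace-side changes fall outside the 'kernel' diffstat shown here)
might look like:

        /* Hypothetical helper: move an existing breakpoint to a new address. */
        static int move_breakpoint(struct perf_event *bp, unsigned long addr)
        {
                struct perf_event_attr attr = bp->attr;  /* start from the current settings */

                attr.bp_addr = addr;                     /* change only the address */

                return modify_user_hw_breakpoint(bp, &attr);  /* 0 on success, -errno on failure */
        }

On failure the breakpoint keeps its old address, type, and length: the
function restores the saved attributes and re-enables the event (if it
was enabled) before returning the error, so the caller's slot survives
a rejected modification.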
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index fd43ff4ac860..3b0cf86eee84 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -567,7 +567,7 @@ static void __perf_event_disable(void *info)
  * is the current context on this CPU and preemption is disabled,
  * hence we can't get into perf_event_task_sched_out for this context.
  */
-static void perf_event_disable(struct perf_event *event)
+void perf_event_disable(struct perf_event *event)
 {
         struct perf_event_context *ctx = event->ctx;
         struct task_struct *task = ctx->task;
@@ -971,7 +971,7 @@ static void __perf_event_enable(void *info)
  * perf_event_for_each_child or perf_event_for_each as described
  * for perf_event_disable.
  */
-static void perf_event_enable(struct perf_event *event)
+void perf_event_enable(struct perf_event *event)
 {
         struct perf_event_context *ctx = event->ctx;
         struct task_struct *task = ctx->task;
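
Since perf_event_disable() and perf_event_enable() lose their static
qualifier so that kernel/hw_breakpoint.c can call them, their
prototypes must also become visible outside kernel/perf_event.c. The
matching declarations (presumably in include/linux/perf_event.h, which
is outside the 'kernel' diffstat shown here) would be:

        extern void perf_event_disable(struct perf_event *event);
        extern void perf_event_enable(struct perf_event *event);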