author     Frederic Weisbecker <fweisbec@gmail.com>   2009-12-09 03:25:48 -0500
committer  Ingo Molnar <mingo@elte.hu>                2009-12-09 03:48:20 -0500
commit     44234adcdce38f83c56e05f808ce656175b4beeb (patch)
tree       caff2ca7bbf4bf7c0b12652caf739bcc6db5f4d3
parent     c937fe20cb6d9e24c6ad5f9f0c64d64c78411057 (diff)
hw-breakpoints: Modify breakpoints without unregistering them
Currently, when ptrace needs to modify a breakpoint (for example to disable it, or to change its address, type or len), it calls modify_user_hw_breakpoint(). The latter performs the heavy and racy task of unregistering the old breakpoint and registering a new one.

This is racy: someone else might steal the reserved breakpoint slot from under us, which is undesirable since the breakpoint is only supposed to be modified, sometimes in the middle of a debugging workflow. We don't want our slot to be stolen in the middle.

So instead of unregistering/registering the breakpoint, just disable it while we modify its fields, and re-enable it afterwards if necessary.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Prasad <prasad@linux.vnet.ibm.com>
LKML-Reference: <1260347148-5519-1-git-send-regression-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
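To illustrate the caller-side effect of this change: after the patch, modify_user_hw_breakpoint() keeps the existing perf_event registered and returns an int error code, so a caller such as ptrace updates the attributes of the breakpoint it already owns instead of swapping pointers and checking IS_ERR(). The following is only a sketch modelled on the ptrace.c hunk below; the update_bp_addr() helper and its arguments are hypothetical and not part of the patch.

#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

/*
 * Hypothetical helper, modelled on the ptrace_set_breakpoint_addr() hunk
 * below: move an already-registered breakpoint 'bp' to a new address.
 */
static int update_bp_addr(struct perf_event *bp, unsigned long addr)
{
	struct perf_event_attr attr;

	/*
	 * Old scheme: the caller got back a brand new perf_event (or an
	 * ERR_PTR) because the breakpoint was unregistered and re-registered,
	 * leaving a window where the slot could be stolen.
	 *
	 * New scheme: 'bp' stays registered; only its attributes change.
	 */
	attr = bp->attr;
	attr.bp_addr = addr;

	/* Returns 0 on success; on error 'bp' keeps its old settings. */
	return modify_user_hw_breakpoint(bp, &attr);
}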
-rw-r--r--  arch/x86/kernel/ptrace.c       | 57
-rw-r--r--  include/linux/hw_breakpoint.h  |  4
-rw-r--r--  include/linux/perf_event.h     |  5
-rw-r--r--  kernel/hw_breakpoint.c         | 42
-rw-r--r--  kernel/perf_event.c            |  4
5 files changed, 66 insertions(+), 46 deletions(-)
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index b361d28061d0..7079ddaf0731 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -595,7 +595,7 @@ static unsigned long ptrace_get_dr7(struct perf_event *bp[])
 	return dr7;
 }
 
-static struct perf_event *
+static int
 ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
 			 struct task_struct *tsk, int disabled)
 {
@@ -609,11 +609,11 @@ ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
 	 * written the address register first
 	 */
 	if (!bp)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	err = arch_bp_generic_fields(len, type, &gen_len, &gen_type);
 	if (err)
-		return ERR_PTR(err);
+		return err;
 
 	attr = bp->attr;
 	attr.bp_len = gen_len;
@@ -658,28 +658,17 @@ restore:
 				if (!second_pass)
 					continue;
 
-				thread->ptrace_bps[i] = NULL;
-				bp = ptrace_modify_breakpoint(bp, len, type,
+				rc = ptrace_modify_breakpoint(bp, len, type,
 							      tsk, 1);
-				if (IS_ERR(bp)) {
-					rc = PTR_ERR(bp);
-					thread->ptrace_bps[i] = NULL;
+				if (rc)
 					break;
-				}
-				thread->ptrace_bps[i] = bp;
 			}
 			continue;
 		}
 
-		bp = ptrace_modify_breakpoint(bp, len, type, tsk, 0);
-
-		/* Incorrect bp, or we have a bug in bp API */
-		if (IS_ERR(bp)) {
-			rc = PTR_ERR(bp);
-			thread->ptrace_bps[i] = NULL;
+		rc = ptrace_modify_breakpoint(bp, len, type, tsk, 0);
+		if (rc)
 			break;
-		}
-		thread->ptrace_bps[i] = bp;
 	}
 	/*
 	 * Make a second pass to free the remaining unused breakpoints
@@ -737,26 +726,32 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
 		attr.disabled = 1;
 
 		bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
+
+		/*
+		 * CHECKME: the previous code returned -EIO if the addr wasn't
+		 * a valid task virtual addr. The new one will return -EINVAL in
+		 * this case.
+		 * -EINVAL may be what we want for in-kernel breakpoints users,
+		 * but -EIO looks better for ptrace, since we refuse a register
+		 * writing for the user. And anyway this is the previous
+		 * behaviour.
+		 */
+		if (IS_ERR(bp))
+			return PTR_ERR(bp);
+
+		t->ptrace_bps[nr] = bp;
 	} else {
+		int err;
+
 		bp = t->ptrace_bps[nr];
-		t->ptrace_bps[nr] = NULL;
 
 		attr = bp->attr;
 		attr.bp_addr = addr;
-		bp = modify_user_hw_breakpoint(bp, &attr);
+		err = modify_user_hw_breakpoint(bp, &attr);
+		if (err)
+			return err;
 	}
-	/*
-	 * CHECKME: the previous code returned -EIO if the addr wasn't a
-	 * valid task virtual addr. The new one will return -EINVAL in this
-	 * case.
-	 * -EINVAL may be what we want for in-kernel breakpoints users, but
-	 * -EIO looks better for ptrace, since we refuse a register writing
-	 * for the user. And anyway this is the previous behaviour.
-	 */
-	if (IS_ERR(bp))
-		return PTR_ERR(bp);
 
-	t->ptrace_bps[nr] = bp;
 
 	return 0;
 }
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index 42da1ce19ec0..69f07a9f1277 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -55,7 +55,7 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
 			    struct task_struct *tsk);
 
 /* FIXME: only change from the attr, and don't unregister */
-extern struct perf_event *
+extern int
 modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr);
 
 /*
@@ -91,7 +91,7 @@ static inline struct perf_event *
 register_user_hw_breakpoint(struct perf_event_attr *attr,
 			    perf_overflow_handler_t triggered,
 			    struct task_struct *tsk) { return NULL; }
-static inline struct perf_event *
+static inline int
 modify_user_hw_breakpoint(struct perf_event *bp,
 			  struct perf_event_attr *attr) { return NULL; }
 static inline struct perf_event *
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index bf3329413e18..64a53f74c9a9 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -872,6 +872,8 @@ extern void perf_output_copy(struct perf_output_handle *handle,
 			     const void *buf, unsigned int len);
 extern int perf_swevent_get_recursion_context(void);
 extern void perf_swevent_put_recursion_context(int rctx);
+extern void perf_event_enable(struct perf_event *event);
+extern void perf_event_disable(struct perf_event *event);
 #else
 static inline void
 perf_event_task_sched_in(struct task_struct *task, int cpu)		{ }
@@ -902,7 +904,8 @@ static inline void perf_event_fork(struct task_struct *tsk) { }
 static inline void perf_event_init(void)				{ }
 static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
 static inline void perf_swevent_put_recursion_context(int rctx)	{ }
-
+static inline void perf_event_enable(struct perf_event *event)		{ }
+static inline void perf_event_disable(struct perf_event *event)	{ }
 #endif
 
 #define perf_output_put(handle, x) \
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 03a0773ac2b2..366eedf949c0 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -320,18 +320,40 @@ EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
  * @triggered: callback to trigger when we hit the breakpoint
  * @tsk: pointer to 'task_struct' of the process to which the address belongs
  */
-struct perf_event *
-modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
+int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
 {
-	/*
-	 * FIXME: do it without unregistering
-	 * - We don't want to lose our slot
-	 * - If the new bp is incorrect, don't lose the older one
-	 */
-	unregister_hw_breakpoint(bp);
+	u64 old_addr = bp->attr.bp_addr;
+	int old_type = bp->attr.bp_type;
+	int old_len = bp->attr.bp_len;
+	int err = 0;
+
+	perf_event_disable(bp);
+
+	bp->attr.bp_addr = attr->bp_addr;
+	bp->attr.bp_type = attr->bp_type;
+	bp->attr.bp_len = attr->bp_len;
+
+	if (attr->disabled)
+		goto end;
 
-	return perf_event_create_kernel_counter(attr, -1, bp->ctx->task->pid,
-						bp->overflow_handler);
+	err = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
+	if (!err)
+		perf_event_enable(bp);
+
+	if (err) {
+		bp->attr.bp_addr = old_addr;
+		bp->attr.bp_type = old_type;
+		bp->attr.bp_len = old_len;
+		if (!bp->attr.disabled)
+			perf_event_enable(bp);
+
+		return err;
+	}
+
+end:
+	bp->attr.disabled = attr->disabled;
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
 
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index fd43ff4ac860..3b0cf86eee84 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -567,7 +567,7 @@ static void __perf_event_disable(void *info)
  * is the current context on this CPU and preemption is disabled,
  * hence we can't get into perf_event_task_sched_out for this context.
  */
-static void perf_event_disable(struct perf_event *event)
+void perf_event_disable(struct perf_event *event)
 {
 	struct perf_event_context *ctx = event->ctx;
 	struct task_struct *task = ctx->task;
@@ -971,7 +971,7 @@ static void __perf_event_enable(void *info)
  * perf_event_for_each_child or perf_event_for_each as described
  * for perf_event_disable.
  */
-static void perf_event_enable(struct perf_event *event)
+void perf_event_enable(struct perf_event *event)
 {
 	struct perf_event_context *ctx = event->ctx;
 	struct task_struct *task = ctx->task;