path: root/kernel/trace/trace_uprobe.c
author    Oleg Nesterov <oleg@redhat.com>    2013-02-04 13:05:43 -0500
committer Oleg Nesterov <oleg@redhat.com>    2013-02-08 12:28:08 -0500
commit    b2fe8ba674e8acbb9e8e63510b802c6d054d88a3 (patch)
tree      1bd1defbfe3f285dfa7c77f94bc5523ac4a82679 /kernel/trace/trace_uprobe.c
parent    f42d24a1d20d2e72d1e5d48930f18b138dfad117 (diff)
uprobes/perf: Avoid uprobe_apply() whenever possible
uprobe_perf_open/close call the costly uprobe_apply() every time; we can avoid it if:

- "nr_systemwide != 0" is not changed.
- There is another process/thread with the same ->mm.
- copy_process() does inherit_event(); dup_mmap() preserves the inserted breakpoints.
- event->attr.enable_on_exec == T, so we can rely on uprobe_mmap() called by the exec/mmap paths.
- tp_target is exiting. Only _close() checks PF_EXITING; I don't think TRACE_REG_PERF_OPEN can hit a dying task too often.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
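For readers skimming the page, the condition the patch computes before calling uprobe_apply() can be condensed into a small stand-alone sketch. This is not kernel code: struct fake_event, can_skip_apply_on_open() and its boolean fields are hypothetical stand-ins for event->hw.tp_target, event->parent, attr.enable_on_exec and the __uprobe_perf_filter() check of the target ->mm; the real logic is in the hunk below.

    /*
     * Stand-alone illustration (not kernel code) of the decision the patch
     * adds: uprobe_apply() is skipped when open/close cannot change the set
     * of breakpoints that are already installed.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct fake_event {
            bool has_tp_target;     /* per-task event (event->hw.tp_target)    */
            bool has_parent;        /* inherited via copy_process()            */
            bool enable_on_exec;    /* attr.enable_on_exec                     */
            bool target_mm_probed;  /* another event already probes this ->mm  */
    };

    /* Mirrors the "done" computation in uprobe_perf_open(); nr_systemwide is
     * the counter value before it is incremented. */
    static bool can_skip_apply_on_open(unsigned int nr_systemwide,
                                       const struct fake_event *ev)
    {
            if (!ev->has_tp_target)
                    return nr_systemwide != 0;   /* already system-wide */

            return nr_systemwide != 0 || ev->has_parent ||
                   ev->enable_on_exec || ev->target_mm_probed;
    }

    int main(void)
    {
            struct fake_event inherited = {
                    .has_tp_target = true, .has_parent = true,
            };

            printf("skip uprobe_apply(): %d\n",
                   can_skip_apply_on_open(0, &inherited));   /* prints 1 */
            return 0;
    }

The close path has the same shape, with the PF_EXITING check on the target task taking the place of event->parent and enable_on_exec.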
Diffstat (limited to 'kernel/trace/trace_uprobe.c')
-rw-r--r--  kernel/trace/trace_uprobe.c  42
1 file changed, 36 insertions(+), 6 deletions(-)
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 2399f1416555..8dad2a92dee9 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -680,30 +680,60 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
 	return false;
 }
 
+static inline bool
+uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
+{
+	return __uprobe_perf_filter(&tu->filter, event->hw.tp_target->mm);
+}
+
 static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
 {
+	bool done;
+
 	write_lock(&tu->filter.rwlock);
-	if (event->hw.tp_target)
+	if (event->hw.tp_target) {
+		/*
+		 * event->parent != NULL means copy_process(), we can avoid
+		 * uprobe_apply(). current->mm must be probed and we can rely
+		 * on dup_mmap() which preserves the already installed bp's.
+		 *
+		 * attr.enable_on_exec means that exec/mmap will install the
+		 * breakpoints we need.
+		 */
+		done = tu->filter.nr_systemwide ||
+			event->parent || event->attr.enable_on_exec ||
+			uprobe_filter_event(tu, event);
 		list_add(&event->hw.tp_list, &tu->filter.perf_events);
-	else
+	} else {
+		done = tu->filter.nr_systemwide;
 		tu->filter.nr_systemwide++;
+	}
 	write_unlock(&tu->filter.rwlock);
 
-	uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
+	if (!done)
+		uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
 
 	return 0;
 }
 
 static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
 {
+	bool done;
+
 	write_lock(&tu->filter.rwlock);
-	if (event->hw.tp_target)
+	if (event->hw.tp_target) {
 		list_del(&event->hw.tp_list);
-	else
+		done = tu->filter.nr_systemwide ||
+			(event->hw.tp_target->flags & PF_EXITING) ||
+			uprobe_filter_event(tu, event);
+	} else {
 		tu->filter.nr_systemwide--;
+		done = tu->filter.nr_systemwide;
+	}
 	write_unlock(&tu->filter.rwlock);
 
-	uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
+	if (!done)
+		uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
 
 	return 0;
 }