author		Ingo Molnar <mingo@kernel.org>	2017-03-28 01:44:25 -0400
committer	Ingo Molnar <mingo@kernel.org>	2017-03-28 01:44:25 -0400
commit		d652f4bbca35100358bad83c29ec0e40a1f8e5cc (patch)
tree		a59e6ad6dca5e98f82ce87da2c2b69922c6a4da5
parent		e3a6a62400520452fe39740dca90a1d0b94b8f92 (diff)
parent		a01851faab4b485e94c2ceaa1d0208a8d16ce367 (diff)
Merge branch 'perf/urgent' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/x86/events/core.c   | 16
-rw-r--r--	kernel/events/core.c     | 64
-rw-r--r--	tools/perf/util/symbol.c |  2
3 files changed, 63 insertions, 19 deletions
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 349d4d17aa7f..2aa1ad194db2 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2101,8 +2101,8 @@ static int x86_pmu_event_init(struct perf_event *event)
 
 static void refresh_pce(void *ignored)
 {
-	if (current->mm)
-		load_mm_cr4(current->mm);
+	if (current->active_mm)
+		load_mm_cr4(current->active_mm);
 }
 
 static void x86_pmu_event_mapped(struct perf_event *event)
@@ -2110,6 +2110,18 @@ static void x86_pmu_event_mapped(struct perf_event *event)
 	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
 		return;
 
+	/*
+	 * This function relies on not being called concurrently in two
+	 * tasks in the same mm. Otherwise one task could observe
+	 * perf_rdpmc_allowed > 1 and return all the way back to
+	 * userspace with CR4.PCE clear while another task is still
+	 * doing on_each_cpu_mask() to propagate CR4.PCE.
+	 *
+	 * For now, this can't happen because all callers hold mmap_sem
+	 * for write. If this changes, we'll need a different solution.
+	 */
+	lockdep_assert_held_exclusive(&current->mm->mmap_sem);
+
 	if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
 		on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
 }
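
For context, the fast path this mapping hook enables looks roughly like the following userspace sketch (illustrative, not part of this commit; it assumes an already-opened perf_event fd on x86 and omits the pc->lock seqlock protocol a robust reader needs):

#include <stdint.h>
#include <sys/mman.h>
#include <linux/perf_event.h>

static inline uint64_t rdpmc(uint32_t counter)
{
	uint32_t lo, hi;

	asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
	return ((uint64_t)hi << 32) | lo;
}

static uint64_t read_counter_fast(int perf_fd)
{
	struct perf_event_mmap_page *pc =
		mmap(NULL, 4096, PROT_READ, MAP_SHARED, perf_fd, 0);

	if (pc == MAP_FAILED || !pc->cap_user_rdpmc || !pc->index)
		return 0;	/* real code would fall back to read(2) */

	/* pc->index is the hardware counter number plus one */
	return rdpmc(pc->index - 1);
}

cap_user_rdpmc is only set once the kernel has propagated CR4.PCE to the mm, which is exactly what x86_pmu_event_mapped() above arranges.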
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2d7990d4e988..6e75a5c9412d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4261,7 +4261,7 @@ int perf_event_release_kernel(struct perf_event *event)
 
 	raw_spin_lock_irq(&ctx->lock);
 	/*
-	 * Mark this even as STATE_DEAD, there is no external reference to it
+	 * Mark this event as STATE_DEAD, there is no external reference to it
 	 * anymore.
 	 *
 	 * Anybody acquiring event->child_mutex after the below loop _must_
@@ -10556,21 +10556,22 @@ void perf_event_free_task(struct task_struct *task)
 			continue;
 
 		mutex_lock(&ctx->mutex);
-again:
-		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
-				group_entry)
-			perf_free_event(event, ctx);
+		raw_spin_lock_irq(&ctx->lock);
+		/*
+		 * Destroy the task <-> ctx relation and mark the context dead.
+		 *
+		 * This is important because even though the task hasn't been
+		 * exposed yet the context has been (through child_list).
+		 */
+		RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL);
+		WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
+		put_task_struct(task); /* cannot be last */
+		raw_spin_unlock_irq(&ctx->lock);
 
-		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
-				group_entry)
+		list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry)
 			perf_free_event(event, ctx);
 
-		if (!list_empty(&ctx->pinned_groups) ||
-		    !list_empty(&ctx->flexible_groups))
-			goto again;
-
 		mutex_unlock(&ctx->mutex);
-
 		put_ctx(ctx);
 	}
 }
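
The rewrite works because every event, pinned or flexible, is also linked on ctx->event_list, so one list_for_each_entry_safe() pass replaces the goto-again retry over the two group lists. The _safe variant caches the next pointer before the current entry is freed; a standalone sketch of the same idiom in plain C (hypothetical node type, not kernel code):

#include <stdlib.h>

struct node {
	struct node *next;
};

static void free_all(struct node *head)
{
	struct node *cur, *tmp;

	/* cache ->next before free(), which is exactly what
	 * list_for_each_entry_safe() does for kernel lists */
	for (cur = head; cur; cur = tmp) {
		tmp = cur->next;
		free(cur);
	}
}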
@@ -10608,7 +10609,12 @@ const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
 }
 
 /*
- * inherit a event from parent task to child task:
+ * Inherit a event from parent task to child task.
+ *
+ * Returns:
+ *  - valid pointer on success
+ *  - NULL for orphaned events
+ *  - IS_ERR() on error
  */
 static struct perf_event *
 inherit_event(struct perf_event *parent_event,
@@ -10702,6 +10708,16 @@ inherit_event(struct perf_event *parent_event,
 	return child_event;
 }
 
+/*
+ * Inherits an event group.
+ *
+ * This will quietly suppress orphaned events; !inherit_event() is not an error.
+ * This matches with perf_event_release_kernel() removing all child events.
+ *
+ * Returns:
+ * - 0 on success
+ * - <0 on error
+ */
 static int inherit_group(struct perf_event *parent_event,
 			 struct task_struct *parent,
 			 struct perf_event_context *parent_ctx,
@@ -10716,6 +10732,11 @@ static int inherit_group(struct perf_event *parent_event,
 				 child, NULL, child_ctx);
 	if (IS_ERR(leader))
 		return PTR_ERR(leader);
+	/*
+	 * @leader can be NULL here because of is_orphaned_event(). In this
+	 * case inherit_event() will create individual events, similar to what
+	 * perf_group_detach() would do anyway.
+	 */
 	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
 		child_ctr = inherit_event(sub, parent, parent_ctx,
 					  child, leader, child_ctx);
@@ -10725,6 +10746,17 @@ static int inherit_group(struct perf_event *parent_event,
 	return 0;
 }
 
+/*
+ * Creates the child task context and tries to inherit the event-group.
+ *
+ * Clears @inherited_all on !attr.inherited or error. Note that we'll leave
+ * inherited_all set when we 'fail' to inherit an orphaned event; this is
+ * consistent with perf_event_release_kernel() removing all child events.
+ *
+ * Returns:
+ * - 0 on success
+ * - <0 on error
+ */
 static int
 inherit_task_group(struct perf_event *event, struct task_struct *parent,
 		   struct perf_event_context *parent_ctx,
@@ -10747,7 +10779,6 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
 	 * First allocate and initialize a context for the
 	 * child.
 	 */
-
 	child_ctx = alloc_perf_context(parent_ctx->pmu, child);
 	if (!child_ctx)
 		return -ENOMEM;
@@ -10809,7 +10840,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
 		ret = inherit_task_group(event, parent, parent_ctx,
 					 child, ctxn, &inherited_all);
 		if (ret)
-			break;
+			goto out_unlock;
 	}
 
 	/*
@@ -10825,7 +10856,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
 		ret = inherit_task_group(event, parent, parent_ctx,
 					 child, ctxn, &inherited_all);
 		if (ret)
-			goto out_unlock;
+			goto out_unlock;
 	}
 
 	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
@@ -10853,6 +10884,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
 	}
 
 	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+out_unlock:
 	mutex_unlock(&parent_ctx->mutex);
 
 	perf_unpin_context(parent_ctx);
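
The Returns: comments added above codify a three-way convention for inherit_event(): a valid pointer, NULL for a quietly skipped orphan, or an ERR_PTR()-encoded errno. A caller distinguishes the cases like this (hypothetical widget names; IS_ERR()/PTR_ERR() are the real <linux/err.h> helpers):

#include <linux/err.h>

static int attach_widget(struct parent *p)
{
	struct widget *w = make_widget(p); /* ptr, NULL, or ERR_PTR */

	if (IS_ERR(w))
		return PTR_ERR(w);	/* hard failure: propagate the errno */
	if (!w)
		return 0;		/* orphan quietly skipped: still success */

	use_widget(w);			/* normal success path */
	return 0;
}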
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 70e389bc4af7..9b4d8ba22fed 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -202,7 +202,7 @@ void symbols__fixup_end(struct rb_root *symbols)
 
 	/* Last entry */
 	if (curr->end == curr->start)
-		curr->end = roundup(curr->start, 4096);
+		curr->end = roundup(curr->start, 4096) + 4096;
 }
 
 void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
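
A worked example of the symbols__fixup_end() change. Rounding up is a no-op on an already page-aligned start, so the old code could leave the last symbol zero-sized; adding 4096 guarantees at least one page. ROUNDUP below is a local stand-in for the kernel's roundup() macro:

#include <stdio.h>

#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned long start = 0x1000;	/* zero-sized last symbol, page aligned */

	/* old: end == start (0x1000), the symbol stays empty */
	printf("old end: %#lx\n", ROUNDUP(start, 4096));
	/* new: end == 0x2000, the symbol spans at least one page */
	printf("new end: %#lx\n", ROUNDUP(start, 4096) + 4096);
	return 0;
}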