-rw-r--r--	arch/x86/events/core.c		16
-rw-r--r--	kernel/events/core.c		64
-rw-r--r--	tools/perf/util/symbol.c	 2
3 files changed, 63 insertions(+), 19 deletions(-)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 349d4d17aa7f..2aa1ad194db2 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2101,8 +2101,8 @@ static int x86_pmu_event_init(struct perf_event *event)
 
 static void refresh_pce(void *ignored)
 {
-	if (current->mm)
-		load_mm_cr4(current->mm);
+	if (current->active_mm)
+		load_mm_cr4(current->active_mm);
 }
 
 static void x86_pmu_event_mapped(struct perf_event *event)
@@ -2110,6 +2110,18 @@ static void x86_pmu_event_mapped(struct perf_event *event)
 	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
 		return;
 
+	/*
+	 * This function relies on not being called concurrently in two
+	 * tasks in the same mm. Otherwise one task could observe
+	 * perf_rdpmc_allowed > 1 and return all the way back to
+	 * userspace with CR4.PCE clear while another task is still
+	 * doing on_each_cpu_mask() to propagate CR4.PCE.
+	 *
+	 * For now, this can't happen because all callers hold mmap_sem
+	 * for write. If this changes, we'll need a different solution.
+	 */
+	lockdep_assert_held_exclusive(&current->mm->mmap_sem);
+
 	if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
 		on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
 }
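
For context: the mmap() of a perf event is what ends up in x86_pmu_event_mapped() and turns on CR4.PCE for the task's mm, which is why the propagation above has to reach every CPU running that mm. A minimal user-space sketch of that path (not part of this patch; assumes x86 and an event that actually gets a hardware counter):

#include <linux/perf_event.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static inline uint64_t rdpmc(uint32_t counter)
{
	uint32_t lo, hi;

	asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
	return (uint64_t)hi << 32 | lo;
}

int main(void)
{
	struct perf_event_attr attr;
	struct perf_event_mmap_page *pc;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.exclude_kernel = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* Mapping the control page is what enables RDPMC for this mm. */
	pc = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
	if (pc == MAP_FAILED)
		return 1;

	/* index == 0 means no hardware counter is assigned (yet). */
	if (pc->cap_user_rdpmc && pc->index)
		printf("raw counter: %llu\n",
		       (unsigned long long)rdpmc(pc->index - 1));

	return 0;
}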
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a17ed56c8ce1..ff01cba86f43 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4256,7 +4256,7 @@ int perf_event_release_kernel(struct perf_event *event)
 
 	raw_spin_lock_irq(&ctx->lock);
 	/*
-	 * Mark this even as STATE_DEAD, there is no external reference to it
+	 * Mark this event as STATE_DEAD, there is no external reference to it
 	 * anymore.
 	 *
 	 * Anybody acquiring event->child_mutex after the below loop _must_
@@ -10417,21 +10417,22 @@ void perf_event_free_task(struct task_struct *task)
 			continue;
 
 		mutex_lock(&ctx->mutex);
-again:
-		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
-				group_entry)
-			perf_free_event(event, ctx);
+		raw_spin_lock_irq(&ctx->lock);
+		/*
+		 * Destroy the task <-> ctx relation and mark the context dead.
+		 *
+		 * This is important because even though the task hasn't been
+		 * exposed yet the context has been (through child_list).
+		 */
+		RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL);
+		WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
+		put_task_struct(task); /* cannot be last */
+		raw_spin_unlock_irq(&ctx->lock);
 
-		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
-				group_entry)
+		list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry)
 			perf_free_event(event, ctx);
 
-		if (!list_empty(&ctx->pinned_groups) ||
-		    !list_empty(&ctx->flexible_groups))
-			goto again;
-
 		mutex_unlock(&ctx->mutex);
-
 		put_ctx(ctx);
 	}
 }
@@ -10469,7 +10470,12 @@ const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
 }
 
 /*
- * inherit a event from parent task to child task:
+ * Inherit a event from parent task to child task.
+ *
+ * Returns:
+ *  - valid pointer on success
+ *  - NULL for orphaned events
+ *  - IS_ERR() on error
  */
 static struct perf_event *
 inherit_event(struct perf_event *parent_event,
@@ -10563,6 +10569,16 @@ inherit_event(struct perf_event *parent_event,
 	return child_event;
 }
 
+/*
+ * Inherits an event group.
+ *
+ * This will quietly suppress orphaned events; !inherit_event() is not an error.
+ * This matches with perf_event_release_kernel() removing all child events.
+ *
+ * Returns:
+ *  - 0 on success
+ *  - <0 on error
+ */
 static int inherit_group(struct perf_event *parent_event,
 			 struct task_struct *parent,
 			 struct perf_event_context *parent_ctx,
@@ -10577,6 +10593,11 @@ static int inherit_group(struct perf_event *parent_event,
 				     child, NULL, child_ctx);
 	if (IS_ERR(leader))
 		return PTR_ERR(leader);
+	/*
+	 * @leader can be NULL here because of is_orphaned_event(). In this
+	 * case inherit_event() will create individual events, similar to what
+	 * perf_group_detach() would do anyway.
+	 */
 	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
 		child_ctr = inherit_event(sub, parent, parent_ctx,
 					  child, leader, child_ctx);
@@ -10586,6 +10607,17 @@ static int inherit_group(struct perf_event *parent_event,
 	return 0;
 }
 
+/*
+ * Creates the child task context and tries to inherit the event-group.
+ *
+ * Clears @inherited_all on !attr.inherited or error. Note that we'll leave
+ * inherited_all set when we 'fail' to inherit an orphaned event; this is
+ * consistent with perf_event_release_kernel() removing all child events.
+ *
+ * Returns:
+ *  - 0 on success
+ *  - <0 on error
+ */
 static int
 inherit_task_group(struct perf_event *event, struct task_struct *parent,
 		   struct perf_event_context *parent_ctx,
@@ -10608,7 +10640,6 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
 		 * First allocate and initialize a context for the
 		 * child.
 		 */
-
 		child_ctx = alloc_perf_context(parent_ctx->pmu, child);
 		if (!child_ctx)
 			return -ENOMEM;
@@ -10670,7 +10701,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
 		ret = inherit_task_group(event, parent, parent_ctx,
 					 child, ctxn, &inherited_all);
 		if (ret)
-			break;
+			goto out_unlock;
 	}
 
 	/*
@@ -10686,7 +10717,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
 		ret = inherit_task_group(event, parent, parent_ctx,
 					 child, ctxn, &inherited_all);
 		if (ret)
-			break;
+			goto out_unlock;
 	}
 
 	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
@@ -10714,6 +10745,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
 	}
 
 	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+out_unlock:
 	mutex_unlock(&parent_ctx->mutex);
 
 	perf_unpin_context(parent_ctx);
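
For context: the inherit_event()/inherit_group()/inherit_task_group() paths above run whenever a task with attr.inherit counters calls fork(), and the matching teardown runs through perf_event_free_task() or perf_event_release_kernel(). A minimal user-space sketch that drives exactly this path (not part of this patch):

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.inherit = 1;		/* copy the event into children on fork() */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	if (fork() == 0) {
		/* Child: burn some CPU under the inherited event. */
		for (volatile long i = 0; i < 100000000; i++)
			;
		_exit(0);
	}
	wait(NULL);

	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	/* Child counts are folded back into the parent event at child exit. */
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("task-clock (parent + child): %llu ns\n",
		       (unsigned long long)count);

	return 0;
}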
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 70e389bc4af7..9b4d8ba22fed 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -202,7 +202,7 @@ void symbols__fixup_end(struct rb_root *symbols)
 
 	/* Last entry */
 	if (curr->end == curr->start)
-		curr->end = roundup(curr->start, 4096);
+		curr->end = roundup(curr->start, 4096) + 4096;
 }
 
 void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
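
The extra page size matters when the last symbol's start address is already page aligned: roundup() then returns the address unchanged, so the old code left end == start and the last symbol had zero size. A small stand-alone sketch of the arithmetic (the roundup() macro below is a local stand-in for the one used by the tools):

#include <assert.h>
#include <stdint.h>

/* Stand-in for the roundup() helper used in tools/perf. */
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	uint64_t start = 0xffffffff81001000ULL;	/* already 4096-aligned */

	/* Old code: end == start, i.e. a zero-sized last symbol. */
	assert(roundup(start, 4096) == start);

	/* New code: the last symbol spans at least one page. */
	assert(roundup(start, 4096) + 4096 == start + 0x1000);
	return 0;
}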