diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-03-17 16:59:52 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-03-17 16:59:52 -0400 |
| commit | a7fc726bb2e3ab3e960064392aa7cb66999d6927 (patch) | |
| tree | 070f0b32f2dc5d8a06d48f1a15c161d4c97c0295 /kernel | |
| parent | cd21debe5318842a0bbd38c0327cfde2a3b90d65 (diff) | |
| parent | a01851faab4b485e94c2ceaa1d0208a8d16ce367 (diff) | |
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Thomas Gleixner:
"A set of perf related fixes:
- fix a CR4.PCE propagation issue caused by using mm instead of
active_mm, which propagated the wrong value.
- perf core fixes, which plug a use-after-free issue and make the
event inheritance on fork more robust.
- a tooling fix for symbol handling"
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf symbols: Fix symbols__fixup_end heuristic for corner cases
x86/perf: Clarify why x86_pmu_event_mapped() isn't racy
x86/perf: Fix CR4.PCE propagation to use active_mm instead of mm
perf/core: Better explain the inherit magic
perf/core: Simplify perf_event_free_task()
perf/core: Fix event inheritance on fork()
perf/core: Fix use-after-free in perf_release()
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/events/core.c | 64 |
1 file changed, 48 insertions, 16 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c index a17ed56c8ce1..ff01cba86f43 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
| @@ -4256,7 +4256,7 @@ int perf_event_release_kernel(struct perf_event *event) | |||
| 4256 | 4256 | ||
| 4257 | raw_spin_lock_irq(&ctx->lock); | 4257 | raw_spin_lock_irq(&ctx->lock); |
| 4258 | /* | 4258 | /* |
| 4259 | * Mark this even as STATE_DEAD, there is no external reference to it | 4259 | * Mark this event as STATE_DEAD, there is no external reference to it |
| 4260 | * anymore. | 4260 | * anymore. |
| 4261 | * | 4261 | * |
| 4262 | * Anybody acquiring event->child_mutex after the below loop _must_ | 4262 | * Anybody acquiring event->child_mutex after the below loop _must_ |
| @@ -10417,21 +10417,22 @@ void perf_event_free_task(struct task_struct *task) | |||
| 10417 | continue; | 10417 | continue; |
| 10418 | 10418 | ||
| 10419 | mutex_lock(&ctx->mutex); | 10419 | mutex_lock(&ctx->mutex); |
| 10420 | again: | 10420 | raw_spin_lock_irq(&ctx->lock); |
| 10421 | list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, | 10421 | /* |
| 10422 | group_entry) | 10422 | * Destroy the task <-> ctx relation and mark the context dead. |
| 10423 | perf_free_event(event, ctx); | 10423 | * |
| 10424 | * This is important because even though the task hasn't been | ||
| 10425 | * exposed yet the context has been (through child_list). | ||
| 10426 | */ | ||
| 10427 | RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL); | ||
| 10428 | WRITE_ONCE(ctx->task, TASK_TOMBSTONE); | ||
| 10429 | put_task_struct(task); /* cannot be last */ | ||
| 10430 | raw_spin_unlock_irq(&ctx->lock); | ||
| 10424 | 10431 | ||
| 10425 | list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, | 10432 | list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) |
| 10426 | group_entry) | ||
| 10427 | perf_free_event(event, ctx); | 10433 | perf_free_event(event, ctx); |
| 10428 | 10434 | ||
| 10429 | if (!list_empty(&ctx->pinned_groups) || | ||
| 10430 | !list_empty(&ctx->flexible_groups)) | ||
| 10431 | goto again; | ||
| 10432 | |||
| 10433 | mutex_unlock(&ctx->mutex); | 10435 | mutex_unlock(&ctx->mutex); |
| 10434 | |||
| 10435 | put_ctx(ctx); | 10436 | put_ctx(ctx); |
| 10436 | } | 10437 | } |
| 10437 | } | 10438 | } |
| @@ -10469,7 +10470,12 @@ const struct perf_event_attr *perf_event_attrs(struct perf_event *event) | |||
| 10469 | } | 10470 | } |
| 10470 | 10471 | ||
| 10471 | /* | 10472 | /* |
| 10472 | * inherit a event from parent task to child task: | 10473 | * Inherit a event from parent task to child task. |
| 10474 | * | ||
| 10475 | * Returns: | ||
| 10476 | * - valid pointer on success | ||
| 10477 | * - NULL for orphaned events | ||
| 10478 | * - IS_ERR() on error | ||
| 10473 | */ | 10479 | */ |
| 10474 | static struct perf_event * | 10480 | static struct perf_event * |
| 10475 | inherit_event(struct perf_event *parent_event, | 10481 | inherit_event(struct perf_event *parent_event, |
| @@ -10563,6 +10569,16 @@ inherit_event(struct perf_event *parent_event, | |||
| 10563 | return child_event; | 10569 | return child_event; |
| 10564 | } | 10570 | } |
| 10565 | 10571 | ||
| 10572 | /* | ||
| 10573 | * Inherits an event group. | ||
| 10574 | * | ||
| 10575 | * This will quietly suppress orphaned events; !inherit_event() is not an error. | ||
| 10576 | * This matches with perf_event_release_kernel() removing all child events. | ||
| 10577 | * | ||
| 10578 | * Returns: | ||
| 10579 | * - 0 on success | ||
| 10580 | * - <0 on error | ||
| 10581 | */ | ||
| 10566 | static int inherit_group(struct perf_event *parent_event, | 10582 | static int inherit_group(struct perf_event *parent_event, |
| 10567 | struct task_struct *parent, | 10583 | struct task_struct *parent, |
| 10568 | struct perf_event_context *parent_ctx, | 10584 | struct perf_event_context *parent_ctx, |
| @@ -10577,6 +10593,11 @@ static int inherit_group(struct perf_event *parent_event, | |||
| 10577 | child, NULL, child_ctx); | 10593 | child, NULL, child_ctx); |
| 10578 | if (IS_ERR(leader)) | 10594 | if (IS_ERR(leader)) |
| 10579 | return PTR_ERR(leader); | 10595 | return PTR_ERR(leader); |
| 10596 | /* | ||
| 10597 | * @leader can be NULL here because of is_orphaned_event(). In this | ||
| 10598 | * case inherit_event() will create individual events, similar to what | ||
| 10599 | * perf_group_detach() would do anyway. | ||
| 10600 | */ | ||
| 10580 | list_for_each_entry(sub, &parent_event->sibling_list, group_entry) { | 10601 | list_for_each_entry(sub, &parent_event->sibling_list, group_entry) { |
| 10581 | child_ctr = inherit_event(sub, parent, parent_ctx, | 10602 | child_ctr = inherit_event(sub, parent, parent_ctx, |
| 10582 | child, leader, child_ctx); | 10603 | child, leader, child_ctx); |
| @@ -10586,6 +10607,17 @@ static int inherit_group(struct perf_event *parent_event, | |||
| 10586 | return 0; | 10607 | return 0; |
| 10587 | } | 10608 | } |
| 10588 | 10609 | ||
| 10610 | /* | ||
| 10611 | * Creates the child task context and tries to inherit the event-group. | ||
| 10612 | * | ||
| 10613 | * Clears @inherited_all on !attr.inherited or error. Note that we'll leave | ||
| 10614 | * inherited_all set when we 'fail' to inherit an orphaned event; this is | ||
| 10615 | * consistent with perf_event_release_kernel() removing all child events. | ||
| 10616 | * | ||
| 10617 | * Returns: | ||
| 10618 | * - 0 on success | ||
| 10619 | * - <0 on error | ||
| 10620 | */ | ||
| 10589 | static int | 10621 | static int |
| 10590 | inherit_task_group(struct perf_event *event, struct task_struct *parent, | 10622 | inherit_task_group(struct perf_event *event, struct task_struct *parent, |
| 10591 | struct perf_event_context *parent_ctx, | 10623 | struct perf_event_context *parent_ctx, |
| @@ -10608,7 +10640,6 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent, | |||
| 10608 | * First allocate and initialize a context for the | 10640 | * First allocate and initialize a context for the |
| 10609 | * child. | 10641 | * child. |
| 10610 | */ | 10642 | */ |
| 10611 | |||
| 10612 | child_ctx = alloc_perf_context(parent_ctx->pmu, child); | 10643 | child_ctx = alloc_perf_context(parent_ctx->pmu, child); |
| 10613 | if (!child_ctx) | 10644 | if (!child_ctx) |
| 10614 | return -ENOMEM; | 10645 | return -ENOMEM; |
| @@ -10670,7 +10701,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn) | |||
| 10670 | ret = inherit_task_group(event, parent, parent_ctx, | 10701 | ret = inherit_task_group(event, parent, parent_ctx, |
| 10671 | child, ctxn, &inherited_all); | 10702 | child, ctxn, &inherited_all); |
| 10672 | if (ret) | 10703 | if (ret) |
| 10673 | break; | 10704 | goto out_unlock; |
| 10674 | } | 10705 | } |
| 10675 | 10706 | ||
| 10676 | /* | 10707 | /* |
| @@ -10686,7 +10717,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn) | |||
| 10686 | ret = inherit_task_group(event, parent, parent_ctx, | 10717 | ret = inherit_task_group(event, parent, parent_ctx, |
| 10687 | child, ctxn, &inherited_all); | 10718 | child, ctxn, &inherited_all); |
| 10688 | if (ret) | 10719 | if (ret) |
| 10689 | break; | 10720 | goto out_unlock; |
| 10690 | } | 10721 | } |
| 10691 | 10722 | ||
| 10692 | raw_spin_lock_irqsave(&parent_ctx->lock, flags); | 10723 | raw_spin_lock_irqsave(&parent_ctx->lock, flags); |
| @@ -10714,6 +10745,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn) | |||
| 10714 | } | 10745 | } |
| 10715 | 10746 | ||
| 10716 | raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); | 10747 | raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); |
| 10748 | out_unlock: | ||
| 10717 | mutex_unlock(&parent_ctx->mutex); | 10749 | mutex_unlock(&parent_ctx->mutex); |
| 10718 | 10750 | ||
| 10719 | perf_unpin_context(parent_ctx); | 10751 | perf_unpin_context(parent_ctx); |
