diff options
Diffstat (limited to 'kernel/events/core.c')
| -rw-r--r-- | kernel/events/core.c | 64 |
1 file changed, 48 insertions, 16 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c index 2d7990d4e988..6e75a5c9412d 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
| @@ -4261,7 +4261,7 @@ int perf_event_release_kernel(struct perf_event *event) | |||
| 4261 | 4261 | ||
| 4262 | raw_spin_lock_irq(&ctx->lock); | 4262 | raw_spin_lock_irq(&ctx->lock); |
| 4263 | /* | 4263 | /* |
| 4264 | * Mark this even as STATE_DEAD, there is no external reference to it | 4264 | * Mark this event as STATE_DEAD, there is no external reference to it |
| 4265 | * anymore. | 4265 | * anymore. |
| 4266 | * | 4266 | * |
| 4267 | * Anybody acquiring event->child_mutex after the below loop _must_ | 4267 | * Anybody acquiring event->child_mutex after the below loop _must_ |
| @@ -10556,21 +10556,22 @@ void perf_event_free_task(struct task_struct *task) | |||
| 10556 | continue; | 10556 | continue; |
| 10557 | 10557 | ||
| 10558 | mutex_lock(&ctx->mutex); | 10558 | mutex_lock(&ctx->mutex); |
| 10559 | again: | 10559 | raw_spin_lock_irq(&ctx->lock); |
| 10560 | list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, | 10560 | /* |
| 10561 | group_entry) | 10561 | * Destroy the task <-> ctx relation and mark the context dead. |
| 10562 | perf_free_event(event, ctx); | 10562 | * |
| 10563 | * This is important because even though the task hasn't been | ||
| 10564 | * exposed yet the context has been (through child_list). | ||
| 10565 | */ | ||
| 10566 | RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL); | ||
| 10567 | WRITE_ONCE(ctx->task, TASK_TOMBSTONE); | ||
| 10568 | put_task_struct(task); /* cannot be last */ | ||
| 10569 | raw_spin_unlock_irq(&ctx->lock); | ||
| 10563 | 10570 | ||
| 10564 | list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, | 10571 | list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) |
| 10565 | group_entry) | ||
| 10566 | perf_free_event(event, ctx); | 10572 | perf_free_event(event, ctx); |
| 10567 | 10573 | ||
| 10568 | if (!list_empty(&ctx->pinned_groups) || | ||
| 10569 | !list_empty(&ctx->flexible_groups)) | ||
| 10570 | goto again; | ||
| 10571 | |||
| 10572 | mutex_unlock(&ctx->mutex); | 10574 | mutex_unlock(&ctx->mutex); |
| 10573 | |||
| 10574 | put_ctx(ctx); | 10575 | put_ctx(ctx); |
| 10575 | } | 10576 | } |
| 10576 | } | 10577 | } |
| @@ -10608,7 +10609,12 @@ const struct perf_event_attr *perf_event_attrs(struct perf_event *event) | |||
| 10608 | } | 10609 | } |
| 10609 | 10610 | ||
| 10610 | /* | 10611 | /* |
| 10611 | * inherit a event from parent task to child task: | 10612 | * Inherit a event from parent task to child task. |
| 10613 | * | ||
| 10614 | * Returns: | ||
| 10615 | * - valid pointer on success | ||
| 10616 | * - NULL for orphaned events | ||
| 10617 | * - IS_ERR() on error | ||
| 10612 | */ | 10618 | */ |
| 10613 | static struct perf_event * | 10619 | static struct perf_event * |
| 10614 | inherit_event(struct perf_event *parent_event, | 10620 | inherit_event(struct perf_event *parent_event, |
| @@ -10702,6 +10708,16 @@ inherit_event(struct perf_event *parent_event, | |||
| 10702 | return child_event; | 10708 | return child_event; |
| 10703 | } | 10709 | } |
| 10704 | 10710 | ||
| 10711 | /* | ||
| 10712 | * Inherits an event group. | ||
| 10713 | * | ||
| 10714 | * This will quietly suppress orphaned events; !inherit_event() is not an error. | ||
| 10715 | * This matches with perf_event_release_kernel() removing all child events. | ||
| 10716 | * | ||
| 10717 | * Returns: | ||
| 10718 | * - 0 on success | ||
| 10719 | * - <0 on error | ||
| 10720 | */ | ||
| 10705 | static int inherit_group(struct perf_event *parent_event, | 10721 | static int inherit_group(struct perf_event *parent_event, |
| 10706 | struct task_struct *parent, | 10722 | struct task_struct *parent, |
| 10707 | struct perf_event_context *parent_ctx, | 10723 | struct perf_event_context *parent_ctx, |
| @@ -10716,6 +10732,11 @@ static int inherit_group(struct perf_event *parent_event, | |||
| 10716 | child, NULL, child_ctx); | 10732 | child, NULL, child_ctx); |
| 10717 | if (IS_ERR(leader)) | 10733 | if (IS_ERR(leader)) |
| 10718 | return PTR_ERR(leader); | 10734 | return PTR_ERR(leader); |
| 10735 | /* | ||
| 10736 | * @leader can be NULL here because of is_orphaned_event(). In this | ||
| 10737 | * case inherit_event() will create individual events, similar to what | ||
| 10738 | * perf_group_detach() would do anyway. | ||
| 10739 | */ | ||
| 10719 | list_for_each_entry(sub, &parent_event->sibling_list, group_entry) { | 10740 | list_for_each_entry(sub, &parent_event->sibling_list, group_entry) { |
| 10720 | child_ctr = inherit_event(sub, parent, parent_ctx, | 10741 | child_ctr = inherit_event(sub, parent, parent_ctx, |
| 10721 | child, leader, child_ctx); | 10742 | child, leader, child_ctx); |
| @@ -10725,6 +10746,17 @@ static int inherit_group(struct perf_event *parent_event, | |||
| 10725 | return 0; | 10746 | return 0; |
| 10726 | } | 10747 | } |
| 10727 | 10748 | ||
| 10749 | /* | ||
| 10750 | * Creates the child task context and tries to inherit the event-group. | ||
| 10751 | * | ||
| 10752 | * Clears @inherited_all on !attr.inherited or error. Note that we'll leave | ||
| 10753 | * inherited_all set when we 'fail' to inherit an orphaned event; this is | ||
| 10754 | * consistent with perf_event_release_kernel() removing all child events. | ||
| 10755 | * | ||
| 10756 | * Returns: | ||
| 10757 | * - 0 on success | ||
| 10758 | * - <0 on error | ||
| 10759 | */ | ||
| 10728 | static int | 10760 | static int |
| 10729 | inherit_task_group(struct perf_event *event, struct task_struct *parent, | 10761 | inherit_task_group(struct perf_event *event, struct task_struct *parent, |
| 10730 | struct perf_event_context *parent_ctx, | 10762 | struct perf_event_context *parent_ctx, |
| @@ -10747,7 +10779,6 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent, | |||
| 10747 | * First allocate and initialize a context for the | 10779 | * First allocate and initialize a context for the |
| 10748 | * child. | 10780 | * child. |
| 10749 | */ | 10781 | */ |
| 10750 | |||
| 10751 | child_ctx = alloc_perf_context(parent_ctx->pmu, child); | 10782 | child_ctx = alloc_perf_context(parent_ctx->pmu, child); |
| 10752 | if (!child_ctx) | 10783 | if (!child_ctx) |
| 10753 | return -ENOMEM; | 10784 | return -ENOMEM; |
| @@ -10809,7 +10840,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn) | |||
| 10809 | ret = inherit_task_group(event, parent, parent_ctx, | 10840 | ret = inherit_task_group(event, parent, parent_ctx, |
| 10810 | child, ctxn, &inherited_all); | 10841 | child, ctxn, &inherited_all); |
| 10811 | if (ret) | 10842 | if (ret) |
| 10812 | break; | 10843 | goto out_unlock; |
| 10813 | } | 10844 | } |
| 10814 | 10845 | ||
| 10815 | /* | 10846 | /* |
| @@ -10825,7 +10856,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn) | |||
| 10825 | ret = inherit_task_group(event, parent, parent_ctx, | 10856 | ret = inherit_task_group(event, parent, parent_ctx, |
| 10826 | child, ctxn, &inherited_all); | 10857 | child, ctxn, &inherited_all); |
| 10827 | if (ret) | 10858 | if (ret) |
| 10828 | break; | 10859 | goto out_unlock; |
| 10829 | } | 10860 | } |
| 10830 | 10861 | ||
| 10831 | raw_spin_lock_irqsave(&parent_ctx->lock, flags); | 10862 | raw_spin_lock_irqsave(&parent_ctx->lock, flags); |
| @@ -10853,6 +10884,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn) | |||
| 10853 | } | 10884 | } |
| 10854 | 10885 | ||
| 10855 | raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); | 10886 | raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); |
| 10887 | out_unlock: | ||
| 10856 | mutex_unlock(&parent_ctx->mutex); | 10888 | mutex_unlock(&parent_ctx->mutex); |
| 10857 | 10889 | ||
| 10858 | perf_unpin_context(parent_ctx); | 10890 | perf_unpin_context(parent_ctx); |
