Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c	| 30 +++++++++++++++++++++++++++---
1 file changed, 27 insertions(+), 3 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index b98bed3d8182..65b09a836cc3 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1620,8 +1620,12 @@ static void rotate_ctx(struct perf_event_context *ctx)
 {
 	raw_spin_lock(&ctx->lock);
 
-	/* Rotate the first entry last of non-pinned groups */
-	list_rotate_left(&ctx->flexible_groups);
+	/*
+	 * Rotate the first entry last of non-pinned groups. Rotation might be
+	 * disabled by the inheritance code.
+	 */
+	if (!ctx->rotate_disable)
+		list_rotate_left(&ctx->flexible_groups);
 
 	raw_spin_unlock(&ctx->lock);
 }
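
For reference, rotate_ctx() as it reads with the hunk above applied, assembled from the context and '+' lines; the rotate_disable flag itself is assumed to be a new field of struct perf_event_context added outside this file's diff:

static void rotate_ctx(struct perf_event_context *ctx)
{
	raw_spin_lock(&ctx->lock);

	/*
	 * Rotate the first entry last of non-pinned groups. Rotation might be
	 * disabled by the inheritance code.
	 */
	if (!ctx->rotate_disable)
		list_rotate_left(&ctx->flexible_groups);

	raw_spin_unlock(&ctx->lock);
}

The flag is only tested under ctx->lock, so a path that sets it while holding the same lock (as perf_event_init_task() does further down) is guaranteed that no rotation of ->flexible_groups runs concurrently with its unlocked list walk.
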
@@ -1773,7 +1777,13 @@ static u64 perf_event_read(struct perf_event *event)
 		unsigned long flags;
 
 		raw_spin_lock_irqsave(&ctx->lock, flags);
-		update_context_time(ctx);
+		/*
+		 * may read while context is not active
+		 * (e.g., thread is blocked), in that case
+		 * we cannot update context time
+		 */
+		if (ctx->is_active)
+			update_context_time(ctx);
 		update_event_times(event);
 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	}
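
The guard above matters because update_context_time() derives the context's accumulated time from a timestamp that is only maintained while the context is scheduled in. A rough sketch of that helper, based on the kernel of this era and shown only to motivate the if (ctx->is_active) check (it is not part of this diff):

static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	/*
	 * Only meaningful while the context is active: ctx->timestamp is
	 * refreshed when the context is scheduled in.
	 */
	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

When a read() arrives while the owning thread is blocked and its context is scheduled out, ctx->timestamp is stale, so advancing ctx->time from it would be wrong; the new code therefore skips the context-time update and only refreshes the per-event times via update_event_times().
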
@@ -5616,6 +5626,7 @@ int perf_event_init_task(struct task_struct *child)
 	struct perf_event *event;
 	struct task_struct *parent = current;
 	int inherited_all = 1;
+	unsigned long flags;
 	int ret = 0;
 
 	child->perf_event_ctxp = NULL;
@@ -5656,6 +5667,15 @@ int perf_event_init_task(struct task_struct *child)
 			break;
 	}
 
+	/*
+	 * We can't hold ctx->lock when iterating the ->flexible_group list due
+	 * to allocations, but we need to prevent rotation because
+	 * rotate_ctx() will change the list from interrupt context.
+	 */
+	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+	parent_ctx->rotate_disable = 1;
+	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
 	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
 		ret = inherit_task_group(event, parent, parent_ctx, child,
 					 &inherited_all);
@@ -5663,6 +5683,10 @@ int perf_event_init_task(struct task_struct *child)
 			break;
 	}
 
+	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+	parent_ctx->rotate_disable = 0;
+	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
 	child_ctx = child->perf_event_ctxp;
 
 	if (child_ctx && inherited_all) {
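
Taken together, the last three hunks bracket the unlocked walk of the parent's flexible groups in perf_event_init_task(). A sketch of the resulting sequence, assembled from the hunks above (the if (ret) test inside the loop is not visible as context here and is assumed; the rest of the function is elided):

	/*
	 * We can't hold ctx->lock when iterating the ->flexible_group list due
	 * to allocations, but we need to prevent rotation because
	 * rotate_ctx() will change the list from interrupt context.
	 */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 1;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx, child,
					 &inherited_all);
		if (ret)
			break;
	}

	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

Since rotate_ctx() only rotates ->flexible_groups under ctx->lock while rotate_disable is clear, setting the flag under that same lock before the walk and clearing it afterwards keeps the list stable for the whole iteration, even though the allocations done inside inherit_task_group() make it impossible to hold ctx->lock across the loop.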