diff options (stat)
 include/linux/perf_event.h |  1 +
 kernel/perf_event.c        | 22 ++++++++++++++++++++--
 2 files changed, 21 insertions(+), 2 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 40150f345982..142e3d6042c7 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -850,6 +850,7 @@ struct perf_event_context {
 	int				nr_active;
 	int				is_active;
 	int				nr_stat;
+	int				rotate_disable;
 	atomic_t			refcount;
 	struct task_struct		*task;
 
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 671f6c8c8a32..f365dd8ef8b0 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1622,8 +1622,12 @@ static void rotate_ctx(struct perf_event_context *ctx)
 {
 	raw_spin_lock(&ctx->lock);
 
-	/* Rotate the first entry last of non-pinned groups */
-	list_rotate_left(&ctx->flexible_groups);
+	/*
+	 * Rotate the first entry last of non-pinned groups. Rotation might be
+	 * disabled by the inheritance code.
+	 */
+	if (!ctx->rotate_disable)
+		list_rotate_left(&ctx->flexible_groups);
 
 	raw_spin_unlock(&ctx->lock);
 }
@@ -6162,6 +6166,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 	struct perf_event *event;
 	struct task_struct *parent = current;
 	int inherited_all = 1;
+	unsigned long flags;
 	int ret = 0;
 
 	child->perf_event_ctxp[ctxn] = NULL;
@@ -6202,6 +6207,15 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 			break;
 	}
 
+	/*
+	 * We can't hold ctx->lock when iterating the ->flexible_group list due
+	 * to allocations, but we need to prevent rotation because
+	 * rotate_ctx() will change the list from interrupt context.
+	 */
+	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+	parent_ctx->rotate_disable = 1;
+	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
 	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
 		ret = inherit_task_group(event, parent, parent_ctx,
 					 child, ctxn, &inherited_all);
@@ -6209,6 +6223,10 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 			break;
 	}
 
+	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+	parent_ctx->rotate_disable = 0;
+	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
 	child_ctx = child->perf_event_ctxp[ctxn];
 
 	if (child_ctx && inherited_all) {