aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2010-11-24 04:05:55 -0500
committerIngo Molnar <mingo@elte.hu>2010-11-26 09:00:56 -0500
commitdddd3379a619a4cb8247bfd3c94ca9ae3797aa2e (patch)
tree9b090784b46956d236b51c4addfcd97f575f205e /kernel
parent02a9d03772aa1ff33a26180a2da0bfb191240eda (diff)
perf: Fix inherit vs. context rotation bug
It was found that sometimes children of tasks with inherited events had one extra event. Eventually it turned out to be due to the list rotation not being exclusive with the list iteration in the inheritance code. Cure this by temporarily disabling the rotation while we inherit the events. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> LKML-Reference: <new-submission> Cc: <stable@kernel.org> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/perf_event.c22
1 files changed, 20 insertions, 2 deletions
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 671f6c8c8a32..f365dd8ef8b0 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1622,8 +1622,12 @@ static void rotate_ctx(struct perf_event_context *ctx)
1622{ 1622{
1623 raw_spin_lock(&ctx->lock); 1623 raw_spin_lock(&ctx->lock);
1624 1624
1625 /* Rotate the first entry last of non-pinned groups */ 1625 /*
1626 list_rotate_left(&ctx->flexible_groups); 1626 * Rotate the first entry last of non-pinned groups. Rotation might be
1627 * disabled by the inheritance code.
1628 */
1629 if (!ctx->rotate_disable)
1630 list_rotate_left(&ctx->flexible_groups);
1627 1631
1628 raw_spin_unlock(&ctx->lock); 1632 raw_spin_unlock(&ctx->lock);
1629} 1633}
@@ -6162,6 +6166,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
6162 struct perf_event *event; 6166 struct perf_event *event;
6163 struct task_struct *parent = current; 6167 struct task_struct *parent = current;
6164 int inherited_all = 1; 6168 int inherited_all = 1;
6169 unsigned long flags;
6165 int ret = 0; 6170 int ret = 0;
6166 6171
6167 child->perf_event_ctxp[ctxn] = NULL; 6172 child->perf_event_ctxp[ctxn] = NULL;
@@ -6202,6 +6207,15 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
6202 break; 6207 break;
6203 } 6208 }
6204 6209
6210 /*
6211 * We can't hold ctx->lock when iterating the ->flexible_group list due
6212 * to allocations, but we need to prevent rotation because
6213 * rotate_ctx() will change the list from interrupt context.
6214 */
6215 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
6216 parent_ctx->rotate_disable = 1;
6217 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
6218
6205 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) { 6219 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
6206 ret = inherit_task_group(event, parent, parent_ctx, 6220 ret = inherit_task_group(event, parent, parent_ctx,
6207 child, ctxn, &inherited_all); 6221 child, ctxn, &inherited_all);
@@ -6209,6 +6223,10 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
6209 break; 6223 break;
6210 } 6224 }
6211 6225
6226 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
6227 parent_ctx->rotate_disable = 0;
6228 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
6229
6212 child_ctx = child->perf_event_ctxp[ctxn]; 6230 child_ctx = child->perf_event_ctxp[ctxn];
6213 6231
6214 if (child_ctx && inherited_all) { 6232 if (child_ctx && inherited_all) {