aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2016-02-24 12:45:40 -0500
committerIngo Molnar <mingo@kernel.org>2016-02-25 02:42:32 -0500
commit84c4e620d35f49f486a900af214ad12276afb386 (patch)
tree3cc7f2e5f4c470e9b51efb7e0fe42312e2c83404
parent6dc390ad61ac8dfca5fa9b0823981fb6f7ec17a0 (diff)
perf: Close install vs. exit race
Consider the following scenario:

  CPU0                                  CPU1

  ctx = find_get_ctx();
                                        perf_event_exit_task_context()
                                          mutex_lock(&ctx->mutex);
  perf_install_in_context(ctx, ...);
    /* NO-OP */
                                          mutex_unlock(&ctx->mutex);
  ...

  perf_release()
    WARN_ON_ONCE(event->state != STATE_EXIT);

Since the event doesn't pass through perf_remove_from_context() because perf_install_in_context() NO-OPs because the ctx is dead, and perf_event_exit_task_context() will not observe the event because it's not attached yet, the event->state will not be set. Solve this by revalidating ctx->task after we acquire ctx->mutex and failing the event creation as a whole. Tested-by: Alexander Shishkin <alexander.shishkin@linux.intel.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Reviewed-by: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: Jiri Olsa <jolsa@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: dvyukov@google.com Cc: eranian@google.com Cc: oleg@redhat.com Cc: panand@redhat.com Cc: sasha.levin@oracle.com Cc: vince@deater.net Link: http://lkml.kernel.org/r/20160224174947.626853419@infradead.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--kernel/events/core.c35
1 files changed, 26 insertions, 9 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0d58522103cd..d7b0316e3465 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2158,13 +2158,15 @@ perf_install_in_context(struct perf_event_context *ctx,
2158 */ 2158 */
2159 raw_spin_lock_irq(&ctx->lock); 2159 raw_spin_lock_irq(&ctx->lock);
2160 task = ctx->task; 2160 task = ctx->task;
2161
2161 /* 2162 /*
2162 * Worse, we cannot even rely on the ctx actually existing anymore. If 2163 * If between ctx = find_get_context() and mutex_lock(&ctx->mutex) the
2163 * between find_get_context() and perf_install_in_context() the task 2164 * ctx gets destroyed, we must not install an event into it.
2164 * went through perf_event_exit_task() its dead and we should not be 2165 *
2165 * adding new events. 2166 * This is normally tested for after we acquire the mutex, so this is
2167 * a sanity check.
2166 */ 2168 */
2167 if (task == TASK_TOMBSTONE) { 2169 if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
2168 raw_spin_unlock_irq(&ctx->lock); 2170 raw_spin_unlock_irq(&ctx->lock);
2169 return; 2171 return;
2170 } 2172 }
@@ -8389,10 +8391,19 @@ SYSCALL_DEFINE5(perf_event_open,
8389 if (move_group) { 8391 if (move_group) {
8390 gctx = group_leader->ctx; 8392 gctx = group_leader->ctx;
8391 mutex_lock_double(&gctx->mutex, &ctx->mutex); 8393 mutex_lock_double(&gctx->mutex, &ctx->mutex);
8394 if (gctx->task == TASK_TOMBSTONE) {
8395 err = -ESRCH;
8396 goto err_locked;
8397 }
8392 } else { 8398 } else {
8393 mutex_lock(&ctx->mutex); 8399 mutex_lock(&ctx->mutex);
8394 } 8400 }
8395 8401
8402 if (ctx->task == TASK_TOMBSTONE) {
8403 err = -ESRCH;
8404 goto err_locked;
8405 }
8406
8396 if (!perf_event_validate_size(event)) { 8407 if (!perf_event_validate_size(event)) {
8397 err = -E2BIG; 8408 err = -E2BIG;
8398 goto err_locked; 8409 goto err_locked;
@@ -8563,12 +8574,14 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
8563 8574
8564 WARN_ON_ONCE(ctx->parent_ctx); 8575 WARN_ON_ONCE(ctx->parent_ctx);
8565 mutex_lock(&ctx->mutex); 8576 mutex_lock(&ctx->mutex);
8577 if (ctx->task == TASK_TOMBSTONE) {
8578 err = -ESRCH;
8579 goto err_unlock;
8580 }
8581
8566 if (!exclusive_event_installable(event, ctx)) { 8582 if (!exclusive_event_installable(event, ctx)) {
8567 mutex_unlock(&ctx->mutex);
8568 perf_unpin_context(ctx);
8569 put_ctx(ctx);
8570 err = -EBUSY; 8583 err = -EBUSY;
8571 goto err_free; 8584 goto err_unlock;
8572 } 8585 }
8573 8586
8574 perf_install_in_context(ctx, event, cpu); 8587 perf_install_in_context(ctx, event, cpu);
@@ -8577,6 +8590,10 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
8577 8590
8578 return event; 8591 return event;
8579 8592
8593err_unlock:
8594 mutex_unlock(&ctx->mutex);
8595 perf_unpin_context(ctx);
8596 put_ctx(ctx);
8580err_free: 8597err_free:
8581 free_event(event); 8598 free_event(event);
8582err: 8599err: