about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2016-04-26 05:36:53 -0400
committerIngo Molnar <mingo@kernel.org>2016-04-28 04:32:41 -0400
commit79c9ce57eb2d5f1497546a3946b4ae21b6fdc438 (patch)
treecdbdbbf5fb4887df3623feeb1b7d4f29e4b49d31
parent0a25556f84d5f79e68e9502bb1f32a43377ab2bf (diff)
perf/core: Fix perf_event_open() vs. execve() race
Jann reported that the ptrace_may_access() check in
find_lively_task_by_vpid() is racy against exec().

Specifically:

  perf_event_open()		execve()
    ptrace_may_access()
				  commit_creds()
    ...				  if (get_dumpable() != SUID_DUMP_USER)
				    perf_event_exit_task();
    perf_install_in_context()

would result in installing a counter across the creds boundary.

Fix this by wrapping lots of perf_event_open() in cred_guard_mutex.
This should be fine as perf_event_exit_task() is already called with
cred_guard_mutex held, so all perf locks already nest inside it.

Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--kernel/events/core.c52
1 file changed, 36 insertions, 16 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2c78b6f47339..4e2ebf6f2f1f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1106,6 +1106,7 @@ static void put_ctx(struct perf_event_context *ctx)
1106 * function. 1106 * function.
1107 * 1107 *
1108 * Lock order: 1108 * Lock order:
1109 * cred_guard_mutex
1109 * task_struct::perf_event_mutex 1110 * task_struct::perf_event_mutex
1110 * perf_event_context::mutex 1111 * perf_event_context::mutex
1111 * perf_event::child_mutex; 1112 * perf_event::child_mutex;
@@ -3421,7 +3422,6 @@ static struct task_struct *
3421find_lively_task_by_vpid(pid_t vpid) 3422find_lively_task_by_vpid(pid_t vpid)
3422{ 3423{
3423 struct task_struct *task; 3424 struct task_struct *task;
3424 int err;
3425 3425
3426 rcu_read_lock(); 3426 rcu_read_lock();
3427 if (!vpid) 3427 if (!vpid)
@@ -3435,16 +3435,7 @@ find_lively_task_by_vpid(pid_t vpid)
3435 if (!task) 3435 if (!task)
3436 return ERR_PTR(-ESRCH); 3436 return ERR_PTR(-ESRCH);
3437 3437
3438 /* Reuse ptrace permission checks for now. */
3439 err = -EACCES;
3440 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
3441 goto errout;
3442
3443 return task; 3438 return task;
3444errout:
3445 put_task_struct(task);
3446 return ERR_PTR(err);
3447
3448} 3439}
3449 3440
3450/* 3441/*
@@ -8414,6 +8405,24 @@ SYSCALL_DEFINE5(perf_event_open,
8414 8405
8415 get_online_cpus(); 8406 get_online_cpus();
8416 8407
8408 if (task) {
8409 err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
8410 if (err)
8411 goto err_cpus;
8412
8413 /*
8414 * Reuse ptrace permission checks for now.
8415 *
8416 * We must hold cred_guard_mutex across this and any potential
8417 * perf_install_in_context() call for this new event to
8418 * serialize against exec() altering our credentials (and the
8419 * perf_event_exit_task() that could imply).
8420 */
8421 err = -EACCES;
8422 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
8423 goto err_cred;
8424 }
8425
8417 if (flags & PERF_FLAG_PID_CGROUP) 8426 if (flags & PERF_FLAG_PID_CGROUP)
8418 cgroup_fd = pid; 8427 cgroup_fd = pid;
8419 8428
@@ -8421,7 +8430,7 @@ SYSCALL_DEFINE5(perf_event_open,
8421 NULL, NULL, cgroup_fd); 8430 NULL, NULL, cgroup_fd);
8422 if (IS_ERR(event)) { 8431 if (IS_ERR(event)) {
8423 err = PTR_ERR(event); 8432 err = PTR_ERR(event);
8424 goto err_cpus; 8433 goto err_cred;
8425 } 8434 }
8426 8435
8427 if (is_sampling_event(event)) { 8436 if (is_sampling_event(event)) {
@@ -8480,11 +8489,6 @@ SYSCALL_DEFINE5(perf_event_open,
8480 goto err_context; 8489 goto err_context;
8481 } 8490 }
8482 8491
8483 if (task) {
8484 put_task_struct(task);
8485 task = NULL;
8486 }
8487
8488 /* 8492 /*
8489 * Look up the group leader (we will attach this event to it): 8493 * Look up the group leader (we will attach this event to it):
8490 */ 8494 */
@@ -8582,6 +8586,11 @@ SYSCALL_DEFINE5(perf_event_open,
8582 8586
8583 WARN_ON_ONCE(ctx->parent_ctx); 8587 WARN_ON_ONCE(ctx->parent_ctx);
8584 8588
8589 /*
8590 * This is the point on no return; we cannot fail hereafter. This is
8591 * where we start modifying current state.
8592 */
8593
8585 if (move_group) { 8594 if (move_group) {
8586 /* 8595 /*
8587 * See perf_event_ctx_lock() for comments on the details 8596 * See perf_event_ctx_lock() for comments on the details
@@ -8653,6 +8662,11 @@ SYSCALL_DEFINE5(perf_event_open,
8653 mutex_unlock(&gctx->mutex); 8662 mutex_unlock(&gctx->mutex);
8654 mutex_unlock(&ctx->mutex); 8663 mutex_unlock(&ctx->mutex);
8655 8664
8665 if (task) {
8666 mutex_unlock(&task->signal->cred_guard_mutex);
8667 put_task_struct(task);
8668 }
8669
8656 put_online_cpus(); 8670 put_online_cpus();
8657 8671
8658 mutex_lock(&current->perf_event_mutex); 8672 mutex_lock(&current->perf_event_mutex);
@@ -8685,6 +8699,9 @@ err_alloc:
8685 */ 8699 */
8686 if (!event_file) 8700 if (!event_file)
8687 free_event(event); 8701 free_event(event);
8702err_cred:
8703 if (task)
8704 mutex_unlock(&task->signal->cred_guard_mutex);
8688err_cpus: 8705err_cpus:
8689 put_online_cpus(); 8706 put_online_cpus();
8690err_task: 8707err_task:
@@ -8969,6 +8986,9 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
8969 8986
8970/* 8987/*
8971 * When a child task exits, feed back event values to parent events. 8988 * When a child task exits, feed back event values to parent events.
8989 *
8990 * Can be called with cred_guard_mutex held when called from
8991 * install_exec_creds().
8972 */ 8992 */
8973void perf_event_exit_task(struct task_struct *child) 8993void perf_event_exit_task(struct task_struct *child)
8974{ 8994{