diff options
author | Matt Helsley <matthltc@us.ibm.com> | 2010-09-13 16:01:20 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2010-09-15 04:44:00 -0400 |
commit | 38a81da2205f94e8a2a834b51a6b99c91fc7c2e8 (patch) | |
tree | 5227c665f37d1c472608a44aaad0a04be68befb2 /kernel/perf_event.c | |
parent | 2ebd4ffb6d0cb877787b1e42be8485820158857e (diff) |
perf events: Clean up pid passing
The kernel perf event creation path shouldn't use find_task_by_vpid()
because a vpid exists in a specific namespace. find_task_by_vpid() uses
current's pid namespace, which isn't always the correct namespace to use
for the vpid in all the places perf_event_create_kernel_counter() (and
thus find_get_context()) is called.
The goal is to clean up pid namespace handling and prevent bugs like:
https://bugzilla.kernel.org/show_bug.cgi?id=17281
Instead of using pids, switch find_get_context() to use task struct
pointers directly. The syscall is responsible for resolving the pid to
a task struct. This moves the pid namespace resolution into the syscall
much like every other syscall that takes pid parameters.
Signed-off-by: Matt Helsley <matthltc@us.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Robin Green <greenrd@greenrd.org>
Cc: Prasad <prasad@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
LKML-Reference: <a134e5e392ab0204961fd1a62c84a222bf5874a9.1284407763.git.matthltc@us.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r-- | kernel/perf_event.c | 21 |
1 file changed, 10 insertions, 11 deletions
diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 3f5309db72f1..86f394e15d53 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c | |||
@@ -2053,15 +2053,14 @@ errout: | |||
2053 | } | 2053 | } |
2054 | 2054 | ||
2055 | static struct perf_event_context * | 2055 | static struct perf_event_context * |
2056 | find_get_context(struct pmu *pmu, pid_t pid, int cpu) | 2056 | find_get_context(struct pmu *pmu, struct task_struct *task, int cpu) |
2057 | { | 2057 | { |
2058 | struct perf_event_context *ctx; | 2058 | struct perf_event_context *ctx; |
2059 | struct perf_cpu_context *cpuctx; | 2059 | struct perf_cpu_context *cpuctx; |
2060 | struct task_struct *task; | ||
2061 | unsigned long flags; | 2060 | unsigned long flags; |
2062 | int ctxn, err; | 2061 | int ctxn, err; |
2063 | 2062 | ||
2064 | if (pid == -1 && cpu != -1) { | 2063 | if (!task && cpu != -1) { |
2065 | /* Must be root to operate on a CPU event: */ | 2064 | /* Must be root to operate on a CPU event: */ |
2066 | if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) | 2065 | if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) |
2067 | return ERR_PTR(-EACCES); | 2066 | return ERR_PTR(-EACCES); |
@@ -2084,10 +2083,6 @@ find_get_context(struct pmu *pmu, pid_t pid, int cpu) | |||
2084 | return ctx; | 2083 | return ctx; |
2085 | } | 2084 | } |
2086 | 2085 | ||
2087 | task = find_lively_task_by_vpid(pid); | ||
2088 | if (IS_ERR(task)) | ||
2089 | return (void*)task; | ||
2090 | |||
2091 | err = -EINVAL; | 2086 | err = -EINVAL; |
2092 | ctxn = pmu->task_ctx_nr; | 2087 | ctxn = pmu->task_ctx_nr; |
2093 | if (ctxn < 0) | 2088 | if (ctxn < 0) |
@@ -5527,6 +5522,7 @@ SYSCALL_DEFINE5(perf_event_open, | |||
5527 | struct perf_event_context *ctx; | 5522 | struct perf_event_context *ctx; |
5528 | struct file *event_file = NULL; | 5523 | struct file *event_file = NULL; |
5529 | struct file *group_file = NULL; | 5524 | struct file *group_file = NULL; |
5525 | struct task_struct *task = NULL; | ||
5530 | struct pmu *pmu; | 5526 | struct pmu *pmu; |
5531 | int event_fd; | 5527 | int event_fd; |
5532 | int fput_needed = 0; | 5528 | int fput_needed = 0; |
@@ -5581,10 +5577,13 @@ SYSCALL_DEFINE5(perf_event_open, | |||
5581 | if ((pmu->task_ctx_nr == perf_sw_context) && group_leader) | 5577 | if ((pmu->task_ctx_nr == perf_sw_context) && group_leader) |
5582 | pmu = group_leader->pmu; | 5578 | pmu = group_leader->pmu; |
5583 | 5579 | ||
5580 | if (pid != -1) | ||
5581 | task = find_lively_task_by_vpid(pid); | ||
5582 | |||
5584 | /* | 5583 | /* |
5585 | * Get the target context (task or percpu): | 5584 | * Get the target context (task or percpu): |
5586 | */ | 5585 | */ |
5587 | ctx = find_get_context(pmu, pid, cpu); | 5586 | ctx = find_get_context(pmu, task, cpu); |
5588 | if (IS_ERR(ctx)) { | 5587 | if (IS_ERR(ctx)) { |
5589 | err = PTR_ERR(ctx); | 5588 | err = PTR_ERR(ctx); |
5590 | goto err_group_fd; | 5589 | goto err_group_fd; |
@@ -5666,11 +5665,11 @@ err_fd: | |||
5666 | * | 5665 | * |
5667 | * @attr: attributes of the counter to create | 5666 | * @attr: attributes of the counter to create |
5668 | * @cpu: cpu in which the counter is bound | 5667 | * @cpu: cpu in which the counter is bound |
5669 | * @pid: task to profile | 5668 | * @task: task to profile (NULL for percpu) |
5670 | */ | 5669 | */ |
5671 | struct perf_event * | 5670 | struct perf_event * |
5672 | perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, | 5671 | perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, |
5673 | pid_t pid, | 5672 | struct task_struct *task, |
5674 | perf_overflow_handler_t overflow_handler) | 5673 | perf_overflow_handler_t overflow_handler) |
5675 | { | 5674 | { |
5676 | struct perf_event_context *ctx; | 5675 | struct perf_event_context *ctx; |
@@ -5687,7 +5686,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, | |||
5687 | goto err; | 5686 | goto err; |
5688 | } | 5687 | } |
5689 | 5688 | ||
5690 | ctx = find_get_context(event->pmu, pid, cpu); | 5689 | ctx = find_get_context(event->pmu, task, cpu); |
5691 | if (IS_ERR(ctx)) { | 5690 | if (IS_ERR(ctx)) { |
5692 | err = PTR_ERR(ctx); | 5691 | err = PTR_ERR(ctx); |
5693 | goto err_free; | 5692 | goto err_free; |