author     Matt Helsley <matthltc@us.ibm.com>    2010-09-13 16:01:19 -0400
committer  Ingo Molnar <mingo@elte.hu>           2010-09-15 04:44:00 -0400
commit     2ebd4ffb6d0cb877787b1e42be8485820158857e
tree       21db710a8f3de28f9554674e947da796a23d2e92 /kernel/perf_event.c
parent     d958077d007d98125766d11e82da2fd6497b91d6
perf events: Split out task search into helper
Split out the code which searches for non-exiting tasks into its own
helper. Creating this helper not only makes the code slightly more
readable, it also prepares for moving the search out of find_get_context()
in a subsequent commit.
Signed-off-by: Matt Helsley <matthltc@us.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Robin Green <greenrd@greenrd.org>
Cc: Prasad <prasad@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
LKML-Reference: <561205417b450b8a4bf7488374541d64b4690431.1284407762.git.matthltc@us.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_event.c')
 kernel/perf_event.c | 63
 1 file changed, 40 insertions(+), 23 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 440f9ca067b2..3f5309db72f1 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2015,6 +2015,43 @@ alloc_perf_context(struct pmu *pmu, struct task_struct *task)
 	return ctx;
 }
 
+static struct task_struct *
+find_lively_task_by_vpid(pid_t vpid)
+{
+	struct task_struct *task;
+	int err;
+
+	rcu_read_lock();
+	if (!vpid)
+		task = current;
+	else
+		task = find_task_by_vpid(vpid);
+	if (task)
+		get_task_struct(task);
+	rcu_read_unlock();
+
+	if (!task)
+		return ERR_PTR(-ESRCH);
+
+	/*
+	 * Can't attach events to a dying task.
+	 */
+	err = -ESRCH;
+	if (task->flags & PF_EXITING)
+		goto errout;
+
+	/* Reuse ptrace permission checks for now. */
+	err = -EACCES;
+	if (!ptrace_may_access(task, PTRACE_MODE_READ))
+		goto errout;
+
+	return task;
+errout:
+	put_task_struct(task);
+	return ERR_PTR(err);
+
+}
+
 static struct perf_event_context *
 find_get_context(struct pmu *pmu, pid_t pid, int cpu)
 {
@@ -2047,29 +2084,9 @@ find_get_context(struct pmu *pmu, pid_t pid, int cpu)
 		return ctx;
 	}
 
-	rcu_read_lock();
-	if (!pid)
-		task = current;
-	else
-		task = find_task_by_vpid(pid);
-	if (task)
-		get_task_struct(task);
-	rcu_read_unlock();
-
-	if (!task)
-		return ERR_PTR(-ESRCH);
-
-	/*
-	 * Can't attach events to a dying task.
-	 */
-	err = -ESRCH;
-	if (task->flags & PF_EXITING)
-		goto errout;
-
-	/* Reuse ptrace permission checks for now. */
-	err = -EACCES;
-	if (!ptrace_may_access(task, PTRACE_MODE_READ))
-		goto errout;
+	task = find_lively_task_by_vpid(pid);
+	if (IS_ERR(task))
+		return (void*)task;
 
 	err = -EINVAL;
 	ctxn = pmu->task_ctx_nr;
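
The new helper reports failures through the kernel's ERR_PTR()/IS_ERR() convention, which is why find_get_context() can propagate its result with a bare cast in the second hunk. As a rough illustration of that idiom, here is a minimal user-space sketch: struct task and find_task() are hypothetical stand-ins, and only the encoding of a negative errno in the pointer value mirrors the kernel's include/linux/err.h.

/*
 * Minimal user-space sketch of the ERR_PTR()/IS_ERR() idiom.  Hypothetical
 * example only: struct task and find_task() are stand-ins, not kernel APIs.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Error codes live in the last page of the address space, as in the kernel. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct task { int pid; };

/* Hypothetical lookup: returns a valid object or a pointer-encoded errno. */
static struct task *find_task(int pid)
{
	struct task *t;

	if (pid <= 0)
		return ERR_PTR(-ESRCH);	/* no such process */

	t = malloc(sizeof(*t));
	if (!t)
		return ERR_PTR(-ENOMEM);
	t->pid = pid;
	return t;
}

int main(void)
{
	struct task *t = find_task(-1);

	if (IS_ERR(t)) {
		/* One check covers every failure mode; prints -3 (ESRCH) here. */
		printf("lookup failed: %ld\n", PTR_ERR(t));
		return 1;
	}
	free(t);
	return 0;
}

Packing the error code into the returned pointer keeps the helper's interface to a single value, so the caller needs just one IS_ERR() check instead of separate NULL and errno plumbing.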