author     Joel Fernandes <joelaf@google.com>              2017-06-26 22:01:55 -0400
committer  Steven Rostedt (VMware) <rostedt@goodmis.org>   2017-06-27 13:30:28 -0400
commit     d914ba37d7145acb9fd3bb23075c2d56e5a44eb6 (patch)
tree       edb1e6c1bfa0f0a92e13e9f891be524a31b23cea /kernel/trace/trace.c
parent     83dd14933e33a45e9b366c572e15505982b46845 (diff)
tracing: Add support for recording tgid of tasks

In order to support recording of tgid, the following changes are made:

* Introduce a new API (tracing_record_taskinfo) to record the tgid along with
  the task's comm at the same time. This has the benefit of not setting
  trace_cmdline_save before all of the information for a task is saved.
* Add a new API tracing_record_taskinfo_sched_switch to record task information
  for two tasks at a time (previous and next) and use it from the sched_switch
  probe.
* Preserve the old API (tracing_record_cmdline) as a wrapper around the new one
  so that existing callers aren't affected.
* Reuse the existing sched_switch and sched_wakeup probes to record tgid
  information, and add a new option 'record-tgid' to enable recording of tgid.

When the record-tgid option isn't enabled to begin with, care is taken to
ensure there is no memory or runtime overhead (a probe-side usage sketch
follows below).
Link: http://lkml.kernel.org/r/20170627020155.5139-1-joelaf@google.com
Cc: kernel-team@android.com
Cc: Ingo Molnar <mingo@redhat.com>
Tested-by: Michael Sartain <mikesart@gmail.com>
Signed-off-by: Joel Fernandes <joelaf@google.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
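
For illustration, a minimal sketch of the probe-side usage. The probe itself lives in kernel/trace/trace_sched_switch.c and is outside this diff; the sched_cmdline_ref/sched_tgid_ref counters below are hypothetical stand-ins for whatever enable state the probe tracks, and only tracing_record_taskinfo_sched_switch() and the TRACE_RECORD_* flags come from this patch:

/* Sketch: sched_switch probe feeding comm and tgid through the new API. */
static void
probe_sched_switch(void *ignore, bool preempt,
		   struct task_struct *prev, struct task_struct *next)
{
	int flags = 0;

	/* Hypothetical enable counts for the two record options. */
	if (sched_cmdline_ref)
		flags |= TRACE_RECORD_CMDLINE;
	if (sched_tgid_ref)
		flags |= TRACE_RECORD_TGID;

	if (flags)
		tracing_record_taskinfo_sched_switch(prev, next, flags);
}

From userspace, tgid recording is toggled through the new trace option, e.g. echo 1 > /sys/kernel/debug/tracing/options/record-tgid.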
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--   kernel/trace/trace.c   105
1 file changed, 95 insertions(+), 10 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 14318ce92b13..ab9db750dd29 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -87,7 +87,7 @@ dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
  * tracing is active, only save the comm when a trace event
  * occurred.
  */
-static DEFINE_PER_CPU(bool, trace_cmdline_save);
+static DEFINE_PER_CPU(bool, trace_taskinfo_save);
 
 /*
  * Kill all tracing for good (never come back).
@@ -790,7 +790,7 @@ EXPORT_SYMBOL_GPL(tracing_on);
 static __always_inline void
 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
 {
-	__this_cpu_write(trace_cmdline_save, true);
+	__this_cpu_write(trace_taskinfo_save, true);
 
 	/* If this is the temp buffer, we need to commit fully */
 	if (this_cpu_read(trace_buffered_event) == event) {
@@ -1709,6 +1709,8 @@ void tracing_reset_all_online_cpus(void)
 	}
 }
 
+static int *tgid_map;
+
 #define SAVED_CMDLINES_DEFAULT 128
 #define NO_CMDLINE_MAP UINT_MAX
 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
@@ -1722,7 +1724,7 @@ struct saved_cmdlines_buffer {
 static struct saved_cmdlines_buffer *savedcmd;
 
 /* temporary disable recording */
-static atomic_t trace_record_cmdline_disabled __read_mostly;
+static atomic_t trace_record_taskinfo_disabled __read_mostly;
 
 static inline char *get_saved_cmdlines(int idx)
 {
@@ -1990,16 +1992,87 @@ void trace_find_cmdline(int pid, char comm[])
 	preempt_enable();
 }
 
-void tracing_record_cmdline(struct task_struct *tsk)
+int trace_find_tgid(int pid)
+{
+	if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
+		return 0;
+
+	return tgid_map[pid];
+}
+
+static int trace_save_tgid(struct task_struct *tsk)
 {
-	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
+	if (unlikely(!tgid_map || !tsk->pid || tsk->pid > PID_MAX_DEFAULT))
+		return 0;
+
+	tgid_map[tsk->pid] = tsk->tgid;
+	return 1;
+}
+
+static bool tracing_record_taskinfo_skip(int flags)
+{
+	if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
+		return true;
+	if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
+		return true;
+	if (!__this_cpu_read(trace_taskinfo_save))
+		return true;
+	return false;
+}
+
+/**
+ * tracing_record_taskinfo - record the task info of a task
+ *
+ * @task  - task to record
+ * @flags - TRACE_RECORD_CMDLINE for recording comm
+ *        - TRACE_RECORD_TGID for recording tgid
+ */
+void tracing_record_taskinfo(struct task_struct *task, int flags)
+{
+	if (tracing_record_taskinfo_skip(flags))
+		return;
+	if ((flags & TRACE_RECORD_CMDLINE) && !trace_save_cmdline(task))
+		return;
+	if ((flags & TRACE_RECORD_TGID) && !trace_save_tgid(task))
 		return;
 
-	if (!__this_cpu_read(trace_cmdline_save))
+	__this_cpu_write(trace_taskinfo_save, false);
+}
+
+/**
+ * tracing_record_taskinfo_sched_switch - record task info for sched_switch
+ *
+ * @prev - previous task during sched_switch
+ * @next - next task during sched_switch
+ * @flags - TRACE_RECORD_CMDLINE for recording comm
+ *          TRACE_RECORD_TGID for recording tgid
+ */
+void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
+					  struct task_struct *next, int flags)
+{
+	if (tracing_record_taskinfo_skip(flags))
 		return;
 
-	if (trace_save_cmdline(tsk))
-		__this_cpu_write(trace_cmdline_save, false);
+	if ((flags & TRACE_RECORD_CMDLINE) &&
+	    (!trace_save_cmdline(prev) || !trace_save_cmdline(next)))
+		return;
+
+	if ((flags & TRACE_RECORD_TGID) &&
+	    (!trace_save_tgid(prev) || !trace_save_tgid(next)))
+		return;
+
+	__this_cpu_write(trace_taskinfo_save, false);
+}
+
+/* Helpers to record a specific task information */
+void tracing_record_cmdline(struct task_struct *task)
+{
+	tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
+}
+
+void tracing_record_tgid(struct task_struct *task)
+{
+	tracing_record_taskinfo(task, TRACE_RECORD_TGID);
 }
 
 /*
@@ -3144,7 +3217,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 #endif
 
 	if (!iter->snapshot)
-		atomic_inc(&trace_record_cmdline_disabled);
+		atomic_inc(&trace_record_taskinfo_disabled);
 
 	if (*pos != iter->pos) {
 		iter->ent = NULL;
@@ -3189,7 +3262,7 @@ static void s_stop(struct seq_file *m, void *p)
 #endif
 
 	if (!iter->snapshot)
-		atomic_dec(&trace_record_cmdline_disabled);
+		atomic_dec(&trace_record_taskinfo_disabled);
 
 	trace_access_unlock(iter->cpu_file);
 	trace_event_read_unlock();
@@ -4236,6 +4309,18 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
 	if (mask == TRACE_ITER_RECORD_CMD)
 		trace_event_enable_cmd_record(enabled);
 
+	if (mask == TRACE_ITER_RECORD_TGID) {
+		if (!tgid_map)
+			tgid_map = kzalloc((PID_MAX_DEFAULT + 1) * sizeof(*tgid_map),
+					   GFP_KERNEL);
+		if (!tgid_map) {
+			tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
+			return -ENOMEM;
+		}
+
+		trace_event_enable_tgid_record(enabled);
+	}
+
 	if (mask == TRACE_ITER_EVENT_FORK)
 		trace_event_follow_fork(tr, enabled);
 
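
On the output side (also outside this file), the recorded mapping is read back with trace_find_tgid() when formatting trace entries. A hypothetical consumer sketch built only on the trace_seq helpers; print_tgid_column is a made-up name for illustration, not a function added by this patch:

/* Sketch: resolve and print the tgid saved for a pid, or a placeholder. */
static void print_tgid_column(struct trace_seq *s, int pid)
{
	int tgid = trace_find_tgid(pid);

	if (tgid)
		trace_seq_printf(s, " %7d", tgid);
	else
		trace_seq_puts(s, " -------");
}

trace_find_tgid() returns 0 for pids that were never recorded (or when tgid_map was never allocated), so a fallback column keeps the output aligned.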