author		Steven Rostedt (Red Hat) <rostedt@goodmis.org>	2015-09-28 10:11:44 -0400
committer	Steven Rostedt <rostedt@goodmis.org>		2015-09-28 10:11:44 -0400
commit		6b1032d53cdbda39ad56c8692bac17a66475b57d (patch)
tree		3e5524913fdcdbbe49e62a3dd581842e60483c89 /kernel/trace/trace_output.c
parent		ca475e831fd59e131bccd60de43c4104d82d02f5 (diff)
tracing: Inject seq_print_userip_objs() into its only user
seq_print_userip_objs() is used in only one location, in one file. Instead of keeping it as an external function, go one step further than making it static: inject its code directly into its only user. It does not make the calling function much more complex.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
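The shape of the change is the usual one for single-use helpers. A minimal, self-contained sketch of the before/after pattern, with hypothetical names (print_entries(), print_stack_before/after()) standing in for the real functions rather than the kernel code itself:

#include <stdio.h>

#define N_ENTRIES 4

/* Before: a single-use helper, formerly visible outside this file. */
static void print_entries(const unsigned long *caller, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		printf(" => %#lx\n", caller[i]);
}

static void print_stack_before(const unsigned long *caller)
{
	puts("<stack trace>");
	print_entries(caller, N_ENTRIES);
}

/* After: the helper's body is injected into its only user, so there is
 * no separate function (or extern declaration) left to maintain. */
static void print_stack_after(const unsigned long *caller)
{
	unsigned int i;

	puts("<stack trace>");
	for (i = 0; i < N_ENTRIES; i++)
		printf(" => %#lx\n", caller[i]);
}

int main(void)
{
	const unsigned long ips[N_ENTRIES] = { 0x400a10, 0x400b24, 0x7f3c, 0 };

	print_stack_before(ips);	/* both variants print identical output */
	print_stack_after(ips);
	return 0;
}

The behavior is unchanged; what is gained is that the loop body and its caller can now be read, and modified, in one place.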
Diffstat (limited to 'kernel/trace/trace_output.c')
-rw-r--r--	kernel/trace/trace_output.c	81
1 file changed, 36 insertions(+), 45 deletions(-)
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 8e481a84aeea..881cbdae1913 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -355,50 +355,6 @@ int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
 }
 
 int
-seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
-		      unsigned long sym_flags)
-{
-	struct mm_struct *mm = NULL;
-	unsigned int i;
-
-	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
-		struct task_struct *task;
-		/*
-		 * we do the lookup on the thread group leader,
-		 * since individual threads might have already quit!
-		 */
-		rcu_read_lock();
-		task = find_task_by_vpid(entry->tgid);
-		if (task)
-			mm = get_task_mm(task);
-		rcu_read_unlock();
-	}
-
-	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
-		unsigned long ip = entry->caller[i];
-
-		if (ip == ULONG_MAX || trace_seq_has_overflowed(s))
-			break;
-
-		trace_seq_puts(s, " => ");
-
-		if (!ip) {
-			trace_seq_puts(s, "??");
-			trace_seq_putc(s, '\n');
-			continue;
-		}
-
-		seq_print_user_ip(s, mm, ip, sym_flags);
-		trace_seq_putc(s, '\n');
-	}
-
-	if (mm)
-		mmput(mm);
-
-	return !trace_seq_has_overflowed(s);
-}
-
-int
 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
 {
 	if (!ip) {
@@ -1081,11 +1037,46 @@ static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
 {
 	struct userstack_entry *field;
 	struct trace_seq *s = &iter->seq;
+	struct mm_struct *mm = NULL;
+	unsigned int i;
 
 	trace_assign_type(field, iter->ent);
 
 	trace_seq_puts(s, "<user stack trace>\n");
-	seq_print_userip_objs(field, s, flags);
+
+	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
+		struct task_struct *task;
+		/*
+		 * we do the lookup on the thread group leader,
+		 * since individual threads might have already quit!
+		 */
+		rcu_read_lock();
+		task = find_task_by_vpid(field->tgid);
+		if (task)
+			mm = get_task_mm(task);
+		rcu_read_unlock();
+	}
+
+	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
+		unsigned long ip = field->caller[i];
+
+		if (ip == ULONG_MAX || trace_seq_has_overflowed(s))
+			break;
+
+		trace_seq_puts(s, " => ");
+
+		if (!ip) {
+			trace_seq_puts(s, "??");
+			trace_seq_putc(s, '\n');
+			continue;
+		}
+
+		seq_print_user_ip(s, mm, ip, flags);
+		trace_seq_putc(s, '\n');
+	}
+
+	if (mm)
+		mmput(mm);
 
 	return trace_handle_return(s);
 }
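For background, the task-to-mm lookup the patch open-codes into trace_user_stack_print() follows a standard kernel pattern: resolve the pid under rcu_read_lock(), pin the address space with get_task_mm() before dropping the lock, then release it later with mmput(). A condensed sketch of that pattern, with a hypothetical wrapper name (mm_for_tgid()) and headers as they were in kernels of this era:

#include <linux/sched.h>	/* find_task_by_vpid(), get_task_mm() */
#include <linux/rcupdate.h>	/* rcu_read_lock()/rcu_read_unlock() */

/* Hypothetical wrapper illustrating the lookup the patch inlines. */
static struct mm_struct *mm_for_tgid(pid_t tgid)
{
	struct task_struct *task;
	struct mm_struct *mm = NULL;

	/*
	 * find_task_by_vpid() must run under the RCU read lock, and the
	 * task can exit at any moment; get_task_mm() takes a reference
	 * on the mm so it remains valid after the lock is dropped.
	 */
	rcu_read_lock();
	task = find_task_by_vpid(tgid);
	if (task)
		mm = get_task_mm(task);	/* may still return NULL */
	rcu_read_unlock();

	return mm;	/* caller must mmput() a non-NULL result */
}

The single mmput() at the end of the rewritten function mirrors this one-reference-taken, one-reference-dropped discipline; mm stays NULL whenever TRACE_ITER_SYM_USEROBJ is not set, in which case the addresses are printed without symbol resolution.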