author     Ingo Molnar <mingo@elte.hu>            2008-05-12 15:20:45 -0400
committer  Thomas Gleixner <tglx@linutronix.de>   2008-05-23 14:41:13 -0400
commit     4e3c3333f3bd7eedfd21b1155b3c7cd24fc7f754 (patch)
tree       1b53d424c618e646cba8e3a8d6b27b5cec9cb8c2 /kernel
parent     77a2b37d227483fe52aead242652aee406c25bf0 (diff)
ftrace: fix time offset
fix time offset calculations and ordering, plus make code more consistent.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
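In short: each trace_array_cpu gains a time_offset, and when the iterator steps from an entry on one CPU to an apparently older entry on another, sync_time_offset() bumps the second CPU's offset so printed timestamps never run backwards. A minimal standalone sketch of that clamping idea follows; the types and helper names in it are illustrative only, not the kernel's API.

/*
 * Sketch of the time-offset clamp used by this patch (hypothetical
 * names, not kernel code): when the merged stream would go backwards,
 * grow the current CPU's offset so the warp is never observable.
 */
#include <stdio.h>

typedef unsigned long long cycle_t;

struct cpu_buf {
        cycle_t time_offset;    /* per-CPU correction, only ever grows */
};

static cycle_t merge_timestamp(struct cpu_buf *prev, cycle_t prev_raw,
                               struct cpu_buf *cur, cycle_t cur_raw)
{
        cycle_t prev_t = prev_raw + prev->time_offset;
        cycle_t t = cur_raw + cur->time_offset;

        /* time appears to go backwards: push the current CPU forward */
        if (t < prev_t) {
                cur->time_offset += prev_t - t;
                t = prev_t;
        }
        return t;
}

int main(void)
{
        struct cpu_buf cpu0 = { 0 }, cpu1 = { 0 };

        /* made-up raw timestamps: cpu1's clock lags cpu0 by 30 units */
        cycle_t a = merge_timestamp(&cpu1, 0, &cpu0, 100);  /* stays 100 */
        cycle_t b = merge_timestamp(&cpu0, 100, &cpu1, 70); /* clamped to 100 */

        printf("%llu %llu (cpu1 offset now %llu)\n", a, b, cpu1.time_offset);
        return 0;
}

Because the offset only grows, the per-CPU streams converge quickly and later entries from the lagging CPU need no further correction.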
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/trace/trace.c   75
-rw-r--r--   kernel/trace/trace.h    9
2 files changed, 63 insertions, 21 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c736dd2e068d..8755a4370484 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -120,7 +120,7 @@ static DEFINE_SPINLOCK(ftrace_max_lock);
  * structure. (this way the maximum trace is permanently saved,
  * for later retrieval via /debugfs/tracing/latency_trace)
  */
-static void notrace
+static notrace void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
         struct trace_array_cpu *data = tr->data[cpu];
@@ -333,15 +333,16 @@ void unregister_tracer(struct tracer *type)
         mutex_unlock(&trace_types_lock);
 }
 
-void notrace tracing_reset(struct trace_array_cpu *data)
+notrace void tracing_reset(struct trace_array_cpu *data)
 {
         data->trace_idx = 0;
         data->trace_current = head_page(data);
         data->trace_current_idx = 0;
+        data->time_offset = 0;
 }
 
 #ifdef CONFIG_FTRACE
-static void notrace
+static notrace void
 function_trace_call(unsigned long ip, unsigned long parent_ip)
 {
         struct trace_array *tr = &global_trace;
@@ -398,7 +399,7 @@ static void trace_init_cmdlines(void)
 
 notrace void trace_stop_cmdline_recording(void);
 
-static void notrace trace_save_cmdline(struct task_struct *tsk)
+static notrace void trace_save_cmdline(struct task_struct *tsk)
 {
         unsigned map;
         unsigned idx;
@@ -624,6 +625,7 @@ static void *find_next_entry_inc(struct trace_iterator *iter)
                 iter->idx++;
                 iter->next_idx[next_cpu]++;
                 iter->next_page_idx[next_cpu]++;
+
                 if (iter->next_page_idx[next_cpu] >= ENTRIES_PER_PAGE) {
                         struct trace_array_cpu *data = iter->tr->data[next_cpu];
 
@@ -635,19 +637,21 @@ static void *find_next_entry_inc(struct trace_iterator *iter)
                                         data->trace_pages.next;
                 }
         }
+        iter->prev_ent = iter->ent;
+        iter->prev_cpu = iter->cpu;
+
         iter->ent = next;
         iter->cpu = next_cpu;
 
         return next ? iter : NULL;
 }
 
-static void notrace *
-s_next(struct seq_file *m, void *v, loff_t *pos)
+static notrace void *s_next(struct seq_file *m, void *v, loff_t *pos)
 {
         struct trace_iterator *iter = m->private;
-        void *ent;
         void *last_ent = iter->ent;
         int i = (int)*pos;
+        void *ent;
 
         (*pos)++;
 
@@ -693,6 +697,8 @@ static void *s_start(struct seq_file *m, loff_t *pos)
         iter->ent = NULL;
         iter->cpu = 0;
         iter->idx = -1;
+        iter->prev_ent = NULL;
+        iter->prev_cpu = -1;
 
         for_each_possible_cpu(i) {
                 iter->next_idx[i] = 0;
@@ -752,7 +758,7 @@ seq_print_sym_offset(struct seq_file *m, const char *fmt, unsigned long address)
 # define IP_FMT "%016lx"
 #endif
 
-static void notrace
+static notrace void
 seq_print_ip_sym(struct seq_file *m, unsigned long ip, unsigned long sym_flags)
 {
         if (!ip) {
@@ -769,7 +775,7 @@ seq_print_ip_sym(struct seq_file *m, unsigned long ip, unsigned long sym_flags)
                 seq_printf(m, " <" IP_FMT ">", ip);
 }
 
-static void notrace print_lat_help_header(struct seq_file *m)
+static notrace void print_lat_help_header(struct seq_file *m)
 {
         seq_puts(m, "# _------=> CPU# \n");
         seq_puts(m, "# / _-----=> irqs-off \n");
@@ -782,14 +788,14 @@ static void notrace print_lat_help_header(struct seq_file *m)
         seq_puts(m, "# \\ / ||||| \\ | / \n");
 }
 
-static void notrace print_func_help_header(struct seq_file *m)
+static notrace void print_func_help_header(struct seq_file *m)
 {
         seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
         seq_puts(m, "# | | | | |\n");
 }
 
 
-static void notrace
+static notrace void
 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 {
         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
@@ -858,7 +864,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
         seq_puts(m, "\n");
 }
 
-static void notrace
+static notrace void
 lat_print_generic(struct seq_file *m, struct trace_entry *entry, int cpu)
 {
         int hardirq, softirq;
@@ -895,7 +901,7 @@ lat_print_generic(struct seq_file *m, struct trace_entry *entry, int cpu)
 
 unsigned long preempt_mark_thresh = 100;
 
-static void notrace
+static notrace void
 lat_print_timestamp(struct seq_file *m, unsigned long long abs_usecs,
                     unsigned long rel_usecs)
 {
@@ -910,7 +916,7 @@ lat_print_timestamp(struct seq_file *m, unsigned long long abs_usecs,
 
 static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
 
-static void notrace
+static notrace void
 print_lat_fmt(struct seq_file *m, struct trace_iterator *iter,
               unsigned int trace_idx, int cpu)
 {
@@ -966,20 +972,50 @@ print_lat_fmt(struct seq_file *m, struct trace_iterator *iter,
         }
 }
 
-static void notrace
+static notrace void sync_time_offset(struct trace_iterator *iter)
+{
+        struct trace_array_cpu *prev_array, *array;
+        struct trace_entry *prev_entry, *entry;
+        cycle_t prev_t, t;
+
+        entry = iter->ent;
+        prev_entry = iter->prev_ent;
+        if (!prev_entry)
+                return;
+
+        prev_array = iter->tr->data[iter->prev_cpu];
+        array = iter->tr->data[iter->cpu];
+
+        prev_t = prev_entry->t + prev_array->time_offset;
+        t = entry->t + array->time_offset;
+
+        /*
+         * If time goes backwards we increase the offset of
+         * the current array, to not have observable time warps.
+         * This will quickly synchronize the time offsets of
+         * multiple CPUs:
+         */
+        if (t < prev_t)
+                array->time_offset += prev_t - t;
+}
+
+static notrace void
 print_trace_fmt(struct seq_file *m, struct trace_iterator *iter)
 {
         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
-        struct trace_entry *entry = iter->ent;
+        struct trace_entry *entry;
         unsigned long usec_rem;
         unsigned long long t;
         unsigned long secs;
         char *comm;
         int S;
 
+        sync_time_offset(iter);
+        entry = iter->ent;
+
         comm = trace_find_cmdline(iter->ent->pid);
 
-        t = ns2usecs(entry->t);
+        t = ns2usecs(entry->t + iter->tr->data[iter->cpu]->time_offset);
         usec_rem = do_div(t, 1000000ULL);
         secs = (unsigned long)t;
 
@@ -1158,7 +1194,7 @@ static int tracing_lt_open(struct inode *inode, struct file *file)
 }
 
 
-static void notrace *
+static notrace void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
         struct tracer *t = m->private;
@@ -1374,8 +1410,7 @@ tracing_ctrl_read(struct file *filp, char __user *ubuf,
         int r;
 
         r = sprintf(buf, "%ld\n", tr->ctrl);
-        return simple_read_from_buffer(ubuf, cnt, ppos,
-                                       buf, r);
+        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
 
 static ssize_t
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index cc1d34b8b771..5df8ff2b84a7 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -56,6 +56,8 @@ struct trace_array_cpu {
         void *trace_current;
         struct list_head trace_pages;
         atomic_t disabled;
+        cycle_t time_offset;
+
         /* these fields get copied into max-trace: */
         unsigned trace_current_idx;
         unsigned long trace_idx;
@@ -114,14 +116,19 @@ struct tracer {
 struct trace_iterator {
         struct trace_array *tr;
         struct tracer *trace;
+
         struct trace_entry *ent;
+        int cpu;
+
+        struct trace_entry *prev_ent;
+        int prev_cpu;
+
         unsigned long iter_flags;
         loff_t pos;
         unsigned long next_idx[NR_CPUS];
         struct list_head *next_page[NR_CPUS];
         unsigned next_page_idx[NR_CPUS];
         long idx;
-        int cpu;
 };
 
 void notrace tracing_reset(struct trace_array_cpu *data);