author      Steven Rostedt <srostedt@redhat.com>       2011-11-17 10:35:16 -0500
committer   Steven Rostedt <rostedt@goodmis.org>       2011-11-17 11:10:43 -0500
commit      39eaf7ef884dcc44f7ff1bac803ca2a1dcf43544
tree        323e858496ef9c587a3e9861dab863af5303f82f /kernel/trace
parent      77271ce4b2c0df0a76ad1cbb6a95b07e1f88c1ea
tracing: Add entries in buffer and total entries to default output header
Knowing the number of event entries in the ring buffer compared
to the total number that were written is useful information. The
latency format already provides it, and there is no reason the
default format should not as well.
This information is now added to the default header, along with the
number of online CPUs:
# tracer: nop
#
# entries-in-buffer/entries-written: 159836/64690869   #P:4
#
#                              _-----=> irqs-off
#                             / _----=> need-resched
#                            | / _---=> hardirq/softirq
#                            || / _--=> preempt-depth
#                            ||| /     delay
#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION
#              | |       |   ||||       |         |
          <idle>-0     [000] ...2    49.442971: local_touch_nmi <-cpu_idle
          <idle>-0     [000] d..2    49.442973: enter_idle <-cpu_idle
          <idle>-0     [000] d..2    49.442974: atomic_notifier_call_chain <-enter_idle
          <idle>-0     [000] d..2    49.442976: __atomic_notifier_call_chain <-atomic_notifier
The above shows that the trace contains 159836 entries, but
64690869 were written. One could figure out that there were
64531033 entries that were dropped.
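As a rough illustration only (not part of this patch), a small user-space
sketch could parse the new header line and compute the dropped count
directly. It assumes debugfs is mounted at /sys/kernel/debug; the program
itself is hypothetical:

    /* Illustrative sketch, not part of this patch: read the
     * "entries-in-buffer/entries-written" header line from the trace
     * file and report how many events were dropped.
     */
    #include <stdio.h>

    int main(void)
    {
            const char *path = "/sys/kernel/debug/tracing/trace";
            char line[256];
            unsigned long entries, written;
            FILE *fp = fopen(path, "r");

            if (!fp) {
                    perror(path);
                    return 1;
            }

            while (fgets(line, sizeof(line), fp)) {
                    /* e.g. "# entries-in-buffer/entries-written: 159836/64690869   #P:4" */
                    if (sscanf(line, "# entries-in-buffer/entries-written: %lu/%lu",
                               &entries, &written) == 2) {
                            printf("in buffer: %lu  written: %lu  dropped: %lu\n",
                                   entries, written, written - entries);
                            break;
                    }
            }
            fclose(fp);
            return 0;
    }

With the buffer shown above this would report 64531033 dropped events.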
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace')
-rw-r--r--   kernel/trace/trace.c   72
1 file changed, 47 insertions(+), 25 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index fa5cee639962..7392070ffc39 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1845,6 +1845,33 @@ static void s_stop(struct seq_file *m, void *p)
 	trace_event_read_unlock();
 }
 
+static void
+get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *entries)
+{
+	unsigned long count;
+	int cpu;
+
+	*total = 0;
+	*entries = 0;
+
+	for_each_tracing_cpu(cpu) {
+		count = ring_buffer_entries_cpu(tr->buffer, cpu);
+		/*
+		 * If this buffer has skipped entries, then we hold all
+		 * entries for the trace and we need to ignore the
+		 * ones before the time stamp.
+		 */
+		if (tr->data[cpu]->skipped_entries) {
+			count -= tr->data[cpu]->skipped_entries;
+			/* total is the same as the entries */
+			*total += count;
+		} else
+			*total += count +
+				ring_buffer_overrun_cpu(tr->buffer, cpu);
+		*entries += count;
+	}
+}
+
 static void print_lat_help_header(struct seq_file *m)
 {
 	seq_puts(m, "#                  _------=> CPU#            \n");
@@ -1857,14 +1884,27 @@ static void print_lat_help_header(struct seq_file *m)
 	seq_puts(m, "#     \\   /      |||||  \\    |   /     \n");
 }
 
-static void print_func_help_header(struct seq_file *m)
+static void print_event_info(struct trace_array *tr, struct seq_file *m)
 {
+	unsigned long total;
+	unsigned long entries;
+
+	get_total_entries(tr, &total, &entries);
+	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
+		   entries, total, num_online_cpus());
+	seq_puts(m, "#\n");
+}
+
+static void print_func_help_header(struct trace_array *tr, struct seq_file *m)
+{
+	print_event_info(tr, m);
 	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
 	seq_puts(m, "#              | |       |          |         |\n");
 }
 
-static void print_func_help_header_irq(struct seq_file *m)
+static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m)
 {
+	print_event_info(tr, m);
 	seq_puts(m, "#                              _-----=> irqs-off\n");
 	seq_puts(m, "#                             / _----=> need-resched\n");
 	seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
@@ -1881,32 +1921,14 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 	struct trace_array *tr = iter->tr;
 	struct trace_array_cpu *data = tr->data[tr->cpu];
 	struct tracer *type = current_trace;
-	unsigned long entries = 0;
-	unsigned long total = 0;
-	unsigned long count;
+	unsigned long entries;
+	unsigned long total;
 	const char *name = "preemption";
-	int cpu;
 
 	if (type)
 		name = type->name;
 
-
-	for_each_tracing_cpu(cpu) {
-		count = ring_buffer_entries_cpu(tr->buffer, cpu);
-		/*
-		 * If this buffer has skipped entries, then we hold all
-		 * entries for the trace and we need to ignore the
-		 * ones before the time stamp.
-		 */
-		if (tr->data[cpu]->skipped_entries) {
-			count -= tr->data[cpu]->skipped_entries;
-			/* total is the same as the entries */
-			total += count;
-		} else
-			total += count +
-				ring_buffer_overrun_cpu(tr->buffer, cpu);
-		entries += count;
-	}
+	get_total_entries(tr, &total, &entries);
 
 	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
 		   name, UTS_RELEASE);
@@ -2184,9 +2206,9 @@ void trace_default_header(struct seq_file *m)
 	} else {
 		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
 			if (trace_flags & TRACE_ITER_IRQ_INFO)
-				print_func_help_header_irq(m);
+				print_func_help_header_irq(iter->tr, m);
 			else
-				print_func_help_header(m);
+				print_func_help_header(iter->tr, m);
 		}
 	}
 }