diff options
author | Steven Rostedt <srostedt@redhat.com> | 2009-12-07 09:11:39 -0500 |
---|---|---|
committer | Steven Rostedt <rostedt@goodmis.org> | 2009-12-09 13:55:26 -0500 |
commit | a63ce5b306855bccdacba95c03bfc293316c8ae3 (patch) | |
tree | b6d91266e9333a9cf74d6171bbee0acc0e18096b | |
parent | 29bf4a5e3fed3dde3eb629a0cb1762c1e9217458 (diff) |
tracing: Buffer the output of seq_file in case of filled buffer
If the seq_read fills the buffer it will call s_start again on the next
iteration with the same position. This causes a problem with the
function_graph tracer because it consumes the iteration in order to
determine leaf functions.
What happens is that the iterator stores the entry, and the function
graph plugin will look at the next entry. If that next entry is a return
of the same function and task, then the function is a leaf and the
function_graph plugin calls ring_buffer_read which moves the ring buffer
iterator forward (the trace iterator still points to the function start
entry).
The copying of the trace_seq to the seq_file buffer will fail if the
seq_file buffer is full. The seq_read will not show this entry.
The next read by userspace will cause seq_read to again call s_start
which will reuse the trace iterator entry (the function start entry).
But the function return entry was already consumed. The function graph
plugin will think that this entry is a nested function and not a leaf.
To solve this, the trace code now checks the return status of the
seq_printf (trace_print_seq). If the writing to the seq_file buffer
fails, we set a flag in the iterator (leftover) and we do not reset
the trace_seq buffer. On the next call to s_start, we check the leftover
flag, and if it is set, we just reuse the trace_seq buffer and do not
call into the plugin print functions.
Before this patch:
2) | fput() {
2) | __fput() {
2) 0.550 us | inotify_inode_queue_event();
2) | __fsnotify_parent() {
2) 0.540 us | inotify_dentry_parent_queue_event();
After the patch:
2) | fput() {
2) | __fput() {
2) 0.550 us | inotify_inode_queue_event();
2) 0.548 us | __fsnotify_parent();
2) 0.540 us | inotify_dentry_parent_queue_event();
[
Updated the patch to fix a missing return 0 from the trace_print_seq()
stub when CONFIG_TRACING is disabled.
Reported-by: Ingo Molnar <mingo@elte.hu>
]
Reported-by: Jiri Olsa <jolsa@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-rw-r--r-- | include/linux/ftrace_event.h | 1 | ||||
-rw-r--r-- | include/linux/trace_seq.h | 5 | ||||
-rw-r--r-- | kernel/trace/trace.c | 35 | ||||
-rw-r--r-- | kernel/trace/trace_output.c | 14 |
4 files changed, 47 insertions, 8 deletions
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 47bbdf9c38d0..38f8d6553831 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h | |||
@@ -57,6 +57,7 @@ struct trace_iterator { | |||
57 | /* The below is zeroed out in pipe_read */ | 57 | /* The below is zeroed out in pipe_read */ |
58 | struct trace_seq seq; | 58 | struct trace_seq seq; |
59 | struct trace_entry *ent; | 59 | struct trace_entry *ent; |
60 | int leftover; | ||
60 | int cpu; | 61 | int cpu; |
61 | u64 ts; | 62 | u64 ts; |
62 | 63 | ||
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h index 09077f6ed128..fad6857bc0f3 100644 --- a/include/linux/trace_seq.h +++ b/include/linux/trace_seq.h | |||
@@ -33,7 +33,7 @@ extern int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args) | |||
33 | __attribute__ ((format (printf, 2, 0))); | 33 | __attribute__ ((format (printf, 2, 0))); |
34 | extern int | 34 | extern int |
35 | trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); | 35 | trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); |
36 | extern void trace_print_seq(struct seq_file *m, struct trace_seq *s); | 36 | extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); |
37 | extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | 37 | extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, |
38 | size_t cnt); | 38 | size_t cnt); |
39 | extern int trace_seq_puts(struct trace_seq *s, const char *str); | 39 | extern int trace_seq_puts(struct trace_seq *s, const char *str); |
@@ -55,8 +55,9 @@ trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) | |||
55 | return 0; | 55 | return 0; |
56 | } | 56 | } |
57 | 57 | ||
58 | static inline void trace_print_seq(struct seq_file *m, struct trace_seq *s) | 58 | static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s) |
59 | { | 59 | { |
60 | return 0; | ||
60 | } | 61 | } |
61 | static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | 62 | static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, |
62 | size_t cnt) | 63 | size_t cnt) |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index dc937e1baa91..484114d70743 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -1516,6 +1516,8 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos) | |||
1516 | int i = (int)*pos; | 1516 | int i = (int)*pos; |
1517 | void *ent; | 1517 | void *ent; |
1518 | 1518 | ||
1519 | WARN_ON_ONCE(iter->leftover); | ||
1520 | |||
1519 | (*pos)++; | 1521 | (*pos)++; |
1520 | 1522 | ||
1521 | /* can't go backwards */ | 1523 | /* can't go backwards */ |
@@ -1614,8 +1616,16 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
1614 | ; | 1616 | ; |
1615 | 1617 | ||
1616 | } else { | 1618 | } else { |
1617 | l = *pos - 1; | 1619 | /* |
1618 | p = s_next(m, p, &l); | 1620 | * If we overflowed the seq_file before, then we want |
1621 | * to just reuse the trace_seq buffer again. | ||
1622 | */ | ||
1623 | if (iter->leftover) | ||
1624 | p = iter; | ||
1625 | else { | ||
1626 | l = *pos - 1; | ||
1627 | p = s_next(m, p, &l); | ||
1628 | } | ||
1619 | } | 1629 | } |
1620 | 1630 | ||
1621 | trace_event_read_lock(); | 1631 | trace_event_read_lock(); |
@@ -1923,6 +1933,7 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter) | |||
1923 | static int s_show(struct seq_file *m, void *v) | 1933 | static int s_show(struct seq_file *m, void *v) |
1924 | { | 1934 | { |
1925 | struct trace_iterator *iter = v; | 1935 | struct trace_iterator *iter = v; |
1936 | int ret; | ||
1926 | 1937 | ||
1927 | if (iter->ent == NULL) { | 1938 | if (iter->ent == NULL) { |
1928 | if (iter->tr) { | 1939 | if (iter->tr) { |
@@ -1942,9 +1953,27 @@ static int s_show(struct seq_file *m, void *v) | |||
1942 | if (!(trace_flags & TRACE_ITER_VERBOSE)) | 1953 | if (!(trace_flags & TRACE_ITER_VERBOSE)) |
1943 | print_func_help_header(m); | 1954 | print_func_help_header(m); |
1944 | } | 1955 | } |
1956 | } else if (iter->leftover) { | ||
1957 | /* | ||
1958 | * If we filled the seq_file buffer earlier, we | ||
1959 | * want to just show it now. | ||
1960 | */ | ||
1961 | ret = trace_print_seq(m, &iter->seq); | ||
1962 | |||
1963 | /* ret should this time be zero, but you never know */ | ||
1964 | iter->leftover = ret; | ||
1965 | |||
1945 | } else { | 1966 | } else { |
1946 | print_trace_line(iter); | 1967 | print_trace_line(iter); |
1947 | trace_print_seq(m, &iter->seq); | 1968 | ret = trace_print_seq(m, &iter->seq); |
1969 | /* | ||
1970 | * If we overflow the seq_file buffer, then it will | ||
1971 | * ask us for this data again at start up. | ||
1972 | * Use that instead. | ||
1973 | * ret is 0 if seq_file write succeeded. | ||
1974 | * -1 otherwise. | ||
1975 | */ | ||
1976 | iter->leftover = ret; | ||
1948 | } | 1977 | } |
1949 | 1978 | ||
1950 | return 0; | 1979 | return 0; |
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index b6c12c6a1bcd..e5cf90fef34e 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
@@ -23,13 +23,21 @@ static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly; | |||
23 | 23 | ||
24 | static int next_event_type = __TRACE_LAST_TYPE + 1; | 24 | static int next_event_type = __TRACE_LAST_TYPE + 1; |
25 | 25 | ||
26 | void trace_print_seq(struct seq_file *m, struct trace_seq *s) | 26 | int trace_print_seq(struct seq_file *m, struct trace_seq *s) |
27 | { | 27 | { |
28 | int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len; | 28 | int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len; |
29 | int ret; | ||
30 | |||
31 | ret = seq_write(m, s->buffer, len); | ||
29 | 32 | ||
30 | seq_write(m, s->buffer, len); | 33 | /* |
34 | * Only reset this buffer if we successfully wrote to the | ||
35 | * seq_file buffer. | ||
36 | */ | ||
37 | if (!ret) | ||
38 | trace_seq_init(s); | ||
31 | 39 | ||
32 | trace_seq_init(s); | 40 | return ret; |
33 | } | 41 | } |
34 | 42 | ||
35 | enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter) | 43 | enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter) |