Diffstat (limited to 'kernel/trace/trace.c')
 -rw-r--r--  kernel/trace/trace.c | 57
 1 file changed, 48 insertions(+), 9 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 874f2893cff0..88bd9ae2a9ed 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1361,11 +1361,7 @@ int trace_array_vprintk(struct trace_array *tr,
         pause_graph_tracing();
         raw_local_irq_save(irq_flags);
         __raw_spin_lock(&trace_buf_lock);
-        if (args == NULL) {
-                strncpy(trace_buf, fmt, TRACE_BUF_SIZE);
-                len = strlen(trace_buf);
-        } else
-                len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+        len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
         size = sizeof(*entry) + len + 1;
         buffer = tr->buffer;
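Note: the removed args == NULL branch existed because tracing_mark_write() used to call trace_vprintk(0, buf, NULL), and handing a null va_list to vsnprintf() is not valid. With the mark_printk() wrapper added later in this diff, every caller now supplies a real va_list. A minimal user-space sketch of the same forwarding pattern; demo_vprintk() and demo_printk() are illustrative names, not kernel functions:

#include <stdarg.h>
#include <stdio.h>

/* Stand-in for trace_vprintk(): format into a fixed buffer and print it. */
static int demo_vprintk(const char *fmt, va_list args)
{
        char buf[128];
        int len = vsnprintf(buf, sizeof(buf), fmt, args);

        fputs(buf, stdout);
        return len;
}

/* Same shape as mark_printk(): build a real va_list and forward it. */
static int demo_printk(const char *fmt, ...)
{
        va_list args;
        int ret;

        va_start(args, fmt);
        ret = demo_vprintk(fmt, args);
        va_end(args);
        return ret;
}

int main(void)
{
        return demo_printk("%s\n", "user data, passed as an argument") < 0;
}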
@@ -1516,6 +1512,8 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
         int i = (int)*pos;
         void *ent;
 
+        WARN_ON_ONCE(iter->leftover);
+
         (*pos)++;
 
         /* can't go backwards */
@@ -1614,8 +1612,16 @@ static void *s_start(struct seq_file *m, loff_t *pos)
                         ;
 
         } else {
-                l = *pos - 1;
-                p = s_next(m, p, &l);
+                /*
+                 * If we overflowed the seq_file before, then we want
+                 * to just reuse the trace_seq buffer again.
+                 */
+                if (iter->leftover)
+                        p = iter;
+                else {
+                        l = *pos - 1;
+                        p = s_next(m, p, &l);
+                }
         }
 
         trace_event_read_lock();
@@ -1923,6 +1929,7 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
 static int s_show(struct seq_file *m, void *v)
 {
         struct trace_iterator *iter = v;
+        int ret;
 
         if (iter->ent == NULL) {
                 if (iter->tr) {
@@ -1942,9 +1949,27 @@ static int s_show(struct seq_file *m, void *v)
                         if (!(trace_flags & TRACE_ITER_VERBOSE))
                                 print_func_help_header(m);
                 }
+        } else if (iter->leftover) {
+                /*
+                 * If we filled the seq_file buffer earlier, we
+                 * want to just show it now.
+                 */
+                ret = trace_print_seq(m, &iter->seq);
+
+                /* ret should this time be zero, but you never know */
+                iter->leftover = ret;
+
         } else {
                 print_trace_line(iter);
-                trace_print_seq(m, &iter->seq);
+                ret = trace_print_seq(m, &iter->seq);
+                /*
+                 * If we overflow the seq_file buffer, then it will
+                 * ask us for this data again at start up.
+                 * Use that instead.
+                 * ret is 0 if seq_file write succeeded.
+                 * -1 otherwise.
+                 */
+                iter->leftover = ret;
         }
 
         return 0;
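Note: taken together, the s_next(), s_start() and s_show() changes cache a record that did not fit in the seq_file buffer. trace_print_seq() now reports whether its write succeeded, and a failed write sets iter->leftover so the next read pass re-emits the already formatted trace_seq instead of skipping to the next record. A rough user-space sketch of that retry idea; the types and helpers below are made up for illustration, not kernel code:

#include <stdio.h>
#include <string.h>

struct demo_iter {
        char seq[64];   /* stands in for iter->seq (the formatted record) */
        int leftover;   /* non-zero: seq still holds data the reader hasn't seen */
        int next;       /* next record number to format */
};

/* Copy the pending record into 'out' if it fits; 0 on success, -1 on overflow. */
static int demo_print_seq(char *out, size_t avail, const char *seq)
{
        if (strlen(seq) + 1 > avail)
                return -1;
        strcpy(out, seq);
        return 0;
}

/* One ->show() pass: format a new record unless a leftover is pending. */
static void demo_show(struct demo_iter *it, char *out, size_t avail)
{
        if (!it->leftover)
                snprintf(it->seq, sizeof(it->seq), "record %d\n", it->next++);
        it->leftover = demo_print_seq(out, avail, it->seq) ? 1 : 0;
}

int main(void)
{
        struct demo_iter it = { .next = 1 };
        char small[4], big[64];

        demo_show(&it, small, sizeof(small));   /* overflows: leftover set */
        demo_show(&it, big, sizeof(big));       /* replays "record 1", does not skip it */
        printf("%s", big);
        return 0;
}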
@@ -2898,6 +2923,10 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
         else
                 cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask);
 
+
+        if (iter->trace->pipe_close)
+                iter->trace->pipe_close(iter);
+
         mutex_unlock(&trace_types_lock);
 
         free_cpumask_var(iter->started);
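Note: the new iter->trace->pipe_close call gives a tracer a hook that runs when a trace_pipe reader closes the file, alongside its existing pipe open/read callbacks. A hypothetical in-tree tracer (built against the internal kernel/trace/trace.h definitions) might wire it up as in the sketch below; only the .pipe_close field comes from this diff, the example tracer and its cleanup body are invented:

/* Sketch only, not from this patch: a tracer that attaches per-reader
 * state in its pipe-open path can now release it when the pipe closes. */
static void example_pipe_close(struct trace_iterator *iter)
{
        kfree(iter->private);           /* whatever pipe open attached */
        iter->private = NULL;
}

static struct tracer example_tracer __read_mostly = {
        .name           = "example",
        .pipe_close     = example_pipe_close,
        /* ... other callbacks ... */
};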
@@ -3320,6 +3349,16 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
         return cnt;
 }
 
+static int mark_printk(const char *fmt, ...)
+{
+        int ret;
+        va_list args;
+        va_start(args, fmt);
+        ret = trace_vprintk(0, fmt, args);
+        va_end(args);
+        return ret;
+}
+
 static ssize_t
 tracing_mark_write(struct file *filp, const char __user *ubuf,
                    size_t cnt, loff_t *fpos)
@@ -3346,7 +3385,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
         } else
                 buf[cnt] = '\0';
 
-        cnt = trace_vprintk(0, buf, NULL);
+        cnt = mark_printk("%s", buf);
         kfree(buf);
         *fpos += cnt;
 
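Note: tracing_mark_write() is the write handler behind the trace_marker file, so a user-space write like the one below ends up in mark_printk() and, through trace_vprintk(), in the ring buffer. A minimal sketch, assuming debugfs is mounted at the usual /sys/kernel/debug path:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char msg[] = "hello from user space\n";
        int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);

        if (fd < 0)
                return 1;
        if (write(fd, msg, strlen(msg)) < 0) {
                close(fd);
                return 1;
        }
        close(fd);
        return 0;
}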
