Diffstat (limited to 'kernel/trace/trace.c')

 kernel/trace/trace.c | 76
 1 files changed, 52 insertions(+), 24 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9d3067a62d43..c82dfd92fdfd 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -86,17 +86,17 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  */
 static int tracing_disabled = 1;
 
-DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
+DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 
 static inline void ftrace_disable_cpu(void)
 {
 	preempt_disable();
-	local_inc(&__get_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
 }
 
 static inline void ftrace_enable_cpu(void)
 {
-	local_dec(&__get_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
 	preempt_enable();
 }
 
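The hunk above swaps the local_t counter for a plain per-CPU int: every increment and decrement already runs with preemption disabled, so the cheaper non-atomic __this_cpu_inc()/__this_cpu_dec() suffice. A hedged sketch of the resulting usage pattern, bracketing a ring-buffer read so that trace_function() (see the hunk at -1085 below) sees the counter raised and skips recording on this CPU; the read_one_event() wrapper is hypothetical:

/* Hedged sketch, not part of the patch: the wrapper name and the
 * condensed read step are illustrative. */
static void *read_one_event(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;

	ftrace_disable_cpu();	/* preempt off + per-CPU count up */
	event = ring_buffer_consume(tr->buffer, cpu, NULL);
	ftrace_enable_cpu();	/* count down + preempt back on */

	return event;
}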
@@ -203,7 +203,7 @@ cycle_t ftrace_now(int cpu)
  */
 static struct trace_array max_tr;
 
-static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
+static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
 
 /* tracer_enabled is used to toggle activation of a tracer */
 static int tracer_enabled = 1;
@@ -1085,7 +1085,7 @@ trace_function(struct trace_array *tr,
 	struct ftrace_entry *entry;
 
 	/* If we are reading the ring buffer, don't trace */
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@ -1363,9 +1363,6 @@ int trace_array_vprintk(struct trace_array *tr,
 	__raw_spin_lock(&trace_buf_lock);
 	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
-	len = min(len, TRACE_BUF_SIZE-1);
-	trace_buf[len] = 0;
-
 	size = sizeof(*entry) + len + 1;
 	buffer = tr->buffer;
 	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
@@ -1373,10 +1370,10 @@ int trace_array_vprintk(struct trace_array *tr,
 	if (!event)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
 	entry->ip = ip;
 
 	memcpy(&entry->buf, trace_buf, len);
-	entry->buf[len] = 0;
+	entry->buf[len] = '\0';
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
 
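The two lines dropped in the previous hunk clamped and NUL-terminated trace_buf; termination now happens once, on the copy inside the event (entry->buf[len] = '\0'). Worth keeping in mind when reading these hunks: the vsnprintf() family reports the length the output would have had, not the number of bytes actually stored, as this standalone userspace sketch (not kernel code) shows:

#include <stdio.h>

/* Standalone illustration: snprintf-family calls return the
 * would-be length even when the output is truncated. */
int main(void)
{
	char buf[8];
	int len = snprintf(buf, sizeof(buf), "%s", "hello, world");

	/* stores "hello, " plus NUL, but reports len == 12 */
	printf("buf=\"%s\" len=%d\n", buf, len);
	return 0;
}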
@@ -1515,6 +1512,8 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
 	int i = (int)*pos;
 	void *ent;
 
+	WARN_ON_ONCE(iter->leftover);
+
 	(*pos)++;
 
 	/* can't go backwards */
@@ -1613,8 +1612,16 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 			;
 
 	} else {
-		l = *pos - 1;
-		p = s_next(m, p, &l);
+		/*
+		 * If we overflowed the seq_file before, then we want
+		 * to just reuse the trace_seq buffer again.
+		 */
+		if (iter->leftover)
+			p = iter;
+		else {
+			l = *pos - 1;
+			p = s_next(m, p, &l);
+		}
 	}
 
 	trace_event_read_lock();
@@ -1922,6 +1929,7 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
 static int s_show(struct seq_file *m, void *v)
 {
 	struct trace_iterator *iter = v;
+	int ret;
 
 	if (iter->ent == NULL) {
 		if (iter->tr) {
@@ -1941,9 +1949,27 @@ static int s_show(struct seq_file *m, void *v)
 			if (!(trace_flags & TRACE_ITER_VERBOSE))
 				print_func_help_header(m);
 		}
+	} else if (iter->leftover) {
+		/*
+		 * If we filled the seq_file buffer earlier, we
+		 * want to just show it now.
+		 */
+		ret = trace_print_seq(m, &iter->seq);
+
+		/* ret should this time be zero, but you never know */
+		iter->leftover = ret;
+
 	} else {
 		print_trace_line(iter);
-		trace_print_seq(m, &iter->seq);
+		ret = trace_print_seq(m, &iter->seq);
+		/*
+		 * If we overflow the seq_file buffer, then it will
+		 * ask us for this data again at start up.
+		 * Use that instead.
+		 * ret is 0 if seq_file write succeeded.
+		 * -1 otherwise.
+		 */
+		iter->leftover = ret;
 	}
 
 	return 0;
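Together with the s_start() hunk above, this gives the iterator a retry path: trace_print_seq() now reports whether the seq_file buffer took the data (0) or filled up (-1), and the result is parked in iter->leftover so the next cycle replays the saved trace_seq instead of rendering a fresh record. A condensed, hedged sketch of the round trip (names are from this patch, control flow abbreviated):

/* Hedged sketch of the leftover mechanism: seq_file keeps calling
 * s_start()/s_show() until its buffer reaches userspace, so a
 * record that overflowed must survive one cycle intact. */
static int s_show_sketch(struct seq_file *m, struct trace_iterator *iter)
{
	int ret;

	if (!iter->leftover)
		print_trace_line(iter);		/* render one record into iter->seq */

	ret = trace_print_seq(m, &iter->seq);	/* 0 = copied, -1 = seq_file full */
	iter->leftover = ret;			/* nonzero: replay next round */

	return 0;
}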
@@ -2897,6 +2923,10 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
 	else
 		cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask);
 
+
+	if (iter->trace->pipe_close)
+		iter->trace->pipe_close(iter);
+
 	mutex_unlock(&trace_types_lock);
 
 	free_cpumask_var(iter->started);
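The new hook lets the current tracer tear down per-reader state when trace_pipe is closed. A tracer opts in by filling the pipe_close callback; a hypothetical example (the tracer body and name are invented for illustration, and all other struct tracer fields are elided):

/* Hedged sketch: my_tracer and my_pipe_close are hypothetical. */
static void my_pipe_close(struct trace_iterator *iter)
{
	/* free whatever this tracer hung off the pipe reader */
}

static struct tracer my_tracer __read_mostly = {
	.name		= "my_tracer",
	.pipe_close	= my_pipe_close,
};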
@@ -3334,7 +3364,6 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 		   size_t cnt, loff_t *fpos)
 {
 	char *buf;
-	char *end;
 
 	if (tracing_disabled)
 		return -EINVAL;
@@ -3342,7 +3371,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	if (cnt > TRACE_BUF_SIZE)
 		cnt = TRACE_BUF_SIZE;
 
-	buf = kmalloc(cnt + 1, GFP_KERNEL);
+	buf = kmalloc(cnt + 2, GFP_KERNEL);
 	if (buf == NULL)
 		return -ENOMEM;
 
@@ -3350,14 +3379,13 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 		kfree(buf);
 		return -EFAULT;
 	}
+	if (buf[cnt-1] != '\n') {
+		buf[cnt] = '\n';
+		buf[cnt+1] = '\0';
+	} else
+		buf[cnt] = '\0';
 
-	/* Cut from the first nil or newline. */
-	buf[cnt] = '\0';
-	end = strchr(buf, '\n');
-	if (end)
-		*end = '\0';
-
-	cnt = mark_printk("%s\n", buf);
+	cnt = mark_printk("%s", buf);
 	kfree(buf);
 	*fpos += cnt;
 
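The rewritten tail of tracing_mark_write() inverts the old behaviour: instead of cutting the marker at its first newline, it guarantees exactly one trailing newline, which is why the kmalloc() above grew to cnt + 2 (room for the added '\n' plus the NUL). A standalone userspace sketch of the new normalization (the helper name is invented; the logic mirrors the patched lines):

#include <stdio.h>
#include <string.h>

/* Standalone illustration; buf must have room for cnt + 2 bytes,
 * mirroring the kmalloc(cnt + 2) in the hunk above. */
static void terminate_marker(char *buf, size_t cnt)
{
	if (buf[cnt - 1] != '\n') {
		buf[cnt] = '\n';
		buf[cnt + 1] = '\0';
	} else
		buf[cnt] = '\0';
}

int main(void)
{
	char a[16] = "no newline";
	char b[16] = "has newline\n";

	terminate_marker(a, strlen(a));
	terminate_marker(b, strlen(b));
	printf("%s%s", a, b);	/* each marker ends in exactly one '\n' */
	return 0;
}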
@@ -3730,7 +3758,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 
 	s = kmalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
-		return ENOMEM;
+		return -ENOMEM;
 
 	trace_seq_init(s);
 
@@ -4426,7 +4454,7 @@ __init static int tracer_alloc_buffers(void)
 	/* Allocate the first page for all buffers */
 	for_each_tracing_cpu(i) {
 		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
-		max_tr.data[i] = &per_cpu(max_data, i);
+		max_tr.data[i] = &per_cpu(max_tr_data, i);
 	}
 
 	trace_init_cmdlines();