Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	74
1 file changed, 7 insertions, 67 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f11a285ee5bb..48ef4960ec90 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -87,18 +87,6 @@ static int tracing_disabled = 1;
 
 DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 
-static inline void ftrace_disable_cpu(void)
-{
-	preempt_disable();
-	__this_cpu_inc(ftrace_cpu_disabled);
-}
-
-static inline void ftrace_enable_cpu(void)
-{
-	__this_cpu_dec(ftrace_cpu_disabled);
-	preempt_enable();
-}
-
 cpumask_var_t __read_mostly tracing_buffer_mask;
 
 /*
@@ -748,8 +736,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	arch_spin_lock(&ftrace_max_lock);
 
-	ftrace_disable_cpu();
-
 	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
 
 	if (ret == -EBUSY) {
@@ -763,8 +749,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 			"Failed to swap buffers due to commit in progress\n");
 	}
 
-	ftrace_enable_cpu();
-
 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 
 	__update_max_tr(tr, tsk, cpu);
@@ -916,13 +900,6 @@ out:
 	mutex_unlock(&trace_types_lock);
 }
 
-static void __tracing_reset(struct ring_buffer *buffer, int cpu)
-{
-	ftrace_disable_cpu();
-	ring_buffer_reset_cpu(buffer, cpu);
-	ftrace_enable_cpu();
-}
-
 void tracing_reset(struct trace_array *tr, int cpu)
 {
 	struct ring_buffer *buffer = tr->buffer;
@@ -931,7 +908,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
 
 	/* Make sure all commits have finished */
 	synchronize_sched();
-	__tracing_reset(buffer, cpu);
+	ring_buffer_reset_cpu(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
@@ -949,7 +926,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		__tracing_reset(buffer, cpu);
+		ring_buffer_reset_cpu(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
@@ -1733,14 +1710,9 @@ EXPORT_SYMBOL_GPL(trace_vprintk);
 
 static void trace_iterator_increment(struct trace_iterator *iter)
 {
-	/* Don't allow ftrace to trace into the ring buffers */
-	ftrace_disable_cpu();
-
 	iter->idx++;
 	if (iter->buffer_iter[iter->cpu])
 		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
-
-	ftrace_enable_cpu();
 }
 
 static struct trace_entry *
@@ -1750,17 +1722,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
 	struct ring_buffer_event *event;
 	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
 
-	/* Don't allow ftrace to trace into the ring buffers */
-	ftrace_disable_cpu();
-
 	if (buf_iter)
 		event = ring_buffer_iter_peek(buf_iter, ts);
 	else
 		event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
 					 lost_events);
 
-	ftrace_enable_cpu();
-
 	if (event) {
 		iter->ent_size = ring_buffer_event_length(event);
 		return ring_buffer_event_data(event);
@@ -1850,11 +1817,8 @@ void *trace_find_next_entry_inc(struct trace_iterator *iter)
 
 static void trace_consume(struct trace_iterator *iter)
 {
-	/* Don't allow ftrace to trace into the ring buffers */
-	ftrace_disable_cpu();
 	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
 			    &iter->lost_events);
-	ftrace_enable_cpu();
 }
 
 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
@@ -1943,16 +1907,12 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 		iter->cpu = 0;
 		iter->idx = -1;
 
-		ftrace_disable_cpu();
-
 		if (cpu_file == TRACE_PIPE_ALL_CPU) {
 			for_each_tracing_cpu(cpu)
 				tracing_iter_reset(iter, cpu);
 		} else
 			tracing_iter_reset(iter, cpu_file);
 
-		ftrace_enable_cpu();
-
 		iter->leftover = 0;
 		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
 			;
@@ -2413,15 +2373,13 @@ static struct trace_iterator *
 __tracing_open(struct inode *inode, struct file *file)
 {
 	long cpu_file = (long) inode->i_private;
-	void *fail_ret = ERR_PTR(-ENOMEM);
 	struct trace_iterator *iter;
-	struct seq_file *m;
-	int cpu, ret;
+	int cpu;
 
 	if (tracing_disabled)
 		return ERR_PTR(-ENODEV);
 
-	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
 	if (!iter)
 		return ERR_PTR(-ENOMEM);
 
@@ -2478,32 +2436,15 @@ __tracing_open(struct inode *inode, struct file *file)
 		tracing_iter_reset(iter, cpu);
 	}
 
-	ret = seq_open(file, &tracer_seq_ops);
-	if (ret < 0) {
-		fail_ret = ERR_PTR(ret);
-		goto fail_buffer;
-	}
-
-	m = file->private_data;
-	m->private = iter;
-
 	mutex_unlock(&trace_types_lock);
 
 	return iter;
 
-fail_buffer:
-	for_each_tracing_cpu(cpu) {
-		if (iter->buffer_iter[cpu])
-			ring_buffer_read_finish(iter->buffer_iter[cpu]);
-	}
-	free_cpumask_var(iter->started);
-	tracing_start();
 fail:
 	mutex_unlock(&trace_types_lock);
 	kfree(iter->trace);
-	kfree(iter);
-
-	return fail_ret;
+	seq_release_private(inode, file);
+	return ERR_PTR(-ENOMEM);
 }
 
 int tracing_open_generic(struct inode *inode, struct file *filp)
@@ -2539,11 +2480,10 @@ static int tracing_release(struct inode *inode, struct file *file)
 	tracing_start();
 	mutex_unlock(&trace_types_lock);
 
-	seq_release(inode, file);
 	mutex_destroy(&iter->mutex);
 	free_cpumask_var(iter->started);
 	kfree(iter->trace);
-	kfree(iter);
+	seq_release_private(inode, file);
 	return 0;
 }
 
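
The __tracing_open()/tracing_release() hunks above replace the open-coded seq_open() plus manual m->private wiring with the seq_file private-data helpers. As a rough sketch of that pattern (not taken from the patch; every my_* name below is made up, only __seq_open_private(), seq_release_private(), seq_read and seq_lseek are the real <linux/seq_file.h> interfaces), a minimal user looks something like this:

/*
 * Minimal sketch, assuming hypothetical my_* names. Only the
 * seq_file helpers themselves are the real kernel API used by
 * the patch above.
 */
#include <linux/seq_file.h>
#include <linux/errno.h>
#include <linux/fs.h>

struct my_iter {
	int pos;		/* per-open iterator state */
};

static void *my_start(struct seq_file *m, loff_t *pos) { return NULL; }
static void *my_next(struct seq_file *m, void *v, loff_t *pos) { return NULL; }
static void my_stop(struct seq_file *m, void *v) { }
static int my_show(struct seq_file *m, void *v) { return 0; }

static const struct seq_operations my_seq_ops = {
	.start	= my_start,
	.next	= my_next,
	.stop	= my_stop,
	.show	= my_show,
};

static int my_open(struct inode *inode, struct file *file)
{
	struct my_iter *iter;

	/*
	 * __seq_open_private() does the kzalloc(), the seq_open() and
	 * the m->private assignment in one call, so there is no
	 * separate allocation or error path to unwind by hand.
	 */
	iter = __seq_open_private(file, &my_seq_ops, sizeof(*iter));
	if (!iter)
		return -ENOMEM;

	return 0;
}

static int my_release(struct inode *inode, struct file *file)
{
	/* Frees the private iterator, then releases the seq_file. */
	return seq_release_private(inode, file);
}

static const struct file_operations my_fops = {
	.open	 = my_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = my_release,
};

Because __tracing_open() now creates the seq_file and its private iterator before the ring buffer iterators are set up, nothing can fail after that setup, which is why the old fail_buffer unwind path is gone; likewise seq_release_private() frees the iterator for tracing_release(), replacing the separate seq_release() and kfree(iter) calls.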
