diff options
-rw-r--r-- | kernel/trace/trace.c | 44 |
1 file changed, 2 insertions(+), 42 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4fb10ef727d3..48ef4960ec90 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -87,18 +87,6 @@ static int tracing_disabled = 1;
87 | 87 | ||
88 | DEFINE_PER_CPU(int, ftrace_cpu_disabled); | 88 | DEFINE_PER_CPU(int, ftrace_cpu_disabled); |
89 | 89 | ||
90 | static inline void ftrace_disable_cpu(void) | ||
91 | { | ||
92 | preempt_disable(); | ||
93 | __this_cpu_inc(ftrace_cpu_disabled); | ||
94 | } | ||
95 | |||
96 | static inline void ftrace_enable_cpu(void) | ||
97 | { | ||
98 | __this_cpu_dec(ftrace_cpu_disabled); | ||
99 | preempt_enable(); | ||
100 | } | ||
101 | |||
102 | cpumask_var_t __read_mostly tracing_buffer_mask; | 90 | cpumask_var_t __read_mostly tracing_buffer_mask; |
103 | 91 | ||
104 | /* | 92 | /* |
@@ -748,8 +736,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
748 | 736 | ||
749 | arch_spin_lock(&ftrace_max_lock); | 737 | arch_spin_lock(&ftrace_max_lock); |
750 | 738 | ||
751 | ftrace_disable_cpu(); | ||
752 | |||
753 | ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu); | 739 | ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu); |
754 | 740 | ||
755 | if (ret == -EBUSY) { | 741 | if (ret == -EBUSY) { |
@@ -763,8 +749,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
763 | "Failed to swap buffers due to commit in progress\n"); | 749 | "Failed to swap buffers due to commit in progress\n"); |
764 | } | 750 | } |
765 | 751 | ||
766 | ftrace_enable_cpu(); | ||
767 | |||
768 | WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); | 752 | WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); |
769 | 753 | ||
770 | __update_max_tr(tr, tsk, cpu); | 754 | __update_max_tr(tr, tsk, cpu); |
@@ -916,13 +900,6 @@ out:
916 | mutex_unlock(&trace_types_lock); | 900 | mutex_unlock(&trace_types_lock); |
917 | } | 901 | } |
918 | 902 | ||
919 | static void __tracing_reset(struct ring_buffer *buffer, int cpu) | ||
920 | { | ||
921 | ftrace_disable_cpu(); | ||
922 | ring_buffer_reset_cpu(buffer, cpu); | ||
923 | ftrace_enable_cpu(); | ||
924 | } | ||
925 | |||
926 | void tracing_reset(struct trace_array *tr, int cpu) | 903 | void tracing_reset(struct trace_array *tr, int cpu) |
927 | { | 904 | { |
928 | struct ring_buffer *buffer = tr->buffer; | 905 | struct ring_buffer *buffer = tr->buffer; |
@@ -931,7 +908,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
931 | 908 | ||
932 | /* Make sure all commits have finished */ | 909 | /* Make sure all commits have finished */ |
933 | synchronize_sched(); | 910 | synchronize_sched(); |
934 | __tracing_reset(buffer, cpu); | 911 | ring_buffer_reset_cpu(buffer, cpu); |
935 | 912 | ||
936 | ring_buffer_record_enable(buffer); | 913 | ring_buffer_record_enable(buffer); |
937 | } | 914 | } |
@@ -949,7 +926,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
949 | tr->time_start = ftrace_now(tr->cpu); | 926 | tr->time_start = ftrace_now(tr->cpu); |
950 | 927 | ||
951 | for_each_online_cpu(cpu) | 928 | for_each_online_cpu(cpu) |
952 | __tracing_reset(buffer, cpu); | 929 | ring_buffer_reset_cpu(buffer, cpu); |
953 | 930 | ||
954 | ring_buffer_record_enable(buffer); | 931 | ring_buffer_record_enable(buffer); |
955 | } | 932 | } |
@@ -1733,14 +1710,9 @@ EXPORT_SYMBOL_GPL(trace_vprintk);
1733 | 1710 | ||
1734 | static void trace_iterator_increment(struct trace_iterator *iter) | 1711 | static void trace_iterator_increment(struct trace_iterator *iter) |
1735 | { | 1712 | { |
1736 | /* Don't allow ftrace to trace into the ring buffers */ | ||
1737 | ftrace_disable_cpu(); | ||
1738 | |||
1739 | iter->idx++; | 1713 | iter->idx++; |
1740 | if (iter->buffer_iter[iter->cpu]) | 1714 | if (iter->buffer_iter[iter->cpu]) |
1741 | ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); | 1715 | ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); |
1742 | |||
1743 | ftrace_enable_cpu(); | ||
1744 | } | 1716 | } |
1745 | 1717 | ||
1746 | static struct trace_entry * | 1718 | static struct trace_entry * |
@@ -1750,17 +1722,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
1750 | struct ring_buffer_event *event; | 1722 | struct ring_buffer_event *event; |
1751 | struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu]; | 1723 | struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu]; |
1752 | 1724 | ||
1753 | /* Don't allow ftrace to trace into the ring buffers */ | ||
1754 | ftrace_disable_cpu(); | ||
1755 | |||
1756 | if (buf_iter) | 1725 | if (buf_iter) |
1757 | event = ring_buffer_iter_peek(buf_iter, ts); | 1726 | event = ring_buffer_iter_peek(buf_iter, ts); |
1758 | else | 1727 | else |
1759 | event = ring_buffer_peek(iter->tr->buffer, cpu, ts, | 1728 | event = ring_buffer_peek(iter->tr->buffer, cpu, ts, |
1760 | lost_events); | 1729 | lost_events); |
1761 | 1730 | ||
1762 | ftrace_enable_cpu(); | ||
1763 | |||
1764 | if (event) { | 1731 | if (event) { |
1765 | iter->ent_size = ring_buffer_event_length(event); | 1732 | iter->ent_size = ring_buffer_event_length(event); |
1766 | return ring_buffer_event_data(event); | 1733 | return ring_buffer_event_data(event); |
@@ -1850,11 +1817,8 @@ void *trace_find_next_entry_inc(struct trace_iterator *iter)
1850 | 1817 | ||
1851 | static void trace_consume(struct trace_iterator *iter) | 1818 | static void trace_consume(struct trace_iterator *iter) |
1852 | { | 1819 | { |
1853 | /* Don't allow ftrace to trace into the ring buffers */ | ||
1854 | ftrace_disable_cpu(); | ||
1855 | ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts, | 1820 | ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts, |
1856 | &iter->lost_events); | 1821 | &iter->lost_events); |
1857 | ftrace_enable_cpu(); | ||
1858 | } | 1822 | } |
1859 | 1823 | ||
1860 | static void *s_next(struct seq_file *m, void *v, loff_t *pos) | 1824 | static void *s_next(struct seq_file *m, void *v, loff_t *pos) |
@@ -1943,16 +1907,12 @@ static void *s_start(struct seq_file *m, loff_t *pos)
1943 | iter->cpu = 0; | 1907 | iter->cpu = 0; |
1944 | iter->idx = -1; | 1908 | iter->idx = -1; |
1945 | 1909 | ||
1946 | ftrace_disable_cpu(); | ||
1947 | |||
1948 | if (cpu_file == TRACE_PIPE_ALL_CPU) { | 1910 | if (cpu_file == TRACE_PIPE_ALL_CPU) { |
1949 | for_each_tracing_cpu(cpu) | 1911 | for_each_tracing_cpu(cpu) |
1950 | tracing_iter_reset(iter, cpu); | 1912 | tracing_iter_reset(iter, cpu); |
1951 | } else | 1913 | } else |
1952 | tracing_iter_reset(iter, cpu_file); | 1914 | tracing_iter_reset(iter, cpu_file); |
1953 | 1915 | ||
1954 | ftrace_enable_cpu(); | ||
1955 | |||
1956 | iter->leftover = 0; | 1916 | iter->leftover = 0; |
1957 | for (p = iter; p && l < *pos; p = s_next(m, p, &l)) | 1917 | for (p = iter; p && l < *pos; p = s_next(m, p, &l)) |
1958 | ; | 1918 | ; |