Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c  |  23 ++++++++++++-----------
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 05330494a0df..abcbf7ff8743 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -297,11 +297,11 @@ void trace_array_put(struct trace_array *this_tr)
 	mutex_unlock(&trace_types_lock);
 }
 
-int filter_check_discard(struct ftrace_event_file *file, void *rec,
+int filter_check_discard(struct trace_event_file *file, void *rec,
 			 struct ring_buffer *buffer,
 			 struct ring_buffer_event *event)
 {
-	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
+	if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
 	    !filter_match_preds(file->filter, rec)) {
 		ring_buffer_discard_commit(buffer, event);
 		return 1;
@@ -311,7 +311,7 @@ int filter_check_discard(struct ftrace_event_file *file, void *rec,
 }
 EXPORT_SYMBOL_GPL(filter_check_discard);
 
-int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
+int call_filter_check_discard(struct trace_event_call *call, void *rec,
 			      struct ring_buffer *buffer,
 			      struct ring_buffer_event *event)
 {
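For reference, the typical caller pattern for call_filter_check_discard() inside trace.c looks roughly like the sketch below; the reserve step, the TRACE_FN entry fields, and the internal __buffer_unlock_commit() helper are modelled on the function-trace path and are illustrative rather than a verbatim excerpt:

	struct trace_event_call *call = &event_function;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	/* Drop the event here if the call-level filter rejects it. */
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);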
@@ -876,6 +876,7 @@ static struct {
 	{ trace_clock_jiffies,		"uptime",	0 },
 	{ trace_clock,			"perf",		1 },
 	{ ktime_get_mono_fast_ns,	"mono",		1 },
+	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
 	ARCH_TRACE_CLOCKS
 };
 
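The added table entry exposes ktime_get_raw_fast_ns() as a new "mono_raw" trace clock, selectable at runtime through the trace_clock file in tracefs. A minimal userspace sketch, assuming tracefs is reachable at /sys/kernel/debug/tracing (adjust the path if it is mounted elsewhere):

#include <stdio.h>

int main(void)
{
	/* Equivalent to: echo mono_raw > /sys/kernel/debug/tracing/trace_clock */
	FILE *f = fopen("/sys/kernel/debug/tracing/trace_clock", "w");

	if (!f)
		return 1;
	fputs("mono_raw", f);
	return fclose(f) ? 1 : 0;
}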
@@ -1693,13 +1694,13 @@ static struct ring_buffer *temp_buffer;
 
 struct ring_buffer_event *
 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
-			  struct ftrace_event_file *ftrace_file,
+			  struct trace_event_file *trace_file,
			  int type, unsigned long len,
			  unsigned long flags, int pc)
 {
 	struct ring_buffer_event *entry;
 
-	*current_rb = ftrace_file->tr->trace_buffer.buffer;
+	*current_rb = trace_file->tr->trace_buffer.buffer;
 	entry = trace_buffer_lock_reserve(*current_rb,
 					  type, len, flags, pc);
 	/*
@@ -1708,7 +1709,7 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
 	 * to store the trace event for the tigger to use. It's recusive
 	 * safe and will not be recorded anywhere.
 	 */
-	if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
+	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
 		*current_rb = temp_buffer;
 		entry = trace_buffer_lock_reserve(*current_rb,
 						  type, len, flags, pc);
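A sketch of how an event-writing path uses trace_event_buffer_lock_reserve() with the renamed struct trace_event_file; the locals (trace_file, ip, str, len, irq_flags, pc) and the trace_buffer_unlock_commit() call are assumptions modelled on the print-event paths in trace.c around this kernel version, not a verbatim excerpt:

	struct ring_buffer *buffer;
	struct ring_buffer_event *event;
	struct print_entry *entry;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						TRACE_PRINT,
						sizeof(*entry) + len + 1,
						irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	memcpy(&entry->buf, str, len + 1);

	/* Honor the per-file filter before committing to the ring buffer. */
	if (!filter_check_discard(trace_file, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);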
@@ -1760,7 +1761,7 @@ trace_function(struct trace_array *tr,
 	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
 	       int pc)
 {
-	struct ftrace_event_call *call = &event_function;
+	struct trace_event_call *call = &event_function;
 	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct ftrace_entry *entry;
@@ -1795,7 +1796,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 				 unsigned long flags,
 				 int skip, int pc, struct pt_regs *regs)
 {
-	struct ftrace_event_call *call = &event_kernel_stack;
+	struct trace_event_call *call = &event_kernel_stack;
 	struct ring_buffer_event *event;
 	struct stack_entry *entry;
 	struct stack_trace trace;
@@ -1923,7 +1924,7 @@ static DEFINE_PER_CPU(int, user_stack_count);
 void
 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
-	struct ftrace_event_call *call = &event_user_stack;
+	struct trace_event_call *call = &event_user_stack;
 	struct ring_buffer_event *event;
 	struct userstack_entry *entry;
 	struct stack_trace trace;
@@ -2129,7 +2130,7 @@ static void trace_printk_start_stop_comm(int enabled)
  */
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
-	struct ftrace_event_call *call = &event_bprint;
+	struct trace_event_call *call = &event_bprint;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
 	struct trace_array *tr = &global_trace;
@@ -2187,7 +2188,7 @@ static int
 __trace_array_vprintk(struct ring_buffer *buffer,
 		      unsigned long ip, const char *fmt, va_list args)
 {
-	struct ftrace_event_call *call = &event_print;
+	struct trace_event_call *call = &event_print;
 	struct ring_buffer_event *event;
 	int len = 0, size, pc;
 	struct print_entry *entry;