author		Linus Torvalds <torvalds@linux-foundation.org>	2013-11-16 15:23:18 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-16 15:23:18 -0500
commit		b29c8306a368cf65782669eba079f81dc861c54d (patch)
tree		35d75aa0e671070d4024f11338d3ae89b078b1ed /kernel/trace/trace.c
parent		0bde7294e2ada03d0f1cc61cec51274081d9a9cf (diff)
parent		3a81a5210b7d33bb6d836b4c4952a54166a336f3 (diff)
Merge tag 'trace-3.13' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing update from Steven Rostedt:
 "This batch of changes is mostly clean ups and small bug fixes. The
  only real feature that was added this release is from Namhyung Kim,
  who introduced the "set_graph_notrace" filter that lets you run the
  function graph tracer and not trace particular functions and their
  call chain.

  Tom Zanussi added some updates to the ftrace multibuffer tracing that
  made it more consistent with the top level tracing.

  One of the fixes for perf function tracing required an API change in
  RCU: the addition of "rcu_is_watching()". As Paul McKenney is pushing
  that change in this release too, he gave me a branch that included all
  the changes to get that working, and I pulled that into my tree in
  order to complete the perf function tracing fix"

* tag 'trace-3.13' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Add rcu annotation for syscall trace descriptors
  tracing: Do not use signed enums with unsigned long long in fgragh output
  tracing: Remove unused function ftrace_off_permanent()
  tracing: Do not assign filp->private_data to freed memory
  tracing: Add helper function tracing_is_disabled()
  tracing: Open tracer when ftrace_dump_on_oops is used
  tracing: Add support for SOFT_DISABLE to syscall events
  tracing: Make register/unregister_ftrace_command __init
  tracing: Update event filters for multibuffer
  recordmcount.pl: Add support for __fentry__
  ftrace: Have control op function callback only trace when RCU is watching
  rcu: Do not trace rcu_is_watching() functions
  ftrace/x86: skip over the breakpoint for ftrace caller
  trace/trace_stat: use rbtree postorder iteration helper instead of opencoding
  ftrace: Add set_graph_notrace filter
  ftrace: Narrow down the protected area of graph_lock
  ftrace: Introduce struct ftrace_graph_data
  ftrace: Get rid of ftrace_graph_filter_enabled
  tracing: Fix potential out-of-bounds in trace_get_user()
  tracing: Show more exact help information about snapshot
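A note on the rcu_is_watching() dependency mentioned above: function-trace callbacks can fire in contexts (idle, NMI entry/exit) where RCU is not watching, so a callback that touches RCU-protected data has to check first. The sketch below is illustrative only and is not part of this merge; "example_trace_call" is a made-up name, while the ftrace_ops callback signature and rcu_is_watching() are the kernel APIs of this era. (For the set_graph_notrace feature itself, usage follows the usual pattern of writing a function name into the set_graph_notrace file in the tracing directory before enabling the function_graph tracer.)

/*
 * Illustrative sketch only -- not part of this merge.
 * A function-trace callback bailing out when RCU is not watching.
 */
#include <linux/ftrace.h>
#include <linux/rcupdate.h>

static void example_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct pt_regs *regs)
{
	/* Idle and NMI entry/exit run with RCU not watching; skip them. */
	if (!rcu_is_watching())
		return;

	/* ... safe to touch RCU-protected tracing state from here ... */
}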
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	82
1 file changed, 53 insertions(+), 29 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d9fea7dfd5d3..9d20cd9743ef 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -235,13 +235,33 @@ void trace_array_put(struct trace_array *this_tr)
 	mutex_unlock(&trace_types_lock);
 }
 
-int filter_current_check_discard(struct ring_buffer *buffer,
-				 struct ftrace_event_call *call, void *rec,
-				 struct ring_buffer_event *event)
+int filter_check_discard(struct ftrace_event_file *file, void *rec,
+			 struct ring_buffer *buffer,
+			 struct ring_buffer_event *event)
 {
-	return filter_check_discard(call, rec, buffer, event);
+	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
+	    !filter_match_preds(file->filter, rec)) {
+		ring_buffer_discard_commit(buffer, event);
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(filter_check_discard);
+
+int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
+			      struct ring_buffer *buffer,
+			      struct ring_buffer_event *event)
+{
+	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
+	    !filter_match_preds(call->filter, rec)) {
+		ring_buffer_discard_commit(buffer, event);
+		return 1;
+	}
+
+	return 0;
 }
-EXPORT_SYMBOL_GPL(filter_current_check_discard);
+EXPORT_SYMBOL_GPL(call_filter_check_discard);
 
 cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 {
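The hunk above splits the old filter_current_check_discard() into two helpers: filter_check_discard(), which filters against the per-instance struct ftrace_event_file (this is what lets event filters follow the multibuffer instances mentioned in the pull message), and call_filter_check_discard(), which keeps the per-ftrace_event_call behaviour for the internal events in trace.c (the later hunks are the mechanical rename of those call sites). The sketch below is illustrative only; the helper names are hypothetical and it assumes the static helpers of kernel/trace/trace.c are in scope.

/* Illustrative only -- hypothetical helpers showing the intended split. */
static void commit_instance_event(struct ftrace_event_file *file,
				  struct ring_buffer *buffer,
				  struct ring_buffer_event *event,
				  void *entry)
{
	/* Per-instance events: the filter hangs off the ftrace_event_file. */
	if (!filter_check_discard(file, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

static void commit_global_event(struct ftrace_event_call *call,
				struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				void *entry)
{
	/* trace.c's own entries keep filtering on the ftrace_event_call. */
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}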
@@ -843,9 +863,12 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 	if (isspace(ch)) {
 		parser->buffer[parser->idx] = 0;
 		parser->cont = false;
-	} else {
+	} else if (parser->idx < parser->size - 1) {
 		parser->cont = true;
 		parser->buffer[parser->idx++] = ch;
+	} else {
+		ret = -EINVAL;
+		goto out;
 	}
 
 	*ppos += read;
@@ -1261,21 +1284,6 @@ int is_tracing_stopped(void)
 }
 
 /**
- * ftrace_off_permanent - disable all ftrace code permanently
- *
- * This should only be called when a serious anomally has
- * been detected. This will turn off the function tracing,
- * ring buffers, and other tracing utilites. It takes no
- * locks and can be called from any context.
- */
-void ftrace_off_permanent(void)
-{
-	tracing_disabled = 1;
-	ftrace_stop();
-	tracing_off_permanent();
-}
-
-/**
  * tracing_start - quick start of the tracer
  *
  * If tracing is enabled but was stopped by tracing_stop,
@@ -1631,7 +1639,7 @@ trace_function(struct trace_array *tr,
 	entry->ip = ip;
 	entry->parent_ip = parent_ip;
 
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
 }
 
@@ -1715,7 +1723,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 
 	entry->size = trace.nr_entries;
 
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
 
  out:
@@ -1817,7 +1825,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	trace.entries = entry->caller;
 
 	save_stack_trace_user(&trace);
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
 
  out_drop_count:
@@ -2009,7 +2017,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	entry->fmt = fmt;
 
 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
-	if (!filter_check_discard(call, entry, buffer, event)) {
+	if (!call_filter_check_discard(call, entry, buffer, event)) {
 		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
@@ -2064,7 +2072,7 @@ __trace_array_vprintk(struct ring_buffer *buffer,
 
 	memcpy(&entry->buf, tbuffer, len);
 	entry->buf[len] = '\0';
-	if (!filter_check_discard(call, entry, buffer, event)) {
+	if (!call_filter_check_discard(call, entry, buffer, event)) {
 		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
@@ -2761,7 +2769,7 @@ static void show_snapshot_main_help(struct seq_file *m)
 	seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
 	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
 	seq_printf(m, "# Takes a snapshot of the main buffer.\n");
-	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
+	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
 	seq_printf(m, "# (Doesn't have to be '2' works with any number that\n");
 	seq_printf(m, "# is not a '0' or '1')\n");
 }
@@ -2965,6 +2973,11 @@ int tracing_open_generic(struct inode *inode, struct file *filp)
 	return 0;
 }
 
+bool tracing_is_disabled(void)
+{
+	return (tracing_disabled) ? true: false;
+}
+
 /*
  * Open and update trace_array ref count.
  * Must have the current trace_array passed to it.
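The new tracing_is_disabled() above gives code outside trace.c a way to test the static tracing_disabled flag. A hedged sketch of the intended use follows; "example_open" is a hypothetical file_operations open callback mirroring what tracing_open_generic() does in this same file.

/* Illustrative only -- not part of this diff. */
static int example_open(struct inode *inode, struct file *filp)
{
	if (tracing_is_disabled())
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}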
@@ -5455,12 +5468,12 @@ static struct ftrace_func_command ftrace_snapshot_cmd = {
 	.func		= ftrace_trace_snapshot_callback,
 };
 
-static int register_snapshot_cmd(void)
+static __init int register_snapshot_cmd(void)
 {
 	return register_ftrace_command(&ftrace_snapshot_cmd);
 }
 #else
-static inline int register_snapshot_cmd(void) { return 0; }
+static inline __init int register_snapshot_cmd(void) { return 0; }
 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
 
 struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
@@ -6254,6 +6267,17 @@ void trace_init_global_iter(struct trace_iterator *iter)
 	iter->trace = iter->tr->current_trace;
 	iter->cpu_file = RING_BUFFER_ALL_CPUS;
 	iter->trace_buffer = &global_trace.trace_buffer;
+
+	if (iter->trace && iter->trace->open)
+		iter->trace->open(iter);
+
+	/* Annotate start of buffers if we had overruns */
+	if (ring_buffer_overruns(iter->trace_buffer->buffer))
+		iter->iter_flags |= TRACE_FILE_ANNOTATE;
+
+	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
+	if (trace_clocks[iter->tr->clock_id].in_ns)
+		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 }
 
 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)