author    Eric Paris <eparis@redhat.com>  2014-03-07 11:41:32 -0500
committer Eric Paris <eparis@redhat.com>  2014-03-07 11:41:32 -0500
commit    b7d3622a39fde7658170b7f3cf6c6889bb8db30d (patch)
tree      64f4e781ecb2a85d675e234072b988560bcd25f1 /kernel/trace/trace.c
parent    f3411cb2b2e396a41ed3a439863f028db7140a34 (diff)
parent    d8ec26d7f8287f5788a494f56e8814210f0e64be (diff)
Merge tag 'v3.13' into for-3.15

Linux 3.13

Conflicts:
	include/net/xfrm.h

Simple merge where v3.13 removed 'extern' from definitions and the
audit tree did s/u32/unsigned int/ to the same definitions.
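The shape of that conflict is easy to illustrate with a hypothetical declaration (an illustration only, not the actual include/net/xfrm.h content):

	/* common ancestor: */
	extern int xfrm_example_fn(struct net *net, u32 seq);
	/* v3.13 side, 'extern' removed: */
	int xfrm_example_fn(struct net *net, u32 seq);
	/* audit side, s/u32/unsigned int/: */
	extern int xfrm_example_fn(struct net *net, unsigned int seq);
	/* resolution keeps both edits: */
	int xfrm_example_fn(struct net *net, unsigned int seq);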
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--    kernel/trace/trace.c    85
1 file changed, 55 insertions(+), 30 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 7974ba20557d..9d20cd9743ef 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -235,13 +235,33 @@ void trace_array_put(struct trace_array *this_tr)
 	mutex_unlock(&trace_types_lock);
 }
 
-int filter_current_check_discard(struct ring_buffer *buffer,
-				 struct ftrace_event_call *call, void *rec,
-				 struct ring_buffer_event *event)
+int filter_check_discard(struct ftrace_event_file *file, void *rec,
+			 struct ring_buffer *buffer,
+			 struct ring_buffer_event *event)
+{
+	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
+	    !filter_match_preds(file->filter, rec)) {
+		ring_buffer_discard_commit(buffer, event);
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(filter_check_discard);
+
+int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
+			      struct ring_buffer *buffer,
+			      struct ring_buffer_event *event)
 {
-	return filter_check_discard(call, rec, buffer, event);
+	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
+	    !filter_match_preds(call->filter, rec)) {
+		ring_buffer_discard_commit(buffer, event);
+		return 1;
+	}
+
+	return 0;
 }
-EXPORT_SYMBOL_GPL(filter_current_check_discard);
+EXPORT_SYMBOL_GPL(call_filter_check_discard);
 
 cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 {
@@ -843,9 +863,12 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 	if (isspace(ch)) {
 		parser->buffer[parser->idx] = 0;
 		parser->cont = false;
-	} else {
+	} else if (parser->idx < parser->size - 1) {
 		parser->cont = true;
 		parser->buffer[parser->idx++] = ch;
+	} else {
+		ret = -EINVAL;
+		goto out;
 	}
 
 	*ppos += read;
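The new else-if closes a potential buffer overflow in trace_get_user(): characters are now stored only while parser->idx < parser->size - 1, reserving the last byte for the NUL terminator written on the isspace() path, and an over-long token fails with -EINVAL instead of writing past the buffer. A standalone sketch of the same guard (hypothetical size; not kernel code):

	#include <errno.h>

	#define PARSER_SIZE 8

	/* Store ch into buf; reject tokens that would overflow. */
	static int parser_store(char buf[PARSER_SIZE], int *idx, char ch)
	{
		if (*idx < PARSER_SIZE - 1) {	/* keep buf[PARSER_SIZE - 1] free for '\0' */
			buf[(*idx)++] = ch;
			return 0;
		}
		return -EINVAL;			/* token too long */
	}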
@@ -1261,21 +1284,6 @@ int is_tracing_stopped(void)
 }
 
 /**
- * ftrace_off_permanent - disable all ftrace code permanently
- *
- * This should only be called when a serious anomally has
- * been detected. This will turn off the function tracing,
- * ring buffers, and other tracing utilites. It takes no
- * locks and can be called from any context.
- */
-void ftrace_off_permanent(void)
-{
-	tracing_disabled = 1;
-	ftrace_stop();
-	tracing_off_permanent();
-}
-
-/**
  * tracing_start - quick start of the tracer
  *
  * If tracing is enabled but was stopped by tracing_stop,
@@ -1509,7 +1517,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 #endif
 		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
 		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
-		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
+		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
+		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
 }
 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
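The single need_resched() test becomes two finer-grained ones: tif_need_resched() reads the task's TIF_NEED_RESCHED flag, while test_preempt_need_resched() reads the copy folded into the preempt count on architectures that support it, and each state is recorded in its own trace flag. An illustrative decoder for the resulting flag pair (a sketch of how output code can distinguish the states, not code from this commit):

	/* '.': neither, 'n': TIF flag only, 'p': preempt count only, 'N': both */
	static char resched_char(unsigned long flags)
	{
		int tif = !!(flags & TRACE_FLAG_NEED_RESCHED);
		int pnr = !!(flags & TRACE_FLAG_PREEMPT_RESCHED);

		if (tif && pnr)
			return 'N';
		return tif ? 'n' : (pnr ? 'p' : '.');
	}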
@@ -1630,7 +1639,7 @@ trace_function(struct trace_array *tr,
 	entry->ip = ip;
 	entry->parent_ip = parent_ip;
 
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
 }
 
@@ -1714,7 +1723,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 
 	entry->size = trace.nr_entries;
 
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
 
  out:
@@ -1816,7 +1825,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	trace.entries = entry->caller;
 
 	save_stack_trace_user(&trace);
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
 
  out_drop_count:
@@ -2008,7 +2017,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	entry->fmt = fmt;
 
 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
-	if (!filter_check_discard(call, entry, buffer, event)) {
+	if (!call_filter_check_discard(call, entry, buffer, event)) {
 		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
@@ -2063,7 +2072,7 @@ __trace_array_vprintk(struct ring_buffer *buffer,
 
 	memcpy(&entry->buf, tbuffer, len);
 	entry->buf[len] = '\0';
-	if (!filter_check_discard(call, entry, buffer, event)) {
+	if (!call_filter_check_discard(call, entry, buffer, event)) {
 		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
@@ -2760,7 +2769,7 @@ static void show_snapshot_main_help(struct seq_file *m)
 	seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
 	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
 	seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
-	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
+	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
 	seq_printf(m, "#                      (Doesn't have to be '2' works with any number that\n");
 	seq_printf(m, "#                       is not a '0' or '1')\n");
 }
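The clarified help text maps directly onto shell usage of the snapshot control file (path assumes debugfs is mounted at /sys/kernel/debug):

	echo 1 > /sys/kernel/debug/tracing/snapshot   # allocate if needed, take a snapshot
	cat /sys/kernel/debug/tracing/snapshot        # read the captured snapshot
	echo 2 > /sys/kernel/debug/tracing/snapshot   # clear the snapshot; buffer stays allocated
	echo 0 > /sys/kernel/debug/tracing/snapshot   # clear and free the snapshot buffer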
@@ -2964,6 +2973,11 @@ int tracing_open_generic(struct inode *inode, struct file *filp)
 	return 0;
 }
 
+bool tracing_is_disabled(void)
+{
+	return (tracing_disabled) ? true: false;
+}
+
 /*
  * Open and update trace_array ref count.
  * Must have the current trace_array passed to it.
@@ -5454,12 +5468,12 @@ static struct ftrace_func_command ftrace_snapshot_cmd = {
 	.func = ftrace_trace_snapshot_callback,
 };
 
-static int register_snapshot_cmd(void)
+static __init int register_snapshot_cmd(void)
 {
 	return register_ftrace_command(&ftrace_snapshot_cmd);
 }
 #else
-static inline int register_snapshot_cmd(void) { return 0; }
+static inline __init int register_snapshot_cmd(void) { return 0; }
 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
 
 struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
@@ -6253,6 +6267,17 @@ void trace_init_global_iter(struct trace_iterator *iter)
 	iter->trace = iter->tr->current_trace;
 	iter->cpu_file = RING_BUFFER_ALL_CPUS;
 	iter->trace_buffer = &global_trace.trace_buffer;
+
+	if (iter->trace && iter->trace->open)
+		iter->trace->open(iter);
+
+	/* Annotate start of buffers if we had overruns */
+	if (ring_buffer_overruns(iter->trace_buffer->buffer))
+		iter->iter_flags |= TRACE_FILE_ANNOTATE;
+
+	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
+	if (trace_clocks[iter->tr->clock_id].in_ns)
+		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 }
 
 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)