author	Tom Zanussi <tom.zanussi@linux.intel.com>	2013-10-24 09:34:17 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2013-11-05 16:50:20 -0500
commit	f306cc82a93d6b19f01634b80c580b9755c8b7cc (patch)
tree	1a9ef8d44ed192185e4d6da5f7154f9a1c2075cb /kernel/trace/trace.c
parent	f02b625d0341519238ab3d9cc8706ff4bd45fb89 (diff)
tracing: Update event filters for multibuffer
The trace event filters are still tied to event calls rather than event files, which means you don't get what you'd expect when using filters in the multibuffer case:

Before:

  # echo 'bytes_alloc > 8192' > /sys/kernel/debug/tracing/events/kmem/kmalloc/filter
  # cat /sys/kernel/debug/tracing/events/kmem/kmalloc/filter
  bytes_alloc > 8192
  # mkdir /sys/kernel/debug/tracing/instances/test1
  # echo 'bytes_alloc > 2048' > /sys/kernel/debug/tracing/instances/test1/events/kmem/kmalloc/filter
  # cat /sys/kernel/debug/tracing/events/kmem/kmalloc/filter
  bytes_alloc > 2048
  # cat /sys/kernel/debug/tracing/instances/test1/events/kmem/kmalloc/filter
  bytes_alloc > 2048

Setting the filter in tracing/instances/test1/events shouldn't affect the same event in tracing/events as it does above.

After:

  # echo 'bytes_alloc > 8192' > /sys/kernel/debug/tracing/events/kmem/kmalloc/filter
  # cat /sys/kernel/debug/tracing/events/kmem/kmalloc/filter
  bytes_alloc > 8192
  # mkdir /sys/kernel/debug/tracing/instances/test1
  # echo 'bytes_alloc > 2048' > /sys/kernel/debug/tracing/instances/test1/events/kmem/kmalloc/filter
  # cat /sys/kernel/debug/tracing/events/kmem/kmalloc/filter
  bytes_alloc > 8192
  # cat /sys/kernel/debug/tracing/instances/test1/events/kmem/kmalloc/filter
  bytes_alloc > 2048

We'd like to just move the filter directly from ftrace_event_call to ftrace_event_file, but there are a couple cases that don't yet have multibuffer support and therefore have to continue using the current event_call-based filters. For those cases, a new USE_CALL_FILTER bit is added to the event_call flags, whose main purpose is to keep the old behavior for those cases until they can be updated with multibuffer support; at that point, the USE_CALL_FILTER flag (and the new associated call_filter_check_discard() function) can go away.

The multibuffer support also made filter_current_check_discard() redundant, so this change removes that function as well and replaces it with filter_check_discard() (or call_filter_check_discard() as appropriate).

Link: http://lkml.kernel.org/r/f16e9ce4270c62f46b2e966119225e1c3cca7e60.1382620672.git.tom.zanussi@linux.intel.com

Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
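[Editor's note] To make the API shift concrete, here is a minimal caller-side sketch. It is illustrative, not code from this patch: 'event_call', 'ftrace_file', 'entry', 'irq_flags' and 'pc' stand in for the usual locals of an event probe function.

	/* Before: one filter per event call, shared by every trace buffer
	 * that records this event. */
	if (!filter_current_check_discard(buffer, event_call, entry, event))
		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);

	/* After: one filter per event file, so each instance (e.g.
	 * instances/test1 above) can filter the same event differently. */
	if (!filter_check_discard(ftrace_file, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);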
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c | 40 ++++++++++++++++++++++++++++++++----------
1 file changed, 30 insertions(+), 10 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 063a92bad578..489da8b19f30 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -235,13 +235,33 @@ void trace_array_put(struct trace_array *this_tr)
 	mutex_unlock(&trace_types_lock);
 }
 
-int filter_current_check_discard(struct ring_buffer *buffer,
-				 struct ftrace_event_call *call, void *rec,
-				 struct ring_buffer_event *event)
+int filter_check_discard(struct ftrace_event_file *file, void *rec,
+			 struct ring_buffer *buffer,
+			 struct ring_buffer_event *event)
 {
-	return filter_check_discard(call, rec, buffer, event);
+	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
+	    !filter_match_preds(file->filter, rec)) {
+		ring_buffer_discard_commit(buffer, event);
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(filter_check_discard);
+
+int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
+			      struct ring_buffer *buffer,
+			      struct ring_buffer_event *event)
+{
+	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
+	    !filter_match_preds(call->filter, rec)) {
+		ring_buffer_discard_commit(buffer, event);
+		return 1;
+	}
+
+	return 0;
 }
-EXPORT_SYMBOL_GPL(filter_current_check_discard);
+EXPORT_SYMBOL_GPL(call_filter_check_discard);
 
 cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 {
@@ -1633,7 +1653,7 @@ trace_function(struct trace_array *tr,
 	entry->ip			= ip;
 	entry->parent_ip		= parent_ip;
 
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
 }
 
@@ -1717,7 +1737,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 
 	entry->size = trace.nr_entries;
 
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
 
  out:
@@ -1819,7 +1839,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	trace.entries		= entry->caller;
 
 	save_stack_trace_user(&trace);
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
 
  out_drop_count:
@@ -2011,7 +2031,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	entry->fmt			= fmt;
 
 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
-	if (!filter_check_discard(call, entry, buffer, event)) {
+	if (!call_filter_check_discard(call, entry, buffer, event)) {
 		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
@@ -2066,7 +2086,7 @@ __trace_array_vprintk(struct ring_buffer *buffer,
 
 	memcpy(&entry->buf, tbuffer, len);
 	entry->buf[len] = '\0';
-	if (!filter_check_discard(call, entry, buffer, event)) {
+	if (!call_filter_check_discard(call, entry, buffer, event)) {
 		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}