-rw-r--r--   Documentation/trace/function-graph-fold.vim   42
-rw-r--r--   include/linux/ftrace_event.h                   10
-rw-r--r--   include/linux/tracepoint.h                     12
-rw-r--r--   include/trace/define_trace.h                    1
-rw-r--r--   include/trace/events/sched.h                   12
-rw-r--r--   include/trace/ftrace.h                         31
-rw-r--r--   kernel/trace/trace.c                           92
-rw-r--r--   kernel/trace/trace.h                            9
-rw-r--r--   kernel/trace/trace_events.c                    11
-rw-r--r--   kernel/trace/trace_events_filter.c             47
-rw-r--r--   kernel/trace/trace_export.c                     8
-rw-r--r--   kernel/trace/trace_syscalls.c                   6
12 files changed, 226 insertions(+), 55 deletions(-)
diff --git a/Documentation/trace/function-graph-fold.vim b/Documentation/trace/function-graph-fold.vim
new file mode 100644
index 000000000000..0544b504c8b0
--- /dev/null
+++ b/Documentation/trace/function-graph-fold.vim
@@ -0,0 +1,42 @@
1" Enable folding for ftrace function_graph traces.
2"
3" To use, :source this file while viewing a function_graph trace, or use vim's
4" -S option to load from the command-line together with a trace. You can then
5" use the usual vim fold commands, such as "za", to open and close nested
6" functions. While closed, a fold will show the total time taken for a call,
7" as would normally appear on the line with the closing brace. Folded
8" functions will not include finish_task_switch(), so folding should remain
9" relatively sane even through a context switch.
10"
11" Note that this will almost certainly only work well with a
12" single-CPU trace (e.g. trace-cmd report --cpu 1).
13
14function! FunctionGraphFoldExpr(lnum)
15 let line = getline(a:lnum)
16 if line[-1:] == '{'
17 if line =~ 'finish_task_switch() {$'
18 return '>1'
19 endif
20 return 'a1'
21 elseif line[-1:] == '}'
22 return 's1'
23 else
24 return '='
25 endif
26endfunction
27
28function! FunctionGraphFoldText()
29 let s = split(getline(v:foldstart), '|', 1)
30 if getline(v:foldend+1) =~ 'finish_task_switch() {$'
31 let s[2] = ' task switch '
32 else
33 let e = split(getline(v:foldend), '|', 1)
34 let s[2] = e[2]
35 endif
36 return join(s, '|')
37endfunction
38
39setlocal foldexpr=FunctionGraphFoldExpr(v:lnum)
40setlocal foldtext=FunctionGraphFoldText()
41setlocal foldcolumn=12
42setlocal foldmethod=expr
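
For readers who do not use vim folds often, the values returned by FunctionGraphFoldExpr() are standard 'foldexpr' codes; the script maps trace lines onto them as follows (this summary is mine, derived from the script above):

    '>1'  start a fresh level-1 fold   - entry lines for finish_task_switch()
    'a1'  add one fold level           - any other line ending in '{'
    's1'  subtract one fold level      - lines ending in '}'
    '='   keep the previous level      - everything else

So each traced call opens a nested fold that its closing brace ends, and a context switch always begins a new top-level fold. When a fold is closed, FunctionGraphFoldText() splices the corresponding column from the fold's closing line into its first line, so the total time reported at the closing brace stays visible, and it labels the fold ' task switch ' when the fold was cut short by a context switch (i.e. the line after the fold is a finish_task_switch() entry).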
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index df5b085c4150..ace2da9e0a0d 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -140,9 +140,17 @@ extern int filter_current_check_discard(struct ftrace_event_call *call,
140 void *rec, 140 void *rec,
141 struct ring_buffer_event *event); 141 struct ring_buffer_event *event);
142 142
143enum {
144 FILTER_OTHER = 0,
145 FILTER_STATIC_STRING,
146 FILTER_DYN_STRING,
147 FILTER_PTR_STRING,
148};
149
143extern int trace_define_field(struct ftrace_event_call *call, 150extern int trace_define_field(struct ftrace_event_call *call,
144 const char *type, const char *name, 151 const char *type, const char *name,
145 int offset, int size, int is_signed); 152 int offset, int size, int is_signed,
153 int filter_type);
146extern int trace_define_common_fields(struct ftrace_event_call *call); 154extern int trace_define_common_fields(struct ftrace_event_call *call);
147 155
148#define is_signed_type(type) (((type)(-1)) < 0) 156#define is_signed_type(type) (((type)(-1)) < 0)
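
The enum and the widened trace_define_field() prototype are the API surface the rest of the patch builds on. A minimal sketch of a caller, assuming a hypothetical event (struct my_event_entry, the field names, and my_event_define_fields() are invented for illustration; only the prototype and the FILTER_* values come from this header):

static int my_event_define_fields(struct ftrace_event_call *call)
{
	int ret;

	/* An ordinary integer: FILTER_OTHER lets the filter core infer
	 * the comparison from the type string. */
	ret = trace_define_field(call, "int", "nr_pages",
				 offsetof(struct my_event_entry, nr_pages),
				 sizeof(int), is_signed_type(int),
				 FILTER_OTHER);
	if (ret)
		return ret;

	/* A char * that points at a NUL-terminated string: string
	 * behaviour cannot be inferred from "char *" alone, so it is
	 * requested explicitly. */
	return trace_define_field(call, "char *", "name",
				  offsetof(struct my_event_entry, name),
				  sizeof(char *), 0, FILTER_PTR_STRING);
}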
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 846a4ae501eb..63a3f7a80580 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -165,6 +165,15 @@ static inline void tracepoint_synchronize_unregister(void)
165 165
166#define PARAMS(args...) args 166#define PARAMS(args...) args
167 167
168#endif /* _LINUX_TRACEPOINT_H */
169
170/*
171 * Note: we keep the TRACE_EVENT outside the include file ifdef protection.
172 * This is due to the way trace events work. If a file includes two
173 * trace event headers under one "CREATE_TRACE_POINTS" the first include
174 * will override the TRACE_EVENT and break the second include.
175 */
176
168#ifndef TRACE_EVENT 177#ifndef TRACE_EVENT
169/* 178/*
170 * For use with the TRACE_EVENT macro: 179 * For use with the TRACE_EVENT macro:
@@ -276,6 +285,5 @@ static inline void tracepoint_synchronize_unregister(void)
276#define TRACE_EVENT_FN(name, proto, args, struct, \ 285#define TRACE_EVENT_FN(name, proto, args, struct, \
277 assign, print, reg, unreg) \ 286 assign, print, reg, unreg) \
278 DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) 287 DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
279#endif
280 288
281#endif 289#endif /* ifdef TRACE_EVENT (see note above) */
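
The pattern the new comment is protecting looks roughly like the sketch below: one translation unit defines CREATE_TRACE_POINTS and then pulls in more than one event header (the second header name here is made up). Each event header ends by including define_trace.h, which now tears TRACE_EVENT down again, which is why tracepoint.h must be able to re-define it on every inclusion, outside its normal include guard, and why the matching #undef is added to define_trace.h just below.

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>    /* first header: TRACE_EVENT gets used,
                                    * then undefined by define_trace.h */
#include <trace/events/myblock.h>  /* hypothetical second header: only works
                                    * because linux/tracepoint.h re-defines
                                    * TRACE_EVENT outside its include guard */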
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index 2a969850736d..a89ed590597a 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -61,6 +61,7 @@
61#include <trace/ftrace.h> 61#include <trace/ftrace.h>
62#endif 62#endif
63 63
64#undef TRACE_EVENT
64#undef TRACE_HEADER_MULTI_READ 65#undef TRACE_HEADER_MULTI_READ
65 66
66/* Only undef what we defined in this file */ 67/* Only undef what we defined in this file */
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 8949bb7eb082..a581ef211ff5 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -94,6 +94,7 @@ TRACE_EVENT(sched_wakeup,
94 __field( pid_t, pid ) 94 __field( pid_t, pid )
95 __field( int, prio ) 95 __field( int, prio )
96 __field( int, success ) 96 __field( int, success )
97 __field( int, cpu )
97 ), 98 ),
98 99
99 TP_fast_assign( 100 TP_fast_assign(
@@ -101,11 +102,12 @@ TRACE_EVENT(sched_wakeup,
101 __entry->pid = p->pid; 102 __entry->pid = p->pid;
102 __entry->prio = p->prio; 103 __entry->prio = p->prio;
103 __entry->success = success; 104 __entry->success = success;
105 __entry->cpu = task_cpu(p);
104 ), 106 ),
105 107
106 TP_printk("task %s:%d [%d] success=%d", 108 TP_printk("task %s:%d [%d] success=%d [%03d]",
107 __entry->comm, __entry->pid, __entry->prio, 109 __entry->comm, __entry->pid, __entry->prio,
108 __entry->success) 110 __entry->success, __entry->cpu)
109); 111);
110 112
111/* 113/*
@@ -125,6 +127,7 @@ TRACE_EVENT(sched_wakeup_new,
125 __field( pid_t, pid ) 127 __field( pid_t, pid )
126 __field( int, prio ) 128 __field( int, prio )
127 __field( int, success ) 129 __field( int, success )
130 __field( int, cpu )
128 ), 131 ),
129 132
130 TP_fast_assign( 133 TP_fast_assign(
@@ -132,11 +135,12 @@ TRACE_EVENT(sched_wakeup_new,
132 __entry->pid = p->pid; 135 __entry->pid = p->pid;
133 __entry->prio = p->prio; 136 __entry->prio = p->prio;
134 __entry->success = success; 137 __entry->success = success;
138 __entry->cpu = task_cpu(p);
135 ), 139 ),
136 140
137 TP_printk("task %s:%d [%d] success=%d", 141 TP_printk("task %s:%d [%d] success=%d [%03d]",
138 __entry->comm, __entry->pid, __entry->prio, 142 __entry->comm, __entry->pid, __entry->prio,
139 __entry->success) 143 __entry->success, __entry->cpu)
140); 144);
141 145
142/* 146/*
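
Both wakeup events now record the target CPU as a proper field, so it appears in the printed line and can be used in event filter expressions like any other field (for example cpu == 3). An invented example of the new output, with made-up task name, pid, priority, and CPU, just to show where the extra column lands:

    task bash:2145 [120] success=1 [003]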
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 3a0b44bdabf7..360a77ad79e1 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -21,6 +21,9 @@
21#undef __field 21#undef __field
22#define __field(type, item) type item; 22#define __field(type, item) type item;
23 23
24#undef __field_ext
25#define __field_ext(type, item, filter_type) type item;
26
24#undef __array 27#undef __array
25#define __array(type, item, len) type item[len]; 28#define __array(type, item, len) type item[len];
26 29
@@ -71,7 +74,10 @@
71 */ 74 */
72 75
73#undef __field 76#undef __field
74#define __field(type, item); 77#define __field(type, item)
78
79#undef __field_ext
80#define __field_ext(type, item, filter_type)
75 81
76#undef __array 82#undef __array
77#define __array(type, item, len) 83#define __array(type, item, len)
@@ -119,6 +125,9 @@
119 if (!ret) \ 125 if (!ret) \
120 return 0; 126 return 0;
121 127
128#undef __field_ext
129#define __field_ext(type, item, filter_type) __field(type, item)
130
122#undef __array 131#undef __array
123#define __array(type, item, len) \ 132#define __array(type, item, len) \
124 ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ 133 ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
@@ -274,28 +283,33 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
274 283
275#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 284#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
276 285
277#undef __field 286#undef __field_ext
278#define __field(type, item) \ 287#define __field_ext(type, item, filter_type) \
279 ret = trace_define_field(event_call, #type, #item, \ 288 ret = trace_define_field(event_call, #type, #item, \
280 offsetof(typeof(field), item), \ 289 offsetof(typeof(field), item), \
281 sizeof(field.item), is_signed_type(type)); \ 290 sizeof(field.item), \
291 is_signed_type(type), filter_type); \
282 if (ret) \ 292 if (ret) \
283 return ret; 293 return ret;
284 294
295#undef __field
296#define __field(type, item) __field_ext(type, item, FILTER_OTHER)
297
285#undef __array 298#undef __array
286#define __array(type, item, len) \ 299#define __array(type, item, len) \
287 BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ 300 BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
288 ret = trace_define_field(event_call, #type "[" #len "]", #item, \ 301 ret = trace_define_field(event_call, #type "[" #len "]", #item, \
289 offsetof(typeof(field), item), \ 302 offsetof(typeof(field), item), \
290 sizeof(field.item), 0); \ 303 sizeof(field.item), 0, FILTER_OTHER); \
291 if (ret) \ 304 if (ret) \
292 return ret; 305 return ret;
293 306
294#undef __dynamic_array 307#undef __dynamic_array
295#define __dynamic_array(type, item, len) \ 308#define __dynamic_array(type, item, len) \
296 ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \ 309 ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
297 offsetof(typeof(field), __data_loc_##item), \ 310 offsetof(typeof(field), __data_loc_##item), \
298 sizeof(field.__data_loc_##item), 0); 311 sizeof(field.__data_loc_##item), 0, \
312 FILTER_OTHER);
299 313
300#undef __string 314#undef __string
301#define __string(item, src) __dynamic_array(char, item, -1) 315#define __string(item, src) __dynamic_array(char, item, -1)
@@ -329,6 +343,9 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
329#undef __field 343#undef __field
330#define __field(type, item) 344#define __field(type, item)
331 345
346#undef __field_ext
347#define __field_ext(type, item, filter_type)
348
332#undef __array 349#undef __array
333#define __array(type, item, len) 350#define __array(type, item, len)
334 351
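
At the field-definition stage __field_ext() forwards its third argument to trace_define_field(); in every other stage it degenerates to __field(). A hypothetical TRACE_EVENT showing where it would be used (the event name, arguments, and fields are invented; the macro layout follows the definitions above, and storing a raw char * in an entry is done here purely to illustrate FILTER_PTR_STRING):

TRACE_EVENT(myfs_open,

	TP_PROTO(const char *name, int flags),

	TP_ARGS(name, flags),

	TP_STRUCT__entry(
		__field(	int,		flags	)
		/* char * treated as a C string by the event filter */
		__field_ext(	const char *,	name,	FILTER_PTR_STRING )
	),

	TP_fast_assign(
		__entry->flags	= flags;
		__entry->name	= name;
	),

	TP_printk("name=%s flags=0x%x", __entry->name, __entry->flags)
);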
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8ac204360a39..63dbc7ff213f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -323,12 +323,21 @@ static const char *trace_options[] = {
323 "printk-msg-only", 323 "printk-msg-only",
324 "context-info", 324 "context-info",
325 "latency-format", 325 "latency-format",
326 "global-clock",
327 "sleep-time", 326 "sleep-time",
328 "graph-time", 327 "graph-time",
329 NULL 328 NULL
330}; 329};
331 330
331static struct {
332 u64 (*func)(void);
333 const char *name;
334} trace_clocks[] = {
335 { trace_clock_local, "local" },
336 { trace_clock_global, "global" },
337};
338
339int trace_clock_id;
340
332/* 341/*
333 * ftrace_max_lock is used to protect the swapping of buffers 342 * ftrace_max_lock is used to protect the swapping of buffers
334 * when taking a max snapshot. The buffers themselves are 343 * when taking a max snapshot. The buffers themselves are
@@ -2159,22 +2168,6 @@ static void set_tracer_flags(unsigned int mask, int enabled)
2159 trace_flags |= mask; 2168 trace_flags |= mask;
2160 else 2169 else
2161 trace_flags &= ~mask; 2170 trace_flags &= ~mask;
2162
2163 if (mask == TRACE_ITER_GLOBAL_CLK) {
2164 u64 (*func)(void);
2165
2166 if (enabled)
2167 func = trace_clock_global;
2168 else
2169 func = trace_clock_local;
2170
2171 mutex_lock(&trace_types_lock);
2172 ring_buffer_set_clock(global_trace.buffer, func);
2173
2174 if (max_tr.buffer)
2175 ring_buffer_set_clock(max_tr.buffer, func);
2176 mutex_unlock(&trace_types_lock);
2177 }
2178} 2171}
2179 2172
2180static ssize_t 2173static ssize_t
@@ -3142,6 +3135,62 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
3142 return cnt; 3135 return cnt;
3143} 3136}
3144 3137
3138static ssize_t tracing_clock_read(struct file *filp, char __user *ubuf,
3139 size_t cnt, loff_t *ppos)
3140{
3141 char buf[64];
3142 int bufiter = 0;
3143 int i;
3144
3145 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
3146 bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter,
3147 "%s%s%s%s", i ? " " : "",
3148 i == trace_clock_id ? "[" : "", trace_clocks[i].name,
3149 i == trace_clock_id ? "]" : "");
3150 bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, "\n");
3151
3152 return simple_read_from_buffer(ubuf, cnt, ppos, buf, bufiter);
3153}
3154
3155static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
3156 size_t cnt, loff_t *fpos)
3157{
3158 char buf[64];
3159 const char *clockstr;
3160 int i;
3161
3162 if (cnt >= sizeof(buf))
3163 return -EINVAL;
3164
3165 if (copy_from_user(&buf, ubuf, cnt))
3166 return -EFAULT;
3167
3168 buf[cnt] = 0;
3169
3170 clockstr = strstrip(buf);
3171
3172 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
3173 if (strcmp(trace_clocks[i].name, clockstr) == 0)
3174 break;
3175 }
3176 if (i == ARRAY_SIZE(trace_clocks))
3177 return -EINVAL;
3178
3179 trace_clock_id = i;
3180
3181 mutex_lock(&trace_types_lock);
3182
3183 ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func);
3184 if (max_tr.buffer)
3185 ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
3186
3187 mutex_unlock(&trace_types_lock);
3188
3189 *fpos += cnt;
3190
3191 return cnt;
3192}
3193
3145static const struct file_operations tracing_max_lat_fops = { 3194static const struct file_operations tracing_max_lat_fops = {
3146 .open = tracing_open_generic, 3195 .open = tracing_open_generic,
3147 .read = tracing_max_lat_read, 3196 .read = tracing_max_lat_read,
@@ -3179,6 +3228,12 @@ static const struct file_operations tracing_mark_fops = {
3179 .write = tracing_mark_write, 3228 .write = tracing_mark_write,
3180}; 3229};
3181 3230
3231static const struct file_operations trace_clock_fops = {
3232 .open = tracing_open_generic,
3233 .read = tracing_clock_read,
3234 .write = tracing_clock_write,
3235};
3236
3182struct ftrace_buffer_info { 3237struct ftrace_buffer_info {
3183 struct trace_array *tr; 3238 struct trace_array *tr;
3184 void *spare; 3239 void *spare;
@@ -3918,6 +3973,9 @@ static __init int tracer_init_debugfs(void)
3918 trace_create_file("saved_cmdlines", 0444, d_tracer, 3973 trace_create_file("saved_cmdlines", 0444, d_tracer,
3919 NULL, &tracing_saved_cmdlines_fops); 3974 NULL, &tracing_saved_cmdlines_fops);
3920 3975
3976 trace_create_file("trace_clock", 0644, d_tracer, NULL,
3977 &trace_clock_fops);
3978
3921#ifdef CONFIG_DYNAMIC_FTRACE 3979#ifdef CONFIG_DYNAMIC_FTRACE
3922 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, 3980 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
3923 &ftrace_update_tot_cnt, &tracing_dyn_info_fops); 3981 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
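
Instead of the old global-clock option bit, the clock is now chosen by name through the new trace_clock file: reading it lists the known clocks with the current one bracketed (e.g. "[local] global"), and writing one of the names points both the main and the max-latency ring buffers at that clock. Because the choices live in a table, adding another clock becomes a one-line change; a sketch of what that could look like (trace_clock_counter and its use of sched_clock() are invented here, not part of this patch):

/* Sketch: offering a third clock through the same trace_clock file. */
static u64 trace_clock_counter(void)
{
	return sched_clock();	/* any monotonic 64-bit source would do */
}

static struct {
	u64 (*func)(void);
	const char *name;
} trace_clocks[] = {
	{ trace_clock_local,	"local"   },
	{ trace_clock_global,	"global"  },
	{ trace_clock_counter,	"counter" },	/* hypothetical extra entry */
};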
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 300ef788c976..654fd657bd03 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -568,6 +568,8 @@ trace_vprintk(unsigned long ip, const char *fmt, va_list args);
568 568
569extern unsigned long trace_flags; 569extern unsigned long trace_flags;
570 570
571extern int trace_clock_id;
572
571/* Standard output formatting function used for function return traces */ 573/* Standard output formatting function used for function return traces */
572#ifdef CONFIG_FUNCTION_GRAPH_TRACER 574#ifdef CONFIG_FUNCTION_GRAPH_TRACER
573extern enum print_line_t print_graph_function(struct trace_iterator *iter); 575extern enum print_line_t print_graph_function(struct trace_iterator *iter);
@@ -656,9 +658,8 @@ enum trace_iterator_flags {
656 TRACE_ITER_PRINTK_MSGONLY = 0x10000, 658 TRACE_ITER_PRINTK_MSGONLY = 0x10000,
657 TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */ 659 TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */
658 TRACE_ITER_LATENCY_FMT = 0x40000, 660 TRACE_ITER_LATENCY_FMT = 0x40000,
659 TRACE_ITER_GLOBAL_CLK = 0x80000, 661 TRACE_ITER_SLEEP_TIME = 0x80000,
660 TRACE_ITER_SLEEP_TIME = 0x100000, 662 TRACE_ITER_GRAPH_TIME = 0x100000,
661 TRACE_ITER_GRAPH_TIME = 0x200000,
662}; 663};
663 664
664/* 665/*
@@ -755,6 +756,7 @@ struct ftrace_event_field {
755 struct list_head link; 756 struct list_head link;
756 char *name; 757 char *name;
757 char *type; 758 char *type;
759 int filter_type;
758 int offset; 760 int offset;
759 int size; 761 int size;
760 int is_signed; 762 int is_signed;
@@ -800,6 +802,7 @@ extern int apply_subsystem_event_filter(struct event_subsystem *system,
800 char *filter_string); 802 char *filter_string);
801extern void print_subsystem_event_filter(struct event_subsystem *system, 803extern void print_subsystem_event_filter(struct event_subsystem *system,
802 struct trace_seq *s); 804 struct trace_seq *s);
805extern int filter_assign_type(const char *type);
803 806
804static inline int 807static inline int
805filter_check_discard(struct ftrace_event_call *call, void *rec, 808filter_check_discard(struct ftrace_event_call *call, void *rec,
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 79d352027a61..d33bcdeffe69 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -28,7 +28,8 @@ DEFINE_MUTEX(event_mutex);
28LIST_HEAD(ftrace_events); 28LIST_HEAD(ftrace_events);
29 29
30int trace_define_field(struct ftrace_event_call *call, const char *type, 30int trace_define_field(struct ftrace_event_call *call, const char *type,
31 const char *name, int offset, int size, int is_signed) 31 const char *name, int offset, int size, int is_signed,
32 int filter_type)
32{ 33{
33 struct ftrace_event_field *field; 34 struct ftrace_event_field *field;
34 35
@@ -44,9 +45,15 @@ int trace_define_field(struct ftrace_event_call *call, const char *type,
44 if (!field->type) 45 if (!field->type)
45 goto err; 46 goto err;
46 47
48 if (filter_type == FILTER_OTHER)
49 field->filter_type = filter_assign_type(type);
50 else
51 field->filter_type = filter_type;
52
47 field->offset = offset; 53 field->offset = offset;
48 field->size = size; 54 field->size = size;
49 field->is_signed = is_signed; 55 field->is_signed = is_signed;
56
50 list_add(&field->link, &call->fields); 57 list_add(&field->link, &call->fields);
51 58
52 return 0; 59 return 0;
@@ -66,7 +73,7 @@ EXPORT_SYMBOL_GPL(trace_define_field);
66 ret = trace_define_field(call, #type, "common_" #item, \ 73 ret = trace_define_field(call, #type, "common_" #item, \
67 offsetof(typeof(ent), item), \ 74 offsetof(typeof(ent), item), \
68 sizeof(ent.item), \ 75 sizeof(ent.item), \
69 is_signed_type(type)); \ 76 is_signed_type(type), FILTER_OTHER); \
70 if (ret) \ 77 if (ret) \
71 return ret; 78 return ret;
72 79
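
Passing FILTER_OTHER keeps the old behaviour for every existing caller: the type string is handed to filter_assign_type(), and the usual cases fall out of the name alone. Roughly what that classification gives (derived from the filter_assign_type() body shown in trace_events_filter.c below):

/* Fallback classification by type string: */
filter_assign_type("__data_loc char[]");   /* -> FILTER_DYN_STRING    */
filter_assign_type("char[16]");            /* -> FILTER_STATIC_STRING */
filter_assign_type("int");                 /* -> FILTER_OTHER         */
/* FILTER_PTR_STRING is never inferred from the type name; it has to be
 * requested explicitly through the new filter_type argument. */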
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 490337abed75..9f03082c81d8 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -163,6 +163,20 @@ static int filter_pred_string(struct filter_pred *pred, void *event,
163 return match; 163 return match;
164} 164}
165 165
166/* Filter predicate for char * pointers */
167static int filter_pred_pchar(struct filter_pred *pred, void *event,
168 int val1, int val2)
169{
170 char **addr = (char **)(event + pred->offset);
171 int cmp, match;
172
173 cmp = strncmp(*addr, pred->str_val, pred->str_len);
174
175 match = (!cmp) ^ pred->not;
176
177 return match;
178}
179
166/* 180/*
167 * Filter predicate for dynamic sized arrays of characters. 181 * Filter predicate for dynamic sized arrays of characters.
168 * These are implemented through a list of strings at the end 182 * These are implemented through a list of strings at the end
@@ -475,12 +489,7 @@ static int filter_add_pred_fn(struct filter_parse_state *ps,
475 return 0; 489 return 0;
476} 490}
477 491
478enum { 492int filter_assign_type(const char *type)
479 FILTER_STATIC_STRING = 1,
480 FILTER_DYN_STRING
481};
482
483static int is_string_field(const char *type)
484{ 493{
485 if (strstr(type, "__data_loc") && strstr(type, "char")) 494 if (strstr(type, "__data_loc") && strstr(type, "char"))
486 return FILTER_DYN_STRING; 495 return FILTER_DYN_STRING;
@@ -488,12 +497,19 @@ static int is_string_field(const char *type)
488 if (strchr(type, '[') && strstr(type, "char")) 497 if (strchr(type, '[') && strstr(type, "char"))
489 return FILTER_STATIC_STRING; 498 return FILTER_STATIC_STRING;
490 499
491 return 0; 500 return FILTER_OTHER;
501}
502
503static bool is_string_field(struct ftrace_event_field *field)
504{
505 return field->filter_type == FILTER_DYN_STRING ||
506 field->filter_type == FILTER_STATIC_STRING ||
507 field->filter_type == FILTER_PTR_STRING;
492} 508}
493 509
494static int is_legal_op(struct ftrace_event_field *field, int op) 510static int is_legal_op(struct ftrace_event_field *field, int op)
495{ 511{
496 if (is_string_field(field->type) && (op != OP_EQ && op != OP_NE)) 512 if (is_string_field(field) && (op != OP_EQ && op != OP_NE))
497 return 0; 513 return 0;
498 514
499 return 1; 515 return 1;
@@ -550,7 +566,6 @@ static int filter_add_pred(struct filter_parse_state *ps,
550 struct ftrace_event_field *field; 566 struct ftrace_event_field *field;
551 filter_pred_fn_t fn; 567 filter_pred_fn_t fn;
552 unsigned long long val; 568 unsigned long long val;
553 int string_type;
554 int ret; 569 int ret;
555 570
556 pred->fn = filter_pred_none; 571 pred->fn = filter_pred_none;
@@ -578,13 +593,17 @@ static int filter_add_pred(struct filter_parse_state *ps,
578 return -EINVAL; 593 return -EINVAL;
579 } 594 }
580 595
581 string_type = is_string_field(field->type); 596 if (is_string_field(field)) {
582 if (string_type) { 597 pred->str_len = field->size;
583 if (string_type == FILTER_STATIC_STRING) 598
599 if (field->filter_type == FILTER_STATIC_STRING)
584 fn = filter_pred_string; 600 fn = filter_pred_string;
585 else 601 else if (field->filter_type == FILTER_DYN_STRING)
586 fn = filter_pred_strloc; 602 fn = filter_pred_strloc;
587 pred->str_len = field->size; 603 else {
604 fn = filter_pred_pchar;
605 pred->str_len = strlen(pred->str_val);
606 }
588 } else { 607 } else {
589 if (field->is_signed) 608 if (field->is_signed)
590 ret = strict_strtoll(pred->str_val, 0, &val); 609 ret = strict_strtoll(pred->str_val, 0, &val);
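
With the filter type carried on each field, filter_add_pred() can pick a string predicate per flavour instead of re-parsing the type string. In short (a summary of the predicates above, not new code):

/* Where each string flavour finds its characters in a trace record:
 *
 *   FILTER_STATIC_STRING - filter_pred_string(): chars stored inline at
 *                          rec + offset, compared over the field size
 *   FILTER_DYN_STRING    - filter_pred_strloc(): a __data_loc word that
 *                          points at string data in the tail of the record
 *   FILTER_PTR_STRING    - filter_pred_pchar():  a char * stored in the
 *                          record, dereferenced and compared with strncmp()
 *                          over the length of the filter string
 */

String comparisons are still restricted to == and != by is_legal_op(), so a filter such as name == "foo" (field name invented for the example) is accepted on a FILTER_PTR_STRING field, while relational operators are rejected.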
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 70875303ae46..029a91f42287 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -158,7 +158,8 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
158#define TRACE_FIELD(type, item, assign) \ 158#define TRACE_FIELD(type, item, assign) \
159 ret = trace_define_field(event_call, #type, #item, \ 159 ret = trace_define_field(event_call, #type, #item, \
160 offsetof(typeof(field), item), \ 160 offsetof(typeof(field), item), \
161 sizeof(field.item), is_signed_type(type)); \ 161 sizeof(field.item), \
162 is_signed_type(type), FILTER_OTHER); \
162 if (ret) \ 163 if (ret) \
163 return ret; 164 return ret;
164 165
@@ -166,7 +167,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
166#define TRACE_FIELD_SPECIAL(type, item, len, cmd) \ 167#define TRACE_FIELD_SPECIAL(type, item, len, cmd) \
167 ret = trace_define_field(event_call, #type "[" #len "]", #item, \ 168 ret = trace_define_field(event_call, #type "[" #len "]", #item, \
168 offsetof(typeof(field), item), \ 169 offsetof(typeof(field), item), \
169 sizeof(field.item), 0); \ 170 sizeof(field.item), 0, FILTER_OTHER); \
170 if (ret) \ 171 if (ret) \
171 return ret; 172 return ret;
172 173
@@ -174,7 +175,8 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
174#define TRACE_FIELD_SIGN(type, item, assign, is_signed) \ 175#define TRACE_FIELD_SIGN(type, item, assign, is_signed) \
175 ret = trace_define_field(event_call, #type, #item, \ 176 ret = trace_define_field(event_call, #type, #item, \
176 offsetof(typeof(field), item), \ 177 offsetof(typeof(field), item), \
177 sizeof(field.item), is_signed); \ 178 sizeof(field.item), is_signed, \
179 FILTER_OTHER); \
178 if (ret) \ 180 if (ret) \
179 return ret; 181 return ret;
180 182
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 2698fe401ebd..85291c4de406 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -195,7 +195,8 @@ int syscall_enter_define_fields(struct ftrace_event_call *call)
195 for (i = 0; i < meta->nb_args; i++) { 195 for (i = 0; i < meta->nb_args; i++) {
196 ret = trace_define_field(call, meta->types[i], 196 ret = trace_define_field(call, meta->types[i],
197 meta->args[i], offset, 197 meta->args[i], offset,
198 sizeof(unsigned long), 0); 198 sizeof(unsigned long), 0,
199 FILTER_OTHER);
199 offset += sizeof(unsigned long); 200 offset += sizeof(unsigned long);
200 } 201 }
201 202
@@ -211,7 +212,8 @@ int syscall_exit_define_fields(struct ftrace_event_call *call)
211 if (ret) 212 if (ret)
212 return ret; 213 return ret;
213 214
214 ret = trace_define_field(call, SYSCALL_FIELD(unsigned long, ret), 0); 215 ret = trace_define_field(call, SYSCALL_FIELD(unsigned long, ret), 0,
216 FILTER_OTHER);
215 217
216 return ret; 218 return ret;
217} 219}