path: root/kernel/trace/trace_events.c
Diffstat (limited to 'kernel/trace/trace_events.c')
-rw-r--r--  kernel/trace/trace_events.c  133
1 file changed, 46 insertions(+), 87 deletions(-)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 97e2c4d2e9eb..d128f65778e6 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -21,6 +21,7 @@
 
 #include "trace_output.h"
 
+#undef TRACE_SYSTEM
 #define TRACE_SYSTEM "TRACE_SYSTEM"
 
 DEFINE_MUTEX(event_mutex);
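
The added #undef avoids a preprocessor redefinition warning: a tracepoint header pulled in above this point may already have defined TRACE_SYSTEM. A minimal illustration (hypothetical, not from this file):

	#define TRACE_SYSTEM irq	/* left over from an included header */

	/* #define TRACE_SYSTEM "TRACE_SYSTEM"   without an #undef this draws
	 *                                       "warning: TRACE_SYSTEM redefined" */
	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM "TRACE_SYSTEM"	/* clean redefinition */
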
@@ -86,7 +87,7 @@ int trace_define_common_fields(struct ftrace_event_call *call)
86 __common_field(unsigned char, flags); 87 __common_field(unsigned char, flags);
87 __common_field(unsigned char, preempt_count); 88 __common_field(unsigned char, preempt_count);
88 __common_field(int, pid); 89 __common_field(int, pid);
89 __common_field(int, tgid); 90 __common_field(int, lock_depth);
90 91
91 return ret; 92 return ret;
92} 93}
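
The tgid to lock_depth switch keeps the registered common fields in step with the common record header; the matching trace_write_header() hunk appears further down. A sketch of the layout these calls describe, assuming the 2.6.32-era definition (the authoritative one is struct trace_entry in kernel/trace/trace.h):

	/* Common header carried by every event record; the field order must
	 * match what trace_define_common_fields() registers above. */
	struct trace_entry {
		unsigned short	type;
		unsigned char	flags;
		unsigned char	preempt_count;
		int		pid;
		int		lock_depth;	/* was: int tgid */
	};
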
@@ -230,73 +231,38 @@ static ssize_t
 ftrace_event_write(struct file *file, const char __user *ubuf,
 		   size_t cnt, loff_t *ppos)
 {
-	size_t read = 0;
-	int i, set = 1;
-	ssize_t ret;
-	char *buf;
-	char ch;
+	struct trace_parser parser;
+	ssize_t read, ret;
 
-	if (!cnt || cnt < 0)
+	if (!cnt)
 		return 0;
 
 	ret = tracing_update_buffers();
 	if (ret < 0)
 		return ret;
 
-	ret = get_user(ch, ubuf++);
-	if (ret)
-		return ret;
-	read++;
-	cnt--;
-
-	/* skip white space */
-	while (cnt && isspace(ch)) {
-		ret = get_user(ch, ubuf++);
-		if (ret)
-			return ret;
-		read++;
-		cnt--;
-	}
-
-	/* Only white space found? */
-	if (isspace(ch)) {
-		file->f_pos += read;
-		ret = read;
-		return ret;
-	}
-
-	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
-	if (!buf)
+	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
 		return -ENOMEM;
 
-	if (cnt > EVENT_BUF_SIZE)
-		cnt = EVENT_BUF_SIZE;
+	read = trace_get_user(&parser, ubuf, cnt, ppos);
 
-	i = 0;
-	while (cnt && !isspace(ch)) {
-		if (!i && ch == '!')
+	if (read >= 0 && trace_parser_loaded((&parser))) {
+		int set = 1;
+
+		if (*parser.buffer == '!')
 			set = 0;
-		else
-			buf[i++] = ch;
 
-		ret = get_user(ch, ubuf++);
+		parser.buffer[parser.idx] = 0;
+
+		ret = ftrace_set_clr_event(parser.buffer + !set, set);
 		if (ret)
-			goto out_free;
-		read++;
-		cnt--;
+			goto out_put;
 	}
-	buf[i] = 0;
-
-	file->f_pos += read;
-
-	ret = ftrace_set_clr_event(buf, set);
-	if (ret)
-		goto out_free;
 
 	ret = read;
 
- out_free:
-	kfree(buf);
+ out_put:
+	trace_parser_put(&parser);
 
 	return ret;
 }
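
The rewrite replaces the open-coded get_user() loop with the trace_parser helpers, which tokenize the user buffer in one place. The lifecycle is get_init, get_user, loaded-check, put. A stripped-down sketch of a caller, using only the helpers visible in this hunk (function name hypothetical, error handling trimmed):

	static ssize_t example_write(struct file *file, const char __user *ubuf,
				     size_t cnt, loff_t *ppos)
	{
		struct trace_parser parser;
		ssize_t read;

		/* allocate the parse buffer (one token plus NUL) */
		if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
			return -ENOMEM;

		/* copy one whitespace-delimited token from user space */
		read = trace_get_user(&parser, ubuf, cnt, ppos);

		if (read >= 0 && trace_parser_loaded(&parser)) {
			parser.buffer[parser.idx] = 0;	/* NUL-terminate */
			/* ... act on parser.buffer ... */
		}

		trace_parser_put(&parser);		/* release the buffer */
		return read;
	}
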
@@ -304,42 +270,32 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct list_head *list = m->private;
-	struct ftrace_event_call *call;
+	struct ftrace_event_call *call = v;
 
 	(*pos)++;
 
-	for (;;) {
-		if (list == &ftrace_events)
-			return NULL;
-
-		call = list_entry(list, struct ftrace_event_call, list);
-
+	list_for_each_entry_continue(call, &ftrace_events, list) {
 		/*
 		 * The ftrace subsystem is for showing formats only.
 		 * They can not be enabled or disabled via the event files.
 		 */
 		if (call->regfunc)
-			break;
-
-		list = list->next;
+			return call;
 	}
 
-	m->private = list->next;
-
-	return call;
+	return NULL;
 }
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
-	struct ftrace_event_call *call = NULL;
+	struct ftrace_event_call *call;
 	loff_t l;
 
 	mutex_lock(&event_mutex);
 
-	m->private = ftrace_events.next;
+	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
 	for (l = 0; l <= *pos; ) {
-		call = t_next(m, NULL, &l);
+		call = t_next(m, call, &l);
 		if (!call)
 			break;
 	}
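
list_for_each_entry_continue() resumes a typed list walk at the entry after the one passed in, which matches the seq_file ->next contract exactly and lets the handler drop the hand-rolled for(;;) loop and the m->private cursor. Roughly, simplified from include/linux/list.h (prefetch hints of that era omitted):

	#define list_for_each_entry_continue(pos, head, member)			\
		for (pos = list_entry(pos->member.next, typeof(*pos), member);	\
		     &pos->member != (head);					\
		     pos = list_entry(pos->member.next, typeof(*pos), member))
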
@@ -349,37 +305,28 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 static void *
 s_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct list_head *list = m->private;
-	struct ftrace_event_call *call;
+	struct ftrace_event_call *call = v;
 
 	(*pos)++;
 
- retry:
-	if (list == &ftrace_events)
-		return NULL;
-
-	call = list_entry(list, struct ftrace_event_call, list);
-
-	if (!call->enabled) {
-		list = list->next;
-		goto retry;
+	list_for_each_entry_continue(call, &ftrace_events, list) {
+		if (call->enabled)
+			return call;
 	}
 
-	m->private = list->next;
-
-	return call;
+	return NULL;
 }
 
 static void *s_start(struct seq_file *m, loff_t *pos)
 {
-	struct ftrace_event_call *call = NULL;
+	struct ftrace_event_call *call;
 	loff_t l;
 
 	mutex_lock(&event_mutex);
 
-	m->private = ftrace_events.next;
+	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
 	for (l = 0; l <= *pos; ) {
-		call = s_next(m, NULL, &l);
+		call = s_next(m, call, &l);
 		if (!call)
 			break;
 	}
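
Both ->start handlers rely on the same idiom: list_entry(&ftrace_events, ...) fabricates a pseudo-entry positioned at the list head itself, so the first call into t_next()/s_next() steps onto the first real entry. The bogus pointer is never dereferenced before that step. In isolation:

	/* Position "call" at the head sentinel; only its embedded list_head
	 * is used, so the surrounding (invalid) struct is never touched. */
	struct ftrace_event_call *call =
		list_entry(&ftrace_events, struct ftrace_event_call, list);

	list_for_each_entry_continue(call, &ftrace_events, list) {
		/* first iteration: call == first real event on the list */
	}
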
@@ -578,7 +525,7 @@ static int trace_write_header(struct trace_seq *s)
 			       FIELD(unsigned char, flags),
 			       FIELD(unsigned char, preempt_count),
 			       FIELD(int, pid),
-			       FIELD(int, tgid));
+			       FIELD(int, lock_depth));
 }
 
 static ssize_t
@@ -1187,7 +1134,7 @@ static int trace_module_notify(struct notifier_block *self,
 }
 #endif /* CONFIG_MODULES */
 
-struct notifier_block trace_module_nb = {
+static struct notifier_block trace_module_nb = {
 	.notifier_call = trace_module_notify,
 	.priority = 0,
 };
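
trace_module_nb is referenced only from this file, so static takes it out of the kernel's global namespace (and quiets sparse). Elsewhere in trace_events.c the block is handed to the module notifier chain, roughly:

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace module notifier\n");
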
@@ -1359,6 +1306,18 @@ static __init void event_trace_self_tests(void)
 		if (!call->regfunc)
 			continue;
 
+/*
+ * Testing syscall events here is pretty useless, but
+ * we still do it if configured. But this is time consuming.
+ * What we really need is a user thread to perform the
+ * syscalls as we test.
+ */
+#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
+		if (call->system &&
+		    strcmp(call->system, "syscalls") == 0)
+			continue;
+#endif
+
 		pr_info("Testing event %s: ", call->name);
 
 		/*