path: root/kernel/trace
author     Steven Rostedt <srostedt@redhat.com>    2008-12-03 15:36:57 -0500
committer  Ingo Molnar <mingo@elte.hu>             2008-12-04 03:09:34 -0500
commit     ea4e2bc4d9f7370e57a343ccb5e7c0ad3222ec3c
tree       64a4a1d9d7d3de0695cb2e8c7161886ab660e311 /kernel/trace
parent     b29144c317fb748dae6d72c0f88eda9d43165b8d
ftrace: graph of a single function
This patch adds the file:

   /debugfs/tracing/set_graph_function

which can be used along with the function graph tracer.

When this file is empty, the function graph tracer will act as usual.
When the file has a function in it, the function graph tracer will
only trace that function.

For example:

 # echo blk_unplug > /debugfs/tracing/set_graph_function
 # cat /debugfs/tracing/trace
 [...]
 ------------------------------------------
 | 2)  make-19003  =>  kjournald-2219
 ------------------------------------------

 2)               |  blk_unplug() {
 2)               |    dm_unplug_all() {
 2)               |      dm_get_table() {
 2)      1.381 us |        _read_lock();
 2)      0.911 us |        dm_table_get();
 2)      1. 76 us |        _read_unlock();
 2) +   12.912 us |      }
 2)               |      dm_table_unplug_all() {
 2)               |        blk_unplug() {
 2)      0.778 us |          generic_unplug_device();
 2)      2.409 us |        }
 2)      5.992 us |      }
 2)      0.813 us |      dm_table_put();
 2) +   29. 90 us |    }
 2) +   34.532 us |  }

You can add up to 32 functions into this file. Currently we limit it
to 32, but this may change with later improvements.

To add another function, use the append '>>':

 # echo sys_read >> /debugfs/tracing/set_graph_function
 # cat /debugfs/tracing/set_graph_function
 blk_unplug
 sys_read

Using the '>' will clear out the function and write anew:

 # echo sys_write > /debug/tracing/set_graph_function
 # cat /debug/tracing/set_graph_function
 sys_write

Note, if you have function graph running while doing this, the small
time between clearing it and updating it will cause the graph to record
all functions. This should not be an issue because after it sets the
filter, only those functions will be recorded from then on. If you need
to only record a particular function then set this file first before
starting the function graph tracer. In the future this side effect may
be corrected.

The set_graph_function file is similar to the set_ftrace_filter but it
does not take wild cards nor does it allow for more than one function
to be set with a single write. There is no technical reason why this is
the case, I just do not have the time yet to implement that.

Note, dynamic ftrace must be enabled for this to appear because it uses
the dynamic ftrace records to match the name to the mcount call sites.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
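As a rough user-space illustration of the workflow described above (a sketch only: the TRACING path is an assumption, since debugfs is commonly mounted at /sys/kernel/debug rather than the /debugfs path used in the changelog, and it assumes the function_graph tracer is available), the filter can be written and the tracer enabled programmatically:

/*
 * Hypothetical user-space sketch: pick one function for the function
 * graph tracer.  The TRACING path below is an assumption; adjust it
 * to wherever debugfs is mounted on your system.
 */
#include <stdio.h>

#define TRACING "/sys/kernel/debug/tracing"

static int write_file(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* One symbol per write; '>' (truncate) clears the list first. */
	if (write_file(TRACING "/set_graph_function", "blk_unplug\n"))
		return 1;

	/*
	 * Enable the tracer only after the filter is in place, to avoid
	 * the window noted in the changelog where every function is
	 * briefly recorded.
	 */
	if (write_file(TRACING "/current_tracer", "function_graph\n"))
		return 1;

	puts("now read " TRACING "/trace");
	return 0;
}

This mirrors the shell examples above; appending with '>>' corresponds to the O_APPEND check in ftrace_graph_open() in the patch below, which decides whether the existing filter list is cleared.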
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ftrace.c | 227
-rw-r--r--  kernel/trace/trace.c  |   8
-rw-r--r--  kernel/trace/trace.h  |  30
3 files changed, 264 insertions, 1 deletion
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 65b9e863056b..b17a30350f06 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1320,6 +1320,224 @@ static struct file_operations ftrace_notrace_fops = {
 	.release = ftrace_notrace_release,
 };
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+static DEFINE_MUTEX(graph_lock);
+
+int ftrace_graph_count;
+unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
+
+static void *
+g_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	unsigned long *array = m->private;
+	int index = *pos;
+
+	(*pos)++;
+
+	if (index >= ftrace_graph_count)
+		return NULL;
+
+	return &array[index];
+}
+
+static void *g_start(struct seq_file *m, loff_t *pos)
+{
+	void *p = NULL;
+
+	mutex_lock(&graph_lock);
+
+	p = g_next(m, p, pos);
+
+	return p;
+}
+
+static void g_stop(struct seq_file *m, void *p)
+{
+	mutex_unlock(&graph_lock);
+}
+
+static int g_show(struct seq_file *m, void *v)
+{
+	unsigned long *ptr = v;
+	char str[KSYM_SYMBOL_LEN];
+
+	if (!ptr)
+		return 0;
+
+	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
+
+	seq_printf(m, "%s\n", str);
+
+	return 0;
+}
+
+static struct seq_operations ftrace_graph_seq_ops = {
+	.start = g_start,
+	.next = g_next,
+	.stop = g_stop,
+	.show = g_show,
+};
+
+static int
+ftrace_graph_open(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+
+	if (unlikely(ftrace_disabled))
+		return -ENODEV;
+
+	mutex_lock(&graph_lock);
+	if ((file->f_mode & FMODE_WRITE) &&
+	    !(file->f_flags & O_APPEND)) {
+		ftrace_graph_count = 0;
+		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
+	}
+
+	if (file->f_mode & FMODE_READ) {
+		ret = seq_open(file, &ftrace_graph_seq_ops);
+		if (!ret) {
+			struct seq_file *m = file->private_data;
+			m->private = ftrace_graph_funcs;
+		}
+	} else
+		file->private_data = ftrace_graph_funcs;
+	mutex_unlock(&graph_lock);
+
+	return ret;
+}
+
+static ssize_t
+ftrace_graph_read(struct file *file, char __user *ubuf,
+		  size_t cnt, loff_t *ppos)
+{
+	if (file->f_mode & FMODE_READ)
+		return seq_read(file, ubuf, cnt, ppos);
+	else
+		return -EPERM;
+}
+
+static int
+ftrace_set_func(unsigned long *array, int idx, char *buffer)
+{
+	char str[KSYM_SYMBOL_LEN];
+	struct dyn_ftrace *rec;
+	struct ftrace_page *pg;
+	int found = 0;
+	int i;
+
+	if (ftrace_disabled)
+		return -ENODEV;
+
+	/* should not be called from interrupt context */
+	spin_lock(&ftrace_lock);
+
+	for (pg = ftrace_pages_start; pg; pg = pg->next) {
+		for (i = 0; i < pg->index; i++) {
+			rec = &pg->records[i];
+
+			if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
+				continue;
+
+			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
+			if (strcmp(str, buffer) == 0) {
+				found = 1;
+				array[idx] = rec->ip;
+				break;
+			}
+		}
+	}
+	spin_unlock(&ftrace_lock);
+
+	return found ? 0 : -EINVAL;
+}
+
+static ssize_t
+ftrace_graph_write(struct file *file, const char __user *ubuf,
+		   size_t cnt, loff_t *ppos)
+{
+	unsigned char buffer[FTRACE_BUFF_MAX+1];
+	unsigned long *array;
+	size_t read = 0;
+	ssize_t ret;
+	int index = 0;
+	char ch;
+
+	if (!cnt || cnt < 0)
+		return 0;
+
+	mutex_lock(&graph_lock);
+
+	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (file->f_mode & FMODE_READ) {
+		struct seq_file *m = file->private_data;
+		array = m->private;
+	} else
+		array = file->private_data;
+
+	ret = get_user(ch, ubuf++);
+	if (ret)
+		goto out;
+	read++;
+	cnt--;
+
+	/* skip white space */
+	while (cnt && isspace(ch)) {
+		ret = get_user(ch, ubuf++);
+		if (ret)
+			goto out;
+		read++;
+		cnt--;
+	}
+
+	if (isspace(ch)) {
+		*ppos += read;
+		ret = read;
+		goto out;
+	}
+
+	while (cnt && !isspace(ch)) {
+		if (index < FTRACE_BUFF_MAX)
+			buffer[index++] = ch;
+		else {
+			ret = -EINVAL;
+			goto out;
+		}
+		ret = get_user(ch, ubuf++);
+		if (ret)
+			goto out;
+		read++;
+		cnt--;
+	}
+	buffer[index] = 0;
+
+	/* we allow only one at a time */
+	ret = ftrace_set_func(array, ftrace_graph_count, buffer);
+	if (ret)
+		goto out;
+
+	ftrace_graph_count++;
+
+	file->f_pos += read;
+
+	ret = read;
+ out:
+	mutex_unlock(&graph_lock);
+
+	return ret;
+}
+
+static const struct file_operations ftrace_graph_fops = {
+	.open = ftrace_graph_open,
+	.read = ftrace_graph_read,
+	.write = ftrace_graph_write,
+};
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 {
 	struct dentry *entry;
@@ -1347,6 +1565,15 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 		pr_warning("Could not create debugfs "
 			   "'set_ftrace_notrace' entry\n");
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
+				    NULL,
+				    &ftrace_graph_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs "
+			   "'set_graph_function' entry\n");
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 	return 0;
 }
 
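The write path above accepts one plain symbol name per write and resolves it with kallsyms_lookup() against the dynamic ftrace records, which is why the changelog notes that dynamic ftrace must be enabled. A loose user-space analogue (a hypothetical helper; it reads /proc/kallsyms rather than the dyn_ftrace records the kernel actually consults) can sanity-check that a symbol exists before it is echoed into set_graph_function:

/*
 * Hypothetical user-space check: does `name` appear as a text symbol
 * in /proc/kallsyms?  This only approximates the in-kernel match done
 * by ftrace_set_func(), which walks the dyn_ftrace records instead.
 */
#include <stdio.h>
#include <string.h>

static int kallsyms_has(const char *name)
{
	char line[512], sym[256], type;
	FILE *f = fopen("/proc/kallsyms", "r");
	int found = 0;

	if (!f)
		return 0;

	while (fgets(line, sizeof(line), f)) {
		/* each line: <address> <type> <symbol> [module] */
		if (sscanf(line, "%*llx %c %255s", &type, sym) == 2 &&
		    (type == 'T' || type == 't') &&
		    strcmp(sym, name) == 0) {
			found = 1;
			break;
		}
	}
	fclose(f);
	return found;
}

int main(int argc, char **argv)
{
	const char *name = argc > 1 ? argv[1] : "blk_unplug";

	printf("%s: %s\n", name, kallsyms_has(name) ? "found" : "not in kallsyms");
	return 0;
}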
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8b6409a62b54..710b39acd81b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1209,6 +1209,9 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	int cpu;
 	int pc;
 
+	if (!ftrace_graph_addr(trace->func))
+		return 0;
+
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
@@ -1217,6 +1220,9 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 		pc = preempt_count();
 		__trace_graph_entry(tr, data, trace, flags, pc);
 	}
+	/* Only do the atomic if it is not already set */
+	if (!test_tsk_trace_graph(current))
+		set_tsk_trace_graph(current);
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 
@@ -1240,6 +1246,8 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 		pc = preempt_count();
 		__trace_graph_return(tr, data, trace, flags, pc);
 	}
+	if (!trace->depth)
+		clear_tsk_trace_graph(current);
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 0565ae9a2210..41f026bfc9ed 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -505,13 +505,41 @@ extern unsigned long trace_flags;
 /* Standard output formatting function used for function return traces */
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 extern enum print_line_t print_graph_function(struct trace_iterator *iter);
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/* TODO: make this variable */
+#define FTRACE_GRAPH_MAX_FUNCS		32
+extern int ftrace_graph_count;
+extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
+
+static inline int ftrace_graph_addr(unsigned long addr)
+{
+	int i;
+
+	if (!ftrace_graph_count || test_tsk_trace_graph(current))
+		return 1;
+
+	for (i = 0; i < ftrace_graph_count; i++) {
+		if (addr == ftrace_graph_funcs[i])
+			return 1;
+	}
+
+	return 0;
+}
 #else
+static inline int ftrace_trace_addr(unsigned long addr)
+{
+	return 1;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#else /* CONFIG_FUNCTION_GRAPH_TRACER */
 static inline enum print_line_t
 print_graph_function(struct trace_iterator *iter)
 {
 	return TRACE_TYPE_UNHANDLED;
 }
-#endif
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 /*
  * trace_iterator_flags is an enumeration that defines bit