Diffstat (limited to 'kernel/trace/trace.c')
 kernel/trace/trace.c | 242 +++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 198 insertions(+), 44 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0029fe62b245..80eda7d254ed 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -257,7 +257,7 @@ unsigned long long ns2usecs(u64 nsec)
 
 /* trace_flags that are default zero for instances */
 #define ZEROED_TRACE_FLAGS \
-	TRACE_ITER_EVENT_FORK
+	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
 
 /*
  * The global_trace is the descriptor that holds the top-level tracing
@@ -757,7 +757,7 @@ __trace_buffer_lock_reserve(struct ring_buffer *buffer,
 	return event;
 }
 
-static void tracer_tracing_on(struct trace_array *tr)
+void tracer_tracing_on(struct trace_array *tr)
 {
 	if (tr->trace_buffer.buffer)
 		ring_buffer_record_on(tr->trace_buffer.buffer);
@@ -894,23 +894,8 @@ int __trace_bputs(unsigned long ip, const char *str)
 EXPORT_SYMBOL_GPL(__trace_bputs);
 
 #ifdef CONFIG_TRACER_SNAPSHOT
-/**
- * trace_snapshot - take a snapshot of the current buffer.
- *
- * This causes a swap between the snapshot buffer and the current live
- * tracing buffer. You can use this to take snapshots of the live
- * trace when some condition is triggered, but continue to trace.
- *
- * Note, make sure to allocate the snapshot with either
- * a tracing_snapshot_alloc(), or by doing it manually
- * with: echo 1 > /sys/kernel/debug/tracing/snapshot
- *
- * If the snapshot buffer is not allocated, it will stop tracing.
- * Basically making a permanent snapshot.
- */
-void tracing_snapshot(void)
+static void tracing_snapshot_instance(struct trace_array *tr)
 {
-	struct trace_array *tr = &global_trace;
 	struct tracer *tracer = tr->current_trace;
 	unsigned long flags;
 
@@ -938,6 +923,27 @@ void tracing_snapshot(void)
 	update_max_tr(tr, current, smp_processor_id());
 	local_irq_restore(flags);
 }
+
+/**
+ * trace_snapshot - take a snapshot of the current buffer.
+ *
+ * This causes a swap between the snapshot buffer and the current live
+ * tracing buffer. You can use this to take snapshots of the live
+ * trace when some condition is triggered, but continue to trace.
+ *
+ * Note, make sure to allocate the snapshot with either
+ * a tracing_snapshot_alloc(), or by doing it manually
+ * with: echo 1 > /sys/kernel/debug/tracing/snapshot
+ *
+ * If the snapshot buffer is not allocated, it will stop tracing.
+ * Basically making a permanent snapshot.
+ */
+void tracing_snapshot(void)
+{
+	struct trace_array *tr = &global_trace;
+
+	tracing_snapshot_instance(tr);
+}
 EXPORT_SYMBOL_GPL(tracing_snapshot);
 
 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
@@ -1039,7 +1045,7 @@ void tracing_snapshot_alloc(void)
 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
 #endif /* CONFIG_TRACER_SNAPSHOT */
 
-static void tracer_tracing_off(struct trace_array *tr)
+void tracer_tracing_off(struct trace_array *tr)
 {
 	if (tr->trace_buffer.buffer)
 		ring_buffer_record_off(tr->trace_buffer.buffer);
@@ -1424,6 +1430,28 @@ static int wait_on_pipe(struct trace_iterator *iter, bool full)
 }
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
+static bool selftests_can_run;
+
+struct trace_selftests {
+	struct list_head		list;
+	struct tracer			*type;
+};
+
+static LIST_HEAD(postponed_selftests);
+
+static int save_selftest(struct tracer *type)
+{
+	struct trace_selftests *selftest;
+
+	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
+	if (!selftest)
+		return -ENOMEM;
+
+	selftest->type = type;
+	list_add(&selftest->list, &postponed_selftests);
+	return 0;
+}
+
 static int run_tracer_selftest(struct tracer *type)
 {
 	struct trace_array *tr = &global_trace;
@@ -1434,6 +1462,14 @@ static int run_tracer_selftest(struct tracer *type)
 		return 0;
 
 	/*
+	 * If a tracer registers early in boot up (before scheduling is
+	 * initialized and such), then do not run its selftests yet.
+	 * Instead, run it a little later in the boot process.
+	 */
+	if (!selftests_can_run)
+		return save_selftest(type);
+
+	/*
 	 * Run a selftest on this tracer.
 	 * Here we reset the trace buffer, and set the current
 	 * tracer to be this tracer. The tracer can then run some
@@ -1482,6 +1518,47 @@ static int run_tracer_selftest(struct tracer *type)
 	printk(KERN_CONT "PASSED\n");
 	return 0;
 }
+
+static __init int init_trace_selftests(void)
+{
+	struct trace_selftests *p, *n;
+	struct tracer *t, **last;
+	int ret;
+
+	selftests_can_run = true;
+
+	mutex_lock(&trace_types_lock);
+
+	if (list_empty(&postponed_selftests))
+		goto out;
+
+	pr_info("Running postponed tracer tests:\n");
+
+	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
+		ret = run_tracer_selftest(p->type);
+		/* If the test fails, then warn and remove from available_tracers */
+		if (ret < 0) {
+			WARN(1, "tracer: %s failed selftest, disabling\n",
+			     p->type->name);
+			last = &trace_types;
+			for (t = trace_types; t; t = t->next) {
+				if (t == p->type) {
+					*last = t->next;
+					break;
+				}
+				last = &t->next;
+			}
+		}
+		list_del(&p->list);
+		kfree(p);
+	}
+
+ out:
+	mutex_unlock(&trace_types_lock);
+
+	return 0;
+}
+early_initcall(init_trace_selftests);
 #else
 static inline int run_tracer_selftest(struct tracer *type)
 {
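
The two hunks above form a self-contained deferral scheme: a tracer that registers before the scheduler is up is parked on postponed_selftests by save_selftest(), and init_trace_selftests(), run from an early_initcall(), drains the list and unlinks any tracer that fails its test from trace_types. A minimal user-space sketch of the same queue-then-drain pattern (all names below are hypothetical, not from the patch):

/* Sketch only: models the save_selftest()/init_trace_selftests() pattern. */
#include <stdio.h>
#include <stdlib.h>

struct pending_test {
	struct pending_test *next;
	const char *name;
};

static struct pending_test *pending;	/* postponed_selftests analogue */
static int can_run;			/* selftests_can_run analogue */

static int run_or_defer(const char *name)
{
	struct pending_test *p;

	if (can_run) {
		printf("selftest: %s\n", name);
		return 0;
	}

	p = malloc(sizeof(*p));		/* save_selftest() analogue */
	if (!p)
		return -1;
	p->name = name;
	p->next = pending;
	pending = p;
	return 0;
}

static void drain_pending(void)		/* init_trace_selftests() analogue */
{
	can_run = 1;
	while (pending) {
		struct pending_test *p = pending;

		pending = p->next;
		printf("postponed selftest: %s\n", p->name);
		free(p);
	}
}

int main(void)
{
	run_or_defer("function");	/* too early: queued */
	drain_pending();		/* early_initcall time: runs it */
	run_or_defer("wakeup");		/* late enough: runs immediately */
	return 0;
}
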
@@ -1927,6 +2004,18 @@ void tracing_record_cmdline(struct task_struct *tsk)
 		__this_cpu_write(trace_cmdline_save, false);
 }
 
+/*
+ * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
+ * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
+ * simplifies those functions and keeps them in sync.
+ */
+enum print_line_t trace_handle_return(struct trace_seq *s)
+{
+	return trace_seq_has_overflowed(s) ?
+		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
+}
+EXPORT_SYMBOL_GPL(trace_handle_return);
+
 void
 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 			     int pc)
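
trace_handle_return() lets an event's print handler end with one call instead of open-coding the overflow check everywhere. A sketch of a typical caller (the event name and its field are illustrative, not from this patch):

static enum print_line_t my_event_print(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "my_event: flags=%d\n", flags);

	/* TRACE_TYPE_PARTIAL_LINE if s overflowed, TRACE_TYPE_HANDLED otherwise */
	return trace_handle_return(s);
}
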
@@ -4122,6 +4211,9 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
 	if (mask == TRACE_ITER_EVENT_FORK)
 		trace_event_follow_fork(tr, enabled);
 
+	if (mask == TRACE_ITER_FUNC_FORK)
+		ftrace_pid_follow_fork(tr, enabled);
+
 	if (mask == TRACE_ITER_OVERWRITE) {
 		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
 #ifdef CONFIG_TRACER_MAX_TRACE
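
TRACE_ITER_FUNC_FORK is the function tracer's counterpart to event-fork; the flag bit and its option name ("function-fork") come from the companion ftrace patch in this series, not this diff. Assuming the usual tracefs mount point, usage would look like:

	echo $$ > /sys/kernel/debug/tracing/set_ftrace_pid
	echo 1 > /sys/kernel/debug/tracing/options/function-fork

so that children forked by the traced PID are followed by the function tracer the way event-fork already follows them for events.
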
@@ -5962,6 +6054,7 @@ static int tracing_clock_open(struct inode *inode, struct file *file)
 struct ftrace_buffer_info {
 	struct trace_iterator	iter;
 	void			*spare;
+	unsigned int		spare_cpu;
 	unsigned int		read;
 };
 
@@ -6291,9 +6384,11 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 		return -EBUSY;
 #endif
 
-	if (!info->spare)
+	if (!info->spare) {
 		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
 							  iter->cpu_file);
+		info->spare_cpu = iter->cpu_file;
+	}
 	if (!info->spare)
 		return -ENOMEM;
 
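
The spare page handed back by ring_buffer_alloc_read_page() belongs to one CPU's buffer, and freeing it now requires naming that CPU (see the ring_buffer_free_read_page() hunks below), so the reader records spare_cpu at allocation time. An illustrative wrapper for the pairing (not part of the patch):

/* Illustrative only: keeps the page and its owning cpu together, the
 * invariant that info->spare/info->spare_cpu and ref->page/ref->cpu
 * maintain below. */
static void *get_read_page(struct ring_buffer *buffer, int cpu, int *page_cpu)
{
	void *page = ring_buffer_alloc_read_page(buffer, cpu);

	if (page)
		*page_cpu = cpu;	/* remember the pool it came from */
	return page;
}
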
@@ -6353,7 +6448,8 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
 	__trace_array_put(iter->tr);
 
 	if (info->spare)
-		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
+		ring_buffer_free_read_page(iter->trace_buffer->buffer,
+					   info->spare_cpu, info->spare);
 	kfree(info);
 
 	mutex_unlock(&trace_types_lock);
@@ -6364,6 +6460,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
 struct buffer_ref {
 	struct ring_buffer	*buffer;
 	void			*page;
+	int			cpu;
 	int			ref;
 };
 
@@ -6375,7 +6472,7 @@ static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
 	if (--ref->ref)
 		return;
 
-	ring_buffer_free_read_page(ref->buffer, ref->page);
+	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
 	kfree(ref);
 	buf->private = 0;
 }
@@ -6409,7 +6506,7 @@ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
 	if (--ref->ref)
 		return;
 
-	ring_buffer_free_read_page(ref->buffer, ref->page);
+	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
 	kfree(ref);
 	spd->partial[i].private = 0;
 }
@@ -6473,11 +6570,13 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 			kfree(ref);
 			break;
 		}
+		ref->cpu = iter->cpu_file;
 
 		r = ring_buffer_read_page(ref->buffer, &ref->page,
 					  len, iter->cpu_file, 1);
 		if (r < 0) {
-			ring_buffer_free_read_page(ref->buffer, ref->page);
+			ring_buffer_free_read_page(ref->buffer, ref->cpu,
+						   ref->page);
 			kfree(ref);
 			break;
 		}
@@ -6648,43 +6747,89 @@ static const struct file_operations tracing_dyn_info_fops = {
 
 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
 static void
-ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
+ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
+		struct trace_array *tr, struct ftrace_probe_ops *ops,
+		void *data)
 {
-	tracing_snapshot();
+	tracing_snapshot_instance(tr);
 }
 
 static void
-ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
+ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
+		      struct trace_array *tr, struct ftrace_probe_ops *ops,
+		      void *data)
 {
-	unsigned long *count = (long *)data;
+	struct ftrace_func_mapper *mapper = data;
+	long *count = NULL;
 
-	if (!*count)
-		return;
+	if (mapper)
+		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
+
+	if (count) {
+
+		if (*count <= 0)
+			return;
 
-	if (*count != -1)
 		(*count)--;
+	}
 
-	tracing_snapshot();
+	tracing_snapshot_instance(tr);
 }
 
 static int
 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
 		      struct ftrace_probe_ops *ops, void *data)
 {
-	long count = (long)data;
+	struct ftrace_func_mapper *mapper = data;
+	long *count = NULL;
 
 	seq_printf(m, "%ps:", (void *)ip);
 
 	seq_puts(m, "snapshot");
 
-	if (count == -1)
-		seq_puts(m, ":unlimited\n");
+	if (mapper)
+		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
+
+	if (count)
+		seq_printf(m, ":count=%ld\n", *count);
 	else
-		seq_printf(m, ":count=%ld\n", count);
+		seq_puts(m, ":unlimited\n");
 
 	return 0;
 }
 
+static int
+ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
+		     unsigned long ip, void *init_data, void **data)
+{
+	struct ftrace_func_mapper *mapper = *data;
+
+	if (!mapper) {
+		mapper = allocate_ftrace_func_mapper();
+		if (!mapper)
+			return -ENOMEM;
+		*data = mapper;
+	}
+
+	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
+}
+
+static void
+ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
+		     unsigned long ip, void *data)
+{
+	struct ftrace_func_mapper *mapper = data;
+
+	if (!ip) {
+		if (!mapper)
+			return;
+		free_ftrace_func_mapper(mapper, NULL);
+		return;
+	}
+
+	ftrace_func_mapper_remove_ip(mapper, ip);
+}
+
 static struct ftrace_probe_ops snapshot_probe_ops = {
 	.func			= ftrace_snapshot,
 	.print			= ftrace_snapshot_print,
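
Taken together, the rewritten callbacks hang the countdown off a per-ip ftrace_func_mapper instead of a value smuggled through the data pointer: ftrace_snapshot_init() allocates the mapper on first use and maps each ip to its count, ftrace_count_snapshot() looks the count up per hit, and ftrace_snapshot_free() tears down one ip or, when called with ip == 0, the whole mapper. The firing rule reduces to a small decision, modeled here in plain C (illustrative only):

#include <stdbool.h>

/* Mirrors ftrace_count_snapshot(): a NULL count means an unlimited
 * trigger; a positive count is a per-hit budget; zero suppresses it. */
static bool trigger_should_fire(long *count)
{
	if (count) {
		if (*count <= 0)
			return false;	/* budget exhausted */
		(*count)--;
	}
	return true;
}
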
@@ -6693,10 +6838,12 @@ static struct ftrace_probe_ops snapshot_probe_ops = {
 static struct ftrace_probe_ops snapshot_count_probe_ops = {
 	.func			= ftrace_count_snapshot,
 	.print			= ftrace_snapshot_print,
+	.init			= ftrace_snapshot_init,
+	.free			= ftrace_snapshot_free,
 };
 
 static int
-ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
+ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
 			       char *glob, char *cmd, char *param, int enable)
 {
 	struct ftrace_probe_ops *ops;
@@ -6710,10 +6857,8 @@ ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
 
 	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
 
-	if (glob[0] == '!') {
-		unregister_ftrace_function_probe_func(glob+1, ops);
-		return 0;
-	}
+	if (glob[0] == '!')
+		return unregister_ftrace_function_probe_func(glob+1, tr, ops);
 
 	if (!param)
 		goto out_reg;
@@ -6732,11 +6877,11 @@ ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
 		return ret;
 
  out_reg:
-	ret = alloc_snapshot(&global_trace);
+	ret = alloc_snapshot(tr);
 	if (ret < 0)
 		goto out;
 
-	ret = register_ftrace_function_probe(glob, ops, count);
+	ret = register_ftrace_function_probe(glob, tr, ops, count);
 
  out:
 	return ret < 0 ? ret : 0;
@@ -7347,6 +7492,8 @@ static int instance_mkdir(const char *name)
 		goto out_free_tr;
 	}
 
+	ftrace_init_trace_array(tr);
+
 	init_tracer_tracefs(tr, tr->dir);
 	init_trace_flags_index(tr);
 	__update_tracer_options(tr);
@@ -7967,6 +8114,9 @@ __init static int tracer_alloc_buffers(void)
 
 	register_tracer(&nop_trace);
 
+	/* Function tracing may start here (via kernel command line) */
+	init_function_trace();
+
 	/* All seems OK, enable tracing */
 	tracing_disabled = 0;
 
@@ -8001,7 +8151,7 @@ out:
 	return ret;
 }
 
-void __init trace_init(void)
+void __init early_trace_init(void)
 {
 	if (tracepoint_printk) {
 		tracepoint_print_iter =
@@ -8012,6 +8162,10 @@ void __init trace_init(void)
 			static_key_enable(&tracepoint_printk_key.key);
 	}
 	tracer_alloc_buffers();
+}
+
+void __init trace_init(void)
+{
 	trace_event_init();
 }
 
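
The final hunks split the old trace_init() in two: early_trace_init() allocates the buffers and, via init_function_trace(), lets function tracing requested on the kernel command line start, while trace_event_init() waits for the later trace_init() call. In practice a boot line such as

	ftrace=function ftrace_filter=ksoftirqd*

(both long-standing parameters, not introduced by this patch) can begin tracing before most of the kernel has initialized.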