author    Ingo Molnar <mingo@elte.hu>    2008-12-12 06:43:05 -0500
committer Ingo Molnar <mingo@elte.hu>    2008-12-12 06:43:05 -0500
commit    81444a799550214f549caf579cf65a0ca55e70b7
tree      3288dac0740be2e1e7d1af4ee51d792a6e91edf3  /kernel/trace/trace.c
parent    a64d31baed104be25305e9c71585d3ea4ee9a418
parent    da485e0cb16726797e99a595a399b9fc721b91bc
Merge branch 'tracing/fastboot' into cpus4096
Diffstat (limited to 'kernel/trace/trace.c')
 -rw-r--r--  kernel/trace/trace.c | 148
 1 file changed, 112 insertions(+), 36 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a45b59e53fbc..8ebe0070c47a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -44,6 +44,15 @@
 unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
 unsigned long __read_mostly tracing_thresh;
 
+/*
+ * We need to change this state when a selftest is running.
+ * A selftest will lurk into the ring-buffer to count the
+ * entries inserted during the selftest although some concurrent
+ * insertions into the ring-buffer such as ftrace_printk could occurred
+ * at the same time, giving false positive or negative results.
+ */
+static bool __read_mostly tracing_selftest_running;
+
 /* For tracers that don't implement custom flags */
 static struct tracer_opt dummy_tracer_opt[] = {
 	{ }
@@ -566,6 +575,8 @@ int register_tracer(struct tracer *type)
 	unlock_kernel();
 	mutex_lock(&trace_types_lock);
 
+	tracing_selftest_running = true;
+
 	for (t = trace_types; t; t = t->next) {
 		if (strcmp(type->name, t->name) == 0) {
 			/* already found */
@@ -589,6 +600,7 @@ int register_tracer(struct tracer *type)
 		struct tracer *saved_tracer = current_trace;
 		struct trace_array *tr = &global_trace;
 		int i;
+
 		/*
 		 * Run a selftest on this tracer.
 		 * Here we reset the trace buffer, and set the current
@@ -624,6 +636,7 @@ int register_tracer(struct tracer *type)
 		max_tracer_type_len = len;
 
  out:
+	tracing_selftest_running = false;
 	mutex_unlock(&trace_types_lock);
 	lock_kernel();
 
@@ -804,7 +817,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 	spin_unlock(&trace_cmdline_lock);
 }
 
-static char *trace_find_cmdline(int pid)
+char *trace_find_cmdline(int pid)
 {
 	char *cmdline = "<...>";
 	unsigned map;
@@ -878,15 +891,39 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 }
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-static void __trace_function_return(struct trace_array *tr,
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static void __trace_graph_entry(struct trace_array *tr,
+				struct trace_array_cpu *data,
+				struct ftrace_graph_ent *trace,
+				unsigned long flags,
+				int pc)
+{
+	struct ring_buffer_event *event;
+	struct ftrace_graph_ent_entry *entry;
+	unsigned long irq_flags;
+
+	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+		return;
+
+	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, flags, pc);
+	entry->ent.type = TRACE_GRAPH_ENT;
+	entry->graph_ent = *trace;
+	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+}
+
+static void __trace_graph_return(struct trace_array *tr,
 				struct trace_array_cpu *data,
-				struct ftrace_retfunc *trace,
+				struct ftrace_graph_ret *trace,
 				unsigned long flags,
 				int pc)
 {
 	struct ring_buffer_event *event;
-	struct ftrace_ret_entry *entry;
+	struct ftrace_graph_ret_entry *entry;
 	unsigned long irq_flags;
 
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
@@ -898,12 +935,8 @@ static void __trace_function_return(struct trace_array *tr,
 		return;
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_FN_RET;
-	entry->ip = trace->func;
-	entry->parent_ip = trace->ret;
-	entry->rettime = trace->rettime;
-	entry->calltime = trace->calltime;
-	entry->overrun = trace->overrun;
+	entry->ent.type = TRACE_GRAPH_RET;
+	entry->ret = *trace;
 	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
 }
 #endif
@@ -963,6 +996,7 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 				   struct trace_array_cpu *data,
 				   unsigned long flags, int pc)
 {
+#ifdef CONFIG_STACKTRACE
 	struct ring_buffer_event *event;
 	struct userstack_entry *entry;
 	struct stack_trace trace;
@@ -988,6 +1022,7 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 
 	save_stack_trace_user(&trace);
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+#endif
 }
 
 void __trace_userstack(struct trace_array *tr,
@@ -1177,8 +1212,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	local_irq_restore(flags);
 }
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-void trace_function_return(struct ftrace_retfunc *trace)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+int trace_graph_entry(struct ftrace_graph_ent *trace)
 {
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
@@ -1187,18 +1222,52 @@ void trace_function_return(struct ftrace_retfunc *trace)
 	int cpu;
 	int pc;
 
-	raw_local_irq_save(flags);
+	if (!ftrace_trace_task(current))
+		return 0;
+
+	if (!ftrace_graph_addr(trace->func))
+		return 0;
+
+	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
-		__trace_function_return(tr, data, trace, flags, pc);
+		__trace_graph_entry(tr, data, trace, flags, pc);
 	}
+	/* Only do the atomic if it is not already set */
+	if (!test_tsk_trace_graph(current))
+		set_tsk_trace_graph(current);
 	atomic_dec(&data->disabled);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
+
+	return 1;
 }
-#endif /* CONFIG_FUNCTION_RET_TRACER */
+
+void trace_graph_return(struct ftrace_graph_ret *trace)
+{
+	struct trace_array *tr = &global_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		__trace_graph_return(tr, data, trace, flags, pc);
+	}
+	if (!trace->depth)
+		clear_tsk_trace_graph(current);
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 static struct ftrace_ops trace_ops __read_mostly =
 {
@@ -2000,9 +2069,11 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 		trace_seq_print_cont(s, iter);
 		break;
 	}
-	case TRACE_FN_RET: {
-		return print_return_function(iter);
-		break;
+	case TRACE_GRAPH_RET: {
+		return print_graph_function(iter);
+	}
+	case TRACE_GRAPH_ENT: {
+		return print_graph_function(iter);
 	}
 	case TRACE_BRANCH: {
 		struct trace_branch *field;
@@ -2298,7 +2369,9 @@ static int s_show(struct seq_file *m, void *v)
 		seq_printf(m, "# tracer: %s\n", iter->trace->name);
 		seq_puts(m, "#\n");
 	}
-	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
+	if (iter->trace && iter->trace->print_header)
+		iter->trace->print_header(m);
+	else if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
 		/* print nothing if the buffers are empty */
 		if (trace_empty(iter))
 			return 0;
@@ -2350,6 +2423,10 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
 	iter->trace = current_trace;
 	iter->pos = -1;
 
+	/* Notify the tracer early; before we stop tracing. */
+	if (iter->trace && iter->trace->open)
+		iter->trace->open(iter);
+
 	/* Annotate start of buffers if we had overruns */
 	if (ring_buffer_overruns(iter->tr->buffer))
 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
@@ -2375,9 +2452,6 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
 	/* stop the trace while dumping */
 	tracing_stop();
 
-	if (iter->trace && iter->trace->open)
-		iter->trace->open(iter);
-
 	mutex_unlock(&trace_types_lock);
 
  out:
@@ -2597,7 +2671,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	if (err)
 		goto err_unlock;
 
-	raw_local_irq_disable();
+	local_irq_disable();
 	__raw_spin_lock(&ftrace_max_lock);
 	for_each_tracing_cpu(cpu) {
 		/*
@@ -2614,7 +2688,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		}
 	}
 	__raw_spin_unlock(&ftrace_max_lock);
-	raw_local_irq_enable();
+	local_irq_enable();
 
 	tracing_cpumask = tracing_cpumask_new;
 
@@ -3285,7 +3359,7 @@ static int mark_printk(const char *fmt, ...)
 	int ret;
 	va_list args;
 	va_start(args, fmt);
-	ret = trace_vprintk(0, fmt, args);
+	ret = trace_vprintk(0, -1, fmt, args);
 	va_end(args);
 	return ret;
 }
@@ -3514,7 +3588,7 @@ static __init int tracer_init_debugfs(void)
 	return 0;
 }
 
-int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
+int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 {
 	static DEFINE_SPINLOCK(trace_buf_lock);
 	static char trace_buf[TRACE_BUF_SIZE];
@@ -3522,11 +3596,11 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	struct ring_buffer_event *event;
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
-	struct print_entry *entry;
-	unsigned long flags, irq_flags;
 	int cpu, len = 0, size, pc;
+	struct print_entry *entry;
+	unsigned long irq_flags;
 
-	if (tracing_disabled)
+	if (tracing_disabled || tracing_selftest_running)
 		return 0;
 
 	pc = preempt_count();
@@ -3537,7 +3611,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	if (unlikely(atomic_read(&data->disabled)))
 		goto out;
 
-	spin_lock_irqsave(&trace_buf_lock, flags);
+	pause_graph_tracing();
+	spin_lock_irqsave(&trace_buf_lock, irq_flags);
 	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
 	len = min(len, TRACE_BUF_SIZE-1);
@@ -3548,17 +3623,18 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	if (!event)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
+	tracing_generic_entry_update(&entry->ent, irq_flags, pc);
 	entry->ent.type = TRACE_PRINT;
 	entry->ip = ip;
+	entry->depth = depth;
 
 	memcpy(&entry->buf, trace_buf, len);
 	entry->buf[len] = 0;
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 
  out_unlock:
-	spin_unlock_irqrestore(&trace_buf_lock, flags);
-
+	spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
+	unpause_graph_tracing();
  out:
 	preempt_enable_notrace();
 
@@ -3575,7 +3651,7 @@ int __ftrace_printk(unsigned long ip, const char *fmt, ...)
 		return 0;
 
 	va_start(ap, fmt);
-	ret = trace_vprintk(ip, fmt, ap);
+	ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
 	va_end(ap);
 	return ret;
 }