author     Steven Rostedt (Red Hat) <rostedt@goodmis.org>    2014-01-14 11:28:38 -0500
committer  Steven Rostedt <rostedt@goodmis.org>              2014-04-21 13:59:26 -0400
commit     6d9b3fa5e7f663bbfb9d2d80d46136f75319cb28 (patch)
tree       eb21578a00e6e1092ba53c79a4a84e8dc8e13388 /kernel
parent     4104d326b670c2b66f575d2004daa28b2d1b4c8d (diff)
tracing: Move tracing_max_latency into trace_array
In preparation for letting the latency tracers be used by instances,
remove the global tracing_max_latency variable and add a max_latency
field to the trace_array that the latency tracers will now use.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
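At a glance, the change amounts to the following minimal sketch (simplified, with unrelated trace_array fields elided; see the full diff below for the real kernel definitions):

/* Before: a single global shared by all latency tracers. */
unsigned long __read_mostly tracing_max_latency;

/*
 * After: a per-instance field inside struct trace_array, so each
 * tracing instance can track its own maximum latency.
 */
struct trace_array {
	/* ... other fields elided ... */
#ifdef CONFIG_TRACER_MAX_TRACE
	unsigned long	max_latency;
#endif
};

/*
 * Callers are converted from the global to the per-array field, e.g.
 *	tracing_max_latency = delta;   ->   tr->max_latency = delta;
 * and report_latency() gains a struct trace_array *tr parameter so it
 * can compare against tr->max_latency. The tracing_max_latency debugfs
 * file is likewise created per instance in init_tracer_debugfs().
 */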
Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/trace/trace.c              | 14
 -rw-r--r--  kernel/trace/trace.h              |  3
 -rw-r--r--  kernel/trace/trace_irqsoff.c      | 14
 -rw-r--r--  kernel/trace/trace_sched_wakeup.c | 12
 -rw-r--r--  kernel/trace/trace_selftest.c     | 26
5 files changed, 33 insertions, 36 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index fdd33aacdf05..f5fc56bf0227 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -982,8 +982,6 @@ static arch_spinlock_t ftrace_max_lock =
 unsigned long __read_mostly tracing_thresh;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-unsigned long __read_mostly tracing_max_latency;
-
 /*
  * Copy the new maximum trace into the separate maximum-trace
  * structure. (this way the maximum trace is permanently saved,
@@ -1000,7 +998,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	max_buf->cpu = cpu;
 	max_buf->time_start = data->preempt_timestamp;
 
-	max_data->saved_latency = tracing_max_latency;
+	max_data->saved_latency = tr->max_latency;
 	max_data->critical_start = data->critical_start;
 	max_data->critical_end = data->critical_end;
 
@@ -6328,6 +6326,11 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
 	trace_create_file("tracing_on", 0644, d_tracer,
 			  tr, &rb_simple_fops);
 
+#ifdef CONFIG_TRACER_MAX_TRACE
+	trace_create_file("tracing_max_latency", 0644, d_tracer,
+			&tr->max_latency, &tracing_max_lat_fops);
+#endif
+
 	if (ftrace_create_function_files(tr, d_tracer))
 		WARN(1, "Could not allocate function filter files");
 
@@ -6353,11 +6356,6 @@ static __init int tracer_init_debugfs(void)
 
 	init_tracer_debugfs(&global_trace, d_tracer);
 
-#ifdef CONFIG_TRACER_MAX_TRACE
-	trace_create_file("tracing_max_latency", 0644, d_tracer,
-			&tracing_max_latency, &tracing_max_lat_fops);
-#endif
-
 	trace_create_file("tracing_thresh", 0644, d_tracer,
 			  &tracing_thresh, &tracing_max_lat_fops);
 
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index df5256be64cd..644a8b533e1d 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -190,6 +190,7 @@ struct trace_array {
 	 */
 	struct trace_buffer	max_buffer;
 	bool			allocated_snapshot;
+	unsigned long		max_latency;
 #endif
 	int			buffer_disabled;
 #ifdef CONFIG_FTRACE_SYSCALLS
@@ -599,8 +600,6 @@ extern unsigned long nsecs_to_usecs(unsigned long nsecs);
 extern unsigned long tracing_thresh;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-extern unsigned long tracing_max_latency;
-
 void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
 void update_max_tr_single(struct trace_array *tr,
 			  struct task_struct *tsk, int cpu);
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index b5cb047df3e9..40aa300d3491 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -170,7 +170,7 @@ irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 	for_each_possible_cpu(cpu)
 		per_cpu(tracing_cpu, cpu) = 0;
 
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);
 
 	return start_irqsoff_tracer(irqsoff_trace, set);
@@ -297,13 +297,13 @@ static void irqsoff_print_header(struct seq_file *s)
 /*
  * Should this new latency be reported/recorded?
  */
-static int report_latency(cycle_t delta)
+static int report_latency(struct trace_array *tr, cycle_t delta)
 {
 	if (tracing_thresh) {
 		if (delta < tracing_thresh)
 			return 0;
 	} else {
-		if (delta <= tracing_max_latency)
+		if (delta <= tr->max_latency)
 			return 0;
 	}
 	return 1;
@@ -327,13 +327,13 @@ check_critical_timing(struct trace_array *tr,
 
 	pc = preempt_count();
 
-	if (!report_latency(delta))
+	if (!report_latency(tr, delta))
 		goto out;
 
 	raw_spin_lock_irqsave(&max_trace_lock, flags);
 
 	/* check if we are still the max latency */
-	if (!report_latency(delta))
+	if (!report_latency(tr, delta))
 		goto out_unlock;
 
 	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
@@ -346,7 +346,7 @@ check_critical_timing(struct trace_array *tr,
 	data->critical_end = parent_ip;
 
 	if (likely(!is_tracing_stopped())) {
-		tracing_max_latency = delta;
+		tr->max_latency = delta;
 		update_max_tr_single(tr, current, cpu);
 	}
 
@@ -605,7 +605,7 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
 	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
 
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 	irqsoff_trace = tr;
 	/* make sure that the tracer is visible */
 	smp_wmb();
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 4dd986defa60..41e0b8aa78ed 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -218,7 +218,7 @@ wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 	stop_func_tracer(tr, !set);
 
 	wakeup_reset(wakeup_trace);
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 
 	return start_func_tracer(tr, set);
 }
@@ -344,13 +344,13 @@ static void wakeup_print_header(struct seq_file *s)
 /*
  * Should this new latency be reported/recorded?
  */
-static int report_latency(cycle_t delta)
+static int report_latency(struct trace_array *tr, cycle_t delta)
 {
 	if (tracing_thresh) {
 		if (delta < tracing_thresh)
 			return 0;
 	} else {
-		if (delta <= tracing_max_latency)
+		if (delta <= tr->max_latency)
 			return 0;
 	}
 	return 1;
@@ -418,11 +418,11 @@ probe_wakeup_sched_switch(void *ignore,
 	T1 = ftrace_now(cpu);
 	delta = T1-T0;
 
-	if (!report_latency(delta))
+	if (!report_latency(wakeup_trace, delta))
 		goto out_unlock;
 
 	if (likely(!is_tracing_stopped())) {
-		tracing_max_latency = delta;
+		wakeup_trace->max_latency = delta;
 		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
 	}
 
@@ -609,7 +609,7 @@ static int __wakeup_tracer_init(struct trace_array *tr)
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
 	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
 
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 	wakeup_trace = tr;
 	ftrace_init_array_ops(tr, wakeup_tracer_call);
 	start_wakeup_tracer(tr);
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 519d04affe38..ac3185892960 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -807,7 +807,7 @@ out:
 int
 trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 {
-	unsigned long save_max = tracing_max_latency;
+	unsigned long save_max = tr->max_latency;
 	unsigned long count;
 	int ret;
 
@@ -819,7 +819,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 	}
 
 	/* reset the max latency */
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 	/* disable interrupts for a bit */
 	local_irq_disable();
 	udelay(100);
@@ -846,7 +846,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 		ret = -1;
 	}
 
-	tracing_max_latency = save_max;
+	tr->max_latency = save_max;
 
 	return ret;
 }
@@ -856,7 +856,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 int
 trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 {
-	unsigned long save_max = tracing_max_latency;
+	unsigned long save_max = tr->max_latency;
 	unsigned long count;
 	int ret;
 
@@ -881,7 +881,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 	}
 
 	/* reset the max latency */
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 	/* disable preemption for a bit */
 	preempt_disable();
 	udelay(100);
@@ -908,7 +908,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 		ret = -1;
 	}
 
-	tracing_max_latency = save_max;
+	tr->max_latency = save_max;
 
 	return ret;
 }
@@ -918,7 +918,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 int
 trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
 {
-	unsigned long save_max = tracing_max_latency;
+	unsigned long save_max = tr->max_latency;
 	unsigned long count;
 	int ret;
 
@@ -943,7 +943,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 	}
 
 	/* reset the max latency */
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 
 	/* disable preemption and interrupts for a bit */
 	preempt_disable();
@@ -978,7 +978,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 	}
 
 	/* do the test by disabling interrupts first this time */
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 	tracing_start();
 	trace->start(tr);
 
@@ -1009,7 +1009,7 @@ out:
 	tracing_start();
 out_no_start:
 	trace->reset(tr);
-	tracing_max_latency = save_max;
+	tr->max_latency = save_max;
 
 	return ret;
 }
@@ -1062,7 +1062,7 @@ static int trace_wakeup_test_thread(void *data)
 int
 trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 {
-	unsigned long save_max = tracing_max_latency;
+	unsigned long save_max = tr->max_latency;
 	struct task_struct *p;
 	struct completion is_ready;
 	unsigned long count;
@@ -1088,7 +1088,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 	}
 
 	/* reset the max latency */
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 
 	while (p->on_rq) {
 		/*
@@ -1118,7 +1118,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 	trace->reset(tr);
 	tracing_start();
 
-	tracing_max_latency = save_max;
+	tr->max_latency = save_max;
 
 	/* kill the thread */
 	kthread_stop(p);