author    Jiri Olsa <jolsa@redhat.com>           2011-03-25 07:05:18 -0400
committer Steven Rostedt <rostedt@goodmis.org>  2011-04-04 12:18:24 -0400
commit    ee5e51f51be755830f57445e268ba50e88ccbdbb
tree      059d6c5843275a5ab94ae01408c1dc6dbe00b2ea /kernel/trace
parent    1813dc3776c22ad4b0294a6df8434b9a02c98109
tracing: Avoid soft lockup in trace_pipe
Running the following commands:

	# enable the binary option
	echo 1 > ./options/bin
	# disable context info option
	echo 0 > ./options/context-info
	# tracing only events
	echo 1 > ./events/enable
	cat trace_pipe

plus forcing the system to generate many tracing events, causes a lockup
(in non-preemptive kernels) inside the tracing_read_pipe function. The
issue is also easily reproduced by running the LTP stress test
(ftrace_stress_test.sh).

The reasons are:

 - the bin/hex/raw output functions for events are set to the
   trace_nop_print function, which prints nothing and returns the
   TRACE_TYPE_HANDLED value
 - the LOST EVENT trace does not handle trace_seq overflow

These reasons force the while loop in the tracing_read_pipe function
never to break.

The attached patch fixes the handling of the lost event trace, and
changes trace_nop_print to print minimal info, which is needed for
correct tracing_read_pipe processing.

v2 changes:
 - dropped the cond_resched changes in favor of the trace_nop_print
   changes
 - WARN changed to WARN_ONCE, with added info to be able to find the
   culprit

v3 changes:
 - make the patch comment more accurate

Signed-off-by: Jiri Olsa <jolsa@redhat.com>
LKML-Reference: <20110325110518.GC1922@jolsa.brq.redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
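[Editor's note] The contract at the heart of this fix: trace_seq_printf()
returns 0 once the reader's seq buffer is full, and a print handler is
expected to turn that into TRACE_TYPE_PARTIAL_LINE so the consumer loop
in tracing_read_pipe() can stop. The userspace model below is a sketch
for illustration only, not kernel code; the names (struct seq, seq_printf,
nop_print, nop_print_fixed) are invented stand-ins for their trace_seq
counterparts. It shows why a handler that writes nothing and
unconditionally reports success keeps that loop spinning.

#include <stdarg.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-in for the kernel's trace_seq: fixed buffer plus a full flag. */
struct seq {
	char buf[64];
	size_t len;
	bool full;
};

/* Mimics trace_seq_printf(): returns 0 (failure) once the buffer overflows. */
static int seq_printf(struct seq *s, const char *fmt, ...)
{
	va_list ap;
	int n;

	if (s->full)
		return 0;

	va_start(ap, fmt);
	n = vsnprintf(s->buf + s->len, sizeof(s->buf) - s->len, fmt, ap);
	va_end(ap);

	if (n < 0 || (size_t)n >= sizeof(s->buf) - s->len) {
		s->full = true;	/* overflow: tell the caller it failed */
		return 0;
	}
	s->len += n;
	return 1;
}

enum line_t { HANDLED, PARTIAL };

/* Broken handler, like the old trace_nop_print: no output, always "handled". */
static enum line_t nop_print(struct seq *s)
{
	(void)s;
	return HANDLED;
}

/* Fixed handler: emit minimal info and propagate an overflow as PARTIAL. */
static enum line_t nop_print_fixed(struct seq *s)
{
	if (!seq_printf(s, "type: %d\n", 0))
		return PARTIAL;
	return HANDLED;
}

int main(void)
{
	struct seq s = { .len = 0, .full = false };
	enum line_t (*handler)(struct seq *) = nop_print_fixed;
	long spins = 0;

	/*
	 * Consumer loop modeled on the one in tracing_read_pipe(): it exits
	 * when a handler reports PARTIAL or enough bytes have accumulated.
	 * Point handler at nop_print instead and s.len never grows, so the
	 * real loop (fed by a busy ring buffer) spins forever -- the lockup.
	 */
	while (handler(&s) != PARTIAL) {
		if (s.len >= 32)
			break;	/* enough output: a read() would return now */
		if (++spins > 1000) {	/* safety valve so the demo terminates */
			puts("no forward progress: the soft-lockup condition");
			break;
		}
	}
	printf("iterations: %ld, bytes produced: %zu\n", spins, s.len);
	return 0;
}

In the kernel patch below, the same pattern appears three times:
print_trace_line() now propagates a failed lost-event print as
TRACE_TYPE_PARTIAL_LINE, trace_nop_print() emits at least the entry
type, and the new WARN_ONCE() in tracing_read_pipe() acts as a tripwire
for any remaining handler that fills the seq buffer without reporting it.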
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/trace.c         | 15 ++++++++++++---
-rw-r--r--  kernel/trace/trace_output.c  |  3 +++
2 files changed, 15 insertions(+), 3 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9541c27c1cf2..5af42f478c06 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2013,9 +2013,10 @@ enum print_line_t print_trace_line(struct trace_iterator *iter)
 {
 	enum print_line_t ret;
 
-	if (iter->lost_events)
-		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
-				 iter->cpu, iter->lost_events);
+	if (iter->lost_events &&
+	    !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
+			      iter->cpu, iter->lost_events))
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	if (iter->trace && iter->trace->print_line) {
 		ret = iter->trace->print_line(iter);
@@ -3229,6 +3230,14 @@ waitagain:
 
 		if (iter->seq.len >= cnt)
 			break;
+
+		/*
+		 * Setting the full flag means we reached the trace_seq buffer
+		 * size and we should leave by partial output condition above.
+		 * One of the trace_seq_* functions is not used properly.
+		 */
+		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
+			  iter->ent->type);
 	}
 	trace_access_unlock(iter->cpu_file);
 	trace_event_read_unlock();
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 456be9063c2d..cf535ccedc86 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -830,6 +830,9 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_event);
 enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
 				  struct trace_event *event)
 {
+	if (!trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type))
+		return TRACE_TYPE_PARTIAL_LINE;
+
 	return TRACE_TYPE_HANDLED;
 }
 