diff options
author | Steven Rostedt (Red Hat) <rostedt@goodmis.org> | 2014-04-29 17:54:37 -0400 |
---|---|---|
committer | Steven Rostedt <rostedt@goodmis.org> | 2014-04-30 08:40:05 -0400 |
commit | b1169cc69ba96b124df820904a6d3eb775491d7f (patch) | |
tree | 95927d3a0c262c94def2d76b7f591e9d8ad46385 | |
parent | f4874261049e3abdd481359d82cafa5068369ebd (diff) |
tracing: Remove mock up poll wait function
The ring buffer now has a built-in way to wake up readers
when there's data, using irq_work so that it is safe to do
in any context. But the tracers were still using the old
"poor man's" wait polling that checks every 1/10 of a second
to see if it should wake up a waiter. This makes the latency
for a wake up excruciatingly long. No need to do that anymore.
Completely remove the different wait_poll types from the tracers
and have them all use the default one now.
Reported-by: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-rw-r--r-- | kernel/trace/trace.c | 29 | ||||
-rw-r--r-- | kernel/trace/trace.h | 4 | ||||
-rw-r--r-- | kernel/trace/trace_functions.c | 1 | ||||
-rw-r--r-- | kernel/trace/trace_functions_graph.c | 1 | ||||
-rw-r--r-- | kernel/trace/trace_nop.c | 1 | ||||
-rw-r--r-- | kernel/trace/trace_sched_wakeup.c | 2 |
6 files changed, 4 insertions, 34 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index e058c6091e45..4c392c8238bf 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -1085,7 +1085,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
1085 | } | 1085 | } |
1086 | #endif /* CONFIG_TRACER_MAX_TRACE */ | 1086 | #endif /* CONFIG_TRACER_MAX_TRACE */ |
1087 | 1087 | ||
1088 | static void default_wait_pipe(struct trace_iterator *iter) | 1088 | static void wait_on_pipe(struct trace_iterator *iter) |
1089 | { | 1089 | { |
1090 | /* Iterators are static, they should be filled or empty */ | 1090 | /* Iterators are static, they should be filled or empty */ |
1091 | if (trace_buffer_iter(iter, iter->cpu_file)) | 1091 | if (trace_buffer_iter(iter, iter->cpu_file)) |
@@ -1202,8 +1202,6 @@ int register_tracer(struct tracer *type) | |||
1202 | else | 1202 | else |
1203 | if (!type->flags->opts) | 1203 | if (!type->flags->opts) |
1204 | type->flags->opts = dummy_tracer_opt; | 1204 | type->flags->opts = dummy_tracer_opt; |
1205 | if (!type->wait_pipe) | ||
1206 | type->wait_pipe = default_wait_pipe; | ||
1207 | 1205 | ||
1208 | ret = run_tracer_selftest(type); | 1206 | ret = run_tracer_selftest(type); |
1209 | if (ret < 0) | 1207 | if (ret < 0) |
@@ -4207,25 +4205,6 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table) | |||
4207 | return trace_poll(iter, filp, poll_table); | 4205 | return trace_poll(iter, filp, poll_table); |
4208 | } | 4206 | } |
4209 | 4207 | ||
4210 | /* | ||
4211 | * This is a make-shift waitqueue. | ||
4212 | * A tracer might use this callback on some rare cases: | ||
4213 | * | ||
4214 | * 1) the current tracer might hold the runqueue lock when it wakes up | ||
4215 | * a reader, hence a deadlock (sched, function, and function graph tracers) | ||
4216 | * 2) the function tracers, trace all functions, we don't want | ||
4217 | * the overhead of calling wake_up and friends | ||
4218 | * (and tracing them too) | ||
4219 | * | ||
4220 | * Anyway, this is really very primitive wakeup. | ||
4221 | */ | ||
4222 | void poll_wait_pipe(struct trace_iterator *iter) | ||
4223 | { | ||
4224 | set_current_state(TASK_INTERRUPTIBLE); | ||
4225 | /* sleep for 100 msecs, and try again. */ | ||
4226 | schedule_timeout(HZ / 10); | ||
4227 | } | ||
4228 | |||
4229 | /* Must be called with trace_types_lock mutex held. */ | 4208 | /* Must be called with trace_types_lock mutex held. */ |
4230 | static int tracing_wait_pipe(struct file *filp) | 4209 | static int tracing_wait_pipe(struct file *filp) |
4231 | { | 4210 | { |
@@ -4251,7 +4230,7 @@ static int tracing_wait_pipe(struct file *filp) | |||
4251 | 4230 | ||
4252 | mutex_unlock(&iter->mutex); | 4231 | mutex_unlock(&iter->mutex); |
4253 | 4232 | ||
4254 | iter->trace->wait_pipe(iter); | 4233 | wait_on_pipe(iter); |
4255 | 4234 | ||
4256 | mutex_lock(&iter->mutex); | 4235 | mutex_lock(&iter->mutex); |
4257 | 4236 | ||
@@ -5179,7 +5158,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, | |||
5179 | goto out_unlock; | 5158 | goto out_unlock; |
5180 | } | 5159 | } |
5181 | mutex_unlock(&trace_types_lock); | 5160 | mutex_unlock(&trace_types_lock); |
5182 | iter->trace->wait_pipe(iter); | 5161 | wait_on_pipe(iter); |
5183 | mutex_lock(&trace_types_lock); | 5162 | mutex_lock(&trace_types_lock); |
5184 | if (signal_pending(current)) { | 5163 | if (signal_pending(current)) { |
5185 | size = -EINTR; | 5164 | size = -EINTR; |
@@ -5390,7 +5369,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, | |||
5390 | goto out; | 5369 | goto out; |
5391 | } | 5370 | } |
5392 | mutex_unlock(&trace_types_lock); | 5371 | mutex_unlock(&trace_types_lock); |
5393 | iter->trace->wait_pipe(iter); | 5372 | wait_on_pipe(iter); |
5394 | mutex_lock(&trace_types_lock); | 5373 | mutex_lock(&trace_types_lock); |
5395 | if (signal_pending(current)) { | 5374 | if (signal_pending(current)) { |
5396 | ret = -EINTR; | 5375 | ret = -EINTR; |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 8624b5041466..3b3e09e61f33 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -338,7 +338,6 @@ struct tracer_flags { | |||
338 | * @stop: called when tracing is paused (echo 0 > tracing_enabled) | 338 | * @stop: called when tracing is paused (echo 0 > tracing_enabled) |
339 | * @open: called when the trace file is opened | 339 | * @open: called when the trace file is opened |
340 | * @pipe_open: called when the trace_pipe file is opened | 340 | * @pipe_open: called when the trace_pipe file is opened |
341 | * @wait_pipe: override how the user waits for traces on trace_pipe | ||
342 | * @close: called when the trace file is released | 341 | * @close: called when the trace file is released |
343 | * @pipe_close: called when the trace_pipe file is released | 342 | * @pipe_close: called when the trace_pipe file is released |
344 | * @read: override the default read callback on trace_pipe | 343 | * @read: override the default read callback on trace_pipe |
@@ -357,7 +356,6 @@ struct tracer { | |||
357 | void (*stop)(struct trace_array *tr); | 356 | void (*stop)(struct trace_array *tr); |
358 | void (*open)(struct trace_iterator *iter); | 357 | void (*open)(struct trace_iterator *iter); |
359 | void (*pipe_open)(struct trace_iterator *iter); | 358 | void (*pipe_open)(struct trace_iterator *iter); |
360 | void (*wait_pipe)(struct trace_iterator *iter); | ||
361 | void (*close)(struct trace_iterator *iter); | 359 | void (*close)(struct trace_iterator *iter); |
362 | void (*pipe_close)(struct trace_iterator *iter); | 360 | void (*pipe_close)(struct trace_iterator *iter); |
363 | ssize_t (*read)(struct trace_iterator *iter, | 361 | ssize_t (*read)(struct trace_iterator *iter, |
@@ -566,8 +564,6 @@ void trace_init_global_iter(struct trace_iterator *iter); | |||
566 | 564 | ||
567 | void tracing_iter_reset(struct trace_iterator *iter, int cpu); | 565 | void tracing_iter_reset(struct trace_iterator *iter, int cpu); |
568 | 566 | ||
569 | void poll_wait_pipe(struct trace_iterator *iter); | ||
570 | |||
571 | void tracing_sched_switch_trace(struct trace_array *tr, | 567 | void tracing_sched_switch_trace(struct trace_array *tr, |
572 | struct task_struct *prev, | 568 | struct task_struct *prev, |
573 | struct task_struct *next, | 569 | struct task_struct *next, |
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 2d9482b8f26a..57f0ec962d2c 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
@@ -252,7 +252,6 @@ static struct tracer function_trace __tracer_data = | |||
252 | .init = function_trace_init, | 252 | .init = function_trace_init, |
253 | .reset = function_trace_reset, | 253 | .reset = function_trace_reset, |
254 | .start = function_trace_start, | 254 | .start = function_trace_start, |
255 | .wait_pipe = poll_wait_pipe, | ||
256 | .flags = &func_flags, | 255 | .flags = &func_flags, |
257 | .set_flag = func_set_flag, | 256 | .set_flag = func_set_flag, |
258 | .allow_instances = true, | 257 | .allow_instances = true, |
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index deff11200261..b86dd4d8c6a6 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -1505,7 +1505,6 @@ static struct tracer graph_trace __tracer_data = { | |||
1505 | .pipe_open = graph_trace_open, | 1505 | .pipe_open = graph_trace_open, |
1506 | .close = graph_trace_close, | 1506 | .close = graph_trace_close, |
1507 | .pipe_close = graph_trace_close, | 1507 | .pipe_close = graph_trace_close, |
1508 | .wait_pipe = poll_wait_pipe, | ||
1509 | .init = graph_trace_init, | 1508 | .init = graph_trace_init, |
1510 | .reset = graph_trace_reset, | 1509 | .reset = graph_trace_reset, |
1511 | .print_line = print_graph_function, | 1510 | .print_line = print_graph_function, |
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c index 69a5cc94c01a..fcf0a9e48916 100644 --- a/kernel/trace/trace_nop.c +++ b/kernel/trace/trace_nop.c | |||
@@ -91,7 +91,6 @@ struct tracer nop_trace __read_mostly = | |||
91 | .name = "nop", | 91 | .name = "nop", |
92 | .init = nop_trace_init, | 92 | .init = nop_trace_init, |
93 | .reset = nop_trace_reset, | 93 | .reset = nop_trace_reset, |
94 | .wait_pipe = poll_wait_pipe, | ||
95 | #ifdef CONFIG_FTRACE_SELFTEST | 94 | #ifdef CONFIG_FTRACE_SELFTEST |
96 | .selftest = trace_selftest_startup_nop, | 95 | .selftest = trace_selftest_startup_nop, |
97 | #endif | 96 | #endif |
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 1573c03640d2..19bd8928ce94 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
@@ -705,7 +705,6 @@ static struct tracer wakeup_rt_tracer __read_mostly = | |||
705 | .reset = wakeup_tracer_reset, | 705 | .reset = wakeup_tracer_reset, |
706 | .start = wakeup_tracer_start, | 706 | .start = wakeup_tracer_start, |
707 | .stop = wakeup_tracer_stop, | 707 | .stop = wakeup_tracer_stop, |
708 | .wait_pipe = poll_wait_pipe, | ||
709 | .print_max = true, | 708 | .print_max = true, |
710 | .print_header = wakeup_print_header, | 709 | .print_header = wakeup_print_header, |
711 | .print_line = wakeup_print_line, | 710 | .print_line = wakeup_print_line, |
@@ -728,7 +727,6 @@ static struct tracer wakeup_dl_tracer __read_mostly = | |||
728 | .reset = wakeup_tracer_reset, | 727 | .reset = wakeup_tracer_reset, |
729 | .start = wakeup_tracer_start, | 728 | .start = wakeup_tracer_start, |
730 | .stop = wakeup_tracer_stop, | 729 | .stop = wakeup_tracer_stop, |
731 | .wait_pipe = poll_wait_pipe, | ||
732 | .print_max = true, | 730 | .print_max = true, |
733 | .print_header = wakeup_print_header, | 731 | .print_header = wakeup_print_header, |
734 | .print_line = wakeup_print_line, | 732 | .print_line = wakeup_print_line, |