author     Steven Rostedt (Red Hat) <rostedt@goodmis.org>   2014-04-29 17:54:37 -0400
committer  Steven Rostedt <rostedt@goodmis.org>             2014-04-30 08:40:05 -0400
commit     b1169cc69ba96b124df820904a6d3eb775491d7f
tree       95927d3a0c262c94def2d76b7f591e9d8ad46385  /kernel/trace/trace.c
parent     f4874261049e3abdd481359d82cafa5068369ebd
tracing: Remove mock up poll wait function
The ring buffer now has a built-in way to wake up readers when there
is data, using irq_work so that the wakeup can be issued safely from
any context. But the tracers were still using the old "poor man's"
wait polling that checks every 1/10 of a second to see if it should
wake up a waiter, which makes the latency for a wakeup excruciatingly
long. There is no need to do that anymore.
Completely remove the different wait_pipe implementations from the
tracers and have them all use the default one now.
Reported-by: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
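For context, the wakeup mechanism the message refers to follows the usual irq_work pattern: the writer only queues an irq_work (which is safe in any context, even with locks held), the irq_work callback performs the actual wake_up(), and readers block on a wait queue instead of polling every 100 msecs. Below is a minimal sketch of that pattern, not the ring buffer's actual code; the rb_*_sketch names and the has_data() condition are made up for illustration.

#include <linux/irq_work.h>
#include <linux/kernel.h>
#include <linux/wait.h>

/* Hypothetical helper object, for illustration only. */
struct rb_wait_sketch {
	wait_queue_head_t	waiters;
	struct irq_work		work;
};

/* irq_work callback: runs in a context where wake_up() is safe. */
static void rb_wake_up_waiters_sketch(struct irq_work *work)
{
	struct rb_wait_sketch *w = container_of(work, struct rb_wait_sketch, work);

	wake_up_all(&w->waiters);
}

static void rb_wait_sketch_init(struct rb_wait_sketch *w)
{
	init_waitqueue_head(&w->waiters);
	init_irq_work(&w->work, rb_wake_up_waiters_sketch);
}

/*
 * Writer side: may be called with locks held or from atomic context,
 * so it only queues the irq_work instead of calling wake_up() directly.
 */
static void rb_signal_data_sketch(struct rb_wait_sketch *w)
{
	irq_work_queue(&w->work);
}

/*
 * Reader side: sleep until the irq_work wakes us, instead of the old
 * set_current_state()/schedule_timeout(HZ / 10) polling loop.
 */
static int rb_wait_for_data_sketch(struct rb_wait_sketch *w, bool (*has_data)(void))
{
	return wait_event_interruptible(w->waiters, has_data());
}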
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c | 29
1 file changed, 4 insertions(+), 25 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e058c6091e45..4c392c8238bf 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1085,7 +1085,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
-static void default_wait_pipe(struct trace_iterator *iter)
+static void wait_on_pipe(struct trace_iterator *iter)
 {
 	/* Iterators are static, they should be filled or empty */
 	if (trace_buffer_iter(iter, iter->cpu_file))
@@ -1202,8 +1202,6 @@ int register_tracer(struct tracer *type)
 	else
 		if (!type->flags->opts)
 			type->flags->opts = dummy_tracer_opt;
-	if (!type->wait_pipe)
-		type->wait_pipe = default_wait_pipe;
 
 	ret = run_tracer_selftest(type);
 	if (ret < 0)
@@ -4207,25 +4205,6 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
 	return trace_poll(iter, filp, poll_table);
 }
 
-/*
- * This is a make-shift waitqueue.
- * A tracer might use this callback on some rare cases:
- *
- * 1) the current tracer might hold the runqueue lock when it wakes up
- *    a reader, hence a deadlock (sched, function, and function graph tracers)
- * 2) the function tracers, trace all functions, we don't want
- *    the overhead of calling wake_up and friends
- *    (and tracing them too)
- *
- * Anyway, this is really very primitive wakeup.
- */
-void poll_wait_pipe(struct trace_iterator *iter)
-{
-	set_current_state(TASK_INTERRUPTIBLE);
-	/* sleep for 100 msecs, and try again. */
-	schedule_timeout(HZ / 10);
-}
-
 /* Must be called with trace_types_lock mutex held. */
 static int tracing_wait_pipe(struct file *filp)
 {
@@ -4251,7 +4230,7 @@ static int tracing_wait_pipe(struct file *filp)
 
 		mutex_unlock(&iter->mutex);
 
-		iter->trace->wait_pipe(iter);
+		wait_on_pipe(iter);
 
 		mutex_lock(&iter->mutex);
 
@@ -5179,7 +5158,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 			goto out_unlock;
 		}
 		mutex_unlock(&trace_types_lock);
-		iter->trace->wait_pipe(iter);
+		wait_on_pipe(iter);
 		mutex_lock(&trace_types_lock);
 		if (signal_pending(current)) {
 			size = -EINTR;
@@ -5390,7 +5369,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 			goto out;
 		}
 		mutex_unlock(&trace_types_lock);
-		iter->trace->wait_pipe(iter);
+		wait_on_pipe(iter);
 		mutex_lock(&trace_types_lock);
 		if (signal_pending(current)) {
 			ret = -EINTR;