diff options
author | Frederic Weisbecker <fweisbec@gmail.com> | 2009-02-10 20:25:00 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-02-17 19:40:20 -0500 |
commit | 6eaaa5d57e76c454479833fc8594cd7c3b75c789 (patch) | |
tree | c8f3c130004199bbbc8d141bbfb0f216539c0724 /kernel/trace/trace.c | |
parent | ac07bcaa8259841905ead3f8cd60b1923ca6c0e5 (diff) |
tracing/core: use appropriate waiting on trace_pipe
Impact: api and pipe waiting change
Currently, the waiting used in tracing_read_pipe() is done through a
100 msecs schedule_timeout() loop which periodically checks if there
are traces in the buffer.
This can cause small latencies for programs which are reading the incoming
events.
This patch makes the reader wait on the trace_wait waitqueue, except
for a few tracers such as the sched and function tracers, which might
already hold the runqueue lock while waking up the reader.
This is performed through a new callback wait_pipe() on struct tracer.
If a specific tracer does not implement one, the default implementation,
which waits on the trace_wait queue, is attached.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r-- | kernel/trace/trace.c | 62 |
1 files changed, 41 insertions, 21 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index dc61e82faad9..881a94474d79 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -499,6 +499,9 @@ __acquires(kernel_lock) | |||
499 | else | 499 | else |
500 | if (!type->flags->opts) | 500 | if (!type->flags->opts) |
501 | type->flags->opts = dummy_tracer_opt; | 501 | type->flags->opts = dummy_tracer_opt; |
502 | if (!type->wait_pipe) | ||
503 | type->wait_pipe = default_wait_pipe; | ||
504 | |||
502 | 505 | ||
503 | #ifdef CONFIG_FTRACE_STARTUP_TEST | 506 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
504 | if (type->selftest && !tracing_selftest_disabled) { | 507 | if (type->selftest && !tracing_selftest_disabled) { |
@@ -1064,7 +1067,10 @@ tracing_sched_wakeup_trace(struct trace_array *tr, | |||
1064 | entry->next_prio = wakee->prio; | 1067 | entry->next_prio = wakee->prio; |
1065 | entry->next_state = wakee->state; | 1068 | entry->next_state = wakee->state; |
1066 | entry->next_cpu = task_cpu(wakee); | 1069 | entry->next_cpu = task_cpu(wakee); |
1067 | trace_buffer_unlock_commit(tr, event, flags, pc); | 1070 | |
1071 | ring_buffer_unlock_commit(tr->buffer, event); | ||
1072 | ftrace_trace_stack(tr, flags, 6, pc); | ||
1073 | ftrace_trace_userstack(tr, flags, pc); | ||
1068 | } | 1074 | } |
1069 | 1075 | ||
1070 | void | 1076 | void |
@@ -2392,6 +2398,38 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table) | |||
2392 | } | 2398 | } |
2393 | } | 2399 | } |
2394 | 2400 | ||
2401 | |||
2402 | void default_wait_pipe(struct trace_iterator *iter) | ||
2403 | { | ||
2404 | DEFINE_WAIT(wait); | ||
2405 | |||
2406 | prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE); | ||
2407 | |||
2408 | if (trace_empty(iter)) | ||
2409 | schedule(); | ||
2410 | |||
2411 | finish_wait(&trace_wait, &wait); | ||
2412 | } | ||
2413 | |||
2414 | /* | ||
2415 | * This is a make-shift waitqueue. | ||
2416 | * A tracer might use this callback on some rare cases: | ||
2417 | * | ||
2418 | * 1) the current tracer might hold the runqueue lock when it wakes up | ||
2419 | * a reader, hence a deadlock (sched, function, and function graph tracers) | ||
2420 | * 2) the function tracers, trace all functions, we don't want | ||
2421 | * the overhead of calling wake_up and friends | ||
2422 | * (and tracing them too) | ||
2423 | * | ||
2424 | * Anyway, this is really very primitive wakeup. | ||
2425 | */ | ||
2426 | void poll_wait_pipe(struct trace_iterator *iter) | ||
2427 | { | ||
2428 | set_current_state(TASK_INTERRUPTIBLE); | ||
2429 | /* sleep for 100 msecs, and try again. */ | ||
2430 | schedule_timeout(HZ / 10); | ||
2431 | } | ||
2432 | |||
2395 | /* Must be called with trace_types_lock mutex held. */ | 2433 | /* Must be called with trace_types_lock mutex held. */ |
2396 | static int tracing_wait_pipe(struct file *filp) | 2434 | static int tracing_wait_pipe(struct file *filp) |
2397 | { | 2435 | { |
@@ -2403,30 +2441,14 @@ static int tracing_wait_pipe(struct file *filp) | |||
2403 | return -EAGAIN; | 2441 | return -EAGAIN; |
2404 | } | 2442 | } |
2405 | 2443 | ||
2406 | /* | ||
2407 | * This is a make-shift waitqueue. The reason we don't use | ||
2408 | * an actual wait queue is because: | ||
2409 | * 1) we only ever have one waiter | ||
2410 | * 2) the tracing, traces all functions, we don't want | ||
2411 | * the overhead of calling wake_up and friends | ||
2412 | * (and tracing them too) | ||
2413 | * Anyway, this is really very primitive wakeup. | ||
2414 | */ | ||
2415 | set_current_state(TASK_INTERRUPTIBLE); | ||
2416 | iter->tr->waiter = current; | ||
2417 | |||
2418 | mutex_unlock(&trace_types_lock); | 2444 | mutex_unlock(&trace_types_lock); |
2419 | 2445 | ||
2420 | /* sleep for 100 msecs, and try again. */ | 2446 | iter->trace->wait_pipe(iter); |
2421 | schedule_timeout(HZ/10); | ||
2422 | 2447 | ||
2423 | mutex_lock(&trace_types_lock); | 2448 | mutex_lock(&trace_types_lock); |
2424 | 2449 | ||
2425 | iter->tr->waiter = NULL; | 2450 | if (signal_pending(current)) |
2426 | |||
2427 | if (signal_pending(current)) { | ||
2428 | return -EINTR; | 2451 | return -EINTR; |
2429 | } | ||
2430 | 2452 | ||
2431 | if (iter->trace != current_trace) | 2453 | if (iter->trace != current_trace) |
2432 | return 0; | 2454 | return 0; |
@@ -2442,8 +2464,6 @@ static int tracing_wait_pipe(struct file *filp) | |||
2442 | */ | 2464 | */ |
2443 | if (!tracer_enabled && iter->pos) | 2465 | if (!tracer_enabled && iter->pos) |
2444 | break; | 2466 | break; |
2445 | |||
2446 | continue; | ||
2447 | } | 2467 | } |
2448 | 2468 | ||
2449 | return 1; | 2469 | return 1; |