aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2009-02-10 20:25:00 -0500
committerIngo Molnar <mingo@elte.hu>2009-02-17 19:40:20 -0500
commit6eaaa5d57e76c454479833fc8594cd7c3b75c789 (patch)
treec8f3c130004199bbbc8d141bbfb0f216539c0724 /kernel/trace
parentac07bcaa8259841905ead3f8cd60b1923ca6c0e5 (diff)
tracing/core: use appropriate waiting on trace_pipe
Impact: api and pipe waiting change Currently, the waiting used in tracing_read_pipe() is done through a 100 msecs schedule_timeout() loop which periodically checks if there are traces on the buffer. This can cause small latencies for programs which are reading the incoming events. This patch makes the reader wait on the trace_wait waitqueue, except for a few tracers such as the sched and function tracers which might already hold the runqueue lock while waking up the reader. This is performed through a new callback wait_pipe() on struct tracer. If none is implemented for a specific tracer, the default waiting on the trace_wait queue is attached. Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace')
-rw-r--r--kernel/trace/trace.c62
-rw-r--r--kernel/trace/trace.h25
-rw-r--r--kernel/trace/trace_functions.c1
-rw-r--r--kernel/trace/trace_functions_graph.c1
-rw-r--r--kernel/trace/trace_sched_switch.c1
-rw-r--r--kernel/trace/trace_sched_wakeup.c1
6 files changed, 67 insertions, 24 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index dc61e82faad9..881a94474d79 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -499,6 +499,9 @@ __acquires(kernel_lock)
499 else 499 else
500 if (!type->flags->opts) 500 if (!type->flags->opts)
501 type->flags->opts = dummy_tracer_opt; 501 type->flags->opts = dummy_tracer_opt;
502 if (!type->wait_pipe)
503 type->wait_pipe = default_wait_pipe;
504
502 505
503#ifdef CONFIG_FTRACE_STARTUP_TEST 506#ifdef CONFIG_FTRACE_STARTUP_TEST
504 if (type->selftest && !tracing_selftest_disabled) { 507 if (type->selftest && !tracing_selftest_disabled) {
@@ -1064,7 +1067,10 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
1064 entry->next_prio = wakee->prio; 1067 entry->next_prio = wakee->prio;
1065 entry->next_state = wakee->state; 1068 entry->next_state = wakee->state;
1066 entry->next_cpu = task_cpu(wakee); 1069 entry->next_cpu = task_cpu(wakee);
1067 trace_buffer_unlock_commit(tr, event, flags, pc); 1070
1071 ring_buffer_unlock_commit(tr->buffer, event);
1072 ftrace_trace_stack(tr, flags, 6, pc);
1073 ftrace_trace_userstack(tr, flags, pc);
1068} 1074}
1069 1075
1070void 1076void
@@ -2392,6 +2398,38 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
2392 } 2398 }
2393} 2399}
2394 2400
2401
2402void default_wait_pipe(struct trace_iterator *iter)
2403{
2404 DEFINE_WAIT(wait);
2405
2406 prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
2407
2408 if (trace_empty(iter))
2409 schedule();
2410
2411 finish_wait(&trace_wait, &wait);
2412}
2413
2414/*
2415 * This is a make-shift waitqueue.
2416 * A tracer might use this callback on some rare cases:
2417 *
2418 * 1) the current tracer might hold the runqueue lock when it wakes up
2419 * a reader, hence a deadlock (sched, function, and function graph tracers)
2420 * 2) the function tracers, trace all functions, we don't want
2421 * the overhead of calling wake_up and friends
2422 * (and tracing them too)
2423 *
2424 * Anyway, this is really very primitive wakeup.
2425 */
2426void poll_wait_pipe(struct trace_iterator *iter)
2427{
2428 set_current_state(TASK_INTERRUPTIBLE);
2429 /* sleep for 100 msecs, and try again. */
2430 schedule_timeout(HZ / 10);
2431}
2432
2395/* Must be called with trace_types_lock mutex held. */ 2433/* Must be called with trace_types_lock mutex held. */
2396static int tracing_wait_pipe(struct file *filp) 2434static int tracing_wait_pipe(struct file *filp)
2397{ 2435{
@@ -2403,30 +2441,14 @@ static int tracing_wait_pipe(struct file *filp)
2403 return -EAGAIN; 2441 return -EAGAIN;
2404 } 2442 }
2405 2443
2406 /*
2407 * This is a make-shift waitqueue. The reason we don't use
2408 * an actual wait queue is because:
2409 * 1) we only ever have one waiter
2410 * 2) the tracing, traces all functions, we don't want
2411 * the overhead of calling wake_up and friends
2412 * (and tracing them too)
2413 * Anyway, this is really very primitive wakeup.
2414 */
2415 set_current_state(TASK_INTERRUPTIBLE);
2416 iter->tr->waiter = current;
2417
2418 mutex_unlock(&trace_types_lock); 2444 mutex_unlock(&trace_types_lock);
2419 2445
2420 /* sleep for 100 msecs, and try again. */ 2446 iter->trace->wait_pipe(iter);
2421 schedule_timeout(HZ/10);
2422 2447
2423 mutex_lock(&trace_types_lock); 2448 mutex_lock(&trace_types_lock);
2424 2449
2425 iter->tr->waiter = NULL; 2450 if (signal_pending(current))
2426
2427 if (signal_pending(current)) {
2428 return -EINTR; 2451 return -EINTR;
2429 }
2430 2452
2431 if (iter->trace != current_trace) 2453 if (iter->trace != current_trace)
2432 return 0; 2454 return 0;
@@ -2442,8 +2464,6 @@ static int tracing_wait_pipe(struct file *filp)
2442 */ 2464 */
2443 if (!tracer_enabled && iter->pos) 2465 if (!tracer_enabled && iter->pos)
2444 break; 2466 break;
2445
2446 continue;
2447 } 2467 }
2448 2468
2449 return 1; 2469 return 1;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index dbff0207b213..eed732c151fc 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -337,18 +337,34 @@ struct tracer_flags {
337#define TRACER_OPT(s, b) .name = #s, .bit = b 337#define TRACER_OPT(s, b) .name = #s, .bit = b
338 338
339 339
340/* 340/**
341 * A specific tracer, represented by methods that operate on a trace array: 341 * struct tracer - a specific tracer and its callbacks to interact with debugfs
342 * @name: the name chosen to select it on the available_tracers file
343 * @init: called when one switches to this tracer (echo name > current_tracer)
344 * @reset: called when one switches to another tracer
345 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
346 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
347 * @open: called when the trace file is opened
348 * @pipe_open: called when the trace_pipe file is opened
349 * @wait_pipe: override how the user waits for traces on trace_pipe
350 * @close: called when the trace file is released
351 * @read: override the default read callback on trace_pipe
352 * @splice_read: override the default splice_read callback on trace_pipe
353 * @selftest: selftest to run on boot (see trace_selftest.c)
354 * @print_headers: override the first lines that describe your columns
355 * @print_line: callback that prints a trace
356 * @set_flag: signals one of your private flags changed (trace_options file)
357 * @flags: your private flags
342 */ 358 */
343struct tracer { 359struct tracer {
344 const char *name; 360 const char *name;
345 /* Your tracer should raise a warning if init fails */
346 int (*init)(struct trace_array *tr); 361 int (*init)(struct trace_array *tr);
347 void (*reset)(struct trace_array *tr); 362 void (*reset)(struct trace_array *tr);
348 void (*start)(struct trace_array *tr); 363 void (*start)(struct trace_array *tr);
349 void (*stop)(struct trace_array *tr); 364 void (*stop)(struct trace_array *tr);
350 void (*open)(struct trace_iterator *iter); 365 void (*open)(struct trace_iterator *iter);
351 void (*pipe_open)(struct trace_iterator *iter); 366 void (*pipe_open)(struct trace_iterator *iter);
367 void (*wait_pipe)(struct trace_iterator *iter);
352 void (*close)(struct trace_iterator *iter); 368 void (*close)(struct trace_iterator *iter);
353 ssize_t (*read)(struct trace_iterator *iter, 369 ssize_t (*read)(struct trace_iterator *iter,
354 struct file *filp, char __user *ubuf, 370 struct file *filp, char __user *ubuf,
@@ -432,6 +448,9 @@ void tracing_generic_entry_update(struct trace_entry *entry,
432 unsigned long flags, 448 unsigned long flags,
433 int pc); 449 int pc);
434 450
451void default_wait_pipe(struct trace_iterator *iter);
452void poll_wait_pipe(struct trace_iterator *iter);
453
435void ftrace(struct trace_array *tr, 454void ftrace(struct trace_array *tr,
436 struct trace_array_cpu *data, 455 struct trace_array_cpu *data,
437 unsigned long ip, 456 unsigned long ip,
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 4c113a8c466f..c9a0b7df44ff 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -225,6 +225,7 @@ static struct tracer function_trace __read_mostly =
225 .init = function_trace_init, 225 .init = function_trace_init,
226 .reset = function_trace_reset, 226 .reset = function_trace_reset,
227 .start = function_trace_start, 227 .start = function_trace_start,
228 .wait_pipe = poll_wait_pipe,
228 .flags = &func_flags, 229 .flags = &func_flags,
229 .set_flag = func_set_flag, 230 .set_flag = func_set_flag,
230#ifdef CONFIG_FTRACE_SELFTEST 231#ifdef CONFIG_FTRACE_SELFTEST
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 519a0cab1530..0ff5cb661900 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -757,6 +757,7 @@ static struct tracer graph_trace __read_mostly = {
757 .name = "function_graph", 757 .name = "function_graph",
758 .open = graph_trace_open, 758 .open = graph_trace_open,
759 .close = graph_trace_close, 759 .close = graph_trace_close,
760 .wait_pipe = poll_wait_pipe,
760 .init = graph_trace_init, 761 .init = graph_trace_init,
761 .reset = graph_trace_reset, 762 .reset = graph_trace_reset,
762 .print_line = print_graph_function, 763 .print_line = print_graph_function,
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 82fbb5a2df89..77132c2cf3d9 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -221,6 +221,7 @@ static struct tracer sched_switch_trace __read_mostly =
221 .reset = sched_switch_trace_reset, 221 .reset = sched_switch_trace_reset,
222 .start = sched_switch_trace_start, 222 .start = sched_switch_trace_start,
223 .stop = sched_switch_trace_stop, 223 .stop = sched_switch_trace_stop,
224 .wait_pipe = poll_wait_pipe,
224#ifdef CONFIG_FTRACE_SELFTEST 225#ifdef CONFIG_FTRACE_SELFTEST
225 .selftest = trace_selftest_startup_sched_switch, 226 .selftest = trace_selftest_startup_sched_switch,
226#endif 227#endif
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 276c51aaf314..db55f7aaa640 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -380,6 +380,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
380 .reset = wakeup_tracer_reset, 380 .reset = wakeup_tracer_reset,
381 .start = wakeup_tracer_start, 381 .start = wakeup_tracer_start,
382 .stop = wakeup_tracer_stop, 382 .stop = wakeup_tracer_stop,
383 .wait_pipe = poll_wait_pipe,
383 .print_max = 1, 384 .print_max = 1,
384#ifdef CONFIG_FTRACE_SELFTEST 385#ifdef CONFIG_FTRACE_SELFTEST
385 .selftest = trace_selftest_startup_wakeup, 386 .selftest = trace_selftest_startup_wakeup,