| author | Steven Rostedt <srostedt@redhat.com> | 2012-11-01 20:54:21 -0400 |
|---|---|---|
| committer | Steven Rostedt <rostedt@goodmis.org> | 2012-11-02 10:21:52 -0400 |
| commit | 0d5c6e1c19bab82fad4837108c2902f557d62a04 (patch) | |
| tree | ed075db499735ea4d72b9d9d7f992fe7d9a1a328 /kernel/trace/trace.c | |
| parent | 02404baf1b47123f1c88c9f9f1f3b00e1e2b10db (diff) | |
tracing: Use irq_work for wake ups and remove *_nowake_*() functions
Have the ring buffer commit function use the irq_work infrastructure to
wake up any waiters blocked on the ring buffer waiting for new data. The
irq_work facility was created for exactly this purpose: doing the actual
wake up at the time data is added is too dangerous, as an event or
function trace may fire from within the work queue locks themselves and
cause a deadlock. The irq_work instead either defers the action to the
next timer interrupt, or raises an IPI to itself, forcing an interrupt to
do the work in a safe location.

With irq_work, all ring buffer commits can safely do wake ups, which
removes the need for the "nowake" variants of the ring buffer commit
functions that events and function tracing used. Every commit now goes
through the normal commit path, and the "nowake" variants are removed.
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
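
For readers unfamiliar with irq_work: the pattern this patch adopts is small. A `struct irq_work` is bound to a callback once with `init_irq_work()`, and `irq_work_queue()` can then be called from almost any context; the callback runs shortly afterwards in interrupt context, via a self-IPI where the architecture supports it or at the next timer tick otherwise. A minimal sketch of the pattern (the `demo_*` names are hypothetical, not from this patch):

```c
#include <linux/irq_work.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
static struct irq_work demo_work;

/* Runs later, in a safe interrupt context. */
static void demo_wake(struct irq_work *work)
{
	wake_up_all(&demo_wait);
}

static int __init demo_init(void)
{
	init_irq_work(&demo_work, demo_wake);	/* bind the callback once */
	return 0;
}

/*
 * May be called where a direct wake_up_all() could deadlock,
 * e.g. with wait-queue or scheduler locks already held.
 */
static void demo_notify(void)
{
	irq_work_queue(&demo_work);	/* defer the wake up */
}
```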
Diffstat (limited to 'kernel/trace/trace.c')

    -rw-r--r--  kernel/trace/trace.c  121

1 file changed, 68 insertions(+), 53 deletions(-)
```diff
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d5cbc0d3f209..37d1c703e3ec 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -19,6 +19,7 @@
 #include <linux/seq_file.h>
 #include <linux/notifier.h>
 #include <linux/irqflags.h>
+#include <linux/irq_work.h>
 #include <linux/debugfs.h>
 #include <linux/pagemap.h>
 #include <linux/hardirq.h>
@@ -85,6 +86,14 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
 static DEFINE_PER_CPU(bool, trace_cmdline_save);
 
 /*
+ * When a reader is waiting for data, this variable is
+ * set to true.
+ */
+static bool trace_wakeup_needed;
+
+static struct irq_work trace_work_wakeup;
+
+/*
  * Kill all tracing for good (never come back).
  * It is initialized to 1 but will turn to zero if the initialization
  * of the tracer is successful. But that is the only place that sets
@@ -329,12 +338,18 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 static int trace_stop_count;
 static DEFINE_RAW_SPINLOCK(tracing_start_lock);
 
-static void wakeup_work_handler(struct work_struct *work)
+/**
+ * trace_wake_up - wake up tasks waiting for trace input
+ *
+ * Wakes up any task that is blocked on the trace_wait queue.
+ * This is used with trace_poll for tasks polling the
+ * trace.
+ */
+static void trace_wake_up(struct irq_work *work)
 {
-	wake_up(&trace_wait);
-}
+	wake_up_all(&trace_wait);
 
-static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
+}
 
 /**
  * tracing_on - enable tracing buffers
@@ -389,22 +404,6 @@ int tracing_is_on(void)
 }
 EXPORT_SYMBOL_GPL(tracing_is_on);
 
-/**
- * trace_wake_up - wake up tasks waiting for trace input
- *
- * Schedules a delayed work to wake up any task that is blocked on the
- * trace_wait queue. These is used with trace_poll for tasks polling the
- * trace.
- */
-void trace_wake_up(void)
-{
-	const unsigned long delay = msecs_to_jiffies(2);
-
-	if (trace_flags & TRACE_ITER_BLOCK)
-		return;
-	schedule_delayed_work(&wakeup_work, delay);
-}
-
 static int __init set_buf_size(char *str)
 {
 	unsigned long buf_size;
@@ -753,6 +752,40 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
+static void default_wait_pipe(struct trace_iterator *iter)
+{
+	DEFINE_WAIT(wait);
+
+	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
+
+	/*
+	 * The events can happen in critical sections where
+	 * checking a work queue can cause deadlocks.
+	 * After adding a task to the queue, this flag is set
+	 * only to notify events to try to wake up the queue
+	 * using irq_work.
+	 *
+	 * We don't clear it even if the buffer is no longer
+	 * empty. The flag only causes the next event to run
+	 * irq_work to do the work queue wake up. The worst
+	 * that can happen if we race with !trace_empty() is that
+	 * an event will cause an irq_work to try to wake up
+	 * an empty queue.
+	 *
+	 * There's no reason to protect this flag either, as
+	 * the work queue and irq_work logic will do the necessary
+	 * synchronization for the wake ups. The only thing
+	 * that is necessary is that the wake up happens after
+	 * a task has been queued. Spurious wake ups are OK.
+	 */
+	trace_wakeup_needed = true;
+
+	if (trace_empty(iter))
+		schedule();
+
+	finish_wait(&trace_wait, &wait);
+}
+
 /**
  * register_tracer - register a tracer with the ftrace system.
  * @type - the plugin for the tracer
@@ -1156,30 +1189,32 @@ void
 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
 {
 	__this_cpu_write(trace_cmdline_save, true);
+	if (trace_wakeup_needed) {
+		trace_wakeup_needed = false;
+		/* irq_work_queue() supplies its own memory barriers */
+		irq_work_queue(&trace_work_wakeup);
+	}
 	ring_buffer_unlock_commit(buffer, event);
 }
 
 static inline void
 __trace_buffer_unlock_commit(struct ring_buffer *buffer,
 			     struct ring_buffer_event *event,
-			     unsigned long flags, int pc,
-			     int wake)
+			     unsigned long flags, int pc)
 {
 	__buffer_unlock_commit(buffer, event);
 
 	ftrace_trace_stack(buffer, flags, 6, pc);
 	ftrace_trace_userstack(buffer, flags, pc);
-
-	if (wake)
-		trace_wake_up();
 }
 
 void trace_buffer_unlock_commit(struct ring_buffer *buffer,
 				struct ring_buffer_event *event,
 				unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
+EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
 
 struct ring_buffer_event *
 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
```
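Taken together, `default_wait_pipe()` and `__buffer_unlock_commit()` form a small flag-based handshake: the reader publishes `trace_wakeup_needed` *before* its final emptiness check, so any commit that adds data after that check is guaranteed to see the flag and queue the wake up; the worst a race can produce is a spurious wake up of an empty queue. A condensed model of that handshake (hypothetical `demo_*` names, not code from the patch):

```c
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
static bool demo_wakeup_needed;
static struct irq_work demo_wakeup_work;	/* init_irq_work()'d at boot */

/* Reader: set the flag before the last emptiness check, then sleep. */
static void demo_wait_for_data(bool (*empty)(void))
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&demo_wait, &wait, TASK_INTERRUPTIBLE);
	demo_wakeup_needed = true;	/* any later commit kicks the irq_work */
	if (empty())
		schedule();
	finish_wait(&demo_wait, &wait);
}

/* Writer: runs where a direct wake_up_all() could deadlock. */
static void demo_commit(void)
{
	if (demo_wakeup_needed) {
		demo_wakeup_needed = false;
		irq_work_queue(&demo_wakeup_work);	/* deferred wake up */
	}
	/* ... commit the data itself ... */
}
```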
```diff
@@ -1196,29 +1231,21 @@ void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
 				struct ring_buffer_event *event,
 				unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
 
-void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
-				       struct ring_buffer_event *event,
-				       unsigned long flags, int pc)
-{
-	__trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
-}
-EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
-
-void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
-					    struct ring_buffer_event *event,
-					    unsigned long flags, int pc,
-					    struct pt_regs *regs)
+void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+				     struct ring_buffer_event *event,
+				     unsigned long flags, int pc,
+				     struct pt_regs *regs)
 {
 	__buffer_unlock_commit(buffer, event);
 
 	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
 	ftrace_trace_userstack(buffer, flags, pc);
 }
-EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
+EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
 
 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
 					 struct ring_buffer_event *event)
@@ -3354,19 +3381,6 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
 	}
 }
 
-
-void default_wait_pipe(struct trace_iterator *iter)
-{
-	DEFINE_WAIT(wait);
-
-	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
-
-	if (trace_empty(iter))
-		schedule();
-
-	finish_wait(&trace_wait, &wait);
-}
-
 /*
  * This is a make-shift waitqueue.
  * A tracer might use this callback on some rare cases:
@@ -5107,6 +5121,7 @@ __init static int tracer_alloc_buffers(void)
 #endif
 
 	trace_init_cmdlines();
+	init_irq_work(&trace_work_wakeup, trace_wake_up);
 
 	register_tracer(&nop_trace);
 	current_trace = &nop_trace;
```
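
Callers of the removed `*_nowake_*()` variants are updated elsewhere in the commit; this diff is limited to kernel/trace/trace.c, which is why `trace_buffer_unlock_commit()` gains an `EXPORT_SYMBOL_GPL()` above. The conversion at the call sites is mechanical, since the normal commit is now always safe to call; roughly (illustrative call sites, not lines from this diff):

```diff
-	trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
+	trace_buffer_unlock_commit(buffer, event, irq_flags, pc);

-	trace_nowake_buffer_unlock_commit_regs(buffer, event, irq_flags, pc, regs);
+	trace_buffer_unlock_commit_regs(buffer, event, irq_flags, pc, regs);
```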