Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c | 83
1 file changed, 12 insertions(+), 71 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3ec146c96df4..b5b25b6575a9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -19,7 +19,6 @@
 #include <linux/seq_file.h>
 #include <linux/notifier.h>
 #include <linux/irqflags.h>
-#include <linux/irq_work.h>
 #include <linux/debugfs.h>
 #include <linux/pagemap.h>
 #include <linux/hardirq.h>
@@ -87,14 +86,6 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
 static DEFINE_PER_CPU(bool, trace_cmdline_save);
 
 /*
- * When a reader is waiting for data, then this variable is
- * set to true.
- */
-static bool trace_wakeup_needed;
-
-static struct irq_work trace_work_wakeup;
-
-/*
  * Kill all tracing for good (never come back).
  * It is initialized to 1 but will turn to zero if the initialization
  * of the tracer is successful. But that is the only place that sets
@@ -334,9 +325,6 @@ static inline void trace_access_lock_init(void)
 
 #endif
 
-/* trace_wait is a waitqueue for tasks blocked on trace_poll */
-static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
-
 /* trace_flags holds trace_options default values */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
        TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
@@ -344,19 +332,6 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
        TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS;
 
 /**
- * trace_wake_up - wake up tasks waiting for trace input
- *
- * Schedules a delayed work to wake up any task that is blocked on the
- * trace_wait queue. These is used with trace_poll for tasks polling the
- * trace.
- */
-static void trace_wake_up(struct irq_work *work)
-{
-       wake_up_all(&trace_wait);
-
-}
-
-/**
  * tracing_on - enable tracing buffers
  *
  * This function enables tracing buffers that may have been
@@ -763,36 +738,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 static void default_wait_pipe(struct trace_iterator *iter)
 {
-       DEFINE_WAIT(wait);
-
-       prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
-
-       /*
-        * The events can happen in critical sections where
-        * checking a work queue can cause deadlocks.
-        * After adding a task to the queue, this flag is set
-        * only to notify events to try to wake up the queue
-        * using irq_work.
-        *
-        * We don't clear it even if the buffer is no longer
-        * empty. The flag only causes the next event to run
-        * irq_work to do the work queue wake up. The worse
-        * that can happen if we race with !trace_empty() is that
-        * an event will cause an irq_work to try to wake up
-        * an empty queue.
-        *
-        * There's no reason to protect this flag either, as
-        * the work queue and irq_work logic will do the necessary
-        * synchronization for the wake ups. The only thing
-        * that is necessary is that the wake up happens after
-        * a task has been queued. It's OK for spurious wake ups.
-        */
-       trace_wakeup_needed = true;
-
-       if (trace_empty(iter))
-               schedule();
+       /* Iterators are static, they should be filled or empty */
+       if (trace_buffer_iter(iter, iter->cpu_file))
+               return;
 
-       finish_wait(&trace_wait, &wait);
+       ring_buffer_wait(iter->tr->buffer, iter->cpu_file);
 }
 
 /**
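The open-coded sleep on the global trace_wait queue above is replaced by a single call to ring_buffer_wait(), so the ring buffer now manages its own reader wakeups. As a rough illustration, the prepare/check/sleep pattern such a helper encapsulates is sketched below; rb_waiters() is a hypothetical accessor for a per-buffer waitqueue, and the real ring_buffer_wait() in kernel/trace/ring_buffer.c also handles the cpu == RING_BUFFER_ALL_CPUS case:

    #include <linux/ring_buffer.h>
    #include <linux/sched.h>
    #include <linux/wait.h>

    /* Hypothetical accessor for a per-buffer waitqueue; illustration only. */
    extern wait_queue_head_t *rb_waiters(struct ring_buffer *buffer, int cpu);

    static void ring_buffer_wait_sketch(struct ring_buffer *buffer, int cpu)
    {
            DEFINE_WAIT(wait);

            /* Get on the waitqueue before checking for data, so a wakeup
             * racing with the emptiness check cannot be lost. */
            prepare_to_wait(rb_waiters(buffer, cpu), &wait, TASK_INTERRUPTIBLE);

            /* Sleep only while this CPU's buffer is still empty. */
            if (ring_buffer_empty_cpu(buffer, cpu))
                    schedule();

            finish_wait(rb_waiters(buffer, cpu), &wait);
    }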
@@ -1262,11 +1212,6 @@ void
 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
 {
        __this_cpu_write(trace_cmdline_save, true);
-       if (trace_wakeup_needed) {
-               trace_wakeup_needed = false;
-               /* irq_work_queue() supplies it's own memory barriers */
-               irq_work_queue(&trace_work_wakeup);
-       }
        ring_buffer_unlock_commit(buffer, event);
 }
 
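The block deleted here was the reason the tracing core carried its own irq_work: __buffer_unlock_commit() can run with locks held or even in NMI context, where calling wake_up_all() directly could deadlock on the waitqueue lock, so the wakeup was deferred to a safe interrupt context. A minimal sketch of that deferred-wakeup pattern, reconstructed from the removed code (the my_-prefixed names are illustrative):

    #include <linux/irq_work.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(my_waiters);
    static struct irq_work my_wakeup_work;

    /* Runs later, in a context where taking the waitqueue lock is safe. */
    static void my_do_wakeup(struct irq_work *work)
    {
            wake_up_all(&my_waiters);
    }

    static void my_init(void)
    {
            init_irq_work(&my_wakeup_work, my_do_wakeup);
    }

    /* Called from the hot commit path, possibly with locks held or in NMI:
     * just queue the work; irq_work_queue() supplies the memory barriers. */
    static void my_event_committed(void)
    {
            irq_work_queue(&my_wakeup_work);
    }

With this commit the equivalent deferral moves into the ring buffer code itself, which is why the flag, the work item, and the global waitqueue can all go.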
@@ -3557,21 +3502,18 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
 static unsigned int
 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
 {
-       if (trace_flags & TRACE_ITER_BLOCK) {
+       /* Iterators are static, they should be filled or empty */
+       if (trace_buffer_iter(iter, iter->cpu_file))
+               return POLLIN | POLLRDNORM;
+
+       if (trace_flags & TRACE_ITER_BLOCK)
                /*
                 * Always select as readable when in blocking mode
                 */
                return POLLIN | POLLRDNORM;
-       } else {
-               if (!trace_empty(iter))
-                       return POLLIN | POLLRDNORM;
-               trace_wakeup_needed = true;
-               poll_wait(filp, &trace_wait, poll_table);
-               if (!trace_empty(iter))
-                       return POLLIN | POLLRDNORM;
-
-               return 0;
-       }
+       else
+               return ring_buffer_poll_wait(iter->tr->buffer, iter->cpu_file,
+                                            filp, poll_table);
 }
 
 static unsigned int
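The rewritten trace_poll() either reports the file unconditionally readable in blocking mode or hands the job to ring_buffer_poll_wait(). A sketch of the shape such a poll helper takes, reusing the hypothetical rb_waiters() accessor from the earlier sketch (the real implementation lives in kernel/trace/ring_buffer.c):

    #include <linux/poll.h>
    #include <linux/ring_buffer.h>

    /* Same hypothetical per-buffer waitqueue accessor as above. */
    extern wait_queue_head_t *rb_waiters(struct ring_buffer *buffer, int cpu);

    static unsigned int ring_buffer_poll_wait_sketch(struct ring_buffer *buffer,
                                                     int cpu, struct file *filp,
                                                     poll_table *poll_table)
    {
            /* Register filp on the buffer's waitqueue; the VFS poll core
             * re-invokes this handler after each wakeup to re-check. */
            poll_wait(filp, rb_waiters(buffer, cpu), poll_table);

            if (!ring_buffer_empty_cpu(buffer, cpu))
                    return POLLIN | POLLRDNORM;

            return 0;
    }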
@@ -5701,7 +5643,6 @@ __init static int tracer_alloc_buffers(void)
 #endif
 
        trace_init_cmdlines();
-       init_irq_work(&trace_work_wakeup, trace_wake_up);
 
        register_tracer(&nop_trace);
 