about | summary | refs | log | tree | commit | diff | stats
path: root/kernel/trace
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig             |   1
-rw-r--r--  kernel/trace/trace.c             | 121
-rw-r--r--  kernel/trace/trace.h             |   5
-rw-r--r--  kernel/trace/trace_events.c      |   2
-rw-r--r--  kernel/trace/trace_kprobe.c      |   8
-rw-r--r--  kernel/trace/trace_sched_switch.c|   2
-rw-r--r--  kernel/trace/trace_selftest.c    |   1
7 files changed, 76 insertions, 64 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 4cea4f41c1d..5d89335a485 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -119,6 +119,7 @@ config TRACING
119 select BINARY_PRINTF 119 select BINARY_PRINTF
120 select EVENT_TRACING 120 select EVENT_TRACING
121 select TRACE_CLOCK 121 select TRACE_CLOCK
122 select IRQ_WORK
122 123
123config GENERIC_TRACER 124config GENERIC_TRACER
124 bool 125 bool
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d5cbc0d3f20..37d1c703e3e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -19,6 +19,7 @@
19#include <linux/seq_file.h> 19#include <linux/seq_file.h>
20#include <linux/notifier.h> 20#include <linux/notifier.h>
21#include <linux/irqflags.h> 21#include <linux/irqflags.h>
22#include <linux/irq_work.h>
22#include <linux/debugfs.h> 23#include <linux/debugfs.h>
23#include <linux/pagemap.h> 24#include <linux/pagemap.h>
24#include <linux/hardirq.h> 25#include <linux/hardirq.h>
@@ -85,6 +86,14 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
85static DEFINE_PER_CPU(bool, trace_cmdline_save); 86static DEFINE_PER_CPU(bool, trace_cmdline_save);
86 87
87/* 88/*
89 * When a reader is waiting for data, then this variable is
90 * set to true.
91 */
92static bool trace_wakeup_needed;
93
94static struct irq_work trace_work_wakeup;
95
96/*
88 * Kill all tracing for good (never come back). 97 * Kill all tracing for good (never come back).
89 * It is initialized to 1 but will turn to zero if the initialization 98 * It is initialized to 1 but will turn to zero if the initialization
90 * of the tracer is successful. But that is the only place that sets 99 * of the tracer is successful. But that is the only place that sets
@@ -329,12 +338,18 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
329static int trace_stop_count; 338static int trace_stop_count;
330static DEFINE_RAW_SPINLOCK(tracing_start_lock); 339static DEFINE_RAW_SPINLOCK(tracing_start_lock);
331 340
332static void wakeup_work_handler(struct work_struct *work) 341/**
342 * trace_wake_up - wake up tasks waiting for trace input
343 *
344 * Schedules a delayed work to wake up any task that is blocked on the
345 * trace_wait queue. This is used with trace_poll for tasks polling the
346 * trace.
347 */
348static void trace_wake_up(struct irq_work *work)
333{ 349{
334 wake_up(&trace_wait); 350 wake_up_all(&trace_wait);
335}
336 351
337static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler); 352}
338 353
339/** 354/**
340 * tracing_on - enable tracing buffers 355 * tracing_on - enable tracing buffers
@@ -389,22 +404,6 @@ int tracing_is_on(void)
389} 404}
390EXPORT_SYMBOL_GPL(tracing_is_on); 405EXPORT_SYMBOL_GPL(tracing_is_on);
391 406
392/**
393 * trace_wake_up - wake up tasks waiting for trace input
394 *
395 * Schedules a delayed work to wake up any task that is blocked on the
396 * trace_wait queue. This is used with trace_poll for tasks polling the
397 * trace.
398 */
399void trace_wake_up(void)
400{
401 const unsigned long delay = msecs_to_jiffies(2);
402
403 if (trace_flags & TRACE_ITER_BLOCK)
404 return;
405 schedule_delayed_work(&wakeup_work, delay);
406}
407
408static int __init set_buf_size(char *str) 407static int __init set_buf_size(char *str)
409{ 408{
410 unsigned long buf_size; 409 unsigned long buf_size;
@@ -753,6 +752,40 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
753} 752}
754#endif /* CONFIG_TRACER_MAX_TRACE */ 753#endif /* CONFIG_TRACER_MAX_TRACE */
755 754
755static void default_wait_pipe(struct trace_iterator *iter)
756{
757 DEFINE_WAIT(wait);
758
759 prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
760
761 /*
762 * The events can happen in critical sections where
763 * checking a work queue can cause deadlocks.
764 * After adding a task to the queue, this flag is set
765 * only to notify events to try to wake up the queue
766 * using irq_work.
767 *
768 * We don't clear it even if the buffer is no longer
769 * empty. The flag only causes the next event to run
770 * irq_work to do the work queue wake up. The worst
771 * that can happen if we race with !trace_empty() is that
772 * an event will cause an irq_work to try to wake up
773 * an empty queue.
774 *
775 * There's no reason to protect this flag either, as
776 * the work queue and irq_work logic will do the necessary
777 * synchronization for the wake ups. The only thing
778 * that is necessary is that the wake up happens after
779 * a task has been queued. It's OK for spurious wake ups.
780 */
781 trace_wakeup_needed = true;
782
783 if (trace_empty(iter))
784 schedule();
785
786 finish_wait(&trace_wait, &wait);
787}
788
756/** 789/**
757 * register_tracer - register a tracer with the ftrace system. 790 * register_tracer - register a tracer with the ftrace system.
758 * @type - the plugin for the tracer 791 * @type - the plugin for the tracer
@@ -1156,30 +1189,32 @@ void
1156__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event) 1189__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1157{ 1190{
1158 __this_cpu_write(trace_cmdline_save, true); 1191 __this_cpu_write(trace_cmdline_save, true);
1192 if (trace_wakeup_needed) {
1193 trace_wakeup_needed = false;
1194 /* irq_work_queue() supplies its own memory barriers */
1195 irq_work_queue(&trace_work_wakeup);
1196 }
1159 ring_buffer_unlock_commit(buffer, event); 1197 ring_buffer_unlock_commit(buffer, event);
1160} 1198}
1161 1199
1162static inline void 1200static inline void
1163__trace_buffer_unlock_commit(struct ring_buffer *buffer, 1201__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1164 struct ring_buffer_event *event, 1202 struct ring_buffer_event *event,
1165 unsigned long flags, int pc, 1203 unsigned long flags, int pc)
1166 int wake)
1167{ 1204{
1168 __buffer_unlock_commit(buffer, event); 1205 __buffer_unlock_commit(buffer, event);
1169 1206
1170 ftrace_trace_stack(buffer, flags, 6, pc); 1207 ftrace_trace_stack(buffer, flags, 6, pc);
1171 ftrace_trace_userstack(buffer, flags, pc); 1208 ftrace_trace_userstack(buffer, flags, pc);
1172
1173 if (wake)
1174 trace_wake_up();
1175} 1209}
1176 1210
1177void trace_buffer_unlock_commit(struct ring_buffer *buffer, 1211void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1178 struct ring_buffer_event *event, 1212 struct ring_buffer_event *event,
1179 unsigned long flags, int pc) 1213 unsigned long flags, int pc)
1180{ 1214{
1181 __trace_buffer_unlock_commit(buffer, event, flags, pc, 1); 1215 __trace_buffer_unlock_commit(buffer, event, flags, pc);
1182} 1216}
1217EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1183 1218
1184struct ring_buffer_event * 1219struct ring_buffer_event *
1185trace_current_buffer_lock_reserve(struct ring_buffer **current_rb, 1220trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
@@ -1196,29 +1231,21 @@ void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1196 struct ring_buffer_event *event, 1231 struct ring_buffer_event *event,
1197 unsigned long flags, int pc) 1232 unsigned long flags, int pc)
1198{ 1233{
1199 __trace_buffer_unlock_commit(buffer, event, flags, pc, 1); 1234 __trace_buffer_unlock_commit(buffer, event, flags, pc);
1200} 1235}
1201EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit); 1236EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
1202 1237
1203void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer, 1238void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1204 struct ring_buffer_event *event, 1239 struct ring_buffer_event *event,
1205 unsigned long flags, int pc) 1240 unsigned long flags, int pc,
1206{ 1241 struct pt_regs *regs)
1207 __trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
1208}
1209EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
1210
1211void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1212 struct ring_buffer_event *event,
1213 unsigned long flags, int pc,
1214 struct pt_regs *regs)
1215{ 1242{
1216 __buffer_unlock_commit(buffer, event); 1243 __buffer_unlock_commit(buffer, event);
1217 1244
1218 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs); 1245 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1219 ftrace_trace_userstack(buffer, flags, pc); 1246 ftrace_trace_userstack(buffer, flags, pc);
1220} 1247}
1221EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs); 1248EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1222 1249
1223void trace_current_buffer_discard_commit(struct ring_buffer *buffer, 1250void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1224 struct ring_buffer_event *event) 1251 struct ring_buffer_event *event)
@@ -3354,19 +3381,6 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
3354 } 3381 }
3355} 3382}
3356 3383
3357
3358void default_wait_pipe(struct trace_iterator *iter)
3359{
3360 DEFINE_WAIT(wait);
3361
3362 prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
3363
3364 if (trace_empty(iter))
3365 schedule();
3366
3367 finish_wait(&trace_wait, &wait);
3368}
3369
3370/* 3384/*
3371 * This is a make-shift waitqueue. 3385 * This is a make-shift waitqueue.
3372 * A tracer might use this callback on some rare cases: 3386 * A tracer might use this callback on some rare cases:
@@ -5107,6 +5121,7 @@ __init static int tracer_alloc_buffers(void)
5107#endif 5121#endif
5108 5122
5109 trace_init_cmdlines(); 5123 trace_init_cmdlines();
5124 init_irq_work(&trace_work_wakeup, trace_wake_up);
5110 5125
5111 register_tracer(&nop_trace); 5126 register_tracer(&nop_trace);
5112 current_trace = &nop_trace; 5127 current_trace = &nop_trace;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 3e8a176f64e..55010ed175f 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -327,7 +327,6 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu)
327 327
328int tracer_init(struct tracer *t, struct trace_array *tr); 328int tracer_init(struct tracer *t, struct trace_array *tr);
329int tracing_is_enabled(void); 329int tracing_is_enabled(void);
330void trace_wake_up(void);
331void tracing_reset(struct trace_array *tr, int cpu); 330void tracing_reset(struct trace_array *tr, int cpu);
332void tracing_reset_online_cpus(struct trace_array *tr); 331void tracing_reset_online_cpus(struct trace_array *tr);
333void tracing_reset_current(int cpu); 332void tracing_reset_current(int cpu);
@@ -349,9 +348,6 @@ trace_buffer_lock_reserve(struct ring_buffer *buffer,
349 unsigned long len, 348 unsigned long len,
350 unsigned long flags, 349 unsigned long flags,
351 int pc); 350 int pc);
352void trace_buffer_unlock_commit(struct ring_buffer *buffer,
353 struct ring_buffer_event *event,
354 unsigned long flags, int pc);
355 351
356struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, 352struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
357 struct trace_array_cpu *data); 353 struct trace_array_cpu *data);
@@ -370,7 +366,6 @@ void trace_init_global_iter(struct trace_iterator *iter);
370 366
371void tracing_iter_reset(struct trace_iterator *iter, int cpu); 367void tracing_iter_reset(struct trace_iterator *iter, int cpu);
372 368
373void default_wait_pipe(struct trace_iterator *iter);
374void poll_wait_pipe(struct trace_iterator *iter); 369void poll_wait_pipe(struct trace_iterator *iter);
375 370
376void ftrace(struct trace_array *tr, 371void ftrace(struct trace_array *tr,
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index cb2df3b70f7..880073d0b94 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1760,7 +1760,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
1760 entry->ip = ip; 1760 entry->ip = ip;
1761 entry->parent_ip = parent_ip; 1761 entry->parent_ip = parent_ip;
1762 1762
1763 trace_nowake_buffer_unlock_commit(buffer, event, flags, pc); 1763 trace_buffer_unlock_commit(buffer, event, flags, pc);
1764 1764
1765 out: 1765 out:
1766 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu)); 1766 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 5a3c533ef06..1865d5f7653 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -751,8 +751,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
751 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); 751 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
752 752
753 if (!filter_current_check_discard(buffer, call, entry, event)) 753 if (!filter_current_check_discard(buffer, call, entry, event))
754 trace_nowake_buffer_unlock_commit_regs(buffer, event, 754 trace_buffer_unlock_commit_regs(buffer, event,
755 irq_flags, pc, regs); 755 irq_flags, pc, regs);
756} 756}
757 757
758/* Kretprobe handler */ 758/* Kretprobe handler */
@@ -784,8 +784,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
784 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); 784 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
785 785
786 if (!filter_current_check_discard(buffer, call, entry, event)) 786 if (!filter_current_check_discard(buffer, call, entry, event))
787 trace_nowake_buffer_unlock_commit_regs(buffer, event, 787 trace_buffer_unlock_commit_regs(buffer, event,
788 irq_flags, pc, regs); 788 irq_flags, pc, regs);
789} 789}
790 790
791/* Event entry printers */ 791/* Event entry printers */
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index b0a136ac382..3374c792ccd 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -102,7 +102,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
102 entry->next_cpu = task_cpu(wakee); 102 entry->next_cpu = task_cpu(wakee);
103 103
104 if (!filter_check_discard(call, entry, buffer, event)) 104 if (!filter_check_discard(call, entry, buffer, event))
105 trace_nowake_buffer_unlock_commit(buffer, event, flags, pc); 105 trace_buffer_unlock_commit(buffer, event, flags, pc);
106} 106}
107 107
108static void 108static void
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 091b815f7b0..47623169a81 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -1094,6 +1094,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
1094 tracing_stop(); 1094 tracing_stop();
1095 /* check both trace buffers */ 1095 /* check both trace buffers */
1096 ret = trace_test_buffer(tr, NULL); 1096 ret = trace_test_buffer(tr, NULL);
1097 printk("ret = %d\n", ret);
1097 if (!ret) 1098 if (!ret)
1098 ret = trace_test_buffer(&max_tr, &count); 1099 ret = trace_test_buffer(&max_tr, &count);
1099 1100