Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig                 |   1
-rw-r--r--  kernel/trace/ftrace.c                |  14
-rw-r--r--  kernel/trace/ring_buffer.c           |  65
-rw-r--r--  kernel/trace/trace.c                 | 484
-rw-r--r--  kernel/trace/trace.h                 |  18
-rw-r--r--  kernel/trace/trace_branch.c          |   4
-rw-r--r--  kernel/trace/trace_events.c          |  51
-rw-r--r--  kernel/trace/trace_events_filter.c   |   4
-rw-r--r--  kernel/trace/trace_functions.c       |   7
-rw-r--r--  kernel/trace/trace_functions_graph.c |   6
-rw-r--r--  kernel/trace/trace_irqsoff.c         |  16
-rw-r--r--  kernel/trace/trace_kprobe.c          |  10
-rw-r--r--  kernel/trace/trace_output.c          |  78
-rw-r--r--  kernel/trace/trace_probe.c           |  14
-rw-r--r--  kernel/trace/trace_sched_switch.c    |   4
-rw-r--r--  kernel/trace/trace_sched_wakeup.c    |  12
-rw-r--r--  kernel/trace/trace_selftest.c        |  13
-rw-r--r--  kernel/trace/trace_stack.c           |   4
-rw-r--r--  kernel/trace/trace_syscalls.c        |  61
-rw-r--r--  kernel/trace/trace_uprobe.c          |  12
20 files changed, 486 insertions(+), 392 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 4cea4f41c1d9..5d89335a485f 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -119,6 +119,7 @@ config TRACING
 	select BINARY_PRINTF
 	select EVENT_TRACING
 	select TRACE_CLOCK
+	select IRQ_WORK
 
 config GENERIC_TRACER
 	bool
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 9dcf15d38380..3ffe4c5ad3f3 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -10,7 +10,7 @@
  * Based on code in the latency_tracer, that is:
  *
  * Copyright (C) 2004-2006 Ingo Molnar
- * Copyright (C) 2004 William Lee Irwin III
+ * Copyright (C) 2004 Nadia Yvette Chambers
  */
 
 #include <linux/stop_machine.h>
@@ -2437,7 +2437,7 @@ static void reset_iter_read(struct ftrace_iterator *iter)
 {
 	iter->pos = 0;
 	iter->func_pos = 0;
-	iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH);
+	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
 }
 
 static void *t_start(struct seq_file *m, loff_t *pos)
@@ -2675,12 +2675,12 @@ ftrace_notrace_open(struct inode *inode, struct file *file)
 }
 
 loff_t
-ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
+ftrace_regex_lseek(struct file *file, loff_t offset, int whence)
 {
 	loff_t ret;
 
 	if (file->f_mode & FMODE_READ)
-		ret = seq_lseek(file, offset, origin);
+		ret = seq_lseek(file, offset, whence);
 	else
 		file->f_pos = ret = 1;
 
@@ -2868,7 +2868,7 @@ static int __init ftrace_mod_cmd_init(void)
 {
 	return register_ftrace_command(&ftrace_mod_cmd);
 }
-device_initcall(ftrace_mod_cmd_init);
+core_initcall(ftrace_mod_cmd_init);
 
 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 				      struct ftrace_ops *op, struct pt_regs *pt_regs)
@@ -4055,7 +4055,7 @@ static int __init ftrace_nodyn_init(void)
 	ftrace_enabled = 1;
 	return 0;
 }
-device_initcall(ftrace_nodyn_init);
+core_initcall(ftrace_nodyn_init);
 
 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
 static inline void ftrace_startup_enable(int command) { }
@@ -4381,7 +4381,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
 	if (strlen(tmp) == 0)
 		return 1;
 
-	ret = strict_strtol(tmp, 10, &val);
+	ret = kstrtol(tmp, 10, &val);
 	if (ret < 0)
 		return ret;
 
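Note: the strict_strtol() conversion above is part of the kernel-wide move to the kstrto*() parsers, which reject trailing garbage and return 0 on success or a negative errno. A minimal out-of-tree sketch of the calling convention (the function name here is illustrative, not part of this patch):

	#include <linux/kernel.h>

	static int parse_long(const char *buf, long *out)
	{
		/* kstrtol(): base 10; returns 0 on success, -EINVAL/-ERANGE on error */
		int ret = kstrtol(buf, 10, out);

		if (ret < 0)
			return ret;	/* propagate the errno, as ftrace_pid_write() does */
		return 0;
	}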
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index b979426d16c6..ce8514feedcd 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -460,9 +460,10 @@ struct ring_buffer_per_cpu {
 	unsigned long			lost_events;
 	unsigned long			last_overrun;
 	local_t				entries_bytes;
-	local_t				commit_overrun;
-	local_t				overrun;
 	local_t				entries;
+	local_t				overrun;
+	local_t				commit_overrun;
+	local_t				dropped_events;
 	local_t				committing;
 	local_t				commits;
 	unsigned long			read;
@@ -1396,6 +1397,8 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
 		struct list_head *head_page_with_bit;
 
 		head_page = &rb_set_head_page(cpu_buffer)->list;
+		if (!head_page)
+			break;
 		prev_page = head_page->prev;
 
 		first_page = pages->next;
@@ -1820,7 +1823,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
 }
 
 /**
- * ring_buffer_update_event - update event type and data
+ * rb_update_event - update event type and data
  * @event: the even to update
  * @type: the type of event
  * @length: the size of the event field in the ring buffer
@@ -2155,8 +2158,10 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 		 * If we are not in overwrite mode,
 		 * this is easy, just stop here.
 		 */
-		if (!(buffer->flags & RB_FL_OVERWRITE))
+		if (!(buffer->flags & RB_FL_OVERWRITE)) {
+			local_inc(&cpu_buffer->dropped_events);
 			goto out_reset;
+		}
 
 		ret = rb_handle_head_page(cpu_buffer,
 					  tail_page,
@@ -2720,8 +2725,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
  * and not the length of the event which would hold the header.
  */
 int ring_buffer_write(struct ring_buffer *buffer,
-			unsigned long length,
-			void *data)
+		      unsigned long length,
+		      void *data)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
@@ -2929,12 +2934,12 @@ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
  * @buffer: The ring buffer
  * @cpu: The per CPU buffer to read from.
  */
-unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
+u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
 {
 	unsigned long flags;
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct buffer_page *bpage;
-	unsigned long ret;
+	u64 ret = 0;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 0;
@@ -2949,7 +2954,8 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
 		bpage = cpu_buffer->reader_page;
 	else
 		bpage = rb_set_head_page(cpu_buffer);
-	ret = bpage->page->time_stamp;
+	if (bpage)
+		ret = bpage->page->time_stamp;
 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return ret;
@@ -2995,7 +3001,8 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 
 /**
- * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
+ * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
+ * buffer wrapping around (only if RB_FL_OVERWRITE is on).
  * @buffer: The ring buffer
  * @cpu: The per CPU buffer to get the number of overruns from
  */
@@ -3015,7 +3022,9 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
 
 /**
- * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
+ * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
+ * commits failing due to the buffer wrapping around while there are uncommitted
+ * events, such as during an interrupt storm.
  * @buffer: The ring buffer
  * @cpu: The per CPU buffer to get the number of overruns from
  */
@@ -3036,6 +3045,28 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
 
 /**
+ * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
+ * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to get the number of overruns from
+ */
+unsigned long
+ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long ret;
+
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+		return 0;
+
+	cpu_buffer = buffer->buffers[cpu];
+	ret = local_read(&cpu_buffer->dropped_events);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
+
+/**
  * ring_buffer_entries - get the number of entries in a buffer
  * @buffer: The ring buffer
  *
@@ -3260,6 +3291,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	 * Splice the empty reader page into the list around the head.
 	 */
 	reader = rb_set_head_page(cpu_buffer);
+	if (!reader)
+		goto out;
 	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
 	cpu_buffer->reader_page->list.prev = reader->list.prev;
 
@@ -3778,12 +3811,17 @@ void
 ring_buffer_read_finish(struct ring_buffer_iter *iter)
 {
 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+	unsigned long flags;
 
 	/*
 	 * Ring buffer is disabled from recording, here's a good place
 	 * to check the integrity of the ring buffer.
+	 * Must prevent readers from trying to read, as the check
+	 * clears the HEAD page and readers require it.
 	 */
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	rb_check_pages(cpu_buffer);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	atomic_dec(&cpu_buffer->record_disabled);
 	atomic_dec(&cpu_buffer->buffer->resize_disabled);
@@ -3864,9 +3902,10 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	local_set(&cpu_buffer->reader_page->page->commit, 0);
 	cpu_buffer->reader_page->read = 0;
 
-	local_set(&cpu_buffer->commit_overrun, 0);
 	local_set(&cpu_buffer->entries_bytes, 0);
 	local_set(&cpu_buffer->overrun, 0);
+	local_set(&cpu_buffer->commit_overrun, 0);
+	local_set(&cpu_buffer->dropped_events, 0);
 	local_set(&cpu_buffer->entries, 0);
 	local_set(&cpu_buffer->committing, 0);
 	local_set(&cpu_buffer->commits, 0);
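Note: the new dropped_events statistic follows the same lockless pattern as the other per-cpu counters in ring_buffer_per_cpu: the producer bumps a local_t with local_inc() and readers sample it with local_read(). A reduced sketch of that pattern under assumed names (this is not the ring buffer's real structure):

	#include <asm/local.h>

	struct percpu_stats {
		local_t	dropped;	/* events lost while overwrite is off */
	};

	static void note_drop(struct percpu_stats *s)
	{
		local_inc(&s->dropped);		/* cheap, CPU-local increment */
	}

	static unsigned long read_drops(struct percpu_stats *s)
	{
		return local_read(&s->dropped);
	}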
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 31e4f55773f1..3c13e46d7d24 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -9,7 +9,7 @@
  *
  * Based on code from the latency_tracer, that is:
  * Copyright (C) 2004-2006 Ingo Molnar
- * Copyright (C) 2004 William Lee Irwin III
+ * Copyright (C) 2004 Nadia Yvette Chambers
  */
 #include <linux/ring_buffer.h>
 #include <generated/utsrelease.h>
@@ -19,6 +19,7 @@
 #include <linux/seq_file.h>
 #include <linux/notifier.h>
 #include <linux/irqflags.h>
+#include <linux/irq_work.h>
 #include <linux/debugfs.h>
 #include <linux/pagemap.h>
 #include <linux/hardirq.h>
@@ -78,6 +79,21 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
 }
 
 /*
+ * To prevent the comm cache from being overwritten when no
+ * tracing is active, only save the comm when a trace event
+ * occurred.
+ */
+static DEFINE_PER_CPU(bool, trace_cmdline_save);
+
+/*
+ * When a reader is waiting for data, then this variable is
+ * set to true.
+ */
+static bool trace_wakeup_needed;
+
+static struct irq_work trace_work_wakeup;
+
+/*
  * Kill all tracing for good (never come back).
  * It is initialized to 1 but will turn to zero if the initialization
  * of the tracer is successful. But that is the only place that sets
@@ -139,6 +155,18 @@ static int __init set_ftrace_dump_on_oops(char *str)
 }
 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 
+
+static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
+static char *trace_boot_options __initdata;
+
+static int __init set_trace_boot_options(char *str)
+{
+	strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
+	trace_boot_options = trace_boot_options_buf;
+	return 0;
+}
+__setup("trace_options=", set_trace_boot_options);
+
 unsigned long long ns2usecs(cycle_t nsec)
 {
 	nsec += 500;
@@ -198,20 +226,9 @@ static struct trace_array max_tr;
 
 static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
 
-/* tracer_enabled is used to toggle activation of a tracer */
-static int tracer_enabled = 1;
-
-/**
- * tracing_is_enabled - return tracer_enabled status
- *
- * This function is used by other tracers to know the status
- * of the tracer_enabled flag. Tracers may use this function
- * to know if it should enable their features when starting
- * up. See irqsoff tracer for an example (start_irqsoff_tracer).
- */
 int tracing_is_enabled(void)
 {
-	return tracer_enabled;
+	return tracing_is_on();
 }
 
 /*
@@ -333,12 +350,18 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 static int trace_stop_count;
 static DEFINE_RAW_SPINLOCK(tracing_start_lock);
 
-static void wakeup_work_handler(struct work_struct *work)
+/**
+ * trace_wake_up - wake up tasks waiting for trace input
+ *
+ * Schedules a delayed work to wake up any task that is blocked on the
+ * trace_wait queue. These is used with trace_poll for tasks polling the
+ * trace.
+ */
+static void trace_wake_up(struct irq_work *work)
 {
-	wake_up(&trace_wait);
-}
+	wake_up_all(&trace_wait);
 
-static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
+}
 
 /**
  * tracing_on - enable tracing buffers
@@ -393,22 +416,6 @@ int tracing_is_on(void)
 }
 EXPORT_SYMBOL_GPL(tracing_is_on);
 
-/**
- * trace_wake_up - wake up tasks waiting for trace input
- *
- * Schedules a delayed work to wake up any task that is blocked on the
- * trace_wait queue. These is used with trace_poll for tasks polling the
- * trace.
- */
-void trace_wake_up(void)
-{
-	const unsigned long delay = msecs_to_jiffies(2);
-
-	if (trace_flags & TRACE_ITER_BLOCK)
-		return;
-	schedule_delayed_work(&wakeup_work, delay);
-}
-
 static int __init set_buf_size(char *str)
 {
 	unsigned long buf_size;
@@ -431,7 +438,7 @@ static int __init set_tracing_thresh(char *str)
 
 	if (!str)
 		return 0;
-	ret = strict_strtoul(str, 0, &threshold);
+	ret = kstrtoul(str, 0, &threshold);
 	if (ret < 0)
 		return 0;
 	tracing_thresh = threshold * 1000;
@@ -477,10 +484,12 @@ static const char *trace_options[] = {
 static struct {
 	u64 (*func)(void);
 	const char *name;
+	int in_ns;		/* is this clock in nanoseconds? */
 } trace_clocks[] = {
-	{ trace_clock_local,	"local" },
-	{ trace_clock_global,	"global" },
-	{ trace_clock_counter,	"counter" },
+	{ trace_clock_local,	"local",	1 },
+	{ trace_clock_global,	"global",	1 },
+	{ trace_clock_counter,	"counter",	0 },
+	ARCH_TRACE_CLOCKS
 };
 
 int trace_clock_id;
@@ -757,6 +766,40 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
+static void default_wait_pipe(struct trace_iterator *iter)
+{
+	DEFINE_WAIT(wait);
+
+	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
+
+	/*
+	 * The events can happen in critical sections where
+	 * checking a work queue can cause deadlocks.
+	 * After adding a task to the queue, this flag is set
+	 * only to notify events to try to wake up the queue
+	 * using irq_work.
+	 *
+	 * We don't clear it even if the buffer is no longer
+	 * empty. The flag only causes the next event to run
+	 * irq_work to do the work queue wake up. The worse
+	 * that can happen if we race with !trace_empty() is that
+	 * an event will cause an irq_work to try to wake up
+	 * an empty queue.
+	 *
+	 * There's no reason to protect this flag either, as
+	 * the work queue and irq_work logic will do the necessary
+	 * synchronization for the wake ups. The only thing
+	 * that is necessary is that the wake up happens after
+	 * a task has been queued. It's OK for spurious wake ups.
+	 */
+	trace_wakeup_needed = true;
+
+	if (trace_empty(iter))
+		schedule();
+
+	finish_wait(&trace_wait, &wait);
+}
+
 /**
  * register_tracer - register a tracer with the ftrace system.
  * @type - the plugin for the tracer
@@ -875,32 +918,6 @@ int register_tracer(struct tracer *type)
 	return ret;
 }
 
-void unregister_tracer(struct tracer *type)
-{
-	struct tracer **t;
-
-	mutex_lock(&trace_types_lock);
-	for (t = &trace_types; *t; t = &(*t)->next) {
-		if (*t == type)
-			goto found;
-	}
-	pr_info("Tracer %s not registered\n", type->name);
-	goto out;
-
- found:
-	*t = (*t)->next;
-
-	if (type == current_trace && tracer_enabled) {
-		tracer_enabled = 0;
-		tracing_stop();
-		if (current_trace->stop)
-			current_trace->stop(&global_trace);
-		current_trace = &nop_trace;
-	}
- out:
-	mutex_unlock(&trace_types_lock);
-}
-
 void tracing_reset(struct trace_array *tr, int cpu)
 {
 	struct ring_buffer *buffer = tr->buffer;
@@ -1131,10 +1148,14 @@ void trace_find_cmdline(int pid, char comm[])
 
 void tracing_record_cmdline(struct task_struct *tsk)
 {
-	if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
-	    !tracing_is_on())
+	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
+		return;
+
+	if (!__this_cpu_read(trace_cmdline_save))
 		return;
 
+	__this_cpu_write(trace_cmdline_save, false);
+
 	trace_save_cmdline(tsk);
 }
 
@@ -1178,27 +1199,36 @@ trace_buffer_lock_reserve(struct ring_buffer *buffer,
 	return event;
 }
 
+void
+__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
+{
+	__this_cpu_write(trace_cmdline_save, true);
+	if (trace_wakeup_needed) {
+		trace_wakeup_needed = false;
+		/* irq_work_queue() supplies it's own memory barriers */
+		irq_work_queue(&trace_work_wakeup);
+	}
+	ring_buffer_unlock_commit(buffer, event);
+}
+
 static inline void
 __trace_buffer_unlock_commit(struct ring_buffer *buffer,
 			     struct ring_buffer_event *event,
-			     unsigned long flags, int pc,
-			     int wake)
+			     unsigned long flags, int pc)
 {
-	ring_buffer_unlock_commit(buffer, event);
+	__buffer_unlock_commit(buffer, event);
 
 	ftrace_trace_stack(buffer, flags, 6, pc);
 	ftrace_trace_userstack(buffer, flags, pc);
-
-	if (wake)
-		trace_wake_up();
 }
 
 void trace_buffer_unlock_commit(struct ring_buffer *buffer,
 				struct ring_buffer_event *event,
 				unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
+EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
 
 struct ring_buffer_event *
 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
@@ -1215,29 +1245,21 @@ void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
 					struct ring_buffer_event *event,
 					unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
 
-void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
+void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
 				       struct ring_buffer_event *event,
-				       unsigned long flags, int pc)
-{
-	__trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
-}
-EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
-
-void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
-					    struct ring_buffer_event *event,
-					    unsigned long flags, int pc,
-					    struct pt_regs *regs)
+				       unsigned long flags, int pc,
+				       struct pt_regs *regs)
 {
-	ring_buffer_unlock_commit(buffer, event);
+	__buffer_unlock_commit(buffer, event);
 
 	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
 	ftrace_trace_userstack(buffer, flags, pc);
 }
-EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
+EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
 
 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
 					 struct ring_buffer_event *event)
@@ -1269,7 +1291,7 @@ trace_function(struct trace_array *tr,
 	entry->parent_ip		= parent_ip;
 
 	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 }
 
 void
@@ -1362,7 +1384,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 	entry->size = trace.nr_entries;
 
 	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 
  out:
 	/* Again, don't let gcc optimize things here */
@@ -1458,7 +1480,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 
 	save_stack_trace_user(&trace);
 	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 
  out_drop_count:
 	__this_cpu_dec(user_stack_count);
@@ -1559,10 +1581,10 @@ static int alloc_percpu_trace_buffer(void)
 	return -ENOMEM;
 }
 
+static int buffers_allocated;
+
 void trace_printk_init_buffers(void)
 {
-	static int buffers_allocated;
-
 	if (buffers_allocated)
 		return;
 
@@ -1571,7 +1593,38 @@ void trace_printk_init_buffers(void)
 
 	pr_info("ftrace: Allocated trace_printk buffers\n");
 
+	/* Expand the buffers to set size */
+	tracing_update_buffers();
+
 	buffers_allocated = 1;
+
+	/*
+	 * trace_printk_init_buffers() can be called by modules.
+	 * If that happens, then we need to start cmdline recording
+	 * directly here. If the global_trace.buffer is already
+	 * allocated here, then this was called by module code.
+	 */
+	if (global_trace.buffer)
+		tracing_start_cmdline_record();
+}
+
+void trace_printk_start_comm(void)
+{
+	/* Start tracing comms if trace printk is set */
+	if (!buffers_allocated)
+		return;
+	tracing_start_cmdline_record();
+}
+
+static void trace_printk_start_stop_comm(int enabled)
+{
+	if (!buffers_allocated)
+		return;
+
+	if (enabled)
+		tracing_start_cmdline_record();
+	else
+		tracing_stop_cmdline_record();
 }
 
 /**
@@ -1622,7 +1675,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
 	if (!filter_check_discard(call, entry, buffer, event)) {
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
 
@@ -1693,7 +1746,7 @@ int trace_array_vprintk(struct trace_array *tr,
 	memcpy(&entry->buf, tbuffer, len);
 	entry->buf[len] = '\0';
 	if (!filter_check_discard(call, entry, buffer, event)) {
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
  out:
@@ -2426,6 +2479,10 @@ __tracing_open(struct inode *inode, struct file *file)
 	if (ring_buffer_overruns(iter->tr->buffer))
 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
 
+	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
+	if (trace_clocks[trace_clock_id].in_ns)
+		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
+
 	/* stop the trace while dumping */
 	tracing_stop();
 
@@ -2794,26 +2851,19 @@ static void set_tracer_flags(unsigned int mask, int enabled)
 
 	if (mask == TRACE_ITER_OVERWRITE)
 		ring_buffer_change_overwrite(global_trace.buffer, enabled);
+
+	if (mask == TRACE_ITER_PRINTK)
+		trace_printk_start_stop_comm(enabled);
 }
 
-static ssize_t
-tracing_trace_options_write(struct file *filp, const char __user *ubuf,
-			    size_t cnt, loff_t *ppos)
+static int trace_set_options(char *option)
 {
-	char buf[64];
 	char *cmp;
 	int neg = 0;
-	int ret;
+	int ret = 0;
 	int i;
 
-	if (cnt >= sizeof(buf))
-		return -EINVAL;
-
-	if (copy_from_user(&buf, ubuf, cnt))
-		return -EFAULT;
-
-	buf[cnt] = 0;
-	cmp = strstrip(buf);
+	cmp = strstrip(option);
 
 	if (strncmp(cmp, "no", 2) == 0) {
 		neg = 1;
@@ -2832,10 +2882,27 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 		mutex_lock(&trace_types_lock);
 		ret = set_tracer_option(current_trace, cmp, neg);
 		mutex_unlock(&trace_types_lock);
-		if (ret)
-			return ret;
 	}
 
+	return ret;
+}
+
+static ssize_t
+tracing_trace_options_write(struct file *filp, const char __user *ubuf,
+			    size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+
+	trace_set_options(buf);
+
 	*ppos += cnt;
 
 	return cnt;
@@ -2940,56 +3007,6 @@ static const struct file_operations tracing_saved_cmdlines_fops = {
 };
 
 static ssize_t
-tracing_ctrl_read(struct file *filp, char __user *ubuf,
-		  size_t cnt, loff_t *ppos)
-{
-	char buf[64];
-	int r;
-
-	r = sprintf(buf, "%u\n", tracer_enabled);
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
-static ssize_t
-tracing_ctrl_write(struct file *filp, const char __user *ubuf,
-		   size_t cnt, loff_t *ppos)
-{
-	struct trace_array *tr = filp->private_data;
-	unsigned long val;
-	int ret;
-
-	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
-	if (ret)
-		return ret;
-
-	val = !!val;
-
-	mutex_lock(&trace_types_lock);
-	if (tracer_enabled ^ val) {
-
-		/* Only need to warn if this is used to change the state */
-		WARN_ONCE(1, "tracing_enabled is deprecated. Use tracing_on");
-
-		if (val) {
-			tracer_enabled = 1;
-			if (current_trace->start)
-				current_trace->start(tr);
-			tracing_start();
-		} else {
-			tracer_enabled = 0;
-			tracing_stop();
-			if (current_trace->stop)
-				current_trace->stop(tr);
-		}
-	}
-	mutex_unlock(&trace_types_lock);
-
-	*ppos += cnt;
-
-	return cnt;
-}
-
-static ssize_t
 tracing_set_trace_read(struct file *filp, char __user *ubuf,
 		       size_t cnt, loff_t *ppos)
 {
@@ -3019,6 +3036,31 @@ static void set_buffer_entries(struct trace_array *tr, unsigned long val)
 	tr->data[cpu]->entries = val;
 }
 
+/* resize @tr's buffer to the size of @size_tr's entries */
+static int resize_buffer_duplicate_size(struct trace_array *tr,
+					struct trace_array *size_tr, int cpu_id)
+{
+	int cpu, ret = 0;
+
+	if (cpu_id == RING_BUFFER_ALL_CPUS) {
+		for_each_tracing_cpu(cpu) {
+			ret = ring_buffer_resize(tr->buffer,
+					size_tr->data[cpu]->entries, cpu);
+			if (ret < 0)
+				break;
+			tr->data[cpu]->entries = size_tr->data[cpu]->entries;
+		}
+	} else {
+		ret = ring_buffer_resize(tr->buffer,
+					size_tr->data[cpu_id]->entries, cpu_id);
+		if (ret == 0)
+			tr->data[cpu_id]->entries =
+				size_tr->data[cpu_id]->entries;
+	}
+
+	return ret;
+}
+
 static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
 {
 	int ret;
@@ -3030,6 +3072,10 @@ static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
 	 */
 	ring_buffer_expanded = 1;
 
+	/* May be called before buffers are initialized */
+	if (!global_trace.buffer)
+		return 0;
+
 	ret = ring_buffer_resize(global_trace.buffer, size, cpu);
 	if (ret < 0)
 		return ret;
@@ -3039,23 +3085,8 @@ static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
 
 	ret = ring_buffer_resize(max_tr.buffer, size, cpu);
 	if (ret < 0) {
-		int r = 0;
-
-		if (cpu == RING_BUFFER_ALL_CPUS) {
-			int i;
-			for_each_tracing_cpu(i) {
-				r = ring_buffer_resize(global_trace.buffer,
-						global_trace.data[i]->entries,
-						i);
-				if (r < 0)
-					break;
-			}
-		} else {
-			r = ring_buffer_resize(global_trace.buffer,
-					global_trace.data[cpu]->entries,
-					cpu);
-		}
-
+		int r = resize_buffer_duplicate_size(&global_trace,
+						     &global_trace, cpu);
 		if (r < 0) {
 			/*
 			 * AARGH! We are left with different
@@ -3193,17 +3224,11 @@ static int tracing_set_tracer(const char *buf)
 
 	topts = create_trace_option_files(t);
 	if (t->use_max_tr) {
-		int cpu;
 		/* we need to make per cpu buffer sizes equivalent */
-		for_each_tracing_cpu(cpu) {
-			ret = ring_buffer_resize(max_tr.buffer,
-						 global_trace.data[cpu]->entries,
-						 cpu);
-			if (ret < 0)
-				goto out;
-			max_tr.data[cpu]->entries =
-					global_trace.data[cpu]->entries;
-		}
+		ret = resize_buffer_duplicate_size(&max_tr, &global_trace,
+						   RING_BUFFER_ALL_CPUS);
+		if (ret < 0)
+			goto out;
 	}
 
 	if (t->init) {
@@ -3325,6 +3350,10 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 	if (trace_flags & TRACE_ITER_LATENCY_FMT)
 		iter->iter_flags |= TRACE_FILE_LAT_FMT;
 
+	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
+	if (trace_clocks[trace_clock_id].in_ns)
+		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
+
 	iter->cpu_file = cpu_file;
 	iter->tr = &global_trace;
 	mutex_init(&iter->mutex);
@@ -3385,19 +3414,6 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
 	}
 }
 
-
-void default_wait_pipe(struct trace_iterator *iter)
-{
-	DEFINE_WAIT(wait);
-
-	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
-
-	if (trace_empty(iter))
-		schedule();
-
-	finish_wait(&trace_wait, &wait);
-}
-
 /*
  * This is a make-shift waitqueue.
  * A tracer might use this callback on some rare cases:
@@ -3446,7 +3462,7 @@ static int tracing_wait_pipe(struct file *filp)
 		 *
 		 * iter->pos will be 0 if we haven't read anything.
 		 */
-		if (!tracer_enabled && iter->pos)
+		if (!tracing_is_enabled() && iter->pos)
 			break;
 	}
 
@@ -3955,7 +3971,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	} else
 		entry->buf[cnt] = '\0';
 
-	ring_buffer_unlock_commit(buffer, event);
+	__buffer_unlock_commit(buffer, event);
 
 	written = cnt;
 
@@ -4016,6 +4032,14 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
 	if (max_tr.buffer)
 		ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
 
+	/*
+	 * New clock may not be consistent with the previous clock.
+	 * Reset the buffer so that it doesn't have incomparable timestamps.
+	 */
+	tracing_reset_online_cpus(&global_trace);
+	if (max_tr.buffer)
+		tracing_reset_online_cpus(&max_tr);
+
 	mutex_unlock(&trace_types_lock);
 
 	*fpos += cnt;
@@ -4037,13 +4061,6 @@ static const struct file_operations tracing_max_lat_fops = {
 	.llseek		= generic_file_llseek,
 };
 
-static const struct file_operations tracing_ctrl_fops = {
-	.open		= tracing_open_generic,
-	.read		= tracing_ctrl_read,
-	.write		= tracing_ctrl_write,
-	.llseek		= generic_file_llseek,
-};
-
 static const struct file_operations set_tracer_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_set_trace_read,
@@ -4260,13 +4277,11 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		return -ENOMEM;
 
 	if (*ppos & (PAGE_SIZE - 1)) {
-		WARN_ONCE(1, "Ftrace: previous read must page-align\n");
 		ret = -EINVAL;
 		goto out;
 	}
 
 	if (len & (PAGE_SIZE - 1)) {
-		WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
 		if (len < PAGE_SIZE) {
 			ret = -EINVAL;
 			goto out;
@@ -4377,13 +4392,27 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 	cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);
 	trace_seq_printf(s, "bytes: %ld\n", cnt);
 
-	t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
-	usec_rem = do_div(t, USEC_PER_SEC);
-	trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem);
+	if (trace_clocks[trace_clock_id].in_ns) {
+		/* local or global for trace_clock */
+		t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
+		usec_rem = do_div(t, USEC_PER_SEC);
+		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
+								t, usec_rem);
+
+		t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
+		usec_rem = do_div(t, USEC_PER_SEC);
+		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
+	} else {
+		/* counter or tsc mode for trace_clock */
+		trace_seq_printf(s, "oldest event ts: %llu\n",
+				ring_buffer_oldest_event_ts(tr->buffer, cpu));
+
+		trace_seq_printf(s, "now ts: %llu\n",
+				ring_buffer_time_stamp(tr->buffer, cpu));
+	}
 
-	t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
-	usec_rem = do_div(t, USEC_PER_SEC);
-	trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
+	cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu);
+	trace_seq_printf(s, "dropped events: %ld\n", cnt);
 
 	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
 
@@ -4788,10 +4817,17 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
 		return ret;
 
 	if (buffer) {
-		if (val)
+		mutex_lock(&trace_types_lock);
+		if (val) {
 			ring_buffer_record_on(buffer);
-		else
+			if (current_trace->start)
+				current_trace->start(tr);
+		} else {
 			ring_buffer_record_off(buffer);
+			if (current_trace->stop)
+				current_trace->stop(tr);
+		}
+		mutex_unlock(&trace_types_lock);
 	}
 
 	(*ppos)++;
@@ -4815,9 +4851,6 @@ static __init int tracer_init_debugfs(void)
 
 	d_tracer = tracing_init_dentry();
 
-	trace_create_file("tracing_enabled", 0644, d_tracer,
-			  &global_trace, &tracing_ctrl_fops);
-
 	trace_create_file("trace_options", 0644, d_tracer,
 			  NULL, &tracing_iter_fops);
 
@@ -5089,6 +5122,7 @@ __init static int tracer_alloc_buffers(void)
 
 	/* Only allocate trace_printk buffers if a trace_printk exists */
 	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
+		/* Must be called before global_trace.buffer is allocated */
 		trace_printk_init_buffers();
 
 	/* To save memory, keep the ring buffer size to its minimum */
@@ -5136,6 +5170,7 @@ __init static int tracer_alloc_buffers(void)
 #endif
 
 	trace_init_cmdlines();
+	init_irq_work(&trace_work_wakeup, trace_wake_up);
 
 	register_tracer(&nop_trace);
 	current_trace = &nop_trace;
@@ -5147,6 +5182,13 @@ __init static int tracer_alloc_buffers(void)
 
 	register_die_notifier(&trace_die_notifier);
 
+	while (trace_boot_options) {
+		char *option;
+
+		option = strsep(&trace_boot_options, ",");
+		trace_set_options(option);
+	}
+
 	return 0;
 
 out_free_cpumask:
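Note: the wake-up rework above replaces a delayed workqueue with irq_work, which is safe to queue from any tracing context (including NMI); irq_work_queue() only raises a self-interrupt and the handler runs later from a wake-safe context. A minimal sketch of the same pattern with illustrative names (assumptions, not this file's symbols):

	#include <linux/irq_work.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
	static struct irq_work demo_wakeup;

	static void demo_wake_fn(struct irq_work *work)
	{
		wake_up_all(&demo_wait);	/* deferred to a safe context */
	}

	static int __init demo_init(void)
	{
		init_irq_work(&demo_wakeup, demo_wake_fn);
		return 0;
	}

	/* from a context where calling wake_up_all() directly could deadlock: */
	static void demo_event(void)
	{
		irq_work_queue(&demo_wakeup);
	}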
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index c15f528c1af4..c75d7988902c 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -285,8 +285,8 @@ struct tracer {
 	int			(*set_flag)(u32 old_flags, u32 bit, int set);
 	struct tracer		*next;
 	struct tracer_flags	*flags;
-	int			print_max;
-	int			use_max_tr;
+	bool			print_max;
+	bool			use_max_tr;
 };
 
 
@@ -327,7 +327,6 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu)
 
 int tracer_init(struct tracer *t, struct trace_array *tr);
 int tracing_is_enabled(void);
-void trace_wake_up(void);
 void tracing_reset(struct trace_array *tr, int cpu);
 void tracing_reset_online_cpus(struct trace_array *tr);
 void tracing_reset_current(int cpu);
@@ -349,9 +348,6 @@ trace_buffer_lock_reserve(struct ring_buffer *buffer,
 					  unsigned long len,
 					  unsigned long flags,
 					  int pc);
-void trace_buffer_unlock_commit(struct ring_buffer *buffer,
-				struct ring_buffer_event *event,
-				unsigned long flags, int pc);
 
 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 					    struct trace_array_cpu *data);
@@ -359,6 +355,9 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 					  int *ent_cpu, u64 *ent_ts);
 
+void __buffer_unlock_commit(struct ring_buffer *buffer,
+			    struct ring_buffer_event *event);
+
 int trace_empty(struct trace_iterator *iter);
 
 void *trace_find_next_entry_inc(struct trace_iterator *iter);
@@ -367,7 +366,6 @@ void trace_init_global_iter(struct trace_iterator *iter);
 
 void tracing_iter_reset(struct trace_iterator *iter, int cpu);
 
-void default_wait_pipe(struct trace_iterator *iter);
 void poll_wait_pipe(struct trace_iterator *iter);
 
 void ftrace(struct trace_array *tr,
@@ -407,12 +405,7 @@ void tracing_sched_switch_assign_trace(struct trace_array *tr);
 void tracing_stop_sched_switch_record(void);
 void tracing_start_sched_switch_record(void);
 int register_tracer(struct tracer *type);
-void unregister_tracer(struct tracer *type);
 int is_tracing_stopped(void);
-enum trace_file_type {
-	TRACE_FILE_LAT_FMT = 1,
-	TRACE_FILE_ANNOTATE = 2,
-};
 
 extern cpumask_var_t __read_mostly tracing_buffer_mask;
 
@@ -841,6 +834,7 @@ extern const char *__start___trace_bprintk_fmt[];
 extern const char *__stop___trace_bprintk_fmt[];
 
 void trace_printk_init_buffers(void);
+void trace_printk_start_comm(void);
 
 #undef FTRACE_ENTRY
 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 8d3538b4ea5f..95e96842ed29 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -77,7 +77,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	entry->correct = val == expect;
 
 	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 
  out:
 	atomic_dec(&tr->data[cpu]->disabled);
@@ -199,7 +199,7 @@ __init static int init_branch_tracer(void)
 	}
 	return register_tracer(&branch_trace);
 }
-device_initcall(init_branch_tracer);
+core_initcall(init_branch_tracer);
 
 #else
 static inline
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index d608d09d08c0..880073d0b946 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -491,19 +491,6 @@ static void t_stop(struct seq_file *m, void *p)
 	mutex_unlock(&event_mutex);
 }
 
-static int
-ftrace_event_seq_open(struct inode *inode, struct file *file)
-{
-	const struct seq_operations *seq_ops;
-
-	if ((file->f_mode & FMODE_WRITE) &&
-	    (file->f_flags & O_TRUNC))
-		ftrace_clear_events();
-
-	seq_ops = inode->i_private;
-	return seq_open(file, seq_ops);
-}
-
 static ssize_t
 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
 		  loff_t *ppos)
@@ -980,6 +967,9 @@ show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 	return r;
 }
 
+static int ftrace_event_avail_open(struct inode *inode, struct file *file);
+static int ftrace_event_set_open(struct inode *inode, struct file *file);
+
 static const struct seq_operations show_event_seq_ops = {
 	.start = t_start,
 	.next = t_next,
@@ -995,14 +985,14 @@ static const struct seq_operations show_set_event_seq_ops = {
 };
 
 static const struct file_operations ftrace_avail_fops = {
-	.open = ftrace_event_seq_open,
+	.open = ftrace_event_avail_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
 	.release = seq_release,
 };
 
 static const struct file_operations ftrace_set_event_fops = {
-	.open = ftrace_event_seq_open,
+	.open = ftrace_event_set_open,
 	.read = seq_read,
 	.write = ftrace_event_write,
 	.llseek = seq_lseek,
@@ -1078,6 +1068,26 @@ static struct dentry *event_trace_events_dir(void)
 	return d_events;
 }
 
+static int
+ftrace_event_avail_open(struct inode *inode, struct file *file)
+{
+	const struct seq_operations *seq_ops = &show_event_seq_ops;
+
+	return seq_open(file, seq_ops);
+}
+
+static int
+ftrace_event_set_open(struct inode *inode, struct file *file)
+{
+	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
+
+	if ((file->f_mode & FMODE_WRITE) &&
+	    (file->f_flags & O_TRUNC))
+		ftrace_clear_events();
+
+	return seq_open(file, seq_ops);
+}
+
 static struct dentry *
 event_subsystem_dir(const char *name, struct dentry *d_events)
 {
@@ -1489,6 +1499,9 @@ static __init int event_trace_enable(void)
 		if (ret)
 			pr_warn("Failed to enable trace event: %s\n", token);
 	}
+
+	trace_printk_start_comm();
+
 	return 0;
 }
 
@@ -1505,15 +1518,13 @@ static __init int event_trace_init(void)
 		return 0;
 
 	entry = debugfs_create_file("available_events", 0444, d_tracer,
-				    (void *)&show_event_seq_ops,
-				    &ftrace_avail_fops);
+				    NULL, &ftrace_avail_fops);
 	if (!entry)
 		pr_warning("Could not create debugfs "
 			   "'available_events' entry\n");
 
 	entry = debugfs_create_file("set_event", 0644, d_tracer,
-				    (void *)&show_set_event_seq_ops,
-				    &ftrace_set_event_fops);
+				    NULL, &ftrace_set_event_fops);
 	if (!entry)
 		pr_warning("Could not create debugfs "
 			   "'set_event' entry\n");
@@ -1749,7 +1760,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
 	entry->ip			= ip;
 	entry->parent_ip		= parent_ip;
 
-	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
+	trace_buffer_unlock_commit(buffer, event, flags, pc);
 
  out:
 	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index c154797a7ff7..e5b0ca8b8d4d 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1000,9 +1000,9 @@ static int init_pred(struct filter_parse_state *ps,
 		}
 	} else {
 		if (field->is_signed)
-			ret = strict_strtoll(pred->regex.pattern, 0, &val);
+			ret = kstrtoll(pred->regex.pattern, 0, &val);
 		else
-			ret = strict_strtoull(pred->regex.pattern, 0, &val);
+			ret = kstrtoull(pred->regex.pattern, 0, &val);
 		if (ret) {
 			parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
 			return -EINVAL;
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 507a7a9630bf..8e3ad8082ab7 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -7,7 +7,7 @@
  * Based on code from the latency_tracer, that is:
  *
  * Copyright (C) 2004-2006 Ingo Molnar
- * Copyright (C) 2004 William Lee Irwin III
+ * Copyright (C) 2004 Nadia Yvette Chambers
  */
 #include <linux/ring_buffer.h>
 #include <linux/debugfs.h>
@@ -366,7 +366,7 @@ ftrace_trace_onoff_callback(struct ftrace_hash *hash,
366 * We use the callback data field (which is a pointer) 366 * We use the callback data field (which is a pointer)
367 * as our counter. 367 * as our counter.
368 */ 368 */
369 ret = strict_strtoul(number, 0, (unsigned long *)&count); 369 ret = kstrtoul(number, 0, (unsigned long *)&count);
370 if (ret) 370 if (ret)
371 return ret; 371 return ret;
372 372
@@ -411,5 +411,4 @@ static __init int init_function_trace(void)
411 init_func_cmd_traceon(); 411 init_func_cmd_traceon();
412 return register_tracer(&function_trace); 412 return register_tracer(&function_trace);
413} 413}
414device_initcall(init_function_trace); 414core_initcall(init_function_trace);
415
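[Note: device_initcall() and core_initcall() both register an __init function, but at different boot stages: core_initcall() runs at initcall level 1, device_initcall() at level 6, so this change, repeated for the tracers below, makes each tracer available much earlier in boot. In miniature, with a hypothetical tracer:]

/* Hypothetical tracer used only for illustration. */
static struct tracer example_trace __read_mostly = {
        .name = "example",
};

static __init int init_example_trace(void)
{
        return register_tracer(&example_trace);
}
core_initcall(init_example_trace);      /* was: device_initcall(...) */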
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 99b4378393d5..4edb4b74eb7e 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -223,7 +223,7 @@ int __trace_graph_entry(struct trace_array *tr,
223 entry = ring_buffer_event_data(event); 223 entry = ring_buffer_event_data(event);
224 entry->graph_ent = *trace; 224 entry->graph_ent = *trace;
225 if (!filter_current_check_discard(buffer, call, entry, event)) 225 if (!filter_current_check_discard(buffer, call, entry, event))
226 ring_buffer_unlock_commit(buffer, event); 226 __buffer_unlock_commit(buffer, event);
227 227
228 return 1; 228 return 1;
229} 229}
@@ -327,7 +327,7 @@ void __trace_graph_return(struct trace_array *tr,
327 entry = ring_buffer_event_data(event); 327 entry = ring_buffer_event_data(event);
328 entry->ret = *trace; 328 entry->ret = *trace;
329 if (!filter_current_check_discard(buffer, call, entry, event)) 329 if (!filter_current_check_discard(buffer, call, entry, event))
330 ring_buffer_unlock_commit(buffer, event); 330 __buffer_unlock_commit(buffer, event);
331} 331}
332 332
333void trace_graph_return(struct ftrace_graph_ret *trace) 333void trace_graph_return(struct ftrace_graph_ret *trace)
@@ -1474,4 +1474,4 @@ static __init int init_graph_trace(void)
1474 return register_tracer(&graph_trace); 1474 return register_tracer(&graph_trace);
1475} 1475}
1476 1476
1477device_initcall(init_graph_trace); 1477core_initcall(init_graph_trace);
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index d98ee8283b29..713a2cac4881 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -7,7 +7,7 @@
7 * From code in the latency_tracer, that is: 7 * From code in the latency_tracer, that is:
8 * 8 *
9 * Copyright (C) 2004-2006 Ingo Molnar 9 * Copyright (C) 2004-2006 Ingo Molnar
10 * Copyright (C) 2004 William Lee Irwin III 10 * Copyright (C) 2004 Nadia Yvette Chambers
11 */ 11 */
12#include <linux/kallsyms.h> 12#include <linux/kallsyms.h>
13#include <linux/debugfs.h> 13#include <linux/debugfs.h>
@@ -604,7 +604,7 @@ static struct tracer irqsoff_tracer __read_mostly =
604 .reset = irqsoff_tracer_reset, 604 .reset = irqsoff_tracer_reset,
605 .start = irqsoff_tracer_start, 605 .start = irqsoff_tracer_start,
606 .stop = irqsoff_tracer_stop, 606 .stop = irqsoff_tracer_stop,
607 .print_max = 1, 607 .print_max = true,
608 .print_header = irqsoff_print_header, 608 .print_header = irqsoff_print_header,
609 .print_line = irqsoff_print_line, 609 .print_line = irqsoff_print_line,
610 .flags = &tracer_flags, 610 .flags = &tracer_flags,
@@ -614,7 +614,7 @@ static struct tracer irqsoff_tracer __read_mostly =
614#endif 614#endif
615 .open = irqsoff_trace_open, 615 .open = irqsoff_trace_open,
616 .close = irqsoff_trace_close, 616 .close = irqsoff_trace_close,
617 .use_max_tr = 1, 617 .use_max_tr = true,
618}; 618};
619# define register_irqsoff(trace) register_tracer(&trace) 619# define register_irqsoff(trace) register_tracer(&trace)
620#else 620#else
@@ -637,7 +637,7 @@ static struct tracer preemptoff_tracer __read_mostly =
637 .reset = irqsoff_tracer_reset, 637 .reset = irqsoff_tracer_reset,
638 .start = irqsoff_tracer_start, 638 .start = irqsoff_tracer_start,
639 .stop = irqsoff_tracer_stop, 639 .stop = irqsoff_tracer_stop,
640 .print_max = 1, 640 .print_max = true,
641 .print_header = irqsoff_print_header, 641 .print_header = irqsoff_print_header,
642 .print_line = irqsoff_print_line, 642 .print_line = irqsoff_print_line,
643 .flags = &tracer_flags, 643 .flags = &tracer_flags,
@@ -647,7 +647,7 @@ static struct tracer preemptoff_tracer __read_mostly =
647#endif 647#endif
648 .open = irqsoff_trace_open, 648 .open = irqsoff_trace_open,
649 .close = irqsoff_trace_close, 649 .close = irqsoff_trace_close,
650 .use_max_tr = 1, 650 .use_max_tr = true,
651}; 651};
652# define register_preemptoff(trace) register_tracer(&trace) 652# define register_preemptoff(trace) register_tracer(&trace)
653#else 653#else
@@ -672,7 +672,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
672 .reset = irqsoff_tracer_reset, 672 .reset = irqsoff_tracer_reset,
673 .start = irqsoff_tracer_start, 673 .start = irqsoff_tracer_start,
674 .stop = irqsoff_tracer_stop, 674 .stop = irqsoff_tracer_stop,
675 .print_max = 1, 675 .print_max = true,
676 .print_header = irqsoff_print_header, 676 .print_header = irqsoff_print_header,
677 .print_line = irqsoff_print_line, 677 .print_line = irqsoff_print_line,
678 .flags = &tracer_flags, 678 .flags = &tracer_flags,
@@ -682,7 +682,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
682#endif 682#endif
683 .open = irqsoff_trace_open, 683 .open = irqsoff_trace_open,
684 .close = irqsoff_trace_close, 684 .close = irqsoff_trace_close,
685 .use_max_tr = 1, 685 .use_max_tr = true,
686}; 686};
687 687
688# define register_preemptirqsoff(trace) register_tracer(&trace) 688# define register_preemptirqsoff(trace) register_tracer(&trace)
@@ -698,4 +698,4 @@ __init static int init_irqsoff_tracer(void)
698 698
699 return 0; 699 return 0;
700} 700}
701device_initcall(init_irqsoff_tracer); 701core_initcall(init_irqsoff_tracer);
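[Note: the 1 -> true changes track struct tracer's print_max and use_max_tr members switching from int to bool; with designated initializers the unset members default to false. A trimmed sketch, callbacks omitted:]

static struct tracer example_tracer __read_mostly = {
        .name           = "example",    /* hypothetical */
        .print_max      = true,         /* print the max-latency snapshot */
        .use_max_tr     = true,         /* maintain a separate max buffer */
        /* all other bool flags default to false */
};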
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 1a2117043bb1..1865d5f76538 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -444,7 +444,7 @@ static int create_trace_probe(int argc, char **argv)
444 return -EINVAL; 444 return -EINVAL;
445 } 445 }
446 /* an address specified */ 446 /* an address specified */
447 ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr); 447 ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
448 if (ret) { 448 if (ret) {
449 pr_info("Failed to parse address.\n"); 449 pr_info("Failed to parse address.\n");
450 return ret; 450 return ret;
@@ -751,8 +751,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
751 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); 751 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
752 752
753 if (!filter_current_check_discard(buffer, call, entry, event)) 753 if (!filter_current_check_discard(buffer, call, entry, event))
754 trace_nowake_buffer_unlock_commit_regs(buffer, event, 754 trace_buffer_unlock_commit_regs(buffer, event,
755 irq_flags, pc, regs); 755 irq_flags, pc, regs);
756} 756}
757 757
758/* Kretprobe handler */ 758/* Kretprobe handler */
@@ -784,8 +784,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
784 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); 784 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
785 785
786 if (!filter_current_check_discard(buffer, call, entry, event)) 786 if (!filter_current_check_discard(buffer, call, entry, event))
787 trace_nowake_buffer_unlock_commit_regs(buffer, event, 787 trace_buffer_unlock_commit_regs(buffer, event,
788 irq_flags, pc, regs); 788 irq_flags, pc, regs);
789} 789}
790 790
791/* Event entry printers */ 791/* Event entry printers */
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 123b189c732c..194d79602dc7 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -610,24 +610,54 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
610 return trace_print_lat_fmt(s, entry); 610 return trace_print_lat_fmt(s, entry);
611} 611}
612 612
613static unsigned long preempt_mark_thresh = 100; 613static unsigned long preempt_mark_thresh_us = 100;
614 614
615static int 615static int
616lat_print_timestamp(struct trace_seq *s, u64 abs_usecs, 616lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
617 unsigned long rel_usecs)
618{ 617{
619 return trace_seq_printf(s, " %4lldus%c: ", abs_usecs, 618 unsigned long verbose = trace_flags & TRACE_ITER_VERBOSE;
620 rel_usecs > preempt_mark_thresh ? '!' : 619 unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
621 rel_usecs > 1 ? '+' : ' '); 620 unsigned long long abs_ts = iter->ts - iter->tr->time_start;
621 unsigned long long rel_ts = next_ts - iter->ts;
622 struct trace_seq *s = &iter->seq;
623
624 if (in_ns) {
625 abs_ts = ns2usecs(abs_ts);
626 rel_ts = ns2usecs(rel_ts);
627 }
628
629 if (verbose && in_ns) {
630 unsigned long abs_usec = do_div(abs_ts, USEC_PER_MSEC);
631 unsigned long abs_msec = (unsigned long)abs_ts;
632 unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC);
633 unsigned long rel_msec = (unsigned long)rel_ts;
634
635 return trace_seq_printf(
636 s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ",
637 ns2usecs(iter->ts),
638 abs_msec, abs_usec,
639 rel_msec, rel_usec);
640 } else if (verbose && !in_ns) {
641 return trace_seq_printf(
642 s, "[%016llx] %lld (+%lld): ",
643 iter->ts, abs_ts, rel_ts);
644 } else if (!verbose && in_ns) {
645 return trace_seq_printf(
646 s, " %4lldus%c: ",
647 abs_ts,
648 rel_ts > preempt_mark_thresh_us ? '!' :
649 rel_ts > 1 ? '+' : ' ');
650 } else { /* !verbose && !in_ns */
651 return trace_seq_printf(s, " %4lld: ", abs_ts);
652 }
622} 653}
623 654
624int trace_print_context(struct trace_iterator *iter) 655int trace_print_context(struct trace_iterator *iter)
625{ 656{
626 struct trace_seq *s = &iter->seq; 657 struct trace_seq *s = &iter->seq;
627 struct trace_entry *entry = iter->ent; 658 struct trace_entry *entry = iter->ent;
628 unsigned long long t = ns2usecs(iter->ts); 659 unsigned long long t;
629 unsigned long usec_rem = do_div(t, USEC_PER_SEC); 660 unsigned long secs, usec_rem;
630 unsigned long secs = (unsigned long)t;
631 char comm[TASK_COMM_LEN]; 661 char comm[TASK_COMM_LEN];
632 int ret; 662 int ret;
633 663
@@ -644,8 +674,13 @@ int trace_print_context(struct trace_iterator *iter)
644 return 0; 674 return 0;
645 } 675 }
646 676
647 return trace_seq_printf(s, " %5lu.%06lu: ", 677 if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
648 secs, usec_rem); 678 t = ns2usecs(iter->ts);
679 usec_rem = do_div(t, USEC_PER_SEC);
680 secs = (unsigned long)t;
681 return trace_seq_printf(s, " %5lu.%06lu: ", secs, usec_rem);
682 } else
683 return trace_seq_printf(s, " %12llu: ", iter->ts);
649} 684}
650 685
651int trace_print_lat_context(struct trace_iterator *iter) 686int trace_print_lat_context(struct trace_iterator *iter)
@@ -659,36 +694,29 @@ int trace_print_lat_context(struct trace_iterator *iter)
659 *next_entry = trace_find_next_entry(iter, NULL, 694 *next_entry = trace_find_next_entry(iter, NULL,
660 &next_ts); 695 &next_ts);
661 unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE); 696 unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
662 unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
663 unsigned long rel_usecs;
664 697
665 /* Restore the original ent_size */ 698 /* Restore the original ent_size */
666 iter->ent_size = ent_size; 699 iter->ent_size = ent_size;
667 700
668 if (!next_entry) 701 if (!next_entry)
669 next_ts = iter->ts; 702 next_ts = iter->ts;
670 rel_usecs = ns2usecs(next_ts - iter->ts);
671 703
672 if (verbose) { 704 if (verbose) {
673 char comm[TASK_COMM_LEN]; 705 char comm[TASK_COMM_LEN];
674 706
675 trace_find_cmdline(entry->pid, comm); 707 trace_find_cmdline(entry->pid, comm);
676 708
677 ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]" 709 ret = trace_seq_printf(
678 " %ld.%03ldms (+%ld.%03ldms): ", comm, 710 s, "%16s %5d %3d %d %08x %08lx ",
679 entry->pid, iter->cpu, entry->flags, 711 comm, entry->pid, iter->cpu, entry->flags,
680 entry->preempt_count, iter->idx, 712 entry->preempt_count, iter->idx);
681 ns2usecs(iter->ts),
682 abs_usecs / USEC_PER_MSEC,
683 abs_usecs % USEC_PER_MSEC,
684 rel_usecs / USEC_PER_MSEC,
685 rel_usecs % USEC_PER_MSEC);
686 } else { 713 } else {
687 ret = lat_print_generic(s, entry, iter->cpu); 714 ret = lat_print_generic(s, entry, iter->cpu);
688 if (ret)
689 ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
690 } 715 }
691 716
717 if (ret)
718 ret = lat_print_timestamp(iter, next_ts);
719
692 return ret; 720 return ret;
693} 721}
694 722
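[Note: the verbose branch of the rewritten lat_print_timestamp() uses do_div(), which divides a u64 in place and hands back the remainder, so a single call splits a microsecond count into the "msec.usec" pair being printed. A worked sketch:]

#include <asm/div64.h>
#include <linux/kernel.h>
#include <linux/time.h>         /* USEC_PER_MSEC */

static void split_usecs(unsigned long long ts_us)
{
        /* for ts_us == 12345: usec == 345, msec == 12 */
        unsigned long usec = do_div(ts_us, USEC_PER_MSEC);
        unsigned long msec = (unsigned long)ts_us;

        pr_info("%lu.%03lums\n", msec, usec);   /* "12.345ms" */
}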
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index daa9980153af..412e959709b4 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -441,7 +441,7 @@ static const struct fetch_type *find_fetch_type(const char *type)
441 goto fail; 441 goto fail;
442 442
443 type++; 443 type++;
444 if (strict_strtoul(type, 0, &bs)) 444 if (kstrtoul(type, 0, &bs))
445 goto fail; 445 goto fail;
446 446
447 switch (bs) { 447 switch (bs) {
@@ -501,8 +501,8 @@ int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset)
501 501
502 tmp = strchr(symbol, '+'); 502 tmp = strchr(symbol, '+');
503 if (tmp) { 503 if (tmp) {
504 /* skip sign because strict_strtol doesn't accept '+' */ 504 /* skip sign because kstrtoul doesn't accept '+' */
505 ret = strict_strtoul(tmp + 1, 0, offset); 505 ret = kstrtoul(tmp + 1, 0, offset);
506 if (ret) 506 if (ret)
507 return ret; 507 return ret;
508 508
@@ -533,7 +533,7 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t,
533 else 533 else
534 ret = -EINVAL; 534 ret = -EINVAL;
535 } else if (isdigit(arg[5])) { 535 } else if (isdigit(arg[5])) {
536 ret = strict_strtoul(arg + 5, 10, &param); 536 ret = kstrtoul(arg + 5, 10, &param);
537 if (ret || param > PARAM_MAX_STACK) 537 if (ret || param > PARAM_MAX_STACK)
538 ret = -EINVAL; 538 ret = -EINVAL;
539 else { 539 else {
@@ -579,7 +579,7 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t,
579 579
580 case '@': /* memory or symbol */ 580 case '@': /* memory or symbol */
581 if (isdigit(arg[1])) { 581 if (isdigit(arg[1])) {
582 ret = strict_strtoul(arg + 1, 0, &param); 582 ret = kstrtoul(arg + 1, 0, &param);
583 if (ret) 583 if (ret)
584 break; 584 break;
585 585
@@ -597,14 +597,14 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t,
597 break; 597 break;
598 598
599 case '+': /* deref memory */ 599 case '+': /* deref memory */
600 arg++; /* Skip '+', because strict_strtol() rejects it. */ 600 arg++; /* Skip '+', because kstrtol() rejects it. */
601 case '-': 601 case '-':
602 tmp = strchr(arg, '('); 602 tmp = strchr(arg, '(');
603 if (!tmp) 603 if (!tmp)
604 break; 604 break;
605 605
606 *tmp = '\0'; 606 *tmp = '\0';
607 ret = strict_strtol(arg, 0, &offset); 607 ret = kstrtol(arg, 0, &offset);
608 608
609 if (ret) 609 if (ret)
610 break; 610 break;
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 7e62c0a18456..3374c792ccd8 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -102,9 +102,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
102 entry->next_cpu = task_cpu(wakee); 102 entry->next_cpu = task_cpu(wakee);
103 103
104 if (!filter_check_discard(call, entry, buffer, event)) 104 if (!filter_check_discard(call, entry, buffer, event))
105 ring_buffer_unlock_commit(buffer, event); 105 trace_buffer_unlock_commit(buffer, event, flags, pc);
106 ftrace_trace_stack(tr->buffer, flags, 6, pc);
107 ftrace_trace_userstack(tr->buffer, flags, pc);
108} 106}
109 107
110static void 108static void
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 02170c00c413..9fe45fcefca0 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -7,7 +7,7 @@
7 * Based on code from the latency_tracer, that is: 7 * Based on code from the latency_tracer, that is:
8 * 8 *
9 * Copyright (C) 2004-2006 Ingo Molnar 9 * Copyright (C) 2004-2006 Ingo Molnar
10 * Copyright (C) 2004 William Lee Irwin III 10 * Copyright (C) 2004 Nadia Yvette Chambers
11 */ 11 */
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/fs.h> 13#include <linux/fs.h>
@@ -589,7 +589,7 @@ static struct tracer wakeup_tracer __read_mostly =
589 .reset = wakeup_tracer_reset, 589 .reset = wakeup_tracer_reset,
590 .start = wakeup_tracer_start, 590 .start = wakeup_tracer_start,
591 .stop = wakeup_tracer_stop, 591 .stop = wakeup_tracer_stop,
592 .print_max = 1, 592 .print_max = true,
593 .print_header = wakeup_print_header, 593 .print_header = wakeup_print_header,
594 .print_line = wakeup_print_line, 594 .print_line = wakeup_print_line,
595 .flags = &tracer_flags, 595 .flags = &tracer_flags,
@@ -599,7 +599,7 @@ static struct tracer wakeup_tracer __read_mostly =
599#endif 599#endif
600 .open = wakeup_trace_open, 600 .open = wakeup_trace_open,
601 .close = wakeup_trace_close, 601 .close = wakeup_trace_close,
602 .use_max_tr = 1, 602 .use_max_tr = true,
603}; 603};
604 604
605static struct tracer wakeup_rt_tracer __read_mostly = 605static struct tracer wakeup_rt_tracer __read_mostly =
@@ -610,7 +610,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
610 .start = wakeup_tracer_start, 610 .start = wakeup_tracer_start,
611 .stop = wakeup_tracer_stop, 611 .stop = wakeup_tracer_stop,
612 .wait_pipe = poll_wait_pipe, 612 .wait_pipe = poll_wait_pipe,
613 .print_max = 1, 613 .print_max = true,
614 .print_header = wakeup_print_header, 614 .print_header = wakeup_print_header,
615 .print_line = wakeup_print_line, 615 .print_line = wakeup_print_line,
616 .flags = &tracer_flags, 616 .flags = &tracer_flags,
@@ -620,7 +620,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
620#endif 620#endif
621 .open = wakeup_trace_open, 621 .open = wakeup_trace_open,
622 .close = wakeup_trace_close, 622 .close = wakeup_trace_close,
623 .use_max_tr = 1, 623 .use_max_tr = true,
624}; 624};
625 625
626__init static int init_wakeup_tracer(void) 626__init static int init_wakeup_tracer(void)
@@ -637,4 +637,4 @@ __init static int init_wakeup_tracer(void)
637 637
638 return 0; 638 return 0;
639} 639}
640device_initcall(init_wakeup_tracer); 640core_initcall(init_wakeup_tracer);
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 2c00a691a540..47623169a815 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -320,7 +320,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
320 int (*func)(void)) 320 int (*func)(void))
321{ 321{
322 int save_ftrace_enabled = ftrace_enabled; 322 int save_ftrace_enabled = ftrace_enabled;
323 int save_tracer_enabled = tracer_enabled;
324 unsigned long count; 323 unsigned long count;
325 char *func_name; 324 char *func_name;
326 int ret; 325 int ret;
@@ -331,7 +330,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
331 330
332 /* enable tracing, and record the filter function */ 331 /* enable tracing, and record the filter function */
333 ftrace_enabled = 1; 332 ftrace_enabled = 1;
334 tracer_enabled = 1;
335 333
336 /* passed in by parameter to fool gcc from optimizing */ 334 /* passed in by parameter to fool gcc from optimizing */
337 func(); 335 func();
@@ -395,7 +393,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
395 393
396 out: 394 out:
397 ftrace_enabled = save_ftrace_enabled; 395 ftrace_enabled = save_ftrace_enabled;
398 tracer_enabled = save_tracer_enabled;
399 396
400 /* Enable tracing on all functions again */ 397 /* Enable tracing on all functions again */
401 ftrace_set_global_filter(NULL, 0, 1); 398 ftrace_set_global_filter(NULL, 0, 1);
@@ -452,7 +449,6 @@ static int
452trace_selftest_function_recursion(void) 449trace_selftest_function_recursion(void)
453{ 450{
454 int save_ftrace_enabled = ftrace_enabled; 451 int save_ftrace_enabled = ftrace_enabled;
455 int save_tracer_enabled = tracer_enabled;
456 char *func_name; 452 char *func_name;
457 int len; 453 int len;
458 int ret; 454 int ret;
@@ -465,7 +461,6 @@ trace_selftest_function_recursion(void)
465 461
466 /* enable tracing, and record the filter function */ 462 /* enable tracing, and record the filter function */
467 ftrace_enabled = 1; 463 ftrace_enabled = 1;
468 tracer_enabled = 1;
469 464
470 /* Handle PPC64 '.' name */ 465 /* Handle PPC64 '.' name */
471 func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); 466 func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
@@ -534,7 +529,6 @@ trace_selftest_function_recursion(void)
534 ret = 0; 529 ret = 0;
535out: 530out:
536 ftrace_enabled = save_ftrace_enabled; 531 ftrace_enabled = save_ftrace_enabled;
537 tracer_enabled = save_tracer_enabled;
538 532
539 return ret; 533 return ret;
540} 534}
@@ -569,7 +563,6 @@ static int
569trace_selftest_function_regs(void) 563trace_selftest_function_regs(void)
570{ 564{
571 int save_ftrace_enabled = ftrace_enabled; 565 int save_ftrace_enabled = ftrace_enabled;
572 int save_tracer_enabled = tracer_enabled;
573 char *func_name; 566 char *func_name;
574 int len; 567 int len;
575 int ret; 568 int ret;
@@ -586,7 +579,6 @@ trace_selftest_function_regs(void)
586 579
587 /* enable tracing, and record the filter function */ 580 /* enable tracing, and record the filter function */
588 ftrace_enabled = 1; 581 ftrace_enabled = 1;
589 tracer_enabled = 1;
590 582
591 /* Handle PPC64 '.' name */ 583 /* Handle PPC64 '.' name */
592 func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); 584 func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
@@ -648,7 +640,6 @@ trace_selftest_function_regs(void)
648 ret = 0; 640 ret = 0;
649out: 641out:
650 ftrace_enabled = save_ftrace_enabled; 642 ftrace_enabled = save_ftrace_enabled;
651 tracer_enabled = save_tracer_enabled;
652 643
653 return ret; 644 return ret;
654} 645}
@@ -662,7 +653,6 @@ int
662trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) 653trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
663{ 654{
664 int save_ftrace_enabled = ftrace_enabled; 655 int save_ftrace_enabled = ftrace_enabled;
665 int save_tracer_enabled = tracer_enabled;
666 unsigned long count; 656 unsigned long count;
667 int ret; 657 int ret;
668 658
@@ -671,7 +661,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
671 661
672 /* start the tracing */ 662 /* start the tracing */
673 ftrace_enabled = 1; 663 ftrace_enabled = 1;
674 tracer_enabled = 1;
675 664
676 ret = tracer_init(trace, tr); 665 ret = tracer_init(trace, tr);
677 if (ret) { 666 if (ret) {
@@ -708,7 +697,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
708 ret = trace_selftest_function_regs(); 697 ret = trace_selftest_function_regs();
709 out: 698 out:
710 ftrace_enabled = save_ftrace_enabled; 699 ftrace_enabled = save_ftrace_enabled;
711 tracer_enabled = save_tracer_enabled;
712 700
713 /* kill ftrace totally if we failed */ 701 /* kill ftrace totally if we failed */
714 if (ret) 702 if (ret)
@@ -1106,6 +1094,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
1106 tracing_stop(); 1094 tracing_stop();
1107 /* check both trace buffers */ 1095 /* check both trace buffers */
1108 ret = trace_test_buffer(tr, NULL); 1096 ret = trace_test_buffer(tr, NULL);
1097 printk("ret = %d\n", ret);
1109 if (!ret) 1098 if (!ret)
1110 ret = trace_test_buffer(&max_tr, &count); 1099 ret = trace_test_buffer(&max_tr, &count);
1111 1100
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 0c1b165778e5..42ca822fc701 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -33,7 +33,6 @@ static unsigned long max_stack_size;
33static arch_spinlock_t max_stack_lock = 33static arch_spinlock_t max_stack_lock =
34 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 34 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
35 35
36static int stack_trace_disabled __read_mostly;
37static DEFINE_PER_CPU(int, trace_active); 36static DEFINE_PER_CPU(int, trace_active);
38static DEFINE_MUTEX(stack_sysctl_mutex); 37static DEFINE_MUTEX(stack_sysctl_mutex);
39 38
@@ -116,9 +115,6 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
116{ 115{
117 int cpu; 116 int cpu;
118 117
119 if (unlikely(!ftrace_enabled || stack_trace_disabled))
120 return;
121
122 preempt_disable_notrace(); 118 preempt_disable_notrace();
123 119
124 cpu = raw_smp_processor_id(); 120 cpu = raw_smp_processor_id();
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 2485a7d09b11..7609dd6714c2 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -21,9 +21,6 @@ static int syscall_enter_register(struct ftrace_event_call *event,
21static int syscall_exit_register(struct ftrace_event_call *event, 21static int syscall_exit_register(struct ftrace_event_call *event,
22 enum trace_reg type, void *data); 22 enum trace_reg type, void *data);
23 23
24static int syscall_enter_define_fields(struct ftrace_event_call *call);
25static int syscall_exit_define_fields(struct ftrace_event_call *call);
26
27static struct list_head * 24static struct list_head *
28syscall_get_enter_fields(struct ftrace_event_call *call) 25syscall_get_enter_fields(struct ftrace_event_call *call)
29{ 26{
@@ -32,30 +29,6 @@ syscall_get_enter_fields(struct ftrace_event_call *call)
32 return &entry->enter_fields; 29 return &entry->enter_fields;
33} 30}
34 31
35struct trace_event_functions enter_syscall_print_funcs = {
36 .trace = print_syscall_enter,
37};
38
39struct trace_event_functions exit_syscall_print_funcs = {
40 .trace = print_syscall_exit,
41};
42
43struct ftrace_event_class event_class_syscall_enter = {
44 .system = "syscalls",
45 .reg = syscall_enter_register,
46 .define_fields = syscall_enter_define_fields,
47 .get_fields = syscall_get_enter_fields,
48 .raw_init = init_syscall_trace,
49};
50
51struct ftrace_event_class event_class_syscall_exit = {
52 .system = "syscalls",
53 .reg = syscall_exit_register,
54 .define_fields = syscall_exit_define_fields,
55 .fields = LIST_HEAD_INIT(event_class_syscall_exit.fields),
56 .raw_init = init_syscall_trace,
57};
58
59extern struct syscall_metadata *__start_syscalls_metadata[]; 32extern struct syscall_metadata *__start_syscalls_metadata[];
60extern struct syscall_metadata *__stop_syscalls_metadata[]; 33extern struct syscall_metadata *__stop_syscalls_metadata[];
61 34
@@ -432,7 +405,7 @@ void unreg_event_syscall_exit(struct ftrace_event_call *call)
432 mutex_unlock(&syscall_trace_lock); 405 mutex_unlock(&syscall_trace_lock);
433} 406}
434 407
435int init_syscall_trace(struct ftrace_event_call *call) 408static int init_syscall_trace(struct ftrace_event_call *call)
436{ 409{
437 int id; 410 int id;
438 int num; 411 int num;
@@ -457,6 +430,30 @@ int init_syscall_trace(struct ftrace_event_call *call)
457 return id; 430 return id;
458} 431}
459 432
433struct trace_event_functions enter_syscall_print_funcs = {
434 .trace = print_syscall_enter,
435};
436
437struct trace_event_functions exit_syscall_print_funcs = {
438 .trace = print_syscall_exit,
439};
440
441struct ftrace_event_class event_class_syscall_enter = {
442 .system = "syscalls",
443 .reg = syscall_enter_register,
444 .define_fields = syscall_enter_define_fields,
445 .get_fields = syscall_get_enter_fields,
446 .raw_init = init_syscall_trace,
447};
448
449struct ftrace_event_class event_class_syscall_exit = {
450 .system = "syscalls",
451 .reg = syscall_exit_register,
452 .define_fields = syscall_exit_define_fields,
453 .fields = LIST_HEAD_INIT(event_class_syscall_exit.fields),
454 .raw_init = init_syscall_trace,
455};
456
460unsigned long __init __weak arch_syscall_addr(int nr) 457unsigned long __init __weak arch_syscall_addr(int nr)
461{ 458{
462 return (unsigned long)sys_call_table[nr]; 459 return (unsigned long)sys_call_table[nr];
@@ -537,7 +534,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
537 perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); 534 perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
538} 535}
539 536
540int perf_sysenter_enable(struct ftrace_event_call *call) 537static int perf_sysenter_enable(struct ftrace_event_call *call)
541{ 538{
542 int ret = 0; 539 int ret = 0;
543 int num; 540 int num;
@@ -558,7 +555,7 @@ int perf_sysenter_enable(struct ftrace_event_call *call)
558 return ret; 555 return ret;
559} 556}
560 557
561void perf_sysenter_disable(struct ftrace_event_call *call) 558static void perf_sysenter_disable(struct ftrace_event_call *call)
562{ 559{
563 int num; 560 int num;
564 561
@@ -615,7 +612,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
615 perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); 612 perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
616} 613}
617 614
618int perf_sysexit_enable(struct ftrace_event_call *call) 615static int perf_sysexit_enable(struct ftrace_event_call *call)
619{ 616{
620 int ret = 0; 617 int ret = 0;
621 int num; 618 int num;
@@ -636,7 +633,7 @@ int perf_sysexit_enable(struct ftrace_event_call *call)
636 return ret; 633 return ret;
637} 634}
638 635
639void perf_sysexit_disable(struct ftrace_event_call *call) 636static void perf_sysexit_disable(struct ftrace_event_call *call)
640{ 637{
641 int num; 638 int num;
642 639
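[Note: making init_syscall_trace() and the perf_sys*_enable/disable() hooks static means they must be defined before the event-class structures that take their addresses, which is why those structures moved below init_syscall_trace() and the forward declarations at the top could be dropped. The ordering rule in miniature, with an illustrative class:]

/* Definition first, so no forward declaration is needed. */
static int example_raw_init(struct ftrace_event_call *call)
{
        return 0;
}

struct ftrace_event_class example_class = {
        .system   = "example",
        .raw_init = example_raw_init,
};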
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 03003cd7dd96..c86e6d4f67fb 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -22,6 +22,7 @@
22#include <linux/uaccess.h> 22#include <linux/uaccess.h>
23#include <linux/uprobes.h> 23#include <linux/uprobes.h>
24#include <linux/namei.h> 24#include <linux/namei.h>
25#include <linux/string.h>
25 26
26#include "trace_probe.h" 27#include "trace_probe.h"
27 28
@@ -189,7 +190,7 @@ static int create_trace_uprobe(int argc, char **argv)
189 if (argv[0][0] == '-') 190 if (argv[0][0] == '-')
190 is_delete = true; 191 is_delete = true;
191 else if (argv[0][0] != 'p') { 192 else if (argv[0][0] != 'p') {
192 pr_info("Probe definition must be started with 'p', 'r' or" " '-'.\n"); 193 pr_info("Probe definition must be started with 'p' or '-'.\n");
193 return -EINVAL; 194 return -EINVAL;
194 } 195 }
195 196
@@ -252,7 +253,7 @@ static int create_trace_uprobe(int argc, char **argv)
252 if (ret) 253 if (ret)
253 goto fail_address_parse; 254 goto fail_address_parse;
254 255
255 ret = strict_strtoul(arg, 0, &offset); 256 ret = kstrtoul(arg, 0, &offset);
256 if (ret) 257 if (ret)
257 goto fail_address_parse; 258 goto fail_address_parse;
258 259
@@ -263,16 +264,15 @@ static int create_trace_uprobe(int argc, char **argv)
263 264
264 /* setup a probe */ 265 /* setup a probe */
265 if (!event) { 266 if (!event) {
266 char *tail = strrchr(filename, '/'); 267 char *tail;
267 char *ptr; 268 char *ptr;
268 269
269 ptr = kstrdup((tail ? tail + 1 : filename), GFP_KERNEL); 270 tail = kstrdup(kbasename(filename), GFP_KERNEL);
270 if (!ptr) { 271 if (!tail) {
271 ret = -ENOMEM; 272 ret = -ENOMEM;
272 goto fail_address_parse; 273 goto fail_address_parse;
273 } 274 }
274 275
275 tail = ptr;
276 ptr = strpbrk(tail, ".-_"); 276 ptr = strpbrk(tail, ".-_");
277 if (ptr) 277 if (ptr)
278 *ptr = '\0'; 278 *ptr = '\0';
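[Note: kbasename(), which the new <linux/string.h> include provides, returns a pointer just past the final '/' of a path, or the whole string when no '/' is present, so duplicating the basename no longer needs the open-coded strrchr() dance. Usage sketch:]

/* Illustrative helper, assuming kbasename()'s documented behaviour. */
static char *default_event_name(const char *filename)
{
        /* "/lib/libfoo-1.so" -> dup of "libfoo-1.so"; caller kfree()s */
        return kstrdup(kbasename(filename), GFP_KERNEL);
}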