Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r-- | kernel/trace/trace.c | 2721
1 file changed, 1722 insertions(+), 999 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8f3fb3db61c3..4185d5221633 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -14,6 +14,7 @@
14 | #include <linux/utsrelease.h> | 14 | #include <linux/utsrelease.h> |
15 | #include <linux/kallsyms.h> | 15 | #include <linux/kallsyms.h> |
16 | #include <linux/seq_file.h> | 16 | #include <linux/seq_file.h> |
17 | #include <linux/notifier.h> | ||
17 | #include <linux/debugfs.h> | 18 | #include <linux/debugfs.h> |
18 | #include <linux/pagemap.h> | 19 | #include <linux/pagemap.h> |
19 | #include <linux/hardirq.h> | 20 | #include <linux/hardirq.h> |
@@ -22,6 +23,7 @@
22 | #include <linux/ftrace.h> | 23 | #include <linux/ftrace.h> |
23 | #include <linux/module.h> | 24 | #include <linux/module.h> |
24 | #include <linux/percpu.h> | 25 | #include <linux/percpu.h> |
26 | #include <linux/kdebug.h> | ||
25 | #include <linux/ctype.h> | 27 | #include <linux/ctype.h> |
26 | #include <linux/init.h> | 28 | #include <linux/init.h> |
27 | #include <linux/poll.h> | 29 | #include <linux/poll.h> |
@@ -31,24 +33,97 @@
31 | #include <linux/writeback.h> | 33 | #include <linux/writeback.h> |
32 | 34 | ||
33 | #include <linux/stacktrace.h> | 35 | #include <linux/stacktrace.h> |
36 | #include <linux/ring_buffer.h> | ||
37 | #include <linux/irqflags.h> | ||
34 | 38 | ||
35 | #include "trace.h" | 39 | #include "trace.h" |
36 | 40 | ||
41 | #define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE) | ||
42 | |||
37 | unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; | 43 | unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; |
38 | unsigned long __read_mostly tracing_thresh; | 44 | unsigned long __read_mostly tracing_thresh; |
39 | 45 | ||
40 | static unsigned long __read_mostly tracing_nr_buffers; | 46 | /* |
47 | * We need to change this state when a selftest is running. | ||
48 | * A selftest will look into the ring-buffer to count the | ||
49 | * entries inserted during the selftest, although some concurrent | ||
50 | * insertions into the ring-buffer, such as ftrace_printk, could occur | ||
51 | * at the same time, giving false positive or negative results. | ||
52 | */ | ||
53 | static bool __read_mostly tracing_selftest_running; | ||
54 | |||
55 | /* For tracers that don't implement custom flags */ | ||
56 | static struct tracer_opt dummy_tracer_opt[] = { | ||
57 | { } | ||
58 | }; | ||
59 | |||
60 | static struct tracer_flags dummy_tracer_flags = { | ||
61 | .val = 0, | ||
62 | .opts = dummy_tracer_opt | ||
63 | }; | ||
64 | |||
65 | static int dummy_set_flag(u32 old_flags, u32 bit, int set) | ||
66 | { | ||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | /* | ||
71 | * Kill all tracing for good (never come back). | ||
72 | * It is initialized to 1 and is set back to zero only when the | ||
73 | * initialization of the tracer succeeds. That is the only place | ||
74 | * that sets it back to zero. | ||
75 | */ | ||
76 | int tracing_disabled = 1; | ||
77 | |||
78 | static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled); | ||
79 | |||
80 | static inline void ftrace_disable_cpu(void) | ||
81 | { | ||
82 | preempt_disable(); | ||
83 | local_inc(&__get_cpu_var(ftrace_cpu_disabled)); | ||
84 | } | ||
85 | |||
86 | static inline void ftrace_enable_cpu(void) | ||
87 | { | ||
88 | local_dec(&__get_cpu_var(ftrace_cpu_disabled)); | ||
89 | preempt_enable(); | ||
90 | } | ||
91 | |||
41 | static cpumask_t __read_mostly tracing_buffer_mask; | 92 | static cpumask_t __read_mostly tracing_buffer_mask; |
42 | 93 | ||
43 | #define for_each_tracing_cpu(cpu) \ | 94 | #define for_each_tracing_cpu(cpu) \ |
44 | for_each_cpu_mask(cpu, tracing_buffer_mask) | 95 | for_each_cpu_mask(cpu, tracing_buffer_mask) |
45 | 96 | ||
46 | static int trace_alloc_page(void); | 97 | /* |
47 | static int trace_free_page(void); | 98 | * ftrace_dump_on_oops - variable to dump ftrace buffer on oops |
99 | * | ||
100 | * If there is an oops (or kernel panic) and ftrace_dump_on_oops | ||
101 | * is set, then ftrace_dump is called. This will output the contents | ||
102 | * of the ftrace buffers to the console. This is very useful for | ||
103 | * capturing traces that lead to crashes and outputting them to a | ||
104 | * serial console. | ||
105 | * | ||
106 | * It is off by default, but you can enable it either by specifying | ||
107 | * "ftrace_dump_on_oops" on the kernel command line, or by setting | ||
108 | * /proc/sys/kernel/ftrace_dump_on_oops to true. | ||
109 | */ | ||
110 | int ftrace_dump_on_oops; | ||
48 | 111 | ||
49 | static int tracing_disabled = 1; | 112 | static int tracing_set_tracer(char *buf); |
50 | 113 | ||
51 | static unsigned long tracing_pages_allocated; | 114 | static int __init set_ftrace(char *str) |
115 | { | ||
116 | tracing_set_tracer(str); | ||
117 | return 1; | ||
118 | } | ||
119 | __setup("ftrace", set_ftrace); | ||
120 | |||
121 | static int __init set_ftrace_dump_on_oops(char *str) | ||
122 | { | ||
123 | ftrace_dump_on_oops = 1; | ||
124 | return 1; | ||
125 | } | ||
126 | __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); | ||
52 | 127 | ||
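The per-cpu ftrace_cpu_disabled counter added above is what ftrace_disable_cpu() and ftrace_enable_cpu() manipulate: any code that walks or resets the ring buffer brackets itself with this pair so the function tracer cannot recurse into a buffer that is being read. A minimal sketch of the intended usage, with the reset call standing in for any buffer access:

        ftrace_disable_cpu();            /* tracer now skips this CPU */
        ring_buffer_reset(tr->buffer);   /* safe to touch the buffer */
        ftrace_enable_cpu();             /* tracing resumes */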
53 | long | 128 | long |
54 | ns2usecs(cycle_t nsec) | 129 | ns2usecs(cycle_t nsec) |
@@ -60,7 +135,9 @@ ns2usecs(cycle_t nsec)
60 | 135 | ||
61 | cycle_t ftrace_now(int cpu) | 136 | cycle_t ftrace_now(int cpu) |
62 | { | 137 | { |
63 | return cpu_clock(cpu); | 138 | u64 ts = ring_buffer_time_stamp(cpu); |
139 | ring_buffer_normalize_time_stamp(cpu, &ts); | ||
140 | return ts; | ||
64 | } | 141 | } |
65 | 142 | ||
66 | /* | 143 | /* |
@@ -96,15 +173,35 @@ static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
96 | /* tracer_enabled is used to toggle activation of a tracer */ | 173 | /* tracer_enabled is used to toggle activation of a tracer */ |
97 | static int tracer_enabled = 1; | 174 | static int tracer_enabled = 1; |
98 | 175 | ||
176 | /** | ||
177 | * tracing_is_enabled - return tracer_enabled status | ||
178 | * | ||
179 | * This function is used by other tracers to know the status | ||
180 | * of the tracer_enabled flag. Tracers may use this function | ||
181 | * to know if they should enable their features when starting | ||
182 | * up. See irqsoff tracer for an example (start_irqsoff_tracer). | ||
183 | */ | ||
184 | int tracing_is_enabled(void) | ||
185 | { | ||
186 | return tracer_enabled; | ||
187 | } | ||
188 | |||
99 | /* function tracing enabled */ | 189 | /* function tracing enabled */ |
100 | int ftrace_function_enabled; | 190 | int ftrace_function_enabled; |
101 | 191 | ||
102 | /* | 192 | /* |
103 | * trace_nr_entries is the number of entries that is allocated | 193 | * trace_buf_size is the size in bytes that is allocated |
104 | * for a buffer. Note, the number of entries is always rounded | 194 | * for a buffer. Note, the number of bytes is always rounded |
105 | * to ENTRIES_PER_PAGE. | 195 | * to page size. |
196 | * | ||
197 | * This number is purposely set to a low value of 16384. | ||
198 | * If the dump on oops happens, it is much appreciated not to | ||
199 | * have to wait for all that output. Besides, the size is | ||
200 | * configurable at both boot time and run time. | ||
106 | */ | 201 | */ |
107 | static unsigned long trace_nr_entries = 65536UL; | 202 | #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */ |
203 | |||
204 | static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT; | ||
108 | 205 | ||
109 | /* trace_types holds a link list of available tracers. */ | 206 | /* trace_types holds a link list of available tracers. */ |
110 | static struct tracer *trace_types __read_mostly; | 207 | static struct tracer *trace_types __read_mostly; |
@@ -130,26 +227,9 @@ static DEFINE_MUTEX(trace_types_lock);
130 | /* trace_wait is a waitqueue for tasks blocked on trace_poll */ | 227 | /* trace_wait is a waitqueue for tasks blocked on trace_poll */ |
131 | static DECLARE_WAIT_QUEUE_HEAD(trace_wait); | 228 | static DECLARE_WAIT_QUEUE_HEAD(trace_wait); |
132 | 229 | ||
133 | /* trace_flags holds iter_ctrl options */ | 230 | /* trace_flags holds trace_options default values */ |
134 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT; | 231 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | |
135 | 232 | TRACE_ITER_ANNOTATE; | |
136 | static notrace void no_trace_init(struct trace_array *tr) | ||
137 | { | ||
138 | int cpu; | ||
139 | |||
140 | ftrace_function_enabled = 0; | ||
141 | if(tr->ctrl) | ||
142 | for_each_online_cpu(cpu) | ||
143 | tracing_reset(tr->data[cpu]); | ||
144 | tracer_enabled = 0; | ||
145 | } | ||
146 | |||
147 | /* dummy trace to disable tracing */ | ||
148 | static struct tracer no_tracer __read_mostly = { | ||
149 | .name = "none", | ||
150 | .init = no_trace_init | ||
151 | }; | ||
152 | |||
153 | 233 | ||
154 | /** | 234 | /** |
155 | * trace_wake_up - wake up tasks waiting for trace input | 235 | * trace_wake_up - wake up tasks waiting for trace input |
@@ -167,51 +247,27 @@ void trace_wake_up(void)
167 | wake_up(&trace_wait); | 247 | wake_up(&trace_wait); |
168 | } | 248 | } |
169 | 249 | ||
170 | #define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry)) | 250 | static int __init set_buf_size(char *str) |
171 | |||
172 | static int __init set_nr_entries(char *str) | ||
173 | { | 251 | { |
174 | unsigned long nr_entries; | 252 | unsigned long buf_size; |
175 | int ret; | 253 | int ret; |
176 | 254 | ||
177 | if (!str) | 255 | if (!str) |
178 | return 0; | 256 | return 0; |
179 | ret = strict_strtoul(str, 0, &nr_entries); | 257 | ret = strict_strtoul(str, 0, &buf_size); |
180 | /* nr_entries can not be zero */ | 258 | /* nr_entries can not be zero */ |
181 | if (ret < 0 || nr_entries == 0) | 259 | if (ret < 0 || buf_size == 0) |
182 | return 0; | 260 | return 0; |
183 | trace_nr_entries = nr_entries; | 261 | trace_buf_size = buf_size; |
184 | return 1; | 262 | return 1; |
185 | } | 263 | } |
186 | __setup("trace_entries=", set_nr_entries); | 264 | __setup("trace_buf_size=", set_buf_size); |
187 | 265 | ||
188 | unsigned long nsecs_to_usecs(unsigned long nsecs) | 266 | unsigned long nsecs_to_usecs(unsigned long nsecs) |
189 | { | 267 | { |
190 | return nsecs / 1000; | 268 | return nsecs / 1000; |
191 | } | 269 | } |
192 | 270 | ||
193 | /* | ||
194 | * trace_flag_type is an enumeration that holds different | ||
195 | * states when a trace occurs. These are: | ||
196 | * IRQS_OFF - interrupts were disabled | ||
197 | * NEED_RESCED - reschedule is requested | ||
198 | * HARDIRQ - inside an interrupt handler | ||
199 | * SOFTIRQ - inside a softirq handler | ||
200 | */ | ||
201 | enum trace_flag_type { | ||
202 | TRACE_FLAG_IRQS_OFF = 0x01, | ||
203 | TRACE_FLAG_NEED_RESCHED = 0x02, | ||
204 | TRACE_FLAG_HARDIRQ = 0x04, | ||
205 | TRACE_FLAG_SOFTIRQ = 0x08, | ||
206 | }; | ||
207 | |||
208 | /* | ||
209 | * TRACE_ITER_SYM_MASK masks the options in trace_flags that | ||
210 | * control the output of kernel symbols. | ||
211 | */ | ||
212 | #define TRACE_ITER_SYM_MASK \ | ||
213 | (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR) | ||
214 | |||
215 | /* These must match the bit positions in trace_iterator_flags */ | 271 |
216 | static const char *trace_options[] = { | 272 | static const char *trace_options[] = { |
217 | "print-parent", | 273 | "print-parent", |
@@ -224,6 +280,13 @@ static const char *trace_options[] = {
224 | "block", | 280 | "block", |
225 | "stacktrace", | 281 | "stacktrace", |
226 | "sched-tree", | 282 | "sched-tree", |
283 | "ftrace_printk", | ||
284 | "ftrace_preempt", | ||
285 | "branch", | ||
286 | "annotate", | ||
287 | "userstacktrace", | ||
288 | "sym-userobj", | ||
289 | "printk-msg-only", | ||
227 | NULL | 290 | NULL |
228 | }; | 291 | }; |
229 | 292 | ||
@@ -257,7 +320,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
257 | 320 | ||
258 | memcpy(data->comm, tsk->comm, TASK_COMM_LEN); | 321 | memcpy(data->comm, tsk->comm, TASK_COMM_LEN); |
259 | data->pid = tsk->pid; | 322 | data->pid = tsk->pid; |
260 | data->uid = tsk->uid; | 323 | data->uid = task_uid(tsk); |
261 | data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; | 324 | data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; |
262 | data->policy = tsk->policy; | 325 | data->policy = tsk->policy; |
263 | data->rt_priority = tsk->rt_priority; | 326 | data->rt_priority = tsk->rt_priority; |
@@ -266,54 +329,6 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
266 | tracing_record_cmdline(current); | 329 | tracing_record_cmdline(current); |
267 | } | 330 | } |
268 | 331 | ||
269 | #define CHECK_COND(cond) \ | ||
270 | if (unlikely(cond)) { \ | ||
271 | tracing_disabled = 1; \ | ||
272 | WARN_ON(1); \ | ||
273 | return -1; \ | ||
274 | } | ||
275 | |||
276 | /** | ||
277 | * check_pages - integrity check of trace buffers | ||
278 | * | ||
279 | * As a safety measure we check to make sure the data pages have not | ||
280 | * been corrupted. | ||
281 | */ | ||
282 | int check_pages(struct trace_array_cpu *data) | ||
283 | { | ||
284 | struct page *page, *tmp; | ||
285 | |||
286 | CHECK_COND(data->trace_pages.next->prev != &data->trace_pages); | ||
287 | CHECK_COND(data->trace_pages.prev->next != &data->trace_pages); | ||
288 | |||
289 | list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) { | ||
290 | CHECK_COND(page->lru.next->prev != &page->lru); | ||
291 | CHECK_COND(page->lru.prev->next != &page->lru); | ||
292 | } | ||
293 | |||
294 | return 0; | ||
295 | } | ||
296 | |||
297 | /** | ||
298 | * head_page - page address of the first page in per_cpu buffer. | ||
299 | * | ||
300 | * head_page returns the page address of the first page in | ||
301 | * a per_cpu buffer. This also performs various consistency | ||
302 | * checks to make sure the buffer has not been corrupted. | ||
303 | */ | ||
304 | void *head_page(struct trace_array_cpu *data) | ||
305 | { | ||
306 | struct page *page; | ||
307 | |||
308 | if (list_empty(&data->trace_pages)) | ||
309 | return NULL; | ||
310 | |||
311 | page = list_entry(data->trace_pages.next, struct page, lru); | ||
312 | BUG_ON(&page->lru == &data->trace_pages); | ||
313 | |||
314 | return page_address(page); | ||
315 | } | ||
316 | |||
317 | /** | 332 | /** |
318 | * trace_seq_printf - sequence printing of trace information | 333 | * trace_seq_printf - sequence printing of trace information |
319 | * @s: trace sequence descriptor | 334 | * @s: trace sequence descriptor |
@@ -395,34 +410,51 @@ trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
395 | return len; | 410 | return len; |
396 | } | 411 | } |
397 | 412 | ||
398 | #define HEX_CHARS 17 | 413 | #define MAX_MEMHEX_BYTES 8 |
399 | static const char hex2asc[] = "0123456789abcdef"; | 414 | #define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1) |
400 | 415 | ||
401 | static int | 416 | static int |
402 | trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len) | 417 | trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len) |
403 | { | 418 | { |
404 | unsigned char hex[HEX_CHARS]; | 419 | unsigned char hex[HEX_CHARS]; |
405 | unsigned char *data = mem; | 420 | unsigned char *data = mem; |
406 | unsigned char byte; | ||
407 | int i, j; | 421 | int i, j; |
408 | 422 | ||
409 | BUG_ON(len >= HEX_CHARS); | ||
410 | |||
411 | #ifdef __BIG_ENDIAN | 423 | #ifdef __BIG_ENDIAN |
412 | for (i = 0, j = 0; i < len; i++) { | 424 | for (i = 0, j = 0; i < len; i++) { |
413 | #else | 425 | #else |
414 | for (i = len-1, j = 0; i >= 0; i--) { | 426 | for (i = len-1, j = 0; i >= 0; i--) { |
415 | #endif | 427 | #endif |
416 | byte = data[i]; | 428 | hex[j++] = hex_asc_hi(data[i]); |
417 | 429 | hex[j++] = hex_asc_lo(data[i]); | |
418 | hex[j++] = hex2asc[byte & 0x0f]; | ||
419 | hex[j++] = hex2asc[byte >> 4]; | ||
420 | } | 430 | } |
421 | hex[j++] = ' '; | 431 | hex[j++] = ' '; |
422 | 432 | ||
423 | return trace_seq_putmem(s, hex, j); | 433 | return trace_seq_putmem(s, hex, j); |
424 | } | 434 | } |
425 | 435 | ||
436 | static int | ||
437 | trace_seq_path(struct trace_seq *s, struct path *path) | ||
438 | { | ||
439 | unsigned char *p; | ||
440 | |||
441 | if (s->len >= (PAGE_SIZE - 1)) | ||
442 | return 0; | ||
443 | p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len); | ||
444 | if (!IS_ERR(p)) { | ||
445 | p = mangle_path(s->buffer + s->len, p, "\n"); | ||
446 | if (p) { | ||
447 | s->len = p - s->buffer; | ||
448 | return 1; | ||
449 | } | ||
450 | } else { | ||
451 | s->buffer[s->len++] = '?'; | ||
452 | return 1; | ||
453 | } | ||
454 | |||
455 | return 0; | ||
456 | } | ||
457 | |||
426 | static void | 458 | static void |
427 | trace_seq_reset(struct trace_seq *s) | 459 | trace_seq_reset(struct trace_seq *s) |
428 | { | 460 | { |
@@ -460,34 +492,6 @@ trace_print_seq(struct seq_file *m, struct trace_seq *s)
460 | trace_seq_reset(s); | 492 | trace_seq_reset(s); |
461 | } | 493 | } |
462 | 494 | ||
463 | /* | ||
464 | * flip the trace buffers between two trace descriptors. | ||
465 | * This usually is the buffers between the global_trace and | ||
466 | * the max_tr to record a snapshot of a current trace. | ||
467 | * | ||
468 | * The ftrace_max_lock must be held. | ||
469 | */ | ||
470 | static void | ||
471 | flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2) | ||
472 | { | ||
473 | struct list_head flip_pages; | ||
474 | |||
475 | INIT_LIST_HEAD(&flip_pages); | ||
476 | |||
477 | memcpy(&tr1->trace_head_idx, &tr2->trace_head_idx, | ||
478 | sizeof(struct trace_array_cpu) - | ||
479 | offsetof(struct trace_array_cpu, trace_head_idx)); | ||
480 | |||
481 | check_pages(tr1); | ||
482 | check_pages(tr2); | ||
483 | list_splice_init(&tr1->trace_pages, &flip_pages); | ||
484 | list_splice_init(&tr2->trace_pages, &tr1->trace_pages); | ||
485 | list_splice_init(&flip_pages, &tr2->trace_pages); | ||
486 | BUG_ON(!list_empty(&flip_pages)); | ||
487 | check_pages(tr1); | ||
488 | check_pages(tr2); | ||
489 | } | ||
490 | |||
491 | /** | 495 | /** |
492 | * update_max_tr - snapshot all trace buffers from global_trace to max_tr | 496 | * update_max_tr - snapshot all trace buffers from global_trace to max_tr |
493 | * @tr: tracer | 497 | * @tr: tracer |
@@ -500,17 +504,17 @@ flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
500 | void | 504 | void |
501 | update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | 505 | update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) |
502 | { | 506 | { |
503 | struct trace_array_cpu *data; | 507 | struct ring_buffer *buf = tr->buffer; |
504 | int i; | ||
505 | 508 | ||
506 | WARN_ON_ONCE(!irqs_disabled()); | 509 | WARN_ON_ONCE(!irqs_disabled()); |
507 | __raw_spin_lock(&ftrace_max_lock); | 510 | __raw_spin_lock(&ftrace_max_lock); |
508 | /* clear out all the previous traces */ | 511 | |
509 | for_each_tracing_cpu(i) { | 512 | tr->buffer = max_tr.buffer; |
510 | data = tr->data[i]; | 513 | max_tr.buffer = buf; |
511 | flip_trace(max_tr.data[i], data); | 514 | |
512 | tracing_reset(data); | 515 | ftrace_disable_cpu(); |
513 | } | 516 | ring_buffer_reset(tr->buffer); |
517 | ftrace_enable_cpu(); | ||
514 | 518 | ||
515 | __update_max_tr(tr, tsk, cpu); | 519 | __update_max_tr(tr, tsk, cpu); |
516 | __raw_spin_unlock(&ftrace_max_lock); | 520 | __raw_spin_unlock(&ftrace_max_lock); |
@@ -527,16 +531,19 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
527 | void | 531 | void |
528 | update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) | 532 | update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) |
529 | { | 533 | { |
530 | struct trace_array_cpu *data = tr->data[cpu]; | 534 | int ret; |
531 | int i; | ||
532 | 535 | ||
533 | WARN_ON_ONCE(!irqs_disabled()); | 536 | WARN_ON_ONCE(!irqs_disabled()); |
534 | __raw_spin_lock(&ftrace_max_lock); | 537 | __raw_spin_lock(&ftrace_max_lock); |
535 | for_each_tracing_cpu(i) | ||
536 | tracing_reset(max_tr.data[i]); | ||
537 | 538 | ||
538 | flip_trace(max_tr.data[cpu], data); | 539 | ftrace_disable_cpu(); |
539 | tracing_reset(data); | 540 | |
541 | ring_buffer_reset(max_tr.buffer); | ||
542 | ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu); | ||
543 | |||
544 | ftrace_enable_cpu(); | ||
545 | |||
546 | WARN_ON_ONCE(ret); | ||
540 | 547 | ||
541 | __update_max_tr(tr, tsk, cpu); | 548 | __update_max_tr(tr, tsk, cpu); |
542 | __raw_spin_unlock(&ftrace_max_lock); | 549 | __raw_spin_unlock(&ftrace_max_lock); |
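With the ring buffer, the max-latency snapshot no longer copies trace pages. update_max_tr() swaps the two buffer pointers wholesale, and update_max_tr_single() swaps a single CPU's buffer via ring_buffer_swap_cpu(); both are constant-time. The core of the full swap, as the hunks above show:

        struct ring_buffer *buf = tr->buffer;

        tr->buffer = max_tr.buffer;      /* max_tr keeps the hot trace */
        max_tr.buffer = buf;             /* tr takes the old snapshot */
        ring_buffer_reset(tr->buffer);   /* and records from scratch */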
@@ -559,7 +566,17 @@ int register_tracer(struct tracer *type)
559 | return -1; | 566 | return -1; |
560 | } | 567 | } |
561 | 568 | ||
569 | /* | ||
570 | * When this gets called we hold the BKL which means that | ||
571 | * preemption is disabled. Various trace selftests however | ||
572 | * need to disable and enable preemption for successful tests. | ||
573 | * So we drop the BKL here and grab it after the tests again. | ||
574 | */ | ||
575 | unlock_kernel(); | ||
562 | mutex_lock(&trace_types_lock); | 576 | mutex_lock(&trace_types_lock); |
577 | |||
578 | tracing_selftest_running = true; | ||
579 | |||
563 | for (t = trace_types; t; t = t->next) { | 580 | for (t = trace_types; t; t = t->next) { |
564 | if (strcmp(type->name, t->name) == 0) { | 581 | if (strcmp(type->name, t->name) == 0) { |
565 | /* already found */ | 582 | /* already found */ |
@@ -570,13 +587,20 @@ int register_tracer(struct tracer *type)
570 | } | 587 | } |
571 | } | 588 | } |
572 | 589 | ||
590 | if (!type->set_flag) | ||
591 | type->set_flag = &dummy_set_flag; | ||
592 | if (!type->flags) | ||
593 | type->flags = &dummy_tracer_flags; | ||
594 | else | ||
595 | if (!type->flags->opts) | ||
596 | type->flags->opts = dummy_tracer_opt; | ||
597 | |||
573 | #ifdef CONFIG_FTRACE_STARTUP_TEST | 598 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
574 | if (type->selftest) { | 599 | if (type->selftest) { |
575 | struct tracer *saved_tracer = current_trace; | 600 | struct tracer *saved_tracer = current_trace; |
576 | struct trace_array_cpu *data; | ||
577 | struct trace_array *tr = &global_trace; | 601 | struct trace_array *tr = &global_trace; |
578 | int saved_ctrl = tr->ctrl; | ||
579 | int i; | 602 | int i; |
603 | |||
580 | /* | 604 | /* |
581 | * Run a selftest on this tracer. | 605 | * Run a selftest on this tracer. |
582 | * Here we reset the trace buffer, and set the current | 606 | * Here we reset the trace buffer, and set the current |
@@ -584,31 +608,23 @@ int register_tracer(struct tracer *type)
584 | * internal tracing to verify that everything is in order. | 608 | * internal tracing to verify that everything is in order. |
585 | * If we fail, we do not register this tracer. | 609 | * If we fail, we do not register this tracer. |
586 | */ | 610 | */ |
587 | for_each_tracing_cpu(i) { | 611 | for_each_tracing_cpu(i) |
588 | data = tr->data[i]; | 612 | tracing_reset(tr, i); |
589 | if (!head_page(data)) | 613 | |
590 | continue; | ||
591 | tracing_reset(data); | ||
592 | } | ||
593 | current_trace = type; | 614 | current_trace = type; |
594 | tr->ctrl = 0; | ||
595 | /* the test is responsible for initializing and enabling */ | 615 | /* the test is responsible for initializing and enabling */ |
596 | pr_info("Testing tracer %s: ", type->name); | 616 | pr_info("Testing tracer %s: ", type->name); |
597 | ret = type->selftest(type, tr); | 617 | ret = type->selftest(type, tr); |
598 | /* the test is responsible for resetting too */ | 618 | /* the test is responsible for resetting too */ |
599 | current_trace = saved_tracer; | 619 | current_trace = saved_tracer; |
600 | tr->ctrl = saved_ctrl; | ||
601 | if (ret) { | 620 | if (ret) { |
602 | printk(KERN_CONT "FAILED!\n"); | 621 | printk(KERN_CONT "FAILED!\n"); |
603 | goto out; | 622 | goto out; |
604 | } | 623 | } |
605 | /* Only reset on passing, to avoid touching corrupted buffers */ | 624 | /* Only reset on passing, to avoid touching corrupted buffers */ |
606 | for_each_tracing_cpu(i) { | 625 | for_each_tracing_cpu(i) |
607 | data = tr->data[i]; | 626 | tracing_reset(tr, i); |
608 | if (!head_page(data)) | 627 | |
609 | continue; | ||
610 | tracing_reset(data); | ||
611 | } | ||
612 | printk(KERN_CONT "PASSED\n"); | 628 | printk(KERN_CONT "PASSED\n"); |
613 | } | 629 | } |
614 | #endif | 630 | #endif |
@@ -620,7 +636,9 @@ int register_tracer(struct tracer *type)
620 | max_tracer_type_len = len; | 636 | max_tracer_type_len = len; |
621 | 637 | ||
622 | out: | 638 | out: |
639 | tracing_selftest_running = false; | ||
623 | mutex_unlock(&trace_types_lock); | 640 | mutex_unlock(&trace_types_lock); |
641 | lock_kernel(); | ||
624 | 642 | ||
625 | return ret; | 643 | return ret; |
626 | } | 644 | } |
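register_tracer() now guarantees that every tracer has usable flag callbacks: a NULL set_flag falls back to dummy_set_flag() and NULL flags to dummy_tracer_flags, so generic option code never has to NULL-check them. A hedged sketch of a tracer relying on those defaults (the name and init callback here are hypothetical):

        /* hypothetical tracer with no custom options */
        static struct tracer example_tracer __read_mostly = {
                .name   = "example",
                .init   = example_tracer_init,  /* hypothetical callback */
                /* .flags and .set_flag left NULL on purpose:
                 * register_tracer() fills in the dummies. */
        };

        register_tracer(&example_tracer);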
@@ -653,13 +671,21 @@ void unregister_tracer(struct tracer *type)
653 | mutex_unlock(&trace_types_lock); | 671 | mutex_unlock(&trace_types_lock); |
654 | } | 672 | } |
655 | 673 | ||
656 | void tracing_reset(struct trace_array_cpu *data) | 674 | void tracing_reset(struct trace_array *tr, int cpu) |
657 | { | 675 | { |
658 | data->trace_idx = 0; | 676 | ftrace_disable_cpu(); |
659 | data->overrun = 0; | 677 | ring_buffer_reset_cpu(tr->buffer, cpu); |
660 | data->trace_head = data->trace_tail = head_page(data); | 678 | ftrace_enable_cpu(); |
661 | data->trace_head_idx = 0; | 679 | } |
662 | data->trace_tail_idx = 0; | 680 | |
681 | void tracing_reset_online_cpus(struct trace_array *tr) | ||
682 | { | ||
683 | int cpu; | ||
684 | |||
685 | tr->time_start = ftrace_now(tr->cpu); | ||
686 | |||
687 | for_each_online_cpu(cpu) | ||
688 | tracing_reset(tr, cpu); | ||
663 | } | 689 | } |
664 | 690 | ||
665 | #define SAVED_CMDLINES 128 | 691 | #define SAVED_CMDLINES 128 |
@@ -679,6 +705,91 @@ static void trace_init_cmdlines(void)
679 | cmdline_idx = 0; | 705 | cmdline_idx = 0; |
680 | } | 706 | } |
681 | 707 | ||
708 | static int trace_stop_count; | ||
709 | static DEFINE_SPINLOCK(tracing_start_lock); | ||
710 | |||
711 | /** | ||
712 | * ftrace_off_permanent - disable all ftrace code permanently | ||
713 | * | ||
714 | * This should only be called when a serious anomaly has | ||
715 | * been detected. This will turn off function tracing, | ||
716 | * ring buffers, and other tracing utilities. It takes no | ||
717 | * locks and can be called from any context. | ||
718 | */ | ||
719 | void ftrace_off_permanent(void) | ||
720 | { | ||
721 | tracing_disabled = 1; | ||
722 | ftrace_stop(); | ||
723 | tracing_off_permanent(); | ||
724 | } | ||
725 | |||
726 | /** | ||
727 | * tracing_start - quick start of the tracer | ||
728 | * | ||
729 | * If tracing is enabled but was stopped by tracing_stop, | ||
730 | * this will start the tracer back up. | ||
731 | */ | ||
732 | void tracing_start(void) | ||
733 | { | ||
734 | struct ring_buffer *buffer; | ||
735 | unsigned long flags; | ||
736 | |||
737 | if (tracing_disabled) | ||
738 | return; | ||
739 | |||
740 | spin_lock_irqsave(&tracing_start_lock, flags); | ||
741 | if (--trace_stop_count) | ||
742 | goto out; | ||
743 | |||
744 | if (trace_stop_count < 0) { | ||
745 | /* Someone screwed up their debugging */ | ||
746 | WARN_ON_ONCE(1); | ||
747 | trace_stop_count = 0; | ||
748 | goto out; | ||
749 | } | ||
750 | |||
751 | |||
752 | buffer = global_trace.buffer; | ||
753 | if (buffer) | ||
754 | ring_buffer_record_enable(buffer); | ||
755 | |||
756 | buffer = max_tr.buffer; | ||
757 | if (buffer) | ||
758 | ring_buffer_record_enable(buffer); | ||
759 | |||
760 | ftrace_start(); | ||
761 | out: | ||
762 | spin_unlock_irqrestore(&tracing_start_lock, flags); | ||
763 | } | ||
764 | |||
765 | /** | ||
766 | * tracing_stop - quick stop of the tracer | ||
767 | * | ||
768 | * Lightweight way to stop tracing. Use in conjunction with | ||
769 | * tracing_start. | ||
770 | */ | ||
771 | void tracing_stop(void) | ||
772 | { | ||
773 | struct ring_buffer *buffer; | ||
774 | unsigned long flags; | ||
775 | |||
776 | ftrace_stop(); | ||
777 | spin_lock_irqsave(&tracing_start_lock, flags); | ||
778 | if (trace_stop_count++) | ||
779 | goto out; | ||
780 | |||
781 | buffer = global_trace.buffer; | ||
782 | if (buffer) | ||
783 | ring_buffer_record_disable(buffer); | ||
784 | |||
785 | buffer = max_tr.buffer; | ||
786 | if (buffer) | ||
787 | ring_buffer_record_disable(buffer); | ||
788 | |||
789 | out: | ||
790 | spin_unlock_irqrestore(&tracing_start_lock, flags); | ||
791 | } | ||
792 | |||
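tracing_stop() and tracing_start() nest like a recursive lock: trace_stop_count goes up on every stop, and only the transitions to and from zero actually disable or re-enable the ring buffers. The expected pairing:

        tracing_stop();     /* count 0 -> 1: buffers disabled */
        tracing_stop();     /* count 1 -> 2: no-op */
        tracing_start();    /* count 2 -> 1: still disabled */
        tracing_start();    /* count 1 -> 0: buffers re-enabled */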
682 | void trace_stop_cmdline_recording(void); | 793 | void trace_stop_cmdline_recording(void); |
683 | 794 | ||
684 | static void trace_save_cmdline(struct task_struct *tsk) | 795 | static void trace_save_cmdline(struct task_struct *tsk) |
@@ -716,7 +827,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
716 | spin_unlock(&trace_cmdline_lock); | 827 | spin_unlock(&trace_cmdline_lock); |
717 | } | 828 | } |
718 | 829 | ||
719 | static char *trace_find_cmdline(int pid) | 830 | char *trace_find_cmdline(int pid) |
720 | { | 831 | { |
721 | char *cmdline = "<...>"; | 832 | char *cmdline = "<...>"; |
722 | unsigned map; | 833 | unsigned map; |
@@ -745,82 +856,21 @@ void tracing_record_cmdline(struct task_struct *tsk)
745 | trace_save_cmdline(tsk); | 856 | trace_save_cmdline(tsk); |
746 | } | 857 | } |
747 | 858 | ||
748 | static inline struct list_head * | 859 | void |
749 | trace_next_list(struct trace_array_cpu *data, struct list_head *next) | 860 | tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, |
750 | { | 861 | int pc) |
751 | /* | ||
752 | * Round-robin - but skip the head (which is not a real page): | ||
753 | */ | ||
754 | next = next->next; | ||
755 | if (unlikely(next == &data->trace_pages)) | ||
756 | next = next->next; | ||
757 | BUG_ON(next == &data->trace_pages); | ||
758 | |||
759 | return next; | ||
760 | } | ||
761 | |||
762 | static inline void * | ||
763 | trace_next_page(struct trace_array_cpu *data, void *addr) | ||
764 | { | ||
765 | struct list_head *next; | ||
766 | struct page *page; | ||
767 | |||
768 | page = virt_to_page(addr); | ||
769 | |||
770 | next = trace_next_list(data, &page->lru); | ||
771 | page = list_entry(next, struct page, lru); | ||
772 | |||
773 | return page_address(page); | ||
774 | } | ||
775 | |||
776 | static inline struct trace_entry * | ||
777 | tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data) | ||
778 | { | ||
779 | unsigned long idx, idx_next; | ||
780 | struct trace_entry *entry; | ||
781 | |||
782 | data->trace_idx++; | ||
783 | idx = data->trace_head_idx; | ||
784 | idx_next = idx + 1; | ||
785 | |||
786 | BUG_ON(idx * TRACE_ENTRY_SIZE >= PAGE_SIZE); | ||
787 | |||
788 | entry = data->trace_head + idx * TRACE_ENTRY_SIZE; | ||
789 | |||
790 | if (unlikely(idx_next >= ENTRIES_PER_PAGE)) { | ||
791 | data->trace_head = trace_next_page(data, data->trace_head); | ||
792 | idx_next = 0; | ||
793 | } | ||
794 | |||
795 | if (data->trace_head == data->trace_tail && | ||
796 | idx_next == data->trace_tail_idx) { | ||
797 | /* overrun */ | ||
798 | data->overrun++; | ||
799 | data->trace_tail_idx++; | ||
800 | if (data->trace_tail_idx >= ENTRIES_PER_PAGE) { | ||
801 | data->trace_tail = | ||
802 | trace_next_page(data, data->trace_tail); | ||
803 | data->trace_tail_idx = 0; | ||
804 | } | ||
805 | } | ||
806 | |||
807 | data->trace_head_idx = idx_next; | ||
808 | |||
809 | return entry; | ||
810 | } | ||
811 | |||
812 | static inline void | ||
813 | tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags) | ||
814 | { | 862 | { |
815 | struct task_struct *tsk = current; | 863 | struct task_struct *tsk = current; |
816 | unsigned long pc; | ||
817 | 864 | ||
818 | pc = preempt_count(); | 865 | entry->preempt_count = pc & 0xff; |
819 | 866 | entry->pid = (tsk) ? tsk->pid : 0; | |
820 | entry->preempt_count = pc & 0xff; | 867 | entry->tgid = (tsk) ? tsk->tgid : 0; |
821 | entry->pid = (tsk) ? tsk->pid : 0; | 868 | entry->flags = |
822 | entry->t = ftrace_now(raw_smp_processor_id()); | 869 | #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT |
823 | entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | | 870 | (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | |
871 | #else | ||
872 | TRACE_FLAG_IRQS_NOSUPPORT | | ||
873 | #endif | ||
824 | ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | | 874 | ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | |
825 | ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | | 875 | ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | |
826 | (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); | 876 | (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); |
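tracing_generic_entry_update() now takes the preempt count as an explicit pc argument instead of reading it itself, so each tracepoint samples it once, before touching the per-cpu disabled counter, and hands the same value to every helper. The calling convention used throughout this patch:

        pc = preempt_count();   /* sampled once, up front */
        tracing_generic_entry_update(&entry->ent, flags, pc);
        ftrace_trace_stack(tr, data, flags, skip, pc);  /* same pc reused */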
@@ -828,145 +878,233 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
828 | 878 | ||
829 | void | 879 | void |
830 | trace_function(struct trace_array *tr, struct trace_array_cpu *data, | 880 | trace_function(struct trace_array *tr, struct trace_array_cpu *data, |
831 | unsigned long ip, unsigned long parent_ip, unsigned long flags) | 881 | unsigned long ip, unsigned long parent_ip, unsigned long flags, |
882 | int pc) | ||
832 | { | 883 | { |
833 | struct trace_entry *entry; | 884 | struct ring_buffer_event *event; |
885 | struct ftrace_entry *entry; | ||
834 | unsigned long irq_flags; | 886 | unsigned long irq_flags; |
835 | 887 | ||
836 | raw_local_irq_save(irq_flags); | 888 | /* If we are reading the ring buffer, don't trace */ |
837 | __raw_spin_lock(&data->lock); | 889 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) |
838 | entry = tracing_get_trace_entry(tr, data); | 890 | return; |
839 | tracing_generic_entry_update(entry, flags); | ||
840 | entry->type = TRACE_FN; | ||
841 | entry->fn.ip = ip; | ||
842 | entry->fn.parent_ip = parent_ip; | ||
843 | __raw_spin_unlock(&data->lock); | ||
844 | raw_local_irq_restore(irq_flags); | ||
845 | } | ||
846 | 891 | ||
847 | void | 892 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), |
848 | ftrace(struct trace_array *tr, struct trace_array_cpu *data, | 893 | &irq_flags); |
849 | unsigned long ip, unsigned long parent_ip, unsigned long flags) | 894 | if (!event) |
850 | { | 895 | return; |
851 | if (likely(!atomic_read(&data->disabled))) | 896 | entry = ring_buffer_event_data(event); |
852 | trace_function(tr, data, ip, parent_ip, flags); | 897 | tracing_generic_entry_update(&entry->ent, flags, pc); |
898 | entry->ent.type = TRACE_FN; | ||
899 | entry->ip = ip; | ||
900 | entry->parent_ip = parent_ip; | ||
901 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
902 | } | ||
903 | |||
904 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
905 | static void __trace_graph_entry(struct trace_array *tr, | ||
906 | struct trace_array_cpu *data, | ||
907 | struct ftrace_graph_ent *trace, | ||
908 | unsigned long flags, | ||
909 | int pc) | ||
910 | { | ||
911 | struct ring_buffer_event *event; | ||
912 | struct ftrace_graph_ent_entry *entry; | ||
913 | unsigned long irq_flags; | ||
914 | |||
915 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) | ||
916 | return; | ||
917 | |||
918 | event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry), | ||
919 | &irq_flags); | ||
920 | if (!event) | ||
921 | return; | ||
922 | entry = ring_buffer_event_data(event); | ||
923 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
924 | entry->ent.type = TRACE_GRAPH_ENT; | ||
925 | entry->graph_ent = *trace; | ||
926 | ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags); | ||
853 | } | 927 | } |
854 | 928 | ||
855 | #ifdef CONFIG_MMIOTRACE | 929 | static void __trace_graph_return(struct trace_array *tr, |
856 | void __trace_mmiotrace_rw(struct trace_array *tr, struct trace_array_cpu *data, | 930 | struct trace_array_cpu *data, |
857 | struct mmiotrace_rw *rw) | 931 | struct ftrace_graph_ret *trace, |
932 | unsigned long flags, | ||
933 | int pc) | ||
858 | { | 934 | { |
859 | struct trace_entry *entry; | 935 | struct ring_buffer_event *event; |
936 | struct ftrace_graph_ret_entry *entry; | ||
860 | unsigned long irq_flags; | 937 | unsigned long irq_flags; |
861 | 938 | ||
862 | raw_local_irq_save(irq_flags); | 939 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) |
863 | __raw_spin_lock(&data->lock); | 940 | return; |
864 | |||
865 | entry = tracing_get_trace_entry(tr, data); | ||
866 | tracing_generic_entry_update(entry, 0); | ||
867 | entry->type = TRACE_MMIO_RW; | ||
868 | entry->mmiorw = *rw; | ||
869 | 941 | ||
870 | __raw_spin_unlock(&data->lock); | 942 | event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry), |
871 | raw_local_irq_restore(irq_flags); | 943 | &irq_flags); |
944 | if (!event) | ||
945 | return; | ||
946 | entry = ring_buffer_event_data(event); | ||
947 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
948 | entry->ent.type = TRACE_GRAPH_RET; | ||
949 | entry->ret = *trace; | ||
950 | ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags); | ||
951 | } | ||
952 | #endif | ||
872 | 953 | ||
873 | trace_wake_up(); | 954 | void |
955 | ftrace(struct trace_array *tr, struct trace_array_cpu *data, | ||
956 | unsigned long ip, unsigned long parent_ip, unsigned long flags, | ||
957 | int pc) | ||
958 | { | ||
959 | if (likely(!atomic_read(&data->disabled))) | ||
960 | trace_function(tr, data, ip, parent_ip, flags, pc); | ||
874 | } | 961 | } |
875 | 962 | ||
876 | void __trace_mmiotrace_map(struct trace_array *tr, struct trace_array_cpu *data, | 963 | static void ftrace_trace_stack(struct trace_array *tr, |
877 | struct mmiotrace_map *map) | 964 | struct trace_array_cpu *data, |
965 | unsigned long flags, | ||
966 | int skip, int pc) | ||
878 | { | 967 | { |
879 | struct trace_entry *entry; | 968 | #ifdef CONFIG_STACKTRACE |
969 | struct ring_buffer_event *event; | ||
970 | struct stack_entry *entry; | ||
971 | struct stack_trace trace; | ||
880 | unsigned long irq_flags; | 972 | unsigned long irq_flags; |
881 | 973 | ||
882 | raw_local_irq_save(irq_flags); | 974 | if (!(trace_flags & TRACE_ITER_STACKTRACE)) |
883 | __raw_spin_lock(&data->lock); | 975 | return; |
884 | 976 | ||
885 | entry = tracing_get_trace_entry(tr, data); | 977 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), |
886 | tracing_generic_entry_update(entry, 0); | 978 | &irq_flags); |
887 | entry->type = TRACE_MMIO_MAP; | 979 | if (!event) |
888 | entry->mmiomap = *map; | 980 | return; |
981 | entry = ring_buffer_event_data(event); | ||
982 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
983 | entry->ent.type = TRACE_STACK; | ||
889 | 984 | ||
890 | __raw_spin_unlock(&data->lock); | 985 | memset(&entry->caller, 0, sizeof(entry->caller)); |
891 | raw_local_irq_restore(irq_flags); | ||
892 | 986 | ||
893 | trace_wake_up(); | 987 | trace.nr_entries = 0; |
894 | } | 988 | trace.max_entries = FTRACE_STACK_ENTRIES; |
989 | trace.skip = skip; | ||
990 | trace.entries = entry->caller; | ||
991 | |||
992 | save_stack_trace(&trace); | ||
993 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
895 | #endif | 994 | #endif |
995 | } | ||
896 | 996 | ||
897 | void __trace_stack(struct trace_array *tr, | 997 | void __trace_stack(struct trace_array *tr, |
898 | struct trace_array_cpu *data, | 998 | struct trace_array_cpu *data, |
899 | unsigned long flags, | 999 | unsigned long flags, |
900 | int skip) | 1000 | int skip) |
901 | { | 1001 | { |
902 | struct trace_entry *entry; | 1002 | ftrace_trace_stack(tr, data, flags, skip, preempt_count()); |
1003 | } | ||
1004 | |||
1005 | static void ftrace_trace_userstack(struct trace_array *tr, | ||
1006 | struct trace_array_cpu *data, | ||
1007 | unsigned long flags, int pc) | ||
1008 | { | ||
1009 | #ifdef CONFIG_STACKTRACE | ||
1010 | struct ring_buffer_event *event; | ||
1011 | struct userstack_entry *entry; | ||
903 | struct stack_trace trace; | 1012 | struct stack_trace trace; |
1013 | unsigned long irq_flags; | ||
904 | 1014 | ||
905 | if (!(trace_flags & TRACE_ITER_STACKTRACE)) | 1015 | if (!(trace_flags & TRACE_ITER_USERSTACKTRACE)) |
906 | return; | 1016 | return; |
907 | 1017 | ||
908 | entry = tracing_get_trace_entry(tr, data); | 1018 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), |
909 | tracing_generic_entry_update(entry, flags); | 1019 | &irq_flags); |
910 | entry->type = TRACE_STACK; | 1020 | if (!event) |
1021 | return; | ||
1022 | entry = ring_buffer_event_data(event); | ||
1023 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
1024 | entry->ent.type = TRACE_USER_STACK; | ||
911 | 1025 | ||
912 | memset(&entry->stack, 0, sizeof(entry->stack)); | 1026 | memset(&entry->caller, 0, sizeof(entry->caller)); |
913 | 1027 | ||
914 | trace.nr_entries = 0; | 1028 | trace.nr_entries = 0; |
915 | trace.max_entries = FTRACE_STACK_ENTRIES; | 1029 | trace.max_entries = FTRACE_STACK_ENTRIES; |
916 | trace.skip = skip; | 1030 | trace.skip = 0; |
917 | trace.entries = entry->stack.caller; | 1031 | trace.entries = entry->caller; |
918 | 1032 | ||
919 | save_stack_trace(&trace); | 1033 | save_stack_trace_user(&trace); |
1034 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
1035 | #endif | ||
920 | } | 1036 | } |
921 | 1037 | ||
922 | void | 1038 | void __trace_userstack(struct trace_array *tr, |
923 | __trace_special(void *__tr, void *__data, | 1039 | struct trace_array_cpu *data, |
924 | unsigned long arg1, unsigned long arg2, unsigned long arg3) | 1040 | unsigned long flags) |
925 | { | 1041 | { |
1042 | ftrace_trace_userstack(tr, data, flags, preempt_count()); | ||
1043 | } | ||
1044 | |||
1045 | static void | ||
1046 | ftrace_trace_special(void *__tr, void *__data, | ||
1047 | unsigned long arg1, unsigned long arg2, unsigned long arg3, | ||
1048 | int pc) | ||
1049 | { | ||
1050 | struct ring_buffer_event *event; | ||
926 | struct trace_array_cpu *data = __data; | 1051 | struct trace_array_cpu *data = __data; |
927 | struct trace_array *tr = __tr; | 1052 | struct trace_array *tr = __tr; |
928 | struct trace_entry *entry; | 1053 | struct special_entry *entry; |
929 | unsigned long irq_flags; | 1054 | unsigned long irq_flags; |
930 | 1055 | ||
931 | raw_local_irq_save(irq_flags); | 1056 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), |
932 | __raw_spin_lock(&data->lock); | 1057 | &irq_flags); |
933 | entry = tracing_get_trace_entry(tr, data); | 1058 | if (!event) |
934 | tracing_generic_entry_update(entry, 0); | 1059 | return; |
935 | entry->type = TRACE_SPECIAL; | 1060 | entry = ring_buffer_event_data(event); |
936 | entry->special.arg1 = arg1; | 1061 | tracing_generic_entry_update(&entry->ent, 0, pc); |
937 | entry->special.arg2 = arg2; | 1062 | entry->ent.type = TRACE_SPECIAL; |
938 | entry->special.arg3 = arg3; | 1063 | entry->arg1 = arg1; |
939 | __trace_stack(tr, data, irq_flags, 4); | 1064 | entry->arg2 = arg2; |
940 | __raw_spin_unlock(&data->lock); | 1065 | entry->arg3 = arg3; |
941 | raw_local_irq_restore(irq_flags); | 1066 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
1067 | ftrace_trace_stack(tr, data, irq_flags, 4, pc); | ||
1068 | ftrace_trace_userstack(tr, data, irq_flags, pc); | ||
942 | 1069 | ||
943 | trace_wake_up(); | 1070 | trace_wake_up(); |
944 | } | 1071 | } |
945 | 1072 | ||
946 | void | 1073 | void |
1074 | __trace_special(void *__tr, void *__data, | ||
1075 | unsigned long arg1, unsigned long arg2, unsigned long arg3) | ||
1076 | { | ||
1077 | ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count()); | ||
1078 | } | ||
1079 | |||
1080 | void | ||
947 | tracing_sched_switch_trace(struct trace_array *tr, | 1081 | tracing_sched_switch_trace(struct trace_array *tr, |
948 | struct trace_array_cpu *data, | 1082 | struct trace_array_cpu *data, |
949 | struct task_struct *prev, | 1083 | struct task_struct *prev, |
950 | struct task_struct *next, | 1084 | struct task_struct *next, |
951 | unsigned long flags) | 1085 | unsigned long flags, int pc) |
952 | { | 1086 | { |
953 | struct trace_entry *entry; | 1087 | struct ring_buffer_event *event; |
1088 | struct ctx_switch_entry *entry; | ||
954 | unsigned long irq_flags; | 1089 | unsigned long irq_flags; |
955 | 1090 | ||
956 | raw_local_irq_save(irq_flags); | 1091 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), |
957 | __raw_spin_lock(&data->lock); | 1092 | &irq_flags); |
958 | entry = tracing_get_trace_entry(tr, data); | 1093 | if (!event) |
959 | tracing_generic_entry_update(entry, flags); | 1094 | return; |
960 | entry->type = TRACE_CTX; | 1095 | entry = ring_buffer_event_data(event); |
961 | entry->ctx.prev_pid = prev->pid; | 1096 | tracing_generic_entry_update(&entry->ent, flags, pc); |
962 | entry->ctx.prev_prio = prev->prio; | 1097 | entry->ent.type = TRACE_CTX; |
963 | entry->ctx.prev_state = prev->state; | 1098 | entry->prev_pid = prev->pid; |
964 | entry->ctx.next_pid = next->pid; | 1099 | entry->prev_prio = prev->prio; |
965 | entry->ctx.next_prio = next->prio; | 1100 | entry->prev_state = prev->state; |
966 | entry->ctx.next_state = next->state; | 1101 | entry->next_pid = next->pid; |
967 | __trace_stack(tr, data, flags, 5); | 1102 | entry->next_prio = next->prio; |
968 | __raw_spin_unlock(&data->lock); | 1103 | entry->next_state = next->state; |
969 | raw_local_irq_restore(irq_flags); | 1104 | entry->next_cpu = task_cpu(next); |
1105 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
1106 | ftrace_trace_stack(tr, data, flags, 5, pc); | ||
1107 | ftrace_trace_userstack(tr, data, flags, pc); | ||
970 | } | 1108 | } |
971 | 1109 | ||
972 | void | 1110 | void |
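Every writer converted in this patch follows the same lockless shape: reserve an event of the right size from the ring buffer, fill it in, then commit it. The per-cpu data->lock and raw_local_irq_save() pairs are gone because the ring buffer serializes internally. The recurring skeleton, shown here for an arbitrary event type:

        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;   /* any event struct works */
        unsigned long irq_flags;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                         &irq_flags);
        if (!event)
                return;                   /* buffer disabled or full */
        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, flags, pc);
        entry->ent.type = TRACE_CTX;      /* then the payload fields... */
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);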
@@ -974,25 +1112,29 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
974 | struct trace_array_cpu *data, | 1112 | struct trace_array_cpu *data, |
975 | struct task_struct *wakee, | 1113 | struct task_struct *wakee, |
976 | struct task_struct *curr, | 1114 | struct task_struct *curr, |
977 | unsigned long flags) | 1115 | unsigned long flags, int pc) |
978 | { | 1116 | { |
979 | struct trace_entry *entry; | 1117 | struct ring_buffer_event *event; |
1118 | struct ctx_switch_entry *entry; | ||
980 | unsigned long irq_flags; | 1119 | unsigned long irq_flags; |
981 | 1120 | ||
982 | raw_local_irq_save(irq_flags); | 1121 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), |
983 | __raw_spin_lock(&data->lock); | 1122 | &irq_flags); |
984 | entry = tracing_get_trace_entry(tr, data); | 1123 | if (!event) |
985 | tracing_generic_entry_update(entry, flags); | 1124 | return; |
986 | entry->type = TRACE_WAKE; | 1125 | entry = ring_buffer_event_data(event); |
987 | entry->ctx.prev_pid = curr->pid; | 1126 | tracing_generic_entry_update(&entry->ent, flags, pc); |
988 | entry->ctx.prev_prio = curr->prio; | 1127 | entry->ent.type = TRACE_WAKE; |
989 | entry->ctx.prev_state = curr->state; | 1128 | entry->prev_pid = curr->pid; |
990 | entry->ctx.next_pid = wakee->pid; | 1129 | entry->prev_prio = curr->prio; |
991 | entry->ctx.next_prio = wakee->prio; | 1130 | entry->prev_state = curr->state; |
992 | entry->ctx.next_state = wakee->state; | 1131 | entry->next_pid = wakee->pid; |
993 | __trace_stack(tr, data, flags, 6); | 1132 | entry->next_prio = wakee->prio; |
994 | __raw_spin_unlock(&data->lock); | 1133 | entry->next_state = wakee->state; |
995 | raw_local_irq_restore(irq_flags); | 1134 | entry->next_cpu = task_cpu(wakee); |
1135 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
1136 | ftrace_trace_stack(tr, data, flags, 6, pc); | ||
1137 | ftrace_trace_userstack(tr, data, flags, pc); | ||
996 | 1138 | ||
997 | trace_wake_up(); | 1139 | trace_wake_up(); |
998 | } | 1140 | } |
@@ -1003,25 +1145,52 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
1003 | struct trace_array *tr = &global_trace; | 1145 | struct trace_array *tr = &global_trace; |
1004 | struct trace_array_cpu *data; | 1146 | struct trace_array_cpu *data; |
1005 | unsigned long flags; | 1147 | unsigned long flags; |
1006 | long disabled; | ||
1007 | int cpu; | 1148 | int cpu; |
1149 | int pc; | ||
1008 | 1150 | ||
1009 | if (tracing_disabled || current_trace == &no_tracer || !tr->ctrl) | 1151 | if (tracing_disabled) |
1010 | return; | 1152 | return; |
1011 | 1153 | ||
1154 | pc = preempt_count(); | ||
1012 | local_irq_save(flags); | 1155 | local_irq_save(flags); |
1013 | cpu = raw_smp_processor_id(); | 1156 | cpu = raw_smp_processor_id(); |
1014 | data = tr->data[cpu]; | 1157 | data = tr->data[cpu]; |
1158 | |||
1159 | if (likely(atomic_inc_return(&data->disabled) == 1)) | ||
1160 | ftrace_trace_special(tr, data, arg1, arg2, arg3, pc); | ||
1161 | |||
1162 | atomic_dec(&data->disabled); | ||
1163 | local_irq_restore(flags); | ||
1164 | } | ||
1165 | |||
1166 | #ifdef CONFIG_FUNCTION_TRACER | ||
1167 | static void | ||
1168 | function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) | ||
1169 | { | ||
1170 | struct trace_array *tr = &global_trace; | ||
1171 | struct trace_array_cpu *data; | ||
1172 | unsigned long flags; | ||
1173 | long disabled; | ||
1174 | int cpu, resched; | ||
1175 | int pc; | ||
1176 | |||
1177 | if (unlikely(!ftrace_function_enabled)) | ||
1178 | return; | ||
1179 | |||
1180 | pc = preempt_count(); | ||
1181 | resched = ftrace_preempt_disable(); | ||
1182 | local_save_flags(flags); | ||
1183 | cpu = raw_smp_processor_id(); | ||
1184 | data = tr->data[cpu]; | ||
1015 | disabled = atomic_inc_return(&data->disabled); | 1185 | disabled = atomic_inc_return(&data->disabled); |
1016 | 1186 | ||
1017 | if (likely(disabled == 1)) | 1187 | if (likely(disabled == 1)) |
1018 | __trace_special(tr, data, arg1, arg2, arg3); | 1188 | trace_function(tr, data, ip, parent_ip, flags, pc); |
1019 | 1189 | ||
1020 | atomic_dec(&data->disabled); | 1190 | atomic_dec(&data->disabled); |
1021 | local_irq_restore(flags); | 1191 | ftrace_preempt_enable(resched); |
1022 | } | 1192 | } |
1023 | 1193 | ||
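function_trace_call_preempt_only() uses ftrace_preempt_disable()/ftrace_preempt_enable() rather than plain preempt_disable(): the helpers (defined in trace.h, not shown in this hunk) are understood to remember whether NEED_RESCHED was already set, so a tracer called from inside the scheduler does not trigger a recursive schedule on the way back out. A sketch of the pattern under that assumption:

        int resched;

        resched = ftrace_preempt_disable(); /* true if resched was pending */
        /* ... record the trace entry ... */
        ftrace_preempt_enable(resched);     /* won't reschedule if it was */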
1024 | #ifdef CONFIG_FTRACE | ||
1025 | static void | 1194 | static void |
1026 | function_trace_call(unsigned long ip, unsigned long parent_ip) | 1195 | function_trace_call(unsigned long ip, unsigned long parent_ip) |
1027 | { | 1196 | { |
@@ -1030,24 +1199,85 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
1030 | unsigned long flags; | 1199 | unsigned long flags; |
1031 | long disabled; | 1200 | long disabled; |
1032 | int cpu; | 1201 | int cpu; |
1202 | int pc; | ||
1033 | 1203 | ||
1034 | if (unlikely(!ftrace_function_enabled)) | 1204 | if (unlikely(!ftrace_function_enabled)) |
1035 | return; | 1205 | return; |
1036 | 1206 | ||
1037 | if (skip_trace(ip)) | 1207 | /* |
1038 | return; | 1208 | * Need to use raw, since this must be called before the |
1209 | * recursive protection is performed. | ||
1210 | */ | ||
1211 | local_irq_save(flags); | ||
1212 | cpu = raw_smp_processor_id(); | ||
1213 | data = tr->data[cpu]; | ||
1214 | disabled = atomic_inc_return(&data->disabled); | ||
1215 | |||
1216 | if (likely(disabled == 1)) { | ||
1217 | pc = preempt_count(); | ||
1218 | trace_function(tr, data, ip, parent_ip, flags, pc); | ||
1219 | } | ||
1220 | |||
1221 | atomic_dec(&data->disabled); | ||
1222 | local_irq_restore(flags); | ||
1223 | } | ||
1224 | |||
1225 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
1226 | int trace_graph_entry(struct ftrace_graph_ent *trace) | ||
1227 | { | ||
1228 | struct trace_array *tr = &global_trace; | ||
1229 | struct trace_array_cpu *data; | ||
1230 | unsigned long flags; | ||
1231 | long disabled; | ||
1232 | int cpu; | ||
1233 | int pc; | ||
1234 | |||
1235 | if (!ftrace_trace_task(current)) | ||
1236 | return 0; | ||
1237 | |||
1238 | if (!ftrace_graph_addr(trace->func)) | ||
1239 | return 0; | ||
1039 | 1240 | ||
1040 | local_irq_save(flags); | 1241 | local_irq_save(flags); |
1041 | cpu = raw_smp_processor_id(); | 1242 | cpu = raw_smp_processor_id(); |
1042 | data = tr->data[cpu]; | 1243 | data = tr->data[cpu]; |
1043 | disabled = atomic_inc_return(&data->disabled); | 1244 | disabled = atomic_inc_return(&data->disabled); |
1245 | if (likely(disabled == 1)) { | ||
1246 | pc = preempt_count(); | ||
1247 | __trace_graph_entry(tr, data, trace, flags, pc); | ||
1248 | } | ||
1249 | /* Only do the atomic if it is not already set */ | ||
1250 | if (!test_tsk_trace_graph(current)) | ||
1251 | set_tsk_trace_graph(current); | ||
1252 | atomic_dec(&data->disabled); | ||
1253 | local_irq_restore(flags); | ||
1044 | 1254 | ||
1045 | if (likely(disabled == 1)) | 1255 | return 1; |
1046 | trace_function(tr, data, ip, parent_ip, flags); | 1256 | } |
1047 | 1257 | ||
1258 | void trace_graph_return(struct ftrace_graph_ret *trace) | ||
1259 | { | ||
1260 | struct trace_array *tr = &global_trace; | ||
1261 | struct trace_array_cpu *data; | ||
1262 | unsigned long flags; | ||
1263 | long disabled; | ||
1264 | int cpu; | ||
1265 | int pc; | ||
1266 | |||
1267 | local_irq_save(flags); | ||
1268 | cpu = raw_smp_processor_id(); | ||
1269 | data = tr->data[cpu]; | ||
1270 | disabled = atomic_inc_return(&data->disabled); | ||
1271 | if (likely(disabled == 1)) { | ||
1272 | pc = preempt_count(); | ||
1273 | __trace_graph_return(tr, data, trace, flags, pc); | ||
1274 | } | ||
1275 | if (!trace->depth) | ||
1276 | clear_tsk_trace_graph(current); | ||
1048 | atomic_dec(&data->disabled); | 1277 | atomic_dec(&data->disabled); |
1049 | local_irq_restore(flags); | 1278 | local_irq_restore(flags); |
1050 | } | 1279 | } |
1280 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
1051 | 1281 | ||
1052 | static struct ftrace_ops trace_ops __read_mostly = | 1282 | static struct ftrace_ops trace_ops __read_mostly = |
1053 | { | 1283 | { |
@@ -1057,9 +1287,14 @@ static struct ftrace_ops trace_ops __read_mostly =
1057 | void tracing_start_function_trace(void) | 1287 | void tracing_start_function_trace(void) |
1058 | { | 1288 | { |
1059 | ftrace_function_enabled = 0; | 1289 | ftrace_function_enabled = 0; |
1290 | |||
1291 | if (trace_flags & TRACE_ITER_PREEMPTONLY) | ||
1292 | trace_ops.func = function_trace_call_preempt_only; | ||
1293 | else | ||
1294 | trace_ops.func = function_trace_call; | ||
1295 | |||
1060 | register_ftrace_function(&trace_ops); | 1296 | register_ftrace_function(&trace_ops); |
1061 | if (tracer_enabled) | 1297 | ftrace_function_enabled = 1; |
1062 | ftrace_function_enabled = 1; | ||
1063 | } | 1298 | } |
1064 | 1299 | ||
1065 | void tracing_stop_function_trace(void) | 1300 | void tracing_stop_function_trace(void) |
@@ -1071,113 +1306,99 @@ void tracing_stop_function_trace(void)
1071 | 1306 | ||
1072 | enum trace_file_type { | 1307 | enum trace_file_type { |
1073 | TRACE_FILE_LAT_FMT = 1, | 1308 | TRACE_FILE_LAT_FMT = 1, |
1309 | TRACE_FILE_ANNOTATE = 2, | ||
1074 | }; | 1310 | }; |
1075 | 1311 | ||
1076 | static struct trace_entry * | 1312 | static void trace_iterator_increment(struct trace_iterator *iter) |
1077 | trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data, | ||
1078 | struct trace_iterator *iter, int cpu) | ||
1079 | { | 1313 | { |
1080 | struct page *page; | 1314 | /* Don't allow ftrace to trace into the ring buffers */ |
1081 | struct trace_entry *array; | 1315 | ftrace_disable_cpu(); |
1082 | 1316 | ||
1083 | if (iter->next_idx[cpu] >= tr->entries || | 1317 | iter->idx++; |
1084 | iter->next_idx[cpu] >= data->trace_idx || | 1318 | if (iter->buffer_iter[iter->cpu]) |
1085 | (data->trace_head == data->trace_tail && | 1319 | ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); |
1086 | data->trace_head_idx == data->trace_tail_idx)) | ||
1087 | return NULL; | ||
1088 | 1320 | ||
1089 | if (!iter->next_page[cpu]) { | 1321 | ftrace_enable_cpu(); |
1090 | /* Initialize the iterator for this cpu trace buffer */ | 1322 | } |
1091 | WARN_ON(!data->trace_tail); | ||
1092 | page = virt_to_page(data->trace_tail); | ||
1093 | iter->next_page[cpu] = &page->lru; | ||
1094 | iter->next_page_idx[cpu] = data->trace_tail_idx; | ||
1095 | } | ||
1096 | 1323 | ||
1097 | page = list_entry(iter->next_page[cpu], struct page, lru); | 1324 | static struct trace_entry * |
1098 | BUG_ON(&data->trace_pages == &page->lru); | 1325 | peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts) |
1326 | { | ||
1327 | struct ring_buffer_event *event; | ||
1328 | struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu]; | ||
1099 | 1329 | ||
1100 | array = page_address(page); | 1330 | /* Don't allow ftrace to trace into the ring buffers */ |
1331 | ftrace_disable_cpu(); | ||
1101 | 1332 | ||
1102 | WARN_ON(iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE); | 1333 | if (buf_iter) |
1103 | return &array[iter->next_page_idx[cpu]]; | 1334 | event = ring_buffer_iter_peek(buf_iter, ts); |
1335 | else | ||
1336 | event = ring_buffer_peek(iter->tr->buffer, cpu, ts); | ||
1337 | |||
1338 | ftrace_enable_cpu(); | ||
1339 | |||
1340 | return event ? ring_buffer_event_data(event) : NULL; | ||
1104 | } | 1341 | } |
1105 | 1342 | ||
1106 | static struct trace_entry * | 1343 | static struct trace_entry * |
1107 | find_next_entry(struct trace_iterator *iter, int *ent_cpu) | 1344 | __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) |
1108 | { | 1345 | { |
1109 | struct trace_array *tr = iter->tr; | 1346 | struct ring_buffer *buffer = iter->tr->buffer; |
1110 | struct trace_entry *ent, *next = NULL; | 1347 | struct trace_entry *ent, *next = NULL; |
1348 | u64 next_ts = 0, ts; | ||
1111 | int next_cpu = -1; | 1349 | int next_cpu = -1; |
1112 | int cpu; | 1350 | int cpu; |
1113 | 1351 | ||
1114 | for_each_tracing_cpu(cpu) { | 1352 | for_each_tracing_cpu(cpu) { |
1115 | if (!head_page(tr->data[cpu])) | 1353 | |
1354 | if (ring_buffer_empty_cpu(buffer, cpu)) | ||
1116 | continue; | 1355 | continue; |
1117 | ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu); | 1356 | |
1357 | ent = peek_next_entry(iter, cpu, &ts); | ||
1358 | |||
1118 | /* | 1359 | /* |
1119 | * Pick the entry with the smallest timestamp: | 1360 | * Pick the entry with the smallest timestamp: |
1120 | */ | 1361 | */ |
1121 | if (ent && (!next || ent->t < next->t)) { | 1362 | if (ent && (!next || ts < next_ts)) { |
1122 | next = ent; | 1363 | next = ent; |
1123 | next_cpu = cpu; | 1364 | next_cpu = cpu; |
1365 | next_ts = ts; | ||
1124 | } | 1366 | } |
1125 | } | 1367 | } |
1126 | 1368 | ||
1127 | if (ent_cpu) | 1369 | if (ent_cpu) |
1128 | *ent_cpu = next_cpu; | 1370 | *ent_cpu = next_cpu; |
1129 | 1371 | ||
1372 | if (ent_ts) | ||
1373 | *ent_ts = next_ts; | ||
1374 | |||
1130 | return next; | 1375 | return next; |
1131 | } | 1376 | } |
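__find_next_entry above is a k-way merge: it peeks at the head of every per-CPU buffer and returns the entry with the smallest timestamp. A minimal, compilable sketch of the same selection loop over plain arrays; the two-CPU data is invented for illustration:

#include <stdio.h>
#include <stdint.h>

#define NCPUS 2

struct entry { uint64_t ts; const char *msg; };

/* Per-CPU queues stand in for the ring buffers; idx[] for the iterators. */
static struct entry cpu_buf[NCPUS][3] = {
        { {10, "a0"}, {30, "a1"}, {50, "a2"} },
        { {20, "b0"}, {25, "b1"}, {60, "b2"} },
};
static int idx[NCPUS];

static struct entry *peek(int cpu)
{
        return idx[cpu] < 3 ? &cpu_buf[cpu][idx[cpu]] : NULL;
}

static struct entry *find_next(int *ent_cpu)
{
        struct entry *ent, *next = NULL;
        int cpu, next_cpu = -1;

        for (cpu = 0; cpu < NCPUS; cpu++) {
                ent = peek(cpu);
                /* Pick the entry with the smallest timestamp, as above. */
                if (ent && (!next || ent->ts < next->ts)) {
                        next = ent;
                        next_cpu = cpu;
                }
        }
        if (ent_cpu)
                *ent_cpu = next_cpu;
        return next;
}

int main(void)
{
        struct entry *e;
        int cpu;

        while ((e = find_next(&cpu))) {
                printf("cpu%d %llu %s\n", cpu, (unsigned long long)e->ts, e->msg);
                idx[cpu]++;     /* the "increment" step done by the iterator */
        }
        return 0;
}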
1132 | 1377 | ||
1133 | static void trace_iterator_increment(struct trace_iterator *iter) | 1378 | /* Find the next real entry, without updating the iterator itself */ |
1379 | static struct trace_entry * | ||
1380 | find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) | ||
1134 | { | 1381 | { |
1135 | iter->idx++; | 1382 | return __find_next_entry(iter, ent_cpu, ent_ts); |
1136 | iter->next_idx[iter->cpu]++; | ||
1137 | iter->next_page_idx[iter->cpu]++; | ||
1138 | |||
1139 | if (iter->next_page_idx[iter->cpu] >= ENTRIES_PER_PAGE) { | ||
1140 | struct trace_array_cpu *data = iter->tr->data[iter->cpu]; | ||
1141 | |||
1142 | iter->next_page_idx[iter->cpu] = 0; | ||
1143 | iter->next_page[iter->cpu] = | ||
1144 | trace_next_list(data, iter->next_page[iter->cpu]); | ||
1145 | } | ||
1146 | } | 1383 | } |
1147 | 1384 | ||
1148 | static void trace_consume(struct trace_iterator *iter) | 1385 | /* Find the next real entry, and increment the iterator to the next entry */ |
1386 | static void *find_next_entry_inc(struct trace_iterator *iter) | ||
1149 | { | 1387 | { |
1150 | struct trace_array_cpu *data = iter->tr->data[iter->cpu]; | 1388 | iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts); |
1151 | 1389 | ||
1152 | data->trace_tail_idx++; | 1390 | if (iter->ent) |
1153 | if (data->trace_tail_idx >= ENTRIES_PER_PAGE) { | 1391 | trace_iterator_increment(iter); |
1154 | data->trace_tail = trace_next_page(data, data->trace_tail); | ||
1155 | data->trace_tail_idx = 0; | ||
1156 | } | ||
1157 | 1392 | ||
1158 | /* Check if we empty it, then reset the index */ | 1393 | return iter->ent ? iter : NULL; |
1159 | if (data->trace_head == data->trace_tail && | ||
1160 | data->trace_head_idx == data->trace_tail_idx) | ||
1161 | data->trace_idx = 0; | ||
1162 | } | 1394 | } |
1163 | 1395 | ||
1164 | static void *find_next_entry_inc(struct trace_iterator *iter) | 1396 | static void trace_consume(struct trace_iterator *iter) |
1165 | { | 1397 | { |
1166 | struct trace_entry *next; | 1398 | /* Don't allow ftrace to trace into the ring buffers */ |
1167 | int next_cpu = -1; | 1399 | ftrace_disable_cpu(); |
1168 | 1400 | ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts); | |
1169 | next = find_next_entry(iter, &next_cpu); | 1401 | ftrace_enable_cpu(); |
1170 | |||
1171 | iter->prev_ent = iter->ent; | ||
1172 | iter->prev_cpu = iter->cpu; | ||
1173 | |||
1174 | iter->ent = next; | ||
1175 | iter->cpu = next_cpu; | ||
1176 | |||
1177 | if (next) | ||
1178 | trace_iterator_increment(iter); | ||
1179 | |||
1180 | return next ? iter : NULL; | ||
1181 | } | 1402 | } |
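The new trace_consume is the destructive counterpart of the peek path: ring_buffer_consume removes the entry it hands back, while an iterator read leaves the buffer untouched for the next reader. In the queue model from the sketch above, the difference is a single increment:

/* Reuses peek() and idx[] from the previous sketch. */
static struct entry *consume(int cpu)
{
        struct entry *e = peek(cpu);

        if (e)
                idx[cpu]++;     /* gone: a second reader will not see it */
        return e;
}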
1182 | 1403 | ||
1183 | static void *s_next(struct seq_file *m, void *v, loff_t *pos) | 1404 | static void *s_next(struct seq_file *m, void *v, loff_t *pos) |
@@ -1210,7 +1431,7 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
1210 | struct trace_iterator *iter = m->private; | 1431 | struct trace_iterator *iter = m->private; |
1211 | void *p = NULL; | 1432 | void *p = NULL; |
1212 | loff_t l = 0; | 1433 | loff_t l = 0; |
1213 | int i; | 1434 | int cpu; |
1214 | 1435 | ||
1215 | mutex_lock(&trace_types_lock); | 1436 | mutex_lock(&trace_types_lock); |
1216 | 1437 | ||
@@ -1221,22 +1442,19 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
1221 | 1442 | ||
1222 | atomic_inc(&trace_record_cmdline_disabled); | 1443 | atomic_inc(&trace_record_cmdline_disabled); |
1223 | 1444 | ||
1224 | /* let the tracer grab locks here if needed */ | ||
1225 | if (current_trace->start) | ||
1226 | current_trace->start(iter); | ||
1227 | |||
1228 | if (*pos != iter->pos) { | 1445 | if (*pos != iter->pos) { |
1229 | iter->ent = NULL; | 1446 | iter->ent = NULL; |
1230 | iter->cpu = 0; | 1447 | iter->cpu = 0; |
1231 | iter->idx = -1; | 1448 | iter->idx = -1; |
1232 | iter->prev_ent = NULL; | ||
1233 | iter->prev_cpu = -1; | ||
1234 | 1449 | ||
1235 | for_each_tracing_cpu(i) { | 1450 | ftrace_disable_cpu(); |
1236 | iter->next_idx[i] = 0; | 1451 | |
1237 | iter->next_page[i] = NULL; | 1452 | for_each_tracing_cpu(cpu) { |
1453 | ring_buffer_iter_reset(iter->buffer_iter[cpu]); | ||
1238 | } | 1454 | } |
1239 | 1455 | ||
1456 | ftrace_enable_cpu(); | ||
1457 | |||
1240 | for (p = iter; p && l < *pos; p = s_next(m, p, &l)) | 1458 | for (p = iter; p && l < *pos; p = s_next(m, p, &l)) |
1241 | ; | 1459 | ; |
1242 | 1460 | ||
@@ -1250,28 +1468,24 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
1250 | 1468 | ||
1251 | static void s_stop(struct seq_file *m, void *p) | 1469 | static void s_stop(struct seq_file *m, void *p) |
1252 | { | 1470 | { |
1253 | struct trace_iterator *iter = m->private; | ||
1254 | |||
1255 | atomic_dec(&trace_record_cmdline_disabled); | 1471 | atomic_dec(&trace_record_cmdline_disabled); |
1256 | |||
1257 | /* let the tracer release locks here if needed */ | ||
1258 | if (current_trace && current_trace == iter->trace && iter->trace->stop) | ||
1259 | iter->trace->stop(iter); | ||
1260 | |||
1261 | mutex_unlock(&trace_types_lock); | 1472 | mutex_unlock(&trace_types_lock); |
1262 | } | 1473 | } |
1263 | 1474 | ||
1264 | #define KRETPROBE_MSG "[unknown/kretprobe'd]" | ||
1265 | |||
1266 | #ifdef CONFIG_KRETPROBES | 1475 | #ifdef CONFIG_KRETPROBES |
1267 | static inline int kretprobed(unsigned long addr) | 1476 | static inline const char *kretprobed(const char *name) |
1268 | { | 1477 | { |
1269 | return addr == (unsigned long)kretprobe_trampoline; | 1478 | static const char tramp_name[] = "kretprobe_trampoline"; |
1479 | int size = sizeof(tramp_name); | ||
1480 | |||
1481 | if (strncmp(tramp_name, name, size) == 0) | ||
1482 | return "[unknown/kretprobe'd]"; | ||
1483 | return name; | ||
1270 | } | 1484 | } |
1271 | #else | 1485 | #else |
1272 | static inline int kretprobed(unsigned long addr) | 1486 | static inline const char *kretprobed(const char *name) |
1273 | { | 1487 | { |
1274 | return 0; | 1488 | return name; |
1275 | } | 1489 | } |
1276 | #endif /* CONFIG_KRETPROBES */ | 1490 | #endif /* CONFIG_KRETPROBES */ |
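The rewrite above switches kretprobe detection from comparing the trampoline address to comparing the symbol name, since by the time the callers have a string the kallsyms lookup has already been done. A standalone model of the new check; the trampoline name is the one the hunk hardcodes:

#include <stdio.h>
#include <string.h>

static const char *kretprobed(const char *name)
{
        static const char tramp_name[] = "kretprobe_trampoline";

        /* sizeof includes the trailing NUL, so only the exact name matches. */
        if (strncmp(tramp_name, name, sizeof(tramp_name)) == 0)
                return "[unknown/kretprobe'd]";
        return name;
}

int main(void)
{
        printf("%s\n", kretprobed("kretprobe_trampoline")); /* [unknown/kretprobe'd] */
        printf("%s\n", kretprobed("schedule"));             /* schedule */
        return 0;
}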
1277 | 1491 | ||
@@ -1280,10 +1494,13 @@ seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address) | |||
1280 | { | 1494 | { |
1281 | #ifdef CONFIG_KALLSYMS | 1495 | #ifdef CONFIG_KALLSYMS |
1282 | char str[KSYM_SYMBOL_LEN]; | 1496 | char str[KSYM_SYMBOL_LEN]; |
1497 | const char *name; | ||
1283 | 1498 | ||
1284 | kallsyms_lookup(address, NULL, NULL, NULL, str); | 1499 | kallsyms_lookup(address, NULL, NULL, NULL, str); |
1285 | 1500 | ||
1286 | return trace_seq_printf(s, fmt, str); | 1501 | name = kretprobed(str); |
1502 | |||
1503 | return trace_seq_printf(s, fmt, name); | ||
1287 | #endif | 1504 | #endif |
1288 | return 1; | 1505 | return 1; |
1289 | } | 1506 | } |
@@ -1294,9 +1511,12 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt, | |||
1294 | { | 1511 | { |
1295 | #ifdef CONFIG_KALLSYMS | 1512 | #ifdef CONFIG_KALLSYMS |
1296 | char str[KSYM_SYMBOL_LEN]; | 1513 | char str[KSYM_SYMBOL_LEN]; |
1514 | const char *name; | ||
1297 | 1515 | ||
1298 | sprint_symbol(str, address); | 1516 | sprint_symbol(str, address); |
1299 | return trace_seq_printf(s, fmt, str); | 1517 | name = kretprobed(str); |
1518 | |||
1519 | return trace_seq_printf(s, fmt, name); | ||
1300 | #endif | 1520 | #endif |
1301 | return 1; | 1521 | return 1; |
1302 | } | 1522 | } |
@@ -1307,7 +1527,7 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt, | |||
1307 | # define IP_FMT "%016lx" | 1527 | # define IP_FMT "%016lx" |
1308 | #endif | 1528 | #endif |
1309 | 1529 | ||
1310 | static int | 1530 | int |
1311 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) | 1531 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) |
1312 | { | 1532 | { |
1313 | int ret; | 1533 | int ret; |
@@ -1328,23 +1548,95 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) | |||
1328 | return ret; | 1548 | return ret; |
1329 | } | 1549 | } |
1330 | 1550 | ||
1551 | static inline int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm, | ||
1552 | unsigned long ip, unsigned long sym_flags) | ||
1553 | { | ||
1554 | struct file *file = NULL; | ||
1555 | unsigned long vmstart = 0; | ||
1556 | int ret = 1; | ||
1557 | |||
1558 | if (mm) { | ||
1559 | const struct vm_area_struct *vma; | ||
1560 | |||
1561 | down_read(&mm->mmap_sem); | ||
1562 | vma = find_vma(mm, ip); | ||
1563 | if (vma) { | ||
1564 | file = vma->vm_file; | ||
1565 | vmstart = vma->vm_start; | ||
1566 | } | ||
1567 | if (file) { | ||
1568 | ret = trace_seq_path(s, &file->f_path); | ||
1569 | if (ret) | ||
1570 | ret = trace_seq_printf(s, "[+0x%lx]", ip - vmstart); | ||
1571 | } | ||
1572 | up_read(&mm->mmap_sem); | ||
1573 | } | ||
1574 | if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file)) | ||
1575 | ret = trace_seq_printf(s, " <" IP_FMT ">", ip); | ||
1576 | return ret; | ||
1577 | } | ||
1578 | |||
1579 | static int | ||
1580 | seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s, | ||
1581 | unsigned long sym_flags) | ||
1582 | { | ||
1583 | struct mm_struct *mm = NULL; | ||
1584 | int ret = 1; | ||
1585 | unsigned int i; | ||
1586 | |||
1587 | if (trace_flags & TRACE_ITER_SYM_USEROBJ) { | ||
1588 | struct task_struct *task; | ||
1589 | /* | ||
1590 | * we do the lookup on the thread group leader, | ||
1591 | * since individual threads might have already quit! | ||
1592 | */ | ||
1593 | rcu_read_lock(); | ||
1594 | task = find_task_by_vpid(entry->ent.tgid); | ||
1595 | if (task) | ||
1596 | mm = get_task_mm(task); | ||
1597 | rcu_read_unlock(); | ||
1598 | } | ||
1599 | |||
1600 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { | ||
1601 | unsigned long ip = entry->caller[i]; | ||
1602 | |||
1603 | if (ip == ULONG_MAX || !ret) | ||
1604 | break; | ||
1605 | if (i && ret) | ||
1606 | ret = trace_seq_puts(s, " <- "); | ||
1607 | if (!ip) { | ||
1608 | if (ret) | ||
1609 | ret = trace_seq_puts(s, "??"); | ||
1610 | continue; | ||
1611 | } | ||
1612 | if (!ret) | ||
1613 | break; | ||
1614 | if (ret) | ||
1615 | ret = seq_print_user_ip(s, mm, ip, sym_flags); | ||
1616 | } | ||
1617 | |||
1618 | if (mm) | ||
1619 | mmput(mm); | ||
1620 | return ret; | ||
1621 | } | ||
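seq_print_user_ip reduces a user-space address to something stable across runs: the backing file plus the offset from the start of its mapping, i.e. ip - vma->vm_start. A sketch of that arithmetic with invented numbers; the vma_model struct is a stand-in for the kernel's vm_area_struct:

#include <stdio.h>

/* Model of one VMA: [vm_start, vm_end) backed by a file. */
struct vma_model {
        unsigned long vm_start, vm_end;
        const char   *file;
};

int main(void)
{
        struct vma_model vma = { 0x400000, 0x401000, "/bin/example" };
        unsigned long ip = 0x4003a8;    /* invented sample address */

        if (ip >= vma.vm_start && ip < vma.vm_end)
                printf("%s[+0x%lx]\n", vma.file, ip - vma.vm_start);
        return 0;
}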
1622 | |||
1331 | static void print_lat_help_header(struct seq_file *m) | 1623 | static void print_lat_help_header(struct seq_file *m) |
1332 | { | 1624 | { |
1333 | seq_puts(m, "# _------=> CPU# \n"); | 1625 | seq_puts(m, "# _------=> CPU# \n"); |
1334 | seq_puts(m, "# / _-----=> irqs-off \n"); | 1626 | seq_puts(m, "# / _-----=> irqs-off \n"); |
1335 | seq_puts(m, "# | / _----=> need-resched \n"); | 1627 | seq_puts(m, "# | / _----=> need-resched \n"); |
1336 | seq_puts(m, "# || / _---=> hardirq/softirq \n"); | 1628 | seq_puts(m, "# || / _---=> hardirq/softirq \n"); |
1337 | seq_puts(m, "# ||| / _--=> preempt-depth \n"); | 1629 | seq_puts(m, "# ||| / _--=> preempt-depth \n"); |
1338 | seq_puts(m, "# |||| / \n"); | 1630 | seq_puts(m, "# |||| / \n"); |
1339 | seq_puts(m, "# ||||| delay \n"); | 1631 | seq_puts(m, "# ||||| delay \n"); |
1340 | seq_puts(m, "# cmd pid ||||| time | caller \n"); | 1632 | seq_puts(m, "# cmd pid ||||| time | caller \n"); |
1341 | seq_puts(m, "# \\ / ||||| \\ | / \n"); | 1633 | seq_puts(m, "# \\ / ||||| \\ | / \n"); |
1342 | } | 1634 | } |
1343 | 1635 | ||
1344 | static void print_func_help_header(struct seq_file *m) | 1636 | static void print_func_help_header(struct seq_file *m) |
1345 | { | 1637 | { |
1346 | seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"); | 1638 | seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"); |
1347 | seq_puts(m, "# | | | | |\n"); | 1639 | seq_puts(m, "# | | | | |\n"); |
1348 | } | 1640 | } |
1349 | 1641 | ||
1350 | 1642 | ||
@@ -1355,23 +1647,16 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter) | |||
1355 | struct trace_array *tr = iter->tr; | 1647 | struct trace_array *tr = iter->tr; |
1356 | struct trace_array_cpu *data = tr->data[tr->cpu]; | 1648 | struct trace_array_cpu *data = tr->data[tr->cpu]; |
1357 | struct tracer *type = current_trace; | 1649 | struct tracer *type = current_trace; |
1358 | unsigned long total = 0; | 1650 | unsigned long total; |
1359 | unsigned long entries = 0; | 1651 | unsigned long entries; |
1360 | int cpu; | ||
1361 | const char *name = "preemption"; | 1652 | const char *name = "preemption"; |
1362 | 1653 | ||
1363 | if (type) | 1654 | if (type) |
1364 | name = type->name; | 1655 | name = type->name; |
1365 | 1656 | ||
1366 | for_each_tracing_cpu(cpu) { | 1657 | entries = ring_buffer_entries(iter->tr->buffer); |
1367 | if (head_page(tr->data[cpu])) { | 1658 | total = entries + |
1368 | total += tr->data[cpu]->trace_idx; | 1659 | ring_buffer_overruns(iter->tr->buffer); |
1369 | if (tr->data[cpu]->trace_idx > tr->entries) | ||
1370 | entries += tr->entries; | ||
1371 | else | ||
1372 | entries += tr->data[cpu]->trace_idx; | ||
1373 | } | ||
1374 | } | ||
1375 | 1660 | ||
1376 | seq_printf(m, "%s latency trace v1.1.5 on %s\n", | 1661 | seq_printf(m, "%s latency trace v1.1.5 on %s\n", |
1377 | name, UTS_RELEASE); | 1662 | name, UTS_RELEASE); |
@@ -1428,9 +1713,10 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) | |||
1428 | comm = trace_find_cmdline(entry->pid); | 1713 | comm = trace_find_cmdline(entry->pid); |
1429 | 1714 | ||
1430 | trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid); | 1715 | trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid); |
1431 | trace_seq_printf(s, "%d", cpu); | 1716 | trace_seq_printf(s, "%3d", cpu); |
1432 | trace_seq_printf(s, "%c%c", | 1717 | trace_seq_printf(s, "%c%c", |
1433 | (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.', | 1718 | (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : |
1719 | (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.', | ||
1434 | ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.')); | 1720 | ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.')); |
1435 | 1721 | ||
1436 | hardirq = entry->flags & TRACE_FLAG_HARDIRQ; | 1722 | hardirq = entry->flags & TRACE_FLAG_HARDIRQ; |
@@ -1457,7 +1743,7 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) | |||
1457 | unsigned long preempt_mark_thresh = 100; | 1743 | unsigned long preempt_mark_thresh = 100; |
1458 | 1744 | ||
1459 | static void | 1745 | static void |
1460 | lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs, | 1746 | lat_print_timestamp(struct trace_seq *s, u64 abs_usecs, |
1461 | unsigned long rel_usecs) | 1747 | unsigned long rel_usecs) |
1462 | { | 1748 | { |
1463 | trace_seq_printf(s, " %4lldus", abs_usecs); | 1749 | trace_seq_printf(s, " %4lldus", abs_usecs); |
@@ -1471,34 +1757,101 @@ lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs, | |||
1471 | 1757 | ||
1472 | static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; | 1758 | static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; |
1473 | 1759 | ||
1474 | static int | 1760 | static int task_state_char(unsigned long state) |
1761 | { | ||
1762 | int bit = state ? __ffs(state) + 1 : 0; | ||
1763 | |||
1764 | return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?'; | ||
1765 | } | ||
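task_state_char collapses the prev/next state handling that the old code open-coded at every call site: __ffs(state) + 1 indexes into TASK_STATE_TO_CHAR_STR, with state 0 (running) mapping to the first character. A userspace model, assuming the seven-state string of that era; libc ffs() is already 1-based, so it stands in for __ffs() + 1:

#include <stdio.h>
#include <strings.h>    /* ffs() */

static const char state_to_char[] = "RSDTtZX";  /* TASK_STATE_TO_CHAR_STR then */

static int task_state_char(unsigned long state)
{
        int bit = state ? ffs(state) : 0;

        return bit < (int)sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}

int main(void)
{
        printf("%c %c %c\n",
               task_state_char(0),      /* R: running                     */
               task_state_char(1),      /* S: bit 0, TASK_INTERRUPTIBLE   */
               task_state_char(2));     /* D: bit 1, TASK_UNINTERRUPTIBLE */
        return 0;
}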
1766 | |||
1767 | /* | ||
1768 | * The message is supposed to contain an ending newline. | ||
1769 | * If the printing stops prematurely, try to add a newline of our own. | ||
1770 | */ | ||
1771 | void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter) | ||
1772 | { | ||
1773 | struct trace_entry *ent; | ||
1774 | struct trace_field_cont *cont; | ||
1775 | bool ok = true; | ||
1776 | |||
1777 | ent = peek_next_entry(iter, iter->cpu, NULL); | ||
1778 | if (!ent || ent->type != TRACE_CONT) { | ||
1779 | trace_seq_putc(s, '\n'); | ||
1780 | return; | ||
1781 | } | ||
1782 | |||
1783 | do { | ||
1784 | cont = (struct trace_field_cont *)ent; | ||
1785 | if (ok) | ||
1786 | ok = (trace_seq_printf(s, "%s", cont->buf) > 0); | ||
1787 | |||
1788 | ftrace_disable_cpu(); | ||
1789 | |||
1790 | if (iter->buffer_iter[iter->cpu]) | ||
1791 | ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); | ||
1792 | else | ||
1793 | ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL); | ||
1794 | |||
1795 | ftrace_enable_cpu(); | ||
1796 | |||
1797 | ent = peek_next_entry(iter, iter->cpu, NULL); | ||
1798 | } while (ent && ent->type == TRACE_CONT); | ||
1799 | |||
1800 | if (!ok) | ||
1801 | trace_seq_putc(s, '\n'); | ||
1802 | } | ||
1803 | |||
1804 | static void test_cpu_buff_start(struct trace_iterator *iter) | ||
1805 | { | ||
1806 | struct trace_seq *s = &iter->seq; | ||
1807 | |||
1808 | if (!(trace_flags & TRACE_ITER_ANNOTATE)) | ||
1809 | return; | ||
1810 | |||
1811 | if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) | ||
1812 | return; | ||
1813 | |||
1814 | if (cpu_isset(iter->cpu, iter->started)) | ||
1815 | return; | ||
1816 | |||
1817 | cpu_set(iter->cpu, iter->started); | ||
1817 | cpu_set(iter->cpu, iter->started); | ||
1818 | trace_seq_printf(s, "##### CPU %u buffer started #####\n", iter->cpu); | ||
1819 | } | ||
1820 | |||
1821 | static enum print_line_t | ||
1475 | print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) | 1822 | print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) |
1476 | { | 1823 | { |
1477 | struct trace_seq *s = &iter->seq; | 1824 | struct trace_seq *s = &iter->seq; |
1478 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); | 1825 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); |
1479 | struct trace_entry *next_entry = find_next_entry(iter, NULL); | 1826 | struct trace_entry *next_entry; |
1480 | unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE); | 1827 | unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE); |
1481 | struct trace_entry *entry = iter->ent; | 1828 | struct trace_entry *entry = iter->ent; |
1482 | unsigned long abs_usecs; | 1829 | unsigned long abs_usecs; |
1483 | unsigned long rel_usecs; | 1830 | unsigned long rel_usecs; |
1831 | u64 next_ts; | ||
1484 | char *comm; | 1832 | char *comm; |
1485 | int S, T; | 1833 | int S, T; |
1486 | int i; | 1834 | int i; |
1487 | unsigned state; | ||
1488 | 1835 | ||
1836 | if (entry->type == TRACE_CONT) | ||
1837 | return TRACE_TYPE_HANDLED; | ||
1838 | |||
1839 | test_cpu_buff_start(iter); | ||
1840 | |||
1841 | next_entry = find_next_entry(iter, NULL, &next_ts); | ||
1489 | if (!next_entry) | 1842 | if (!next_entry) |
1490 | next_entry = entry; | 1843 | next_ts = iter->ts; |
1491 | rel_usecs = ns2usecs(next_entry->t - entry->t); | 1844 | rel_usecs = ns2usecs(next_ts - iter->ts); |
1492 | abs_usecs = ns2usecs(entry->t - iter->tr->time_start); | 1845 | abs_usecs = ns2usecs(iter->ts - iter->tr->time_start); |
1493 | 1846 | ||
1494 | if (verbose) { | 1847 | if (verbose) { |
1495 | comm = trace_find_cmdline(entry->pid); | 1848 | comm = trace_find_cmdline(entry->pid); |
1496 | trace_seq_printf(s, "%16s %5d %d %d %08x %08x [%08lx]" | 1849 | trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]" |
1497 | " %ld.%03ldms (+%ld.%03ldms): ", | 1850 | " %ld.%03ldms (+%ld.%03ldms): ", |
1498 | comm, | 1851 | comm, |
1499 | entry->pid, cpu, entry->flags, | 1852 | entry->pid, cpu, entry->flags, |
1500 | entry->preempt_count, trace_idx, | 1853 | entry->preempt_count, trace_idx, |
1501 | ns2usecs(entry->t), | 1854 | ns2usecs(iter->ts), |
1502 | abs_usecs/1000, | 1855 | abs_usecs/1000, |
1503 | abs_usecs % 1000, rel_usecs/1000, | 1856 | abs_usecs % 1000, rel_usecs/1000, |
1504 | rel_usecs % 1000); | 1857 | rel_usecs % 1000); |
@@ -1507,52 +1860,99 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) | |||
1507 | lat_print_timestamp(s, abs_usecs, rel_usecs); | 1860 | lat_print_timestamp(s, abs_usecs, rel_usecs); |
1508 | } | 1861 | } |
1509 | switch (entry->type) { | 1862 | switch (entry->type) { |
1510 | case TRACE_FN: | 1863 | case TRACE_FN: { |
1511 | seq_print_ip_sym(s, entry->fn.ip, sym_flags); | 1864 | struct ftrace_entry *field; |
1865 | |||
1866 | trace_assign_type(field, entry); | ||
1867 | |||
1868 | seq_print_ip_sym(s, field->ip, sym_flags); | ||
1512 | trace_seq_puts(s, " ("); | 1869 | trace_seq_puts(s, " ("); |
1513 | if (kretprobed(entry->fn.parent_ip)) | 1870 | seq_print_ip_sym(s, field->parent_ip, sym_flags); |
1514 | trace_seq_puts(s, KRETPROBE_MSG); | ||
1515 | else | ||
1516 | seq_print_ip_sym(s, entry->fn.parent_ip, sym_flags); | ||
1517 | trace_seq_puts(s, ")\n"); | 1871 | trace_seq_puts(s, ")\n"); |
1518 | break; | 1872 | break; |
1873 | } | ||
1519 | case TRACE_CTX: | 1874 | case TRACE_CTX: |
1520 | case TRACE_WAKE: | 1875 | case TRACE_WAKE: { |
1521 | T = entry->ctx.next_state < sizeof(state_to_char) ? | 1876 | struct ctx_switch_entry *field; |
1522 | state_to_char[entry->ctx.next_state] : 'X'; | 1877 | |
1523 | 1878 | trace_assign_type(field, entry); | |
1524 | state = entry->ctx.prev_state ? __ffs(entry->ctx.prev_state) + 1 : 0; | 1879 | |
1525 | S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X'; | 1880 | T = task_state_char(field->next_state); |
1526 | comm = trace_find_cmdline(entry->ctx.next_pid); | 1881 | S = task_state_char(field->prev_state); |
1527 | trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c %s\n", | 1882 | comm = trace_find_cmdline(field->next_pid); |
1528 | entry->ctx.prev_pid, | 1883 | trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", |
1529 | entry->ctx.prev_prio, | 1884 | field->prev_pid, |
1885 | field->prev_prio, | ||
1530 | S, entry->type == TRACE_CTX ? "==>" : " +", | 1886 | S, entry->type == TRACE_CTX ? "==>" : " +", |
1531 | entry->ctx.next_pid, | 1887 | field->next_cpu, |
1532 | entry->ctx.next_prio, | 1888 | field->next_pid, |
1889 | field->next_prio, | ||
1533 | T, comm); | 1890 | T, comm); |
1534 | break; | 1891 | break; |
1535 | case TRACE_SPECIAL: | 1892 | } |
1893 | case TRACE_SPECIAL: { | ||
1894 | struct special_entry *field; | ||
1895 | |||
1896 | trace_assign_type(field, entry); | ||
1897 | |||
1536 | trace_seq_printf(s, "# %ld %ld %ld\n", | 1898 | trace_seq_printf(s, "# %ld %ld %ld\n", |
1537 | entry->special.arg1, | 1899 | field->arg1, |
1538 | entry->special.arg2, | 1900 | field->arg2, |
1539 | entry->special.arg3); | 1901 | field->arg3); |
1540 | break; | 1902 | break; |
1541 | case TRACE_STACK: | 1903 | } |
1904 | case TRACE_STACK: { | ||
1905 | struct stack_entry *field; | ||
1906 | |||
1907 | trace_assign_type(field, entry); | ||
1908 | |||
1542 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { | 1909 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { |
1543 | if (i) | 1910 | if (i) |
1544 | trace_seq_puts(s, " <= "); | 1911 | trace_seq_puts(s, " <= "); |
1545 | seq_print_ip_sym(s, entry->stack.caller[i], sym_flags); | 1912 | seq_print_ip_sym(s, field->caller[i], sym_flags); |
1546 | } | 1913 | } |
1547 | trace_seq_puts(s, "\n"); | 1914 | trace_seq_puts(s, "\n"); |
1548 | break; | 1915 | break; |
1916 | } | ||
1917 | case TRACE_PRINT: { | ||
1918 | struct print_entry *field; | ||
1919 | |||
1920 | trace_assign_type(field, entry); | ||
1921 | |||
1922 | seq_print_ip_sym(s, field->ip, sym_flags); | ||
1923 | trace_seq_printf(s, ": %s", field->buf); | ||
1924 | if (entry->flags & TRACE_FLAG_CONT) | ||
1925 | trace_seq_print_cont(s, iter); | ||
1926 | break; | ||
1927 | } | ||
1928 | case TRACE_BRANCH: { | ||
1929 | struct trace_branch *field; | ||
1930 | |||
1931 | trace_assign_type(field, entry); | ||
1932 | |||
1933 | trace_seq_printf(s, "[%s] %s:%s:%d\n", | ||
1934 | field->correct ? " ok " : " MISS ", | ||
1935 | field->func, | ||
1936 | field->file, | ||
1937 | field->line); | ||
1938 | break; | ||
1939 | } | ||
1940 | case TRACE_USER_STACK: { | ||
1941 | struct userstack_entry *field; | ||
1942 | |||
1943 | trace_assign_type(field, entry); | ||
1944 | |||
1945 | seq_print_userip_objs(field, s, sym_flags); | ||
1946 | trace_seq_putc(s, '\n'); | ||
1947 | break; | ||
1948 | } | ||
1549 | default: | 1949 | default: |
1550 | trace_seq_printf(s, "Unknown type %d\n", entry->type); | 1950 | trace_seq_printf(s, "Unknown type %d\n", entry->type); |
1551 | } | 1951 | } |
1552 | return 1; | 1952 | return TRACE_TYPE_HANDLED; |
1553 | } | 1953 | } |
1554 | 1954 | ||
1555 | static int print_trace_fmt(struct trace_iterator *iter) | 1955 | static enum print_line_t print_trace_fmt(struct trace_iterator *iter) |
1556 | { | 1956 | { |
1557 | struct trace_seq *s = &iter->seq; | 1957 | struct trace_seq *s = &iter->seq; |
1558 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); | 1958 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); |
@@ -1567,90 +1967,154 @@ static int print_trace_fmt(struct trace_iterator *iter) | |||
1567 | 1967 | ||
1568 | entry = iter->ent; | 1968 | entry = iter->ent; |
1569 | 1969 | ||
1970 | if (entry->type == TRACE_CONT) | ||
1971 | return TRACE_TYPE_HANDLED; | ||
1972 | |||
1973 | test_cpu_buff_start(iter); | ||
1974 | |||
1570 | comm = trace_find_cmdline(iter->ent->pid); | 1975 | comm = trace_find_cmdline(iter->ent->pid); |
1571 | 1976 | ||
1572 | t = ns2usecs(entry->t); | 1977 | t = ns2usecs(iter->ts); |
1573 | usec_rem = do_div(t, 1000000ULL); | 1978 | usec_rem = do_div(t, 1000000ULL); |
1574 | secs = (unsigned long)t; | 1979 | secs = (unsigned long)t; |
1575 | 1980 | ||
1576 | ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid); | 1981 | ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid); |
1577 | if (!ret) | 1982 | if (!ret) |
1578 | return 0; | 1983 | return TRACE_TYPE_PARTIAL_LINE; |
1579 | ret = trace_seq_printf(s, "[%02d] ", iter->cpu); | 1984 | ret = trace_seq_printf(s, "[%03d] ", iter->cpu); |
1580 | if (!ret) | 1985 | if (!ret) |
1581 | return 0; | 1986 | return TRACE_TYPE_PARTIAL_LINE; |
1582 | ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem); | 1987 | ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem); |
1583 | if (!ret) | 1988 | if (!ret) |
1584 | return 0; | 1989 | return TRACE_TYPE_PARTIAL_LINE; |
1585 | 1990 | ||
1586 | switch (entry->type) { | 1991 | switch (entry->type) { |
1587 | case TRACE_FN: | 1992 | case TRACE_FN: { |
1588 | ret = seq_print_ip_sym(s, entry->fn.ip, sym_flags); | 1993 | struct ftrace_entry *field; |
1994 | |||
1995 | trace_assign_type(field, entry); | ||
1996 | |||
1997 | ret = seq_print_ip_sym(s, field->ip, sym_flags); | ||
1589 | if (!ret) | 1998 | if (!ret) |
1590 | return 0; | 1999 | return TRACE_TYPE_PARTIAL_LINE; |
1591 | if ((sym_flags & TRACE_ITER_PRINT_PARENT) && | 2000 | if ((sym_flags & TRACE_ITER_PRINT_PARENT) && |
1592 | entry->fn.parent_ip) { | 2001 | field->parent_ip) { |
1593 | ret = trace_seq_printf(s, " <-"); | 2002 | ret = trace_seq_printf(s, " <-"); |
1594 | if (!ret) | 2003 | if (!ret) |
1595 | return 0; | 2004 | return TRACE_TYPE_PARTIAL_LINE; |
1596 | if (kretprobed(entry->fn.parent_ip)) | 2005 | ret = seq_print_ip_sym(s, |
1597 | ret = trace_seq_puts(s, KRETPROBE_MSG); | 2006 | field->parent_ip, |
1598 | else | 2007 | sym_flags); |
1599 | ret = seq_print_ip_sym(s, entry->fn.parent_ip, | ||
1600 | sym_flags); | ||
1601 | if (!ret) | 2008 | if (!ret) |
1602 | return 0; | 2009 | return TRACE_TYPE_PARTIAL_LINE; |
1603 | } | 2010 | } |
1604 | ret = trace_seq_printf(s, "\n"); | 2011 | ret = trace_seq_printf(s, "\n"); |
1605 | if (!ret) | 2012 | if (!ret) |
1606 | return 0; | 2013 | return TRACE_TYPE_PARTIAL_LINE; |
1607 | break; | 2014 | break; |
2015 | } | ||
1608 | case TRACE_CTX: | 2016 | case TRACE_CTX: |
1609 | case TRACE_WAKE: | 2017 | case TRACE_WAKE: { |
1610 | S = entry->ctx.prev_state < sizeof(state_to_char) ? | 2018 | struct ctx_switch_entry *field; |
1611 | state_to_char[entry->ctx.prev_state] : 'X'; | 2019 | |
1612 | T = entry->ctx.next_state < sizeof(state_to_char) ? | 2020 | trace_assign_type(field, entry); |
1613 | state_to_char[entry->ctx.next_state] : 'X'; | 2021 | |
1614 | ret = trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c\n", | 2022 | T = task_state_char(field->next_state); |
1615 | entry->ctx.prev_pid, | 2023 | S = task_state_char(field->prev_state); |
1616 | entry->ctx.prev_prio, | 2024 | ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n", |
2025 | field->prev_pid, | ||
2026 | field->prev_prio, | ||
1617 | S, | 2027 | S, |
1618 | entry->type == TRACE_CTX ? "==>" : " +", | 2028 | entry->type == TRACE_CTX ? "==>" : " +", |
1619 | entry->ctx.next_pid, | 2029 | field->next_cpu, |
1620 | entry->ctx.next_prio, | 2030 | field->next_pid, |
2031 | field->next_prio, | ||
1621 | T); | 2032 | T); |
1622 | if (!ret) | 2033 | if (!ret) |
1623 | return 0; | 2034 | return TRACE_TYPE_PARTIAL_LINE; |
1624 | break; | 2035 | break; |
1625 | case TRACE_SPECIAL: | 2036 | } |
2037 | case TRACE_SPECIAL: { | ||
2038 | struct special_entry *field; | ||
2039 | |||
2040 | trace_assign_type(field, entry); | ||
2041 | |||
1626 | ret = trace_seq_printf(s, "# %ld %ld %ld\n", | 2042 | ret = trace_seq_printf(s, "# %ld %ld %ld\n", |
1627 | entry->special.arg1, | 2043 | field->arg1, |
1628 | entry->special.arg2, | 2044 | field->arg2, |
1629 | entry->special.arg3); | 2045 | field->arg3); |
1630 | if (!ret) | 2046 | if (!ret) |
1631 | return 0; | 2047 | return TRACE_TYPE_PARTIAL_LINE; |
1632 | break; | 2048 | break; |
1633 | case TRACE_STACK: | 2049 | } |
2050 | case TRACE_STACK: { | ||
2051 | struct stack_entry *field; | ||
2052 | |||
2053 | trace_assign_type(field, entry); | ||
2054 | |||
1634 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { | 2055 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { |
1635 | if (i) { | 2056 | if (i) { |
1636 | ret = trace_seq_puts(s, " <= "); | 2057 | ret = trace_seq_puts(s, " <= "); |
1637 | if (!ret) | 2058 | if (!ret) |
1638 | return 0; | 2059 | return TRACE_TYPE_PARTIAL_LINE; |
1639 | } | 2060 | } |
1640 | ret = seq_print_ip_sym(s, entry->stack.caller[i], | 2061 | ret = seq_print_ip_sym(s, field->caller[i], |
1641 | sym_flags); | 2062 | sym_flags); |
1642 | if (!ret) | 2063 | if (!ret) |
1643 | return 0; | 2064 | return TRACE_TYPE_PARTIAL_LINE; |
1644 | } | 2065 | } |
1645 | ret = trace_seq_puts(s, "\n"); | 2066 | ret = trace_seq_puts(s, "\n"); |
1646 | if (!ret) | 2067 | if (!ret) |
1647 | return 0; | 2068 | return TRACE_TYPE_PARTIAL_LINE; |
1648 | break; | 2069 | break; |
1649 | } | 2070 | } |
1650 | return 1; | 2071 | case TRACE_PRINT: { |
2072 | struct print_entry *field; | ||
2073 | |||
2074 | trace_assign_type(field, entry); | ||
2075 | |||
2076 | seq_print_ip_sym(s, field->ip, sym_flags); | ||
2077 | trace_seq_printf(s, ": %s", field->buf); | ||
2078 | if (entry->flags & TRACE_FLAG_CONT) | ||
2079 | trace_seq_print_cont(s, iter); | ||
2080 | break; | ||
2081 | } | ||
2082 | case TRACE_GRAPH_RET: { | ||
2083 | return print_graph_function(iter); | ||
2084 | } | ||
2085 | case TRACE_GRAPH_ENT: { | ||
2086 | return print_graph_function(iter); | ||
2087 | } | ||
2088 | case TRACE_BRANCH: { | ||
2089 | struct trace_branch *field; | ||
2090 | |||
2091 | trace_assign_type(field, entry); | ||
2092 | |||
2093 | trace_seq_printf(s, "[%s] %s:%s:%d\n", | ||
2094 | field->correct ? " ok " : " MISS ", | ||
2095 | field->func, | ||
2096 | field->file, | ||
2097 | field->line); | ||
2098 | break; | ||
2099 | } | ||
2100 | case TRACE_USER_STACK: { | ||
2101 | struct userstack_entry *field; | ||
2102 | |||
2103 | trace_assign_type(field, entry); | ||
2104 | |||
2105 | ret = seq_print_userip_objs(field, s, sym_flags); | ||
2106 | if (!ret) | ||
2107 | return TRACE_TYPE_PARTIAL_LINE; | ||
2108 | ret = trace_seq_putc(s, '\n'); | ||
2109 | if (!ret) | ||
2110 | return TRACE_TYPE_PARTIAL_LINE; | ||
2111 | break; | ||
2112 | } | ||
2113 | } | ||
2114 | return TRACE_TYPE_HANDLED; | ||
1651 | } | 2115 | } |
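Every trace_seq_printf in print_trace_fmt is now checked because the seq buffer is fixed-size: once a write no longer fits, the whole line is reported as TRACE_TYPE_PARTIAL_LINE so the caller can flush and retry. A minimal model of that protocol; the 32-byte buffer size is invented:

#include <stdarg.h>
#include <stdio.h>

enum print_line_t { TRACE_TYPE_PARTIAL_LINE, TRACE_TYPE_HANDLED };

struct seq_model { char buf[32]; size_t len; };

/* Returns 0 when the formatted text does not fit, like trace_seq_printf. */
static int seq_printf_model(struct seq_model *s, const char *fmt, ...)
{
        va_list ap;
        int ret;

        va_start(ap, fmt);
        ret = vsnprintf(s->buf + s->len, sizeof(s->buf) - s->len, fmt, ap);
        va_end(ap);
        if (ret < 0 || s->len + ret >= sizeof(s->buf))
                return 0;
        s->len += ret;
        return 1;
}

static enum print_line_t print_one(struct seq_model *s, int pid, unsigned long ip)
{
        if (!seq_printf_model(s, "%d: ", pid))
                return TRACE_TYPE_PARTIAL_LINE;         /* caller retries line */
        if (!seq_printf_model(s, "%#lx\n", ip))
                return TRACE_TYPE_PARTIAL_LINE;
        return TRACE_TYPE_HANDLED;
}

int main(void)
{
        struct seq_model s = { .len = 0 };

        if (print_one(&s, 42, 0xc01dbeefUL) == TRACE_TYPE_HANDLED)
                fputs(s.buf, stdout);
        return 0;
}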
1652 | 2116 | ||
1653 | static int print_raw_fmt(struct trace_iterator *iter) | 2117 | static enum print_line_t print_raw_fmt(struct trace_iterator *iter) |
1654 | { | 2118 | { |
1655 | struct trace_seq *s = &iter->seq; | 2119 | struct trace_seq *s = &iter->seq; |
1656 | struct trace_entry *entry; | 2120 | struct trace_entry *entry; |
@@ -1659,47 +2123,75 @@ static int print_raw_fmt(struct trace_iterator *iter) | |||
1659 | 2123 | ||
1660 | entry = iter->ent; | 2124 | entry = iter->ent; |
1661 | 2125 | ||
2126 | if (entry->type == TRACE_CONT) | ||
2127 | return TRACE_TYPE_HANDLED; | ||
2128 | |||
1662 | ret = trace_seq_printf(s, "%d %d %llu ", | 2129 | ret = trace_seq_printf(s, "%d %d %llu ", |
1663 | entry->pid, iter->cpu, entry->t); | 2130 | entry->pid, iter->cpu, iter->ts); |
1664 | if (!ret) | 2131 | if (!ret) |
1665 | return 0; | 2132 | return TRACE_TYPE_PARTIAL_LINE; |
1666 | 2133 | ||
1667 | switch (entry->type) { | 2134 | switch (entry->type) { |
1668 | case TRACE_FN: | 2135 | case TRACE_FN: { |
2136 | struct ftrace_entry *field; | ||
2137 | |||
2138 | trace_assign_type(field, entry); | ||
2139 | |||
1669 | ret = trace_seq_printf(s, "%x %x\n", | 2140 | ret = trace_seq_printf(s, "%x %x\n", |
1670 | entry->fn.ip, entry->fn.parent_ip); | 2141 | field->ip, |
2142 | field->parent_ip); | ||
1671 | if (!ret) | 2143 | if (!ret) |
1672 | return 0; | 2144 | return TRACE_TYPE_PARTIAL_LINE; |
1673 | break; | 2145 | break; |
2146 | } | ||
1674 | case TRACE_CTX: | 2147 | case TRACE_CTX: |
1675 | case TRACE_WAKE: | 2148 | case TRACE_WAKE: { |
1676 | S = entry->ctx.prev_state < sizeof(state_to_char) ? | 2149 | struct ctx_switch_entry *field; |
1677 | state_to_char[entry->ctx.prev_state] : 'X'; | 2150 | |
1678 | T = entry->ctx.next_state < sizeof(state_to_char) ? | 2151 | trace_assign_type(field, entry); |
1679 | state_to_char[entry->ctx.next_state] : 'X'; | 2152 | |
1680 | if (entry->type == TRACE_WAKE) | 2153 | T = task_state_char(field->next_state); |
1681 | S = '+'; | 2154 | S = entry->type == TRACE_WAKE ? '+' : |
1682 | ret = trace_seq_printf(s, "%d %d %c %d %d %c\n", | 2155 | task_state_char(field->prev_state); |
1683 | entry->ctx.prev_pid, | 2156 | ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n", |
1684 | entry->ctx.prev_prio, | 2157 | field->prev_pid, |
2158 | field->prev_prio, | ||
1685 | S, | 2159 | S, |
1686 | entry->ctx.next_pid, | 2160 | field->next_cpu, |
1687 | entry->ctx.next_prio, | 2161 | field->next_pid, |
2162 | field->next_prio, | ||
1688 | T); | 2163 | T); |
1689 | if (!ret) | 2164 | if (!ret) |
1690 | return 0; | 2165 | return TRACE_TYPE_PARTIAL_LINE; |
1691 | break; | 2166 | break; |
2167 | } | ||
1692 | case TRACE_SPECIAL: | 2168 | case TRACE_SPECIAL: |
1693 | case TRACE_STACK: | 2169 | case TRACE_USER_STACK: |
2170 | case TRACE_STACK: { | ||
2171 | struct special_entry *field; | ||
2172 | |||
2173 | trace_assign_type(field, entry); | ||
2174 | |||
1694 | ret = trace_seq_printf(s, "# %ld %ld %ld\n", | 2175 | ret = trace_seq_printf(s, "# %ld %ld %ld\n", |
1695 | entry->special.arg1, | 2176 | field->arg1, |
1696 | entry->special.arg2, | 2177 | field->arg2, |
1697 | entry->special.arg3); | 2178 | field->arg3); |
1698 | if (!ret) | 2179 | if (!ret) |
1699 | return 0; | 2180 | return TRACE_TYPE_PARTIAL_LINE; |
1700 | break; | 2181 | break; |
1701 | } | 2182 | } |
1702 | return 1; | 2183 | case TRACE_PRINT: { |
2184 | struct print_entry *field; | ||
2185 | |||
2186 | trace_assign_type(field, entry); | ||
2187 | |||
2188 | trace_seq_printf(s, "# %lx %s", field->ip, field->buf); | ||
2189 | if (entry->flags & TRACE_FLAG_CONT) | ||
2190 | trace_seq_print_cont(s, iter); | ||
2191 | break; | ||
2192 | } | ||
2193 | } | ||
2194 | return TRACE_TYPE_HANDLED; | ||
1703 | } | 2195 | } |
1704 | 2196 | ||
1705 | #define SEQ_PUT_FIELD_RET(s, x) \ | 2197 | #define SEQ_PUT_FIELD_RET(s, x) \ |
@@ -1710,11 +2202,12 @@ do { \ | |||
1710 | 2202 | ||
1711 | #define SEQ_PUT_HEX_FIELD_RET(s, x) \ | 2203 | #define SEQ_PUT_HEX_FIELD_RET(s, x) \ |
1712 | do { \ | 2204 | do { \ |
2205 | BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES); \ | ||
1713 | if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \ | 2206 | if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \ |
1714 | return 0; \ | 2207 | return 0; \ |
1715 | } while (0) | 2208 | } while (0) |
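The added BUILD_BUG_ON turns an oversized hex field into a compile-time failure instead of a silently truncated record; MAX_MEMHEX_BYTES bounds how many bytes trace_seq_putmem_hex will expand. The idiom needs nothing kernel-specific. A sketch with stand-in names and an assumed limit of 8 bytes:

/* A negative array size fails the build when cond is true. */
#define BUILD_BUG_ON_MODEL(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
#define MAX_MEMHEX_BYTES_MODEL 8

#define PUT_HEX_CHECKED(x)                                              \
do {                                                                    \
        BUILD_BUG_ON_MODEL(sizeof(x) > MAX_MEMHEX_BYTES_MODEL);         \
        /* ...emit sizeof(x) bytes of hex here... */                    \
} while (0)

/* Compiles: 4 bytes fit. With a 16-byte struct, the build would break. */
int main(void) { char c[4]; PUT_HEX_CHECKED(c); return 0; }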
1716 | 2209 | ||
1717 | static int print_hex_fmt(struct trace_iterator *iter) | 2210 | static enum print_line_t print_hex_fmt(struct trace_iterator *iter) |
1718 | { | 2211 | { |
1719 | struct trace_seq *s = &iter->seq; | 2212 | struct trace_seq *s = &iter->seq; |
1720 | unsigned char newline = '\n'; | 2213 | unsigned char newline = '\n'; |
@@ -1723,97 +2216,162 @@ static int print_hex_fmt(struct trace_iterator *iter) | |||
1723 | 2216 | ||
1724 | entry = iter->ent; | 2217 | entry = iter->ent; |
1725 | 2218 | ||
2219 | if (entry->type == TRACE_CONT) | ||
2220 | return TRACE_TYPE_HANDLED; | ||
2221 | |||
1726 | SEQ_PUT_HEX_FIELD_RET(s, entry->pid); | 2222 | SEQ_PUT_HEX_FIELD_RET(s, entry->pid); |
1727 | SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); | 2223 | SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); |
1728 | SEQ_PUT_HEX_FIELD_RET(s, entry->t); | 2224 | SEQ_PUT_HEX_FIELD_RET(s, iter->ts); |
1729 | 2225 | ||
1730 | switch (entry->type) { | 2226 | switch (entry->type) { |
1731 | case TRACE_FN: | 2227 | case TRACE_FN: { |
1732 | SEQ_PUT_HEX_FIELD_RET(s, entry->fn.ip); | 2228 | struct ftrace_entry *field; |
1733 | SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip); | 2229 | |
2230 | trace_assign_type(field, entry); | ||
2231 | |||
2232 | SEQ_PUT_HEX_FIELD_RET(s, field->ip); | ||
2233 | SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip); | ||
1734 | break; | 2234 | break; |
2235 | } | ||
1735 | case TRACE_CTX: | 2236 | case TRACE_CTX: |
1736 | case TRACE_WAKE: | 2237 | case TRACE_WAKE: { |
1737 | S = entry->ctx.prev_state < sizeof(state_to_char) ? | 2238 | struct ctx_switch_entry *field; |
1738 | state_to_char[entry->ctx.prev_state] : 'X'; | 2239 | |
1739 | T = entry->ctx.next_state < sizeof(state_to_char) ? | 2240 | trace_assign_type(field, entry); |
1740 | state_to_char[entry->ctx.next_state] : 'X'; | 2241 | |
1741 | if (entry->type == TRACE_WAKE) | 2242 | T = task_state_char(field->next_state); |
1742 | S = '+'; | 2243 | S = entry->type == TRACE_WAKE ? '+' : |
1743 | SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_pid); | 2244 | task_state_char(field->prev_state); |
1744 | SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_prio); | 2245 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); |
2246 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio); | ||
1745 | SEQ_PUT_HEX_FIELD_RET(s, S); | 2247 | SEQ_PUT_HEX_FIELD_RET(s, S); |
1746 | SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_pid); | 2248 | SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu); |
1747 | SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_prio); | 2249 | SEQ_PUT_HEX_FIELD_RET(s, field->next_pid); |
1748 | SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip); | 2250 | SEQ_PUT_HEX_FIELD_RET(s, field->next_prio); |
1749 | SEQ_PUT_HEX_FIELD_RET(s, T); | 2251 | SEQ_PUT_HEX_FIELD_RET(s, T); |
1750 | break; | 2252 | break; |
2253 | } | ||
1751 | case TRACE_SPECIAL: | 2254 | case TRACE_SPECIAL: |
1752 | case TRACE_STACK: | 2255 | case TRACE_USER_STACK: |
1753 | SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg1); | 2256 | case TRACE_STACK: { |
1754 | SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg2); | 2257 | struct special_entry *field; |
1755 | SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg3); | 2258 | |
2259 | trace_assign_type(field, entry); | ||
2260 | |||
2261 | SEQ_PUT_HEX_FIELD_RET(s, field->arg1); | ||
2262 | SEQ_PUT_HEX_FIELD_RET(s, field->arg2); | ||
2263 | SEQ_PUT_HEX_FIELD_RET(s, field->arg3); | ||
1756 | break; | 2264 | break; |
1757 | } | 2265 | } |
2266 | } | ||
1758 | SEQ_PUT_FIELD_RET(s, newline); | 2267 | SEQ_PUT_FIELD_RET(s, newline); |
1759 | 2268 | ||
1760 | return 1; | 2269 | return TRACE_TYPE_HANDLED; |
2270 | } | ||
2271 | |||
2272 | static enum print_line_t print_printk_msg_only(struct trace_iterator *iter) | ||
2273 | { | ||
2274 | struct trace_seq *s = &iter->seq; | ||
2275 | struct trace_entry *entry = iter->ent; | ||
2276 | struct print_entry *field; | ||
2277 | int ret; | ||
2278 | |||
2279 | trace_assign_type(field, entry); | ||
2280 | |||
2281 | ret = trace_seq_printf(s, field->buf); | ||
2282 | if (!ret) | ||
2283 | return TRACE_TYPE_PARTIAL_LINE; | ||
2284 | |||
2285 | if (entry->flags & TRACE_FLAG_CONT) | ||
2286 | trace_seq_print_cont(s, iter); | ||
2287 | |||
2288 | return TRACE_TYPE_HANDLED; | ||
1761 | } | 2289 | } |
1762 | 2290 | ||
1763 | static int print_bin_fmt(struct trace_iterator *iter) | 2291 | static enum print_line_t print_bin_fmt(struct trace_iterator *iter) |
1764 | { | 2292 | { |
1765 | struct trace_seq *s = &iter->seq; | 2293 | struct trace_seq *s = &iter->seq; |
1766 | struct trace_entry *entry; | 2294 | struct trace_entry *entry; |
1767 | 2295 | ||
1768 | entry = iter->ent; | 2296 | entry = iter->ent; |
1769 | 2297 | ||
2298 | if (entry->type == TRACE_CONT) | ||
2299 | return TRACE_TYPE_HANDLED; | ||
2300 | |||
1770 | SEQ_PUT_FIELD_RET(s, entry->pid); | 2301 | SEQ_PUT_FIELD_RET(s, entry->pid); |
1771 | SEQ_PUT_FIELD_RET(s, entry->cpu); | 2302 | SEQ_PUT_FIELD_RET(s, entry->cpu); |
1772 | SEQ_PUT_FIELD_RET(s, entry->t); | 2303 | SEQ_PUT_FIELD_RET(s, iter->ts); |
1773 | 2304 | ||
1774 | switch (entry->type) { | 2305 | switch (entry->type) { |
1775 | case TRACE_FN: | 2306 | case TRACE_FN: { |
1776 | SEQ_PUT_FIELD_RET(s, entry->fn.ip); | 2307 | struct ftrace_entry *field; |
1777 | SEQ_PUT_FIELD_RET(s, entry->fn.parent_ip); | 2308 | |
2309 | trace_assign_type(field, entry); | ||
2310 | |||
2311 | SEQ_PUT_FIELD_RET(s, field->ip); | ||
2312 | SEQ_PUT_FIELD_RET(s, field->parent_ip); | ||
1778 | break; | 2313 | break; |
1779 | case TRACE_CTX: | 2314 | } |
1780 | SEQ_PUT_FIELD_RET(s, entry->ctx.prev_pid); | 2315 | case TRACE_CTX: { |
1781 | SEQ_PUT_FIELD_RET(s, entry->ctx.prev_prio); | 2316 | struct ctx_switch_entry *field; |
1782 | SEQ_PUT_FIELD_RET(s, entry->ctx.prev_state); | 2317 | |
1783 | SEQ_PUT_FIELD_RET(s, entry->ctx.next_pid); | 2318 | trace_assign_type(field, entry); |
1784 | SEQ_PUT_FIELD_RET(s, entry->ctx.next_prio); | 2319 | |
1785 | SEQ_PUT_FIELD_RET(s, entry->ctx.next_state); | 2320 | SEQ_PUT_FIELD_RET(s, field->prev_pid); |
2321 | SEQ_PUT_FIELD_RET(s, field->prev_prio); | ||
2322 | SEQ_PUT_FIELD_RET(s, field->prev_state); | ||
2323 | SEQ_PUT_FIELD_RET(s, field->next_pid); | ||
2324 | SEQ_PUT_FIELD_RET(s, field->next_prio); | ||
2325 | SEQ_PUT_FIELD_RET(s, field->next_state); | ||
1786 | break; | 2326 | break; |
2327 | } | ||
1787 | case TRACE_SPECIAL: | 2328 | case TRACE_SPECIAL: |
1788 | case TRACE_STACK: | 2329 | case TRACE_USER_STACK: |
1789 | SEQ_PUT_FIELD_RET(s, entry->special.arg1); | 2330 | case TRACE_STACK: { |
1790 | SEQ_PUT_FIELD_RET(s, entry->special.arg2); | 2331 | struct special_entry *field; |
1791 | SEQ_PUT_FIELD_RET(s, entry->special.arg3); | 2332 | |
2333 | trace_assign_type(field, entry); | ||
2334 | |||
2335 | SEQ_PUT_FIELD_RET(s, field->arg1); | ||
2336 | SEQ_PUT_FIELD_RET(s, field->arg2); | ||
2337 | SEQ_PUT_FIELD_RET(s, field->arg3); | ||
1792 | break; | 2338 | break; |
1793 | } | 2339 | } |
2340 | } | ||
1794 | return 1; | 2341 | return 1; |
1795 | } | 2342 | } |
1796 | 2343 | ||
1797 | static int trace_empty(struct trace_iterator *iter) | 2344 | static int trace_empty(struct trace_iterator *iter) |
1798 | { | 2345 | { |
1799 | struct trace_array_cpu *data; | ||
1800 | int cpu; | 2346 | int cpu; |
1801 | 2347 | ||
1802 | for_each_tracing_cpu(cpu) { | 2348 | for_each_tracing_cpu(cpu) { |
1803 | data = iter->tr->data[cpu]; | 2349 | if (iter->buffer_iter[cpu]) { |
1804 | 2350 | if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) | |
1805 | if (head_page(data) && data->trace_idx && | 2351 | return 0; |
1806 | (data->trace_tail != data->trace_head || | 2352 | } else { |
1807 | data->trace_tail_idx != data->trace_head_idx)) | 2353 | if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu)) |
1808 | return 0; | 2354 | return 0; |
2355 | } | ||
1809 | } | 2356 | } |
2357 | |||
1810 | return 1; | 2358 | return 1; |
1811 | } | 2359 | } |
1812 | 2360 | ||
1813 | static int print_trace_line(struct trace_iterator *iter) | 2361 | static enum print_line_t print_trace_line(struct trace_iterator *iter) |
1814 | { | 2362 | { |
1815 | if (iter->trace && iter->trace->print_line) | 2363 | enum print_line_t ret; |
1816 | return iter->trace->print_line(iter); | 2364 | |
2365 | if (iter->trace && iter->trace->print_line) { | ||
2366 | ret = iter->trace->print_line(iter); | ||
2367 | if (ret != TRACE_TYPE_UNHANDLED) | ||
2368 | return ret; | ||
2369 | } | ||
2370 | |||
2371 | if (iter->ent->type == TRACE_PRINT && | ||
2372 | trace_flags & TRACE_ITER_PRINTK && | ||
2373 | trace_flags & TRACE_ITER_PRINTK_MSGONLY) | ||
2374 | return print_printk_msg_only(iter); | ||
1817 | 2375 | ||
1818 | if (trace_flags & TRACE_ITER_BIN) | 2376 | if (trace_flags & TRACE_ITER_BIN) |
1819 | return print_bin_fmt(iter); | 2377 | return print_bin_fmt(iter); |
@@ -1839,7 +2397,9 @@ static int s_show(struct seq_file *m, void *v) | |||
1839 | seq_printf(m, "# tracer: %s\n", iter->trace->name); | 2397 | seq_printf(m, "# tracer: %s\n", iter->trace->name); |
1840 | seq_puts(m, "#\n"); | 2398 | seq_puts(m, "#\n"); |
1841 | } | 2399 | } |
1842 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) { | 2400 | if (iter->trace && iter->trace->print_header) |
2401 | iter->trace->print_header(m); | ||
2402 | else if (iter->iter_flags & TRACE_FILE_LAT_FMT) { | ||
1843 | /* print nothing if the buffers are empty */ | 2403 | /* print nothing if the buffers are empty */ |
1844 | if (trace_empty(iter)) | 2404 | if (trace_empty(iter)) |
1845 | return 0; | 2405 | return 0; |
@@ -1869,6 +2429,8 @@ static struct trace_iterator * | |||
1869 | __tracing_open(struct inode *inode, struct file *file, int *ret) | 2429 | __tracing_open(struct inode *inode, struct file *file, int *ret) |
1870 | { | 2430 | { |
1871 | struct trace_iterator *iter; | 2431 | struct trace_iterator *iter; |
2432 | struct seq_file *m; | ||
2433 | int cpu; | ||
1872 | 2434 | ||
1873 | if (tracing_disabled) { | 2435 | if (tracing_disabled) { |
1874 | *ret = -ENODEV; | 2436 | *ret = -ENODEV; |
@@ -1889,28 +2451,49 @@ __tracing_open(struct inode *inode, struct file *file, int *ret) | |||
1889 | iter->trace = current_trace; | 2451 | iter->trace = current_trace; |
1890 | iter->pos = -1; | 2452 | iter->pos = -1; |
1891 | 2453 | ||
2454 | /* Notify the tracer early, before we stop tracing. */ | ||
2455 | if (iter->trace && iter->trace->open) | ||
2456 | iter->trace->open(iter); | ||
2457 | |||
2458 | /* Annotate start of buffers if we had overruns */ | ||
2459 | if (ring_buffer_overruns(iter->tr->buffer)) | ||
2460 | iter->iter_flags |= TRACE_FILE_ANNOTATE; | ||
2461 | |||
2462 | |||
2463 | for_each_tracing_cpu(cpu) { | ||
2464 | |||
2465 | iter->buffer_iter[cpu] = | ||
2466 | ring_buffer_read_start(iter->tr->buffer, cpu); | ||
2467 | |||
2468 | if (!iter->buffer_iter[cpu]) | ||
2469 | goto fail_buffer; | ||
2470 | } | ||
2471 | |||
1892 | /* TODO stop tracer */ | 2472 | /* TODO stop tracer */ |
1893 | *ret = seq_open(file, &tracer_seq_ops); | 2473 | *ret = seq_open(file, &tracer_seq_ops); |
1894 | if (!*ret) { | 2474 | if (*ret) |
1895 | struct seq_file *m = file->private_data; | 2475 | goto fail_buffer; |
1896 | m->private = iter; | ||
1897 | 2476 | ||
1898 | /* stop the trace while dumping */ | 2477 | m = file->private_data; |
1899 | if (iter->tr->ctrl) { | 2478 | m->private = iter; |
1900 | tracer_enabled = 0; | 2479 | |
1901 | ftrace_function_enabled = 0; | 2480 | /* stop the trace while dumping */ |
1902 | } | 2481 | tracing_stop(); |
1903 | 2482 | ||
1904 | if (iter->trace && iter->trace->open) | ||
1905 | iter->trace->open(iter); | ||
1906 | } else { | ||
1907 | kfree(iter); | ||
1908 | iter = NULL; | ||
1909 | } | ||
1910 | mutex_unlock(&trace_types_lock); | 2483 | mutex_unlock(&trace_types_lock); |
1911 | 2484 | ||
1912 | out: | 2485 | out: |
1913 | return iter; | 2486 | return iter; |
2487 | |||
2488 | fail_buffer: | ||
2489 | for_each_tracing_cpu(cpu) { | ||
2490 | if (iter->buffer_iter[cpu]) | ||
2491 | ring_buffer_read_finish(iter->buffer_iter[cpu]); | ||
2492 | } | ||
2493 | mutex_unlock(&trace_types_lock); | ||
2494 | kfree(iter); | ||
2495 | |||
2496 | return ERR_PTR(-ENOMEM); | ||
1914 | } | 2497 | } |
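__tracing_open now follows the usual acquire-all-or-unwind shape: per-CPU iterators are taken in a loop, and the fail_buffer label releases exactly those that were obtained. The same pattern in miniature, with invented acquire/release helpers in place of ring_buffer_read_start/read_finish:

#include <stdlib.h>
#include <string.h>

#define NCPUS 4

static void *acquire(int cpu) { (void)cpu; return malloc(16); } /* may fail */
static void release(void *it) { free(it); }

static int open_all(void *iters[NCPUS])
{
        int cpu;

        memset(iters, 0, NCPUS * sizeof(*iters));
        for (cpu = 0; cpu < NCPUS; cpu++) {
                iters[cpu] = acquire(cpu);
                if (!iters[cpu])
                        goto fail;
        }
        return 0;

fail:
        /* Release only what was taken; untouched slots are still NULL. */
        for (cpu = 0; cpu < NCPUS; cpu++)
                if (iters[cpu])
                        release(iters[cpu]);
        return -1;      /* the kernel path returns ERR_PTR(-ENOMEM) */
}

int main(void)
{
        void *iters[NCPUS];
        int cpu, err = open_all(iters);

        if (!err)
                for (cpu = 0; cpu < NCPUS; cpu++)
                        release(iters[cpu]);
        return err ? 1 : 0;
}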
1915 | 2498 | ||
1916 | int tracing_open_generic(struct inode *inode, struct file *filp) | 2499 | int tracing_open_generic(struct inode *inode, struct file *filp) |
@@ -1926,20 +2509,19 @@ int tracing_release(struct inode *inode, struct file *file) | |||
1926 | { | 2509 | { |
1927 | struct seq_file *m = (struct seq_file *)file->private_data; | 2510 | struct seq_file *m = (struct seq_file *)file->private_data; |
1928 | struct trace_iterator *iter = m->private; | 2511 | struct trace_iterator *iter = m->private; |
2512 | int cpu; | ||
1929 | 2513 | ||
1930 | mutex_lock(&trace_types_lock); | 2514 | mutex_lock(&trace_types_lock); |
2515 | for_each_tracing_cpu(cpu) { | ||
2516 | if (iter->buffer_iter[cpu]) | ||
2517 | ring_buffer_read_finish(iter->buffer_iter[cpu]); | ||
2518 | } | ||
2519 | |||
1931 | if (iter->trace && iter->trace->close) | 2520 | if (iter->trace && iter->trace->close) |
1932 | iter->trace->close(iter); | 2521 | iter->trace->close(iter); |
1933 | 2522 | ||
1934 | /* reenable tracing if it was previously enabled */ | 2523 | /* reenable tracing if it was previously enabled */ |
1935 | if (iter->tr->ctrl) { | 2524 | tracing_start(); |
1936 | tracer_enabled = 1; | ||
1937 | /* | ||
1938 | * It is safe to enable function tracing even if it | ||
1939 | * isn't used | ||
1940 | */ | ||
1941 | ftrace_function_enabled = 1; | ||
1942 | } | ||
1943 | mutex_unlock(&trace_types_lock); | 2525 | mutex_unlock(&trace_types_lock); |
1944 | 2526 | ||
1945 | seq_release(inode, file); | 2527 | seq_release(inode, file); |
@@ -2117,7 +2699,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |||
2117 | if (err) | 2699 | if (err) |
2118 | goto err_unlock; | 2700 | goto err_unlock; |
2119 | 2701 | ||
2120 | raw_local_irq_disable(); | 2702 | local_irq_disable(); |
2121 | __raw_spin_lock(&ftrace_max_lock); | 2703 | __raw_spin_lock(&ftrace_max_lock); |
2122 | for_each_tracing_cpu(cpu) { | 2704 | for_each_tracing_cpu(cpu) { |
2123 | /* | 2705 | /* |
@@ -2134,7 +2716,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |||
2134 | } | 2716 | } |
2135 | } | 2717 | } |
2136 | __raw_spin_unlock(&ftrace_max_lock); | 2718 | __raw_spin_unlock(&ftrace_max_lock); |
2137 | raw_local_irq_enable(); | 2719 | local_irq_enable(); |
2138 | 2720 | ||
2139 | tracing_cpumask = tracing_cpumask_new; | 2721 | tracing_cpumask = tracing_cpumask_new; |
2140 | 2722 | ||
@@ -2155,13 +2737,16 @@ static struct file_operations tracing_cpumask_fops = { | |||
2155 | }; | 2737 | }; |
2156 | 2738 | ||
2157 | static ssize_t | 2739 | static ssize_t |
2158 | tracing_iter_ctrl_read(struct file *filp, char __user *ubuf, | 2740 | tracing_trace_options_read(struct file *filp, char __user *ubuf, |
2159 | size_t cnt, loff_t *ppos) | 2741 | size_t cnt, loff_t *ppos) |
2160 | { | 2742 | { |
2743 | int i; | ||
2161 | char *buf; | 2744 | char *buf; |
2162 | int r = 0; | 2745 | int r = 0; |
2163 | int len = 0; | 2746 | int len = 0; |
2164 | int i; | 2747 | u32 tracer_flags = current_trace->flags->val; |
2748 | struct tracer_opt *trace_opts = current_trace->flags->opts; | ||
2749 | |||
2165 | 2750 | ||
2166 | /* calulate max size */ | 2751 | /* calulate max size */ |
2167 | for (i = 0; trace_options[i]; i++) { | 2752 | for (i = 0; trace_options[i]; i++) { |
@@ -2169,6 +2754,15 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf, | |||
2169 | len += 3; /* "no" and space */ | 2754 | len += 3; /* "no" and space */ |
2170 | } | 2755 | } |
2171 | 2756 | ||
2757 | /* | ||
2758 | * Increase the size with the names of options specific | ||
2759 | * to the current tracer. | ||
2760 | */ | ||
2761 | for (i = 0; trace_opts[i].name; i++) { | ||
2762 | len += strlen(trace_opts[i].name); | ||
2763 | len += 3; /* "no" and space */ | ||
2764 | } | ||
2765 | |||
2172 | /* +2 for \n and \0 */ | 2766 | /* +2 for \n and \0 */ |
2173 | buf = kmalloc(len + 2, GFP_KERNEL); | 2767 | buf = kmalloc(len + 2, GFP_KERNEL); |
2174 | if (!buf) | 2768 | if (!buf) |
@@ -2181,6 +2775,15 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf, | |||
2181 | r += sprintf(buf + r, "no%s ", trace_options[i]); | 2775 | r += sprintf(buf + r, "no%s ", trace_options[i]); |
2182 | } | 2776 | } |
2183 | 2777 | ||
2778 | for (i = 0; trace_opts[i].name; i++) { | ||
2779 | if (tracer_flags & trace_opts[i].bit) | ||
2780 | r += sprintf(buf + r, "%s ", | ||
2781 | trace_opts[i].name); | ||
2782 | else | ||
2783 | r += sprintf(buf + r, "no%s ", | ||
2784 | trace_opts[i].name); | ||
2785 | } | ||
2786 | |||
2184 | r += sprintf(buf + r, "\n"); | 2787 | r += sprintf(buf + r, "\n"); |
2185 | WARN_ON(r >= len + 2); | 2788 | WARN_ON(r >= len + 2); |
2186 | 2789 | ||
@@ -2191,13 +2794,48 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf, | |||
2191 | return r; | 2794 | return r; |
2192 | } | 2795 | } |
2193 | 2796 | ||
2797 | /* Try to assign a tracer-specific option */ | ||
2798 | static int set_tracer_option(struct tracer *trace, char *cmp, int neg) | ||
2799 | { | ||
2800 | struct tracer_flags *trace_flags = trace->flags; | ||
2801 | struct tracer_opt *opts = NULL; | ||
2802 | int ret = 0, i = 0; | ||
2803 | int len; | ||
2804 | |||
2805 | for (i = 0; trace_flags->opts[i].name; i++) { | ||
2806 | opts = &trace_flags->opts[i]; | ||
2807 | len = strlen(opts->name); | ||
2808 | |||
2809 | if (strncmp(cmp, opts->name, len) == 0) { | ||
2810 | ret = trace->set_flag(trace_flags->val, | ||
2811 | opts->bit, !neg); | ||
2812 | break; | ||
2813 | } | ||
2814 | } | ||
2815 | /* Not found */ | ||
2816 | if (!trace_flags->opts[i].name) | ||
2817 | return -EINVAL; | ||
2818 | |||
2819 | /* Refused to handle */ | ||
2820 | if (ret) | ||
2821 | return ret; | ||
2822 | |||
2823 | if (neg) | ||
2824 | trace_flags->val &= ~opts->bit; | ||
2825 | else | ||
2826 | trace_flags->val |= opts->bit; | ||
2827 | |||
2828 | return 0; | ||
2829 | } | ||
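set_tracer_option gives each tracer a private flag namespace behind the same trace_options file; the write path below falls through to it only when no global option matched. Roughly what a tracer would supply, in terms of the structs the loop above walks; the option name and bit are hypothetical:

/* Hypothetical tracer-side declaration; only the names are invented. */
#define MY_TRACER_OPT_VERBOSE   0x1

static struct tracer_opt my_tracer_opts[] = {
        { .name = "myverbose", .bit = MY_TRACER_OPT_VERBOSE },
        { }     /* NULL name terminates set_tracer_option's loop */
};

static struct tracer_flags my_tracer_flags = {
        .val  = 0,
        .opts = my_tracer_opts,
};

/* Veto hook: returning nonzero makes set_tracer_option refuse the write. */
static int my_set_flag(u32 old_flags, u32 bit, int set)
{
        return 0;       /* accept; set_tracer_option updates ->val itself */
}

With that wired into the tracer, writing "myverbose" or "nomyverbose" to trace_options reaches set_tracer_option once the global options have been exhausted.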
2830 | |||
2194 | static ssize_t | 2831 | static ssize_t |
2195 | tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf, | 2832 | tracing_trace_options_write(struct file *filp, const char __user *ubuf, |
2196 | size_t cnt, loff_t *ppos) | 2833 | size_t cnt, loff_t *ppos) |
2197 | { | 2834 | { |
2198 | char buf[64]; | 2835 | char buf[64]; |
2199 | char *cmp = buf; | 2836 | char *cmp = buf; |
2200 | int neg = 0; | 2837 | int neg = 0; |
2838 | int ret; | ||
2201 | int i; | 2839 | int i; |
2202 | 2840 | ||
2203 | if (cnt >= sizeof(buf)) | 2841 | if (cnt >= sizeof(buf)) |
@@ -2224,11 +2862,13 @@ tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf, | |||
2224 | break; | 2862 | break; |
2225 | } | 2863 | } |
2226 | } | 2864 | } |
2227 | /* | 2865 | |
2228 | * If no option could be set, return an error: | 2866 | /* If no option could be set, test the specific tracer options */ |
2229 | */ | 2867 | if (!trace_options[i]) { |
2230 | if (!trace_options[i]) | 2868 | ret = set_tracer_option(current_trace, cmp, neg); |
2231 | return -EINVAL; | 2869 | if (ret) |
2870 | return ret; | ||
2871 | } | ||
2232 | 2872 | ||
2233 | filp->f_pos += cnt; | 2873 | filp->f_pos += cnt; |
2234 | 2874 | ||
@@ -2237,8 +2877,8 @@ tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf, | |||
2237 | 2877 | ||
2238 | static struct file_operations tracing_iter_fops = { | 2878 | static struct file_operations tracing_iter_fops = { |
2239 | .open = tracing_open_generic, | 2879 | .open = tracing_open_generic, |
2240 | .read = tracing_iter_ctrl_read, | 2880 | .read = tracing_trace_options_read, |
2241 | .write = tracing_iter_ctrl_write, | 2881 | .write = tracing_trace_options_write, |
2242 | }; | 2882 | }; |
2243 | 2883 | ||
2244 | static const char readme_msg[] = | 2884 | static const char readme_msg[] = |
@@ -2252,9 +2892,9 @@ static const char readme_msg[] = | |||
2252 | "# echo sched_switch > /debug/tracing/current_tracer\n" | 2892 | "# echo sched_switch > /debug/tracing/current_tracer\n" |
2253 | "# cat /debug/tracing/current_tracer\n" | 2893 | "# cat /debug/tracing/current_tracer\n" |
2254 | "sched_switch\n" | 2894 | "sched_switch\n" |
2255 | "# cat /debug/tracing/iter_ctrl\n" | 2895 | "# cat /debug/tracing/trace_options\n" |
2256 | "noprint-parent nosym-offset nosym-addr noverbose\n" | 2896 | "noprint-parent nosym-offset nosym-addr noverbose\n" |
2257 | "# echo print-parent > /debug/tracing/iter_ctrl\n" | 2897 | "# echo print-parent > /debug/tracing/trace_options\n" |
2258 | "# echo 1 > /debug/tracing/tracing_enabled\n" | 2898 | "# echo 1 > /debug/tracing/tracing_enabled\n" |
2259 | "# cat /debug/tracing/trace > /tmp/trace.txt\n" | 2899 | "# cat /debug/tracing/trace > /tmp/trace.txt\n" |
2260 | "echo 0 > /debug/tracing/tracing_enabled\n" | 2900 | "echo 0 > /debug/tracing/tracing_enabled\n" |
@@ -2277,11 +2917,10 @@ static ssize_t | |||
2277 | tracing_ctrl_read(struct file *filp, char __user *ubuf, | 2917 | tracing_ctrl_read(struct file *filp, char __user *ubuf, |
2278 | size_t cnt, loff_t *ppos) | 2918 | size_t cnt, loff_t *ppos) |
2279 | { | 2919 | { |
2280 | struct trace_array *tr = filp->private_data; | ||
2281 | char buf[64]; | 2920 | char buf[64]; |
2282 | int r; | 2921 | int r; |
2283 | 2922 | ||
2284 | r = sprintf(buf, "%ld\n", tr->ctrl); | 2923 | r = sprintf(buf, "%u\n", tracer_enabled); |
2285 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 2924 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
2286 | } | 2925 | } |
2287 | 2926 | ||
@@ -2309,16 +2948,18 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf, | |||
2309 | val = !!val; | 2948 | val = !!val; |
2310 | 2949 | ||
2311 | mutex_lock(&trace_types_lock); | 2950 | mutex_lock(&trace_types_lock); |
2312 | if (tr->ctrl ^ val) { | 2951 | if (tracer_enabled ^ val) { |
2313 | if (val) | 2952 | if (val) { |
2314 | tracer_enabled = 1; | 2953 | tracer_enabled = 1; |
2315 | else | 2954 | if (current_trace->start) |
2955 | current_trace->start(tr); | ||
2956 | tracing_start(); | ||
2957 | } else { | ||
2316 | tracer_enabled = 0; | 2958 | tracer_enabled = 0; |
2317 | 2959 | tracing_stop(); | |
2318 | tr->ctrl = val; | 2960 | if (current_trace->stop) |
2319 | 2961 | current_trace->stop(tr); | |
2320 | if (current_trace && current_trace->ctrl_update) | 2962 | } |
2321 | current_trace->ctrl_update(tr); | ||
2322 | } | 2963 | } |
2323 | mutex_unlock(&trace_types_lock); | 2964 | mutex_unlock(&trace_types_lock); |
2324 | 2965 | ||
@@ -2344,14 +2985,52 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf, | |||
2344 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 2985 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
2345 | } | 2986 | } |
2346 | 2987 | ||
2988 | static int tracing_set_tracer(char *buf) | ||
2989 | { | ||
2990 | struct trace_array *tr = &global_trace; | ||
2991 | struct tracer *t; | ||
2992 | int ret = 0; | ||
2993 | |||
2994 | mutex_lock(&trace_types_lock); | ||
2995 | for (t = trace_types; t; t = t->next) { | ||
2996 | if (strcmp(t->name, buf) == 0) | ||
2997 | break; | ||
2998 | } | ||
2999 | if (!t) { | ||
3000 | ret = -EINVAL; | ||
3001 | goto out; | ||
3002 | } | ||
3003 | if (t == current_trace) | ||
3004 | goto out; | ||
3005 | |||
3006 | trace_branch_disable(); | ||
3007 | if (current_trace && current_trace->reset) | ||
3008 | current_trace->reset(tr); | ||
3009 | |||
3010 | current_trace = t; | ||
3011 | if (t->init) { | ||
3012 | ret = t->init(tr); | ||
3013 | if (ret) | ||
3014 | goto out; | ||
3015 | } | ||
3016 | |||
3017 | trace_branch_enable(tr); | ||
3018 | out: | ||
3019 | mutex_unlock(&trace_types_lock); | ||
3020 | |||
3021 | return ret; | ||
3022 | } | ||
3023 | |||
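A rough sketch (names invented) of the tracer shape this lookup-and-switch expects: ->name is matched against the written string, ->init can now fail with an error code, and ->reset is called on the tracer being replaced:

	static int my_tracer_init(struct trace_array *tr)
	{
		/* arm probes, allocate private state; non-zero aborts the switch */
		return 0;
	}

	static void my_tracer_reset(struct trace_array *tr)
	{
		/* tear down whatever my_tracer_init() set up */
	}

	static struct tracer my_tracer __read_mostly = {
		.name	= "mytracer",
		.init	= my_tracer_init,
		.reset	= my_tracer_reset,
	};

After register_tracer(&my_tracer), "echo mytracer > /debug/tracing/current_tracer" lands in tracing_set_tracer() above, bracketed by the trace_branch_disable()/enable() pair.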
2347 | static ssize_t | 3024 | static ssize_t |
2348 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, | 3025 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, |
2349 | size_t cnt, loff_t *ppos) | 3026 | size_t cnt, loff_t *ppos) |
2350 | { | 3027 | { |
2351 | struct trace_array *tr = &global_trace; | ||
2352 | struct tracer *t; | ||
2353 | char buf[max_tracer_type_len+1]; | 3028 | char buf[max_tracer_type_len+1]; |
2354 | int i; | 3029 | int i; |
3030 | size_t ret; | ||
3031 | int err; | ||
3032 | |||
3033 | ret = cnt; | ||
2355 | 3034 | ||
2356 | if (cnt > max_tracer_type_len) | 3035 | if (cnt > max_tracer_type_len) |
2357 | cnt = max_tracer_type_len; | 3036 | cnt = max_tracer_type_len; |
@@ -2365,27 +3044,13 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf, | |||
2365 | for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) | 3044 | for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) |
2366 | buf[i] = 0; | 3045 | buf[i] = 0; |
2367 | 3046 | ||
2368 | mutex_lock(&trace_types_lock); | 3047 | err = tracing_set_tracer(buf); |
2369 | for (t = trace_types; t; t = t->next) { | 3048 | if (err) |
2370 | if (strcmp(t->name, buf) == 0) | 3049 | return err; |
2371 | break; | ||
2372 | } | ||
2373 | if (!t || t == current_trace) | ||
2374 | goto out; | ||
2375 | |||
2376 | if (current_trace && current_trace->reset) | ||
2377 | current_trace->reset(tr); | ||
2378 | |||
2379 | current_trace = t; | ||
2380 | if (t->init) | ||
2381 | t->init(tr); | ||
2382 | 3050 | ||
2383 | out: | 3051 | filp->f_pos += ret; |
2384 | mutex_unlock(&trace_types_lock); | ||
2385 | 3052 | ||
2386 | filp->f_pos += cnt; | 3053 | return ret; |
2387 | |||
2388 | return cnt; | ||
2389 | } | 3054 | } |
2390 | 3055 | ||
2391 | static ssize_t | 3056 | static ssize_t |
@@ -2450,6 +3115,10 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) | |||
2450 | return -ENOMEM; | 3115 | return -ENOMEM; |
2451 | 3116 | ||
2452 | mutex_lock(&trace_types_lock); | 3117 | mutex_lock(&trace_types_lock); |
3118 | |||
3119 | /* trace pipe does not show start of buffer */ | ||
3120 | cpus_setall(iter->started); | ||
3121 | |||
2453 | iter->tr = &global_trace; | 3122 | iter->tr = &global_trace; |
2454 | iter->trace = current_trace; | 3123 | iter->trace = current_trace; |
2455 | filp->private_data = iter; | 3124 | filp->private_data = iter; |
@@ -2500,20 +3169,12 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, | |||
2500 | size_t cnt, loff_t *ppos) | 3169 | size_t cnt, loff_t *ppos) |
2501 | { | 3170 | { |
2502 | struct trace_iterator *iter = filp->private_data; | 3171 | struct trace_iterator *iter = filp->private_data; |
2503 | struct trace_array_cpu *data; | ||
2504 | static cpumask_t mask; | ||
2505 | unsigned long flags; | ||
2506 | #ifdef CONFIG_FTRACE | ||
2507 | int ftrace_save; | ||
2508 | #endif | ||
2509 | int cpu; | ||
2510 | ssize_t sret; | 3172 | ssize_t sret; |
2511 | 3173 | ||
2512 | /* return any leftover data */ | 3174 | /* return any leftover data */ |
2513 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); | 3175 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); |
2514 | if (sret != -EBUSY) | 3176 | if (sret != -EBUSY) |
2515 | return sret; | 3177 | return sret; |
2516 | sret = 0; | ||
2517 | 3178 | ||
2518 | trace_seq_reset(&iter->seq); | 3179 | trace_seq_reset(&iter->seq); |
2519 | 3180 | ||
@@ -2524,6 +3185,8 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, | |||
2524 | goto out; | 3185 | goto out; |
2525 | } | 3186 | } |
2526 | 3187 | ||
3188 | waitagain: | ||
3189 | sret = 0; | ||
2527 | while (trace_empty(iter)) { | 3190 | while (trace_empty(iter)) { |
2528 | 3191 | ||
2529 | if ((filp->f_flags & O_NONBLOCK)) { | 3192 | if ((filp->f_flags & O_NONBLOCK)) { |
@@ -2588,46 +3251,12 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, | |||
2588 | offsetof(struct trace_iterator, seq)); | 3251 | offsetof(struct trace_iterator, seq)); |
2589 | iter->pos = -1; | 3252 | iter->pos = -1; |
2590 | 3253 | ||
2591 | /* | ||
2592 | * We need to stop all tracing on all CPUS to read the | ||
2593 | * the next buffer. This is a bit expensive, but is | ||
2594 | * not done often. We fill all what we can read, | ||
2595 | * and then release the locks again. | ||
2596 | */ | ||
2597 | |||
2598 | cpus_clear(mask); | ||
2599 | local_irq_save(flags); | ||
2600 | #ifdef CONFIG_FTRACE | ||
2601 | ftrace_save = ftrace_enabled; | ||
2602 | ftrace_enabled = 0; | ||
2603 | #endif | ||
2604 | smp_wmb(); | ||
2605 | for_each_tracing_cpu(cpu) { | ||
2606 | data = iter->tr->data[cpu]; | ||
2607 | |||
2608 | if (!head_page(data) || !data->trace_idx) | ||
2609 | continue; | ||
2610 | |||
2611 | atomic_inc(&data->disabled); | ||
2612 | cpu_set(cpu, mask); | ||
2613 | } | ||
2614 | |||
2615 | for_each_cpu_mask(cpu, mask) { | ||
2616 | data = iter->tr->data[cpu]; | ||
2617 | __raw_spin_lock(&data->lock); | ||
2618 | |||
2619 | if (data->overrun > iter->last_overrun[cpu]) | ||
2620 | iter->overrun[cpu] += | ||
2621 | data->overrun - iter->last_overrun[cpu]; | ||
2622 | iter->last_overrun[cpu] = data->overrun; | ||
2623 | } | ||
2624 | |||
2625 | while (find_next_entry_inc(iter) != NULL) { | 3254 | while (find_next_entry_inc(iter) != NULL) { |
2626 | int ret; | 3255 | enum print_line_t ret; |
2627 | int len = iter->seq.len; | 3256 | int len = iter->seq.len; |
2628 | 3257 | ||
2629 | ret = print_trace_line(iter); | 3258 | ret = print_trace_line(iter); |
2630 | if (!ret) { | 3259 | if (ret == TRACE_TYPE_PARTIAL_LINE) { |
2631 | /* don't print partial lines */ | 3260 | /* don't print partial lines */ |
2632 | iter->seq.len = len; | 3261 | iter->seq.len = len; |
2633 | break; | 3262 | break; |
@@ -2639,26 +3268,17 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, | |||
2639 | break; | 3268 | break; |
2640 | } | 3269 | } |
2641 | 3270 | ||
2642 | for_each_cpu_mask(cpu, mask) { | ||
2643 | data = iter->tr->data[cpu]; | ||
2644 | __raw_spin_unlock(&data->lock); | ||
2645 | } | ||
2646 | |||
2647 | for_each_cpu_mask(cpu, mask) { | ||
2648 | data = iter->tr->data[cpu]; | ||
2649 | atomic_dec(&data->disabled); | ||
2650 | } | ||
2651 | #ifdef CONFIG_FTRACE | ||
2652 | ftrace_enabled = ftrace_save; | ||
2653 | #endif | ||
2654 | local_irq_restore(flags); | ||
2655 | |||
2656 | /* Now copy what we have to the user */ | 3271 | /* Now copy what we have to the user */ |
2657 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); | 3272 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); |
2658 | if (iter->seq.readpos >= iter->seq.len) | 3273 | if (iter->seq.readpos >= iter->seq.len) |
2659 | trace_seq_reset(&iter->seq); | 3274 | trace_seq_reset(&iter->seq); |
3275 | |||
3276 | /* | ||
3277 | * If there was nothing to send to the user, in spite of consuming | ||
3278 | * trace entries, go back to wait for more entries. | ||
3279 | */ | ||
2660 | if (sret == -EBUSY) | 3280 | if (sret == -EBUSY) |
2661 | sret = 0; | 3281 | goto waitagain; |
2662 | 3282 | ||
2663 | out: | 3283 | out: |
2664 | mutex_unlock(&trace_types_lock); | 3284 | mutex_unlock(&trace_types_lock); |
@@ -2674,7 +3294,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf, | |||
2674 | char buf[64]; | 3294 | char buf[64]; |
2675 | int r; | 3295 | int r; |
2676 | 3296 | ||
2677 | r = sprintf(buf, "%lu\n", tr->entries); | 3297 | r = sprintf(buf, "%lu\n", tr->entries >> 10); |
2678 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 3298 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
2679 | } | 3299 | } |
2680 | 3300 | ||
@@ -2684,7 +3304,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
2684 | { | 3304 | { |
2685 | unsigned long val; | 3305 | unsigned long val; |
2686 | char buf[64]; | 3306 | char buf[64]; |
2687 | int i, ret; | 3307 | int ret, cpu; |
2688 | 3308 | ||
2689 | if (cnt >= sizeof(buf)) | 3309 | if (cnt >= sizeof(buf)) |
2690 | return -EINVAL; | 3310 | return -EINVAL; |
@@ -2704,71 +3324,109 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
2704 | 3324 | ||
2705 | mutex_lock(&trace_types_lock); | 3325 | mutex_lock(&trace_types_lock); |
2706 | 3326 | ||
2707 | if (current_trace != &no_tracer) { | 3327 | tracing_stop(); |
2708 | cnt = -EBUSY; | ||
2709 | pr_info("ftrace: set current_tracer to none" | ||
2710 | " before modifying buffer size\n"); | ||
2711 | goto out; | ||
2712 | } | ||
2713 | |||
2714 | if (val > global_trace.entries) { | ||
2715 | long pages_requested; | ||
2716 | unsigned long freeable_pages; | ||
2717 | |||
2718 | /* make sure we have enough memory before mapping */ | ||
2719 | pages_requested = | ||
2720 | (val + (ENTRIES_PER_PAGE-1)) / ENTRIES_PER_PAGE; | ||
2721 | |||
2722 | /* account for each buffer (and max_tr) */ | ||
2723 | pages_requested *= tracing_nr_buffers * 2; | ||
2724 | 3328 | ||
2725 | /* Check for overflow */ | 3329 | /* disable all cpu buffers */ |
2726 | if (pages_requested < 0) { | 3330 | for_each_tracing_cpu(cpu) { |
2727 | cnt = -ENOMEM; | 3331 | if (global_trace.data[cpu]) |
2728 | goto out; | 3332 | atomic_inc(&global_trace.data[cpu]->disabled); |
2729 | } | 3333 | if (max_tr.data[cpu]) |
3334 | atomic_inc(&max_tr.data[cpu]->disabled); | ||
3335 | } | ||
2730 | 3336 | ||
2731 | freeable_pages = determine_dirtyable_memory(); | 3337 | /* value is in KB */ |
3338 | val <<= 10; | ||
2732 | 3339 | ||
2733 | /* we only allow to request 1/4 of useable memory */ | 3340 | if (val != global_trace.entries) { |
2734 | if (pages_requested > | 3341 | ret = ring_buffer_resize(global_trace.buffer, val); |
2735 | ((freeable_pages + tracing_pages_allocated) / 4)) { | 3342 | if (ret < 0) { |
2736 | cnt = -ENOMEM; | 3343 | cnt = ret; |
2737 | goto out; | 3344 | goto out; |
2738 | } | 3345 | } |
2739 | 3346 | ||
2740 | while (global_trace.entries < val) { | 3347 | ret = ring_buffer_resize(max_tr.buffer, val); |
2741 | if (trace_alloc_page()) { | 3348 | if (ret < 0) { |
2742 | cnt = -ENOMEM; | 3349 | int r; |
2743 | goto out; | 3350 | cnt = ret; |
3351 | r = ring_buffer_resize(global_trace.buffer, | ||
3352 | global_trace.entries); | ||
3353 | if (r < 0) { | ||
3354 | /* AARGH! We are left with a | ||
3355 | * different-sized max buffer!!!! */ | ||
3356 | WARN_ON(1); | ||
3357 | tracing_disabled = 1; | ||
2744 | } | 3358 | } |
2745 | /* double check that we don't go over the known pages */ | 3359 | goto out; |
2746 | if (tracing_pages_allocated > pages_requested) | ||
2747 | break; | ||
2748 | } | 3360 | } |
2749 | 3361 | ||
2750 | } else { | 3362 | global_trace.entries = val; |
2751 | /* include the number of entries in val (inc of page entries) */ | ||
2752 | while (global_trace.entries > val + (ENTRIES_PER_PAGE - 1)) | ||
2753 | trace_free_page(); | ||
2754 | } | 3363 | } |
2755 | 3364 | ||
2756 | /* check integrity */ | ||
2757 | for_each_tracing_cpu(i) | ||
2758 | check_pages(global_trace.data[i]); | ||
2759 | |||
2760 | filp->f_pos += cnt; | 3365 | filp->f_pos += cnt; |
2761 | 3366 | ||
2762 | /* If check pages failed, return ENOMEM */ | 3367 | /* If check pages failed, return ENOMEM */ |
2763 | if (tracing_disabled) | 3368 | if (tracing_disabled) |
2764 | cnt = -ENOMEM; | 3369 | cnt = -ENOMEM; |
2765 | out: | 3370 | out: |
3371 | for_each_tracing_cpu(cpu) { | ||
3372 | if (global_trace.data[cpu]) | ||
3373 | atomic_dec(&global_trace.data[cpu]->disabled); | ||
3374 | if (max_tr.data[cpu]) | ||
3375 | atomic_dec(&max_tr.data[cpu]->disabled); | ||
3376 | } | ||
3377 | |||
3378 | tracing_start(); | ||
2766 | max_tr.entries = global_trace.entries; | 3379 | max_tr.entries = global_trace.entries; |
2767 | mutex_unlock(&trace_types_lock); | 3380 | mutex_unlock(&trace_types_lock); |
2768 | 3381 | ||
2769 | return cnt; | 3382 | return cnt; |
2770 | } | 3383 | } |
2771 | 3384 | ||
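Note the user-visible unit change: the written value is now interpreted in KB ("val <<= 10"), matching the file's rename to buffer_size_kb further down. A hedged userspace sketch, assuming debugfs is mounted on /debug as in the README text above, that resizes the per-cpu buffers to 2 MB:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/debug/tracing/buffer_size_kb", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* interpreted in KB by tracing_entries_write() */
		if (write(fd, "2048", 4) != 4)
			perror("write");
		close(fd);
		return 0;
	}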
3385 | static int mark_printk(const char *fmt, ...) | ||
3386 | { | ||
3387 | int ret; | ||
3388 | va_list args; | ||
3389 | va_start(args, fmt); | ||
3390 | ret = trace_vprintk(0, -1, fmt, args); | ||
3391 | va_end(args); | ||
3392 | return ret; | ||
3393 | } | ||
3394 | |||
3395 | static ssize_t | ||
3396 | tracing_mark_write(struct file *filp, const char __user *ubuf, | ||
3397 | size_t cnt, loff_t *fpos) | ||
3398 | { | ||
3399 | char *buf; | ||
3400 | char *end; | ||
3401 | |||
3402 | if (tracing_disabled) | ||
3403 | return -EINVAL; | ||
3404 | |||
3405 | if (cnt > TRACE_BUF_SIZE) | ||
3406 | cnt = TRACE_BUF_SIZE; | ||
3407 | |||
3408 | buf = kmalloc(cnt + 1, GFP_KERNEL); | ||
3409 | if (buf == NULL) | ||
3410 | return -ENOMEM; | ||
3411 | |||
3412 | if (copy_from_user(buf, ubuf, cnt)) { | ||
3413 | kfree(buf); | ||
3414 | return -EFAULT; | ||
3415 | } | ||
3416 | |||
3417 | /* Cut at the first NUL or newline. */ | ||
3418 | buf[cnt] = '\0'; | ||
3419 | end = strchr(buf, '\n'); | ||
3420 | if (end) | ||
3421 | *end = '\0'; | ||
3422 | |||
3423 | cnt = mark_printk("%s\n", buf); | ||
3424 | kfree(buf); | ||
3425 | *fpos += cnt; | ||
3426 | |||
3427 | return cnt; | ||
3428 | } | ||
3429 | |||
2772 | static struct file_operations tracing_max_lat_fops = { | 3430 | static struct file_operations tracing_max_lat_fops = { |
2773 | .open = tracing_open_generic, | 3431 | .open = tracing_open_generic, |
2774 | .read = tracing_max_lat_read, | 3432 | .read = tracing_max_lat_read, |
@@ -2800,24 +3458,45 @@ static struct file_operations tracing_entries_fops = { | |||
2800 | .write = tracing_entries_write, | 3458 | .write = tracing_entries_write, |
2801 | }; | 3459 | }; |
2802 | 3460 | ||
3461 | static struct file_operations tracing_mark_fops = { | ||
3462 | .open = tracing_open_generic, | ||
3463 | .write = tracing_mark_write, | ||
3464 | }; | ||
3465 | |||
2803 | #ifdef CONFIG_DYNAMIC_FTRACE | 3466 | #ifdef CONFIG_DYNAMIC_FTRACE |
2804 | 3467 | ||
3468 | int __weak ftrace_arch_read_dyn_info(char *buf, int size) | ||
3469 | { | ||
3470 | return 0; | ||
3471 | } | ||
3472 | |||
2805 | static ssize_t | 3473 | static ssize_t |
2806 | tracing_read_long(struct file *filp, char __user *ubuf, | 3474 | tracing_read_dyn_info(struct file *filp, char __user *ubuf, |
2807 | size_t cnt, loff_t *ppos) | 3475 | size_t cnt, loff_t *ppos) |
2808 | { | 3476 | { |
3477 | static char ftrace_dyn_info_buffer[1024]; | ||
3478 | static DEFINE_MUTEX(dyn_info_mutex); | ||
2809 | unsigned long *p = filp->private_data; | 3479 | unsigned long *p = filp->private_data; |
2810 | char buf[64]; | 3480 | char *buf = ftrace_dyn_info_buffer; |
3481 | int size = ARRAY_SIZE(ftrace_dyn_info_buffer); | ||
2811 | int r; | 3482 | int r; |
2812 | 3483 | ||
2813 | r = sprintf(buf, "%ld\n", *p); | 3484 | mutex_lock(&dyn_info_mutex); |
3485 | r = sprintf(buf, "%ld ", *p); | ||
2814 | 3486 | ||
2815 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 3487 | r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r); |
3488 | buf[r++] = '\n'; | ||
3489 | |||
3490 | r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
3491 | |||
3492 | mutex_unlock(&dyn_info_mutex); | ||
3493 | |||
3494 | return r; | ||
2816 | } | 3495 | } |
2817 | 3496 | ||
2818 | static struct file_operations tracing_read_long_fops = { | 3497 | static struct file_operations tracing_dyn_info_fops = { |
2819 | .open = tracing_open_generic, | 3498 | .open = tracing_open_generic, |
2820 | .read = tracing_read_long, | 3499 | .read = tracing_read_dyn_info, |
2821 | }; | 3500 | }; |
2822 | #endif | 3501 | #endif |
2823 | 3502 | ||
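The __weak ftrace_arch_read_dyn_info() above gives architectures a hook to append text after the total patch count; a hedged sketch of an override (the counter is invented), returning the number of characters added:

	static unsigned long my_arch_nops_patched;	/* hypothetical counter */

	int ftrace_arch_read_dyn_info(char *buf, int size)
	{
		/* appended after the "%ld " that tracing_read_dyn_info() prints */
		return snprintf(buf, size, "nops patched:%lu ",
				my_arch_nops_patched);
	}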
@@ -2846,7 +3525,7 @@ struct dentry *tracing_init_dentry(void) | |||
2846 | #include "trace_selftest.c" | 3525 | #include "trace_selftest.c" |
2847 | #endif | 3526 | #endif |
2848 | 3527 | ||
2849 | static __init void tracer_init_debugfs(void) | 3528 | static __init int tracer_init_debugfs(void) |
2850 | { | 3529 | { |
2851 | struct dentry *d_tracer; | 3530 | struct dentry *d_tracer; |
2852 | struct dentry *entry; | 3531 | struct dentry *entry; |
@@ -2858,10 +3537,10 @@ static __init void tracer_init_debugfs(void) | |||
2858 | if (!entry) | 3537 | if (!entry) |
2859 | pr_warning("Could not create debugfs 'tracing_enabled' entry\n"); | 3538 | pr_warning("Could not create debugfs 'tracing_enabled' entry\n"); |
2860 | 3539 | ||
2861 | entry = debugfs_create_file("iter_ctrl", 0644, d_tracer, | 3540 | entry = debugfs_create_file("trace_options", 0644, d_tracer, |
2862 | NULL, &tracing_iter_fops); | 3541 | NULL, &tracing_iter_fops); |
2863 | if (!entry) | 3542 | if (!entry) |
2864 | pr_warning("Could not create debugfs 'iter_ctrl' entry\n"); | 3543 | pr_warning("Could not create debugfs 'trace_options' entry\n"); |
2865 | 3544 | ||
2866 | entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer, | 3545 | entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer, |
2867 | NULL, &tracing_cpumask_fops); | 3546 | NULL, &tracing_cpumask_fops); |
@@ -2881,12 +3560,12 @@ static __init void tracer_init_debugfs(void) | |||
2881 | entry = debugfs_create_file("available_tracers", 0444, d_tracer, | 3560 | entry = debugfs_create_file("available_tracers", 0444, d_tracer, |
2882 | &global_trace, &show_traces_fops); | 3561 | &global_trace, &show_traces_fops); |
2883 | if (!entry) | 3562 | if (!entry) |
2884 | pr_warning("Could not create debugfs 'trace' entry\n"); | 3563 | pr_warning("Could not create debugfs 'available_tracers' entry\n"); |
2885 | 3564 | ||
2886 | entry = debugfs_create_file("current_tracer", 0444, d_tracer, | 3565 | entry = debugfs_create_file("current_tracer", 0444, d_tracer, |
2887 | &global_trace, &set_tracer_fops); | 3566 | &global_trace, &set_tracer_fops); |
2888 | if (!entry) | 3567 | if (!entry) |
2889 | pr_warning("Could not create debugfs 'trace' entry\n"); | 3568 | pr_warning("Could not create debugfs 'current_tracer' entry\n"); |
2890 | 3569 | ||
2891 | entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer, | 3570 | entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer, |
2892 | &tracing_max_latency, | 3571 | &tracing_max_latency, |
@@ -2899,7 +3578,7 @@ static __init void tracer_init_debugfs(void) | |||
2899 | &tracing_thresh, &tracing_max_lat_fops); | 3578 | &tracing_thresh, &tracing_max_lat_fops); |
2900 | if (!entry) | 3579 | if (!entry) |
2901 | pr_warning("Could not create debugfs " | 3580 | pr_warning("Could not create debugfs " |
2902 | "'tracing_threash' entry\n"); | 3581 | "'tracing_thresh' entry\n"); |
2903 | entry = debugfs_create_file("README", 0644, d_tracer, | 3582 | entry = debugfs_create_file("README", 0644, d_tracer, |
2904 | NULL, &tracing_readme_fops); | 3583 | NULL, &tracing_readme_fops); |
2905 | if (!entry) | 3584 | if (!entry) |
@@ -2909,18 +3588,24 @@ static __init void tracer_init_debugfs(void) | |||
2909 | NULL, &tracing_pipe_fops); | 3588 | NULL, &tracing_pipe_fops); |
2910 | if (!entry) | 3589 | if (!entry) |
2911 | pr_warning("Could not create debugfs " | 3590 | pr_warning("Could not create debugfs " |
2912 | "'tracing_threash' entry\n"); | 3591 | "'trace_pipe' entry\n"); |
2913 | 3592 | ||
2914 | entry = debugfs_create_file("trace_entries", 0644, d_tracer, | 3593 | entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer, |
2915 | &global_trace, &tracing_entries_fops); | 3594 | &global_trace, &tracing_entries_fops); |
2916 | if (!entry) | 3595 | if (!entry) |
2917 | pr_warning("Could not create debugfs " | 3596 | pr_warning("Could not create debugfs " |
2918 | "'tracing_threash' entry\n"); | 3597 | "'buffer_size_kb' entry\n"); |
3598 | |||
3599 | entry = debugfs_create_file("trace_marker", 0220, d_tracer, | ||
3600 | NULL, &tracing_mark_fops); | ||
3601 | if (!entry) | ||
3602 | pr_warning("Could not create debugfs " | ||
3603 | "'trace_marker' entry\n"); | ||
2919 | 3604 | ||
2920 | #ifdef CONFIG_DYNAMIC_FTRACE | 3605 | #ifdef CONFIG_DYNAMIC_FTRACE |
2921 | entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer, | 3606 | entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer, |
2922 | &ftrace_update_tot_cnt, | 3607 | &ftrace_update_tot_cnt, |
2923 | &tracing_read_long_fops); | 3608 | &tracing_dyn_info_fops); |
2924 | if (!entry) | 3609 | if (!entry) |
2925 | pr_warning("Could not create debugfs " | 3610 | pr_warning("Could not create debugfs " |
2926 | "'dyn_ftrace_total_info' entry\n"); | 3611 | "'dyn_ftrace_total_info' entry\n"); |
@@ -2928,230 +3613,268 @@ static __init void tracer_init_debugfs(void) | |||
2928 | #ifdef CONFIG_SYSPROF_TRACER | 3613 | #ifdef CONFIG_SYSPROF_TRACER |
2929 | init_tracer_sysprof_debugfs(d_tracer); | 3614 | init_tracer_sysprof_debugfs(d_tracer); |
2930 | #endif | 3615 | #endif |
3616 | return 0; | ||
2931 | } | 3617 | } |
2932 | 3618 | ||
2933 | static int trace_alloc_page(void) | 3619 | int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args) |
2934 | { | 3620 | { |
3621 | static DEFINE_SPINLOCK(trace_buf_lock); | ||
3622 | static char trace_buf[TRACE_BUF_SIZE]; | ||
3623 | |||
3624 | struct ring_buffer_event *event; | ||
3625 | struct trace_array *tr = &global_trace; | ||
2935 | struct trace_array_cpu *data; | 3626 | struct trace_array_cpu *data; |
2936 | struct page *page, *tmp; | 3627 | int cpu, len = 0, size, pc; |
2937 | LIST_HEAD(pages); | 3628 | struct print_entry *entry; |
2938 | void *array; | 3629 | unsigned long irq_flags; |
2939 | unsigned pages_allocated = 0; | ||
2940 | int i; | ||
2941 | 3630 | ||
2942 | /* first allocate a page for each CPU */ | 3631 | if (tracing_disabled || tracing_selftest_running) |
2943 | for_each_tracing_cpu(i) { | 3632 | return 0; |
2944 | array = (void *)__get_free_page(GFP_KERNEL); | ||
2945 | if (array == NULL) { | ||
2946 | printk(KERN_ERR "tracer: failed to allocate page" | ||
2947 | "for trace buffer!\n"); | ||
2948 | goto free_pages; | ||
2949 | } | ||
2950 | 3633 | ||
2951 | pages_allocated++; | 3634 | pc = preempt_count(); |
2952 | page = virt_to_page(array); | 3635 | preempt_disable_notrace(); |
2953 | list_add(&page->lru, &pages); | 3636 | cpu = raw_smp_processor_id(); |
3637 | data = tr->data[cpu]; | ||
2954 | 3638 | ||
2955 | /* Only allocate if we are actually using the max trace */ | 3639 | if (unlikely(atomic_read(&data->disabled))) |
2956 | #ifdef CONFIG_TRACER_MAX_TRACE | 3640 | goto out; |
2957 | array = (void *)__get_free_page(GFP_KERNEL); | ||
2958 | if (array == NULL) { | ||
2959 | printk(KERN_ERR "tracer: failed to allocate page" | ||
2960 | "for trace buffer!\n"); | ||
2961 | goto free_pages; | ||
2962 | } | ||
2963 | pages_allocated++; | ||
2964 | page = virt_to_page(array); | ||
2965 | list_add(&page->lru, &pages); | ||
2966 | #endif | ||
2967 | } | ||
2968 | 3641 | ||
2969 | /* Now that we successfully allocate a page per CPU, add them */ | 3642 | pause_graph_tracing(); |
2970 | for_each_tracing_cpu(i) { | 3643 | spin_lock_irqsave(&trace_buf_lock, irq_flags); |
2971 | data = global_trace.data[i]; | 3644 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); |
2972 | page = list_entry(pages.next, struct page, lru); | 3645 | |
2973 | list_del_init(&page->lru); | 3646 | len = min(len, TRACE_BUF_SIZE-1); |
2974 | list_add_tail(&page->lru, &data->trace_pages); | 3647 | trace_buf[len] = 0; |
2975 | ClearPageLRU(page); | 3648 | |
3649 | size = sizeof(*entry) + len + 1; | ||
3650 | event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags); | ||
3651 | if (!event) | ||
3652 | goto out_unlock; | ||
3653 | entry = ring_buffer_event_data(event); | ||
3654 | tracing_generic_entry_update(&entry->ent, irq_flags, pc); | ||
3655 | entry->ent.type = TRACE_PRINT; | ||
3656 | entry->ip = ip; | ||
3657 | entry->depth = depth; | ||
3658 | |||
3659 | memcpy(&entry->buf, trace_buf, len); | ||
3660 | entry->buf[len] = 0; | ||
3661 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
3662 | |||
3663 | out_unlock: | ||
3664 | spin_unlock_irqrestore(&trace_buf_lock, irq_flags); | ||
3665 | unpause_graph_tracing(); | ||
3666 | out: | ||
3667 | preempt_enable_notrace(); | ||
2976 | 3668 | ||
2977 | #ifdef CONFIG_TRACER_MAX_TRACE | 3669 | return len; |
2978 | data = max_tr.data[i]; | 3670 | } |
2979 | page = list_entry(pages.next, struct page, lru); | 3671 | EXPORT_SYMBOL_GPL(trace_vprintk); |
2980 | list_del_init(&page->lru); | ||
2981 | list_add_tail(&page->lru, &data->trace_pages); | ||
2982 | SetPageLRU(page); | ||
2983 | #endif | ||
2984 | } | ||
2985 | tracing_pages_allocated += pages_allocated; | ||
2986 | global_trace.entries += ENTRIES_PER_PAGE; | ||
2987 | 3672 | ||
2988 | return 0; | 3673 | int __ftrace_printk(unsigned long ip, const char *fmt, ...) |
3674 | { | ||
3675 | int ret; | ||
3676 | va_list ap; | ||
2989 | 3677 | ||
2990 | free_pages: | 3678 | if (!(trace_flags & TRACE_ITER_PRINTK)) |
2991 | list_for_each_entry_safe(page, tmp, &pages, lru) { | 3679 | return 0; |
2992 | list_del_init(&page->lru); | 3680 | |
2993 | __free_page(page); | 3681 | va_start(ap, fmt); |
3682 | ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap); | ||
3683 | va_end(ap); | ||
3684 | return ret; | ||
3685 | } | ||
3686 | EXPORT_SYMBOL_GPL(__ftrace_printk); | ||
3687 | |||
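A hedged sketch of a caller, assuming the usual ftrace_printk() wrapper macro that supplies _THIS_IP_ for the ip argument; the entry only reaches the ring buffer while TRACE_ITER_PRINTK is set in trace_flags, per the test above:

	static void my_probe_hit(unsigned long addr, unsigned long ns)
	{
		/* formatted by trace_vprintk() into a TRACE_PRINT entry */
		ftrace_printk("probe at %lx took %lu ns\n", addr, ns);
	}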
3688 | static int trace_panic_handler(struct notifier_block *this, | ||
3689 | unsigned long event, void *unused) | ||
3690 | { | ||
3691 | if (ftrace_dump_on_oops) | ||
3692 | ftrace_dump(); | ||
3693 | return NOTIFY_OK; | ||
3694 | } | ||
3695 | |||
3696 | static struct notifier_block trace_panic_notifier = { | ||
3697 | .notifier_call = trace_panic_handler, | ||
3698 | .next = NULL, | ||
3699 | .priority = 150 /* priority: INT_MAX >= x >= 0 */ | ||
3700 | }; | ||
3701 | |||
3702 | static int trace_die_handler(struct notifier_block *self, | ||
3703 | unsigned long val, | ||
3704 | void *data) | ||
3705 | { | ||
3706 | switch (val) { | ||
3707 | case DIE_OOPS: | ||
3708 | if (ftrace_dump_on_oops) | ||
3709 | ftrace_dump(); | ||
3710 | break; | ||
3711 | default: | ||
3712 | break; | ||
2994 | } | 3713 | } |
2995 | return -ENOMEM; | 3714 | return NOTIFY_OK; |
3715 | } | ||
3716 | |||
3717 | static struct notifier_block trace_die_notifier = { | ||
3718 | .notifier_call = trace_die_handler, | ||
3719 | .priority = 200 | ||
3720 | }; | ||
3721 | |||
3722 | /* | ||
3723 | * printk is limited to 1024 bytes; we really don't need it that big. | ||
3724 | * Nothing should be printing 1000 characters anyway. | ||
3725 | */ | ||
3726 | #define TRACE_MAX_PRINT 1000 | ||
3727 | |||
3728 | /* | ||
3729 | * Define KERN_TRACE here so that we have one place to modify | ||
3730 | * it if we decide to change the log level at which the ftrace | ||
3731 | * dump should be printed. | ||
3732 | */ | ||
3733 | #define KERN_TRACE KERN_INFO | ||
3734 | |||
3735 | static void | ||
3736 | trace_printk_seq(struct trace_seq *s) | ||
3737 | { | ||
3738 | /* Probably should print a warning here. */ | ||
3739 | if (s->len >= TRACE_MAX_PRINT) | ||
3740 | s->len = TRACE_MAX_PRINT; | ||
3741 | |||
3742 | /* should be zero-terminated, but we are paranoid. */ | ||
3743 | s->buffer[s->len] = 0; | ||
3744 | |||
3745 | printk(KERN_TRACE "%s", s->buffer); | ||
3746 | |||
3747 | trace_seq_reset(s); | ||
2996 | } | 3748 | } |
2997 | 3749 | ||
2998 | static int trace_free_page(void) | 3750 | void ftrace_dump(void) |
2999 | { | 3751 | { |
3000 | struct trace_array_cpu *data; | 3752 | static DEFINE_SPINLOCK(ftrace_dump_lock); |
3001 | struct page *page; | 3753 | /* use static because iter can be a bit big for the stack */ |
3002 | struct list_head *p; | 3754 | static struct trace_iterator iter; |
3003 | int i; | 3755 | static cpumask_t mask; |
3004 | int ret = 0; | 3756 | static int dump_ran; |
3757 | unsigned long flags; | ||
3758 | int cnt = 0, cpu; | ||
3005 | 3759 | ||
3006 | /* free one page from each buffer */ | 3760 | /* only one dump */ |
3007 | for_each_tracing_cpu(i) { | 3761 | spin_lock_irqsave(&ftrace_dump_lock, flags); |
3008 | data = global_trace.data[i]; | 3762 | if (dump_ran) |
3009 | p = data->trace_pages.next; | 3763 | goto out; |
3010 | if (p == &data->trace_pages) { | ||
3011 | /* should never happen */ | ||
3012 | WARN_ON(1); | ||
3013 | tracing_disabled = 1; | ||
3014 | ret = -1; | ||
3015 | break; | ||
3016 | } | ||
3017 | page = list_entry(p, struct page, lru); | ||
3018 | ClearPageLRU(page); | ||
3019 | list_del(&page->lru); | ||
3020 | tracing_pages_allocated--; | ||
3021 | tracing_pages_allocated--; | ||
3022 | __free_page(page); | ||
3023 | 3764 | ||
3024 | tracing_reset(data); | 3765 | dump_ran = 1; |
3025 | 3766 | ||
3026 | #ifdef CONFIG_TRACER_MAX_TRACE | 3767 | /* No turning back! */ |
3027 | data = max_tr.data[i]; | 3768 | ftrace_kill(); |
3028 | p = data->trace_pages.next; | 3769 | |
3029 | if (p == &data->trace_pages) { | 3770 | for_each_tracing_cpu(cpu) { |
3030 | /* should never happen */ | 3771 | atomic_inc(&global_trace.data[cpu]->disabled); |
3031 | WARN_ON(1); | 3772 | } |
3032 | tracing_disabled = 1; | 3773 | |
3033 | ret = -1; | 3774 | /* don't look at user memory in panic mode */ |
3034 | break; | 3775 | trace_flags &= ~TRACE_ITER_SYM_USEROBJ; |
3776 | |||
3777 | printk(KERN_TRACE "Dumping ftrace buffer:\n"); | ||
3778 | |||
3779 | iter.tr = &global_trace; | ||
3780 | iter.trace = current_trace; | ||
3781 | |||
3782 | /* | ||
3783 | * We need to stop all tracing on all CPUs to read | ||
3784 | * the next buffer. This is a bit expensive, but is | ||
3785 | * not done often. We fill all we can read, | ||
3786 | * and then release the locks again. | ||
3787 | */ | ||
3788 | |||
3789 | cpus_clear(mask); | ||
3790 | |||
3791 | while (!trace_empty(&iter)) { | ||
3792 | |||
3793 | if (!cnt) | ||
3794 | printk(KERN_TRACE "---------------------------------\n"); | ||
3795 | |||
3796 | cnt++; | ||
3797 | |||
3798 | /* reset all but tr, trace, and overruns */ | ||
3799 | memset(&iter.seq, 0, | ||
3800 | sizeof(struct trace_iterator) - | ||
3801 | offsetof(struct trace_iterator, seq)); | ||
3802 | iter.iter_flags |= TRACE_FILE_LAT_FMT; | ||
3803 | iter.pos = -1; | ||
3804 | |||
3805 | if (find_next_entry_inc(&iter) != NULL) { | ||
3806 | print_trace_line(&iter); | ||
3807 | trace_consume(&iter); | ||
3035 | } | 3808 | } |
3036 | page = list_entry(p, struct page, lru); | ||
3037 | ClearPageLRU(page); | ||
3038 | list_del(&page->lru); | ||
3039 | __free_page(page); | ||
3040 | 3809 | ||
3041 | tracing_reset(data); | 3810 | trace_printk_seq(&iter.seq); |
3042 | #endif | ||
3043 | } | 3811 | } |
3044 | global_trace.entries -= ENTRIES_PER_PAGE; | ||
3045 | 3812 | ||
3046 | return ret; | 3813 | if (!cnt) |
3814 | printk(KERN_TRACE " (ftrace buffer empty)\n"); | ||
3815 | else | ||
3816 | printk(KERN_TRACE "---------------------------------\n"); | ||
3817 | |||
3818 | out: | ||
3819 | spin_unlock_irqrestore(&ftrace_dump_lock, flags); | ||
3047 | } | 3820 | } |
3048 | 3821 | ||
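ftrace_dump() is deliberately one-shot (dump_ran) and calls ftrace_kill(), so tracing never resumes afterwards. Besides the oops/panic notifiers registered below, a subsystem could call it on its own fatal-error path; a hedged sketch with an invented function name:

	static void my_dev_fatal(const char *why)
	{
		printk(KERN_CRIT "mydev: fatal error: %s\n", why);
		/* spill whatever the ring buffer still holds to the console */
		ftrace_dump();
	}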
3049 | __init static int tracer_alloc_buffers(void) | 3822 | __init static int tracer_alloc_buffers(void) |
3050 | { | 3823 | { |
3051 | struct trace_array_cpu *data; | 3824 | struct trace_array_cpu *data; |
3052 | void *array; | ||
3053 | struct page *page; | ||
3054 | int pages = 0; | ||
3055 | int ret = -ENOMEM; | ||
3056 | int i; | 3825 | int i; |
3057 | 3826 | ||
3058 | /* TODO: make the number of buffers hot pluggable with CPUS */ | 3827 | /* TODO: make the number of buffers hot pluggable with CPUS */ |
3059 | tracing_nr_buffers = num_possible_cpus(); | ||
3060 | tracing_buffer_mask = cpu_possible_map; | 3828 | tracing_buffer_mask = cpu_possible_map; |
3061 | 3829 | ||
3062 | /* Allocate the first page for all buffers */ | 3830 | global_trace.buffer = ring_buffer_alloc(trace_buf_size, |
3063 | for_each_tracing_cpu(i) { | 3831 | TRACE_BUFFER_FLAGS); |
3064 | data = global_trace.data[i] = &per_cpu(global_trace_cpu, i); | 3832 | if (!global_trace.buffer) { |
3065 | max_tr.data[i] = &per_cpu(max_data, i); | 3833 | printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); |
3066 | 3834 | WARN_ON(1); | |
3067 | array = (void *)__get_free_page(GFP_KERNEL); | 3835 | return 0; |
3068 | if (array == NULL) { | 3836 | } |
3069 | printk(KERN_ERR "tracer: failed to allocate page" | 3837 | global_trace.entries = ring_buffer_size(global_trace.buffer); |
3070 | "for trace buffer!\n"); | ||
3071 | goto free_buffers; | ||
3072 | } | ||
3073 | |||
3074 | /* set the array to the list */ | ||
3075 | INIT_LIST_HEAD(&data->trace_pages); | ||
3076 | page = virt_to_page(array); | ||
3077 | list_add(&page->lru, &data->trace_pages); | ||
3078 | /* use the LRU flag to differentiate the two buffers */ | ||
3079 | ClearPageLRU(page); | ||
3080 | |||
3081 | data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | ||
3082 | max_tr.data[i]->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | ||
3083 | 3838 | ||
3084 | /* Only allocate if we are actually using the max trace */ | ||
3085 | #ifdef CONFIG_TRACER_MAX_TRACE | 3839 | #ifdef CONFIG_TRACER_MAX_TRACE |
3086 | array = (void *)__get_free_page(GFP_KERNEL); | 3840 | max_tr.buffer = ring_buffer_alloc(trace_buf_size, |
3087 | if (array == NULL) { | 3841 | TRACE_BUFFER_FLAGS); |
3088 | printk(KERN_ERR "tracer: failed to allocate page" | 3842 | if (!max_tr.buffer) { |
3089 | "for trace buffer!\n"); | 3843 | printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); |
3090 | goto free_buffers; | 3844 | WARN_ON(1); |
3091 | } | 3845 | ring_buffer_free(global_trace.buffer); |
3092 | 3846 | return 0; | |
3093 | INIT_LIST_HEAD(&max_tr.data[i]->trace_pages); | ||
3094 | page = virt_to_page(array); | ||
3095 | list_add(&page->lru, &max_tr.data[i]->trace_pages); | ||
3096 | SetPageLRU(page); | ||
3097 | #endif | ||
3098 | } | 3847 | } |
3848 | max_tr.entries = ring_buffer_size(max_tr.buffer); | ||
3849 | WARN_ON(max_tr.entries != global_trace.entries); | ||
3850 | #endif | ||
3099 | 3851 | ||
3100 | /* | 3852 | /* Allocate the first page for all buffers */ |
3101 | * Since we allocate by orders of pages, we may be able to | 3853 | for_each_tracing_cpu(i) { |
3102 | * round up a bit. | 3854 | data = global_trace.data[i] = &per_cpu(global_trace_cpu, i); |
3103 | */ | 3855 | max_tr.data[i] = &per_cpu(max_data, i); |
3104 | global_trace.entries = ENTRIES_PER_PAGE; | ||
3105 | pages++; | ||
3106 | |||
3107 | while (global_trace.entries < trace_nr_entries) { | ||
3108 | if (trace_alloc_page()) | ||
3109 | break; | ||
3110 | pages++; | ||
3111 | } | 3856 | } |
3112 | max_tr.entries = global_trace.entries; | ||
3113 | |||
3114 | pr_info("tracer: %d pages allocated for %ld entries of %ld bytes\n", | ||
3115 | pages, trace_nr_entries, (long)TRACE_ENTRY_SIZE); | ||
3116 | pr_info(" actual entries %ld\n", global_trace.entries); | ||
3117 | |||
3118 | tracer_init_debugfs(); | ||
3119 | 3857 | ||
3120 | trace_init_cmdlines(); | 3858 | trace_init_cmdlines(); |
3121 | 3859 | ||
3122 | register_tracer(&no_tracer); | 3860 | register_tracer(&nop_trace); |
3123 | current_trace = &no_tracer; | 3861 | #ifdef CONFIG_BOOT_TRACER |
3862 | register_tracer(&boot_tracer); | ||
3863 | current_trace = &boot_tracer; | ||
3864 | current_trace->init(&global_trace); | ||
3865 | #else | ||
3866 | current_trace = &nop_trace; | ||
3867 | #endif | ||
3124 | 3868 | ||
3125 | /* All seems OK, enable tracing */ | 3869 | /* All seems OK, enable tracing */ |
3126 | global_trace.ctrl = tracer_enabled; | ||
3127 | tracing_disabled = 0; | 3870 | tracing_disabled = 0; |
3128 | 3871 | ||
3129 | return 0; | 3872 | atomic_notifier_chain_register(&panic_notifier_list, |
3873 | &trace_panic_notifier); | ||
3130 | 3874 | ||
3131 | free_buffers: | 3875 | register_die_notifier(&trace_die_notifier); |
3132 | for (i-- ; i >= 0; i--) { | ||
3133 | struct page *page, *tmp; | ||
3134 | struct trace_array_cpu *data = global_trace.data[i]; | ||
3135 | |||
3136 | if (data) { | ||
3137 | list_for_each_entry_safe(page, tmp, | ||
3138 | &data->trace_pages, lru) { | ||
3139 | list_del_init(&page->lru); | ||
3140 | __free_page(page); | ||
3141 | } | ||
3142 | } | ||
3143 | 3876 | ||
3144 | #ifdef CONFIG_TRACER_MAX_TRACE | 3877 | return 0; |
3145 | data = max_tr.data[i]; | ||
3146 | if (data) { | ||
3147 | list_for_each_entry_safe(page, tmp, | ||
3148 | &data->trace_pages, lru) { | ||
3149 | list_del_init(&page->lru); | ||
3150 | __free_page(page); | ||
3151 | } | ||
3152 | } | ||
3153 | #endif | ||
3154 | } | ||
3155 | return ret; | ||
3156 | } | 3878 | } |
3157 | fs_initcall(tracer_alloc_buffers); | 3879 | early_initcall(tracer_alloc_buffers); |
3880 | fs_initcall(tracer_init_debugfs); | ||