Diffstat (limited to 'kernel/trace')

 -rw-r--r--   kernel/trace/ftrace.c          | 147
 -rw-r--r--   kernel/trace/ring_buffer.c     | 118
 -rw-r--r--   kernel/trace/trace.c           |   1
 -rw-r--r--   kernel/trace/trace_mmiotrace.c |  16
 -rw-r--r--   kernel/trace/trace_stack.c     |  24

 5 files changed, 210 insertions(+), 96 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4a39d24568c8..78db083390f0 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -185,7 +185,6 @@ enum {
 };
 
 static int ftrace_filtered;
-static int tracing_on;
 
 static LIST_HEAD(ftrace_new_addrs);
 
@@ -327,96 +326,89 @@ ftrace_record_ip(unsigned long ip)
 
 static int
 __ftrace_replace_code(struct dyn_ftrace *rec,
-                      unsigned char *old, unsigned char *new, int enable)
+                      unsigned char *nop, int enable)
 {
         unsigned long ip, fl;
+        unsigned char *call, *old, *new;
 
         ip = rec->ip;
 
-        if (ftrace_filtered && enable) {
+        /*
+         * If this record is not to be traced and
+         * it is not enabled then do nothing.
+         *
+         * If this record is not to be traced and
+         * it is enabled then disabled it.
+         *
+         */
+        if (rec->flags & FTRACE_FL_NOTRACE) {
+                if (rec->flags & FTRACE_FL_ENABLED)
+                        rec->flags &= ~FTRACE_FL_ENABLED;
+                else
+                        return 0;
+
+        } else if (ftrace_filtered && enable) {
                 /*
-                 * If filtering is on:
-                 *
-                 * If this record is set to be filtered and
-                 * is enabled then do nothing.
-                 *
-                 * If this record is set to be filtered and
-                 * it is not enabled, enable it.
-                 *
-                 * If this record is not set to be filtered
-                 * and it is not enabled do nothing.
-                 *
-                 * If this record is set not to trace then
-                 * do nothing.
-                 *
-                 * If this record is set not to trace and
-                 * it is enabled then disable it.
-                 *
-                 * If this record is not set to be filtered and
-                 * it is enabled, disable it.
+                 * Filtering is on:
                  */
 
-                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
-                                   FTRACE_FL_ENABLED);
+                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
 
-                if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
-                    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
-                    !fl || (fl == FTRACE_FL_NOTRACE))
+                /* Record is filtered and enabled, do nothing */
+                if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
                         return 0;
 
-                /*
-                 * If it is enabled disable it,
-                 * otherwise enable it!
-                 */
-                if (fl & FTRACE_FL_ENABLED) {
-                        /* swap new and old */
-                        new = old;
-                        old = ftrace_call_replace(ip, FTRACE_ADDR);
+                /* Record is not filtered and is not enabled do nothing */
+                if (!fl)
+                        return 0;
+
+                /* Record is not filtered but enabled, disable it */
+                if (fl == FTRACE_FL_ENABLED)
                         rec->flags &= ~FTRACE_FL_ENABLED;
-                } else {
-                        new = ftrace_call_replace(ip, FTRACE_ADDR);
+                else
+                /* Otherwise record is filtered but not enabled, enable it */
                         rec->flags |= FTRACE_FL_ENABLED;
-                }
         } else {
+                /* Disable or not filtered */
 
                 if (enable) {
-                        /*
-                         * If this record is set not to trace and is
-                         * not enabled, do nothing.
-                         */
-                        fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
-                        if (fl == FTRACE_FL_NOTRACE)
-                                return 0;
-
-                        new = ftrace_call_replace(ip, FTRACE_ADDR);
-                } else
-                        old = ftrace_call_replace(ip, FTRACE_ADDR);
-
-                if (enable) {
+                        /* if record is enabled, do nothing */
                         if (rec->flags & FTRACE_FL_ENABLED)
                                 return 0;
+
                         rec->flags |= FTRACE_FL_ENABLED;
+
                 } else {
+
+                        /* if record is not enabled do nothing */
                         if (!(rec->flags & FTRACE_FL_ENABLED))
                                 return 0;
+
                         rec->flags &= ~FTRACE_FL_ENABLED;
                 }
         }
 
+        call = ftrace_call_replace(ip, FTRACE_ADDR);
+
+        if (rec->flags & FTRACE_FL_ENABLED) {
+                old = nop;
+                new = call;
+        } else {
+                old = call;
+                new = nop;
+        }
+
         return ftrace_modify_code(ip, old, new);
 }
 
 static void ftrace_replace_code(int enable)
 {
         int i, failed;
-        unsigned char *new = NULL, *old = NULL;
+        unsigned char *nop = NULL;
         struct dyn_ftrace *rec;
         struct ftrace_page *pg;
 
-        if (enable)
-                old = ftrace_nop_replace();
-        else
-                new = ftrace_nop_replace();
+        nop = ftrace_nop_replace();
 
         for (pg = ftrace_pages_start; pg; pg = pg->next) {
                 for (i = 0; i < pg->index; i++) {
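
Note: the rewrite above separates deciding from patching. The branch ladder
now only settles whether FTRACE_FL_ENABLED should be set, cleared, or left
alone, and a single tail derives the (old, new) instruction pair from that
one bit, which is why callers pass only the nop. A minimal user-space sketch
of the same decision table, with stand-in flag values (illustrative only,
not kernel code):

    #include <stdio.h>

    #define FL_FILTER  0x1  /* stand-ins for the FTRACE_FL_* bits */
    #define FL_ENABLED 0x2
    #define FL_NOTRACE 0x4

    /* Returns 1 = enable the call site, 0 = disable it, -1 = leave it alone. */
    static int desired_state(unsigned int flags, int filtered, int enable)
    {
            if (flags & FL_NOTRACE)
                    return (flags & FL_ENABLED) ? 0 : -1;

            if (filtered && enable) {
                    unsigned int fl = flags & (FL_FILTER | FL_ENABLED);

                    if (fl == (FL_FILTER | FL_ENABLED) || !fl)
                            return -1;      /* already in the right state */
                    return (fl == FL_ENABLED) ? 0 : 1;
            }

            if (enable)
                    return (flags & FL_ENABLED) ? -1 : 1;
            return (flags & FL_ENABLED) ? 0 : -1;
    }

    int main(void)
    {
            /* filtered record, not yet enabled -> enable (1) */
            printf("%d\n", desired_state(FL_FILTER, 1, 1));
            /* NOTRACE overrides an enabled record -> disable (0) */
            printf("%d\n", desired_state(FL_NOTRACE | FL_ENABLED, 1, 1));
            return 0;
    }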
@@ -434,7 +426,7 @@ static void ftrace_replace_code(int enable)
                                 unfreeze_record(rec);
                         }
 
-                        failed = __ftrace_replace_code(rec, old, new, enable);
+                        failed = __ftrace_replace_code(rec, nop, enable);
                         if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
                                 rec->flags |= FTRACE_FL_FAILED;
                                 if ((system_state == SYSTEM_BOOTING) ||
@@ -506,13 +498,10 @@ static int __ftrace_modify_code(void *data)
 {
         int *command = data;
 
-        if (*command & FTRACE_ENABLE_CALLS) {
+        if (*command & FTRACE_ENABLE_CALLS)
                 ftrace_replace_code(1);
-                tracing_on = 1;
-        } else if (*command & FTRACE_DISABLE_CALLS) {
+        else if (*command & FTRACE_DISABLE_CALLS)
                 ftrace_replace_code(0);
-                tracing_on = 0;
-        }
 
         if (*command & FTRACE_UPDATE_TRACE_FUNC)
                 ftrace_update_ftrace_func(ftrace_trace_function);
@@ -538,8 +527,7 @@ static void ftrace_startup(void)
 
         mutex_lock(&ftrace_start_lock);
         ftrace_start++;
-        if (ftrace_start == 1)
-                command |= FTRACE_ENABLE_CALLS;
+        command |= FTRACE_ENABLE_CALLS;
 
         if (saved_ftrace_func != ftrace_trace_function) {
                 saved_ftrace_func = ftrace_trace_function;
@@ -677,7 +665,7 @@ static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
 
         cnt = num_to_init / ENTRIES_PER_PAGE;
         pr_info("ftrace: allocating %ld entries in %d pages\n",
-                num_to_init, cnt);
+                num_to_init, cnt + 1);
 
         for (i = 0; i < cnt; i++) {
                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
@@ -738,6 +726,9 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
                      ((iter->flags & FTRACE_ITER_FAILURES) &&
                       !(rec->flags & FTRACE_FL_FAILED)) ||
 
+                     ((iter->flags & FTRACE_ITER_FILTER) &&
+                      !(rec->flags & FTRACE_FL_FILTER)) ||
+
                      ((iter->flags & FTRACE_ITER_NOTRACE) &&
                       !(rec->flags & FTRACE_FL_NOTRACE))) {
                         rec = NULL;
@@ -757,13 +748,11 @@ static void *t_start(struct seq_file *m, loff_t *pos)
         void *p = NULL;
         loff_t l = -1;
 
-        if (*pos != iter->pos) {
-                for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
-                        ;
-        } else {
-                l = *pos;
-                p = t_next(m, p, &l);
-        }
+        if (*pos > iter->pos)
+                *pos = iter->pos;
+
+        l = *pos;
+        p = t_next(m, p, &l);
 
         return p;
 }
@@ -774,15 +763,21 @@ static void t_stop(struct seq_file *m, void *p)
 
 static int t_show(struct seq_file *m, void *v)
 {
+        struct ftrace_iterator *iter = m->private;
         struct dyn_ftrace *rec = v;
         char str[KSYM_SYMBOL_LEN];
+        int ret = 0;
 
         if (!rec)
                 return 0;
 
         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
 
-        seq_printf(m, "%s\n", str);
+        ret = seq_printf(m, "%s\n", str);
+        if (ret < 0) {
+                iter->pos--;
+                iter->idx--;
+        }
 
         return 0;
 }
@@ -808,7 +803,7 @@ ftrace_avail_open(struct inode *inode, struct file *file)
                 return -ENOMEM;
 
         iter->pg = ftrace_pages_start;
-        iter->pos = -1;
+        iter->pos = 0;
 
         ret = seq_open(file, &show_ftrace_seq_ops);
         if (!ret) {
@@ -895,7 +890,7 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
 
         if (file->f_mode & FMODE_READ) {
                 iter->pg = ftrace_pages_start;
-                iter->pos = -1;
+                iter->pos = 0;
                 iter->flags = enable ? FTRACE_ITER_FILTER :
                         FTRACE_ITER_NOTRACE;
 
@@ -1186,7 +1181,7 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
 
         mutex_lock(&ftrace_sysctl_lock);
         mutex_lock(&ftrace_start_lock);
-        if (iter->filtered && ftrace_start && ftrace_enabled)
+        if (ftrace_start && ftrace_enabled)
                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
         mutex_unlock(&ftrace_start_lock);
         mutex_unlock(&ftrace_sysctl_lock);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 2f76193c3489..668bbb5ef2bd 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -16,14 +16,49 @@
 #include <linux/list.h>
 #include <linux/fs.h>
 
+#include "trace.h"
+
+/* Global flag to disable all recording to ring buffers */
+static int ring_buffers_off __read_mostly;
+
+/**
+ * tracing_on - enable all tracing buffers
+ *
+ * This function enables all tracing buffers that may have been
+ * disabled with tracing_off.
+ */
+void tracing_on(void)
+{
+        ring_buffers_off = 0;
+}
+
+/**
+ * tracing_off - turn off all tracing buffers
+ *
+ * This function stops all tracing buffers from recording data.
+ * It does not disable any overhead the tracers themselves may
+ * be causing. This function simply causes all recording to
+ * the ring buffers to fail.
+ */
+void tracing_off(void)
+{
+        ring_buffers_off = 1;
+}
+
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
 
 /* FIXME!!! */
 u64 ring_buffer_time_stamp(int cpu)
 {
+        u64 time;
+
+        preempt_disable_notrace();
         /* shift to debug/test normalization and TIME_EXTENTS */
-        return sched_clock() << DEBUG_SHIFT;
+        time = sched_clock() << DEBUG_SHIFT;
+        preempt_enable_notrace();
+
+        return time;
 }
 
 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
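
Note: tracing_on()/tracing_off() replace the static tracing_on flag removed
from ftrace.c above; the on/off decision moves to the one choke point every
trace event already passes through, ring-buffer reservation. The same hunk
also wraps sched_clock() in preempt_disable_notrace(), since sched_clock()
is cpu-local and the caller could otherwise migrate between reading and
using the value. A hedged example of the intended in-kernel use of
tracing_off() (the surrounding condition is made up):

    /*
     * Hypothetical debugging aid: the moment a suspect condition is seen,
     * stop all ring-buffer recording so the events leading up to it stay
     * in the buffer for later inspection through debugfs.
     */
    if (unlikely(object_is_corrupted(obj))) {       /* made-up predicate */
            tracing_off();
            WARN_ON(1);
    }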
@@ -503,6 +538,12 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
         LIST_HEAD(pages);
         int i, cpu;
 
+        /*
+         * Always succeed at resizing a non-existent buffer:
+         */
+        if (!buffer)
+                return size;
+
         size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
         size *= BUF_PAGE_SIZE;
         buffer_size = buffer->pages * BUF_PAGE_SIZE;
@@ -576,6 +617,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
                 list_del_init(&page->list);
                 free_buffer_page(page);
         }
+        mutex_unlock(&buffer->mutex);
         return -ENOMEM;
 }
 
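
Note: the one-line addition above closes a lock leak. ring_buffer_resize()
takes buffer->mutex on entry, and the allocation-failure path returned with
it still held, so any later resize would block forever. The invariant in
miniature, as a runnable user-space sketch with pthreads (names are made up):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Every exit path, including the error one, must drop the lock,
     * or the next caller deadlocks. */
    static int resize(int alloc_ok)
    {
            pthread_mutex_lock(&lock);
            if (!alloc_ok) {
                    pthread_mutex_unlock(&lock);    /* analogue of the fix */
                    return -1;
            }
            pthread_mutex_unlock(&lock);
            return 0;
    }

    int main(void)
    {
            /* without the unlock in the error path, the second call hangs */
            printf("%d %d\n", resize(0), resize(1));
            return 0;
    }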
@@ -1133,6 +1175,9 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
         struct ring_buffer_event *event;
         int cpu, resched;
 
+        if (ring_buffers_off)
+                return NULL;
+
         if (atomic_read(&buffer->record_disabled))
                 return NULL;
 
@@ -1170,7 +1215,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 
  out:
         if (resched)
-                preempt_enable_notrace();
+                preempt_enable_no_resched_notrace();
         else
                 preempt_enable_notrace();
         return NULL;
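
Note: both arms of this branch used to call plain preempt_enable_notrace(),
which may schedule. The reserve path records whether NEED_RESCHED was
already set when it disabled preemption, and the matching enable variant has
to be used on every exit so a schedule is not triggered from inside the
tracer. The pairing, as a kernel-context fragment (a sketch, not standalone
code):

    int resched = need_resched();
    preempt_disable_notrace();

    /* ... touch the per-cpu buffer ... */

    if (resched)
            preempt_enable_no_resched_notrace();    /* defer to the caller */
    else
            preempt_enable_notrace();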
@@ -1249,6 +1294,9 @@ int ring_buffer_write(struct ring_buffer *buffer,
         int ret = -EBUSY;
         int cpu, resched;
 
+        if (ring_buffers_off)
+                return -EBUSY;
+
         if (atomic_read(&buffer->record_disabled))
                 return -EBUSY;
 
@@ -2070,3 +2118,69 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
         return 0;
 }
 
+static ssize_t
+rb_simple_read(struct file *filp, char __user *ubuf,
+               size_t cnt, loff_t *ppos)
+{
+        int *p = filp->private_data;
+        char buf[64];
+        int r;
+
+        /* !ring_buffers_off == tracing_on */
+        r = sprintf(buf, "%d\n", !*p);
+
+        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+rb_simple_write(struct file *filp, const char __user *ubuf,
+                size_t cnt, loff_t *ppos)
+{
+        int *p = filp->private_data;
+        char buf[64];
+        long val;
+        int ret;
+
+        if (cnt >= sizeof(buf))
+                return -EINVAL;
+
+        if (copy_from_user(&buf, ubuf, cnt))
+                return -EFAULT;
+
+        buf[cnt] = 0;
+
+        ret = strict_strtoul(buf, 10, &val);
+        if (ret < 0)
+                return ret;
+
+        /* !ring_buffers_off == tracing_on */
+        *p = !val;
+
+        (*ppos)++;
+
+        return cnt;
+}
+
+static struct file_operations rb_simple_fops = {
+        .open           = tracing_open_generic,
+        .read           = rb_simple_read,
+        .write          = rb_simple_write,
+};
+
+
+static __init int rb_init_debugfs(void)
+{
+        struct dentry *d_tracer;
+        struct dentry *entry;
+
+        d_tracer = tracing_init_dentry();
+
+        entry = debugfs_create_file("tracing_on", 0644, d_tracer,
+                                    &ring_buffers_off, &rb_simple_fops);
+        if (!entry)
+                pr_warning("Could not create debugfs 'tracing_on' entry\n");
+
+        return 0;
+}
+
+fs_initcall(rb_init_debugfs);
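
Note: the new debugfs file gives user space a cheap kill switch for all
ring-buffer recording. A small sketch of toggling it from a program,
assuming debugfs is mounted at /sys/kernel/debug (path and error handling
are illustrative):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static int tracing_set(int on)
    {
            int fd = open("/sys/kernel/debug/tracing/tracing_on", O_WRONLY);

            if (fd < 0) {
                    perror("tracing_on");
                    return -1;
            }
            if (write(fd, on ? "1" : "0", 1) != 1)
                    perror("write");
            close(fd);
            return 0;
    }

    int main(void)
    {
            tracing_set(0); /* freeze the buffers at the interesting moment */
            /* ... reproduce the event under investigation ... */
            tracing_set(1); /* resume recording */
            return 0;
    }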
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 697eda36b86a..d86e3252f300 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1936,6 +1936,7 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
                         ring_buffer_read_finish(iter->buffer_iter[cpu]);
         }
         mutex_unlock(&trace_types_lock);
+        kfree(iter);
 
         return ERR_PTR(-ENOMEM);
 }
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index f28484618ff0..e62cbf78eab6 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -18,12 +18,14 @@ struct header_iter {
 
 static struct trace_array *mmio_trace_array;
 static bool overrun_detected;
+static unsigned long prev_overruns;
 
 static void mmio_reset_data(struct trace_array *tr)
 {
         int cpu;
 
         overrun_detected = false;
+        prev_overruns = 0;
         tr->time_start = ftrace_now(tr->cpu);
 
         for_each_online_cpu(cpu)
@@ -128,16 +130,12 @@ static void mmio_close(struct trace_iterator *iter)
 
 static unsigned long count_overruns(struct trace_iterator *iter)
 {
-        int cpu;
         unsigned long cnt = 0;
-/* FIXME: */
-#if 0
-        for_each_online_cpu(cpu) {
-                cnt += iter->overrun[cpu];
-                iter->overrun[cpu] = 0;
-        }
-#endif
-        (void)cpu;
+        unsigned long over = ring_buffer_overruns(iter->tr->buffer);
+
+        if (over > prev_overruns)
+                cnt = over - prev_overruns;
+        prev_overruns = over;
         return cnt;
 }
 
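
Note: count_overruns() now derives a per-read delta from the ring buffer's
running overrun total instead of the removed per-cpu counters. The pattern
in isolation, as a runnable user-space sketch (names are made up):

    #include <stdio.h>

    static unsigned long prev_total;        /* plays the role of prev_overruns */

    /* Given a monotonically growing total, report only the growth
     * since the previous call. */
    static unsigned long delta_since_last(unsigned long total)
    {
            unsigned long delta = 0;

            if (total > prev_total)
                    delta = total - prev_total;
            prev_total = total;
            return delta;
    }

    int main(void)
    {
            printf("%lu\n", delta_since_last(5));   /* 5 */
            printf("%lu\n", delta_since_last(8));   /* 3 */
            printf("%lu\n", delta_since_last(8));   /* 0 */
            return 0;
    }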
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index be682b62fe58..3bdb44bde4b7 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -184,11 +184,16 @@ static struct file_operations stack_max_size_fops = {
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
-        long i = (long)m->private;
+        long i;
 
         (*pos)++;
 
-        i++;
+        if (v == SEQ_START_TOKEN)
+                i = 0;
+        else {
+                i = *(long *)v;
+                i++;
+        }
 
         if (i >= max_stack_trace.nr_entries ||
             stack_dump_trace[i] == ULONG_MAX)
@@ -201,12 +206,15 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
-        void *t = &m->private;
+        void *t = SEQ_START_TOKEN;
         loff_t l = 0;
 
         local_irq_disable();
         __raw_spin_lock(&max_stack_lock);
 
+        if (*pos == 0)
+                return SEQ_START_TOKEN;
+
         for (; t && l < *pos; t = t_next(m, t, &l))
                 ;
 
@@ -235,10 +243,10 @@ static int trace_lookup_stack(struct seq_file *m, long i)
 
 static int t_show(struct seq_file *m, void *v)
 {
-        long i = *(long *)v;
+        long i;
         int size;
 
-        if (i < 0) {
+        if (v == SEQ_START_TOKEN) {
                 seq_printf(m, "        Depth    Size   Location"
                            "    (%d entries)\n"
                            "        -----    ----   --------\n",
@@ -246,6 +254,8 @@ static int t_show(struct seq_file *m, void *v)
                 return 0;
         }
 
+        i = *(long *)v;
+
         if (i >= max_stack_trace.nr_entries ||
             stack_dump_trace[i] == ULONG_MAX)
                 return 0;
@@ -275,10 +285,6 @@ static int stack_trace_open(struct inode *inode, struct file *file)
         int ret;
 
         ret = seq_open(file, &stack_trace_seq_ops);
-        if (!ret) {
-                struct seq_file *m = file->private_data;
-                m->private = (void *)-1;
-        }
 
         return ret;
 }
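
Note: the trace_stack.c hunks all serve one change: the header line is no
longer signalled by smuggling (void *)-1 through m->private, but by the
standard seq_file SEQ_START_TOKEN convention, which is why the special
setup in stack_trace_open() can go away. A minimal kernel-style sketch of
the convention (hypothetical record array, not this file's exact code):

    #include <linux/kernel.h>
    #include <linux/seq_file.h>

    static long records[] = { 10, 20, 30 };

    static void *s_start(struct seq_file *m, loff_t *pos)
    {
            if (*pos == 0)
                    return SEQ_START_TOKEN; /* first call: emit the header */
            return *pos <= ARRAY_SIZE(records) ? &records[*pos - 1] : NULL;
    }

    static void *s_next(struct seq_file *m, void *v, loff_t *pos)
    {
            (*pos)++;
            return *pos <= ARRAY_SIZE(records) ? &records[*pos - 1] : NULL;
    }

    static void s_stop(struct seq_file *m, void *v)
    {
    }

    static int s_show(struct seq_file *m, void *v)
    {
            if (v == SEQ_START_TOKEN)
                    seq_puts(m, " value\n -----\n");
            else
                    seq_printf(m, " %ld\n", *(long *)v);
            return 0;
    }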