Diffstat (limited to 'kernel/trace')

-rw-r--r--  kernel/trace/ftrace.c      | 147
-rw-r--r--  kernel/trace/ring_buffer.c | 118
-rw-r--r--  kernel/trace/trace.c       |  20

3 files changed, 205 insertions(+), 80 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4a39d24568c8..78db083390f0 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -185,7 +185,6 @@ enum {
 };
 
 static int ftrace_filtered;
-static int tracing_on;
 
 static LIST_HEAD(ftrace_new_addrs);
 
@@ -327,96 +326,89 @@ ftrace_record_ip(unsigned long ip)
 
 static int
 __ftrace_replace_code(struct dyn_ftrace *rec,
-		      unsigned char *old, unsigned char *new, int enable)
+		      unsigned char *nop, int enable)
 {
 	unsigned long ip, fl;
+	unsigned char *call, *old, *new;
 
 	ip = rec->ip;
 
-	if (ftrace_filtered && enable) {
+	/*
+	 * If this record is not to be traced and
+	 * it is not enabled then do nothing.
+	 *
+	 * If this record is not to be traced and
+	 * it is enabled then disable it.
+	 *
+	 */
+	if (rec->flags & FTRACE_FL_NOTRACE) {
+		if (rec->flags & FTRACE_FL_ENABLED)
+			rec->flags &= ~FTRACE_FL_ENABLED;
+		else
+			return 0;
+
+	} else if (ftrace_filtered && enable) {
 		/*
-		 * If filtering is on:
-		 *
-		 * If this record is set to be filtered and
-		 * is enabled then do nothing.
-		 *
-		 * If this record is set to be filtered and
-		 * it is not enabled, enable it.
-		 *
-		 * If this record is not set to be filtered
-		 * and it is not enabled do nothing.
-		 *
-		 * If this record is set not to trace then
-		 * do nothing.
-		 *
-		 * If this record is set not to trace and
-		 * it is enabled then disable it.
-		 *
-		 * If this record is not set to be filtered and
-		 * it is enabled, disable it.
+		 * Filtering is on:
 		 */
 
-		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
-				   FTRACE_FL_ENABLED);
+		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
 
-		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
-		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
-		    !fl || (fl == FTRACE_FL_NOTRACE))
+		/* Record is filtered and enabled, do nothing */
+		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
 			return 0;
 
-		/*
-		 * If it is enabled disable it,
-		 * otherwise enable it!
-		 */
-		if (fl & FTRACE_FL_ENABLED) {
-			/* swap new and old */
-			new = old;
-			old = ftrace_call_replace(ip, FTRACE_ADDR);
+		/* Record is not filtered and is not enabled, do nothing */
+		if (!fl)
+			return 0;
+
+		/* Record is not filtered but enabled, disable it */
+		if (fl == FTRACE_FL_ENABLED)
 			rec->flags &= ~FTRACE_FL_ENABLED;
-		} else {
-			new = ftrace_call_replace(ip, FTRACE_ADDR);
+		else
+			/* Otherwise record is filtered but not enabled, enable it */
 			rec->flags |= FTRACE_FL_ENABLED;
-		}
 	} else {
+		/* Disable or not filtered */
 
 		if (enable) {
-			/*
-			 * If this record is set not to trace and is
-			 * not enabled, do nothing.
-			 */
-			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
-			if (fl == FTRACE_FL_NOTRACE)
-				return 0;
-
-			new = ftrace_call_replace(ip, FTRACE_ADDR);
-		} else
-			old = ftrace_call_replace(ip, FTRACE_ADDR);
-
-		if (enable) {
+			/* if record is enabled, do nothing */
 			if (rec->flags & FTRACE_FL_ENABLED)
 				return 0;
+
 			rec->flags |= FTRACE_FL_ENABLED;
+
 		} else {
+
+			/* if record is not enabled, do nothing */
 			if (!(rec->flags & FTRACE_FL_ENABLED))
 				return 0;
+
 			rec->flags &= ~FTRACE_FL_ENABLED;
 		}
 	}
 
+	call = ftrace_call_replace(ip, FTRACE_ADDR);
+
+	if (rec->flags & FTRACE_FL_ENABLED) {
+		old = nop;
+		new = call;
+	} else {
+		old = call;
+		new = nop;
+	}
+
 	return ftrace_modify_code(ip, old, new);
 }
 
 static void ftrace_replace_code(int enable)
 {
 	int i, failed;
-	unsigned char *new = NULL, *old = NULL;
+	unsigned char *nop = NULL;
 	struct dyn_ftrace *rec;
 	struct ftrace_page *pg;
 
-	if (enable)
-		old = ftrace_nop_replace();
-	else
-		new = ftrace_nop_replace();
+	nop = ftrace_nop_replace();
 
 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
 		for (i = 0; i < pg->index; i++) {
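Note: the rewritten __ftrace_replace_code() separates policy from patching. The if/else ladder only settles whether FTRACE_FL_ENABLED should be set, and the final block maps that single bit to an (old, new) pair: nop -> call when enabling, call -> nop when disabling. A minimal user-space model of the flag logic (the flag values and return convention are illustrative stand-ins, not the kernel's definitions):

#include <stdio.h>

#define FL_FILTER  (1 << 0)	/* illustrative values */
#define FL_ENABLED (1 << 1)
#define FL_NOTRACE (1 << 2)

/* Returns 1 to patch in a call, -1 to patch in a nop, 0 to leave alone. */
static int resolve(unsigned *flags, int filtered, int enable)
{
	unsigned fl;

	if (*flags & FL_NOTRACE) {
		if (!(*flags & FL_ENABLED))
			return 0;
		*flags &= ~FL_ENABLED;
	} else if (filtered && enable) {
		fl = *flags & (FL_FILTER | FL_ENABLED);
		if (fl == (FL_FILTER | FL_ENABLED) || !fl)
			return 0;	/* already in the desired state */
		*flags ^= FL_ENABLED;	/* toggle toward the filter */
	} else if (enable) {
		if (*flags & FL_ENABLED)
			return 0;
		*flags |= FL_ENABLED;
	} else {
		if (!(*flags & FL_ENABLED))
			return 0;
		*flags &= ~FL_ENABLED;
	}
	return (*flags & FL_ENABLED) ? 1 : -1;
}

int main(void)
{
	unsigned flags = FL_FILTER;

	printf("%d\n", resolve(&flags, 1, 1));	/* 1: filtered record gets the call */
	printf("%d\n", resolve(&flags, 1, 1));	/* 0: repeat is a no-op */
	printf("%d\n", resolve(&flags, 0, 0));	/* -1: disabling patches the nop back */
	return 0;
}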
@@ -434,7 +426,7 @@ static void ftrace_replace_code(int enable)
 				unfreeze_record(rec);
 			}
 
-			failed = __ftrace_replace_code(rec, old, new, enable);
+			failed = __ftrace_replace_code(rec, nop, enable);
 			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
 				rec->flags |= FTRACE_FL_FAILED;
 				if ((system_state == SYSTEM_BOOTING) ||
@@ -506,13 +498,10 @@ static int __ftrace_modify_code(void *data)
 {
 	int *command = data;
 
-	if (*command & FTRACE_ENABLE_CALLS) {
+	if (*command & FTRACE_ENABLE_CALLS)
 		ftrace_replace_code(1);
-		tracing_on = 1;
-	} else if (*command & FTRACE_DISABLE_CALLS) {
+	else if (*command & FTRACE_DISABLE_CALLS)
 		ftrace_replace_code(0);
-		tracing_on = 0;
-	}
 
 	if (*command & FTRACE_UPDATE_TRACE_FUNC)
 		ftrace_update_ftrace_func(ftrace_trace_function);
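Note: with the static tracing_on flag gone (the recording on/off switch now lives in the ring buffer itself; see the tracing_on()/tracing_off() pair added to ring_buffer.c below), __ftrace_modify_code() reduces to dispatching on the command bits. A user-space sketch of that dispatch, with illustrative bit values rather than the kernel's enum:

#include <stdio.h>

#define FTRACE_ENABLE_CALLS	 (1 << 0)	/* illustrative values */
#define FTRACE_DISABLE_CALLS	 (1 << 1)
#define FTRACE_UPDATE_TRACE_FUNC (1 << 2)

static void modify_code(int command)
{
	if (command & FTRACE_ENABLE_CALLS)
		puts("patch eligible sites: nop -> call");
	else if (command & FTRACE_DISABLE_CALLS)
		puts("patch all sites: call -> nop");

	if (command & FTRACE_UPDATE_TRACE_FUNC)
		puts("swap the live trace function");
}

int main(void)
{
	/* ftrace_startup() below now always ORs in FTRACE_ENABLE_CALLS */
	modify_code(FTRACE_ENABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC);
	return 0;
}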
@@ -538,8 +527,7 @@ static void ftrace_startup(void)
 
 	mutex_lock(&ftrace_start_lock);
 	ftrace_start++;
-	if (ftrace_start == 1)
-		command |= FTRACE_ENABLE_CALLS;
+	command |= FTRACE_ENABLE_CALLS;
 
 	if (saved_ftrace_func != ftrace_trace_function) {
 		saved_ftrace_func = ftrace_trace_function;
@@ -677,7 +665,7 @@ static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
 
 	cnt = num_to_init / ENTRIES_PER_PAGE;
 	pr_info("ftrace: allocating %ld entries in %d pages\n",
-		num_to_init, cnt);
+		num_to_init, cnt + 1);
 
 	for (i = 0; i < cnt; i++) {
 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
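Note: the message change accounts for the page that already exists before the loop runs. The loop allocates cnt additional pages, so cnt + 1 pages in total hold num_to_init entries. A quick worked example (the ENTRIES_PER_PAGE value here is an assumption for illustration):

#include <stdio.h>

#define ENTRIES_PER_PAGE 511	/* assumed value for the example */

int main(void)
{
	long num_to_init = 15000;
	int cnt = (int)(num_to_init / ENTRIES_PER_PAGE);	/* 29 extra pages */

	/* 29 pages are allocated in the loop, plus the initial page */
	printf("ftrace: allocating %ld entries in %d pages\n",
	       num_to_init, cnt + 1);				/* 30 pages */
	return 0;
}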
@@ -738,6 +726,9 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 		    ((iter->flags & FTRACE_ITER_FAILURES) &&
 		     !(rec->flags & FTRACE_FL_FAILED)) ||
 
+		    ((iter->flags & FTRACE_ITER_FILTER) &&
+		     !(rec->flags & FTRACE_FL_FILTER)) ||
+
 		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
 		     !(rec->flags & FTRACE_FL_NOTRACE))) {
 			rec = NULL;
@@ -757,13 +748,11 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 	void *p = NULL;
 	loff_t l = -1;
 
-	if (*pos != iter->pos) {
-		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
-			;
-	} else {
-		l = *pos;
-		p = t_next(m, p, &l);
-	}
+	if (*pos > iter->pos)
+		*pos = iter->pos;
+
+	l = *pos;
+	p = t_next(m, p, &l);
 
 	return p;
 }
@@ -774,15 +763,21 @@ static void t_stop(struct seq_file *m, void *p)
 
 static int t_show(struct seq_file *m, void *v)
 {
+	struct ftrace_iterator *iter = m->private;
 	struct dyn_ftrace *rec = v;
 	char str[KSYM_SYMBOL_LEN];
+	int ret = 0;
 
 	if (!rec)
 		return 0;
 
 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
 
-	seq_printf(m, "%s\n", str);
+	ret = seq_printf(m, "%s\n", str);
+	if (ret < 0) {
+		iter->pos--;
+		iter->idx--;
+	}
 
 	return 0;
 }
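Note: together with the t_start() simplification above, t_show() now handles seq_file buffer exhaustion by stepping the iterator back one record: when seq_printf() reports overflow, decrementing iter->pos and iter->idx makes the next read pass retry the same symbol instead of silently dropping it. A user-space sketch of the rewind-on-overflow idea (names and buffer size are invented for the example):

#include <stdio.h>
#include <string.h>

struct iter { int idx; };

/* Returns bytes written, or -1 if the record did not fit. */
static int emit(char *buf, size_t len, size_t *used, const char *rec)
{
	size_t n = strlen(rec) + 1;		/* record plus '\n' */

	if (*used + n > len)
		return -1;
	*used += sprintf(buf + *used, "%s\n", rec);
	return (int)n;
}

int main(void)
{
	const char *recs[] = { "func_a", "func_b", "func_c" };
	char buf[16];
	size_t used = 0;
	struct iter it = { 0 };

	while (it.idx < 3) {
		if (emit(buf, sizeof(buf), &used, recs[it.idx++]) < 0) {
			it.idx--;		/* rewind, retry next pass */
			printf("flushing: %.*s", (int)used, buf);
			used = 0;
		}
	}
	printf("flushing: %.*s", (int)used, buf);
	return 0;
}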
@@ -808,7 +803,7 @@ ftrace_avail_open(struct inode *inode, struct file *file)
 		return -ENOMEM;
 
 	iter->pg = ftrace_pages_start;
-	iter->pos = -1;
+	iter->pos = 0;
 
 	ret = seq_open(file, &show_ftrace_seq_ops);
 	if (!ret) {
@@ -895,7 +890,7 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
 
 	if (file->f_mode & FMODE_READ) {
 		iter->pg = ftrace_pages_start;
-		iter->pos = -1;
+		iter->pos = 0;
 		iter->flags = enable ? FTRACE_ITER_FILTER :
 			FTRACE_ITER_NOTRACE;
 
@@ -1186,7 +1181,7 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
 
 	mutex_lock(&ftrace_sysctl_lock);
 	mutex_lock(&ftrace_start_lock);
-	if (iter->filtered && ftrace_start && ftrace_enabled)
+	if (ftrace_start && ftrace_enabled)
 		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
 	mutex_unlock(&ftrace_start_lock);
 	mutex_unlock(&ftrace_sysctl_lock);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 3f3380638646..f780e9552f91 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -16,14 +16,49 @@
 #include <linux/list.h>
 #include <linux/fs.h>
 
+#include "trace.h"
+
+/* Global flag to disable all recording to ring buffers */
+static int ring_buffers_off __read_mostly;
+
+/**
+ * tracing_on - enable all tracing buffers
+ *
+ * This function enables all tracing buffers that may have been
+ * disabled with tracing_off.
+ */
+void tracing_on(void)
+{
+	ring_buffers_off = 0;
+}
+
+/**
+ * tracing_off - turn off all tracing buffers
+ *
+ * This function stops all tracing buffers from recording data.
+ * It does not disable any overhead the tracers themselves may
+ * be causing. This function simply causes all recording to
+ * the ring buffers to fail.
+ */
+void tracing_off(void)
+{
+	ring_buffers_off = 1;
+}
+
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
 
 /* FIXME!!! */
 u64 ring_buffer_time_stamp(int cpu)
 {
+	u64 time;
+
+	preempt_disable_notrace();
 	/* shift to debug/test normalization and TIME_EXTENTS */
-	return sched_clock() << DEBUG_SHIFT;
+	time = sched_clock() << DEBUG_SHIFT;
+	preempt_enable_notrace();
+
+	return time;
 }
 
 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
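Note: tracing_on()/tracing_off() give the kernel a single global kill switch for recording. Writers check ring_buffers_off up front (see the checks added to ring_buffer_lock_reserve() and ring_buffer_write() below), so one call freezes every buffer without tearing anything down; this is handy for preserving a trace right after a suspect event. A user-space model of the switch:

#include <stdio.h>

static int ring_buffers_off;		/* 0 = recording allowed */

static void tracing_on(void)  { ring_buffers_off = 0; }
static void tracing_off(void) { ring_buffers_off = 1; }

static int record_event(const char *ev)
{
	if (ring_buffers_off)
		return -1;		/* mirrors the NULL/-EBUSY returns */
	printf("recorded: %s\n", ev);
	return 0;
}

int main(void)
{
	record_event("before");		/* recorded */
	tracing_off();
	record_event("suppressed");	/* dropped */
	tracing_on();
	record_event("after");		/* recorded */
	return 0;
}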
@@ -503,6 +538,12 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	LIST_HEAD(pages);
 	int i, cpu;
 
+	/*
+	 * Always succeed at resizing a non-existent buffer:
+	 */
+	if (!buffer)
+		return size;
+
 	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
 	size *= BUF_PAGE_SIZE;
 	buffer_size = buffer->pages * BUF_PAGE_SIZE;
@@ -576,6 +617,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 		list_del_init(&page->list);
 		free_buffer_page(page);
 	}
+	mutex_unlock(&buffer->mutex);
 	return -ENOMEM;
 }
 
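Note: the added mutex_unlock() closes a lock leak: ring_buffer_resize() takes buffer->mutex, and the allocation-failure path returned -ENOMEM while still holding it, so the next resize would block forever. The pattern in miniature (user-space, pthreads):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

static int resize(size_t size)
{
	void *pages;

	pthread_mutex_lock(&m);
	pages = malloc(size);
	if (!pages) {
		pthread_mutex_unlock(&m);	/* the previously missing unlock */
		return -1;			/* -ENOMEM in the kernel code */
	}
	/* ... install the new pages ... */
	free(pages);
	pthread_mutex_unlock(&m);
	return 0;
}

int main(void) { return resize(4096) ? 1 : 0; }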
@@ -1060,7 +1102,7 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 
 	/* Did the write stamp get updated already? */
 	if (unlikely(ts < cpu_buffer->write_stamp))
-		goto again;
+		delta = 0;
 
 	if (test_time_stamp(delta)) {
 
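Note: replacing goto again with delta = 0 avoids retrying when a racing writer has already advanced write_stamp past our earlier clock read; the unsigned subtraction would otherwise yield a bogus huge delta, and the event can simply share its predecessor's timestamp instead. Worked with illustrative numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t write_stamp = 1005;	/* advanced by a racing writer */
	uint64_t ts = 1000;		/* our earlier clock read */
	uint64_t delta = ts - write_stamp;	/* unsigned underflow: huge value */

	if (ts < write_stamp)
		delta = 0;		/* clamp instead of retrying */

	printf("delta = %llu\n", (unsigned long long)delta);
	return 0;
}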
@@ -1133,6 +1175,9 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 	struct ring_buffer_event *event;
 	int cpu, resched;
 
+	if (ring_buffers_off)
+		return NULL;
+
 	if (atomic_read(&buffer->record_disabled))
 		return NULL;
 
@@ -1249,6 +1294,9 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	int ret = -EBUSY;
 	int cpu, resched;
 
+	if (ring_buffers_off)
+		return -EBUSY;
+
 	if (atomic_read(&buffer->record_disabled))
 		return -EBUSY;
 
@@ -2070,3 +2118,69 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 	return 0;
 }
 
+static ssize_t
+rb_simple_read(struct file *filp, char __user *ubuf,
+	       size_t cnt, loff_t *ppos)
+{
+	int *p = filp->private_data;
+	char buf[64];
+	int r;
+
+	/* !ring_buffers_off == tracing_on */
+	r = sprintf(buf, "%d\n", !*p);
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+rb_simple_write(struct file *filp, const char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	int *p = filp->private_data;
+	char buf[64];
+	long val;
+	int ret;
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+
+	ret = strict_strtoul(buf, 10, &val);
+	if (ret < 0)
+		return ret;
+
+	/* !ring_buffers_off == tracing_on */
+	*p = !val;
+
+	(*ppos)++;
+
+	return cnt;
+}
+
+static struct file_operations rb_simple_fops = {
+	.open		= tracing_open_generic,
+	.read		= rb_simple_read,
+	.write		= rb_simple_write,
+};
+
+
+static __init int rb_init_debugfs(void)
+{
+	struct dentry *d_tracer;
+	struct dentry *entry;
+
+	d_tracer = tracing_init_dentry();
+
+	entry = debugfs_create_file("tracing_on", 0644, d_tracer,
+				    &ring_buffers_off, &rb_simple_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs 'tracing_on' entry\n");
+
+	return 0;
+}
+
+fs_initcall(rb_init_debugfs);
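Note: the new debugfs file exposes the switch to user space, and note the inversion: the file reads and accepts "tracing on" (1/0) while the variable stores "buffers off", hence the !*p and !val. Assuming debugfs is mounted and tracing_init_dentry() returns the usual tracing directory (commonly /sys/kernel/debug/tracing), writing 0 to tracing_on freezes recording and writing 1 resumes it. A user-space model of the inverted flag:

#include <stdio.h>

static int ring_buffers_off;

static int simple_read(void)	   { return !ring_buffers_off; }
static void simple_write(long val) { ring_buffers_off = !val; }

int main(void)
{
	simple_write(0);			/* echo 0 > tracing_on */
	printf("%d\n", simple_read());		/* cat tracing_on -> 0 */
	simple_write(1);			/* echo 1 > tracing_on */
	printf("%d\n", simple_read());		/* -> 1 */
	return 0;
}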
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9f3b478f9171..d86e3252f300 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1755,7 +1755,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 		return TRACE_TYPE_HANDLED;
 
 	SEQ_PUT_FIELD_RET(s, entry->pid);
-	SEQ_PUT_FIELD_RET(s, iter->cpu);
+	SEQ_PUT_FIELD_RET(s, entry->cpu);
 	SEQ_PUT_FIELD_RET(s, iter->ts);
 
 	switch (entry->type) {
@@ -1936,6 +1936,7 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
 		ring_buffer_read_finish(iter->buffer_iter[cpu]);
 	}
 	mutex_unlock(&trace_types_lock);
+	kfree(iter);
 
 	return ERR_PTR(-ENOMEM);
 }
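Note: the added kfree(iter) plugs a memory leak on __tracing_open()'s failure path: the iterator was allocated earlier in the function, and the error exit returned ERR_PTR(-ENOMEM) without releasing it. The shape of the bug and fix in a self-contained sketch (the setup-failure flag is hypothetical):

#include <stdlib.h>

struct iterator { int cpu; };

/* Returns NULL on failure. */
static struct iterator *open_iter(int fail_setup)
{
	struct iterator *iter = calloc(1, sizeof(*iter));

	if (!iter)
		return NULL;

	if (fail_setup) {
		free(iter);	/* the previously missing release */
		return NULL;
	}
	return iter;
}

int main(void)
{
	struct iterator *it = open_iter(0);

	free(it);
	return 0;
}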
@@ -2676,7 +2677,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 {
 	unsigned long val;
 	char buf[64];
-	int ret;
+	int ret, cpu;
 	struct trace_array *tr = filp->private_data;
 
 	if (cnt >= sizeof(buf))
@@ -2704,6 +2705,14 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 		goto out;
 	}
 
+	/* disable all cpu buffers */
+	for_each_tracing_cpu(cpu) {
+		if (global_trace.data[cpu])
+			atomic_inc(&global_trace.data[cpu]->disabled);
+		if (max_tr.data[cpu])
+			atomic_inc(&max_tr.data[cpu]->disabled);
+	}
+
 	if (val != global_trace.entries) {
 		ret = ring_buffer_resize(global_trace.buffer, val);
 		if (ret < 0) {
@@ -2735,6 +2744,13 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	if (tracing_disabled)
 		cnt = -ENOMEM;
 out:
+	for_each_tracing_cpu(cpu) {
+		if (global_trace.data[cpu])
+			atomic_dec(&global_trace.data[cpu]->disabled);
+		if (max_tr.data[cpu])
+			atomic_dec(&max_tr.data[cpu]->disabled);
+	}
+
 	max_tr.entries = global_trace.entries;
 	mutex_unlock(&trace_types_lock);
 
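Note: the resize is now bracketed by per-cpu disabled counts so tracers cannot write into a buffer while its pages are being swapped out; the decrements sit after the out: label so every exit, including the error paths, rebalances the counts. A user-space model of the bracketing (names and CPU count are invented for the example):

#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

static atomic_int disabled[NR_CPUS];

static int resize_buffers(long val)
{
	int cpu, ret = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		atomic_fetch_add(&disabled[cpu], 1);	/* writers now bail out */

	if (val < 0)
		ret = -1;	/* errors still reach the epilogue below */

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		atomic_fetch_sub(&disabled[cpu], 1);	/* always rebalanced */
	return ret;
}

int main(void)
{
	printf("%d\n", resize_buffers(1024));
	return 0;
}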