Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	135
1 files changed, 55 insertions, 80 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d6736b93dc2a..ba14a22be4cc 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -341,7 +341,7 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 /* trace_flags holds trace_options default values */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
-	TRACE_ITER_GRAPH_TIME;
+	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD;
 
 static int trace_stop_count;
 static DEFINE_SPINLOCK(tracing_start_lock);
@@ -425,6 +425,7 @@ static const char *trace_options[] = {
425 "latency-format", 425 "latency-format",
426 "sleep-time", 426 "sleep-time",
427 "graph-time", 427 "graph-time",
428 "record-cmd",
428 NULL 429 NULL
429}; 430};
430 431
@@ -656,6 +657,10 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
+	if (!current_trace->use_max_tr) {
+		WARN_ON_ONCE(1);
+		return;
+	}
 	arch_spin_lock(&ftrace_max_lock);
 
 	tr->buffer = max_tr.buffer;
@@ -682,6 +687,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
+	if (!current_trace->use_max_tr) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
 	arch_spin_lock(&ftrace_max_lock);
 
 	ftrace_disable_cpu();
@@ -726,18 +736,11 @@ __acquires(kernel_lock)
 		return -1;
 	}
 
-	if (strlen(type->name) > MAX_TRACER_SIZE) {
+	if (strlen(type->name) >= MAX_TRACER_SIZE) {
 		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
 		return -1;
 	}
 
-	/*
-	 * When this gets called we hold the BKL which means that
-	 * preemption is disabled. Various trace selftests however
-	 * need to disable and enable preemption for successful tests.
-	 * So we drop the BKL here and grab it after the tests again.
-	 */
-	unlock_kernel();
 	mutex_lock(&trace_types_lock);
 
 	tracing_selftest_running = true;
@@ -819,7 +822,6 @@ __acquires(kernel_lock)
 #endif
 
  out_unlock:
-	lock_kernel();
 	return ret;
 }
 
@@ -1328,61 +1330,6 @@ static void __trace_userstack(struct trace_array *tr, unsigned long flags)
 
 #endif /* CONFIG_STACKTRACE */
 
-static void
-ftrace_trace_special(void *__tr,
-		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
-		     int pc)
-{
-	struct ftrace_event_call *call = &event_special;
-	struct ring_buffer_event *event;
-	struct trace_array *tr = __tr;
-	struct ring_buffer *buffer = tr->buffer;
-	struct special_entry *entry;
-
-	event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL,
-					  sizeof(*entry), 0, pc);
-	if (!event)
-		return;
-	entry = ring_buffer_event_data(event);
-	entry->arg1 = arg1;
-	entry->arg2 = arg2;
-	entry->arg3 = arg3;
-
-	if (!filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(buffer, event, 0, pc);
-}
-
-void
-__trace_special(void *__tr, void *__data,
-		unsigned long arg1, unsigned long arg2, unsigned long arg3)
-{
-	ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count());
-}
-
-void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
-{
-	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	int cpu;
-	int pc;
-
-	if (tracing_disabled)
-		return;
-
-	pc = preempt_count();
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-
-	if (likely(atomic_inc_return(&data->disabled) == 1))
-		ftrace_trace_special(tr, arg1, arg2, arg3, pc);
-
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
-}
-
 /**
  * trace_vbprintk - write binary msg to tracing buffer
  *
@@ -1401,7 +1348,6 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	struct bprint_entry *entry;
 	unsigned long flags;
 	int disable;
-	int resched;
 	int cpu, len = 0, size, pc;
 
 	if (unlikely(tracing_selftest_running || tracing_disabled))
@@ -1411,7 +1357,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	pause_graph_tracing();
 
 	pc = preempt_count();
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 
@@ -1449,7 +1395,7 @@ out_unlock:
 
 out:
 	atomic_dec_return(&data->disabled);
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 	unpause_graph_tracing();
 
 	return len;
@@ -2386,6 +2332,7 @@ static const struct file_operations show_traces_fops = {
 	.open = show_traces_open,
 	.read = seq_read,
 	.release = seq_release,
+	.llseek = seq_lseek,
 };
 
 /*
@@ -2479,6 +2426,7 @@ static const struct file_operations tracing_cpumask_fops = {
 	.open = tracing_open_generic,
 	.read = tracing_cpumask_read,
 	.write = tracing_cpumask_write,
+	.llseek = generic_file_llseek,
 };
 
 static int tracing_trace_options_show(struct seq_file *m, void *v)
@@ -2554,6 +2502,9 @@ static void set_tracer_flags(unsigned int mask, int enabled)
 		trace_flags |= mask;
 	else
 		trace_flags &= ~mask;
+
+	if (mask == TRACE_ITER_RECORD_CMD)
+		trace_event_enable_cmd_record(enabled);
 }
 
 static ssize_t
@@ -2645,6 +2596,7 @@ tracing_readme_read(struct file *filp, char __user *ubuf,
 static const struct file_operations tracing_readme_fops = {
 	.open = tracing_open_generic,
 	.read = tracing_readme_read,
+	.llseek = generic_file_llseek,
 };
 
 static ssize_t
@@ -2695,6 +2647,7 @@ tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
 static const struct file_operations tracing_saved_cmdlines_fops = {
 	.open = tracing_open_generic,
 	.read = tracing_saved_cmdlines_read,
+	.llseek = generic_file_llseek,
 };
 
 static ssize_t
@@ -2790,6 +2743,9 @@ static int tracing_resize_ring_buffer(unsigned long size)
 	if (ret < 0)
 		return ret;
 
+	if (!current_trace->use_max_tr)
+		goto out;
+
 	ret = ring_buffer_resize(max_tr.buffer, size);
 	if (ret < 0) {
 		int r;
@@ -2817,11 +2773,14 @@ static int tracing_resize_ring_buffer(unsigned long size)
 		return ret;
 	}
 
+	max_tr.entries = size;
+ out:
 	global_trace.entries = size;
 
 	return ret;
 }
 
+
 /**
  * tracing_update_buffers - used by tracing facility to expand ring buffers
  *
@@ -2882,12 +2841,26 @@ static int tracing_set_tracer(const char *buf)
 	trace_branch_disable();
 	if (current_trace && current_trace->reset)
 		current_trace->reset(tr);
-
+	if (current_trace && current_trace->use_max_tr) {
+		/*
+		 * We don't free the ring buffer. instead, resize it because
+		 * The max_tr ring buffer has some state (e.g. ring->clock) and
+		 * we want preserve it.
+		 */
+		ring_buffer_resize(max_tr.buffer, 1);
+		max_tr.entries = 1;
+	}
 	destroy_trace_option_files(topts);
 
 	current_trace = t;
 
 	topts = create_trace_option_files(current_trace);
+	if (current_trace->use_max_tr) {
+		ret = ring_buffer_resize(max_tr.buffer, global_trace.entries);
+		if (ret < 0)
+			goto out;
+		max_tr.entries = global_trace.entries;
+	}
 
 	if (t->init) {
 		ret = tracer_init(t, tr);
@@ -3024,6 +2997,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 	if (iter->trace->pipe_open)
 		iter->trace->pipe_open(iter);
 
+	nonseekable_open(inode, filp);
 out:
 	mutex_unlock(&trace_types_lock);
 	return ret;
@@ -3469,7 +3443,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	}
 
 	tracing_start();
-	max_tr.entries = global_trace.entries;
 	mutex_unlock(&trace_types_lock);
 
 	return cnt;
@@ -3582,18 +3555,21 @@ static const struct file_operations tracing_max_lat_fops = {
 	.open = tracing_open_generic,
 	.read = tracing_max_lat_read,
 	.write = tracing_max_lat_write,
+	.llseek = generic_file_llseek,
 };
 
 static const struct file_operations tracing_ctrl_fops = {
 	.open = tracing_open_generic,
 	.read = tracing_ctrl_read,
 	.write = tracing_ctrl_write,
+	.llseek = generic_file_llseek,
 };
 
 static const struct file_operations set_tracer_fops = {
 	.open = tracing_open_generic,
 	.read = tracing_set_trace_read,
 	.write = tracing_set_trace_write,
+	.llseek = generic_file_llseek,
 };
 
 static const struct file_operations tracing_pipe_fops = {
@@ -3602,17 +3578,20 @@ static const struct file_operations tracing_pipe_fops = {
 	.read = tracing_read_pipe,
 	.splice_read = tracing_splice_read_pipe,
 	.release = tracing_release_pipe,
+	.llseek = no_llseek,
 };
 
 static const struct file_operations tracing_entries_fops = {
 	.open = tracing_open_generic,
 	.read = tracing_entries_read,
 	.write = tracing_entries_write,
+	.llseek = generic_file_llseek,
 };
 
 static const struct file_operations tracing_mark_fops = {
 	.open = tracing_open_generic,
 	.write = tracing_mark_write,
+	.llseek = generic_file_llseek,
 };
 
 static const struct file_operations trace_clock_fops = {
@@ -3918,6 +3897,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 static const struct file_operations tracing_stats_fops = {
 	.open = tracing_open_generic,
 	.read = tracing_stats_read,
+	.llseek = generic_file_llseek,
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -3954,6 +3934,7 @@ tracing_read_dyn_info(struct file *filp, char __user *ubuf,
 static const struct file_operations tracing_dyn_info_fops = {
 	.open = tracing_open_generic,
 	.read = tracing_read_dyn_info,
+	.llseek = generic_file_llseek,
 };
 #endif
 
@@ -4107,6 +4088,7 @@ static const struct file_operations trace_options_fops = {
 	.open = tracing_open_generic,
 	.read = trace_options_read,
 	.write = trace_options_write,
+	.llseek = generic_file_llseek,
 };
 
 static ssize_t
@@ -4158,6 +4140,7 @@ static const struct file_operations trace_options_core_fops = {
 	.open = tracing_open_generic,
 	.read = trace_options_core_read,
 	.write = trace_options_core_write,
+	.llseek = generic_file_llseek,
 };
 
 struct dentry *trace_create_file(const char *name,
@@ -4347,9 +4330,6 @@ static __init int tracer_init_debugfs(void)
 	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
 			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
 #endif
-#ifdef CONFIG_SYSPROF_TRACER
-	init_tracer_sysprof_debugfs(d_tracer);
-#endif
 
 	create_trace_options_dir();
 
@@ -4576,16 +4556,14 @@ __init static int tracer_alloc_buffers(void)
 
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-	max_tr.buffer = ring_buffer_alloc(ring_buf_size,
-					    TRACE_BUFFER_FLAGS);
+	max_tr.buffer = ring_buffer_alloc(1, TRACE_BUFFER_FLAGS);
 	if (!max_tr.buffer) {
 		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
 		WARN_ON(1);
 		ring_buffer_free(global_trace.buffer);
 		goto out_free_cpumask;
 	}
-	max_tr.entries = ring_buffer_size(max_tr.buffer);
-	WARN_ON(max_tr.entries != global_trace.entries);
+	max_tr.entries = 1;
 #endif
 
 	/* Allocate the first page for all buffers */
@@ -4598,9 +4576,6 @@ __init static int tracer_alloc_buffers(void)
 
 	register_tracer(&nop_trace);
 	current_trace = &nop_trace;
-#ifdef CONFIG_BOOT_TRACER
-	register_tracer(&boot_tracer);
-#endif
 	/* All seems OK, enable tracing */
 	tracing_disabled = 0;
 