author      Ingo Molnar <mingo@elte.hu>    2010-07-23 03:10:29 -0400
committer   Ingo Molnar <mingo@elte.hu>    2010-07-23 03:10:29 -0400
commit      3a01736e70a7d629140695ba46a901266b4460cc (patch)
tree        49ff8ce1e7c6a267f0ce84b5daddbe6666bc4253 /kernel/trace
parent      4c21adf26f8fcf86a755b9b9f55c2e9fd241e1fb (diff)
parent      24a461d537f49f9da6533d83100999ea08c6c755 (diff)

Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core
Diffstat (limited to 'kernel/trace')

 -rw-r--r--  kernel/trace/ring_buffer.c        |  2
 -rw-r--r--  kernel/trace/trace.c              | 46
 -rw-r--r--  kernel/trace/trace.h              |  4
 -rw-r--r--  kernel/trace/trace_events.c       | 30
 -rw-r--r--  kernel/trace/trace_irqsoff.c      |  3
 -rw-r--r--  kernel/trace/trace_output.c       |  3
 -rw-r--r--  kernel/trace/trace_sched_wakeup.c |  2

 7 files changed, 76 insertions, 14 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 28d0615a513f..3632ce87674f 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -443,6 +443,7 @@ int ring_buffer_print_page_header(struct trace_seq *s)
  */
 struct ring_buffer_per_cpu {
 	int			cpu;
+	atomic_t		record_disabled;
 	struct ring_buffer	*buffer;
 	spinlock_t		reader_lock;	/* serialize readers */
 	arch_spinlock_t		lock;
@@ -462,7 +463,6 @@ struct ring_buffer_per_cpu {
 	unsigned long		read;
 	u64			write_stamp;
 	u64			read_stamp;
-	atomic_t		record_disabled;
 };
 
 struct ring_buffer {
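
The only change here is where record_disabled lives: the counter is checked on every attempted write, so moving it next to ->cpu and ->buffer plausibly keeps the write fast path on the cache line it already touches instead of also pulling in the cold tail of the struct. A minimal userspace sketch of the same idea (C11 atomics; rb_per_cpu and rb_try_reserve are illustrative names, not the kernel's API):

    /* Hot fields grouped at the front: one cache line serves both the
     * disabled-check and the buffer pointer on every write attempt. */
    #include <stdatomic.h>

    struct rb_per_cpu {
            int         cpu;
            atomic_int  record_disabled;  /* hot: read on every write */
            void        *buffer;          /* hot: used on every write */
            /* ... colder bookkeeping (stamps, read cursors, ...) ... */
    };

    static int rb_try_reserve(struct rb_per_cpu *c)
    {
            /* Non-zero means writers must back off. */
            if (atomic_load_explicit(&c->record_disabled, memory_order_relaxed))
                    return -1;
            /* ... reserve space in c->buffer ... */
            return 0;
    }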
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c1752dac613e..4b1122d0df37 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -344,7 +344,7 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 /* trace_flags holds trace_options default values */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
-	TRACE_ITER_GRAPH_TIME;
+	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD;
 
 static int trace_stop_count;
 static DEFINE_SPINLOCK(tracing_start_lock);
@@ -428,6 +428,7 @@ static const char *trace_options[] = {
 	"latency-format",
 	"sleep-time",
 	"graph-time",
+	"record-cmd",
 	NULL
 };
 
@@ -659,6 +660,10 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
+	if (!current_trace->use_max_tr) {
+		WARN_ON_ONCE(1);
+		return;
+	}
 	arch_spin_lock(&ftrace_max_lock);
 
 	tr->buffer = max_tr.buffer;
@@ -685,6 +690,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
+	if (!current_trace->use_max_tr) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
 	arch_spin_lock(&ftrace_max_lock);
 
 	ftrace_disable_cpu();
@@ -729,7 +739,7 @@ __acquires(kernel_lock)
 		return -1;
 	}
 
-	if (strlen(type->name) > MAX_TRACER_SIZE) {
+	if (strlen(type->name) >= MAX_TRACER_SIZE) {
 		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
 		return -1;
 	}
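
The strlen() comparison above is an off-by-one fix: strlen() does not count the terminating NUL, so a tracer name of exactly MAX_TRACER_SIZE characters passed the old '>' check yet needs MAX_TRACER_SIZE + 1 bytes once stored with its terminator. A standalone illustration of the boundary (MAX_TRACER_SIZE is 100 in this tree; the wrapper function is hypothetical):

    #include <string.h>

    #define MAX_TRACER_SIZE 100

    static char bootup_tracer_buf[MAX_TRACER_SIZE];

    int set_bootup_tracer(const char *name)
    {
            if (strlen(name) >= MAX_TRACER_SIZE)  /* '>' let len == 100 slip by */
                    return -1;
            strcpy(bootup_tracer_buf, name);      /* len + NUL now always fit */
            return 0;
    }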
@@ -2508,6 +2518,9 @@ static void set_tracer_flags(unsigned int mask, int enabled)
 		trace_flags |= mask;
 	else
 		trace_flags &= ~mask;
+
+	if (mask == TRACE_ITER_RECORD_CMD)
+		trace_event_enable_cmd_record(enabled);
 }
 
 static ssize_t
@@ -2746,6 +2759,9 @@ static int tracing_resize_ring_buffer(unsigned long size)
 	if (ret < 0)
 		return ret;
 
+	if (!current_trace->use_max_tr)
+		goto out;
+
 	ret = ring_buffer_resize(max_tr.buffer, size);
 	if (ret < 0) {
 		int r;
@@ -2773,11 +2789,14 @@ static int tracing_resize_ring_buffer(unsigned long size)
 		return ret;
 	}
 
+	max_tr.entries = size;
+ out:
 	global_trace.entries = size;
 
 	return ret;
 }
 
+
 /**
  * tracing_update_buffers - used by tracing facility to expand ring buffers
  *
@@ -2838,12 +2857,26 @@ static int tracing_set_tracer(const char *buf)
 	trace_branch_disable();
 	if (current_trace && current_trace->reset)
 		current_trace->reset(tr);
-
+	if (current_trace && current_trace->use_max_tr) {
+		/*
+		 * We don't free the ring buffer. instead, resize it because
+		 * The max_tr ring buffer has some state (e.g. ring->clock) and
+		 * we want preserve it.
+		 */
+		ring_buffer_resize(max_tr.buffer, 1);
+		max_tr.entries = 1;
+	}
 	destroy_trace_option_files(topts);
 
 	current_trace = t;
 
 	topts = create_trace_option_files(current_trace);
+	if (current_trace->use_max_tr) {
+		ret = ring_buffer_resize(max_tr.buffer, global_trace.entries);
+		if (ret < 0)
+			goto out;
+		max_tr.entries = global_trace.entries;
+	}
 
 	if (t->init) {
 		ret = tracer_init(t, tr);
@@ -3426,7 +3459,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	}
 
 	tracing_start();
-	max_tr.entries = global_trace.entries;
 	mutex_unlock(&trace_types_lock);
 
 	return cnt;
@@ -4531,16 +4563,14 @@ __init static int tracer_alloc_buffers(void)
 
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-	max_tr.buffer = ring_buffer_alloc(ring_buf_size,
-					    TRACE_BUFFER_FLAGS);
+	max_tr.buffer = ring_buffer_alloc(1, TRACE_BUFFER_FLAGS);
 	if (!max_tr.buffer) {
 		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
 		WARN_ON(1);
 		ring_buffer_free(global_trace.buffer);
 		goto out_free_cpumask;
 	}
-	max_tr.entries = ring_buffer_size(max_tr.buffer);
-	WARN_ON(max_tr.entries != global_trace.entries);
+	max_tr.entries = 1;
 #endif
 
 	/* Allocate the first page for all buffers */
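
Taken together, the trace.c hunks stop max_tr from permanently mirroring global_trace: the snapshot buffer is now allocated with a single entry at boot and grown to full size only while a tracer that sets use_max_tr is installed, then shrunk back to one entry when that tracer is deselected. A simplified standalone model of the policy (rb_resize() and the two-field struct stand in for the real ring-buffer API; locking and error handling elided):

    struct buf { unsigned long entries; };

    static struct buf global_trace, max_tr = { .entries = 1 };  /* boot size */

    static void rb_resize(struct buf *b, unsigned long size)
    {
            b->entries = size;  /* models ring_buffer_resize() */
    }

    /* Switching tracers: grow max_tr only for consumers that snapshot. */
    void select_tracer(int old_use_max_tr, int new_use_max_tr)
    {
            if (old_use_max_tr)
                    rb_resize(&max_tr, 1);
            if (new_use_max_tr)
                    rb_resize(&max_tr, global_trace.entries);
    }

    /* User-requested resize: max_tr follows only while someone uses it. */
    void resize_buffers(unsigned long size, int use_max_tr)
    {
            rb_resize(&global_trace, size);
            if (use_max_tr)
                    rb_resize(&max_tr, size);
    }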
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 638a5887e2ec..d05c873dd4b2 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -274,6 +274,7 @@ struct tracer {
 	struct tracer		*next;
 	int			print_max;
 	struct tracer_flags	*flags;
+	int			use_max_tr;
 };
 
 
@@ -581,6 +582,7 @@ enum trace_iterator_flags {
 	TRACE_ITER_LATENCY_FMT		= 0x20000,
 	TRACE_ITER_SLEEP_TIME		= 0x40000,
 	TRACE_ITER_GRAPH_TIME		= 0x80000,
+	TRACE_ITER_RECORD_CMD		= 0x100000,
 };
 
 /*
@@ -713,6 +715,8 @@ filter_check_discard(struct ftrace_event_call *call, void *rec,
 	return 0;
 }
 
+extern void trace_event_enable_cmd_record(bool enable);
+
 extern struct mutex event_mutex;
 extern struct list_head ftrace_events;
 
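
TRACE_ITER_RECORD_CMD takes the next free bit: the iterator options are single-bit masks, which is what lets set_tracer_flags() OR a bit in, mask it out, and compare the whole mask to spot the one option it must propagate. A standalone model of that pattern (values copied from the enum above; the empty branch marks where trace.c calls trace_event_enable_cmd_record()):

    #define TRACE_ITER_GRAPH_TIME  0x80000
    #define TRACE_ITER_RECORD_CMD  0x100000  /* next power of two */

    static unsigned long trace_flags = TRACE_ITER_GRAPH_TIME;

    static void set_flag(unsigned long mask, int enabled)
    {
            if (enabled)
                    trace_flags |= mask;
            else
                    trace_flags &= ~mask;

            if (mask == TRACE_ITER_RECORD_CMD) {
                    /* side-effect hook: toggle cmdline recording */
            }
    }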
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index e8e6043f4d29..09b4fa6e4d3b 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -170,6 +170,26 @@ int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type)
 }
 EXPORT_SYMBOL_GPL(ftrace_event_reg);
 
+void trace_event_enable_cmd_record(bool enable)
+{
+	struct ftrace_event_call *call;
+
+	mutex_lock(&event_mutex);
+	list_for_each_entry(call, &ftrace_events, list) {
+		if (!(call->flags & TRACE_EVENT_FL_ENABLED))
+			continue;
+
+		if (enable) {
+			tracing_start_cmdline_record();
+			call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
+		} else {
+			tracing_stop_cmdline_record();
+			call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
+		}
+	}
+	mutex_unlock(&event_mutex);
+}
+
 static int ftrace_event_enable_disable(struct ftrace_event_call *call,
 					int enable)
 {
@@ -179,13 +199,19 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call,
 	case 0:
 		if (call->flags & TRACE_EVENT_FL_ENABLED) {
 			call->flags &= ~TRACE_EVENT_FL_ENABLED;
-			tracing_stop_cmdline_record();
+			if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) {
+				tracing_stop_cmdline_record();
+				call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
+			}
 			call->class->reg(call, TRACE_REG_UNREGISTER);
 		}
 		break;
 	case 1:
 		if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
-			tracing_start_cmdline_record();
+			if (trace_flags & TRACE_ITER_RECORD_CMD) {
+				tracing_start_cmdline_record();
+				call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
+			}
 			ret = call->class->reg(call, TRACE_REG_REGISTER);
 			if (ret) {
 				tracing_stop_cmdline_record();
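
The new TRACE_EVENT_FL_RECORDED_CMD flag exists because tracing_start_cmdline_record() and tracing_stop_cmdline_record() are a refcounted pair: with the record-cmd option able to flip while events are live, each event must remember whether it actually took a reference so that it drops exactly what it took. A standalone model of the invariant (plain ints and bools in place of the kernel's locking and flag bits):

    #include <stdbool.h>

    static int cmdline_ref;          /* models the start/stop refcount */
    static bool record_cmd = true;   /* models TRACE_ITER_RECORD_CMD */

    struct event { bool enabled, recorded_cmd; };

    void event_enable(struct event *e)
    {
            if (e->enabled)
                    return;
            e->enabled = true;
            if (record_cmd) {           /* take a ref only if the option is on */
                    cmdline_ref++;
                    e->recorded_cmd = true;
            }
    }

    void event_disable(struct event *e)
    {
            if (!e->enabled)
                    return;
            e->enabled = false;
            if (e->recorded_cmd) {      /* drop only the ref we took */
                    cmdline_ref--;
                    e->recorded_cmd = false;
            }
    }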
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 6fd486e0cef4..73a6b0601f2e 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -649,6 +649,7 @@ static struct tracer irqsoff_tracer __read_mostly =
 #endif
 	.open		= irqsoff_trace_open,
 	.close		= irqsoff_trace_close,
+	.use_max_tr	= 1,
 };
 # define register_irqsoff(trace) register_tracer(&trace)
 #else
@@ -681,6 +682,7 @@ static struct tracer preemptoff_tracer __read_mostly =
 #endif
 	.open		= irqsoff_trace_open,
 	.close		= irqsoff_trace_close,
+	.use_max_tr	= 1,
 };
 # define register_preemptoff(trace) register_tracer(&trace)
 #else
@@ -715,6 +717,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
 #endif
 	.open		= irqsoff_trace_open,
 	.close		= irqsoff_trace_close,
+	.use_max_tr	= 1,
 };
 
 # define register_preemptirqsoff(trace) register_tracer(&trace)
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index a46197b80b7f..02272baa2206 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -16,9 +16,6 @@
 
 DECLARE_RWSEM(trace_event_mutex);
 
-DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq);
-EXPORT_PER_CPU_SYMBOL(ftrace_event_seq);
-
 static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
 
 static int next_event_type = __TRACE_LAST_TYPE + 1;
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index c9fd5bd02036..4086eae6e81b 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -382,6 +382,7 @@ static struct tracer wakeup_tracer __read_mostly =
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_wakeup,
 #endif
+	.use_max_tr	= 1,
 };
 
 static struct tracer wakeup_rt_tracer __read_mostly =
@@ -396,6 +397,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_wakeup,
 #endif
+	.use_max_tr	= 1,
 };
400 402
401__init static int init_wakeup_tracer(void) 403__init static int init_wakeup_tracer(void)