path: root/kernel/trace
author     Thomas Gleixner <tglx@linutronix.de>  2010-04-02 04:20:05 -0400
committer  Thomas Gleixner <tglx@linutronix.de>  2010-04-02 04:20:05 -0400
commit     ac4d7f0bed9b03204539de81c8b62b35c9f4e16a
tree       6afd6fa0580f6a6f316788222b026f86b83647ac /kernel/trace
parent     1630ae851f808a7fe563ed8e988a42d3326bd75a
parent     19f00f070c17584b5acaf186baf4d12a7d2ed125

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-2.6.33.y

Conflicts:
	Makefile

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ftrace.c        2
-rw-r--r--  kernel/trace/ring_buffer.c  12
-rw-r--r--  kernel/trace/trace.c        25
3 files changed, 28 insertions, 11 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index a6d0ebe9fc12..9ed787fc3dff 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3365,6 +3365,7 @@ void ftrace_graph_init_task(struct task_struct *t)
 {
 	/* Make sure we do not use the parent ret_stack */
 	t->ret_stack = NULL;
+	t->curr_ret_stack = -1;
 
 	if (ftrace_graph_active) {
 		struct ftrace_ret_stack *ret_stack;
@@ -3374,7 +3375,6 @@ void ftrace_graph_init_task(struct task_struct *t)
 						GFP_KERNEL);
 		if (!ret_stack)
 			return;
-		t->curr_ret_stack = -1;
 		atomic_set(&t->tracing_graph_pause, 0);
 		atomic_set(&t->trace_overrun, 0);
 		t->ftrace_timestamp = 0;
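
The ftrace.c hunks above move the curr_ret_stack = -1 assignment out of the ftrace_graph_active branch, so every new task starts with an empty return-stack index even when the graph tracer is not yet active. A minimal sketch of that ordering, using illustrative names (task_like, tracer_active) rather than the real kernel types:

#include <stddef.h>

struct task_like {
	void *ret_stack;	/* NULL until the tracer allocates it */
	int   curr_ret_stack;	/* -1 means "no graph entries pushed yet" */
};

/* Sketch: set the index unconditionally, before any allocation, so a
 * hook firing in the window before the tracer is enabled never sees a
 * stale index inherited from the parent task. */
static void graph_init_task_sketch(struct task_like *t, int tracer_active)
{
	t->ret_stack = NULL;
	t->curr_ret_stack = -1;

	if (tracer_active) {
		/* allocate and publish t->ret_stack here, as the real
		 * ftrace_graph_init_task() does */
	}
}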
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 9e095ef33be6..850918a4a8ee 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2239,12 +2239,12 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return NULL;
 
-	if (atomic_read(&buffer->record_disabled))
-		return NULL;
-
 	/* If we are tracing schedule, we don't want to recurse */
 	resched = ftrace_preempt_disable();
 
+	if (atomic_read(&buffer->record_disabled))
+		goto out_nocheck;
+
 	if (trace_recursive_lock())
 		goto out_nocheck;
 
@@ -2476,11 +2476,11 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return -EBUSY;
 
-	if (atomic_read(&buffer->record_disabled))
-		return -EBUSY;
-
 	resched = ftrace_preempt_disable();
 
+	if (atomic_read(&buffer->record_disabled))
+		goto out;
+
 	cpu = raw_smp_processor_id();
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
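
Both ring_buffer.c hunks move the buffer->record_disabled test to after ftrace_preempt_disable(), so the check is made with preemption already off and a disabled buffer leaves through the common unwind path instead of returning before preemption was touched. A rough userspace sketch of that shape, with stand-in names (fake_preempt_*, do_reserve) for the kernel primitives:

#include <stdatomic.h>
#include <stddef.h>

static atomic_int record_disabled;              /* buffer->record_disabled */

static void fake_preempt_disable(void) { }      /* ftrace_preempt_disable() */
static void fake_preempt_enable(void)  { }      /* ftrace_preempt_enable()  */
static void *do_reserve(void) { static char slot[32]; return slot; }

static void *reserve_sketch(void)
{
	void *event = NULL;

	fake_preempt_disable();

	/* Checked only after preemption is off; bailing out goes through
	 * the same label the later failure cases use, so preemption is
	 * re-enabled exactly once on every exit path. */
	if (atomic_load(&record_disabled))
		goto out;

	event = do_reserve();
 out:
	fake_preempt_enable();
	return event;
}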
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4af5b218f953..9b66ee14b39f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -752,10 +752,10 @@ out:
 	mutex_unlock(&trace_types_lock);
 }
 
-static void __tracing_reset(struct trace_array *tr, int cpu)
+static void __tracing_reset(struct ring_buffer *buffer, int cpu)
 {
 	ftrace_disable_cpu();
-	ring_buffer_reset_cpu(tr->buffer, cpu);
+	ring_buffer_reset_cpu(buffer, cpu);
 	ftrace_enable_cpu();
 }
 
@@ -767,7 +767,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
 
 	/* Make sure all commits have finished */
 	synchronize_sched();
-	__tracing_reset(tr, cpu);
+	__tracing_reset(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
@@ -785,7 +785,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		__tracing_reset(tr, cpu);
+		__tracing_reset(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
@@ -862,6 +862,8 @@ void tracing_start(void)
 		goto out;
 	}
 
+	/* Prevent the buffers from switching */
+	arch_spin_lock(&ftrace_max_lock);
 
 	buffer = global_trace.buffer;
 	if (buffer)
@@ -871,6 +873,8 @@ void tracing_start(void)
 	if (buffer)
 		ring_buffer_record_enable(buffer);
 
+	arch_spin_unlock(&ftrace_max_lock);
+
 	ftrace_start();
  out:
 	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
@@ -892,6 +896,9 @@ void tracing_stop(void)
 	if (trace_stop_count++)
 		goto out;
 
+	/* Prevent the buffers from switching */
+	arch_spin_lock(&ftrace_max_lock);
+
 	buffer = global_trace.buffer;
 	if (buffer)
 		ring_buffer_record_disable(buffer);
@@ -900,6 +907,8 @@ void tracing_stop(void)
 	if (buffer)
 		ring_buffer_record_disable(buffer);
 
+	arch_spin_unlock(&ftrace_max_lock);
+
  out:
 	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
 }
@@ -1187,6 +1196,13 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
 		return;
 
+	/*
+	 * NMIs can not handle page faults, even with fix ups.
+	 * The save user stack can (and often does) fault.
+	 */
+	if (unlikely(in_nmi()))
+		return;
+
 	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
@@ -1633,6 +1649,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 
 	ftrace_enable_cpu();
 
+	iter->leftover = 0;
 	for (p = iter; p && l < *pos; p = s_next(m, p, &l))
 		;
 
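
The final hunk clears iter->leftover in s_start() before seeking to the requested position, so that an entry only partially consumed by a previous read is not carried into a fresh iteration of the trace file. A small sketch of that pattern for a seq_file-style iterator, with made-up names (iter_sketch, start_sketch):

#include <stddef.h>

struct iter_sketch {
	long pos;       /* current position in the trace */
	int  leftover;  /* nonzero: last entry was only partially emitted */
};

/* Sketch of the start() callback: when the walk restarts, any
 * half-consumed entry from an earlier read is dropped before seeking
 * forward to *pos, as the s_next() loop above does. */
static void *start_sketch(struct iter_sketch *it, long *pos)
{
	it->leftover = 0;
	for (it->pos = 0; it->pos < *pos; it->pos++)
		;               /* advance one entry at a time */
	return it;
}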