path: root/kernel/trace/trace.c
author     Ingo Molnar <mingo@elte.hu>  2009-04-07 07:47:33 -0400
committer  Ingo Molnar <mingo@elte.hu>  2009-04-07 07:47:45 -0400
commit     93776a8ec746cf9d32c36e5a5b23d28d8be28826 (patch)
tree       6c472ae9f709246ee5268e1d71559d07839fb965  /kernel/trace/trace.c
parent     34886c8bc590f078d4c0b88f50d061326639198d (diff)
parent     d508afb437daee7cf07da085b635c44a4ebf9b38 (diff)
Merge branch 'linus' into tracing/core
Merge reason: update to upstream tracing facilities

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c  42
1 file changed, 32 insertions(+), 10 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 5d1a16cae376..2a81decf99bc 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -852,15 +852,25 @@ static void ftrace_trace_stack(struct trace_array *tr,
 static void ftrace_trace_userstack(struct trace_array *tr,
                                    unsigned long flags, int pc);
 
-void trace_buffer_unlock_commit(struct trace_array *tr,
-                               struct ring_buffer_event *event,
-                               unsigned long flags, int pc)
+static inline void __trace_buffer_unlock_commit(struct trace_array *tr,
+                               struct ring_buffer_event *event,
+                               unsigned long flags, int pc,
+                               int wake)
 {
         ring_buffer_unlock_commit(tr->buffer, event);
 
         ftrace_trace_stack(tr, flags, 6, pc);
         ftrace_trace_userstack(tr, flags, pc);
-        trace_wake_up();
+
+        if (wake)
+                trace_wake_up();
+}
+
+void trace_buffer_unlock_commit(struct trace_array *tr,
+                               struct ring_buffer_event *event,
+                               unsigned long flags, int pc)
+{
+        __trace_buffer_unlock_commit(tr, event, flags, pc, 1);
 }
 
 struct ring_buffer_event *
@@ -874,7 +884,13 @@ trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
 void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
                                         unsigned long flags, int pc)
 {
-        return trace_buffer_unlock_commit(&global_trace, event, flags, pc);
+        return __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 1);
+}
+
+void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
+                                        unsigned long flags, int pc)
+{
+        return __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 0);
 }
 
 void
@@ -900,7 +916,7 @@ trace_function(struct trace_array *tr,
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static void __trace_graph_entry(struct trace_array *tr,
+static int __trace_graph_entry(struct trace_array *tr,
                                 struct ftrace_graph_ent *trace,
                                 unsigned long flags,
                                 int pc)
@@ -909,15 +925,17 @@ static void __trace_graph_entry(struct trace_array *tr,
         struct ftrace_graph_ent_entry *entry;
 
         if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
-                return;
+                return 0;
 
         event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
                                           sizeof(*entry), flags, pc);
         if (!event)
-                return;
+                return 0;
         entry = ring_buffer_event_data(event);
         entry->graph_ent = *trace;
         ring_buffer_unlock_commit(global_trace.buffer, event);
+
+        return 1;
 }
 
 static void __trace_graph_return(struct trace_array *tr,
@@ -1138,6 +1156,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
         struct trace_array_cpu *data;
         unsigned long flags;
         long disabled;
+        int ret;
         int cpu;
         int pc;
 
@@ -1153,15 +1172,18 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
         disabled = atomic_inc_return(&data->disabled);
         if (likely(disabled == 1)) {
                 pc = preempt_count();
-                __trace_graph_entry(tr, trace, flags, pc);
+                ret = __trace_graph_entry(tr, trace, flags, pc);
+        } else {
+                ret = 0;
         }
         /* Only do the atomic if it is not already set */
         if (!test_tsk_trace_graph(current))
                 set_tsk_trace_graph(current);
+
         atomic_dec(&data->disabled);
         local_irq_restore(flags);
 
-        return 1;
+        return ret;
 }
 
 void trace_graph_return(struct ftrace_graph_ret *trace)
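For readers skimming the tracing-side changes pulled in here: the commit path is split into __trace_buffer_unlock_commit() with a wake flag, so events can be committed without calling trace_wake_up(), and __trace_graph_entry() now reports whether it actually recorded an entry. A minimal, hypothetical sketch of a caller using the new no-wake commit helper follows; TRACE_EXAMPLE, struct example_entry and example_record() are made up for illustration and are not part of this commit, while the reserve/commit calls follow the signatures visible in the diff above.

/*
 * Hypothetical sketch, not part of this commit: recording an event into
 * the global trace buffer without triggering trace_wake_up(), using the
 * helper added by this patch.
 */
#include "trace.h"

struct example_entry {
        struct trace_entry      ent;    /* common header, as in other entry types */
        unsigned long           value;
};

static void example_record(unsigned long value, unsigned long flags, int pc)
{
        struct ring_buffer_event *event;
        struct example_entry *entry;

        /* Reserve room in the global trace buffer. */
        event = trace_current_buffer_lock_reserve(TRACE_EXAMPLE,
                                                  sizeof(*entry), flags, pc);
        if (!event)
                return;         /* buffer full or tracing disabled */

        entry = ring_buffer_event_data(event);
        entry->value = value;

        /* Commit without waking readers blocked on the trace buffer. */
        trace_nowake_buffer_unlock_commit(event, flags, pc);
}

The point of the wake flag, presumably, is to let events be committed from contexts where waking a waiting reader would be unsafe or recursive, while the default trace_buffer_unlock_commit() keeps its old waking behaviour.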