path: root/kernel/trace/trace.c
author		Steven Rostedt <srostedt@redhat.com>	2009-09-02 14:17:06 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2009-09-04 18:59:39 -0400
commit		e77405ad80f53966524b5c31244e13fbbbecbd84 (patch)
tree		65c05f9e1573e9958e52bb72655e00c8592aacd2 /kernel/trace/trace.c
parent		f633903af2ceb0cec07d45e499a072b6593d0ed1 (diff)
tracing: pass around ring buffer instead of tracer
The latency tracers (irqsoff and wakeup) can swap trace buffers on the fly. If an event is being recorded and has reserved data on one of the buffers, and the latency tracer swaps the global buffer with the max buffer, the event may commit its data to the wrong buffer.

This patch changes the trace recording API to receive the buffer that was used to reserve a commit. That same buffer can then be passed in to the commit, so the commit always lands on the buffer the data was reserved on.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
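To make the race concrete, here is a minimal sketch of the new calling convention (hypothetical caller code, not part of this patch; the payload is elided to a single field). The reserve hands back the buffer it actually used, and the caller commits to that same buffer, so a buffer swap between reserve and commit can no longer redirect the commit:

	struct ring_buffer *buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* Old API: the commit re-read global_trace.buffer and could hit
	 * the swapped-in buffer.  New API: reserve returns the buffer. */
	event = trace_current_buffer_lock_reserve(&buffer, TRACE_FN,
						  sizeof(*entry), flags, pc);
	if (event) {
		entry = ring_buffer_event_data(event);
		entry->ip = ip;		/* fill in the event's payload */
		/* commit goes to the buffer the data was reserved on */
		trace_current_buffer_unlock_commit(buffer, event, flags, pc);
	}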
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	117
1 file changed, 67 insertions, 50 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0418e2650d41..0c61836e30e7 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -169,10 +169,11 @@ static struct trace_array global_trace;
 
 static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
 
-int filter_current_check_discard(struct ftrace_event_call *call, void *rec,
+int filter_current_check_discard(struct ring_buffer *buffer,
+				 struct ftrace_event_call *call, void *rec,
 				 struct ring_buffer_event *event)
 {
-	return filter_check_discard(call, rec, global_trace.buffer, event);
+	return filter_check_discard(call, rec, buffer, event);
 }
 EXPORT_SYMBOL_GPL(filter_current_check_discard);
 
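With the buffer threaded through, the filter check also discards from the buffer the event was actually reserved on rather than from global_trace.buffer. A typical updated call site follows this shape (a sketch of the pattern the rest of this series applies to event probes, not a verbatim excerpt):

	if (!filter_current_check_discard(buffer, event_call, entry, event))
		trace_nowake_buffer_unlock_commit(buffer, event,
						  irq_flags, pc);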
@@ -887,14 +888,15 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
887} 888}
888EXPORT_SYMBOL_GPL(tracing_generic_entry_update); 889EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
889 890
890struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr, 891struct ring_buffer_event *
891 int type, 892trace_buffer_lock_reserve(struct ring_buffer *buffer,
892 unsigned long len, 893 int type,
893 unsigned long flags, int pc) 894 unsigned long len,
895 unsigned long flags, int pc)
894{ 896{
895 struct ring_buffer_event *event; 897 struct ring_buffer_event *event;
896 898
897 event = ring_buffer_lock_reserve(tr->buffer, len); 899 event = ring_buffer_lock_reserve(buffer, len);
898 if (event != NULL) { 900 if (event != NULL) {
899 struct trace_entry *ent = ring_buffer_event_data(event); 901 struct trace_entry *ent = ring_buffer_event_data(event);
900 902
@@ -905,53 +907,59 @@ struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
 	return event;
 }
 
-static inline void __trace_buffer_unlock_commit(struct trace_array *tr,
-						struct ring_buffer_event *event,
-						unsigned long flags, int pc,
-						int wake)
+static inline void
+__trace_buffer_unlock_commit(struct ring_buffer *buffer,
+			     struct ring_buffer_event *event,
+			     unsigned long flags, int pc,
+			     int wake)
 {
-	ring_buffer_unlock_commit(tr->buffer, event);
+	ring_buffer_unlock_commit(buffer, event);
 
-	ftrace_trace_stack(tr, flags, 6, pc);
-	ftrace_trace_userstack(tr, flags, pc);
+	ftrace_trace_stack(buffer, flags, 6, pc);
+	ftrace_trace_userstack(buffer, flags, pc);
 
 	if (wake)
 		trace_wake_up();
 }
 
-void trace_buffer_unlock_commit(struct trace_array *tr,
+void trace_buffer_unlock_commit(struct ring_buffer *buffer,
 				struct ring_buffer_event *event,
 				unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(tr, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
 }
 
 struct ring_buffer_event *
-trace_current_buffer_lock_reserve(int type, unsigned long len,
+trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
+				  int type, unsigned long len,
 				  unsigned long flags, int pc)
 {
-	return trace_buffer_lock_reserve(&global_trace,
+	*current_rb = global_trace.buffer;
+	return trace_buffer_lock_reserve(*current_rb,
 					 type, len, flags, pc);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
 
-void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
+void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
+					struct ring_buffer_event *event,
 					unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(&global_trace, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
 
-void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
-				       unsigned long flags, int pc)
+void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
+				       struct ring_buffer_event *event,
+				       unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(&global_trace, event, flags, pc, 0);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
 }
 EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
 
-void trace_current_buffer_discard_commit(struct ring_buffer_event *event)
+void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
+					 struct ring_buffer_event *event)
 {
-	ring_buffer_discard_commit(global_trace.buffer, event);
+	ring_buffer_discard_commit(buffer, event);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
 
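Note the shape of trace_current_buffer_lock_reserve() above: because the "current" buffer is exactly what a latency tracer may swap, the function snapshots global_trace.buffer into *current_rb before reserving. A caller that later decides to drop the event must discard on that same buffer, roughly like this (hypothetical caller, error paths elided):

	struct ring_buffer *buffer;
	struct ring_buffer_event *event;

	event = trace_current_buffer_lock_reserve(&buffer, TRACE_PRINT,
						  size, irq_flags, pc);
	if (!event)
		return;
	/* ... the entry turns out to be unwanted ... */
	trace_current_buffer_discard_commit(buffer, event);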
@@ -961,6 +969,7 @@ trace_function(struct trace_array *tr,
 	       int pc)
 {
 	struct ftrace_event_call *call = &event_function;
+	struct ring_buffer *buffer = tr->buffer;
 	struct ring_buffer_event *event;
 	struct ftrace_entry *entry;
 
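trace_function() now reads tr->buffer exactly once into a local; the reserve, the filter check, and the commit below all use that local, so a swap of tr->buffer midway cannot split the operation across two buffers. The same snapshot-once pattern recurs in __ftrace_trace_stack(), ftrace_trace_special(), trace_vbprintk(), and trace_vprintk(). Condensed into a sketch (hypothetical helper name, details elided):

	static void example_record(struct trace_array *tr, unsigned long ip,
				   unsigned long flags, int pc)
	{
		struct ring_buffer *buffer = tr->buffer;  /* single read */
		struct ring_buffer_event *event;
		struct ftrace_entry *entry;

		event = trace_buffer_lock_reserve(buffer, TRACE_FN,
						  sizeof(*entry), flags, pc);
		if (!event)
			return;
		entry = ring_buffer_event_data(event);
		entry->ip = ip;
		ring_buffer_unlock_commit(buffer, event); /* same buffer */
	}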
@@ -968,7 +977,7 @@ trace_function(struct trace_array *tr,
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
+	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
 					  flags, pc);
 	if (!event)
 		return;
@@ -976,8 +985,8 @@ trace_function(struct trace_array *tr,
 	entry->ip = ip;
 	entry->parent_ip = parent_ip;
 
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 }
 
 void
@@ -990,7 +999,7 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
 }
 
 #ifdef CONFIG_STACKTRACE
-static void __ftrace_trace_stack(struct trace_array *tr,
+static void __ftrace_trace_stack(struct ring_buffer *buffer,
 				 unsigned long flags,
 				 int skip, int pc)
 {
@@ -999,7 +1008,7 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 	struct stack_entry *entry;
 	struct stack_trace trace;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_STACK,
+	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
@@ -1012,26 +1021,27 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 	trace.entries = entry->caller;
 
 	save_stack_trace(&trace);
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 }
 
-void ftrace_trace_stack(struct trace_array *tr, unsigned long flags, int skip,
-			int pc)
+void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
+			int skip, int pc)
 {
 	if (!(trace_flags & TRACE_ITER_STACKTRACE))
 		return;
 
-	__ftrace_trace_stack(tr, flags, skip, pc);
+	__ftrace_trace_stack(buffer, flags, skip, pc);
 }
 
 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
 		   int pc)
 {
-	__ftrace_trace_stack(tr, flags, skip, pc);
+	__ftrace_trace_stack(tr->buffer, flags, skip, pc);
 }
 
-void ftrace_trace_userstack(struct trace_array *tr, unsigned long flags, int pc)
+void
+ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
 	struct ftrace_event_call *call = &event_user_stack;
 	struct ring_buffer_event *event;
@@ -1041,7 +1051,7 @@ void ftrace_trace_userstack(struct trace_array *tr, unsigned long flags, int pc)
 	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
 		return;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_USER_STACK,
+	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
@@ -1055,8 +1065,8 @@ void ftrace_trace_userstack(struct trace_array *tr, unsigned long flags, int pc)
 	trace.entries = entry->caller;
 
 	save_stack_trace_user(&trace);
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 }
 
 #ifdef UNUSED
@@ -1075,9 +1085,10 @@ ftrace_trace_special(void *__tr,
 {
 	struct ring_buffer_event *event;
 	struct trace_array *tr = __tr;
+	struct ring_buffer *buffer = tr->buffer;
 	struct special_entry *entry;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_SPECIAL,
+	event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL,
 					  sizeof(*entry), 0, pc);
 	if (!event)
 		return;
@@ -1085,7 +1096,7 @@ ftrace_trace_special(void *__tr,
 	entry->arg1 = arg1;
 	entry->arg2 = arg2;
 	entry->arg3 = arg3;
-	trace_buffer_unlock_commit(tr, event, 0, pc);
+	trace_buffer_unlock_commit(buffer, event, 0, pc);
 }
 
 void
@@ -1131,6 +1142,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
 	struct ftrace_event_call *call = &event_bprint;
 	struct ring_buffer_event *event;
+	struct ring_buffer *buffer;
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
 	struct bprint_entry *entry;
@@ -1163,7 +1175,9 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 		goto out_unlock;
 
 	size = sizeof(*entry) + sizeof(u32) * len;
-	event = trace_buffer_lock_reserve(tr, TRACE_BPRINT, size, flags, pc);
+	buffer = tr->buffer;
+	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
+					  flags, pc);
 	if (!event)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
@@ -1171,8 +1185,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	entry->fmt = fmt;
 
 	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 
 out_unlock:
 	__raw_spin_unlock(&trace_buf_lock);
@@ -1194,6 +1208,7 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 
 	struct ftrace_event_call *call = &event_print;
 	struct ring_buffer_event *event;
+	struct ring_buffer *buffer;
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
 	int cpu, len = 0, size, pc;
@@ -1222,7 +1237,9 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	trace_buf[len] = 0;
 
 	size = sizeof(*entry) + len + 1;
-	event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc);
+	buffer = tr->buffer;
+	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+					  irq_flags, pc);
 	if (!event)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
@@ -1230,8 +1247,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 
 	memcpy(&entry->buf, trace_buf, len);
 	entry->buf[len] = 0;
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 
  out_unlock:
 	__raw_spin_unlock(&trace_buf_lock);
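Taken together, trace_vbprintk() and trace_vprintk() end up with the flow sketched below (locking and error handling elided; a condensed paraphrase, not a verbatim excerpt): the buffer pointer is captured once before the reserve, and that same pointer feeds the filter check and the commit, which is the whole point of the patch.

	buffer = tr->buffer;			/* capture once */
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, pc);
	if (event) {
		entry = ring_buffer_event_data(event);
		memcpy(&entry->buf, trace_buf, len);
		entry->buf[len] = 0;
		if (!filter_check_discard(call, entry, buffer, event))
			ring_buffer_unlock_commit(buffer, event);
	}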