 kernel/trace/trace.c                 | 35 +++++++++++++++++++++++++++--------
 kernel/trace/trace.h                 |  3 +++
 kernel/trace/trace_branch.c          |  2 +-
 kernel/trace/trace_functions_graph.c |  4 ++--
 4 files changed, 33 insertions(+), 11 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b90a827a4641..88111b08b2c1 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -78,6 +78,13 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
 }
 
 /*
+ * To prevent the comm cache from being overwritten when no
+ * tracing is active, only save the comm when a trace event
+ * occurred.
+ */
+static DEFINE_PER_CPU(bool, trace_cmdline_save);
+
+/*
  * Kill all tracing for good (never come back).
  * It is initialized to 1 but will turn to zero if the initialization
  * of the tracer is successful. But that is the only place that sets
@@ -1135,6 +1142,11 @@ void tracing_record_cmdline(struct task_struct *tsk)
 	    !tracing_is_on())
 		return;
 
+	if (!__this_cpu_read(trace_cmdline_save))
+		return;
+
+	__this_cpu_write(trace_cmdline_save, false);
+
 	trace_save_cmdline(tsk);
 }
 
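Note: the two hunks above form a set-then-consume handshake. __buffer_unlock_commit() (introduced in the next hunk) raises the per-CPU trace_cmdline_save flag each time an event is committed, and tracing_record_cmdline() consumes the flag before caching the comm, so at most one comm save happens per recorded event on each CPU. A minimal standalone C sketch of the pattern, with the kernel's per-CPU primitives replaced by a plain array indexed by a hypothetical CPU id (an illustration only, not the kernel implementation):

	#include <stdbool.h>
	#include <stdio.h>

	#define NR_CPUS 4

	/* stand-in for DEFINE_PER_CPU(bool, trace_cmdline_save) */
	static bool cmdline_save[NR_CPUS];

	/* producer: an event was committed on this cpu */
	static void commit_event(int cpu)
	{
		cmdline_save[cpu] = true;	/* __this_cpu_write(..., true) */
	}

	/* consumer: cache the comm only if an event arrived since the last save */
	static void record_cmdline(int cpu, const char *comm)
	{
		if (!cmdline_save[cpu])		/* __this_cpu_read(...) */
			return;
		cmdline_save[cpu] = false;	/* consume the flag */
		printf("cpu%d: caching comm '%s'\n", cpu, comm);
	}

	int main(void)
	{
		record_cmdline(0, "swapper/0");	/* no event yet: skipped */
		commit_event(0);
		record_cmdline(0, "bash");	/* flag set: cached */
		record_cmdline(0, "bash");	/* flag consumed: skipped again */
		return 0;
	}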
@@ -1178,13 +1190,20 @@ trace_buffer_lock_reserve(struct ring_buffer *buffer,
 	return event;
 }
 
+void
+__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
+{
+	__this_cpu_write(trace_cmdline_save, true);
+	ring_buffer_unlock_commit(buffer, event);
+}
+
 static inline void
 __trace_buffer_unlock_commit(struct ring_buffer *buffer,
 			     struct ring_buffer_event *event,
 			     unsigned long flags, int pc,
 			     int wake)
 {
-	ring_buffer_unlock_commit(buffer, event);
+	__buffer_unlock_commit(buffer, event);
 
 	ftrace_trace_stack(buffer, flags, 6, pc);
 	ftrace_trace_userstack(buffer, flags, pc);
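Note: __buffer_unlock_commit() deliberately keeps the same (buffer, event) call shape as ring_buffer_unlock_commit(), so every remaining hunk in this patch is a mechanical one-token substitution at the call site, and the per-CPU bookkeeping lives in exactly one place. A hedged sketch of that drop-in wrapper pattern (all names here are hypothetical, not from the kernel):

	#include <stdio.h>

	static int committed_events;	/* hypothetical cross-cutting state */

	/* the underlying commit, analogous to ring_buffer_unlock_commit() */
	static void raw_commit(int slot)
	{
		printf("committed slot %d\n", slot);
	}

	/* drop-in wrapper: identical call shape, plus the shared bookkeeping */
	static void commit(int slot)
	{
		committed_events++;	/* plays the role of the per-CPU flag write */
		raw_commit(slot);
	}

	int main(void)
	{
		commit(1);
		commit(2);
		printf("%d events committed\n", committed_events);
		return 0;
	}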
@@ -1232,7 +1251,7 @@ void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
 					    unsigned long flags, int pc,
 					    struct pt_regs *regs)
 {
-	ring_buffer_unlock_commit(buffer, event);
+	__buffer_unlock_commit(buffer, event);
 
 	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
 	ftrace_trace_userstack(buffer, flags, pc);
@@ -1269,7 +1288,7 @@ trace_function(struct trace_array *tr,
 	entry->parent_ip	= parent_ip;
 
 	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 }
 
 void
@@ -1362,7 +1381,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 	entry->size = trace.nr_entries;
 
 	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 
  out:
 	/* Again, don't let gcc optimize things here */
@@ -1458,7 +1477,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 
 	save_stack_trace_user(&trace);
 	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 
  out_drop_count:
 	__this_cpu_dec(user_stack_count);
@@ -1653,7 +1672,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
 	if (!filter_check_discard(call, entry, buffer, event)) {
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
 
@@ -1724,7 +1743,7 @@ int trace_array_vprintk(struct trace_array *tr,
 	memcpy(&entry->buf, tbuffer, len);
 	entry->buf[len] = '\0';
 	if (!filter_check_discard(call, entry, buffer, event)) {
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
  out:
@@ -3993,7 +4012,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	} else
 		entry->buf[cnt] = '\0';
 
-	ring_buffer_unlock_commit(buffer, event);
+	__buffer_unlock_commit(buffer, event);
 
 	written = cnt;
 
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 7824a55bd3fc..839ae003a053 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -359,6 +359,9 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 					  int *ent_cpu, u64 *ent_ts);
 
+void __buffer_unlock_commit(struct ring_buffer *buffer,
+			    struct ring_buffer_event *event);
+
 int trace_empty(struct trace_iterator *iter);
 
 void *trace_find_next_entry_inc(struct trace_iterator *iter);
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index bd3e0eef4eaa..95e96842ed29 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -77,7 +77,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	entry->correct = val == expect;
 
 	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 
  out:
 	atomic_dec(&tr->data[cpu]->disabled);
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index a84b55879bc4..4edb4b74eb7e 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -223,7 +223,7 @@ int __trace_graph_entry(struct trace_array *tr,
 	entry = ring_buffer_event_data(event);
 	entry->graph_ent = *trace;
 	if (!filter_current_check_discard(buffer, call, entry, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 
 	return 1;
 }
@@ -327,7 +327,7 @@ void __trace_graph_return(struct trace_array *tr,
 	entry = ring_buffer_event_data(event);
 	entry->ret = *trace;
 	if (!filter_current_check_discard(buffer, call, entry, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 }
 
 void trace_graph_return(struct ftrace_graph_ret *trace)
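Note: trace_branch.c and trace_functions_graph.c pick up the wrapper through the new prototype in trace.h, so branch and function-graph events also re-arm the comm cache. The flag can be a plain (non-atomic) bool because each CPU only ever reads and writes its own copy via __this_cpu_read()/__this_cpu_write(). A rough user-space analogue of that property using C11 thread-local storage (an analogy for illustration, not the kernel mechanism):

	#include <stdbool.h>
	#include <stdio.h>
	#include <threads.h>

	/* per-thread stand-in for the per-CPU flag: no lock needed because
	 * each thread (like each CPU) touches only its own copy */
	static thread_local bool cmdline_save;

	static int worker(void *arg)
	{
		(void)arg;
		cmdline_save = true;		/* commit path sets the flag */
		if (cmdline_save) {		/* record path consumes it */
			cmdline_save = false;
			puts("comm cached");
		}
		return 0;
	}

	int main(void)
	{
		thrd_t t;
		thrd_create(&t, worker, NULL);
		thrd_join(t, NULL);
		return 0;
	}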