Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c  | 461
1 file changed, 285 insertions(+), 176 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index cda81ec58d9f..8c358395d338 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -17,6 +17,7 @@
 #include <linux/writeback.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
+#include <linux/smp_lock.h>
 #include <linux/notifier.h>
 #include <linux/irqflags.h>
 #include <linux/debugfs.h>
@@ -171,6 +172,13 @@ static struct trace_array global_trace;
 
 static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
 
+int filter_current_check_discard(struct ftrace_event_call *call, void *rec,
+                                 struct ring_buffer_event *event)
+{
+        return filter_check_discard(call, rec, global_trace.buffer, event);
+}
+EXPORT_SYMBOL_GPL(filter_current_check_discard);
+
 cycle_t ftrace_now(int cpu)
 {
         u64 ts;
@@ -255,7 +263,8 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 
 /* trace_flags holds trace_options default values */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
-        TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME;
+        TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
+        TRACE_ITER_GRAPH_TIME;
 
 /**
  * trace_wake_up - wake up tasks waiting for trace input
@@ -276,13 +285,12 @@ void trace_wake_up(void)
 static int __init set_buf_size(char *str)
 {
         unsigned long buf_size;
-        int ret;
 
         if (!str)
                 return 0;
-        ret = strict_strtoul(str, 0, &buf_size);
+        buf_size = memparse(str, &str);
         /* nr_entries can not be zero */
-        if (ret < 0 || buf_size == 0)
+        if (buf_size == 0)
                 return 0;
         trace_buf_size = buf_size;
         return 1;
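
Note: memparse(), declared in <linux/kernel.h>, is what lets trace_buf_size= accept K/M/G suffixes on the kernel command line. A minimal sketch of the idea (it mirrors the helper in lib/cmdline.c, but is not the patched code itself):

        /* Parse "<number>[KMG]" into bytes, advancing *retptr past the suffix. */
        unsigned long long parse_size(const char *ptr, char **retptr)
        {
                unsigned long long ret = simple_strtoull(ptr, retptr, 0);

                switch (**retptr) {
                case 'G': case 'g':
                        ret <<= 10;
                        /* fall through */
                case 'M': case 'm':
                        ret <<= 10;
                        /* fall through */
                case 'K': case 'k':
                        ret <<= 10;
                        (*retptr)++;
                default:
                        break;
                }
                return ret;
        }

With this, trace_buf_size=1441792 and trace_buf_size=1408K request the same buffer.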
@@ -317,6 +325,7 @@ static const char *trace_options[] = {
         "latency-format",
         "global-clock",
         "sleep-time",
+        "graph-time",
         NULL
 };
 
@@ -335,7 +344,7 @@ static raw_spinlock_t ftrace_max_lock =
 /*
  * Copy the new maximum trace into the separate maximum-trace
  * structure. (this way the maximum trace is permanently saved,
- * for later retrieval via /debugfs/tracing/latency_trace)
+ * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
  */
 static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
@@ -402,17 +411,6 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
         return cnt;
 }
 
-static void
-trace_print_seq(struct seq_file *m, struct trace_seq *s)
-{
-        int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
-
-        s->buffer[len] = 0;
-        seq_puts(m, s->buffer);
-
-        trace_seq_init(s);
-}
-
 /**
  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
  * @tr: tracer
@@ -641,6 +639,16 @@ void tracing_reset_online_cpus(struct trace_array *tr)
                 tracing_reset(tr, cpu);
 }
 
+void tracing_reset_current(int cpu)
+{
+        tracing_reset(&global_trace, cpu);
+}
+
+void tracing_reset_current_online_cpus(void)
+{
+        tracing_reset_online_cpus(&global_trace);
+}
+
 #define SAVED_CMDLINES 128
 #define NO_CMDLINE_MAP UINT_MAX
 static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
@@ -800,6 +808,7 @@ void trace_find_cmdline(int pid, char comm[])
                 return;
         }
 
+        preempt_disable();
         __raw_spin_lock(&trace_cmdline_lock);
         map = map_pid_to_cmdline[pid];
         if (map != NO_CMDLINE_MAP)
@@ -808,6 +817,7 @@ void trace_find_cmdline(int pid, char comm[])
                 strcpy(comm, "<...>");
 
         __raw_spin_unlock(&trace_cmdline_lock);
+        preempt_enable();
 }
 
 void tracing_record_cmdline(struct task_struct *tsk)
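
Note: __raw_spin_lock() does not disable preemption the way spin_lock() does, so without the preempt_disable()/preempt_enable() pair added in the two hunks above, the holder of trace_cmdline_lock could be preempted and a second task on the same CPU could spin on the lock forever. The pattern, in outline:

        preempt_disable();              /* __raw_spin_lock() won't do this for us */
        __raw_spin_lock(&trace_cmdline_lock);
        /* ... look up map_pid_to_cmdline[] ... */
        __raw_spin_unlock(&trace_cmdline_lock);
        preempt_enable();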
@@ -838,9 +848,10 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
                 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
                 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
 }
+EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
 struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
-                                                    unsigned char type,
+                                                    int type,
                                                     unsigned long len,
                                                     unsigned long flags, int pc)
 {
@@ -883,30 +894,40 @@ void trace_buffer_unlock_commit(struct trace_array *tr,
 }
 
 struct ring_buffer_event *
-trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
+trace_current_buffer_lock_reserve(int type, unsigned long len,
                                   unsigned long flags, int pc)
 {
         return trace_buffer_lock_reserve(&global_trace,
                                          type, len, flags, pc);
 }
+EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
 
 void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
                                         unsigned long flags, int pc)
 {
-        return __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 1);
+        __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 1);
 }
+EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
 
 void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
                                        unsigned long flags, int pc)
 {
-        return __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 0);
+        __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 0);
+}
+EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
+
+void trace_current_buffer_discard_commit(struct ring_buffer_event *event)
+{
+        ring_buffer_discard_commit(global_trace.buffer, event);
 }
+EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
 
 void
 trace_function(struct trace_array *tr,
                unsigned long ip, unsigned long parent_ip, unsigned long flags,
                int pc)
 {
+        struct ftrace_event_call *call = &event_function;
         struct ring_buffer_event *event;
         struct ftrace_entry *entry;
 
@@ -921,7 +942,9 @@ trace_function(struct trace_array *tr,
         entry = ring_buffer_event_data(event);
         entry->ip = ip;
         entry->parent_ip = parent_ip;
-        ring_buffer_unlock_commit(tr->buffer, event);
+
+        if (!filter_check_discard(call, entry, tr->buffer, event))
+                ring_buffer_unlock_commit(tr->buffer, event);
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
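
Note: the hunk above establishes the shape that most of the remaining hunks repeat: reserve an event, fill it in, then let the event filter veto it before commit. In outline (a sketch built from the APIs visible in this patch, not new code):

        event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        /* ... fill in the entry fields ... */
        if (!filter_check_discard(call, entry, tr->buffer, event))
                ring_buffer_unlock_commit(tr->buffer, event);
        /* on a filter match the event is discarded instead of committed */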
@@ -930,6 +953,7 @@ static int __trace_graph_entry(struct trace_array *tr,
                                 unsigned long flags,
                                 int pc)
 {
+        struct ftrace_event_call *call = &event_funcgraph_entry;
         struct ring_buffer_event *event;
         struct ftrace_graph_ent_entry *entry;
 
@@ -942,7 +966,8 @@ static int __trace_graph_entry(struct trace_array *tr,
                 return 0;
         entry = ring_buffer_event_data(event);
         entry->graph_ent = *trace;
-        ring_buffer_unlock_commit(global_trace.buffer, event);
+        if (!filter_current_check_discard(call, entry, event))
+                ring_buffer_unlock_commit(global_trace.buffer, event);
 
         return 1;
 }
@@ -952,6 +977,7 @@ static void __trace_graph_return(struct trace_array *tr,
                                 unsigned long flags,
                                 int pc)
 {
+        struct ftrace_event_call *call = &event_funcgraph_exit;
         struct ring_buffer_event *event;
         struct ftrace_graph_ret_entry *entry;
 
@@ -964,7 +990,8 @@ static void __trace_graph_return(struct trace_array *tr,
                 return;
         entry = ring_buffer_event_data(event);
         entry->ret = *trace;
-        ring_buffer_unlock_commit(global_trace.buffer, event);
+        if (!filter_current_check_discard(call, entry, event))
+                ring_buffer_unlock_commit(global_trace.buffer, event);
 }
 #endif
 
@@ -982,6 +1009,7 @@ static void __ftrace_trace_stack(struct trace_array *tr,
                                  int skip, int pc)
 {
 #ifdef CONFIG_STACKTRACE
+        struct ftrace_event_call *call = &event_kernel_stack;
         struct ring_buffer_event *event;
         struct stack_entry *entry;
         struct stack_trace trace;
@@ -999,7 +1027,8 @@ static void __ftrace_trace_stack(struct trace_array *tr,
         trace.entries = entry->caller;
 
         save_stack_trace(&trace);
-        ring_buffer_unlock_commit(tr->buffer, event);
+        if (!filter_check_discard(call, entry, tr->buffer, event))
+                ring_buffer_unlock_commit(tr->buffer, event);
 #endif
 }
 
@@ -1024,6 +1053,7 @@ static void ftrace_trace_userstack(struct trace_array *tr,
                                    unsigned long flags, int pc)
 {
 #ifdef CONFIG_STACKTRACE
+        struct ftrace_event_call *call = &event_user_stack;
         struct ring_buffer_event *event;
         struct userstack_entry *entry;
         struct stack_trace trace;
@@ -1045,7 +1075,8 @@ static void ftrace_trace_userstack(struct trace_array *tr,
         trace.entries = entry->caller;
 
         save_stack_trace_user(&trace);
-        ring_buffer_unlock_commit(tr->buffer, event);
+        if (!filter_check_discard(call, entry, tr->buffer, event))
+                ring_buffer_unlock_commit(tr->buffer, event);
 #endif
 }
 
@@ -1089,6 +1120,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
                            struct task_struct *next,
                            unsigned long flags, int pc)
 {
+        struct ftrace_event_call *call = &event_context_switch;
         struct ring_buffer_event *event;
         struct ctx_switch_entry *entry;
 
@@ -1104,7 +1136,9 @@ tracing_sched_switch_trace(struct trace_array *tr,
         entry->next_prio = next->prio;
         entry->next_state = next->state;
         entry->next_cpu = task_cpu(next);
-        trace_buffer_unlock_commit(tr, event, flags, pc);
+
+        if (!filter_check_discard(call, entry, tr->buffer, event))
+                trace_buffer_unlock_commit(tr, event, flags, pc);
 }
 
 void
@@ -1113,6 +1147,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
                            struct task_struct *curr,
                            unsigned long flags, int pc)
 {
+        struct ftrace_event_call *call = &event_wakeup;
         struct ring_buffer_event *event;
         struct ctx_switch_entry *entry;
 
@@ -1129,7 +1164,8 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
         entry->next_state = wakee->state;
         entry->next_cpu = task_cpu(wakee);
 
-        ring_buffer_unlock_commit(tr->buffer, event);
+        if (!filter_check_discard(call, entry, tr->buffer, event))
+                ring_buffer_unlock_commit(tr->buffer, event);
         ftrace_trace_stack(tr, flags, 6, pc);
         ftrace_trace_userstack(tr, flags, pc);
 }
@@ -1230,11 +1266,13 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
                 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
         static u32 trace_buf[TRACE_BUF_SIZE];
 
+        struct ftrace_event_call *call = &event_bprint;
         struct ring_buffer_event *event;
         struct trace_array *tr = &global_trace;
         struct trace_array_cpu *data;
         struct bprint_entry *entry;
         unsigned long flags;
+        int disable;
         int resched;
         int cpu, len = 0, size, pc;
 
@@ -1249,7 +1287,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
         cpu = raw_smp_processor_id();
         data = tr->data[cpu];
 
-        if (unlikely(atomic_read(&data->disabled)))
+        disable = atomic_inc_return(&data->disabled);
+        if (unlikely(disable != 1))
                 goto out;
 
         /* Lockdep uses trace_printk for lock tracing */
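
Note: the old atomic_read() merely observed the per-cpu "disabled" flag; atomic_inc_return() turns it into a recursion count, so a trace_printk() that re-enters on the same CPU (lockdep can do this, per the comment below) backs out instead of deadlocking on trace_buf_lock. The guard, in outline:

        disable = atomic_inc_return(&data->disabled);
        if (unlikely(disable != 1))     /* already inside trace_vbprintk() on this CPU */
                goto out;
        /* ... take trace_buf_lock and write the event ... */
out:
        atomic_dec_return(&data->disabled);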
@@ -1269,13 +1308,15 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
         entry->fmt = fmt;
 
         memcpy(entry->buf, trace_buf, sizeof(u32) * len);
-        ring_buffer_unlock_commit(tr->buffer, event);
+        if (!filter_check_discard(call, entry, tr->buffer, event))
+                ring_buffer_unlock_commit(tr->buffer, event);
 
 out_unlock:
         __raw_spin_unlock(&trace_buf_lock);
         local_irq_restore(flags);
 
 out:
+        atomic_dec_return(&data->disabled);
         ftrace_preempt_enable(resched);
         unpause_graph_tracing();
 
@@ -1288,12 +1329,14 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
         static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
         static char trace_buf[TRACE_BUF_SIZE];
 
+        struct ftrace_event_call *call = &event_print;
         struct ring_buffer_event *event;
         struct trace_array *tr = &global_trace;
         struct trace_array_cpu *data;
         int cpu, len = 0, size, pc;
         struct print_entry *entry;
         unsigned long irq_flags;
+        int disable;
 
         if (tracing_disabled || tracing_selftest_running)
                 return 0;
@@ -1303,7 +1346,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
         cpu = raw_smp_processor_id();
         data = tr->data[cpu];
 
-        if (unlikely(atomic_read(&data->disabled)))
+        disable = atomic_inc_return(&data->disabled);
+        if (unlikely(disable != 1))
                 goto out;
 
         pause_graph_tracing();
@@ -1323,13 +1367,15 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 
         memcpy(&entry->buf, trace_buf, len);
         entry->buf[len] = 0;
-        ring_buffer_unlock_commit(tr->buffer, event);
+        if (!filter_check_discard(call, entry, tr->buffer, event))
+                ring_buffer_unlock_commit(tr->buffer, event);
 
 out_unlock:
         __raw_spin_unlock(&trace_buf_lock);
         raw_local_irq_restore(irq_flags);
         unpause_graph_tracing();
 out:
+        atomic_dec_return(&data->disabled);
         preempt_enable_notrace();
 
         return len;
@@ -1526,12 +1572,14 @@ static void *s_start(struct seq_file *m, loff_t *pos)
                 p = s_next(m, p, &l);
         }
 
+        trace_event_read_lock();
         return p;
 }
 
 static void s_stop(struct seq_file *m, void *p)
 {
         atomic_dec(&trace_record_cmdline_disabled);
+        trace_event_read_unlock();
 }
 
 static void print_lat_help_header(struct seq_file *m)
@@ -1774,6 +1822,7 @@ static int trace_empty(struct trace_iterator *iter)
         return 1;
 }
 
+/*  Called with trace_event_read_lock() held. */
 static enum print_line_t print_trace_line(struct trace_iterator *iter)
 {
         enum print_line_t ret;
@@ -1983,7 +2032,7 @@ static int tracing_open(struct inode *inode, struct file *file)
 
         /* If this file was open for write, then erase contents */
         if ((file->f_mode & FMODE_WRITE) &&
-            !(file->f_flags & O_APPEND)) {
+            (file->f_flags & O_TRUNC)) {
                 long cpu = (long) inode->i_private;
 
                 if (cpu == TRACE_PIPE_ALL_CPU)
@@ -2005,25 +2054,23 @@ static int tracing_open(struct inode *inode, struct file *file)
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
-        struct tracer *t = m->private;
+        struct tracer *t = v;
 
         (*pos)++;
 
         if (t)
                 t = t->next;
 
-        m->private = t;
-
         return t;
 }
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
-        struct tracer *t = m->private;
+        struct tracer *t;
         loff_t l = 0;
 
         mutex_lock(&trace_types_lock);
-        for (; t && l < *pos; t = t_next(m, t, &l))
+        for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
                 ;
 
         return t;
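
Note: the rewrite brings t_start()/t_next() in line with the seq_file contract: ->start() maps *pos to an element each time it is called, instead of trusting stale state in m->private, and ->next() derives the next element from v alone. A generic sketch of that contract, with hypothetical x_* names:

        static void *x_start(struct seq_file *m, loff_t *pos)
        {
                void *v;
                loff_t l = 0;

                mutex_lock(&x_lock);            /* held until ->stop() runs */
                for (v = x_list_head; v && l < *pos; v = x_next(m, v, &l))
                        ;
                return v;                       /* NULL terminates the walk */
        }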
@@ -2059,18 +2106,10 @@ static struct seq_operations show_traces_seq_ops = {
 
 static int show_traces_open(struct inode *inode, struct file *file)
 {
-        int ret;
-
         if (tracing_disabled)
                 return -ENODEV;
 
-        ret = seq_open(file, &show_traces_seq_ops);
-        if (!ret) {
-                struct seq_file *m = file->private_data;
-                m->private = trace_types;
-        }
-
-        return ret;
+        return seq_open(file, &show_traces_seq_ops);
 }
 
 static ssize_t
@@ -2143,11 +2182,12 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
         if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
                 return -ENOMEM;
 
-        mutex_lock(&tracing_cpumask_update_lock);
         err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
         if (err)
                 goto err_unlock;
 
+        mutex_lock(&tracing_cpumask_update_lock);
+
         local_irq_disable();
         __raw_spin_lock(&ftrace_max_lock);
         for_each_tracing_cpu(cpu) {
@@ -2175,8 +2215,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
         return count;
 
 err_unlock:
-        mutex_unlock(&tracing_cpumask_update_lock);
-        free_cpumask_var(tracing_cpumask);
+        free_cpumask_var(tracing_cpumask_new);
 
         return err;
 }
@@ -2366,21 +2405,20 @@ static const struct file_operations tracing_iter_fops = {
 
 static const char readme_msg[] =
         "tracing mini-HOWTO:\n\n"
-        "# mkdir /debug\n"
-        "# mount -t debugfs nodev /debug\n\n"
-        "# cat /debug/tracing/available_tracers\n"
+        "# mount -t debugfs nodev /sys/kernel/debug\n\n"
+        "# cat /sys/kernel/debug/tracing/available_tracers\n"
         "wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n"
-        "# cat /debug/tracing/current_tracer\n"
+        "# cat /sys/kernel/debug/tracing/current_tracer\n"
         "nop\n"
-        "# echo sched_switch > /debug/tracing/current_tracer\n"
-        "# cat /debug/tracing/current_tracer\n"
+        "# echo sched_switch > /sys/kernel/debug/tracing/current_tracer\n"
+        "# cat /sys/kernel/debug/tracing/current_tracer\n"
         "sched_switch\n"
-        "# cat /debug/tracing/trace_options\n"
+        "# cat /sys/kernel/debug/tracing/trace_options\n"
         "noprint-parent nosym-offset nosym-addr noverbose\n"
-        "# echo print-parent > /debug/tracing/trace_options\n"
-        "# echo 1 > /debug/tracing/tracing_enabled\n"
-        "# cat /debug/tracing/trace > /tmp/trace.txt\n"
-        "# echo 0 > /debug/tracing/tracing_enabled\n"
+        "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
+        "# echo 1 > /sys/kernel/debug/tracing/tracing_enabled\n"
+        "# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
+        "# echo 0 > /sys/kernel/debug/tracing/tracing_enabled\n"
         ;
 
 static ssize_t
@@ -2397,6 +2435,56 @@ static const struct file_operations tracing_readme_fops = {
 };
 
 static ssize_t
+tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
+                                size_t cnt, loff_t *ppos)
+{
+        char *buf_comm;
+        char *file_buf;
+        char *buf;
+        int len = 0;
+        int pid;
+        int i;
+
+        file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
+        if (!file_buf)
+                return -ENOMEM;
+
+        buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
+        if (!buf_comm) {
+                kfree(file_buf);
+                return -ENOMEM;
+        }
+
+        buf = file_buf;
+
+        for (i = 0; i < SAVED_CMDLINES; i++) {
+                int r;
+
+                pid = map_cmdline_to_pid[i];
+                if (pid == -1 || pid == NO_CMDLINE_MAP)
+                        continue;
+
+                trace_find_cmdline(pid, buf_comm);
+                r = sprintf(buf, "%d %s\n", pid, buf_comm);
+                buf += r;
+                len += r;
+        }
+
+        len = simple_read_from_buffer(ubuf, cnt, ppos,
+                                      file_buf, len);
+
+        kfree(file_buf);
+        kfree(buf_comm);
+
+        return len;
+}
+
+static const struct file_operations tracing_saved_cmdlines_fops = {
+        .open   = tracing_open_generic,
+        .read   = tracing_saved_cmdlines_read,
+};
+
+static ssize_t
 tracing_ctrl_read(struct file *filp, char __user *ubuf,
                   size_t cnt, loff_t *ppos)
 {
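
Note: given the sprintf() format above, the new saved_cmdlines file reads back as one "<pid> <comm>" pair per line, e.g. (illustrative output only):

        # cat /sys/kernel/debug/tracing/saved_cmdlines
        1 init
        388 udevd
        1123 sshd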
@@ -2728,6 +2816,9 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
         /* trace pipe does not show start of buffer */
         cpumask_setall(iter->started);
 
+        if (trace_flags & TRACE_ITER_LATENCY_FMT)
+                iter->iter_flags |= TRACE_FILE_LAT_FMT;
+
         iter->cpu_file = cpu_file;
         iter->tr = &global_trace;
         mutex_init(&iter->mutex);
@@ -2915,6 +3006,7 @@ waitagain:
                   offsetof(struct trace_iterator, seq));
         iter->pos = -1;
 
+        trace_event_read_lock();
         while (find_next_entry_inc(iter) != NULL) {
                 enum print_line_t ret;
                 int len = iter->seq.len;
@@ -2931,6 +3023,7 @@ waitagain:
                 if (iter->seq.len >= cnt)
                         break;
         }
+        trace_event_read_unlock();
 
         /* Now copy what we have to the user */
         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
@@ -2993,7 +3086,8 @@ tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
                         break;
                 }
 
-                trace_consume(iter);
+                if (ret != TRACE_TYPE_NO_CONSUME)
+                        trace_consume(iter);
                 rem -= count;
                 if (!find_next_entry_inc(iter)) {
                         rem = 0;
@@ -3053,6 +3147,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
                 goto out_err;
         }
 
+        trace_event_read_lock();
+
         /* Fill as many pages as possible. */
         for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) {
                 pages[i] = alloc_page(GFP_KERNEL);
@@ -3075,6 +3171,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
                 trace_seq_init(&iter->seq);
         }
 
+        trace_event_read_unlock();
         mutex_unlock(&iter->mutex);
 
         spd.nr_pages = i;
@@ -3425,7 +3522,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                 .spd_release = buffer_spd_release,
         };
         struct buffer_ref *ref;
-        int size, i;
+        int entries, size, i;
         size_t ret;
 
         if (*ppos & (PAGE_SIZE - 1)) {
@@ -3440,7 +3537,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                 len &= PAGE_MASK;
         }
 
-        for (i = 0; i < PIPE_BUFFERS && len; i++, len -= PAGE_SIZE) {
+        entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
+
+        for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) {
                 struct page *page;
                 int r;
 
@@ -3457,7 +3556,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                 }
 
                 r = ring_buffer_read_page(ref->buffer, &ref->page,
-                                          len, info->cpu, 0);
+                                          len, info->cpu, 1);
                 if (r < 0) {
                         ring_buffer_free_read_page(ref->buffer,
                                                    ref->page);
@@ -3481,6 +3580,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                 spd.partial[i].private = (unsigned long)ref;
                 spd.nr_pages++;
                 *ppos += PAGE_SIZE;
+
+                entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
         }
 
         spd.nr_pages = i;
@@ -3508,6 +3609,45 @@ static const struct file_operations tracing_buffers_fops = {
         .llseek         = no_llseek,
 };
 
+static ssize_t
+tracing_stats_read(struct file *filp, char __user *ubuf,
+                   size_t count, loff_t *ppos)
+{
+        unsigned long cpu = (unsigned long)filp->private_data;
+        struct trace_array *tr = &global_trace;
+        struct trace_seq *s;
+        unsigned long cnt;
+
+        s = kmalloc(sizeof(*s), GFP_KERNEL);
+        if (!s)
+                return -ENOMEM;
+
+        trace_seq_init(s);
+
+        cnt = ring_buffer_entries_cpu(tr->buffer, cpu);
+        trace_seq_printf(s, "entries: %ld\n", cnt);
+
+        cnt = ring_buffer_overrun_cpu(tr->buffer, cpu);
+        trace_seq_printf(s, "overrun: %ld\n", cnt);
+
+        cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
+        trace_seq_printf(s, "commit overrun: %ld\n", cnt);
+
+        cnt = ring_buffer_nmi_dropped_cpu(tr->buffer, cpu);
+        trace_seq_printf(s, "nmi dropped: %ld\n", cnt);
+
+        count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
+
+        kfree(s);
+
+        return count;
+}
+
+static const struct file_operations tracing_stats_fops = {
+        .open           = tracing_open_generic,
+        .read           = tracing_stats_read,
+};
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
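
Note: once the per-cpu "stats" file is wired up further down, each CPU directory reports the four counters in the order printed above, e.g. (illustrative values):

        # cat /sys/kernel/debug/tracing/per_cpu/cpu0/stats
        entries: 129
        overrun: 0
        commit overrun: 0
        nmi dropped: 0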
@@ -3597,7 +3737,7 @@ struct dentry *tracing_dentry_percpu(void)
 static void tracing_init_debugfs_percpu(long cpu)
 {
         struct dentry *d_percpu = tracing_dentry_percpu();
-        struct dentry *entry, *d_cpu;
+        struct dentry *d_cpu;
         /* strlen(cpu) + MAX(log10(cpu)) + '\0' */
         char cpu_dir[7];
 
@@ -3612,21 +3752,18 @@ static void tracing_init_debugfs_percpu(long cpu)
         }
 
         /* per cpu trace_pipe */
-        entry = debugfs_create_file("trace_pipe", 0444, d_cpu,
-                                (void *) cpu, &tracing_pipe_fops);
-        if (!entry)
-                pr_warning("Could not create debugfs 'trace_pipe' entry\n");
+        trace_create_file("trace_pipe", 0444, d_cpu,
+                                (void *) cpu, &tracing_pipe_fops);
 
         /* per cpu trace */
-        entry = debugfs_create_file("trace", 0644, d_cpu,
-                                (void *) cpu, &tracing_fops);
-        if (!entry)
-                pr_warning("Could not create debugfs 'trace' entry\n");
+        trace_create_file("trace", 0644, d_cpu,
+                                (void *) cpu, &tracing_fops);
 
-        entry = debugfs_create_file("trace_pipe_raw", 0444, d_cpu,
-                                (void *) cpu, &tracing_buffers_fops);
-        if (!entry)
-                pr_warning("Could not create debugfs 'trace_pipe_raw' entry\n");
+        trace_create_file("trace_pipe_raw", 0444, d_cpu,
+                                (void *) cpu, &tracing_buffers_fops);
+
+        trace_create_file("stats", 0444, d_cpu,
+                                (void *) cpu, &tracing_stats_fops);
 }
 
 #ifdef CONFIG_FTRACE_SELFTEST
@@ -3759,17 +3896,9 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
         if (ret < 0)
                 return ret;
 
-        switch (val) {
-        case 0:
-                trace_flags &= ~(1 << index);
-                break;
-        case 1:
-                trace_flags |= 1 << index;
-                break;
-
-        default:
+        if (val != 0 && val != 1)
                 return -EINVAL;
-        }
+        set_tracer_flags(1 << index, val);
 
         *ppos += cnt;
 
@@ -3782,6 +3911,22 @@ static const struct file_operations trace_options_core_fops = {
         .write = trace_options_core_write,
 };
 
+struct dentry *trace_create_file(const char *name,
+                                 mode_t mode,
+                                 struct dentry *parent,
+                                 void *data,
+                                 const struct file_operations *fops)
+{
+        struct dentry *ret;
+
+        ret = debugfs_create_file(name, mode, parent, data, fops);
+        if (!ret)
+                pr_warning("Could not create debugfs '%s' entry\n", name);
+
+        return ret;
+}
+
+
 static struct dentry *trace_options_init_dentry(void)
 {
         struct dentry *d_tracer;
@@ -3809,7 +3954,6 @@ create_trace_option_file(struct trace_option_dentry *topt,
                          struct tracer_opt *opt)
 {
         struct dentry *t_options;
-        struct dentry *entry;
 
         t_options = trace_options_init_dentry();
         if (!t_options)
@@ -3818,11 +3962,9 @@ create_trace_option_file(struct trace_option_dentry *topt,
         topt->flags = flags;
         topt->opt = opt;
 
-        entry = debugfs_create_file(opt->name, 0644, t_options, topt,
+        topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
                                     &trace_options_fops);
 
-        topt->entry = entry;
-
 }
 
 static struct trace_option_dentry *
@@ -3877,123 +4019,84 @@ static struct dentry *
 create_trace_option_core_file(const char *option, long index)
 {
         struct dentry *t_options;
-        struct dentry *entry;
 
         t_options = trace_options_init_dentry();
         if (!t_options)
                 return NULL;
 
-        entry = debugfs_create_file(option, 0644, t_options, (void *)index,
+        return trace_create_file(option, 0644, t_options, (void *)index,
                                     &trace_options_core_fops);
-
-        return entry;
 }
 
 static __init void create_trace_options_dir(void)
 {
         struct dentry *t_options;
-        struct dentry *entry;
         int i;
 
         t_options = trace_options_init_dentry();
         if (!t_options)
                 return;
 
-        for (i = 0; trace_options[i]; i++) {
-                entry = create_trace_option_core_file(trace_options[i], i);
-                if (!entry)
-                        pr_warning("Could not create debugfs %s entry\n",
-                                   trace_options[i]);
-        }
+        for (i = 0; trace_options[i]; i++)
+                create_trace_option_core_file(trace_options[i], i);
 }
 
 static __init int tracer_init_debugfs(void)
 {
         struct dentry *d_tracer;
-        struct dentry *entry;
         int cpu;
 
         d_tracer = tracing_init_dentry();
 
-        entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
-                                    &global_trace, &tracing_ctrl_fops);
-        if (!entry)
-                pr_warning("Could not create debugfs 'tracing_enabled' entry\n");
+        trace_create_file("tracing_enabled", 0644, d_tracer,
+                        &global_trace, &tracing_ctrl_fops);
 
-        entry = debugfs_create_file("trace_options", 0644, d_tracer,
-                                    NULL, &tracing_iter_fops);
-        if (!entry)
-                pr_warning("Could not create debugfs 'trace_options' entry\n");
+        trace_create_file("trace_options", 0644, d_tracer,
+                        NULL, &tracing_iter_fops);
 
-        create_trace_options_dir();
+        trace_create_file("tracing_cpumask", 0644, d_tracer,
+                        NULL, &tracing_cpumask_fops);
+
+        trace_create_file("trace", 0644, d_tracer,
+                        (void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
+
+        trace_create_file("available_tracers", 0444, d_tracer,
+                        &global_trace, &show_traces_fops);
+
+        trace_create_file("current_tracer", 0644, d_tracer,
+                        &global_trace, &set_tracer_fops);
 
-        entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
-                                    NULL, &tracing_cpumask_fops);
-        if (!entry)
-                pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
-
-        entry = debugfs_create_file("trace", 0644, d_tracer,
-                                    (void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
-        if (!entry)
-                pr_warning("Could not create debugfs 'trace' entry\n");
-
-        entry = debugfs_create_file("available_tracers", 0444, d_tracer,
-                                    &global_trace, &show_traces_fops);
-        if (!entry)
-                pr_warning("Could not create debugfs 'available_tracers' entry\n");
-
-        entry = debugfs_create_file("current_tracer", 0444, d_tracer,
-                                    &global_trace, &set_tracer_fops);
-        if (!entry)
-                pr_warning("Could not create debugfs 'current_tracer' entry\n");
-
-        entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
-                                    &tracing_max_latency,
-                                    &tracing_max_lat_fops);
-        if (!entry)
-                pr_warning("Could not create debugfs "
-                           "'tracing_max_latency' entry\n");
-
-        entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
-                                    &tracing_thresh, &tracing_max_lat_fops);
-        if (!entry)
-                pr_warning("Could not create debugfs "
-                           "'tracing_thresh' entry\n");
-        entry = debugfs_create_file("README", 0644, d_tracer,
-                                    NULL, &tracing_readme_fops);
-        if (!entry)
-                pr_warning("Could not create debugfs 'README' entry\n");
-
-        entry = debugfs_create_file("trace_pipe", 0444, d_tracer,
-                                    (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
-        if (!entry)
-                pr_warning("Could not create debugfs "
-                           "'trace_pipe' entry\n");
-
-        entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer,
-                                    &global_trace, &tracing_entries_fops);
-        if (!entry)
-                pr_warning("Could not create debugfs "
-                           "'buffer_size_kb' entry\n");
-
-        entry = debugfs_create_file("trace_marker", 0220, d_tracer,
-                                    NULL, &tracing_mark_fops);
-        if (!entry)
-                pr_warning("Could not create debugfs "
-                           "'trace_marker' entry\n");
+        trace_create_file("tracing_max_latency", 0644, d_tracer,
+                        &tracing_max_latency, &tracing_max_lat_fops);
+
+        trace_create_file("tracing_thresh", 0644, d_tracer,
+                        &tracing_thresh, &tracing_max_lat_fops);
+
+        trace_create_file("README", 0444, d_tracer,
+                        NULL, &tracing_readme_fops);
+
+        trace_create_file("trace_pipe", 0444, d_tracer,
+                        (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
+
+        trace_create_file("buffer_size_kb", 0644, d_tracer,
+                        &global_trace, &tracing_entries_fops);
+
+        trace_create_file("trace_marker", 0220, d_tracer,
+                        NULL, &tracing_mark_fops);
+
+        trace_create_file("saved_cmdlines", 0444, d_tracer,
+                        NULL, &tracing_saved_cmdlines_fops);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-        entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
-                                    &ftrace_update_tot_cnt,
-                                    &tracing_dyn_info_fops);
-        if (!entry)
-                pr_warning("Could not create debugfs "
-                           "'dyn_ftrace_total_info' entry\n");
+        trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
+                        &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
 #endif
 #ifdef CONFIG_SYSPROF_TRACER
         init_tracer_sysprof_debugfs(d_tracer);
 #endif
 
+        create_trace_options_dir();
+
         for_each_tracing_cpu(cpu)
                 tracing_init_debugfs_percpu(cpu);
 
@@ -4064,7 +4167,8 @@ trace_printk_seq(struct trace_seq *s)
 
 static void __ftrace_dump(bool disable_tracing)
 {
-        static DEFINE_SPINLOCK(ftrace_dump_lock);
+        static raw_spinlock_t ftrace_dump_lock =
+                (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
         /* use static because iter can be a bit big for the stack */
         static struct trace_iterator iter;
         unsigned int old_userobj;
@@ -4073,7 +4177,8 @@ static void __ftrace_dump(bool disable_tracing)
         int cnt = 0, cpu;
 
         /* only one dump */
-        spin_lock_irqsave(&ftrace_dump_lock, flags);
+        local_irq_save(flags);
+        __raw_spin_lock(&ftrace_dump_lock);
         if (dump_ran)
                 goto out;
 
@@ -4122,8 +4227,11 @@ static void __ftrace_dump(bool disable_tracing)
                 iter.pos = -1;
 
                 if (find_next_entry_inc(&iter) != NULL) {
-                        print_trace_line(&iter);
-                        trace_consume(&iter);
+                        int ret;
+
+                        ret = print_trace_line(&iter);
+                        if (ret != TRACE_TYPE_NO_CONSUME)
+                                trace_consume(&iter);
                 }
 
                 trace_printk_seq(&iter.seq);
@@ -4145,7 +4253,8 @@ static void __ftrace_dump(bool disable_tracing)
         }
 
  out:
-        spin_unlock_irqrestore(&ftrace_dump_lock, flags);
+        __raw_spin_unlock(&ftrace_dump_lock);
+        local_irq_restore(flags);
 }
 
 /* By default: disable tracing after the dump */
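
Note: a DEFINE_SPINLOCK() lock goes through the regular spinlock and lockdep machinery, which can itself recurse into tracing while the buffers are being dumped; __ftrace_dump() may also run in contexts (oops, NMI) where taking an ordinary lock is unsafe. Hence the conversion above to an open-coded raw lock with interrupts disabled, in outline:

        local_irq_save(flags);
        __raw_spin_lock(&ftrace_dump_lock);     /* raw: no lockdep, no re-entry into tracing */
        /* ... dump the trace buffers ... */
        __raw_spin_unlock(&ftrace_dump_lock);
        local_irq_restore(flags);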