Diffstat (limited to 'kernel/trace/trace.c')
 -rw-r--r--  kernel/trace/trace.c | 412
 1 file changed, 267 insertions(+), 145 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index cda81ec58d9f..076fa6f0ee48 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -171,6 +171,13 @@ static struct trace_array global_trace;
 
 static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
 
+int filter_current_check_discard(struct ftrace_event_call *call, void *rec,
+				 struct ring_buffer_event *event)
+{
+	return filter_check_discard(call, rec, global_trace.buffer, event);
+}
+EXPORT_SYMBOL_GPL(filter_current_check_discard);
+
 cycle_t ftrace_now(int cpu)
 {
 	u64 ts;
@@ -255,7 +262,8 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 
 /* trace_flags holds trace_options default values */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
-	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME;
+	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
+	TRACE_ITER_GRAPH_TIME;
 
 /**
  * trace_wake_up - wake up tasks waiting for trace input
@@ -317,6 +325,7 @@ static const char *trace_options[] = {
 	"latency-format",
 	"global-clock",
 	"sleep-time",
+	"graph-time",
 	NULL
 };
 
@@ -335,7 +344,7 @@ static raw_spinlock_t ftrace_max_lock =
 /*
  * Copy the new maximum trace into the separate maximum-trace
  * structure. (this way the maximum trace is permanently saved,
- * for later retrieval via /debugfs/tracing/latency_trace)
+ * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
  */
 static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
@@ -402,17 +411,6 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 	return cnt;
 }
 
-static void
-trace_print_seq(struct seq_file *m, struct trace_seq *s)
-{
-	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
-
-	s->buffer[len] = 0;
-	seq_puts(m, s->buffer);
-
-	trace_seq_init(s);
-}
-
 /**
  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
  * @tr: tracer
@@ -641,6 +639,16 @@ void tracing_reset_online_cpus(struct trace_array *tr)
 		tracing_reset(tr, cpu);
 }
 
+void tracing_reset_current(int cpu)
+{
+	tracing_reset(&global_trace, cpu);
+}
+
+void tracing_reset_current_online_cpus(void)
+{
+	tracing_reset_online_cpus(&global_trace);
+}
+
 #define SAVED_CMDLINES 128
 #define NO_CMDLINE_MAP UINT_MAX
 static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
@@ -800,6 +808,7 @@ void trace_find_cmdline(int pid, char comm[])
 		return;
 	}
 
+	preempt_disable();
 	__raw_spin_lock(&trace_cmdline_lock);
 	map = map_pid_to_cmdline[pid];
 	if (map != NO_CMDLINE_MAP)
@@ -808,6 +817,7 @@ void trace_find_cmdline(int pid, char comm[])
 		strcpy(comm, "<...>");
 
 	__raw_spin_unlock(&trace_cmdline_lock);
+	preempt_enable();
 }
 
 void tracing_record_cmdline(struct task_struct *tsk)
@@ -840,7 +850,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 }
 
 struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
-						    unsigned char type,
+						    int type,
 						    unsigned long len,
 						    unsigned long flags, int pc)
 {
@@ -883,30 +893,40 @@ void trace_buffer_unlock_commit(struct trace_array *tr,
 }
 
 struct ring_buffer_event *
-trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
+trace_current_buffer_lock_reserve(int type, unsigned long len,
 				  unsigned long flags, int pc)
 {
 	return trace_buffer_lock_reserve(&global_trace,
 					 type, len, flags, pc);
 }
+EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
 
 void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
 					unsigned long flags, int pc)
 {
-	return __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(&global_trace, event, flags, pc, 1);
 }
+EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
 
 void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
 				       unsigned long flags, int pc)
 {
-	return __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 0);
+	__trace_buffer_unlock_commit(&global_trace, event, flags, pc, 0);
+}
+EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
+
+void trace_current_buffer_discard_commit(struct ring_buffer_event *event)
+{
+	ring_buffer_discard_commit(global_trace.buffer, event);
 }
+EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
 
 void
 trace_function(struct trace_array *tr,
 	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
 	       int pc)
 {
+	struct ftrace_event_call *call = &event_function;
 	struct ring_buffer_event *event;
 	struct ftrace_entry *entry;
 
@@ -921,7 +941,9 @@ trace_function(struct trace_array *tr,
 	entry = ring_buffer_event_data(event);
 	entry->ip = ip;
 	entry->parent_ip = parent_ip;
-	ring_buffer_unlock_commit(tr->buffer, event);
+
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -930,6 +952,7 @@ static int __trace_graph_entry(struct trace_array *tr,
 			       unsigned long flags,
 			       int pc)
 {
+	struct ftrace_event_call *call = &event_funcgraph_entry;
 	struct ring_buffer_event *event;
 	struct ftrace_graph_ent_entry *entry;
 
@@ -942,7 +965,8 @@ static int __trace_graph_entry(struct trace_array *tr,
 		return 0;
 	entry = ring_buffer_event_data(event);
 	entry->graph_ent = *trace;
-	ring_buffer_unlock_commit(global_trace.buffer, event);
+	if (!filter_current_check_discard(call, entry, event))
+		ring_buffer_unlock_commit(global_trace.buffer, event);
 
 	return 1;
 }
@@ -952,6 +976,7 @@ static void __trace_graph_return(struct trace_array *tr,
 				unsigned long flags,
 				int pc)
 {
+	struct ftrace_event_call *call = &event_funcgraph_exit;
 	struct ring_buffer_event *event;
 	struct ftrace_graph_ret_entry *entry;
 
@@ -964,7 +989,8 @@ static void __trace_graph_return(struct trace_array *tr,
 		return;
 	entry = ring_buffer_event_data(event);
 	entry->ret = *trace;
-	ring_buffer_unlock_commit(global_trace.buffer, event);
+	if (!filter_current_check_discard(call, entry, event))
+		ring_buffer_unlock_commit(global_trace.buffer, event);
 }
 #endif
 
@@ -982,6 +1008,7 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 				 int skip, int pc)
 {
 #ifdef CONFIG_STACKTRACE
+	struct ftrace_event_call *call = &event_kernel_stack;
 	struct ring_buffer_event *event;
 	struct stack_entry *entry;
 	struct stack_trace trace;
@@ -999,7 +1026,8 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 	trace.entries = entry->caller;
 
 	save_stack_trace(&trace);
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 #endif
 }
 
@@ -1024,6 +1052,7 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 				   unsigned long flags, int pc)
 {
 #ifdef CONFIG_STACKTRACE
+	struct ftrace_event_call *call = &event_user_stack;
 	struct ring_buffer_event *event;
 	struct userstack_entry *entry;
 	struct stack_trace trace;
@@ -1045,7 +1074,8 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 	trace.entries = entry->caller;
 
 	save_stack_trace_user(&trace);
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 #endif
 }
 
@@ -1089,6 +1119,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
 			   struct task_struct *next,
 			   unsigned long flags, int pc)
 {
+	struct ftrace_event_call *call = &event_context_switch;
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
 
@@ -1104,7 +1135,9 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->next_prio = next->prio;
 	entry->next_state = next->state;
 	entry->next_cpu = task_cpu(next);
-	trace_buffer_unlock_commit(tr, event, flags, pc);
+
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		trace_buffer_unlock_commit(tr, event, flags, pc);
 }
 
 void
@@ -1113,6 +1146,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 			   struct task_struct *curr,
 			   unsigned long flags, int pc)
 {
+	struct ftrace_event_call *call = &event_wakeup;
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
 
@@ -1129,7 +1163,8 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->next_state = wakee->state;
 	entry->next_cpu = task_cpu(wakee);
 
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 	ftrace_trace_stack(tr, flags, 6, pc);
 	ftrace_trace_userstack(tr, flags, pc);
 }
@@ -1230,11 +1265,13 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 	static u32 trace_buf[TRACE_BUF_SIZE];
 
+	struct ftrace_event_call *call = &event_bprint;
 	struct ring_buffer_event *event;
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
 	struct bprint_entry *entry;
 	unsigned long flags;
+	int disable;
 	int resched;
 	int cpu, len = 0, size, pc;
 
@@ -1249,7 +1286,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 
-	if (unlikely(atomic_read(&data->disabled)))
+	disable = atomic_inc_return(&data->disabled);
+	if (unlikely(disable != 1))
 		goto out;
 
 	/* Lockdep uses trace_printk for lock tracing */
@@ -1269,13 +1307,15 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	entry->fmt = fmt;
 
 	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 
 out_unlock:
 	__raw_spin_unlock(&trace_buf_lock);
 	local_irq_restore(flags);
 
 out:
+	atomic_dec_return(&data->disabled);
 	ftrace_preempt_enable(resched);
 	unpause_graph_tracing();
 
@@ -1288,12 +1328,14 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
 	static char trace_buf[TRACE_BUF_SIZE];
 
+	struct ftrace_event_call *call = &event_print;
 	struct ring_buffer_event *event;
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
 	int cpu, len = 0, size, pc;
 	struct print_entry *entry;
 	unsigned long irq_flags;
+	int disable;
 
 	if (tracing_disabled || tracing_selftest_running)
 		return 0;
@@ -1303,7 +1345,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 
-	if (unlikely(atomic_read(&data->disabled)))
+	disable = atomic_inc_return(&data->disabled);
+	if (unlikely(disable != 1))
 		goto out;
 
 	pause_graph_tracing();
@@ -1323,13 +1366,15 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 
 	memcpy(&entry->buf, trace_buf, len);
 	entry->buf[len] = 0;
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 
 out_unlock:
 	__raw_spin_unlock(&trace_buf_lock);
 	raw_local_irq_restore(irq_flags);
 	unpause_graph_tracing();
 out:
+	atomic_dec_return(&data->disabled);
 	preempt_enable_notrace();
 
 	return len;
@@ -1526,12 +1571,14 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 			p = s_next(m, p, &l);
 	}
 
+	trace_event_read_lock();
 	return p;
 }
 
 static void s_stop(struct seq_file *m, void *p)
 {
 	atomic_dec(&trace_record_cmdline_disabled);
+	trace_event_read_unlock();
 }
 
 static void print_lat_help_header(struct seq_file *m)
@@ -1774,6 +1821,7 @@ static int trace_empty(struct trace_iterator *iter)
 	return 1;
 }
 
+/* Called with trace_event_read_lock() held. */
 static enum print_line_t print_trace_line(struct trace_iterator *iter)
 {
 	enum print_line_t ret;
@@ -2143,11 +2191,12 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
 		return -ENOMEM;
 
-	mutex_lock(&tracing_cpumask_update_lock);
 	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
 	if (err)
 		goto err_unlock;
 
+	mutex_lock(&tracing_cpumask_update_lock);
+
 	local_irq_disable();
 	__raw_spin_lock(&ftrace_max_lock);
 	for_each_tracing_cpu(cpu) {
@@ -2175,8 +2224,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	return count;
 
 err_unlock:
-	mutex_unlock(&tracing_cpumask_update_lock);
-	free_cpumask_var(tracing_cpumask);
+	free_cpumask_var(tracing_cpumask_new);
 
 	return err;
 }
@@ -2366,21 +2414,20 @@ static const struct file_operations tracing_iter_fops = {
 
 static const char readme_msg[] =
 	"tracing mini-HOWTO:\n\n"
-	"# mkdir /debug\n"
-	"# mount -t debugfs nodev /debug\n\n"
-	"# cat /debug/tracing/available_tracers\n"
+	"# mount -t debugfs nodev /sys/kernel/debug\n\n"
+	"# cat /sys/kernel/debug/tracing/available_tracers\n"
 	"wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n"
-	"# cat /debug/tracing/current_tracer\n"
+	"# cat /sys/kernel/debug/tracing/current_tracer\n"
 	"nop\n"
-	"# echo sched_switch > /debug/tracing/current_tracer\n"
-	"# cat /debug/tracing/current_tracer\n"
+	"# echo sched_switch > /sys/kernel/debug/tracing/current_tracer\n"
+	"# cat /sys/kernel/debug/tracing/current_tracer\n"
 	"sched_switch\n"
-	"# cat /debug/tracing/trace_options\n"
+	"# cat /sys/kernel/debug/tracing/trace_options\n"
 	"noprint-parent nosym-offset nosym-addr noverbose\n"
-	"# echo print-parent > /debug/tracing/trace_options\n"
-	"# echo 1 > /debug/tracing/tracing_enabled\n"
-	"# cat /debug/tracing/trace > /tmp/trace.txt\n"
-	"# echo 0 > /debug/tracing/tracing_enabled\n"
+	"# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
+	"# echo 1 > /sys/kernel/debug/tracing/tracing_enabled\n"
+	"# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
+	"# echo 0 > /sys/kernel/debug/tracing/tracing_enabled\n"
 ;
 
 static ssize_t
@@ -2397,6 +2444,56 @@ static const struct file_operations tracing_readme_fops = {
 };
 
 static ssize_t
+tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
+			    size_t cnt, loff_t *ppos)
+{
+	char *buf_comm;
+	char *file_buf;
+	char *buf;
+	int len = 0;
+	int pid;
+	int i;
+
+	file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
+	if (!file_buf)
+		return -ENOMEM;
+
+	buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
+	if (!buf_comm) {
+		kfree(file_buf);
+		return -ENOMEM;
+	}
+
+	buf = file_buf;
+
+	for (i = 0; i < SAVED_CMDLINES; i++) {
+		int r;
+
+		pid = map_cmdline_to_pid[i];
+		if (pid == -1 || pid == NO_CMDLINE_MAP)
+			continue;
+
+		trace_find_cmdline(pid, buf_comm);
+		r = sprintf(buf, "%d %s\n", pid, buf_comm);
+		buf += r;
+		len += r;
+	}
+
+	len = simple_read_from_buffer(ubuf, cnt, ppos,
+				      file_buf, len);
+
+	kfree(file_buf);
+	kfree(buf_comm);
+
+	return len;
+}
+
+static const struct file_operations tracing_saved_cmdlines_fops = {
+	.open	= tracing_open_generic,
+	.read	= tracing_saved_cmdlines_read,
+};
+
+static ssize_t
 tracing_ctrl_read(struct file *filp, char __user *ubuf,
 		  size_t cnt, loff_t *ppos)
 {
@@ -2728,6 +2825,9 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 	/* trace pipe does not show start of buffer */
 	cpumask_setall(iter->started);
 
+	if (trace_flags & TRACE_ITER_LATENCY_FMT)
+		iter->iter_flags |= TRACE_FILE_LAT_FMT;
+
 	iter->cpu_file = cpu_file;
 	iter->tr = &global_trace;
 	mutex_init(&iter->mutex);
@@ -2915,6 +3015,7 @@ waitagain:
 	       offsetof(struct trace_iterator, seq));
 	iter->pos = -1;
 
+	trace_event_read_lock();
 	while (find_next_entry_inc(iter) != NULL) {
 		enum print_line_t ret;
 		int len = iter->seq.len;
@@ -2931,6 +3032,7 @@ waitagain:
 		if (iter->seq.len >= cnt)
 			break;
 	}
+	trace_event_read_unlock();
 
 	/* Now copy what we have to the user */
 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
@@ -3053,6 +3155,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 		goto out_err;
 	}
 
+	trace_event_read_lock();
+
 	/* Fill as many pages as possible. */
 	for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) {
 		pages[i] = alloc_page(GFP_KERNEL);
@@ -3075,6 +3179,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 		trace_seq_init(&iter->seq);
 	}
 
+	trace_event_read_unlock();
 	mutex_unlock(&iter->mutex);
 
 	spd.nr_pages = i;
@@ -3425,7 +3530,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		.spd_release	= buffer_spd_release,
 	};
 	struct buffer_ref *ref;
-	int size, i;
+	int entries, size, i;
 	size_t ret;
 
 	if (*ppos & (PAGE_SIZE - 1)) {
@@ -3440,7 +3545,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		len &= PAGE_MASK;
 	}
 
-	for (i = 0; i < PIPE_BUFFERS && len; i++, len -= PAGE_SIZE) {
+	entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
+
+	for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) {
 		struct page *page;
 		int r;
 
@@ -3457,7 +3564,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		}
 
 		r = ring_buffer_read_page(ref->buffer, &ref->page,
-					  len, info->cpu, 0);
+					  len, info->cpu, 1);
 		if (r < 0) {
 			ring_buffer_free_read_page(ref->buffer,
 						   ref->page);
@@ -3481,6 +3588,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		spd.partial[i].private = (unsigned long)ref;
 		spd.nr_pages++;
 		*ppos += PAGE_SIZE;
+
+		entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
 	}
 
 	spd.nr_pages = i;
@@ -3508,6 +3617,45 @@ static const struct file_operations tracing_buffers_fops = {
 	.llseek		= no_llseek,
 };
 
+static ssize_t
+tracing_stats_read(struct file *filp, char __user *ubuf,
+		   size_t count, loff_t *ppos)
+{
+	unsigned long cpu = (unsigned long)filp->private_data;
+	struct trace_array *tr = &global_trace;
+	struct trace_seq *s;
+	unsigned long cnt;
+
+	s = kmalloc(sizeof(*s), GFP_KERNEL);
+	if (!s)
+		return -ENOMEM;
+
+	trace_seq_init(s);
+
+	cnt = ring_buffer_entries_cpu(tr->buffer, cpu);
+	trace_seq_printf(s, "entries: %ld\n", cnt);
+
+	cnt = ring_buffer_overrun_cpu(tr->buffer, cpu);
+	trace_seq_printf(s, "overrun: %ld\n", cnt);
+
+	cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
+	trace_seq_printf(s, "commit overrun: %ld\n", cnt);
+
+	cnt = ring_buffer_nmi_dropped_cpu(tr->buffer, cpu);
+	trace_seq_printf(s, "nmi dropped: %ld\n", cnt);
+
+	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
+
+	kfree(s);
+
+	return count;
+}
+
+static const struct file_operations tracing_stats_fops = {
+	.open		= tracing_open_generic,
+	.read		= tracing_stats_read,
+};
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
@@ -3597,7 +3745,7 @@ struct dentry *tracing_dentry_percpu(void)
 static void tracing_init_debugfs_percpu(long cpu)
 {
 	struct dentry *d_percpu = tracing_dentry_percpu();
-	struct dentry *entry, *d_cpu;
+	struct dentry *d_cpu;
 	/* strlen(cpu) + MAX(log10(cpu)) + '\0' */
 	char cpu_dir[7];
 
@@ -3612,21 +3760,18 @@ static void tracing_init_debugfs_percpu(long cpu)
 	}
 
 	/* per cpu trace_pipe */
-	entry = debugfs_create_file("trace_pipe", 0444, d_cpu,
-				(void *) cpu, &tracing_pipe_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs 'trace_pipe' entry\n");
+	trace_create_file("trace_pipe", 0444, d_cpu,
+			(void *) cpu, &tracing_pipe_fops);
 
 	/* per cpu trace */
-	entry = debugfs_create_file("trace", 0644, d_cpu,
-				(void *) cpu, &tracing_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs 'trace' entry\n");
+	trace_create_file("trace", 0644, d_cpu,
+			(void *) cpu, &tracing_fops);
 
-	entry = debugfs_create_file("trace_pipe_raw", 0444, d_cpu,
-				(void *) cpu, &tracing_buffers_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs 'trace_pipe_raw' entry\n");
+	trace_create_file("trace_pipe_raw", 0444, d_cpu,
+			(void *) cpu, &tracing_buffers_fops);
+
+	trace_create_file("stats", 0444, d_cpu,
+			(void *) cpu, &tracing_stats_fops);
 }
 
 #ifdef CONFIG_FTRACE_SELFTEST
@@ -3782,6 +3927,22 @@ static const struct file_operations trace_options_core_fops = {
 	.write = trace_options_core_write,
 };
 
+struct dentry *trace_create_file(const char *name,
+				 mode_t mode,
+				 struct dentry *parent,
+				 void *data,
+				 const struct file_operations *fops)
+{
+	struct dentry *ret;
+
+	ret = debugfs_create_file(name, mode, parent, data, fops);
+	if (!ret)
+		pr_warning("Could not create debugfs '%s' entry\n", name);
+
+	return ret;
+}
+
+
 static struct dentry *trace_options_init_dentry(void)
 {
 	struct dentry *d_tracer;
@@ -3809,7 +3970,6 @@ create_trace_option_file(struct trace_option_dentry *topt,
 			 struct tracer_opt *opt)
 {
 	struct dentry *t_options;
-	struct dentry *entry;
 
 	t_options = trace_options_init_dentry();
 	if (!t_options)
@@ -3818,11 +3978,9 @@ create_trace_option_file(struct trace_option_dentry *topt,
 	topt->flags = flags;
 	topt->opt = opt;
 
-	entry = debugfs_create_file(opt->name, 0644, t_options, topt,
-				&trace_options_fops);
+	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
+					&trace_options_fops);
 
-	topt->entry = entry;
-
 }
 
 static struct trace_option_dentry *
@@ -3877,123 +4035,84 @@ static struct dentry *
 create_trace_option_core_file(const char *option, long index)
 {
 	struct dentry *t_options;
-	struct dentry *entry;
 
 	t_options = trace_options_init_dentry();
 	if (!t_options)
 		return NULL;
 
-	entry = debugfs_create_file(option, 0644, t_options, (void *)index,
-				    &trace_options_core_fops);
-
-	return entry;
+	return trace_create_file(option, 0644, t_options, (void *)index,
+				 &trace_options_core_fops);
 }
 
 static __init void create_trace_options_dir(void)
 {
 	struct dentry *t_options;
-	struct dentry *entry;
 	int i;
 
 	t_options = trace_options_init_dentry();
 	if (!t_options)
 		return;
 
-	for (i = 0; trace_options[i]; i++) {
-		entry = create_trace_option_core_file(trace_options[i], i);
-		if (!entry)
-			pr_warning("Could not create debugfs %s entry\n",
-				   trace_options[i]);
-	}
+	for (i = 0; trace_options[i]; i++)
+		create_trace_option_core_file(trace_options[i], i);
 }
 
 static __init int tracer_init_debugfs(void)
 {
 	struct dentry *d_tracer;
-	struct dentry *entry;
 	int cpu;
 
 	d_tracer = tracing_init_dentry();
 
-	entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
-				    &global_trace, &tracing_ctrl_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs 'tracing_enabled' entry\n");
+	trace_create_file("tracing_enabled", 0644, d_tracer,
+			&global_trace, &tracing_ctrl_fops);
 
-	entry = debugfs_create_file("trace_options", 0644, d_tracer,
-				    NULL, &tracing_iter_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs 'trace_options' entry\n");
+	trace_create_file("trace_options", 0644, d_tracer,
+			NULL, &tracing_iter_fops);
 
-	create_trace_options_dir();
+	trace_create_file("tracing_cpumask", 0644, d_tracer,
+			NULL, &tracing_cpumask_fops);
 
-	entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
-				    NULL, &tracing_cpumask_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
-
-	entry = debugfs_create_file("trace", 0644, d_tracer,
-				 (void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs 'trace' entry\n");
-
-	entry = debugfs_create_file("available_tracers", 0444, d_tracer,
-				    &global_trace, &show_traces_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs 'available_tracers' entry\n");
-
-	entry = debugfs_create_file("current_tracer", 0444, d_tracer,
-				    &global_trace, &set_tracer_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs 'current_tracer' entry\n");
-
-	entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
-				    &tracing_max_latency,
-				    &tracing_max_lat_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs "
-			   "'tracing_max_latency' entry\n");
-
-	entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
-				    &tracing_thresh, &tracing_max_lat_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs "
-			   "'tracing_thresh' entry\n");
-	entry = debugfs_create_file("README", 0644, d_tracer,
-				    NULL, &tracing_readme_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs 'README' entry\n");
-
-	entry = debugfs_create_file("trace_pipe", 0444, d_tracer,
+	trace_create_file("trace", 0644, d_tracer,
+			(void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
+
+	trace_create_file("available_tracers", 0444, d_tracer,
+			&global_trace, &show_traces_fops);
+
+	trace_create_file("current_tracer", 0644, d_tracer,
+			&global_trace, &set_tracer_fops);
+
+	trace_create_file("tracing_max_latency", 0644, d_tracer,
+			&tracing_max_latency, &tracing_max_lat_fops);
+
+	trace_create_file("tracing_thresh", 0644, d_tracer,
+			&tracing_thresh, &tracing_max_lat_fops);
+
+	trace_create_file("README", 0444, d_tracer,
+			NULL, &tracing_readme_fops);
+
+	trace_create_file("trace_pipe", 0444, d_tracer,
 			(void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs "
-			   "'trace_pipe' entry\n");
-
-	entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer,
-				    &global_trace, &tracing_entries_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs "
-			   "'buffer_size_kb' entry\n");
-
-	entry = debugfs_create_file("trace_marker", 0220, d_tracer,
-				    NULL, &tracing_mark_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs "
-			   "'trace_marker' entry\n");
+
+	trace_create_file("buffer_size_kb", 0644, d_tracer,
+			&global_trace, &tracing_entries_fops);
+
+	trace_create_file("trace_marker", 0220, d_tracer,
+			NULL, &tracing_mark_fops);
+
+	trace_create_file("saved_cmdlines", 0444, d_tracer,
+			NULL, &tracing_saved_cmdlines_fops);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-	entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
-				    &ftrace_update_tot_cnt,
-				    &tracing_dyn_info_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs "
-			   "'dyn_ftrace_total_info' entry\n");
+	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
+			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
 #endif
 #ifdef CONFIG_SYSPROF_TRACER
 	init_tracer_sysprof_debugfs(d_tracer);
 #endif
 
+	create_trace_options_dir();
+
 	for_each_tracing_cpu(cpu)
 		tracing_init_debugfs_percpu(cpu);
 
@@ -4064,7 +4183,8 @@ trace_printk_seq(struct trace_seq *s)
 
 static void __ftrace_dump(bool disable_tracing)
 {
-	static DEFINE_SPINLOCK(ftrace_dump_lock);
+	static raw_spinlock_t ftrace_dump_lock =
+		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
 	unsigned int old_userobj;
@@ -4073,7 +4193,8 @@ static void __ftrace_dump(bool disable_tracing)
 	int cnt = 0, cpu;
 
 	/* only one dump */
-	spin_lock_irqsave(&ftrace_dump_lock, flags);
+	local_irq_save(flags);
+	__raw_spin_lock(&ftrace_dump_lock);
 	if (dump_ran)
 		goto out;
 
@@ -4145,7 +4266,8 @@ static void __ftrace_dump(bool disable_tracing)
 	}
 
  out:
-	spin_unlock_irqrestore(&ftrace_dump_lock, flags);
+	__raw_spin_unlock(&ftrace_dump_lock);
+	local_irq_restore(flags);
 }
 
 /* By default: disable tracing after the dump */
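
Note on the recurring change: nearly every hunk above applies the same
three-step commit pattern: reserve an event, fill in its payload, then commit
only if the event filter does not discard it. Below is a minimal sketch of
that sequence using names taken from this diff (struct ftrace_entry,
event_function, filter_check_discard); it assumes the in-kernel ftrace
headers, and TRACE_FN is assumed to be the function-event type id from
trace.h. This is an illustration of the pattern, not an additional hunk of
the commit.

	static void example_commit_site(struct trace_array *tr,
					unsigned long ip, unsigned long parent_ip,
					unsigned long flags, int pc)
	{
		struct ftrace_event_call *call = &event_function;
		struct ring_buffer_event *event;
		struct ftrace_entry *entry;

		/* 1. Reserve a slot for the event in the ring buffer. */
		event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
						  flags, pc);
		if (!event)
			return;

		/* 2. Fill in the payload while the slot is still private. */
		entry = ring_buffer_event_data(event);
		entry->ip = ip;
		entry->parent_ip = parent_ip;

		/*
		 * 3. Run the event filter on the completed record; only an
		 *    event the filter keeps is committed and becomes visible
		 *    to readers. A discarded event is simply dropped.
		 */
		if (!filter_check_discard(call, entry, tr->buffer, event))
			ring_buffer_unlock_commit(tr->buffer, event);
	}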

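The other repeated change, replacing the atomic_read() test in trace_vprintk()
and trace_vbprintk() with atomic_inc_return(&data->disabled), makes the check
double as a recursion guard: the increment both claims the code path and
reports whether it was already claimed, so a reentrant call backs off instead
of deadlocking on trace_buf_lock. Here is a self-contained user-space analogue
of that guard, with C11 atomics standing in for the kernel's atomic_t (an
illustration of the idea only, not kernel code):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int disabled;

	static void guarded_print(const char *msg, int recurse)
	{
		/*
		 * Claim the path first; any result other than 1 means it was
		 * already active (recursion, or another claimant), so back
		 * off rather than re-entering.
		 */
		if (atomic_fetch_add(&disabled, 1) + 1 != 1)
			goto out;

		printf("%s\n", msg);
		if (recurse)
			guarded_print("this nested call is silently dropped", 0);
	out:
		atomic_fetch_sub(&disabled, 1);
	}

	int main(void)
	{
		guarded_print("first top-level call prints", 1);
		guarded_print("second top-level call prints", 0);
		return 0;
	}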