Diffstat (limited to 'kernel/trace/trace.c')
 -rw-r--r--  kernel/trace/trace.c | 452
 1 file changed, 265 insertions(+), 187 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 874f2893cff0..032c57ca6502 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -12,7 +12,7 @@
  * Copyright (C) 2004 William Lee Irwin III
  */
 #include <linux/ring_buffer.h>
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
 #include <linux/stacktrace.h>
 #include <linux/writeback.h>
 #include <linux/kallsyms.h>
@@ -32,6 +32,7 @@
 #include <linux/splice.h>
 #include <linux/kdebug.h>
 #include <linux/string.h>
+#include <linux/rwsem.h>
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/poll.h>
@@ -86,25 +87,22 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  */
 static int tracing_disabled = 1;
 
-DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
+DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 
 static inline void ftrace_disable_cpu(void)
 {
 	preempt_disable();
-	local_inc(&__get_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
 }
 
 static inline void ftrace_enable_cpu(void)
 {
-	local_dec(&__get_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
 	preempt_enable();
 }
 
 static cpumask_var_t __read_mostly tracing_buffer_mask;
 
-/* Define which cpu buffers are currently read in trace_pipe */
-static cpumask_var_t tracing_reader_cpumask;
-
 #define for_each_tracing_cpu(cpu)	\
 	for_each_cpu(cpu, tracing_buffer_mask)
 
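The ftrace_cpu_disabled counter above only needs to be per-cpu exclusive, not atomic, because preemption is disabled around the increment and decrement; that is what lets the cheaper __this_cpu_*() operations replace local_t. A minimal standalone sketch of the same pattern (illustrative names, written against later kernels where the per_cpu_var() wrapper is no longer needed):

	#include <linux/percpu.h>
	#include <linux/preempt.h>

	/* illustrative counter, analogous to ftrace_cpu_disabled */
	static DEFINE_PER_CPU(int, demo_disabled);

	static inline void demo_disable_cpu(void)
	{
		preempt_disable();		/* no migration from here on */
		__this_cpu_inc(demo_disabled);	/* plain int is now safe     */
	}

	static inline void demo_enable_cpu(void)
	{
		__this_cpu_dec(demo_disabled);
		preempt_enable();
	}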
@@ -203,7 +201,7 @@ cycle_t ftrace_now(int cpu)
  */
 static struct trace_array max_tr;
 
-static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
+static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
 
 /* tracer_enabled is used to toggle activation of a tracer */
 static int tracer_enabled = 1;
@@ -243,12 +241,91 @@ static struct tracer *current_trace __read_mostly;
 
 /*
  * trace_types_lock is used to protect the trace_types list.
- * This lock is also used to keep user access serialized.
- * Accesses from userspace will grab this lock while userspace
- * activities happen inside the kernel.
  */
 static DEFINE_MUTEX(trace_types_lock);
 
+/*
+ * serialize the access of the ring buffer
+ *
+ * ring buffer serializes readers, but it is low level protection.
+ * The validity of the events (which returns by ring_buffer_peek() ..etc)
+ * are not protected by ring buffer.
+ *
+ * The content of events may become garbage if we allow other process consumes
+ * these events concurrently:
+ *   A) the page of the consumed events may become a normal page
+ *      (not reader page) in ring buffer, and this page will be rewrited
+ *      by events producer.
+ *   B) The page of the consumed events may become a page for splice_read,
+ *      and this page will be returned to system.
+ *
+ * These primitives allow multi process access to different cpu ring buffer
+ * concurrently.
+ *
+ * These primitives don't distinguish read-only and read-consume access.
+ * Multi read-only access are also serialized.
+ */
+
+#ifdef CONFIG_SMP
+static DECLARE_RWSEM(all_cpu_access_lock);
+static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
+
+static inline void trace_access_lock(int cpu)
+{
+	if (cpu == TRACE_PIPE_ALL_CPU) {
+		/* gain it for accessing the whole ring buffer. */
+		down_write(&all_cpu_access_lock);
+	} else {
+		/* gain it for accessing a cpu ring buffer. */
+
+		/* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */
+		down_read(&all_cpu_access_lock);
+
+		/* Secondly block other access to this @cpu ring buffer. */
+		mutex_lock(&per_cpu(cpu_access_lock, cpu));
+	}
+}
+
+static inline void trace_access_unlock(int cpu)
+{
+	if (cpu == TRACE_PIPE_ALL_CPU) {
+		up_write(&all_cpu_access_lock);
+	} else {
+		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
+		up_read(&all_cpu_access_lock);
+	}
+}
+
+static inline void trace_access_lock_init(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		mutex_init(&per_cpu(cpu_access_lock, cpu));
+}
+
+#else
+
+static DEFINE_MUTEX(access_lock);
+
+static inline void trace_access_lock(int cpu)
+{
+	(void)cpu;
+	mutex_lock(&access_lock);
+}
+
+static inline void trace_access_unlock(int cpu)
+{
+	(void)cpu;
+	mutex_unlock(&access_lock);
+}
+
+static inline void trace_access_lock_init(void)
+{
+}
+
+#endif
+
 /* trace_wait is a waitqueue for tasks blocked on trace_poll */
 static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 
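The primitives added above implement a two-level scheme: a global rwsem that only a reader of all cpu buffers (TRACE_PIPE_ALL_CPU) takes for write, and a per-cpu mutex for readers of a single cpu buffer, which first take the rwsem for read. A minimal sketch of the intended usage (consume_cpu_events() is a placeholder, not an API from this file):

	/* per-cpu readers of different cpus can run in parallel ... */
	static void read_cpu_buffer(struct trace_iterator *iter, int cpu)
	{
		trace_access_lock(cpu);		/* read rwsem + this cpu's mutex */
		consume_cpu_events(iter, cpu);
		trace_access_unlock(cpu);
	}

	/* ... while an all-cpu reader excludes every one of them */
	static void read_all_buffers(struct trace_iterator *iter)
	{
		trace_access_lock(TRACE_PIPE_ALL_CPU);	/* write rwsem */
		consume_cpu_events(iter, TRACE_PIPE_ALL_CPU);
		trace_access_unlock(TRACE_PIPE_ALL_CPU);
	}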
@@ -313,7 +390,6 @@ static const char *trace_options[] = {
 	"bin",
 	"block",
 	"stacktrace",
-	"sched-tree",
 	"trace_printk",
 	"ftrace_preempt",
 	"branch",
@@ -493,15 +569,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
  * protected by per_cpu spinlocks. But the action of the swap
  * needs its own lock.
  *
- * This is defined as a raw_spinlock_t in order to help
+ * This is defined as a arch_spinlock_t in order to help
  * with performance when lockdep debugging is enabled.
  *
  * It is also used in other places outside the update_max_tr
  * so it needs to be defined outside of the
  * CONFIG_TRACER_MAX_TRACE.
  */
-static raw_spinlock_t ftrace_max_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t ftrace_max_lock =
+	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly tracing_max_latency;
@@ -555,13 +631,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 
 	tr->buffer = max_tr.buffer;
 	max_tr.buffer = buf;
 
 	__update_max_tr(tr, tsk, cpu);
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 }
 
 /**
@@ -581,7 +657,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 
 	ftrace_disable_cpu();
 
@@ -603,7 +679,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 
 	__update_max_tr(tr, tsk, cpu);
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
@@ -802,7 +878,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
-static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
 /* temporary disable recording */
 static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -915,7 +991,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 	 * nor do we want to disable interrupts,
 	 * so if we miss here, then better luck next time.
 	 */
-	if (!__raw_spin_trylock(&trace_cmdline_lock))
+	if (!arch_spin_trylock(&trace_cmdline_lock))
 		return;
 
 	idx = map_pid_to_cmdline[tsk->pid];
@@ -940,7 +1016,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 
 	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
 
-	__raw_spin_unlock(&trace_cmdline_lock);
+	arch_spin_unlock(&trace_cmdline_lock);
 }
 
 void trace_find_cmdline(int pid, char comm[])
@@ -952,20 +1028,25 @@ void trace_find_cmdline(int pid, char comm[])
 		return;
 	}
 
+	if (WARN_ON_ONCE(pid < 0)) {
+		strcpy(comm, "<XXX>");
+		return;
+	}
+
 	if (pid > PID_MAX_DEFAULT) {
 		strcpy(comm, "<...>");
 		return;
 	}
 
 	preempt_disable();
-	__raw_spin_lock(&trace_cmdline_lock);
+	arch_spin_lock(&trace_cmdline_lock);
 	map = map_pid_to_cmdline[pid];
 	if (map != NO_CMDLINE_MAP)
 		strcpy(comm, saved_cmdlines[map]);
 	else
 		strcpy(comm, "<...>");
 
-	__raw_spin_unlock(&trace_cmdline_lock);
+	arch_spin_unlock(&trace_cmdline_lock);
 	preempt_enable();
 }
 
@@ -1085,7 +1166,7 @@ trace_function(struct trace_array *tr,
 	struct ftrace_entry *entry;
 
 	/* If we are reading the ring buffer, don't trace */
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@ -1151,6 +1232,22 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
 	__ftrace_trace_stack(tr->buffer, flags, skip, pc);
 }
 
+/**
+ * trace_dump_stack - record a stack back trace in the trace buffer
+ */
+void trace_dump_stack(void)
+{
+	unsigned long flags;
+
+	if (tracing_disabled || tracing_selftest_running)
+		return;
+
+	local_save_flags(flags);
+
+	/* skipping 3 traces, seems to get us at the caller of this function */
+	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
+}
+
 void
 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
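trace_dump_stack() gives kernel code a way to record the current backtrace into the ring buffer instead of the printk log, with the local irq flags sampled and three frames skipped so the trace points at the caller. A hypothetical call site (retry_count and MAX_RETRY are made-up names, not from this file):

	if (unlikely(retry_count > MAX_RETRY)) {
		trace_printk("giving up after %d retries\n", retry_count);
		trace_dump_stack();	/* backtrace lands next to the message */
	}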
@@ -1251,8 +1348,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
  */
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
-	static raw_spinlock_t trace_buf_lock =
-		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t trace_buf_lock =
+		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	static u32 trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_bprint;
@@ -1283,7 +1380,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
 	/* Lockdep uses trace_printk for lock tracing */
 	local_irq_save(flags);
-	__raw_spin_lock(&trace_buf_lock);
+	arch_spin_lock(&trace_buf_lock);
 	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
 	if (len > TRACE_BUF_SIZE || len < 0)
@@ -1300,11 +1397,13 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	entry->fmt = fmt;
 
 	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!filter_check_discard(call, entry, buffer, event)) {
 		ring_buffer_unlock_commit(buffer, event);
+		ftrace_trace_stack(buffer, flags, 6, pc);
+	}
 
 out_unlock:
-	__raw_spin_unlock(&trace_buf_lock);
+	arch_spin_unlock(&trace_buf_lock);
 	local_irq_restore(flags);
 
 out:
@@ -1334,7 +1433,7 @@ int trace_array_printk(struct trace_array *tr,
 int trace_array_vprintk(struct trace_array *tr,
 			unsigned long ip, const char *fmt, va_list args)
 {
-	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 	static char trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_print;
@@ -1360,12 +1459,8 @@ int trace_array_vprintk(struct trace_array *tr,
 
 	pause_graph_tracing();
 	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&trace_buf_lock);
-	if (args == NULL) {
-		strncpy(trace_buf, fmt, TRACE_BUF_SIZE);
-		len = strlen(trace_buf);
-	} else
-		len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+	arch_spin_lock(&trace_buf_lock);
+	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
 	size = sizeof(*entry) + len + 1;
 	buffer = tr->buffer;
@@ -1378,11 +1473,13 @@ int trace_array_vprintk(struct trace_array *tr,
 
 	memcpy(&entry->buf, trace_buf, len);
 	entry->buf[len] = '\0';
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!filter_check_discard(call, entry, buffer, event)) {
 		ring_buffer_unlock_commit(buffer, event);
+		ftrace_trace_stack(buffer, irq_flags, 6, pc);
+	}
 
 out_unlock:
-	__raw_spin_unlock(&trace_buf_lock);
+	arch_spin_unlock(&trace_buf_lock);
 	raw_local_irq_restore(irq_flags);
 	unpause_graph_tracing();
 out:
@@ -1516,6 +1613,8 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
 	int i = (int)*pos;
 	void *ent;
 
+	WARN_ON_ONCE(iter->leftover);
+
 	(*pos)++;
 
 	/* can't go backwards */
@@ -1567,12 +1666,6 @@ static void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 }
 
 /*
- * No necessary locking here. The worst thing which can
- * happen is loosing events consumed at the same time
- * by a trace_pipe reader.
- * Other than that, we don't risk to crash the ring buffer
- * because it serializes the readers.
- *
  * The current tracer is copied to avoid a global locking
  * all around.
  */
@@ -1614,17 +1707,29 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 			;
 
 	} else {
-		l = *pos - 1;
-		p = s_next(m, p, &l);
+		/*
+		 * If we overflowed the seq_file before, then we want
+		 * to just reuse the trace_seq buffer again.
+		 */
+		if (iter->leftover)
+			p = iter;
+		else {
+			l = *pos - 1;
+			p = s_next(m, p, &l);
+		}
 	}
 
 	trace_event_read_lock();
+	trace_access_lock(cpu_file);
 	return p;
 }
 
 static void s_stop(struct seq_file *m, void *p)
 {
+	struct trace_iterator *iter = m->private;
+
 	atomic_dec(&trace_record_cmdline_disabled);
+	trace_access_unlock(iter->cpu_file);
 	trace_event_read_unlock();
 }
 
@@ -1923,6 +2028,7 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
 static int s_show(struct seq_file *m, void *v)
 {
 	struct trace_iterator *iter = v;
+	int ret;
 
 	if (iter->ent == NULL) {
 		if (iter->tr) {
@@ -1942,9 +2048,27 @@ static int s_show(struct seq_file *m, void *v)
 			if (!(trace_flags & TRACE_ITER_VERBOSE))
 				print_func_help_header(m);
 		}
+	} else if (iter->leftover) {
+		/*
+		 * If we filled the seq_file buffer earlier, we
+		 * want to just show it now.
+		 */
+		ret = trace_print_seq(m, &iter->seq);
+
+		/* ret should this time be zero, but you never know */
+		iter->leftover = ret;
+
 	} else {
 		print_trace_line(iter);
-		trace_print_seq(m, &iter->seq);
+		ret = trace_print_seq(m, &iter->seq);
+		/*
+		 * If we overflow the seq_file buffer, then it will
+		 * ask us for this data again at start up.
+		 * Use that instead.
+		 *  ret is 0 if seq_file write succeeded.
+		 *        -1 otherwise.
+		 */
+		iter->leftover = ret;
 	}
 
 	return 0;
@@ -2254,7 +2378,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	mutex_lock(&tracing_cpumask_update_lock);
 
 	local_irq_disable();
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 	for_each_tracing_cpu(cpu) {
 		/*
 		 * Increase/decrease the disabled counter if we are
@@ -2269,7 +2393,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 			atomic_dec(&global_trace.data[cpu]->disabled);
 		}
 	}
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 	local_irq_enable();
 
 	cpumask_copy(tracing_cpumask, tracing_cpumask_new);
@@ -2291,67 +2415,49 @@ static const struct file_operations tracing_cpumask_fops = {
 	.write = tracing_cpumask_write,
 };
 
-static ssize_t
-tracing_trace_options_read(struct file *filp, char __user *ubuf,
-		       size_t cnt, loff_t *ppos)
+static int tracing_trace_options_show(struct seq_file *m, void *v)
 {
 	struct tracer_opt *trace_opts;
 	u32 tracer_flags;
-	int len = 0;
-	char *buf;
-	int r = 0;
 	int i;
 
-
-	/* calculate max size */
-	for (i = 0; trace_options[i]; i++) {
-		len += strlen(trace_options[i]);
-		len += 3; /* "no" and newline */
-	}
-
 	mutex_lock(&trace_types_lock);
 	tracer_flags = current_trace->flags->val;
 	trace_opts = current_trace->flags->opts;
 
-	/*
-	 * Increase the size with names of options specific
-	 * of the current tracer.
-	 */
-	for (i = 0; trace_opts[i].name; i++) {
-		len += strlen(trace_opts[i].name);
-		len += 3; /* "no" and newline */
-	}
-
-	/* +1 for \0 */
-	buf = kmalloc(len + 1, GFP_KERNEL);
-	if (!buf) {
-		mutex_unlock(&trace_types_lock);
-		return -ENOMEM;
-	}
-
 	for (i = 0; trace_options[i]; i++) {
 		if (trace_flags & (1 << i))
-			r += sprintf(buf + r, "%s\n", trace_options[i]);
+			seq_printf(m, "%s\n", trace_options[i]);
 		else
-			r += sprintf(buf + r, "no%s\n", trace_options[i]);
+			seq_printf(m, "no%s\n", trace_options[i]);
 	}
 
 	for (i = 0; trace_opts[i].name; i++) {
 		if (tracer_flags & trace_opts[i].bit)
-			r += sprintf(buf + r, "%s\n",
-				trace_opts[i].name);
+			seq_printf(m, "%s\n", trace_opts[i].name);
 		else
-			r += sprintf(buf + r, "no%s\n",
-				trace_opts[i].name);
+			seq_printf(m, "no%s\n", trace_opts[i].name);
 	}
 	mutex_unlock(&trace_types_lock);
 
-	WARN_ON(r >= len + 1);
+	return 0;
+}
 
-	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+static int __set_tracer_option(struct tracer *trace,
+			       struct tracer_flags *tracer_flags,
+			       struct tracer_opt *opts, int neg)
+{
+	int ret;
 
-	kfree(buf);
-	return r;
+	ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
+	if (ret)
+		return ret;
+
+	if (neg)
+		tracer_flags->val &= ~opts->bit;
+	else
+		tracer_flags->val |= opts->bit;
+	return 0;
 }
 
 /* Try to assign a tracer specific option */
@@ -2359,33 +2465,17 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
 {
 	struct tracer_flags *tracer_flags = trace->flags;
 	struct tracer_opt *opts = NULL;
-	int ret = 0, i = 0;
-	int len;
+	int i;
 
 	for (i = 0; tracer_flags->opts[i].name; i++) {
 		opts = &tracer_flags->opts[i];
-		len = strlen(opts->name);
 
-		if (strncmp(cmp, opts->name, len) == 0) {
-			ret = trace->set_flag(tracer_flags->val,
-				opts->bit, !neg);
-			break;
-		}
+		if (strcmp(cmp, opts->name) == 0)
+			return __set_tracer_option(trace, trace->flags,
+						   opts, neg);
 	}
-	/* Not found */
-	if (!tracer_flags->opts[i].name)
-		return -EINVAL;
 
-	/* Refused to handle */
-	if (ret)
-		return ret;
-
-	if (neg)
-		tracer_flags->val &= ~opts->bit;
-	else
-		tracer_flags->val |= opts->bit;
-
-	return 0;
+	return -EINVAL;
 }
 
 static void set_tracer_flags(unsigned int mask, int enabled)
@@ -2405,7 +2495,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 			size_t cnt, loff_t *ppos)
 {
 	char buf[64];
-	char *cmp = buf;
+	char *cmp;
 	int neg = 0;
 	int ret;
 	int i;
@@ -2417,16 +2507,15 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 		return -EFAULT;
 
 	buf[cnt] = 0;
+	cmp = strstrip(buf);
 
-	if (strncmp(buf, "no", 2) == 0) {
+	if (strncmp(cmp, "no", 2) == 0) {
 		neg = 1;
 		cmp += 2;
 	}
 
 	for (i = 0; trace_options[i]; i++) {
-		int len = strlen(trace_options[i]);
-
-		if (strncmp(cmp, trace_options[i], len) == 0) {
+		if (strcmp(cmp, trace_options[i]) == 0) {
 			set_tracer_flags(1 << i, !neg);
 			break;
 		}
@@ -2446,9 +2535,18 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static int tracing_trace_options_open(struct inode *inode, struct file *file)
+{
+	if (tracing_disabled)
+		return -ENODEV;
+	return single_open(file, tracing_trace_options_show, NULL);
+}
+
 static const struct file_operations tracing_iter_fops = {
-	.open = tracing_open_generic,
-	.read = tracing_trace_options_read,
+	.open = tracing_trace_options_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
 	.write = tracing_trace_options_write,
 };
 
| 2454 | 2552 | ||
| @@ -2822,22 +2920,6 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) | |||
| 2822 | 2920 | ||
| 2823 | mutex_lock(&trace_types_lock); | 2921 | mutex_lock(&trace_types_lock); |
| 2824 | 2922 | ||
| 2825 | /* We only allow one reader per cpu */ | ||
| 2826 | if (cpu_file == TRACE_PIPE_ALL_CPU) { | ||
| 2827 | if (!cpumask_empty(tracing_reader_cpumask)) { | ||
| 2828 | ret = -EBUSY; | ||
| 2829 | goto out; | ||
| 2830 | } | ||
| 2831 | cpumask_setall(tracing_reader_cpumask); | ||
| 2832 | } else { | ||
| 2833 | if (!cpumask_test_cpu(cpu_file, tracing_reader_cpumask)) | ||
| 2834 | cpumask_set_cpu(cpu_file, tracing_reader_cpumask); | ||
| 2835 | else { | ||
| 2836 | ret = -EBUSY; | ||
| 2837 | goto out; | ||
| 2838 | } | ||
| 2839 | } | ||
| 2840 | |||
| 2841 | /* create a buffer to store the information to pass to userspace */ | 2923 | /* create a buffer to store the information to pass to userspace */ |
| 2842 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | 2924 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); |
| 2843 | if (!iter) { | 2925 | if (!iter) { |
| @@ -2893,10 +2975,8 @@ static int tracing_release_pipe(struct inode *inode, struct file *file) | |||
| 2893 | 2975 | ||
| 2894 | mutex_lock(&trace_types_lock); | 2976 | mutex_lock(&trace_types_lock); |
| 2895 | 2977 | ||
| 2896 | if (iter->cpu_file == TRACE_PIPE_ALL_CPU) | 2978 | if (iter->trace->pipe_close) |
| 2897 | cpumask_clear(tracing_reader_cpumask); | 2979 | iter->trace->pipe_close(iter); |
| 2898 | else | ||
| 2899 | cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask); | ||
| 2900 | 2980 | ||
| 2901 | mutex_unlock(&trace_types_lock); | 2981 | mutex_unlock(&trace_types_lock); |
| 2902 | 2982 | ||
@@ -3056,6 +3136,7 @@ waitagain:
 	iter->pos = -1;
 
 	trace_event_read_lock();
+	trace_access_lock(iter->cpu_file);
 	while (find_next_entry_inc(iter) != NULL) {
 		enum print_line_t ret;
 		int len = iter->seq.len;
@@ -3072,6 +3153,7 @@ waitagain:
 		if (iter->seq.len >= cnt)
 			break;
 	}
+	trace_access_unlock(iter->cpu_file);
 	trace_event_read_unlock();
 
 	/* Now copy what we have to the user */
@@ -3104,7 +3186,7 @@ static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
 	__free_page(spd->pages[idx]);
 }
 
-static struct pipe_buf_operations tracing_pipe_buf_ops = {
+static const struct pipe_buf_operations tracing_pipe_buf_ops = {
 	.can_merge = 0,
 	.map = generic_pipe_buf_map,
 	.unmap = generic_pipe_buf_unmap,
@@ -3197,6 +3279,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 	}
 
 	trace_event_read_lock();
+	trace_access_lock(iter->cpu_file);
 
 	/* Fill as many pages as possible. */
 	for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++, len -= PAGE_SIZE) {
@@ -3220,6 +3303,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 		trace_seq_init(&iter->seq);
 	}
 
+	trace_access_unlock(iter->cpu_file);
 	trace_event_read_unlock();
 	mutex_unlock(&iter->mutex);
 
@@ -3320,6 +3404,16 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static int mark_printk(const char *fmt, ...)
+{
+	int ret;
+	va_list args;
+	va_start(args, fmt);
+	ret = trace_vprintk(0, fmt, args);
+	va_end(args);
+	return ret;
+}
+
 static ssize_t
 tracing_mark_write(struct file *filp, const char __user *ubuf,
 					size_t cnt, loff_t *fpos)
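The new mark_printk() wrapper exists because trace_array_vprintk() no longer special-cases a NULL va_list (see the hunk at -1360 above), so the trace_marker write path has to hand it a real one; formatting the user buffer through "%s" also keeps any '%' characters a user writes into trace_marker from being parsed as conversions. Roughly:

	cnt = mark_printk("%s", buf);		/* buf is data, "%s" is the format    */
	/* cnt = trace_vprintk(0, buf, NULL);	   old form: buf itself was the format */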
@@ -3346,28 +3440,25 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	} else
 		buf[cnt] = '\0';
 
-	cnt = trace_vprintk(0, buf, NULL);
+	cnt = mark_printk("%s", buf);
 	kfree(buf);
 	*fpos += cnt;
 
 	return cnt;
 }
 
-static ssize_t tracing_clock_read(struct file *filp, char __user *ubuf,
-				  size_t cnt, loff_t *ppos)
+static int tracing_clock_show(struct seq_file *m, void *v)
 {
-	char buf[64];
-	int bufiter = 0;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
-		bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter,
+		seq_printf(m,
 			"%s%s%s%s", i ? " " : "",
 			i == trace_clock_id ? "[" : "", trace_clocks[i].name,
 			i == trace_clock_id ? "]" : "");
-	bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, "\n");
+	seq_putc(m, '\n');
 
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, bufiter);
+	return 0;
 }
 
 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
@@ -3409,6 +3500,13 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static int tracing_clock_open(struct inode *inode, struct file *file)
+{
+	if (tracing_disabled)
+		return -ENODEV;
+	return single_open(file, tracing_clock_show, NULL);
+}
+
 static const struct file_operations tracing_max_lat_fops = {
 	.open = tracing_open_generic,
 	.read = tracing_max_lat_read,
@@ -3447,8 +3545,10 @@ static const struct file_operations tracing_mark_fops = {
 };
 
 static const struct file_operations trace_clock_fops = {
-	.open = tracing_open_generic,
-	.read = tracing_clock_read,
+	.open = tracing_clock_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
 	.write = tracing_clock_write,
 };
 
@@ -3505,10 +3605,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 
 	info->read = 0;
 
+	trace_access_lock(info->cpu);
 	ret = ring_buffer_read_page(info->tr->buffer,
 				    &info->spare,
 				    count,
 				    info->cpu, 0);
+	trace_access_unlock(info->cpu);
 	if (ret < 0)
 		return 0;
 
@@ -3578,7 +3680,7 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
 }
 
 /* Pipe buffer operations for a buffer. */
-static struct pipe_buf_operations buffer_pipe_buf_ops = {
+static const struct pipe_buf_operations buffer_pipe_buf_ops = {
 	.can_merge = 0,
 	.map = generic_pipe_buf_map,
 	.unmap = generic_pipe_buf_unmap,
@@ -3636,6 +3738,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		len &= PAGE_MASK;
 	}
 
+	trace_access_lock(info->cpu);
 	entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
 
 	for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) {
@@ -3683,6 +3786,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
 	}
 
+	trace_access_unlock(info->cpu);
 	spd.nr_pages = i;
 
 	/* did we read anything? */
@@ -3909,39 +4013,16 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	if (ret < 0)
 		return ret;
 
-	ret = 0;
-	switch (val) {
-	case 0:
-		/* do nothing if already cleared */
-		if (!(topt->flags->val & topt->opt->bit))
-			break;
-
-		mutex_lock(&trace_types_lock);
-		if (current_trace->set_flag)
-			ret = current_trace->set_flag(topt->flags->val,
-						      topt->opt->bit, 0);
-		mutex_unlock(&trace_types_lock);
-		if (ret)
-			return ret;
-		topt->flags->val &= ~topt->opt->bit;
-		break;
-	case 1:
-		/* do nothing if already set */
-		if (topt->flags->val & topt->opt->bit)
-			break;
+	if (val != 0 && val != 1)
+		return -EINVAL;
 
+	if (!!(topt->flags->val & topt->opt->bit) != val) {
 		mutex_lock(&trace_types_lock);
-		if (current_trace->set_flag)
-			ret = current_trace->set_flag(topt->flags->val,
-						      topt->opt->bit, 1);
+		ret = __set_tracer_option(current_trace, topt->flags,
+					  topt->opt, !val);
 		mutex_unlock(&trace_types_lock);
 		if (ret)
 			return ret;
-		topt->flags->val |= topt->opt->bit;
-		break;
-
-	default:
-		return -EINVAL;
 	}
 
 	*ppos += cnt;
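The rewritten trace_options_write() collapses the old two-case switch into a single comparison: !! squashes the masked option bit down to 0 or 1 so it can be compared with the 0/1 value parsed from userspace, and the new __set_tracer_option() helper is only called when the requested state differs from the current one. The idiom in isolation, with illustrative values:

	unsigned int flags = 0x05;	/* current flag word              */
	unsigned int bit   = 0x04;	/* the option bit in question     */
	unsigned long val  = 0;		/* state requested from userspace */

	if (!!(flags & bit) != val) {
		/* requested state differs from the current one: flip the bit */
		flags ^= bit;
	}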
@@ -4142,6 +4223,8 @@ static __init int tracer_init_debugfs(void)
 	struct dentry *d_tracer;
 	int cpu;
 
+	trace_access_lock_init();
+
 	d_tracer = tracing_init_dentry();
 
 	trace_create_file("tracing_enabled", 0644, d_tracer,
@@ -4268,8 +4351,8 @@ trace_printk_seq(struct trace_seq *s)
 
 static void __ftrace_dump(bool disable_tracing)
 {
-	static raw_spinlock_t ftrace_dump_lock =
-		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t ftrace_dump_lock =
+		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
 	unsigned int old_userobj;
@@ -4279,7 +4362,7 @@ static void __ftrace_dump(bool disable_tracing)
 
 	/* only one dump */
 	local_irq_save(flags);
-	__raw_spin_lock(&ftrace_dump_lock);
+	arch_spin_lock(&ftrace_dump_lock);
 	if (dump_ran)
 		goto out;
 
@@ -4354,7 +4437,7 @@ static void __ftrace_dump(bool disable_tracing)
 	}
 
  out:
-	__raw_spin_unlock(&ftrace_dump_lock);
+	arch_spin_unlock(&ftrace_dump_lock);
 	local_irq_restore(flags);
 }
 
@@ -4376,9 +4459,6 @@ __init static int tracer_alloc_buffers(void)
 	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
 		goto out_free_buffer_mask;
 
-	if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
-		goto out_free_tracing_cpumask;
-
 	/* To save memory, keep the ring buffer size to its minimum */
 	if (ring_buffer_expanded)
 		ring_buf_size = trace_buf_size;
@@ -4415,7 +4495,7 @@ __init static int tracer_alloc_buffers(void)
 	/* Allocate the first page for all buffers */
 	for_each_tracing_cpu(i) {
 		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
-		max_tr.data[i] = &per_cpu(max_data, i);
+		max_tr.data[i] = &per_cpu(max_tr_data, i);
 	}
 
 	trace_init_cmdlines();
@@ -4436,8 +4516,6 @@ __init static int tracer_alloc_buffers(void)
 	return 0;
 
 out_free_cpumask:
-	free_cpumask_var(tracing_reader_cpumask);
-out_free_tracing_cpumask:
 	free_cpumask_var(tracing_cpumask);
 out_free_buffer_mask:
 	free_cpumask_var(tracing_buffer_mask);
