Diffstat (limited to 'kernel/trace/trace.c')
 -rw-r--r--  kernel/trace/trace.c  199
 1 file changed, 147 insertions(+), 52 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index eac6875cb990..3ec2ee6f6560 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -32,6 +32,7 @@
 #include <linux/splice.h>
 #include <linux/kdebug.h>
 #include <linux/string.h>
+#include <linux/rwsem.h>
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/poll.h>
@@ -91,20 +92,17 @@ DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 static inline void ftrace_disable_cpu(void)
 {
 	preempt_disable();
-	__this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_inc(ftrace_cpu_disabled);
 }
 
 static inline void ftrace_enable_cpu(void)
 {
-	__this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_dec(ftrace_cpu_disabled);
 	preempt_enable();
 }
 
 static cpumask_var_t __read_mostly tracing_buffer_mask;
 
-/* Define which cpu buffers are currently read in trace_pipe */
-static cpumask_var_t tracing_reader_cpumask;
-
 #define for_each_tracing_cpu(cpu)	\
 	for_each_cpu(cpu, tracing_buffer_mask)
 
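(The per_cpu_var() wrappers disappear above because, following the percpu API cleanup, the __this_cpu_inc()/__this_cpu_dec() accessors take the per-cpu variable name directly.)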
@@ -243,12 +241,91 @@ static struct tracer *current_trace __read_mostly;
 
 /*
  * trace_types_lock is used to protect the trace_types list.
- * This lock is also used to keep user access serialized.
- * Accesses from userspace will grab this lock while userspace
- * activities happen inside the kernel.
  */
 static DEFINE_MUTEX(trace_types_lock);
 
+/*
+ * Serialize access to the ring buffer.
+ *
+ * The ring buffer serializes readers, but that is only low-level
+ * protection.  The validity of the events (as returned by
+ * ring_buffer_peek() etc.) is not protected by the ring buffer.
+ *
+ * The content of events may become garbage if we allow other processes
+ * to consume these events concurrently:
+ *   A) the page of the consumed events may become a normal page
+ *      (not a reader page) in the ring buffer, and this page will be
+ *      rewritten by the events producer.
+ *   B) the page of the consumed events may become a page for splice_read,
+ *      and this page will be returned to the system.
+ *
+ * These primitives allow multiple processes to access different cpu
+ * ring buffers concurrently.
+ *
+ * These primitives don't distinguish read-only and read-consume access.
+ * Multiple read-only accesses are also serialized.
+ */
+
+#ifdef CONFIG_SMP
+static DECLARE_RWSEM(all_cpu_access_lock);
+static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
+
+static inline void trace_access_lock(int cpu)
+{
+	if (cpu == TRACE_PIPE_ALL_CPU) {
+		/* gain it for accessing the whole ring buffer. */
+		down_write(&all_cpu_access_lock);
+	} else {
+		/* gain it for accessing a cpu ring buffer. */
+
+		/* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */
+		down_read(&all_cpu_access_lock);
+
+		/* Secondly block other access to this @cpu ring buffer. */
+		mutex_lock(&per_cpu(cpu_access_lock, cpu));
+	}
+}
+
+static inline void trace_access_unlock(int cpu)
+{
+	if (cpu == TRACE_PIPE_ALL_CPU) {
+		up_write(&all_cpu_access_lock);
+	} else {
+		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
+		up_read(&all_cpu_access_lock);
+	}
+}
+
+static inline void trace_access_lock_init(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		mutex_init(&per_cpu(cpu_access_lock, cpu));
+}
+
+#else
+
+static DEFINE_MUTEX(access_lock);
+
+static inline void trace_access_lock(int cpu)
+{
+	(void)cpu;
+	mutex_lock(&access_lock);
+}
+
+static inline void trace_access_unlock(int cpu)
+{
+	(void)cpu;
+	mutex_unlock(&access_lock);
+}
+
+static inline void trace_access_lock_init(void)
+{
+}
+
+#endif
+
 /* trace_wait is a waitqueue for tasks blocked on trace_poll */
 static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 
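Every reader path below brackets its ring buffer consumption with these calls. A minimal sketch of the intended pattern, simplified from the tracing_read_pipe() hunk further down (the helper name here is illustrative only, not part of the patch):

	/*
	 * Illustrative only: consume events of one cpu buffer under the
	 * new locks.  A TRACE_PIPE_ALL_CPU reader takes all_cpu_access_lock
	 * for writing and so excludes every per-cpu reader; two readers on
	 * different cpus share the rwsem read side and run concurrently,
	 * each serialized by its own cpu_access_lock mutex.
	 */
	static void consume_cpu_events(struct trace_iterator *iter)
	{
		trace_access_lock(iter->cpu_file);	/* rwsem read + per-cpu mutex */
		while (find_next_entry_inc(iter) != NULL)
			;	/* read or consume events of this cpu buffer */
		trace_access_unlock(iter->cpu_file);
	}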
@@ -297,6 +374,21 @@ static int __init set_buf_size(char *str)
 }
 __setup("trace_buf_size=", set_buf_size);
 
+static int __init set_tracing_thresh(char *str)
+{
+	unsigned long threshold;
+	int ret;
+
+	if (!str)
+		return 0;
+	ret = strict_strtoul(str, 0, &threshold);
+	if (ret < 0)
+		return 0;
+	tracing_thresh = threshold * 1000;
+	return 1;
+}
+__setup("tracing_thresh=", set_tracing_thresh);
+
 unsigned long nsecs_to_usecs(unsigned long nsecs)
 {
 	return nsecs / 1000;
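Note the units: the *1000 conversion (the inverse of nsecs_to_usecs() just above) implies the boot parameter is given in microseconds and stored in nanoseconds. A worked example under that reading:

	/* "tracing_thresh=100" on the kernel command line yields: */
	tracing_thresh = 100 * 1000;	/* 100 usecs, stored as 100000 ns */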
@@ -502,9 +594,10 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 static arch_spinlock_t ftrace_max_lock =
 	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
+unsigned long __read_mostly tracing_thresh;
+
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly tracing_max_latency;
-unsigned long __read_mostly tracing_thresh;
 
 /*
  * Copy the new maximum trace into the separate maximum-trace
@@ -515,7 +608,7 @@ static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
 	struct trace_array_cpu *data = tr->data[cpu];
-	struct trace_array_cpu *max_data = tr->data[cpu];
+	struct trace_array_cpu *max_data;
 
 	max_tr.cpu = cpu;
 	max_tr.time_start = data->preempt_timestamp;
@@ -525,7 +618,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	max_data->critical_start = data->critical_start;
 	max_data->critical_end = data->critical_end;
 
-	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
+	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
 	max_data->pid = tsk->pid;
 	max_data->uid = task_uid(tsk);
 	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
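The two one-line changes above fix a copy-and-paste slip in __update_max_tr(): max_data was bogusly initialized to the live buffer's tr->data[cpu], and the task's comm was copied into the live per-cpu data instead of into max_data, so the saved maximum-latency snapshot could report a stale command name.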
@@ -747,10 +840,10 @@ out:
 	mutex_unlock(&trace_types_lock);
 }
 
-static void __tracing_reset(struct trace_array *tr, int cpu)
+static void __tracing_reset(struct ring_buffer *buffer, int cpu)
 {
 	ftrace_disable_cpu();
-	ring_buffer_reset_cpu(tr->buffer, cpu);
+	ring_buffer_reset_cpu(buffer, cpu);
 	ftrace_enable_cpu();
 }
 
@@ -762,7 +855,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
 
 	/* Make sure all commits have finished */
 	synchronize_sched();
-	__tracing_reset(tr, cpu);
+	__tracing_reset(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
@@ -780,7 +873,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		__tracing_reset(tr, cpu);
+		__tracing_reset(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
@@ -857,6 +950,8 @@ void tracing_start(void)
 		goto out;
 	}
 
+	/* Prevent the buffers from switching */
+	arch_spin_lock(&ftrace_max_lock);
 
 	buffer = global_trace.buffer;
 	if (buffer)
@@ -866,6 +961,8 @@ void tracing_start(void)
 	if (buffer)
 		ring_buffer_record_enable(buffer);
 
+	arch_spin_unlock(&ftrace_max_lock);
+
 	ftrace_start();
  out:
 	spin_unlock_irqrestore(&tracing_start_lock, flags);
@@ -887,6 +984,9 @@ void tracing_stop(void)
 	if (trace_stop_count++)
 		goto out;
 
+	/* Prevent the buffers from switching */
+	arch_spin_lock(&ftrace_max_lock);
+
 	buffer = global_trace.buffer;
 	if (buffer)
 		ring_buffer_record_disable(buffer);
@@ -895,6 +995,8 @@ void tracing_stop(void)
 	if (buffer)
 		ring_buffer_record_disable(buffer);
 
+	arch_spin_unlock(&ftrace_max_lock);
+
  out:
 	spin_unlock_irqrestore(&tracing_start_lock, flags);
 }
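ftrace_max_lock (defined above, next to __update_max_tr()) is the lock under which the maximum-latency machinery swaps the live and max buffers; holding it across the record enable/disable pairs is what the "Prevent the buffers from switching" comments refer to, keeping tracing_start()/tracing_stop() from racing with a swap and toggling recording on the wrong buffer.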
@@ -1089,7 +1191,7 @@ trace_function(struct trace_array *tr,
 	struct ftrace_entry *entry;
 
 	/* If we are reading the ring buffer, don't trace */
-	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@ -1182,6 +1284,13 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
 		return;
 
+	/*
+	 * NMIs can not handle page faults, even with fix ups.
+	 * Saving the user stack can (and often does) fault.
+	 */
+	if (unlikely(in_nmi()))
+		return;
+
 	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
@@ -1320,8 +1429,10 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	entry->fmt = fmt;
 
 	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!filter_check_discard(call, entry, buffer, event)) {
 		ring_buffer_unlock_commit(buffer, event);
+		ftrace_trace_stack(buffer, flags, 6, pc);
+	}
 
 out_unlock:
 	arch_spin_unlock(&trace_buf_lock);
@@ -1394,8 +1505,10 @@ int trace_array_vprintk(struct trace_array *tr,
 
 	memcpy(&entry->buf, trace_buf, len);
 	entry->buf[len] = '\0';
-	if (!filter_check_discard(call, entry, buffer, event))
+	if (!filter_check_discard(call, entry, buffer, event)) {
 		ring_buffer_unlock_commit(buffer, event);
+		ftrace_trace_stack(buffer, irq_flags, 6, pc);
+	}
 
 out_unlock:
 	arch_spin_unlock(&trace_buf_lock);
@@ -1585,12 +1698,6 @@ static void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 }
 
 /*
- * No necessary locking here. The worst thing which can
- * happen is loosing events consumed at the same time
- * by a trace_pipe reader.
- * Other than that, we don't risk to crash the ring buffer
- * because it serializes the readers.
- *
  * The current tracer is copied to avoid a global locking
  * all around.
  */
@@ -1628,6 +1735,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 
 		ftrace_enable_cpu();
 
+		iter->leftover = 0;
 		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
 			;
 
@@ -1645,12 +1753,16 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 	}
 
 	trace_event_read_lock();
+	trace_access_lock(cpu_file);
 	return p;
 }
 
 static void s_stop(struct seq_file *m, void *p)
 {
+	struct trace_iterator *iter = m->private;
+
 	atomic_dec(&trace_record_cmdline_disabled);
+	trace_access_unlock(iter->cpu_file);
 	trace_event_read_unlock();
 }
 
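Note the lock ordering these hunks establish for every reader path: trace_event_read_lock() is taken before trace_access_lock(), and the two are released in the reverse order. s_stop() digs the iterator out of m->private because, unlike s_start(), it is not otherwise handed the cpu_file it must unlock.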
@@ -2841,22 +2953,6 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 
 	mutex_lock(&trace_types_lock);
 
-	/* We only allow one reader per cpu */
-	if (cpu_file == TRACE_PIPE_ALL_CPU) {
-		if (!cpumask_empty(tracing_reader_cpumask)) {
-			ret = -EBUSY;
-			goto out;
-		}
-		cpumask_setall(tracing_reader_cpumask);
-	} else {
-		if (!cpumask_test_cpu(cpu_file, tracing_reader_cpumask))
-			cpumask_set_cpu(cpu_file, tracing_reader_cpumask);
-		else {
-			ret = -EBUSY;
-			goto out;
-		}
-	}
-
 	/* create a buffer to store the information to pass to userspace */
 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
 	if (!iter) {
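With tracing_reader_cpumask gone, opening trace_pipe no longer returns -EBUSY when another reader already exists; concurrent readers are instead serialized at read time by trace_access_lock().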
@@ -2912,12 +3008,6 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
 
 	mutex_lock(&trace_types_lock);
 
-	if (iter->cpu_file == TRACE_PIPE_ALL_CPU)
-		cpumask_clear(tracing_reader_cpumask);
-	else
-		cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask);
-
-
 	if (iter->trace->pipe_close)
 		iter->trace->pipe_close(iter);
 
@@ -3079,6 +3169,7 @@ waitagain:
 	iter->pos = -1;
 
 	trace_event_read_lock();
+	trace_access_lock(iter->cpu_file);
 	while (find_next_entry_inc(iter) != NULL) {
 		enum print_line_t ret;
 		int len = iter->seq.len;
@@ -3095,6 +3186,7 @@ waitagain:
 		if (iter->seq.len >= cnt)
 			break;
 	}
+	trace_access_unlock(iter->cpu_file);
 	trace_event_read_unlock();
 
 	/* Now copy what we have to the user */
@@ -3220,6 +3312,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 	}
 
 	trace_event_read_lock();
+	trace_access_lock(iter->cpu_file);
 
 	/* Fill as many pages as possible. */
 	for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) {
@@ -3243,6 +3336,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 		trace_seq_init(&iter->seq);
 	}
 
+	trace_access_unlock(iter->cpu_file);
 	trace_event_read_unlock();
 	mutex_unlock(&iter->mutex);
 
@@ -3544,10 +3638,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 
 	info->read = 0;
 
+	trace_access_lock(info->cpu);
 	ret = ring_buffer_read_page(info->tr->buffer,
 				    &info->spare,
 				    count,
 				    info->cpu, 0);
+	trace_access_unlock(info->cpu);
 	if (ret < 0)
 		return 0;
 
@@ -3675,6 +3771,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		len &= PAGE_MASK;
 	}
 
+	trace_access_lock(info->cpu);
 	entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
 
 	for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) {
@@ -3722,6 +3819,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
 	}
 
+	trace_access_unlock(info->cpu);
 	spd.nr_pages = i;
 
 	/* did we read anything? */
@@ -4158,6 +4256,8 @@ static __init int tracer_init_debugfs(void)
 	struct dentry *d_tracer;
 	int cpu;
 
+	trace_access_lock_init();
+
 	d_tracer = tracing_init_dentry();
 
 	trace_create_file("tracing_enabled", 0644, d_tracer,
@@ -4181,10 +4281,10 @@ static __init int tracer_init_debugfs(void)
 #ifdef CONFIG_TRACER_MAX_TRACE
 	trace_create_file("tracing_max_latency", 0644, d_tracer,
 			&tracing_max_latency, &tracing_max_lat_fops);
+#endif
 
 	trace_create_file("tracing_thresh", 0644, d_tracer,
 			&tracing_thresh, &tracing_max_lat_fops);
-#endif
 
 	trace_create_file("README", 0444, d_tracer,
 			NULL, &tracing_readme_fops);
@@ -4392,9 +4492,6 @@ __init static int tracer_alloc_buffers(void)
 	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
 		goto out_free_buffer_mask;
 
-	if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
-		goto out_free_tracing_cpumask;
-
 	/* To save memory, keep the ring buffer size to its minimum */
 	if (ring_buffer_expanded)
 		ring_buf_size = trace_buf_size;
@@ -4452,8 +4549,6 @@ __init static int tracer_alloc_buffers(void)
 	return 0;
 
 out_free_cpumask:
-	free_cpumask_var(tracing_reader_cpumask);
-out_free_tracing_cpumask:
 	free_cpumask_var(tracing_cpumask);
 out_free_buffer_mask:
 	free_cpumask_var(tracing_buffer_mask);
