Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	463
1 file changed, 265 insertions, 198 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b20d3ec75de9..ed01fdba4a55 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -12,7 +12,7 @@
12 * Copyright (C) 2004 William Lee Irwin III 12 * Copyright (C) 2004 William Lee Irwin III
13 */ 13 */
14#include <linux/ring_buffer.h> 14#include <linux/ring_buffer.h>
15#include <linux/utsrelease.h> 15#include <generated/utsrelease.h>
16#include <linux/stacktrace.h> 16#include <linux/stacktrace.h>
17#include <linux/writeback.h> 17#include <linux/writeback.h>
18#include <linux/kallsyms.h> 18#include <linux/kallsyms.h>
@@ -32,6 +32,7 @@
32#include <linux/splice.h> 32#include <linux/splice.h>
33#include <linux/kdebug.h> 33#include <linux/kdebug.h>
34#include <linux/string.h> 34#include <linux/string.h>
35#include <linux/rwsem.h>
35#include <linux/ctype.h> 36#include <linux/ctype.h>
36#include <linux/init.h> 37#include <linux/init.h>
37#include <linux/poll.h> 38#include <linux/poll.h>
@@ -86,25 +87,22 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
86 */ 87 */
87static int tracing_disabled = 1; 88static int tracing_disabled = 1;
88 89
89DEFINE_PER_CPU(local_t, ftrace_cpu_disabled); 90DEFINE_PER_CPU(int, ftrace_cpu_disabled);
90 91
91static inline void ftrace_disable_cpu(void) 92static inline void ftrace_disable_cpu(void)
92{ 93{
93 preempt_disable(); 94 preempt_disable();
94 local_inc(&__get_cpu_var(ftrace_cpu_disabled)); 95 __this_cpu_inc(ftrace_cpu_disabled);
95} 96}
96 97
97static inline void ftrace_enable_cpu(void) 98static inline void ftrace_enable_cpu(void)
98{ 99{
99 local_dec(&__get_cpu_var(ftrace_cpu_disabled)); 100 __this_cpu_dec(ftrace_cpu_disabled);
100 preempt_enable(); 101 preempt_enable();
101} 102}
102 103
103static cpumask_var_t __read_mostly tracing_buffer_mask; 104static cpumask_var_t __read_mostly tracing_buffer_mask;
104 105
105/* Define which cpu buffers are currently read in trace_pipe */
106static cpumask_var_t tracing_reader_cpumask;
107
108#define for_each_tracing_cpu(cpu) \ 106#define for_each_tracing_cpu(cpu) \
109 for_each_cpu(cpu, tracing_buffer_mask) 107 for_each_cpu(cpu, tracing_buffer_mask)
110 108
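
The ftrace_cpu_disabled flag drops from local_t to a plain per-CPU int: with preemption disabled around the update, the non-atomic __this_cpu_*() accessors are sufficient and cheaper. A minimal sketch of the same pattern, using a hypothetical counter that is not part of this patch:

#include <linux/percpu.h>
#include <linux/preempt.h>

/* Hypothetical per-CPU flag, for illustration only. */
static DEFINE_PER_CPU(int, demo_disabled);

static inline void demo_disable_this_cpu(void)
{
	preempt_disable();		/* stay on this CPU */
	__this_cpu_inc(demo_disabled);	/* non-atomic is fine: no preemption */
}

static inline void demo_enable_this_cpu(void)
{
	__this_cpu_dec(demo_disabled);
	preempt_enable();
}

static inline int demo_is_disabled(void)
{
	return __this_cpu_read(demo_disabled);
}
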
@@ -129,7 +127,7 @@ static int tracing_set_tracer(const char *buf);
129static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; 127static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
130static char *default_bootup_tracer; 128static char *default_bootup_tracer;
131 129
132static int __init set_ftrace(char *str) 130static int __init set_cmdline_ftrace(char *str)
133{ 131{
134 strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); 132 strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
135 default_bootup_tracer = bootup_tracer_buf; 133 default_bootup_tracer = bootup_tracer_buf;
@@ -137,7 +135,7 @@ static int __init set_ftrace(char *str)
137 ring_buffer_expanded = 1; 135 ring_buffer_expanded = 1;
138 return 1; 136 return 1;
139} 137}
140__setup("ftrace=", set_ftrace); 138__setup("ftrace=", set_cmdline_ftrace);
141 139
142static int __init set_ftrace_dump_on_oops(char *str) 140static int __init set_ftrace_dump_on_oops(char *str)
143{ 141{
@@ -203,7 +201,7 @@ cycle_t ftrace_now(int cpu)
203 */ 201 */
204static struct trace_array max_tr; 202static struct trace_array max_tr;
205 203
206static DEFINE_PER_CPU(struct trace_array_cpu, max_data); 204static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
207 205
208/* tracer_enabled is used to toggle activation of a tracer */ 206/* tracer_enabled is used to toggle activation of a tracer */
209static int tracer_enabled = 1; 207static int tracer_enabled = 1;
@@ -243,12 +241,91 @@ static struct tracer *current_trace __read_mostly;
243 241
244/* 242/*
245 * trace_types_lock is used to protect the trace_types list. 243 * trace_types_lock is used to protect the trace_types list.
246 * This lock is also used to keep user access serialized.
247 * Accesses from userspace will grab this lock while userspace
248 * activities happen inside the kernel.
249 */ 244 */
250static DEFINE_MUTEX(trace_types_lock); 245static DEFINE_MUTEX(trace_types_lock);
251 246
247/*
248 * Serialize access to the ring buffer.
249 *
250 * The ring buffer serializes readers, but that is only low-level protection.
251 * The validity of the events (as returned by ring_buffer_peek() etc.)
252 * is not protected by the ring buffer.
253 *
254 * The content of events may become garbage if we allow other processes to
255 * consume these events concurrently:
256 * A) the page of the consumed events may become a normal page
257 * (not a reader page) in the ring buffer, and this page will be rewritten
258 * by the event producer.
259 * B) The page of the consumed events may become a page for splice_read,
260 * and this page will be returned to the system.
261 *
262 * These primitives allow multiple processes to access different per-cpu
263 * ring buffers concurrently.
264 *
265 * These primitives don't distinguish read-only and read-consume access.
266 * Multiple read-only accesses are also serialized.
267 */
268
269#ifdef CONFIG_SMP
270static DECLARE_RWSEM(all_cpu_access_lock);
271static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
272
273static inline void trace_access_lock(int cpu)
274{
275 if (cpu == TRACE_PIPE_ALL_CPU) {
276 /* gain it for accessing the whole ring buffer. */
277 down_write(&all_cpu_access_lock);
278 } else {
279 /* gain it for accessing a cpu ring buffer. */
280
281 /* First, block any trace_access_lock(TRACE_PIPE_ALL_CPU) callers. */
282 down_read(&all_cpu_access_lock);
283
284 /* Second, block other access to this @cpu ring buffer. */
285 mutex_lock(&per_cpu(cpu_access_lock, cpu));
286 }
287}
288
289static inline void trace_access_unlock(int cpu)
290{
291 if (cpu == TRACE_PIPE_ALL_CPU) {
292 up_write(&all_cpu_access_lock);
293 } else {
294 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
295 up_read(&all_cpu_access_lock);
296 }
297}
298
299static inline void trace_access_lock_init(void)
300{
301 int cpu;
302
303 for_each_possible_cpu(cpu)
304 mutex_init(&per_cpu(cpu_access_lock, cpu));
305}
306
307#else
308
309static DEFINE_MUTEX(access_lock);
310
311static inline void trace_access_lock(int cpu)
312{
313 (void)cpu;
314 mutex_lock(&access_lock);
315}
316
317static inline void trace_access_unlock(int cpu)
318{
319 (void)cpu;
320 mutex_unlock(&access_lock);
321}
322
323static inline void trace_access_lock_init(void)
324{
325}
326
327#endif
328
252/* trace_wait is a waitqueue for tasks blocked on trace_poll */ 329/* trace_wait is a waitqueue for tasks blocked on trace_poll */
253static DECLARE_WAIT_QUEUE_HEAD(trace_wait); 330static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
254 331
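
With these primitives, a per-cpu reader holds all_cpu_access_lock for read plus its own cpu_access_lock mutex, while a TRACE_PIPE_ALL_CPU reader holds the rwsem for write and so excludes every other reader. A hedged sketch of the intended read-side calling pattern as it would sit inside trace.c; the body is a placeholder, not the real trace_pipe code:

/* Sketch only: how a reader is expected to bracket ring buffer access. */
static void demo_read_buffer(struct trace_iterator *iter)
{
	trace_access_lock(iter->cpu_file);
	/*
	 * Events peeked at or consumed for this cpu (or for all cpus when
	 * cpu_file == TRACE_PIPE_ALL_CPU) stay valid while the lock is held.
	 */
	trace_access_unlock(iter->cpu_file);
}
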
@@ -313,7 +390,6 @@ static const char *trace_options[] = {
313 "bin", 390 "bin",
314 "block", 391 "block",
315 "stacktrace", 392 "stacktrace",
316 "sched-tree",
317 "trace_printk", 393 "trace_printk",
318 "ftrace_preempt", 394 "ftrace_preempt",
319 "branch", 395 "branch",
@@ -493,15 +569,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
493 * protected by per_cpu spinlocks. But the action of the swap 569 * protected by per_cpu spinlocks. But the action of the swap
494 * needs its own lock. 570 * needs its own lock.
495 * 571 *
496 * This is defined as a raw_spinlock_t in order to help 572 * This is defined as a arch_spinlock_t in order to help
497 * with performance when lockdep debugging is enabled. 573 * with performance when lockdep debugging is enabled.
498 * 574 *
499 * It is also used in other places outside the update_max_tr 575 * It is also used in other places outside the update_max_tr
500 * so it needs to be defined outside of the 576 * so it needs to be defined outside of the
501 * CONFIG_TRACER_MAX_TRACE. 577 * CONFIG_TRACER_MAX_TRACE.
502 */ 578 */
503static raw_spinlock_t ftrace_max_lock = 579static arch_spinlock_t ftrace_max_lock =
504 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 580 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
505 581
506#ifdef CONFIG_TRACER_MAX_TRACE 582#ifdef CONFIG_TRACER_MAX_TRACE
507unsigned long __read_mostly tracing_max_latency; 583unsigned long __read_mostly tracing_max_latency;
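
This is part of the tree-wide rename in which the old lockdep-free raw_spinlock_t became arch_spinlock_t (freeing the raw_spinlock name for locks that must always spin under preempt-rt), so __raw_spin_*() calls become arch_spin_*(). A minimal sketch of the new spelling with a hypothetical lock; note that arch_spin_lock() neither disables interrupts nor informs lockdep, so the caller does that itself:

#include <linux/spinlock.h>

/* Hypothetical lowest-level lock, for illustration only. */
static arch_spinlock_t demo_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void demo_update(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* caller handles irqs itself */
	arch_spin_lock(&demo_lock);
	/* ... touch the data protected by demo_lock ... */
	arch_spin_unlock(&demo_lock);
	local_irq_restore(flags);
}
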
@@ -555,13 +631,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
555 return; 631 return;
556 632
557 WARN_ON_ONCE(!irqs_disabled()); 633 WARN_ON_ONCE(!irqs_disabled());
558 __raw_spin_lock(&ftrace_max_lock); 634 arch_spin_lock(&ftrace_max_lock);
559 635
560 tr->buffer = max_tr.buffer; 636 tr->buffer = max_tr.buffer;
561 max_tr.buffer = buf; 637 max_tr.buffer = buf;
562 638
563 __update_max_tr(tr, tsk, cpu); 639 __update_max_tr(tr, tsk, cpu);
564 __raw_spin_unlock(&ftrace_max_lock); 640 arch_spin_unlock(&ftrace_max_lock);
565} 641}
566 642
567/** 643/**
@@ -581,7 +657,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
581 return; 657 return;
582 658
583 WARN_ON_ONCE(!irqs_disabled()); 659 WARN_ON_ONCE(!irqs_disabled());
584 __raw_spin_lock(&ftrace_max_lock); 660 arch_spin_lock(&ftrace_max_lock);
585 661
586 ftrace_disable_cpu(); 662 ftrace_disable_cpu();
587 663
@@ -603,7 +679,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
603 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); 679 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
604 680
605 __update_max_tr(tr, tsk, cpu); 681 __update_max_tr(tr, tsk, cpu);
606 __raw_spin_unlock(&ftrace_max_lock); 682 arch_spin_unlock(&ftrace_max_lock);
607} 683}
608#endif /* CONFIG_TRACER_MAX_TRACE */ 684#endif /* CONFIG_TRACER_MAX_TRACE */
609 685
@@ -802,7 +878,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
802static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; 878static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
803static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN]; 879static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
804static int cmdline_idx; 880static int cmdline_idx;
805static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED; 881static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
806 882
807/* temporary disable recording */ 883/* temporary disable recording */
808static atomic_t trace_record_cmdline_disabled __read_mostly; 884static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -915,7 +991,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
915 * nor do we want to disable interrupts, 991 * nor do we want to disable interrupts,
916 * so if we miss here, then better luck next time. 992 * so if we miss here, then better luck next time.
917 */ 993 */
918 if (!__raw_spin_trylock(&trace_cmdline_lock)) 994 if (!arch_spin_trylock(&trace_cmdline_lock))
919 return; 995 return;
920 996
921 idx = map_pid_to_cmdline[tsk->pid]; 997 idx = map_pid_to_cmdline[tsk->pid];
@@ -940,7 +1016,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
940 1016
941 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN); 1017 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
942 1018
943 __raw_spin_unlock(&trace_cmdline_lock); 1019 arch_spin_unlock(&trace_cmdline_lock);
944} 1020}
945 1021
946void trace_find_cmdline(int pid, char comm[]) 1022void trace_find_cmdline(int pid, char comm[])
@@ -952,20 +1028,25 @@ void trace_find_cmdline(int pid, char comm[])
952 return; 1028 return;
953 } 1029 }
954 1030
1031 if (WARN_ON_ONCE(pid < 0)) {
1032 strcpy(comm, "<XXX>");
1033 return;
1034 }
1035
955 if (pid > PID_MAX_DEFAULT) { 1036 if (pid > PID_MAX_DEFAULT) {
956 strcpy(comm, "<...>"); 1037 strcpy(comm, "<...>");
957 return; 1038 return;
958 } 1039 }
959 1040
960 preempt_disable(); 1041 preempt_disable();
961 __raw_spin_lock(&trace_cmdline_lock); 1042 arch_spin_lock(&trace_cmdline_lock);
962 map = map_pid_to_cmdline[pid]; 1043 map = map_pid_to_cmdline[pid];
963 if (map != NO_CMDLINE_MAP) 1044 if (map != NO_CMDLINE_MAP)
964 strcpy(comm, saved_cmdlines[map]); 1045 strcpy(comm, saved_cmdlines[map]);
965 else 1046 else
966 strcpy(comm, "<...>"); 1047 strcpy(comm, "<...>");
967 1048
968 __raw_spin_unlock(&trace_cmdline_lock); 1049 arch_spin_unlock(&trace_cmdline_lock);
969 preempt_enable(); 1050 preempt_enable();
970} 1051}
971 1052
@@ -1085,7 +1166,7 @@ trace_function(struct trace_array *tr,
1085 struct ftrace_entry *entry; 1166 struct ftrace_entry *entry;
1086 1167
1087 /* If we are reading the ring buffer, don't trace */ 1168 /* If we are reading the ring buffer, don't trace */
1088 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) 1169 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1089 return; 1170 return;
1090 1171
1091 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), 1172 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@ -1151,6 +1232,22 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1151 __ftrace_trace_stack(tr->buffer, flags, skip, pc); 1232 __ftrace_trace_stack(tr->buffer, flags, skip, pc);
1152} 1233}
1153 1234
1235/**
1236 * trace_dump_stack - record a stack back trace in the trace buffer
1237 */
1238void trace_dump_stack(void)
1239{
1240 unsigned long flags;
1241
1242 if (tracing_disabled || tracing_selftest_running)
1243 return;
1244
1245 local_save_flags(flags);
1246
1247 /* skipping 3 traces seems to get us to the caller of this function */
1248 __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
1249}
1250
1154void 1251void
1155ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) 1252ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1156{ 1253{
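
trace_dump_stack() gives kernel code a way to record a backtrace into the trace buffer instead of spamming the console. A hedged usage sketch; the surrounding function is made up:

#include <linux/kernel.h>

/* Hypothetical slow path: note who got here, without printk noise. */
static void demo_slow_path(void)
{
	trace_dump_stack();	/* backtrace goes to the ftrace ring buffer */
	/* ... continue handling the slow path ... */
}
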
@@ -1251,8 +1348,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
1251 */ 1348 */
1252int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) 1349int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1253{ 1350{
1254 static raw_spinlock_t trace_buf_lock = 1351 static arch_spinlock_t trace_buf_lock =
1255 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 1352 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1256 static u32 trace_buf[TRACE_BUF_SIZE]; 1353 static u32 trace_buf[TRACE_BUF_SIZE];
1257 1354
1258 struct ftrace_event_call *call = &event_bprint; 1355 struct ftrace_event_call *call = &event_bprint;
@@ -1283,7 +1380,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1283 1380
1284 /* Lockdep uses trace_printk for lock tracing */ 1381 /* Lockdep uses trace_printk for lock tracing */
1285 local_irq_save(flags); 1382 local_irq_save(flags);
1286 __raw_spin_lock(&trace_buf_lock); 1383 arch_spin_lock(&trace_buf_lock);
1287 len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args); 1384 len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
1288 1385
1289 if (len > TRACE_BUF_SIZE || len < 0) 1386 if (len > TRACE_BUF_SIZE || len < 0)
@@ -1300,11 +1397,13 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1300 entry->fmt = fmt; 1397 entry->fmt = fmt;
1301 1398
1302 memcpy(entry->buf, trace_buf, sizeof(u32) * len); 1399 memcpy(entry->buf, trace_buf, sizeof(u32) * len);
1303 if (!filter_check_discard(call, entry, buffer, event)) 1400 if (!filter_check_discard(call, entry, buffer, event)) {
1304 ring_buffer_unlock_commit(buffer, event); 1401 ring_buffer_unlock_commit(buffer, event);
1402 ftrace_trace_stack(buffer, flags, 6, pc);
1403 }
1305 1404
1306out_unlock: 1405out_unlock:
1307 __raw_spin_unlock(&trace_buf_lock); 1406 arch_spin_unlock(&trace_buf_lock);
1308 local_irq_restore(flags); 1407 local_irq_restore(flags);
1309 1408
1310out: 1409out:
@@ -1334,7 +1433,7 @@ int trace_array_printk(struct trace_array *tr,
1334int trace_array_vprintk(struct trace_array *tr, 1433int trace_array_vprintk(struct trace_array *tr,
1335 unsigned long ip, const char *fmt, va_list args) 1434 unsigned long ip, const char *fmt, va_list args)
1336{ 1435{
1337 static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED; 1436 static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1338 static char trace_buf[TRACE_BUF_SIZE]; 1437 static char trace_buf[TRACE_BUF_SIZE];
1339 1438
1340 struct ftrace_event_call *call = &event_print; 1439 struct ftrace_event_call *call = &event_print;
@@ -1360,12 +1459,9 @@ int trace_array_vprintk(struct trace_array *tr,
1360 1459
1361 pause_graph_tracing(); 1460 pause_graph_tracing();
1362 raw_local_irq_save(irq_flags); 1461 raw_local_irq_save(irq_flags);
1363 __raw_spin_lock(&trace_buf_lock); 1462 arch_spin_lock(&trace_buf_lock);
1364 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); 1463 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
1365 1464
1366 len = min(len, TRACE_BUF_SIZE-1);
1367 trace_buf[len] = 0;
1368
1369 size = sizeof(*entry) + len + 1; 1465 size = sizeof(*entry) + len + 1;
1370 buffer = tr->buffer; 1466 buffer = tr->buffer;
1371 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, 1467 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
@@ -1373,15 +1469,17 @@ int trace_array_vprintk(struct trace_array *tr,
1373 if (!event) 1469 if (!event)
1374 goto out_unlock; 1470 goto out_unlock;
1375 entry = ring_buffer_event_data(event); 1471 entry = ring_buffer_event_data(event);
1376 entry->ip = ip; 1472 entry->ip = ip;
1377 1473
1378 memcpy(&entry->buf, trace_buf, len); 1474 memcpy(&entry->buf, trace_buf, len);
1379 entry->buf[len] = 0; 1475 entry->buf[len] = '\0';
1380 if (!filter_check_discard(call, entry, buffer, event)) 1476 if (!filter_check_discard(call, entry, buffer, event)) {
1381 ring_buffer_unlock_commit(buffer, event); 1477 ring_buffer_unlock_commit(buffer, event);
1478 ftrace_trace_stack(buffer, irq_flags, 6, pc);
1479 }
1382 1480
1383 out_unlock: 1481 out_unlock:
1384 __raw_spin_unlock(&trace_buf_lock); 1482 arch_spin_unlock(&trace_buf_lock);
1385 raw_local_irq_restore(irq_flags); 1483 raw_local_irq_restore(irq_flags);
1386 unpause_graph_tracing(); 1484 unpause_graph_tracing();
1387 out: 1485 out:
@@ -1515,6 +1613,8 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
1515 int i = (int)*pos; 1613 int i = (int)*pos;
1516 void *ent; 1614 void *ent;
1517 1615
1616 WARN_ON_ONCE(iter->leftover);
1617
1518 (*pos)++; 1618 (*pos)++;
1519 1619
1520 /* can't go backwards */ 1620 /* can't go backwards */
@@ -1566,12 +1666,6 @@ static void tracing_iter_reset(struct trace_iterator *iter, int cpu)
1566} 1666}
1567 1667
1568/* 1668/*
1569 * No necessary locking here. The worst thing which can
1570 * happen is loosing events consumed at the same time
1571 * by a trace_pipe reader.
1572 * Other than that, we don't risk to crash the ring buffer
1573 * because it serializes the readers.
1574 *
1575 * The current tracer is copied to avoid a global locking 1669 * The current tracer is copied to avoid a global locking
1576 * all around. 1670 * all around.
1577 */ 1671 */
@@ -1613,17 +1707,29 @@ static void *s_start(struct seq_file *m, loff_t *pos)
1613 ; 1707 ;
1614 1708
1615 } else { 1709 } else {
1616 l = *pos - 1; 1710 /*
1617 p = s_next(m, p, &l); 1711 * If we overflowed the seq_file before, then we want
1712 * to just reuse the trace_seq buffer again.
1713 */
1714 if (iter->leftover)
1715 p = iter;
1716 else {
1717 l = *pos - 1;
1718 p = s_next(m, p, &l);
1719 }
1618 } 1720 }
1619 1721
1620 trace_event_read_lock(); 1722 trace_event_read_lock();
1723 trace_access_lock(cpu_file);
1621 return p; 1724 return p;
1622} 1725}
1623 1726
1624static void s_stop(struct seq_file *m, void *p) 1727static void s_stop(struct seq_file *m, void *p)
1625{ 1728{
1729 struct trace_iterator *iter = m->private;
1730
1626 atomic_dec(&trace_record_cmdline_disabled); 1731 atomic_dec(&trace_record_cmdline_disabled);
1732 trace_access_unlock(iter->cpu_file);
1627 trace_event_read_unlock(); 1733 trace_event_read_unlock();
1628} 1734}
1629 1735
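
The leftover handling exists because seq_file may re-invoke ->start()/->show() for the same position after ->show() overflowed the output buffer, and the trace iterator cannot simply rewind the ring buffer, so the already-formatted trace_seq is kept and replayed. For orientation, the bare seq_operations contract looks like this (hypothetical array-backed iterator, not the trace code):

#include <linux/kernel.h>
#include <linux/seq_file.h>

static int demo_items[] = { 1, 2, 3 };

static void *demo_start(struct seq_file *m, loff_t *pos)
{
	return (size_t)*pos < ARRAY_SIZE(demo_items) ? &demo_items[*pos] : NULL;
}

static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return (size_t)*pos < ARRAY_SIZE(demo_items) ? &demo_items[*pos] : NULL;
}

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", *(int *)v);	/* may be re-run if the buffer overflows */
	return 0;
}

static void demo_stop(struct seq_file *m, void *v)
{
}

static const struct seq_operations demo_seq_ops = {
	.start	= demo_start,
	.next	= demo_next,
	.show	= demo_show,
	.stop	= demo_stop,
};
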
@@ -1922,6 +2028,7 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
1922static int s_show(struct seq_file *m, void *v) 2028static int s_show(struct seq_file *m, void *v)
1923{ 2029{
1924 struct trace_iterator *iter = v; 2030 struct trace_iterator *iter = v;
2031 int ret;
1925 2032
1926 if (iter->ent == NULL) { 2033 if (iter->ent == NULL) {
1927 if (iter->tr) { 2034 if (iter->tr) {
@@ -1941,9 +2048,27 @@ static int s_show(struct seq_file *m, void *v)
1941 if (!(trace_flags & TRACE_ITER_VERBOSE)) 2048 if (!(trace_flags & TRACE_ITER_VERBOSE))
1942 print_func_help_header(m); 2049 print_func_help_header(m);
1943 } 2050 }
2051 } else if (iter->leftover) {
2052 /*
2053 * If we filled the seq_file buffer earlier, we
2054 * want to just show it now.
2055 */
2056 ret = trace_print_seq(m, &iter->seq);
2057
2058 /* ret should this time be zero, but you never know */
2059 iter->leftover = ret;
2060
1944 } else { 2061 } else {
1945 print_trace_line(iter); 2062 print_trace_line(iter);
1946 trace_print_seq(m, &iter->seq); 2063 ret = trace_print_seq(m, &iter->seq);
2064 /*
2065 * If we overflow the seq_file buffer, then it will
2066 * ask us for this data again at start up.
2067 * Use that instead.
2068 * ret is 0 if seq_file write succeeded.
2069 * -1 otherwise.
2070 */
2071 iter->leftover = ret;
1947 } 2072 }
1948 2073
1949 return 0; 2074 return 0;
@@ -2253,7 +2378,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2253 mutex_lock(&tracing_cpumask_update_lock); 2378 mutex_lock(&tracing_cpumask_update_lock);
2254 2379
2255 local_irq_disable(); 2380 local_irq_disable();
2256 __raw_spin_lock(&ftrace_max_lock); 2381 arch_spin_lock(&ftrace_max_lock);
2257 for_each_tracing_cpu(cpu) { 2382 for_each_tracing_cpu(cpu) {
2258 /* 2383 /*
2259 * Increase/decrease the disabled counter if we are 2384 * Increase/decrease the disabled counter if we are
@@ -2268,7 +2393,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2268 atomic_dec(&global_trace.data[cpu]->disabled); 2393 atomic_dec(&global_trace.data[cpu]->disabled);
2269 } 2394 }
2270 } 2395 }
2271 __raw_spin_unlock(&ftrace_max_lock); 2396 arch_spin_unlock(&ftrace_max_lock);
2272 local_irq_enable(); 2397 local_irq_enable();
2273 2398
2274 cpumask_copy(tracing_cpumask, tracing_cpumask_new); 2399 cpumask_copy(tracing_cpumask, tracing_cpumask_new);
@@ -2290,67 +2415,49 @@ static const struct file_operations tracing_cpumask_fops = {
2290 .write = tracing_cpumask_write, 2415 .write = tracing_cpumask_write,
2291}; 2416};
2292 2417
2293static ssize_t 2418static int tracing_trace_options_show(struct seq_file *m, void *v)
2294tracing_trace_options_read(struct file *filp, char __user *ubuf,
2295 size_t cnt, loff_t *ppos)
2296{ 2419{
2297 struct tracer_opt *trace_opts; 2420 struct tracer_opt *trace_opts;
2298 u32 tracer_flags; 2421 u32 tracer_flags;
2299 int len = 0;
2300 char *buf;
2301 int r = 0;
2302 int i; 2422 int i;
2303 2423
2304
2305 /* calculate max size */
2306 for (i = 0; trace_options[i]; i++) {
2307 len += strlen(trace_options[i]);
2308 len += 3; /* "no" and newline */
2309 }
2310
2311 mutex_lock(&trace_types_lock); 2424 mutex_lock(&trace_types_lock);
2312 tracer_flags = current_trace->flags->val; 2425 tracer_flags = current_trace->flags->val;
2313 trace_opts = current_trace->flags->opts; 2426 trace_opts = current_trace->flags->opts;
2314 2427
2315 /*
2316 * Increase the size with names of options specific
2317 * of the current tracer.
2318 */
2319 for (i = 0; trace_opts[i].name; i++) {
2320 len += strlen(trace_opts[i].name);
2321 len += 3; /* "no" and newline */
2322 }
2323
2324 /* +1 for \0 */
2325 buf = kmalloc(len + 1, GFP_KERNEL);
2326 if (!buf) {
2327 mutex_unlock(&trace_types_lock);
2328 return -ENOMEM;
2329 }
2330
2331 for (i = 0; trace_options[i]; i++) { 2428 for (i = 0; trace_options[i]; i++) {
2332 if (trace_flags & (1 << i)) 2429 if (trace_flags & (1 << i))
2333 r += sprintf(buf + r, "%s\n", trace_options[i]); 2430 seq_printf(m, "%s\n", trace_options[i]);
2334 else 2431 else
2335 r += sprintf(buf + r, "no%s\n", trace_options[i]); 2432 seq_printf(m, "no%s\n", trace_options[i]);
2336 } 2433 }
2337 2434
2338 for (i = 0; trace_opts[i].name; i++) { 2435 for (i = 0; trace_opts[i].name; i++) {
2339 if (tracer_flags & trace_opts[i].bit) 2436 if (tracer_flags & trace_opts[i].bit)
2340 r += sprintf(buf + r, "%s\n", 2437 seq_printf(m, "%s\n", trace_opts[i].name);
2341 trace_opts[i].name);
2342 else 2438 else
2343 r += sprintf(buf + r, "no%s\n", 2439 seq_printf(m, "no%s\n", trace_opts[i].name);
2344 trace_opts[i].name);
2345 } 2440 }
2346 mutex_unlock(&trace_types_lock); 2441 mutex_unlock(&trace_types_lock);
2347 2442
2348 WARN_ON(r >= len + 1); 2443 return 0;
2444}
2445
2446static int __set_tracer_option(struct tracer *trace,
2447 struct tracer_flags *tracer_flags,
2448 struct tracer_opt *opts, int neg)
2449{
2450 int ret;
2349 2451
2350 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 2452 ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
2453 if (ret)
2454 return ret;
2351 2455
2352 kfree(buf); 2456 if (neg)
2353 return r; 2457 tracer_flags->val &= ~opts->bit;
2458 else
2459 tracer_flags->val |= opts->bit;
2460 return 0;
2354} 2461}
2355 2462
2356/* Try to assign a tracer specific option */ 2463/* Try to assign a tracer specific option */
@@ -2358,33 +2465,17 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
2358{ 2465{
2359 struct tracer_flags *tracer_flags = trace->flags; 2466 struct tracer_flags *tracer_flags = trace->flags;
2360 struct tracer_opt *opts = NULL; 2467 struct tracer_opt *opts = NULL;
2361 int ret = 0, i = 0; 2468 int i;
2362 int len;
2363 2469
2364 for (i = 0; tracer_flags->opts[i].name; i++) { 2470 for (i = 0; tracer_flags->opts[i].name; i++) {
2365 opts = &tracer_flags->opts[i]; 2471 opts = &tracer_flags->opts[i];
2366 len = strlen(opts->name);
2367 2472
2368 if (strncmp(cmp, opts->name, len) == 0) { 2473 if (strcmp(cmp, opts->name) == 0)
2369 ret = trace->set_flag(tracer_flags->val, 2474 return __set_tracer_option(trace, trace->flags,
2370 opts->bit, !neg); 2475 opts, neg);
2371 break;
2372 }
2373 } 2476 }
2374 /* Not found */
2375 if (!tracer_flags->opts[i].name)
2376 return -EINVAL;
2377 2477
2378 /* Refused to handle */ 2478 return -EINVAL;
2379 if (ret)
2380 return ret;
2381
2382 if (neg)
2383 tracer_flags->val &= ~opts->bit;
2384 else
2385 tracer_flags->val |= opts->bit;
2386
2387 return 0;
2388} 2479}
2389 2480
2390static void set_tracer_flags(unsigned int mask, int enabled) 2481static void set_tracer_flags(unsigned int mask, int enabled)
@@ -2404,7 +2495,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2404 size_t cnt, loff_t *ppos) 2495 size_t cnt, loff_t *ppos)
2405{ 2496{
2406 char buf[64]; 2497 char buf[64];
2407 char *cmp = buf; 2498 char *cmp;
2408 int neg = 0; 2499 int neg = 0;
2409 int ret; 2500 int ret;
2410 int i; 2501 int i;
@@ -2416,16 +2507,15 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2416 return -EFAULT; 2507 return -EFAULT;
2417 2508
2418 buf[cnt] = 0; 2509 buf[cnt] = 0;
2510 cmp = strstrip(buf);
2419 2511
2420 if (strncmp(buf, "no", 2) == 0) { 2512 if (strncmp(cmp, "no", 2) == 0) {
2421 neg = 1; 2513 neg = 1;
2422 cmp += 2; 2514 cmp += 2;
2423 } 2515 }
2424 2516
2425 for (i = 0; trace_options[i]; i++) { 2517 for (i = 0; trace_options[i]; i++) {
2426 int len = strlen(trace_options[i]); 2518 if (strcmp(cmp, trace_options[i]) == 0) {
2427
2428 if (strncmp(cmp, trace_options[i], len) == 0) {
2429 set_tracer_flags(1 << i, !neg); 2519 set_tracer_flags(1 << i, !neg);
2430 break; 2520 break;
2431 } 2521 }
@@ -2445,9 +2535,18 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2445 return cnt; 2535 return cnt;
2446} 2536}
2447 2537
2538static int tracing_trace_options_open(struct inode *inode, struct file *file)
2539{
2540 if (tracing_disabled)
2541 return -ENODEV;
2542 return single_open(file, tracing_trace_options_show, NULL);
2543}
2544
2448static const struct file_operations tracing_iter_fops = { 2545static const struct file_operations tracing_iter_fops = {
2449 .open = tracing_open_generic, 2546 .open = tracing_trace_options_open,
2450 .read = tracing_trace_options_read, 2547 .read = seq_read,
2548 .llseek = seq_lseek,
2549 .release = single_release,
2451 .write = tracing_trace_options_write, 2550 .write = tracing_trace_options_write,
2452}; 2551};
2453 2552
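
Both this file and trace_clock (further down) move from hand-rolled read() implementations, which had to size and fill a temporary buffer, to the seq_file single_open() helpers, which handle buffering and partial reads for them. The general shape of such a conversion, sketched for a hypothetical attribute file:

#include <linux/fs.h>
#include <linux/seq_file.h>

/* Hypothetical read-mostly attribute converted to seq_file. */
static int demo_attr_show(struct seq_file *m, void *v)
{
	seq_printf(m, "state: %s\n", "idle");	/* no kmalloc/sprintf bookkeeping */
	return 0;
}

static int demo_attr_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_attr_show, inode->i_private);
}

static const struct file_operations demo_attr_fops = {
	.open		= demo_attr_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	/* a .write handler can still be plugged in, as trace_options does */
};
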
@@ -2821,22 +2920,6 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
2821 2920
2822 mutex_lock(&trace_types_lock); 2921 mutex_lock(&trace_types_lock);
2823 2922
2824 /* We only allow one reader per cpu */
2825 if (cpu_file == TRACE_PIPE_ALL_CPU) {
2826 if (!cpumask_empty(tracing_reader_cpumask)) {
2827 ret = -EBUSY;
2828 goto out;
2829 }
2830 cpumask_setall(tracing_reader_cpumask);
2831 } else {
2832 if (!cpumask_test_cpu(cpu_file, tracing_reader_cpumask))
2833 cpumask_set_cpu(cpu_file, tracing_reader_cpumask);
2834 else {
2835 ret = -EBUSY;
2836 goto out;
2837 }
2838 }
2839
2840 /* create a buffer to store the information to pass to userspace */ 2923 /* create a buffer to store the information to pass to userspace */
2841 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 2924 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2842 if (!iter) { 2925 if (!iter) {
@@ -2892,10 +2975,8 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
2892 2975
2893 mutex_lock(&trace_types_lock); 2976 mutex_lock(&trace_types_lock);
2894 2977
2895 if (iter->cpu_file == TRACE_PIPE_ALL_CPU) 2978 if (iter->trace->pipe_close)
2896 cpumask_clear(tracing_reader_cpumask); 2979 iter->trace->pipe_close(iter);
2897 else
2898 cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask);
2899 2980
2900 mutex_unlock(&trace_types_lock); 2981 mutex_unlock(&trace_types_lock);
2901 2982
@@ -3055,6 +3136,7 @@ waitagain:
3055 iter->pos = -1; 3136 iter->pos = -1;
3056 3137
3057 trace_event_read_lock(); 3138 trace_event_read_lock();
3139 trace_access_lock(iter->cpu_file);
3058 while (find_next_entry_inc(iter) != NULL) { 3140 while (find_next_entry_inc(iter) != NULL) {
3059 enum print_line_t ret; 3141 enum print_line_t ret;
3060 int len = iter->seq.len; 3142 int len = iter->seq.len;
@@ -3071,6 +3153,7 @@ waitagain:
3071 if (iter->seq.len >= cnt) 3153 if (iter->seq.len >= cnt)
3072 break; 3154 break;
3073 } 3155 }
3156 trace_access_unlock(iter->cpu_file);
3074 trace_event_read_unlock(); 3157 trace_event_read_unlock();
3075 3158
3076 /* Now copy what we have to the user */ 3159 /* Now copy what we have to the user */
@@ -3103,7 +3186,7 @@ static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
3103 __free_page(spd->pages[idx]); 3186 __free_page(spd->pages[idx]);
3104} 3187}
3105 3188
3106static struct pipe_buf_operations tracing_pipe_buf_ops = { 3189static const struct pipe_buf_operations tracing_pipe_buf_ops = {
3107 .can_merge = 0, 3190 .can_merge = 0,
3108 .map = generic_pipe_buf_map, 3191 .map = generic_pipe_buf_map,
3109 .unmap = generic_pipe_buf_unmap, 3192 .unmap = generic_pipe_buf_unmap,
@@ -3196,6 +3279,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
3196 } 3279 }
3197 3280
3198 trace_event_read_lock(); 3281 trace_event_read_lock();
3282 trace_access_lock(iter->cpu_file);
3199 3283
3200 /* Fill as many pages as possible. */ 3284 /* Fill as many pages as possible. */
3201 for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) { 3285 for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) {
@@ -3219,6 +3303,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
3219 trace_seq_init(&iter->seq); 3303 trace_seq_init(&iter->seq);
3220 } 3304 }
3221 3305
3306 trace_access_unlock(iter->cpu_file);
3222 trace_event_read_unlock(); 3307 trace_event_read_unlock();
3223 mutex_unlock(&iter->mutex); 3308 mutex_unlock(&iter->mutex);
3224 3309
@@ -3334,7 +3419,6 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
3334 size_t cnt, loff_t *fpos) 3419 size_t cnt, loff_t *fpos)
3335{ 3420{
3336 char *buf; 3421 char *buf;
3337 char *end;
3338 3422
3339 if (tracing_disabled) 3423 if (tracing_disabled)
3340 return -EINVAL; 3424 return -EINVAL;
@@ -3342,7 +3426,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
3342 if (cnt > TRACE_BUF_SIZE) 3426 if (cnt > TRACE_BUF_SIZE)
3343 cnt = TRACE_BUF_SIZE; 3427 cnt = TRACE_BUF_SIZE;
3344 3428
3345 buf = kmalloc(cnt + 1, GFP_KERNEL); 3429 buf = kmalloc(cnt + 2, GFP_KERNEL);
3346 if (buf == NULL) 3430 if (buf == NULL)
3347 return -ENOMEM; 3431 return -ENOMEM;
3348 3432
@@ -3350,35 +3434,31 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
3350 kfree(buf); 3434 kfree(buf);
3351 return -EFAULT; 3435 return -EFAULT;
3352 } 3436 }
3437 if (buf[cnt-1] != '\n') {
3438 buf[cnt] = '\n';
3439 buf[cnt+1] = '\0';
3440 } else
3441 buf[cnt] = '\0';
3353 3442
3354 /* Cut from the first nil or newline. */ 3443 cnt = mark_printk("%s", buf);
3355 buf[cnt] = '\0';
3356 end = strchr(buf, '\n');
3357 if (end)
3358 *end = '\0';
3359
3360 cnt = mark_printk("%s\n", buf);
3361 kfree(buf); 3444 kfree(buf);
3362 *fpos += cnt; 3445 *fpos += cnt;
3363 3446
3364 return cnt; 3447 return cnt;
3365} 3448}
3366 3449
3367static ssize_t tracing_clock_read(struct file *filp, char __user *ubuf, 3450static int tracing_clock_show(struct seq_file *m, void *v)
3368 size_t cnt, loff_t *ppos)
3369{ 3451{
3370 char buf[64];
3371 int bufiter = 0;
3372 int i; 3452 int i;
3373 3453
3374 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) 3454 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
3375 bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, 3455 seq_printf(m,
3376 "%s%s%s%s", i ? " " : "", 3456 "%s%s%s%s", i ? " " : "",
3377 i == trace_clock_id ? "[" : "", trace_clocks[i].name, 3457 i == trace_clock_id ? "[" : "", trace_clocks[i].name,
3378 i == trace_clock_id ? "]" : ""); 3458 i == trace_clock_id ? "]" : "");
3379 bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, "\n"); 3459 seq_putc(m, '\n');
3380 3460
3381 return simple_read_from_buffer(ubuf, cnt, ppos, buf, bufiter); 3461 return 0;
3382} 3462}
3383 3463
3384static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, 3464static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
@@ -3420,6 +3500,13 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
3420 return cnt; 3500 return cnt;
3421} 3501}
3422 3502
3503static int tracing_clock_open(struct inode *inode, struct file *file)
3504{
3505 if (tracing_disabled)
3506 return -ENODEV;
3507 return single_open(file, tracing_clock_show, NULL);
3508}
3509
3423static const struct file_operations tracing_max_lat_fops = { 3510static const struct file_operations tracing_max_lat_fops = {
3424 .open = tracing_open_generic, 3511 .open = tracing_open_generic,
3425 .read = tracing_max_lat_read, 3512 .read = tracing_max_lat_read,
@@ -3458,8 +3545,10 @@ static const struct file_operations tracing_mark_fops = {
3458}; 3545};
3459 3546
3460static const struct file_operations trace_clock_fops = { 3547static const struct file_operations trace_clock_fops = {
3461 .open = tracing_open_generic, 3548 .open = tracing_clock_open,
3462 .read = tracing_clock_read, 3549 .read = seq_read,
3550 .llseek = seq_lseek,
3551 .release = single_release,
3463 .write = tracing_clock_write, 3552 .write = tracing_clock_write,
3464}; 3553};
3465 3554
@@ -3516,10 +3605,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
3516 3605
3517 info->read = 0; 3606 info->read = 0;
3518 3607
3608 trace_access_lock(info->cpu);
3519 ret = ring_buffer_read_page(info->tr->buffer, 3609 ret = ring_buffer_read_page(info->tr->buffer,
3520 &info->spare, 3610 &info->spare,
3521 count, 3611 count,
3522 info->cpu, 0); 3612 info->cpu, 0);
3613 trace_access_unlock(info->cpu);
3523 if (ret < 0) 3614 if (ret < 0)
3524 return 0; 3615 return 0;
3525 3616
@@ -3589,7 +3680,7 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
3589} 3680}
3590 3681
3591/* Pipe buffer operations for a buffer. */ 3682/* Pipe buffer operations for a buffer. */
3592static struct pipe_buf_operations buffer_pipe_buf_ops = { 3683static const struct pipe_buf_operations buffer_pipe_buf_ops = {
3593 .can_merge = 0, 3684 .can_merge = 0,
3594 .map = generic_pipe_buf_map, 3685 .map = generic_pipe_buf_map,
3595 .unmap = generic_pipe_buf_unmap, 3686 .unmap = generic_pipe_buf_unmap,
@@ -3647,6 +3738,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
3647 len &= PAGE_MASK; 3738 len &= PAGE_MASK;
3648 } 3739 }
3649 3740
3741 trace_access_lock(info->cpu);
3650 entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); 3742 entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
3651 3743
3652 for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) { 3744 for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) {
@@ -3694,6 +3786,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
3694 entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); 3786 entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
3695 } 3787 }
3696 3788
3789 trace_access_unlock(info->cpu);
3697 spd.nr_pages = i; 3790 spd.nr_pages = i;
3698 3791
3699 /* did we read anything? */ 3792 /* did we read anything? */
@@ -3730,7 +3823,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
3730 3823
3731 s = kmalloc(sizeof(*s), GFP_KERNEL); 3824 s = kmalloc(sizeof(*s), GFP_KERNEL);
3732 if (!s) 3825 if (!s)
3733 return ENOMEM; 3826 return -ENOMEM;
3734 3827
3735 trace_seq_init(s); 3828 trace_seq_init(s);
3736 3829
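
The tracing_stats_read() change is a real bug fix: in-kernel error returns are negative errno values, and returning the positive ENOMEM (12) from a .read handler would be taken by the VFS as "12 bytes read". A small sketch of the convention, with a made-up helper:

#include <linux/errno.h>
#include <linux/slab.h>

/* Hypothetical helper following the negative-errno convention. */
static int demo_alloc_scratch(char **out, size_t len)
{
	char *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;	/* negative value signals an error */
	*out = buf;
	return 0;		/* 0 (or a positive byte count) means success */
}
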
@@ -3920,39 +4013,16 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
3920 if (ret < 0) 4013 if (ret < 0)
3921 return ret; 4014 return ret;
3922 4015
3923 ret = 0; 4016 if (val != 0 && val != 1)
3924 switch (val) { 4017 return -EINVAL;
3925 case 0:
3926 /* do nothing if already cleared */
3927 if (!(topt->flags->val & topt->opt->bit))
3928 break;
3929
3930 mutex_lock(&trace_types_lock);
3931 if (current_trace->set_flag)
3932 ret = current_trace->set_flag(topt->flags->val,
3933 topt->opt->bit, 0);
3934 mutex_unlock(&trace_types_lock);
3935 if (ret)
3936 return ret;
3937 topt->flags->val &= ~topt->opt->bit;
3938 break;
3939 case 1:
3940 /* do nothing if already set */
3941 if (topt->flags->val & topt->opt->bit)
3942 break;
3943 4018
4019 if (!!(topt->flags->val & topt->opt->bit) != val) {
3944 mutex_lock(&trace_types_lock); 4020 mutex_lock(&trace_types_lock);
3945 if (current_trace->set_flag) 4021 ret = __set_tracer_option(current_trace, topt->flags,
3946 ret = current_trace->set_flag(topt->flags->val, 4022 topt->opt, !val);
3947 topt->opt->bit, 1);
3948 mutex_unlock(&trace_types_lock); 4023 mutex_unlock(&trace_types_lock);
3949 if (ret) 4024 if (ret)
3950 return ret; 4025 return ret;
3951 topt->flags->val |= topt->opt->bit;
3952 break;
3953
3954 default:
3955 return -EINVAL;
3956 } 4026 }
3957 4027
3958 *ppos += cnt; 4028 *ppos += cnt;
@@ -4153,6 +4223,8 @@ static __init int tracer_init_debugfs(void)
4153 struct dentry *d_tracer; 4223 struct dentry *d_tracer;
4154 int cpu; 4224 int cpu;
4155 4225
4226 trace_access_lock_init();
4227
4156 d_tracer = tracing_init_dentry(); 4228 d_tracer = tracing_init_dentry();
4157 4229
4158 trace_create_file("tracing_enabled", 0644, d_tracer, 4230 trace_create_file("tracing_enabled", 0644, d_tracer,
@@ -4279,8 +4351,8 @@ trace_printk_seq(struct trace_seq *s)
4279 4351
4280static void __ftrace_dump(bool disable_tracing) 4352static void __ftrace_dump(bool disable_tracing)
4281{ 4353{
4282 static raw_spinlock_t ftrace_dump_lock = 4354 static arch_spinlock_t ftrace_dump_lock =
4283 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 4355 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
4284 /* use static because iter can be a bit big for the stack */ 4356 /* use static because iter can be a bit big for the stack */
4285 static struct trace_iterator iter; 4357 static struct trace_iterator iter;
4286 unsigned int old_userobj; 4358 unsigned int old_userobj;
@@ -4290,7 +4362,7 @@ static void __ftrace_dump(bool disable_tracing)
4290 4362
4291 /* only one dump */ 4363 /* only one dump */
4292 local_irq_save(flags); 4364 local_irq_save(flags);
4293 __raw_spin_lock(&ftrace_dump_lock); 4365 arch_spin_lock(&ftrace_dump_lock);
4294 if (dump_ran) 4366 if (dump_ran)
4295 goto out; 4367 goto out;
4296 4368
@@ -4365,7 +4437,7 @@ static void __ftrace_dump(bool disable_tracing)
4365 } 4437 }
4366 4438
4367 out: 4439 out:
4368 __raw_spin_unlock(&ftrace_dump_lock); 4440 arch_spin_unlock(&ftrace_dump_lock);
4369 local_irq_restore(flags); 4441 local_irq_restore(flags);
4370} 4442}
4371 4443
@@ -4387,9 +4459,6 @@ __init static int tracer_alloc_buffers(void)
4387 if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) 4459 if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
4388 goto out_free_buffer_mask; 4460 goto out_free_buffer_mask;
4389 4461
4390 if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
4391 goto out_free_tracing_cpumask;
4392
4393 /* To save memory, keep the ring buffer size to its minimum */ 4462 /* To save memory, keep the ring buffer size to its minimum */
4394 if (ring_buffer_expanded) 4463 if (ring_buffer_expanded)
4395 ring_buf_size = trace_buf_size; 4464 ring_buf_size = trace_buf_size;
@@ -4426,7 +4495,7 @@ __init static int tracer_alloc_buffers(void)
4426 /* Allocate the first page for all buffers */ 4495 /* Allocate the first page for all buffers */
4427 for_each_tracing_cpu(i) { 4496 for_each_tracing_cpu(i) {
4428 global_trace.data[i] = &per_cpu(global_trace_cpu, i); 4497 global_trace.data[i] = &per_cpu(global_trace_cpu, i);
4429 max_tr.data[i] = &per_cpu(max_data, i); 4498 max_tr.data[i] = &per_cpu(max_tr_data, i);
4430 } 4499 }
4431 4500
4432 trace_init_cmdlines(); 4501 trace_init_cmdlines();
@@ -4447,8 +4516,6 @@ __init static int tracer_alloc_buffers(void)
4447 return 0; 4516 return 0;
4448 4517
4449out_free_cpumask: 4518out_free_cpumask:
4450 free_cpumask_var(tracing_reader_cpumask);
4451out_free_tracing_cpumask:
4452 free_cpumask_var(tracing_cpumask); 4519 free_cpumask_var(tracing_cpumask);
4453out_free_buffer_mask: 4520out_free_buffer_mask:
4454 free_cpumask_var(tracing_buffer_mask); 4521 free_cpumask_var(tracing_buffer_mask);