Diffstat (limited to 'kernel/trace/trace.c')
 -rw-r--r--  kernel/trace/trace.c  | 307
 1 files changed, 159 insertions, 148 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 874f2893cff0..8b9f20ab8eed 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -12,7 +12,7 @@
  * Copyright (C) 2004 William Lee Irwin III
  */
 #include <linux/ring_buffer.h>
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
 #include <linux/stacktrace.h>
 #include <linux/writeback.h>
 #include <linux/kallsyms.h>
@@ -86,17 +86,17 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  */
 static int tracing_disabled = 1;
 
-DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
+DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 
 static inline void ftrace_disable_cpu(void)
 {
 	preempt_disable();
-	local_inc(&__get_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
 }
 
 static inline void ftrace_enable_cpu(void)
 {
-	local_dec(&__get_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
 	preempt_enable();
 }
 
@@ -203,7 +203,7 @@ cycle_t ftrace_now(int cpu)
  */
 static struct trace_array	max_tr;
 
-static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
+static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
 
 /* tracer_enabled is used to toggle activation of a tracer */
 static int			tracer_enabled = 1;
@@ -313,7 +313,6 @@ static const char *trace_options[] = {
313 "bin", 313 "bin",
314 "block", 314 "block",
315 "stacktrace", 315 "stacktrace",
316 "sched-tree",
317 "trace_printk", 316 "trace_printk",
318 "ftrace_preempt", 317 "ftrace_preempt",
319 "branch", 318 "branch",
@@ -493,15 +492,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
  * protected by per_cpu spinlocks. But the action of the swap
  * needs its own lock.
  *
- * This is defined as a raw_spinlock_t in order to help
+ * This is defined as a arch_spinlock_t in order to help
  * with performance when lockdep debugging is enabled.
  *
  * It is also used in other places outside the update_max_tr
  * so it needs to be defined outside of the
  * CONFIG_TRACER_MAX_TRACE.
  */
-static raw_spinlock_t ftrace_max_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t ftrace_max_lock =
+	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly	tracing_max_latency;
@@ -555,13 +554,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 
 	tr->buffer = max_tr.buffer;
 	max_tr.buffer = buf;
 
 	__update_max_tr(tr, tsk, cpu);
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 }
 
 /**
@@ -581,7 +580,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 
 	ftrace_disable_cpu();
 
@@ -603,7 +602,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 
 	__update_max_tr(tr, tsk, cpu);
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
@@ -802,7 +801,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
-static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
 /* temporary disable recording */
 static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -915,7 +914,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 	 * nor do we want to disable interrupts,
 	 * so if we miss here, then better luck next time.
 	 */
-	if (!__raw_spin_trylock(&trace_cmdline_lock))
+	if (!arch_spin_trylock(&trace_cmdline_lock))
 		return;
 
 	idx = map_pid_to_cmdline[tsk->pid];
@@ -940,7 +939,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 
 	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
 
-	__raw_spin_unlock(&trace_cmdline_lock);
+	arch_spin_unlock(&trace_cmdline_lock);
 }
 
 void trace_find_cmdline(int pid, char comm[])
@@ -958,14 +957,14 @@ void trace_find_cmdline(int pid, char comm[])
 	}
 
 	preempt_disable();
-	__raw_spin_lock(&trace_cmdline_lock);
+	arch_spin_lock(&trace_cmdline_lock);
 	map = map_pid_to_cmdline[pid];
 	if (map != NO_CMDLINE_MAP)
 		strcpy(comm, saved_cmdlines[map]);
 	else
 		strcpy(comm, "<...>");
 
-	__raw_spin_unlock(&trace_cmdline_lock);
+	arch_spin_unlock(&trace_cmdline_lock);
 	preempt_enable();
 }
 
@@ -1085,7 +1084,7 @@ trace_function(struct trace_array *tr,
 	struct ftrace_entry *entry;
 
 	/* If we are reading the ring buffer, don't trace */
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@ -1151,6 +1150,22 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
 	__ftrace_trace_stack(tr->buffer, flags, skip, pc);
 }
 
+/**
+ * trace_dump_stack - record a stack back trace in the trace buffer
+ */
+void trace_dump_stack(void)
+{
+	unsigned long flags;
+
+	if (tracing_disabled || tracing_selftest_running)
+		return;
+
+	local_save_flags(flags);
+
+	/* skipping 3 traces, seems to get us at the caller of this function */
+	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
+}
+
 void
 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
@@ -1251,8 +1266,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
  */
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
-	static raw_spinlock_t trace_buf_lock =
-		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t trace_buf_lock =
+		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	static u32 trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_bprint;
@@ -1283,7 +1298,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
 	/* Lockdep uses trace_printk for lock tracing */
 	local_irq_save(flags);
-	__raw_spin_lock(&trace_buf_lock);
+	arch_spin_lock(&trace_buf_lock);
 	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
 	if (len > TRACE_BUF_SIZE || len < 0)
@@ -1304,7 +1319,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	ring_buffer_unlock_commit(buffer, event);
 
 out_unlock:
-	__raw_spin_unlock(&trace_buf_lock);
+	arch_spin_unlock(&trace_buf_lock);
 	local_irq_restore(flags);
 
 out:
@@ -1334,7 +1349,7 @@ int trace_array_printk(struct trace_array *tr,
 int trace_array_vprintk(struct trace_array *tr,
 			unsigned long ip, const char *fmt, va_list args)
 {
-	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 	static char trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_print;
@@ -1360,12 +1375,8 @@ int trace_array_vprintk(struct trace_array *tr,
 
 	pause_graph_tracing();
 	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&trace_buf_lock);
-	if (args == NULL) {
-		strncpy(trace_buf, fmt, TRACE_BUF_SIZE);
-		len = strlen(trace_buf);
-	} else
-		len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+	arch_spin_lock(&trace_buf_lock);
+	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
 	size = sizeof(*entry) + len + 1;
 	buffer = tr->buffer;
@@ -1382,7 +1393,7 @@ int trace_array_vprintk(struct trace_array *tr,
 	ring_buffer_unlock_commit(buffer, event);
 
  out_unlock:
-	__raw_spin_unlock(&trace_buf_lock);
+	arch_spin_unlock(&trace_buf_lock);
 	raw_local_irq_restore(irq_flags);
 	unpause_graph_tracing();
  out:
@@ -1516,6 +1527,8 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
 	int i = (int)*pos;
 	void *ent;
 
+	WARN_ON_ONCE(iter->leftover);
+
 	(*pos)++;
 
 	/* can't go backwards */
@@ -1614,8 +1627,16 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 			;
 
 	} else {
-		l = *pos - 1;
-		p = s_next(m, p, &l);
+		/*
+		 * If we overflowed the seq_file before, then we want
+		 * to just reuse the trace_seq buffer again.
+		 */
+		if (iter->leftover)
+			p = iter;
+		else {
+			l = *pos - 1;
+			p = s_next(m, p, &l);
+		}
 	}
 
 	trace_event_read_lock();
@@ -1923,6 +1944,7 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
 static int s_show(struct seq_file *m, void *v)
 {
 	struct trace_iterator *iter = v;
+	int ret;
 
 	if (iter->ent == NULL) {
 		if (iter->tr) {
@@ -1942,9 +1964,27 @@ static int s_show(struct seq_file *m, void *v)
 			if (!(trace_flags & TRACE_ITER_VERBOSE))
 				print_func_help_header(m);
 		}
+	} else if (iter->leftover) {
+		/*
+		 * If we filled the seq_file buffer earlier, we
+		 * want to just show it now.
+		 */
+		ret = trace_print_seq(m, &iter->seq);
+
+		/* ret should this time be zero, but you never know */
+		iter->leftover = ret;
+
 	} else {
 		print_trace_line(iter);
-		trace_print_seq(m, &iter->seq);
+		ret = trace_print_seq(m, &iter->seq);
+		/*
+		 * If we overflow the seq_file buffer, then it will
+		 * ask us for this data again at start up.
+		 * Use that instead.
+		 * ret is 0 if seq_file write succeeded.
+		 * -1 otherwise.
+		 */
+		iter->leftover = ret;
 	}
 
 	return 0;
@@ -2254,7 +2294,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	mutex_lock(&tracing_cpumask_update_lock);
 
 	local_irq_disable();
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 	for_each_tracing_cpu(cpu) {
 		/*
 		 * Increase/decrease the disabled counter if we are
@@ -2269,7 +2309,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 			atomic_dec(&global_trace.data[cpu]->disabled);
 		}
 	}
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 	local_irq_enable();
 
 	cpumask_copy(tracing_cpumask, tracing_cpumask_new);
@@ -2291,67 +2331,49 @@ static const struct file_operations tracing_cpumask_fops = {
 	.write		= tracing_cpumask_write,
 };
 
-static ssize_t
-tracing_trace_options_read(struct file *filp, char __user *ubuf,
-		       size_t cnt, loff_t *ppos)
+static int tracing_trace_options_show(struct seq_file *m, void *v)
 {
 	struct tracer_opt *trace_opts;
 	u32 tracer_flags;
-	int len = 0;
-	char *buf;
-	int r = 0;
 	int i;
 
-
-	/* calculate max size */
-	for (i = 0; trace_options[i]; i++) {
-		len += strlen(trace_options[i]);
-		len += 3; /* "no" and newline */
-	}
-
 	mutex_lock(&trace_types_lock);
 	tracer_flags = current_trace->flags->val;
 	trace_opts = current_trace->flags->opts;
 
-	/*
-	 * Increase the size with names of options specific
-	 * of the current tracer.
-	 */
-	for (i = 0; trace_opts[i].name; i++) {
-		len += strlen(trace_opts[i].name);
-		len += 3; /* "no" and newline */
-	}
-
-	/* +1 for \0 */
-	buf = kmalloc(len + 1, GFP_KERNEL);
-	if (!buf) {
-		mutex_unlock(&trace_types_lock);
-		return -ENOMEM;
-	}
-
 	for (i = 0; trace_options[i]; i++) {
 		if (trace_flags & (1 << i))
-			r += sprintf(buf + r, "%s\n", trace_options[i]);
+			seq_printf(m, "%s\n", trace_options[i]);
 		else
-			r += sprintf(buf + r, "no%s\n", trace_options[i]);
+			seq_printf(m, "no%s\n", trace_options[i]);
 	}
 
 	for (i = 0; trace_opts[i].name; i++) {
 		if (tracer_flags & trace_opts[i].bit)
-			r += sprintf(buf + r, "%s\n",
-				trace_opts[i].name);
+			seq_printf(m, "%s\n", trace_opts[i].name);
 		else
-			r += sprintf(buf + r, "no%s\n",
-				trace_opts[i].name);
+			seq_printf(m, "no%s\n", trace_opts[i].name);
 	}
 	mutex_unlock(&trace_types_lock);
 
-	WARN_ON(r >= len + 1);
+	return 0;
+}
 
-	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+static int __set_tracer_option(struct tracer *trace,
+			       struct tracer_flags *tracer_flags,
+			       struct tracer_opt *opts, int neg)
+{
+	int ret;
 
-	kfree(buf);
-	return r;
+	ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
+	if (ret)
+		return ret;
+
+	if (neg)
+		tracer_flags->val &= ~opts->bit;
+	else
+		tracer_flags->val |= opts->bit;
+	return 0;
 }
 
 /* Try to assign a tracer specific option */
@@ -2359,33 +2381,17 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
 {
 	struct tracer_flags *tracer_flags = trace->flags;
 	struct tracer_opt *opts = NULL;
-	int ret = 0, i = 0;
-	int len;
+	int i;
 
 	for (i = 0; tracer_flags->opts[i].name; i++) {
 		opts = &tracer_flags->opts[i];
-		len = strlen(opts->name);
 
-		if (strncmp(cmp, opts->name, len) == 0) {
-			ret = trace->set_flag(tracer_flags->val,
-				opts->bit, !neg);
-			break;
-		}
+		if (strcmp(cmp, opts->name) == 0)
+			return __set_tracer_option(trace, trace->flags,
+						   opts, neg);
 	}
-	/* Not found */
-	if (!tracer_flags->opts[i].name)
-		return -EINVAL;
-
-	/* Refused to handle */
-	if (ret)
-		return ret;
-
-	if (neg)
-		tracer_flags->val &= ~opts->bit;
-	else
-		tracer_flags->val |= opts->bit;
 
-	return 0;
+	return -EINVAL;
 }
 
 static void set_tracer_flags(unsigned int mask, int enabled)
@@ -2405,7 +2411,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 			size_t cnt, loff_t *ppos)
 {
 	char buf[64];
-	char *cmp = buf;
+	char *cmp;
 	int neg = 0;
 	int ret;
 	int i;
@@ -2417,16 +2423,15 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 		return -EFAULT;
 
 	buf[cnt] = 0;
+	cmp = strstrip(buf);
 
-	if (strncmp(buf, "no", 2) == 0) {
+	if (strncmp(cmp, "no", 2) == 0) {
 		neg = 1;
 		cmp += 2;
 	}
 
 	for (i = 0; trace_options[i]; i++) {
-		int len = strlen(trace_options[i]);
-
-		if (strncmp(cmp, trace_options[i], len) == 0) {
+		if (strcmp(cmp, trace_options[i]) == 0) {
 			set_tracer_flags(1 << i, !neg);
 			break;
 		}
@@ -2446,9 +2451,18 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static int tracing_trace_options_open(struct inode *inode, struct file *file)
+{
+	if (tracing_disabled)
+		return -ENODEV;
+	return single_open(file, tracing_trace_options_show, NULL);
+}
+
 static const struct file_operations tracing_iter_fops = {
-	.open		= tracing_open_generic,
-	.read		= tracing_trace_options_read,
+	.open		= tracing_trace_options_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
 	.write		= tracing_trace_options_write,
 };
 
@@ -2898,6 +2912,10 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
 	else
 		cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask);
 
+
+	if (iter->trace->pipe_close)
+		iter->trace->pipe_close(iter);
+
 	mutex_unlock(&trace_types_lock);
 
 	free_cpumask_var(iter->started);
@@ -3104,7 +3122,7 @@ static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
 	__free_page(spd->pages[idx]);
 }
 
-static struct pipe_buf_operations tracing_pipe_buf_ops = {
+static const struct pipe_buf_operations tracing_pipe_buf_ops = {
 	.can_merge		= 0,
 	.map			= generic_pipe_buf_map,
 	.unmap			= generic_pipe_buf_unmap,
@@ -3320,6 +3338,16 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static int mark_printk(const char *fmt, ...)
+{
+	int ret;
+	va_list args;
+	va_start(args, fmt);
+	ret = trace_vprintk(0, fmt, args);
+	va_end(args);
+	return ret;
+}
+
 static ssize_t
 tracing_mark_write(struct file *filp, const char __user *ubuf,
 					size_t cnt, loff_t *fpos)
@@ -3346,28 +3374,25 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	} else
 		buf[cnt] = '\0';
 
-	cnt = trace_vprintk(0, buf, NULL);
+	cnt = mark_printk("%s", buf);
 	kfree(buf);
 	*fpos += cnt;
 
 	return cnt;
 }
 
-static ssize_t tracing_clock_read(struct file *filp, char __user *ubuf,
-				  size_t cnt, loff_t *ppos)
+static int tracing_clock_show(struct seq_file *m, void *v)
 {
-	char buf[64];
-	int bufiter = 0;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
-		bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter,
+		seq_printf(m,
 			"%s%s%s%s", i ? " " : "",
 			i == trace_clock_id ? "[" : "", trace_clocks[i].name,
 			i == trace_clock_id ? "]" : "");
-	bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, "\n");
+	seq_putc(m, '\n');
 
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, bufiter);
+	return 0;
 }
 
 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
@@ -3409,6 +3434,13 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static int tracing_clock_open(struct inode *inode, struct file *file)
+{
+	if (tracing_disabled)
+		return -ENODEV;
+	return single_open(file, tracing_clock_show, NULL);
+}
+
 static const struct file_operations tracing_max_lat_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_max_lat_read,
@@ -3447,8 +3479,10 @@ static const struct file_operations tracing_mark_fops = {
 };
 
 static const struct file_operations trace_clock_fops = {
-	.open		= tracing_open_generic,
-	.read		= tracing_clock_read,
+	.open		= tracing_clock_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
 	.write		= tracing_clock_write,
 };
 
@@ -3578,7 +3612,7 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
 }
 
 /* Pipe buffer operations for a buffer. */
-static struct pipe_buf_operations buffer_pipe_buf_ops = {
+static const struct pipe_buf_operations buffer_pipe_buf_ops = {
 	.can_merge		= 0,
 	.map			= generic_pipe_buf_map,
 	.unmap			= generic_pipe_buf_unmap,
@@ -3909,39 +3943,16 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	if (ret < 0)
 		return ret;
 
-	ret = 0;
-	switch (val) {
-	case 0:
-		/* do nothing if already cleared */
-		if (!(topt->flags->val & topt->opt->bit))
-			break;
-
-		mutex_lock(&trace_types_lock);
-		if (current_trace->set_flag)
-			ret = current_trace->set_flag(topt->flags->val,
-						      topt->opt->bit, 0);
-		mutex_unlock(&trace_types_lock);
-		if (ret)
-			return ret;
-		topt->flags->val &= ~topt->opt->bit;
-		break;
-	case 1:
-		/* do nothing if already set */
-		if (topt->flags->val & topt->opt->bit)
-			break;
+	if (val != 0 && val != 1)
+		return -EINVAL;
 
+	if (!!(topt->flags->val & topt->opt->bit) != val) {
 		mutex_lock(&trace_types_lock);
-		if (current_trace->set_flag)
-			ret = current_trace->set_flag(topt->flags->val,
-						      topt->opt->bit, 1);
+		ret = __set_tracer_option(current_trace, topt->flags,
+					  topt->opt, val);
 		mutex_unlock(&trace_types_lock);
 		if (ret)
 			return ret;
-		topt->flags->val |= topt->opt->bit;
-		break;
-
-	default:
-		return -EINVAL;
 	}
 
 	*ppos += cnt;
@@ -4268,8 +4279,8 @@ trace_printk_seq(struct trace_seq *s)
 
 static void __ftrace_dump(bool disable_tracing)
 {
-	static raw_spinlock_t ftrace_dump_lock =
-		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t ftrace_dump_lock =
+		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
 	unsigned int old_userobj;
@@ -4279,7 +4290,7 @@ static void __ftrace_dump(bool disable_tracing)
 
 	/* only one dump */
 	local_irq_save(flags);
-	__raw_spin_lock(&ftrace_dump_lock);
+	arch_spin_lock(&ftrace_dump_lock);
 	if (dump_ran)
 		goto out;
 
@@ -4354,7 +4365,7 @@ static void __ftrace_dump(bool disable_tracing)
 	}
 
  out:
-	__raw_spin_unlock(&ftrace_dump_lock);
+	arch_spin_unlock(&ftrace_dump_lock);
 	local_irq_restore(flags);
 }
 
@@ -4415,7 +4426,7 @@ __init static int tracer_alloc_buffers(void)
 	/* Allocate the first page for all buffers */
 	for_each_tracing_cpu(i) {
 		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
-		max_tr.data[i] = &per_cpu(max_data, i);
+		max_tr.data[i] = &per_cpu(max_tr_data, i);
 	}
 
 	trace_init_cmdlines();
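
Note on the recurring patterns in this diff: the hunks above consistently replace the old raw_spinlock_t / __raw_spin_lock() primitives with arch_spinlock_t / arch_spin_lock(), and turn the per-CPU local_t counter into a plain per-CPU int manipulated through the __this_cpu_*() accessors, with interrupts and preemption handled explicitly by the caller. The fragment below is a minimal sketch of those two idioms only, not code from this commit; example_lock, example_disabled and the example_*() functions are made-up names, and per_cpu_var() is the 2.6.33-era wrapper used in the diff (later kernels take the variable name directly).

/*
 * Minimal sketch of the two conversions applied throughout this diff,
 * assuming a 2.6.33-era kernel tree. All example_* identifiers are
 * illustrative only.
 */
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/percpu.h>

/* arch_spinlock_t replaces raw_spinlock_t: no lockdep, caller handles irqs */
static arch_spinlock_t example_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

/* a plain per-CPU int replaces the old DEFINE_PER_CPU(local_t, ...) */
static DEFINE_PER_CPU(int, example_disabled);

static void example_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* arch_spin_lock() does not disable irqs */
	arch_spin_lock(&example_lock);	/* was __raw_spin_lock() */
	/* ... work protected from other CPUs ... */
	arch_spin_unlock(&example_lock);
	local_irq_restore(flags);
}

static void example_disable_this_cpu(void)
{
	preempt_disable();
	/* was local_inc(&__get_cpu_var(example_disabled)) */
	__this_cpu_inc(per_cpu_var(example_disabled));
	preempt_enable();
}

The point of arch_spinlock_t on these paths is the one the in-file comment gives: it bypasses lockdep tracking, which keeps the hot, irq-off tracer paths cheap when lock debugging is enabled.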