Diffstat (limited to 'kernel/trace/trace.c')

 kernel/trace/trace.c | 238 ++++++++++++++++++-------------------
 1 file changed, 105 insertions(+), 133 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c82dfd92fdfd..0df1b0f2cb9e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -12,7 +12,7 @@
  * Copyright (C) 2004 William Lee Irwin III
  */
 #include <linux/ring_buffer.h>
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
 #include <linux/stacktrace.h>
 #include <linux/writeback.h>
 #include <linux/kallsyms.h>
@@ -313,7 +313,6 @@ static const char *trace_options[] = {
 	"bin",
 	"block",
 	"stacktrace",
-	"sched-tree",
 	"trace_printk",
 	"ftrace_preempt",
 	"branch",
@@ -493,15 +492,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
  * protected by per_cpu spinlocks. But the action of the swap
  * needs its own lock.
  *
- * This is defined as a raw_spinlock_t in order to help
+ * This is defined as an arch_spinlock_t in order to help
  * with performance when lockdep debugging is enabled.
  *
  * It is also used in other places outside the update_max_tr
  * so it needs to be defined outside of the
  * CONFIG_TRACER_MAX_TRACE.
  */
-static raw_spinlock_t ftrace_max_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t ftrace_max_lock =
+	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly tracing_max_latency;
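
Most of this patch is the mechanical raw_spinlock_t -> arch_spinlock_t rename, and the contract is unchanged by it: arch spinlocks are the lowest-level locks, invisible to lockdep and the scheduler, so callers fence off interrupts and preemption themselves. A minimal sketch of that discipline, as the converted call sites below use it (the demo_* names are invented for illustration, not symbols from trace.c):

static arch_spinlock_t demo_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void demo_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* arch locks do no irq handling themselves */
	arch_spin_lock(&demo_lock);	/* bypasses lockdep, cheap under debug configs */

	/* ... touch the data protected by demo_lock ... */

	arch_spin_unlock(&demo_lock);
	local_irq_restore(flags);
}
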
@@ -555,13 +554,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;

 	WARN_ON_ONCE(!irqs_disabled());
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);

 	tr->buffer = max_tr.buffer;
 	max_tr.buffer = buf;

 	__update_max_tr(tr, tsk, cpu);
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 }

 /**
@@ -581,7 +580,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;

 	WARN_ON_ONCE(!irqs_disabled());
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);

 	ftrace_disable_cpu();

@@ -603,7 +602,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

 	__update_max_tr(tr, tsk, cpu);
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */

@@ -802,7 +801,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
-static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

 /* temporary disable recording */
 static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -915,7 +914,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
-	if (!__raw_spin_trylock(&trace_cmdline_lock))
+	if (!arch_spin_trylock(&trace_cmdline_lock))
 		return;

 	idx = map_pid_to_cmdline[tsk->pid];
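
Worth noting in passing: this site takes the lock with trylock because trace_save_cmdline() runs in the scheduling hot path, so under contention it drops the sample rather than spin. The idiom, schematically (demo_lock is invented):

	if (!arch_spin_trylock(&demo_lock))
		return;			/* contended: skip this record */
	/* ... update the protected table ... */
	arch_spin_unlock(&demo_lock);
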
@@ -940,7 +939,7 @@ static void trace_save_cmdline(struct task_struct *tsk)

 	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

-	__raw_spin_unlock(&trace_cmdline_lock);
+	arch_spin_unlock(&trace_cmdline_lock);
 }

 void trace_find_cmdline(int pid, char comm[])
@@ -958,14 +957,14 @@ void trace_find_cmdline(int pid, char comm[])
 	}

 	preempt_disable();
-	__raw_spin_lock(&trace_cmdline_lock);
+	arch_spin_lock(&trace_cmdline_lock);
 	map = map_pid_to_cmdline[pid];
 	if (map != NO_CMDLINE_MAP)
 		strcpy(comm, saved_cmdlines[map]);
 	else
 		strcpy(comm, "<...>");

-	__raw_spin_unlock(&trace_cmdline_lock);
+	arch_spin_unlock(&trace_cmdline_lock);
 	preempt_enable();
 }

@@ -1151,6 +1150,22 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
 	__ftrace_trace_stack(tr->buffer, flags, skip, pc);
 }

+/**
+ * trace_dump_stack - record a stack back trace in the trace buffer
+ */
+void trace_dump_stack(void)
+{
+	unsigned long flags;
+
+	if (tracing_disabled || tracing_selftest_running)
+		return;
+
+	local_save_flags(flags);
+
+	/* skipping 3 traces, seems to get us at the caller of this function */
+	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
+}
+
 void
 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
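
trace_dump_stack() is new in this patch: it records the current backtrace into the ring buffer instead of printing it to the console the way dump_stack() does, so the backtrace is time-ordered with the surrounding trace events. A hedged usage sketch (the call site is invented for illustration):

	/* somewhere in code already being traced */
	if (unlikely(saw_bad_state))
		trace_dump_stack();	/* backtrace lands in the trace, not dmesg */
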
@@ -1251,8 +1266,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
  */
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
-	static raw_spinlock_t trace_buf_lock =
-		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t trace_buf_lock =
+		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	static u32 trace_buf[TRACE_BUF_SIZE];

 	struct ftrace_event_call *call = &event_bprint;
@@ -1283,7 +1298,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)

 	/* Lockdep uses trace_printk for lock tracing */
 	local_irq_save(flags);
-	__raw_spin_lock(&trace_buf_lock);
+	arch_spin_lock(&trace_buf_lock);
 	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);

 	if (len > TRACE_BUF_SIZE || len < 0)
@@ -1304,7 +1319,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	ring_buffer_unlock_commit(buffer, event);

 out_unlock:
-	__raw_spin_unlock(&trace_buf_lock);
+	arch_spin_unlock(&trace_buf_lock);
 	local_irq_restore(flags);

 out:
@@ -1334,7 +1349,7 @@ int trace_array_printk(struct trace_array *tr,
 int trace_array_vprintk(struct trace_array *tr,
 			unsigned long ip, const char *fmt, va_list args)
 {
-	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 	static char trace_buf[TRACE_BUF_SIZE];

 	struct ftrace_event_call *call = &event_print;
@@ -1360,7 +1375,7 @@ int trace_array_vprintk(struct trace_array *tr,

 	pause_graph_tracing();
 	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&trace_buf_lock);
+	arch_spin_lock(&trace_buf_lock);
 	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);

 	size = sizeof(*entry) + len + 1;
@@ -1378,7 +1393,7 @@ int trace_array_vprintk(struct trace_array *tr,
 	ring_buffer_unlock_commit(buffer, event);

 out_unlock:
-	__raw_spin_unlock(&trace_buf_lock);
+	arch_spin_unlock(&trace_buf_lock);
 	raw_local_irq_restore(irq_flags);
 	unpause_graph_tracing();
 out:
@@ -2279,7 +2294,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	mutex_lock(&tracing_cpumask_update_lock);

 	local_irq_disable();
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 	for_each_tracing_cpu(cpu) {
 		/*
 		 * Increase/decrease the disabled counter if we are
@@ -2294,7 +2309,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 			atomic_dec(&global_trace.data[cpu]->disabled);
 		}
 	}
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 	local_irq_enable();

 	cpumask_copy(tracing_cpumask, tracing_cpumask_new);
@@ -2316,67 +2331,49 @@ static const struct file_operations tracing_cpumask_fops = {
 	.write		= tracing_cpumask_write,
 };

-static ssize_t
-tracing_trace_options_read(struct file *filp, char __user *ubuf,
-		       size_t cnt, loff_t *ppos)
+static int tracing_trace_options_show(struct seq_file *m, void *v)
 {
 	struct tracer_opt *trace_opts;
 	u32 tracer_flags;
-	int len = 0;
-	char *buf;
-	int r = 0;
 	int i;

-
-	/* calculate max size */
-	for (i = 0; trace_options[i]; i++) {
-		len += strlen(trace_options[i]);
-		len += 3; /* "no" and newline */
-	}
-
 	mutex_lock(&trace_types_lock);
 	tracer_flags = current_trace->flags->val;
 	trace_opts = current_trace->flags->opts;

-	/*
-	 * Increase the size with names of options specific
-	 * of the current tracer.
-	 */
-	for (i = 0; trace_opts[i].name; i++) {
-		len += strlen(trace_opts[i].name);
-		len += 3; /* "no" and newline */
-	}
-
-	/* +1 for \0 */
-	buf = kmalloc(len + 1, GFP_KERNEL);
-	if (!buf) {
-		mutex_unlock(&trace_types_lock);
-		return -ENOMEM;
-	}
-
 	for (i = 0; trace_options[i]; i++) {
 		if (trace_flags & (1 << i))
-			r += sprintf(buf + r, "%s\n", trace_options[i]);
+			seq_printf(m, "%s\n", trace_options[i]);
 		else
-			r += sprintf(buf + r, "no%s\n", trace_options[i]);
+			seq_printf(m, "no%s\n", trace_options[i]);
 	}

 	for (i = 0; trace_opts[i].name; i++) {
 		if (tracer_flags & trace_opts[i].bit)
-			r += sprintf(buf + r, "%s\n",
-				trace_opts[i].name);
+			seq_printf(m, "%s\n", trace_opts[i].name);
 		else
-			r += sprintf(buf + r, "no%s\n",
-				trace_opts[i].name);
+			seq_printf(m, "no%s\n", trace_opts[i].name);
 	}
 	mutex_unlock(&trace_types_lock);

-	WARN_ON(r >= len + 1);
+	return 0;
+}

-	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+static int __set_tracer_option(struct tracer *trace,
+			       struct tracer_flags *tracer_flags,
+			       struct tracer_opt *opts, int neg)
+{
+	int ret;

-	kfree(buf);
-	return r;
+	ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
+	if (ret)
+		return ret;
+
+	if (neg)
+		tracer_flags->val &= ~opts->bit;
+	else
+		tracer_flags->val |= opts->bit;
+	return 0;
 }

 /* Try to assign a tracer specific option */
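
Converting the read path to seq_file is what removes all of the hand-rolled length accounting above: seq_printf() grows the seq_file's buffer as needed, so there is no kmalloc()ed staging buffer to size and free. The pattern, reduced to a minimal sketch (assuming a 2.6.33-era kernel; the demo_* names are invented, not part of this patch):

#include <linux/fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	/* no manual buffer sizing, no overflow WARN_ON needed */
	seq_printf(m, "state: %d\n", 42);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

static const struct file_operations demo_fops = {
	.open		= demo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

single_open() stashes the show callback for seq_read() to drive, and single_release() frees the seq_file state; the fops conversions later in this patch follow exactly this shape.
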
@@ -2384,33 +2381,17 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
 {
 	struct tracer_flags *tracer_flags = trace->flags;
 	struct tracer_opt *opts = NULL;
-	int ret = 0, i = 0;
-	int len;
+	int i;

 	for (i = 0; tracer_flags->opts[i].name; i++) {
 		opts = &tracer_flags->opts[i];
-		len = strlen(opts->name);

-		if (strncmp(cmp, opts->name, len) == 0) {
-			ret = trace->set_flag(tracer_flags->val,
-				opts->bit, !neg);
-			break;
-		}
+		if (strcmp(cmp, opts->name) == 0)
+			return __set_tracer_option(trace, trace->flags,
+						   opts, neg);
 	}
-	/* Not found */
-	if (!tracer_flags->opts[i].name)
-		return -EINVAL;
-
-	/* Refused to handle */
-	if (ret)
-		return ret;
-
-	if (neg)
-		tracer_flags->val &= ~opts->bit;
-	else
-		tracer_flags->val |= opts->bit;

-	return 0;
+	return -EINVAL;
 }

 static void set_tracer_flags(unsigned int mask, int enabled)
@@ -2430,7 +2411,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 			size_t cnt, loff_t *ppos)
 {
 	char buf[64];
-	char *cmp = buf;
+	char *cmp;
 	int neg = 0;
 	int ret;
 	int i;
@@ -2442,16 +2423,15 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 		return -EFAULT;

 	buf[cnt] = 0;
+	cmp = strstrip(buf);

-	if (strncmp(buf, "no", 2) == 0) {
+	if (strncmp(cmp, "no", 2) == 0) {
 		neg = 1;
 		cmp += 2;
 	}

 	for (i = 0; trace_options[i]; i++) {
-		int len = strlen(trace_options[i]);
-
-		if (strncmp(cmp, trace_options[i], len) == 0) {
+		if (strcmp(cmp, trace_options[i]) == 0) {
 			set_tracer_flags(1 << i, !neg);
 			break;
 		}
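
The strncmp() -> strcmp() switch here, paired with the new strstrip() call, is a behavior fix rather than a cleanup: length-limited prefix matching accepted trailing garbage after a valid option name, and strstrip() now removes the newline that echo appends so an exact compare can work. Schematically:

	strncmp("stacktracefoo", "stacktrace", 10)	/* == 0: prefix "match" accepted */
	strcmp("stacktracefoo", "stacktrace")		/* != 0: exact compare rejects it */
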
@@ -2471,9 +2451,18 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }

+static int tracing_trace_options_open(struct inode *inode, struct file *file)
+{
+	if (tracing_disabled)
+		return -ENODEV;
+	return single_open(file, tracing_trace_options_show, NULL);
+}
+
 static const struct file_operations tracing_iter_fops = {
-	.open = tracing_open_generic,
-	.read = tracing_trace_options_read,
+	.open = tracing_trace_options_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
 	.write = tracing_trace_options_write,
 };

@@ -3133,7 +3122,7 @@ static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
 		__free_page(spd->pages[idx]);
 }

-static struct pipe_buf_operations tracing_pipe_buf_ops = {
+static const struct pipe_buf_operations tracing_pipe_buf_ops = {
 	.can_merge		= 0,
 	.map			= generic_pipe_buf_map,
 	.unmap			= generic_pipe_buf_unmap,
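
Constifying the ops table (here and for buffer_pipe_buf_ops below) is a small hardening win: a const static struct of function pointers can be placed in read-only memory and cannot be patched at runtime. The general shape, sketched with toy types rather than the kernel's:

struct demo_ops {
	int (*read)(void *buf, int len);
};

static int demo_read(void *buf, int len)
{
	return 0;
}

/* const lets the compiler put the table in .rodata */
static const struct demo_ops demo = {
	.read = demo_read,
};
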
@@ -3392,21 +3381,18 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }

-static ssize_t tracing_clock_read(struct file *filp, char __user *ubuf,
-				  size_t cnt, loff_t *ppos)
+static int tracing_clock_show(struct seq_file *m, void *v)
 {
-	char buf[64];
-	int bufiter = 0;
 	int i;

 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
-		bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter,
+		seq_printf(m,
 			"%s%s%s%s", i ? " " : "",
 			i == trace_clock_id ? "[" : "", trace_clocks[i].name,
 			i == trace_clock_id ? "]" : "");
-	bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, "\n");
+	seq_putc(m, '\n');

-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, bufiter);
+	return 0;
 }

 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
@@ -3448,6 +3434,13 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }

+static int tracing_clock_open(struct inode *inode, struct file *file)
+{
+	if (tracing_disabled)
+		return -ENODEV;
+	return single_open(file, tracing_clock_show, NULL);
+}
+
 static const struct file_operations tracing_max_lat_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_max_lat_read,
@@ -3486,8 +3479,10 @@ static const struct file_operations tracing_mark_fops = {
 };

 static const struct file_operations trace_clock_fops = {
-	.open		= tracing_open_generic,
-	.read		= tracing_clock_read,
+	.open		= tracing_clock_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
 	.write		= tracing_clock_write,
 };

@@ -3617,7 +3612,7 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
 }

 /* Pipe buffer operations for a buffer. */
-static struct pipe_buf_operations buffer_pipe_buf_ops = {
+static const struct pipe_buf_operations buffer_pipe_buf_ops = {
 	.can_merge		= 0,
 	.map			= generic_pipe_buf_map,
 	.unmap			= generic_pipe_buf_unmap,
@@ -3948,39 +3943,16 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	if (ret < 0)
 		return ret;

-	ret = 0;
-	switch (val) {
-	case 0:
-		/* do nothing if already cleared */
-		if (!(topt->flags->val & topt->opt->bit))
-			break;
-
-		mutex_lock(&trace_types_lock);
-		if (current_trace->set_flag)
-			ret = current_trace->set_flag(topt->flags->val,
-						      topt->opt->bit, 0);
-		mutex_unlock(&trace_types_lock);
-		if (ret)
-			return ret;
-		topt->flags->val &= ~topt->opt->bit;
-		break;
-	case 1:
-		/* do nothing if already set */
-		if (topt->flags->val & topt->opt->bit)
-			break;
+	if (val != 0 && val != 1)
+		return -EINVAL;

+	if (!!(topt->flags->val & topt->opt->bit) != val) {
 		mutex_lock(&trace_types_lock);
-		if (current_trace->set_flag)
-			ret = current_trace->set_flag(topt->flags->val,
-						      topt->opt->bit, 1);
+		ret = __set_tracer_option(current_trace, topt->flags,
+					  topt->opt, !val);
 		mutex_unlock(&trace_types_lock);
 		if (ret)
 			return ret;
-		topt->flags->val |= topt->opt->bit;
-		break;
-
-	default:
-		return -EINVAL;
 	}

 	*ppos += cnt;
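
The rewrite above folds the old two-arm switch into a guard plus one update: reject anything but 0/1, then only touch the flag when its current state differs from the request, delegating the actual flip to the new __set_tracer_option() helper. The !! double negation normalizes the masked bit to 0 or 1 so it compares against val directly, for example:

	/* flags = 0x4, bit = 0x4, val = 1: !!(flags & bit) == 1 == val, skip */
	/* flags = 0x0, bit = 0x4, val = 1: !!(flags & bit) == 0 != val, update */
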
@@ -4307,8 +4279,8 @@ trace_printk_seq(struct trace_seq *s)

 static void __ftrace_dump(bool disable_tracing)
 {
-	static raw_spinlock_t ftrace_dump_lock =
-		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t ftrace_dump_lock =
+		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
 	unsigned int old_userobj;
@@ -4318,7 +4290,7 @@ static void __ftrace_dump(bool disable_tracing)

 	/* only one dump */
 	local_irq_save(flags);
-	__raw_spin_lock(&ftrace_dump_lock);
+	arch_spin_lock(&ftrace_dump_lock);
 	if (dump_ran)
 		goto out;

@@ -4393,7 +4365,7 @@ static void __ftrace_dump(bool disable_tracing)
 	}

 out:
-	__raw_spin_unlock(&ftrace_dump_lock);
+	arch_spin_unlock(&ftrace_dump_lock);
 	local_irq_restore(flags);
 }
