author | H. Peter Anvin <hpa@zytor.com> | 2010-02-10 19:55:28 -0500 |
---|---|---|
committer | H. Peter Anvin <hpa@zytor.com> | 2010-02-10 19:55:28 -0500 |
commit | 84abd88a70090cf00f9e45c3a81680874f17626e (patch) | |
tree | 4f58b80057f6e1f5817af1dc33a5458b3dfc9a99 /kernel/trace/trace.c | |
parent | 13ca0fcaa33f6b1984c4111b6ec5df42689fea6f (diff) | |
parent | e28cab42f384745c8a947a9ccd51e4aae52f5d51 (diff) |
Merge remote branch 'linus/master' into x86/bootmem
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r-- | kernel/trace/trace.c | 333 |
1 file changed, 169 insertions, 164 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 45068269ebb1..eac6875cb990 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -12,7 +12,7 @@ | |||
12 | * Copyright (C) 2004 William Lee Irwin III | 12 | * Copyright (C) 2004 William Lee Irwin III |
13 | */ | 13 | */ |
14 | #include <linux/ring_buffer.h> | 14 | #include <linux/ring_buffer.h> |
15 | #include <linux/utsrelease.h> | 15 | #include <generated/utsrelease.h> |
16 | #include <linux/stacktrace.h> | 16 | #include <linux/stacktrace.h> |
17 | #include <linux/writeback.h> | 17 | #include <linux/writeback.h> |
18 | #include <linux/kallsyms.h> | 18 | #include <linux/kallsyms.h> |
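The `utsrelease.h` hunk is fallout from the 2.6.33 kbuild reorganization that moved build-generated headers out of `include/linux/` into `include/generated/`; every file that prints the kernel release string picked up the same one-line include fix.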
@@ -86,17 +86,17 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set) | |||
86 | */ | 86 | */ |
87 | static int tracing_disabled = 1; | 87 | static int tracing_disabled = 1; |
88 | 88 | ||
89 | DEFINE_PER_CPU(local_t, ftrace_cpu_disabled); | 89 | DEFINE_PER_CPU(int, ftrace_cpu_disabled); |
90 | 90 | ||
91 | static inline void ftrace_disable_cpu(void) | 91 | static inline void ftrace_disable_cpu(void) |
92 | { | 92 | { |
93 | preempt_disable(); | 93 | preempt_disable(); |
94 | local_inc(&__get_cpu_var(ftrace_cpu_disabled)); | 94 | __this_cpu_inc(per_cpu_var(ftrace_cpu_disabled)); |
95 | } | 95 | } |
96 | 96 | ||
97 | static inline void ftrace_enable_cpu(void) | 97 | static inline void ftrace_enable_cpu(void) |
98 | { | 98 | { |
99 | local_dec(&__get_cpu_var(ftrace_cpu_disabled)); | 99 | __this_cpu_dec(per_cpu_var(ftrace_cpu_disabled)); |
100 | preempt_enable(); | 100 | preempt_enable(); |
101 | } | 101 | } |
102 | 102 | ||
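The per-CPU hunks come from the this_cpu-ops conversion merged in this window: `ftrace_cpu_disabled` no longer needs the atomicity of `local_t`, because it is only ever touched on its own CPU with preemption disabled, and `__this_cpu_inc()` compiles to a single memory-to-memory instruction on x86. The converted helpers, collected in one place as a sketch (the `per_cpu_var()` wrapper was still required in this era; later kernels dropped it, so plain `__this_cpu_inc(ftrace_cpu_disabled)` suffices there):

```c
#include <linux/percpu.h>

/* A plain int is enough once this_cpu ops replace local_t. */
DEFINE_PER_CPU(int, ftrace_cpu_disabled);

static inline void ftrace_disable_cpu(void)
{
	preempt_disable();
	/* Single-instruction increment of this CPU's copy on x86. */
	__this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
}

static inline void ftrace_enable_cpu(void)
{
	__this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
	preempt_enable();
}
```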
@@ -129,7 +129,7 @@ static int tracing_set_tracer(const char *buf); | |||
129 | static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; | 129 | static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; |
130 | static char *default_bootup_tracer; | 130 | static char *default_bootup_tracer; |
131 | 131 | ||
132 | static int __init set_ftrace(char *str) | 132 | static int __init set_cmdline_ftrace(char *str) |
133 | { | 133 | { |
134 | strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); | 134 | strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); |
135 | default_bootup_tracer = bootup_tracer_buf; | 135 | default_bootup_tracer = bootup_tracer_buf; |
@@ -137,7 +137,7 @@ static int __init set_ftrace(char *str) | |||
137 | ring_buffer_expanded = 1; | 137 | ring_buffer_expanded = 1; |
138 | return 1; | 138 | return 1; |
139 | } | 139 | } |
140 | __setup("ftrace=", set_ftrace); | 140 | __setup("ftrace=", set_cmdline_ftrace); |
141 | 141 | ||
142 | static int __init set_ftrace_dump_on_oops(char *str) | 142 | static int __init set_ftrace_dump_on_oops(char *str) |
143 | { | 143 | { |
@@ -203,7 +203,7 @@ cycle_t ftrace_now(int cpu) | |||
203 | */ | 203 | */ |
204 | static struct trace_array max_tr; | 204 | static struct trace_array max_tr; |
205 | 205 | ||
206 | static DEFINE_PER_CPU(struct trace_array_cpu, max_data); | 206 | static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data); |
207 | 207 | ||
208 | /* tracer_enabled is used to toggle activation of a tracer */ | 208 | /* tracer_enabled is used to toggle activation of a tracer */ |
209 | static int tracer_enabled = 1; | 209 | static int tracer_enabled = 1; |
@@ -313,7 +313,6 @@ static const char *trace_options[] = { | |||
313 | "bin", | 313 | "bin", |
314 | "block", | 314 | "block", |
315 | "stacktrace", | 315 | "stacktrace", |
316 | "sched-tree", | ||
317 | "trace_printk", | 316 | "trace_printk", |
318 | "ftrace_preempt", | 317 | "ftrace_preempt", |
319 | "branch", | 318 | "branch", |
@@ -493,15 +492,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) | |||
493 | * protected by per_cpu spinlocks. But the action of the swap | 492 | * protected by per_cpu spinlocks. But the action of the swap |
494 | * needs its own lock. | 493 | * needs its own lock. |
495 | * | 494 | * |
496 | * This is defined as a raw_spinlock_t in order to help | 495 | * This is defined as a arch_spinlock_t in order to help |
497 | * with performance when lockdep debugging is enabled. | 496 | * with performance when lockdep debugging is enabled. |
498 | * | 497 | * |
499 | * It is also used in other places outside the update_max_tr | 498 | * It is also used in other places outside the update_max_tr |
500 | * so it needs to be defined outside of the | 499 | * so it needs to be defined outside of the |
501 | * CONFIG_TRACER_MAX_TRACE. | 500 | * CONFIG_TRACER_MAX_TRACE. |
502 | */ | 501 | */ |
503 | static raw_spinlock_t ftrace_max_lock = | 502 | static arch_spinlock_t ftrace_max_lock = |
504 | (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 503 | (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
505 | 504 | ||
506 | #ifdef CONFIG_TRACER_MAX_TRACE | 505 | #ifdef CONFIG_TRACER_MAX_TRACE |
507 | unsigned long __read_mostly tracing_max_latency; | 506 | unsigned long __read_mostly tracing_max_latency; |
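The `raw_spinlock_t` to `arch_spinlock_t` rename is mechanical but worth decoding: the realtime tree reclaimed the name `raw_spinlock_t` for spinlocks that never become sleeping locks, so the lowest-level arch lock had to be renamed. Arch spinlocks are invisible to lockdep and manage neither preemption nor interrupts, which is why every user in this file brackets them with explicit irq or preempt handling (see the `WARN_ON_ONCE(!irqs_disabled())` above the lock). A minimal sketch of the calling convention:

```c
#include <linux/spinlock.h>

static arch_spinlock_t demo_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void demo_critical_section(void)
{
	unsigned long flags;

	/* arch_spin_lock() neither disables irqs nor preemption and
	 * bypasses lockdep -- the caller must provide both. */
	local_irq_save(flags);
	arch_spin_lock(&demo_lock);

	/* ... touch state shared across CPUs ... */

	arch_spin_unlock(&demo_lock);
	local_irq_restore(flags);
}
```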
@@ -555,13 +554,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
555 | return; | 554 | return; |
556 | 555 | ||
557 | WARN_ON_ONCE(!irqs_disabled()); | 556 | WARN_ON_ONCE(!irqs_disabled()); |
558 | __raw_spin_lock(&ftrace_max_lock); | 557 | arch_spin_lock(&ftrace_max_lock); |
559 | 558 | ||
560 | tr->buffer = max_tr.buffer; | 559 | tr->buffer = max_tr.buffer; |
561 | max_tr.buffer = buf; | 560 | max_tr.buffer = buf; |
562 | 561 | ||
563 | __update_max_tr(tr, tsk, cpu); | 562 | __update_max_tr(tr, tsk, cpu); |
564 | __raw_spin_unlock(&ftrace_max_lock); | 563 | arch_spin_unlock(&ftrace_max_lock); |
565 | } | 564 | } |
566 | 565 | ||
567 | /** | 566 | /** |
@@ -581,7 +580,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
581 | return; | 580 | return; |
582 | 581 | ||
583 | WARN_ON_ONCE(!irqs_disabled()); | 582 | WARN_ON_ONCE(!irqs_disabled()); |
584 | __raw_spin_lock(&ftrace_max_lock); | 583 | arch_spin_lock(&ftrace_max_lock); |
585 | 584 | ||
586 | ftrace_disable_cpu(); | 585 | ftrace_disable_cpu(); |
587 | 586 | ||
@@ -603,7 +602,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
603 | WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); | 602 | WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); |
604 | 603 | ||
605 | __update_max_tr(tr, tsk, cpu); | 604 | __update_max_tr(tr, tsk, cpu); |
606 | __raw_spin_unlock(&ftrace_max_lock); | 605 | arch_spin_unlock(&ftrace_max_lock); |
607 | } | 606 | } |
608 | #endif /* CONFIG_TRACER_MAX_TRACE */ | 607 | #endif /* CONFIG_TRACER_MAX_TRACE */ |
609 | 608 | ||
@@ -802,7 +801,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; | |||
802 | static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; | 801 | static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; |
803 | static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN]; | 802 | static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN]; |
804 | static int cmdline_idx; | 803 | static int cmdline_idx; |
805 | static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED; | 804 | static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; |
806 | 805 | ||
807 | /* temporary disable recording */ | 806 | /* temporary disable recording */ |
808 | static atomic_t trace_record_cmdline_disabled __read_mostly; | 807 | static atomic_t trace_record_cmdline_disabled __read_mostly; |
@@ -915,7 +914,7 @@ static void trace_save_cmdline(struct task_struct *tsk) | |||
915 | * nor do we want to disable interrupts, | 914 | * nor do we want to disable interrupts, |
916 | * so if we miss here, then better luck next time. | 915 | * so if we miss here, then better luck next time. |
917 | */ | 916 | */ |
918 | if (!__raw_spin_trylock(&trace_cmdline_lock)) | 917 | if (!arch_spin_trylock(&trace_cmdline_lock)) |
919 | return; | 918 | return; |
920 | 919 | ||
921 | idx = map_pid_to_cmdline[tsk->pid]; | 920 | idx = map_pid_to_cmdline[tsk->pid]; |
@@ -940,7 +939,7 @@ static void trace_save_cmdline(struct task_struct *tsk) | |||
940 | 939 | ||
941 | memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN); | 940 | memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN); |
942 | 941 | ||
943 | __raw_spin_unlock(&trace_cmdline_lock); | 942 | arch_spin_unlock(&trace_cmdline_lock); |
944 | } | 943 | } |
945 | 944 | ||
946 | void trace_find_cmdline(int pid, char comm[]) | 945 | void trace_find_cmdline(int pid, char comm[]) |
@@ -952,20 +951,25 @@ void trace_find_cmdline(int pid, char comm[]) | |||
952 | return; | 951 | return; |
953 | } | 952 | } |
954 | 953 | ||
954 | if (WARN_ON_ONCE(pid < 0)) { | ||
955 | strcpy(comm, "<XXX>"); | ||
956 | return; | ||
957 | } | ||
958 | |||
955 | if (pid > PID_MAX_DEFAULT) { | 959 | if (pid > PID_MAX_DEFAULT) { |
956 | strcpy(comm, "<...>"); | 960 | strcpy(comm, "<...>"); |
957 | return; | 961 | return; |
958 | } | 962 | } |
959 | 963 | ||
960 | preempt_disable(); | 964 | preempt_disable(); |
961 | __raw_spin_lock(&trace_cmdline_lock); | 965 | arch_spin_lock(&trace_cmdline_lock); |
962 | map = map_pid_to_cmdline[pid]; | 966 | map = map_pid_to_cmdline[pid]; |
963 | if (map != NO_CMDLINE_MAP) | 967 | if (map != NO_CMDLINE_MAP) |
964 | strcpy(comm, saved_cmdlines[map]); | 968 | strcpy(comm, saved_cmdlines[map]); |
965 | else | 969 | else |
966 | strcpy(comm, "<...>"); | 970 | strcpy(comm, "<...>"); |
967 | 971 | ||
968 | __raw_spin_unlock(&trace_cmdline_lock); | 972 | arch_spin_unlock(&trace_cmdline_lock); |
969 | preempt_enable(); | 973 | preempt_enable(); |
970 | } | 974 | } |
971 | 975 | ||
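The new guard in `trace_find_cmdline()` closes a small hole: `pid` is used directly as an index into `map_pid_to_cmdline[]`, so a negative value would have read out of bounds. An impossible negative pid now warns once and reports `<XXX>`, distinct from the merely out-of-range `pid > PID_MAX_DEFAULT` case that reports `<...>`.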
@@ -1085,7 +1089,7 @@ trace_function(struct trace_array *tr, | |||
1085 | struct ftrace_entry *entry; | 1089 | struct ftrace_entry *entry; |
1086 | 1090 | ||
1087 | /* If we are reading the ring buffer, don't trace */ | 1091 | /* If we are reading the ring buffer, don't trace */ |
1088 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) | 1092 | if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled)))) |
1089 | return; | 1093 | return; |
1090 | 1094 | ||
1091 | event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), | 1095 | event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), |
@@ -1151,6 +1155,22 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, | |||
1151 | __ftrace_trace_stack(tr->buffer, flags, skip, pc); | 1155 | __ftrace_trace_stack(tr->buffer, flags, skip, pc); |
1152 | } | 1156 | } |
1153 | 1157 | ||
1158 | /** | ||
1159 | * trace_dump_stack - record a stack back trace in the trace buffer | ||
1160 | */ | ||
1161 | void trace_dump_stack(void) | ||
1162 | { | ||
1163 | unsigned long flags; | ||
1164 | |||
1165 | if (tracing_disabled || tracing_selftest_running) | ||
1166 | return; | ||
1167 | |||
1168 | local_save_flags(flags); | ||
1169 | |||
1170 | /* skipping 3 traces, seems to get us at the caller of this function */ | ||
1171 | __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count()); | ||
1172 | } | ||
1173 | |||
1154 | void | 1174 | void |
1155 | ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) | 1175 | ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) |
1156 | { | 1176 | { |
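`trace_dump_stack()` is a new convenience: it records a backtrace into the trace ring buffer instead of printing to dmesg, with the skip count of 3 eliding the helper's own frames so the trace starts at the caller (empirically tuned, per the comment). A hedged usage sketch; the surrounding function and condition are hypothetical:

```c
/* Hypothetical debugging call site: capture who reached this
 * path into the trace buffer rather than the kernel log. */
static void my_suspect_path(int saw_unexpected_state)
{
	if (saw_unexpected_state)	/* hypothetical condition */
		trace_dump_stack();
}
```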
@@ -1251,8 +1271,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) | |||
1251 | */ | 1271 | */ |
1252 | int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | 1272 | int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) |
1253 | { | 1273 | { |
1254 | static raw_spinlock_t trace_buf_lock = | 1274 | static arch_spinlock_t trace_buf_lock = |
1255 | (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 1275 | (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
1256 | static u32 trace_buf[TRACE_BUF_SIZE]; | 1276 | static u32 trace_buf[TRACE_BUF_SIZE]; |
1257 | 1277 | ||
1258 | struct ftrace_event_call *call = &event_bprint; | 1278 | struct ftrace_event_call *call = &event_bprint; |
@@ -1283,7 +1303,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | |||
1283 | 1303 | ||
1284 | /* Lockdep uses trace_printk for lock tracing */ | 1304 | /* Lockdep uses trace_printk for lock tracing */ |
1285 | local_irq_save(flags); | 1305 | local_irq_save(flags); |
1286 | __raw_spin_lock(&trace_buf_lock); | 1306 | arch_spin_lock(&trace_buf_lock); |
1287 | len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args); | 1307 | len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args); |
1288 | 1308 | ||
1289 | if (len > TRACE_BUF_SIZE || len < 0) | 1309 | if (len > TRACE_BUF_SIZE || len < 0) |
@@ -1304,7 +1324,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | |||
1304 | ring_buffer_unlock_commit(buffer, event); | 1324 | ring_buffer_unlock_commit(buffer, event); |
1305 | 1325 | ||
1306 | out_unlock: | 1326 | out_unlock: |
1307 | __raw_spin_unlock(&trace_buf_lock); | 1327 | arch_spin_unlock(&trace_buf_lock); |
1308 | local_irq_restore(flags); | 1328 | local_irq_restore(flags); |
1309 | 1329 | ||
1310 | out: | 1330 | out: |
@@ -1334,7 +1354,7 @@ int trace_array_printk(struct trace_array *tr, | |||
1334 | int trace_array_vprintk(struct trace_array *tr, | 1354 | int trace_array_vprintk(struct trace_array *tr, |
1335 | unsigned long ip, const char *fmt, va_list args) | 1355 | unsigned long ip, const char *fmt, va_list args) |
1336 | { | 1356 | { |
1337 | static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED; | 1357 | static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED; |
1338 | static char trace_buf[TRACE_BUF_SIZE]; | 1358 | static char trace_buf[TRACE_BUF_SIZE]; |
1339 | 1359 | ||
1340 | struct ftrace_event_call *call = &event_print; | 1360 | struct ftrace_event_call *call = &event_print; |
@@ -1360,12 +1380,9 @@ int trace_array_vprintk(struct trace_array *tr, | |||
1360 | 1380 | ||
1361 | pause_graph_tracing(); | 1381 | pause_graph_tracing(); |
1362 | raw_local_irq_save(irq_flags); | 1382 | raw_local_irq_save(irq_flags); |
1363 | __raw_spin_lock(&trace_buf_lock); | 1383 | arch_spin_lock(&trace_buf_lock); |
1364 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); | 1384 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); |
1365 | 1385 | ||
1366 | len = min(len, TRACE_BUF_SIZE-1); | ||
1367 | trace_buf[len] = 0; | ||
1368 | |||
1369 | size = sizeof(*entry) + len + 1; | 1386 | size = sizeof(*entry) + len + 1; |
1370 | buffer = tr->buffer; | 1387 | buffer = tr->buffer; |
1371 | event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, | 1388 | event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, |
@@ -1373,15 +1390,15 @@ int trace_array_vprintk(struct trace_array *tr, | |||
1373 | if (!event) | 1390 | if (!event) |
1374 | goto out_unlock; | 1391 | goto out_unlock; |
1375 | entry = ring_buffer_event_data(event); | 1392 | entry = ring_buffer_event_data(event); |
1376 | entry->ip = ip; | 1393 | entry->ip = ip; |
1377 | 1394 | ||
1378 | memcpy(&entry->buf, trace_buf, len); | 1395 | memcpy(&entry->buf, trace_buf, len); |
1379 | entry->buf[len] = 0; | 1396 | entry->buf[len] = '\0'; |
1380 | if (!filter_check_discard(call, entry, buffer, event)) | 1397 | if (!filter_check_discard(call, entry, buffer, event)) |
1381 | ring_buffer_unlock_commit(buffer, event); | 1398 | ring_buffer_unlock_commit(buffer, event); |
1382 | 1399 | ||
1383 | out_unlock: | 1400 | out_unlock: |
1384 | __raw_spin_unlock(&trace_buf_lock); | 1401 | arch_spin_unlock(&trace_buf_lock); |
1385 | raw_local_irq_restore(irq_flags); | 1402 | raw_local_irq_restore(irq_flags); |
1386 | unpause_graph_tracing(); | 1403 | unpause_graph_tracing(); |
1387 | out: | 1404 | out: |
@@ -1393,7 +1410,7 @@ int trace_array_vprintk(struct trace_array *tr, | |||
1393 | 1410 | ||
1394 | int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | 1411 | int trace_vprintk(unsigned long ip, const char *fmt, va_list args) |
1395 | { | 1412 | { |
1396 | return trace_array_printk(&global_trace, ip, fmt, args); | 1413 | return trace_array_vprintk(&global_trace, ip, fmt, args); |
1397 | } | 1414 | } |
1398 | EXPORT_SYMBOL_GPL(trace_vprintk); | 1415 | EXPORT_SYMBOL_GPL(trace_vprintk); |
1399 | 1416 | ||
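The one-line `trace_vprintk()` change is a genuine bug fix, not churn: `trace_array_printk()` is variadic, so the old code passed a raw `va_list` where the first variadic argument was expected, which is undefined behaviour and garbles the arguments on most ABIs. A function that already holds a `va_list` must forward it to the v-flavoured variant, the same split the C library draws between `printf()` and `vprintf()`. A userspace illustration of the convention:

```c
#include <stdarg.h>
#include <stdio.h>

/* The v-variant consumes an already-started va_list... */
static int log_vprintf(const char *fmt, va_list args)
{
	return vfprintf(stderr, fmt, args);
}

/* ...and the variadic wrapper only starts and ends the list.
 * Passing 'args' on to a "..." function instead would be the
 * bug fixed above. */
static int log_printf(const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = log_vprintf(fmt, args);
	va_end(args);
	return ret;
}
```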
@@ -1515,6 +1532,8 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos) | |||
1515 | int i = (int)*pos; | 1532 | int i = (int)*pos; |
1516 | void *ent; | 1533 | void *ent; |
1517 | 1534 | ||
1535 | WARN_ON_ONCE(iter->leftover); | ||
1536 | |||
1518 | (*pos)++; | 1537 | (*pos)++; |
1519 | 1538 | ||
1520 | /* can't go backwards */ | 1539 | /* can't go backwards */ |
@@ -1613,8 +1632,16 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
1613 | ; | 1632 | ; |
1614 | 1633 | ||
1615 | } else { | 1634 | } else { |
1616 | l = *pos - 1; | 1635 | /* |
1617 | p = s_next(m, p, &l); | 1636 | * If we overflowed the seq_file before, then we want |
1637 | * to just reuse the trace_seq buffer again. | ||
1638 | */ | ||
1639 | if (iter->leftover) | ||
1640 | p = iter; | ||
1641 | else { | ||
1642 | l = *pos - 1; | ||
1643 | p = s_next(m, p, &l); | ||
1644 | } | ||
1618 | } | 1645 | } |
1619 | 1646 | ||
1620 | trace_event_read_lock(); | 1647 | trace_event_read_lock(); |
@@ -1922,6 +1949,7 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter) | |||
1922 | static int s_show(struct seq_file *m, void *v) | 1949 | static int s_show(struct seq_file *m, void *v) |
1923 | { | 1950 | { |
1924 | struct trace_iterator *iter = v; | 1951 | struct trace_iterator *iter = v; |
1952 | int ret; | ||
1925 | 1953 | ||
1926 | if (iter->ent == NULL) { | 1954 | if (iter->ent == NULL) { |
1927 | if (iter->tr) { | 1955 | if (iter->tr) { |
@@ -1941,9 +1969,27 @@ static int s_show(struct seq_file *m, void *v) | |||
1941 | if (!(trace_flags & TRACE_ITER_VERBOSE)) | 1969 | if (!(trace_flags & TRACE_ITER_VERBOSE)) |
1942 | print_func_help_header(m); | 1970 | print_func_help_header(m); |
1943 | } | 1971 | } |
1972 | } else if (iter->leftover) { | ||
1973 | /* | ||
1974 | * If we filled the seq_file buffer earlier, we | ||
1975 | * want to just show it now. | ||
1976 | */ | ||
1977 | ret = trace_print_seq(m, &iter->seq); | ||
1978 | |||
1979 | /* ret should this time be zero, but you never know */ | ||
1980 | iter->leftover = ret; | ||
1981 | |||
1944 | } else { | 1982 | } else { |
1945 | print_trace_line(iter); | 1983 | print_trace_line(iter); |
1946 | trace_print_seq(m, &iter->seq); | 1984 | ret = trace_print_seq(m, &iter->seq); |
1985 | /* | ||
1986 | * If we overflow the seq_file buffer, then it will | ||
1987 | * ask us for this data again at start up. | ||
1988 | * Use that instead. | ||
1989 | * ret is 0 if seq_file write succeeded. | ||
1990 | * -1 otherwise. | ||
1991 | */ | ||
1992 | iter->leftover = ret; | ||
1947 | } | 1993 | } |
1948 | 1994 | ||
1949 | return 0; | 1995 | return 0; |
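The `iter->leftover` machinery threaded through `s_next()`, `s_start()`, and `s_show()` fixes silent record loss: when a formatted trace entry does not fit in the `seq_file` buffer, `seq_read()` discards the partial output and calls `->start()` again, but the old code had already advanced the ring-buffer iterator, so the oversized record simply vanished. `trace_print_seq()` now reports whether the copy into the `seq_file` succeeded (0, or -1 otherwise, per the comment above); on failure the flag records that `iter->seq` still holds the formatted entry, `s_start()` hands back the iterator without consuming another record, and `s_show()` replays the buffered output on the retry.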
@@ -2253,7 +2299,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |||
2253 | mutex_lock(&tracing_cpumask_update_lock); | 2299 | mutex_lock(&tracing_cpumask_update_lock); |
2254 | 2300 | ||
2255 | local_irq_disable(); | 2301 | local_irq_disable(); |
2256 | __raw_spin_lock(&ftrace_max_lock); | 2302 | arch_spin_lock(&ftrace_max_lock); |
2257 | for_each_tracing_cpu(cpu) { | 2303 | for_each_tracing_cpu(cpu) { |
2258 | /* | 2304 | /* |
2259 | * Increase/decrease the disabled counter if we are | 2305 | * Increase/decrease the disabled counter if we are |
@@ -2268,7 +2314,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |||
2268 | atomic_dec(&global_trace.data[cpu]->disabled); | 2314 | atomic_dec(&global_trace.data[cpu]->disabled); |
2269 | } | 2315 | } |
2270 | } | 2316 | } |
2271 | __raw_spin_unlock(&ftrace_max_lock); | 2317 | arch_spin_unlock(&ftrace_max_lock); |
2272 | local_irq_enable(); | 2318 | local_irq_enable(); |
2273 | 2319 | ||
2274 | cpumask_copy(tracing_cpumask, tracing_cpumask_new); | 2320 | cpumask_copy(tracing_cpumask, tracing_cpumask_new); |
@@ -2290,67 +2336,49 @@ static const struct file_operations tracing_cpumask_fops = { | |||
2290 | .write = tracing_cpumask_write, | 2336 | .write = tracing_cpumask_write, |
2291 | }; | 2337 | }; |
2292 | 2338 | ||
2293 | static ssize_t | 2339 | static int tracing_trace_options_show(struct seq_file *m, void *v) |
2294 | tracing_trace_options_read(struct file *filp, char __user *ubuf, | ||
2295 | size_t cnt, loff_t *ppos) | ||
2296 | { | 2340 | { |
2297 | struct tracer_opt *trace_opts; | 2341 | struct tracer_opt *trace_opts; |
2298 | u32 tracer_flags; | 2342 | u32 tracer_flags; |
2299 | int len = 0; | ||
2300 | char *buf; | ||
2301 | int r = 0; | ||
2302 | int i; | 2343 | int i; |
2303 | 2344 | ||
2304 | |||
2305 | /* calculate max size */ | ||
2306 | for (i = 0; trace_options[i]; i++) { | ||
2307 | len += strlen(trace_options[i]); | ||
2308 | len += 3; /* "no" and newline */ | ||
2309 | } | ||
2310 | |||
2311 | mutex_lock(&trace_types_lock); | 2345 | mutex_lock(&trace_types_lock); |
2312 | tracer_flags = current_trace->flags->val; | 2346 | tracer_flags = current_trace->flags->val; |
2313 | trace_opts = current_trace->flags->opts; | 2347 | trace_opts = current_trace->flags->opts; |
2314 | 2348 | ||
2315 | /* | ||
2316 | * Increase the size with names of options specific | ||
2317 | * of the current tracer. | ||
2318 | */ | ||
2319 | for (i = 0; trace_opts[i].name; i++) { | ||
2320 | len += strlen(trace_opts[i].name); | ||
2321 | len += 3; /* "no" and newline */ | ||
2322 | } | ||
2323 | |||
2324 | /* +1 for \0 */ | ||
2325 | buf = kmalloc(len + 1, GFP_KERNEL); | ||
2326 | if (!buf) { | ||
2327 | mutex_unlock(&trace_types_lock); | ||
2328 | return -ENOMEM; | ||
2329 | } | ||
2330 | |||
2331 | for (i = 0; trace_options[i]; i++) { | 2349 | for (i = 0; trace_options[i]; i++) { |
2332 | if (trace_flags & (1 << i)) | 2350 | if (trace_flags & (1 << i)) |
2333 | r += sprintf(buf + r, "%s\n", trace_options[i]); | 2351 | seq_printf(m, "%s\n", trace_options[i]); |
2334 | else | 2352 | else |
2335 | r += sprintf(buf + r, "no%s\n", trace_options[i]); | 2353 | seq_printf(m, "no%s\n", trace_options[i]); |
2336 | } | 2354 | } |
2337 | 2355 | ||
2338 | for (i = 0; trace_opts[i].name; i++) { | 2356 | for (i = 0; trace_opts[i].name; i++) { |
2339 | if (tracer_flags & trace_opts[i].bit) | 2357 | if (tracer_flags & trace_opts[i].bit) |
2340 | r += sprintf(buf + r, "%s\n", | 2358 | seq_printf(m, "%s\n", trace_opts[i].name); |
2341 | trace_opts[i].name); | ||
2342 | else | 2359 | else |
2343 | r += sprintf(buf + r, "no%s\n", | 2360 | seq_printf(m, "no%s\n", trace_opts[i].name); |
2344 | trace_opts[i].name); | ||
2345 | } | 2361 | } |
2346 | mutex_unlock(&trace_types_lock); | 2362 | mutex_unlock(&trace_types_lock); |
2347 | 2363 | ||
2348 | WARN_ON(r >= len + 1); | 2364 | return 0; |
2365 | } | ||
2349 | 2366 | ||
2350 | r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 2367 | static int __set_tracer_option(struct tracer *trace, |
2368 | struct tracer_flags *tracer_flags, | ||
2369 | struct tracer_opt *opts, int neg) | ||
2370 | { | ||
2371 | int ret; | ||
2351 | 2372 | ||
2352 | kfree(buf); | 2373 | ret = trace->set_flag(tracer_flags->val, opts->bit, !neg); |
2353 | return r; | 2374 | if (ret) |
2375 | return ret; | ||
2376 | |||
2377 | if (neg) | ||
2378 | tracer_flags->val &= ~opts->bit; | ||
2379 | else | ||
2380 | tracer_flags->val |= opts->bit; | ||
2381 | return 0; | ||
2354 | } | 2382 | } |
2355 | 2383 | ||
2356 | /* Try to assign a tracer specific option */ | 2384 | /* Try to assign a tracer specific option */ |
@@ -2358,33 +2386,17 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg) | |||
2358 | { | 2386 | { |
2359 | struct tracer_flags *tracer_flags = trace->flags; | 2387 | struct tracer_flags *tracer_flags = trace->flags; |
2360 | struct tracer_opt *opts = NULL; | 2388 | struct tracer_opt *opts = NULL; |
2361 | int ret = 0, i = 0; | 2389 | int i; |
2362 | int len; | ||
2363 | 2390 | ||
2364 | for (i = 0; tracer_flags->opts[i].name; i++) { | 2391 | for (i = 0; tracer_flags->opts[i].name; i++) { |
2365 | opts = &tracer_flags->opts[i]; | 2392 | opts = &tracer_flags->opts[i]; |
2366 | len = strlen(opts->name); | ||
2367 | 2393 | ||
2368 | if (strncmp(cmp, opts->name, len) == 0) { | 2394 | if (strcmp(cmp, opts->name) == 0) |
2369 | ret = trace->set_flag(tracer_flags->val, | 2395 | return __set_tracer_option(trace, trace->flags, |
2370 | opts->bit, !neg); | 2396 | opts, neg); |
2371 | break; | ||
2372 | } | ||
2373 | } | 2397 | } |
2374 | /* Not found */ | ||
2375 | if (!tracer_flags->opts[i].name) | ||
2376 | return -EINVAL; | ||
2377 | 2398 | ||
2378 | /* Refused to handle */ | 2399 | return -EINVAL; |
2379 | if (ret) | ||
2380 | return ret; | ||
2381 | |||
2382 | if (neg) | ||
2383 | tracer_flags->val &= ~opts->bit; | ||
2384 | else | ||
2385 | tracer_flags->val |= opts->bit; | ||
2386 | |||
2387 | return 0; | ||
2388 | } | 2400 | } |
2389 | 2401 | ||
2390 | static void set_tracer_flags(unsigned int mask, int enabled) | 2402 | static void set_tracer_flags(unsigned int mask, int enabled) |
@@ -2404,7 +2416,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
2404 | size_t cnt, loff_t *ppos) | 2416 | size_t cnt, loff_t *ppos) |
2405 | { | 2417 | { |
2406 | char buf[64]; | 2418 | char buf[64]; |
2407 | char *cmp = buf; | 2419 | char *cmp; |
2408 | int neg = 0; | 2420 | int neg = 0; |
2409 | int ret; | 2421 | int ret; |
2410 | int i; | 2422 | int i; |
@@ -2416,16 +2428,15 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
2416 | return -EFAULT; | 2428 | return -EFAULT; |
2417 | 2429 | ||
2418 | buf[cnt] = 0; | 2430 | buf[cnt] = 0; |
2431 | cmp = strstrip(buf); | ||
2419 | 2432 | ||
2420 | if (strncmp(buf, "no", 2) == 0) { | 2433 | if (strncmp(cmp, "no", 2) == 0) { |
2421 | neg = 1; | 2434 | neg = 1; |
2422 | cmp += 2; | 2435 | cmp += 2; |
2423 | } | 2436 | } |
2424 | 2437 | ||
2425 | for (i = 0; trace_options[i]; i++) { | 2438 | for (i = 0; trace_options[i]; i++) { |
2426 | int len = strlen(trace_options[i]); | 2439 | if (strcmp(cmp, trace_options[i]) == 0) { |
2427 | |||
2428 | if (strncmp(cmp, trace_options[i], len) == 0) { | ||
2429 | set_tracer_flags(1 << i, !neg); | 2440 | set_tracer_flags(1 << i, !neg); |
2430 | break; | 2441 | break; |
2431 | } | 2442 | } |
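The option parsing in `tracing_trace_options_write()` previously had to use length-bounded `strncmp()` because `echo` leaves a trailing newline in the buffer, and that prefix matching would also accept garbage suffixes (e.g. `blockbogus` toggling `block`). Running the input through `strstrip()` first removes the surrounding whitespace, which is what makes the exact `strcmp()` here, and in `set_tracer_option()` above, both possible and stricter.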
@@ -2440,14 +2451,23 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
2440 | return ret; | 2451 | return ret; |
2441 | } | 2452 | } |
2442 | 2453 | ||
2443 | filp->f_pos += cnt; | 2454 | *ppos += cnt; |
2444 | 2455 | ||
2445 | return cnt; | 2456 | return cnt; |
2446 | } | 2457 | } |
2447 | 2458 | ||
2459 | static int tracing_trace_options_open(struct inode *inode, struct file *file) | ||
2460 | { | ||
2461 | if (tracing_disabled) | ||
2462 | return -ENODEV; | ||
2463 | return single_open(file, tracing_trace_options_show, NULL); | ||
2464 | } | ||
2465 | |||
2448 | static const struct file_operations tracing_iter_fops = { | 2466 | static const struct file_operations tracing_iter_fops = { |
2449 | .open = tracing_open_generic, | 2467 | .open = tracing_trace_options_open, |
2450 | .read = tracing_trace_options_read, | 2468 | .read = seq_read, |
2469 | .llseek = seq_lseek, | ||
2470 | .release = single_release, | ||
2451 | .write = tracing_trace_options_write, | 2471 | .write = tracing_trace_options_write, |
2452 | }; | 2472 | }; |
2453 | 2473 | ||
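Converting the options file to `seq_file` retires the hand-rolled size estimation, `kmalloc()`, and `sprintf()` offset accounting, none of which can overflow under `single_open()` because `seq_file` grows its buffer and retries as needed; `trace_clock` gets the same treatment further down. A minimal sketch of the `single_open()` pattern these hunks follow:

```c
#include <linux/seq_file.h>
#include <linux/fs.h>

/* Emit the whole pseudo-file in one shot; seq_file sizes the
 * buffer and re-invokes this on overflow. */
static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "state: %d\n", 42);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

static const struct file_operations demo_fops = {
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};
```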
@@ -2582,7 +2602,7 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf, | |||
2582 | } | 2602 | } |
2583 | mutex_unlock(&trace_types_lock); | 2603 | mutex_unlock(&trace_types_lock); |
2584 | 2604 | ||
2585 | filp->f_pos += cnt; | 2605 | *ppos += cnt; |
2586 | 2606 | ||
2587 | return cnt; | 2607 | return cnt; |
2588 | } | 2608 | } |
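The recurring `filp->f_pos` to `*ppos` fix in the write handlers matters beyond style: the VFS passes the offset to use through `ppos` and writes it back to `f_pos` itself after the handler returns, so bumping `f_pos` directly is redundant for `write(2)` and wrong for `pwrite(2)`, which must not move the file position at all. The corrected shape, as a sketch:

```c
#include <linux/fs.h>

static ssize_t demo_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/* ... consume cnt bytes from ubuf ... */

	*ppos += cnt;	/* advance the offset the VFS handed us,
			 * never filp->f_pos directly */
	return cnt;
}
```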
@@ -2764,7 +2784,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf, | |||
2764 | if (err) | 2784 | if (err) |
2765 | return err; | 2785 | return err; |
2766 | 2786 | ||
2767 | filp->f_pos += ret; | 2787 | *ppos += ret; |
2768 | 2788 | ||
2769 | return ret; | 2789 | return ret; |
2770 | } | 2790 | } |
@@ -2897,6 +2917,10 @@ static int tracing_release_pipe(struct inode *inode, struct file *file) | |||
2897 | else | 2917 | else |
2898 | cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask); | 2918 | cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask); |
2899 | 2919 | ||
2920 | |||
2921 | if (iter->trace->pipe_close) | ||
2922 | iter->trace->pipe_close(iter); | ||
2923 | |||
2900 | mutex_unlock(&trace_types_lock); | 2924 | mutex_unlock(&trace_types_lock); |
2901 | 2925 | ||
2902 | free_cpumask_var(iter->started); | 2926 | free_cpumask_var(iter->started); |
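`tracing_release_pipe()` gains a `->pipe_close()` callback so a tracer can tear down per-reader state when a `trace_pipe` reader goes away. A sketch of a tracer wiring it up; the field exists in this tree, but the handler body and tracer are illustrative:

```c
static void demo_pipe_close(struct trace_iterator *iter)
{
	/* free whatever this tracer hung off iter->private */
}

static struct tracer demo_tracer __read_mostly = {
	.name       = "demo",
	.pipe_close = demo_pipe_close,
};
```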
@@ -3103,7 +3127,7 @@ static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, | |||
3103 | __free_page(spd->pages[idx]); | 3127 | __free_page(spd->pages[idx]); |
3104 | } | 3128 | } |
3105 | 3129 | ||
3106 | static struct pipe_buf_operations tracing_pipe_buf_ops = { | 3130 | static const struct pipe_buf_operations tracing_pipe_buf_ops = { |
3107 | .can_merge = 0, | 3131 | .can_merge = 0, |
3108 | .map = generic_pipe_buf_map, | 3132 | .map = generic_pipe_buf_map, |
3109 | .unmap = generic_pipe_buf_unmap, | 3133 | .unmap = generic_pipe_buf_unmap, |
@@ -3299,7 +3323,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
3299 | } | 3323 | } |
3300 | } | 3324 | } |
3301 | 3325 | ||
3302 | filp->f_pos += cnt; | 3326 | *ppos += cnt; |
3303 | 3327 | ||
3304 | /* If check pages failed, return ENOMEM */ | 3328 | /* If check pages failed, return ENOMEM */ |
3305 | if (tracing_disabled) | 3329 | if (tracing_disabled) |
@@ -3334,7 +3358,6 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, | |||
3334 | size_t cnt, loff_t *fpos) | 3358 | size_t cnt, loff_t *fpos) |
3335 | { | 3359 | { |
3336 | char *buf; | 3360 | char *buf; |
3337 | char *end; | ||
3338 | 3361 | ||
3339 | if (tracing_disabled) | 3362 | if (tracing_disabled) |
3340 | return -EINVAL; | 3363 | return -EINVAL; |
@@ -3342,7 +3365,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, | |||
3342 | if (cnt > TRACE_BUF_SIZE) | 3365 | if (cnt > TRACE_BUF_SIZE) |
3343 | cnt = TRACE_BUF_SIZE; | 3366 | cnt = TRACE_BUF_SIZE; |
3344 | 3367 | ||
3345 | buf = kmalloc(cnt + 1, GFP_KERNEL); | 3368 | buf = kmalloc(cnt + 2, GFP_KERNEL); |
3346 | if (buf == NULL) | 3369 | if (buf == NULL) |
3347 | return -ENOMEM; | 3370 | return -ENOMEM; |
3348 | 3371 | ||
@@ -3350,35 +3373,31 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, | |||
3350 | kfree(buf); | 3373 | kfree(buf); |
3351 | return -EFAULT; | 3374 | return -EFAULT; |
3352 | } | 3375 | } |
3376 | if (buf[cnt-1] != '\n') { | ||
3377 | buf[cnt] = '\n'; | ||
3378 | buf[cnt+1] = '\0'; | ||
3379 | } else | ||
3380 | buf[cnt] = '\0'; | ||
3353 | 3381 | ||
3354 | /* Cut from the first nil or newline. */ | 3382 | cnt = mark_printk("%s", buf); |
3355 | buf[cnt] = '\0'; | ||
3356 | end = strchr(buf, '\n'); | ||
3357 | if (end) | ||
3358 | *end = '\0'; | ||
3359 | |||
3360 | cnt = mark_printk("%s\n", buf); | ||
3361 | kfree(buf); | 3383 | kfree(buf); |
3362 | *fpos += cnt; | 3384 | *fpos += cnt; |
3363 | 3385 | ||
3364 | return cnt; | 3386 | return cnt; |
3365 | } | 3387 | } |
3366 | 3388 | ||
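`tracing_mark_write()` used to cut the user's string at the first newline and discard the rest, then unconditionally re-append one via `mark_printk("%s\n", ...)`; the rewrite keeps multi-line writes intact and appends a newline only when the input does not already end with one, which is also why the `kmalloc()` in the earlier hunk grows from `cnt + 1` to `cnt + 2` (room for the possible `'\n'` plus the terminating `'\0'`).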
3367 | static ssize_t tracing_clock_read(struct file *filp, char __user *ubuf, | 3389 | static int tracing_clock_show(struct seq_file *m, void *v) |
3368 | size_t cnt, loff_t *ppos) | ||
3369 | { | 3390 | { |
3370 | char buf[64]; | ||
3371 | int bufiter = 0; | ||
3372 | int i; | 3391 | int i; |
3373 | 3392 | ||
3374 | for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) | 3393 | for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) |
3375 | bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, | 3394 | seq_printf(m, |
3376 | "%s%s%s%s", i ? " " : "", | 3395 | "%s%s%s%s", i ? " " : "", |
3377 | i == trace_clock_id ? "[" : "", trace_clocks[i].name, | 3396 | i == trace_clock_id ? "[" : "", trace_clocks[i].name, |
3378 | i == trace_clock_id ? "]" : ""); | 3397 | i == trace_clock_id ? "]" : ""); |
3379 | bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, "\n"); | 3398 | seq_putc(m, '\n'); |
3380 | 3399 | ||
3381 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, bufiter); | 3400 | return 0; |
3382 | } | 3401 | } |
3383 | 3402 | ||
3384 | static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | 3403 | static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, |
@@ -3420,6 +3439,13 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | |||
3420 | return cnt; | 3439 | return cnt; |
3421 | } | 3440 | } |
3422 | 3441 | ||
3442 | static int tracing_clock_open(struct inode *inode, struct file *file) | ||
3443 | { | ||
3444 | if (tracing_disabled) | ||
3445 | return -ENODEV; | ||
3446 | return single_open(file, tracing_clock_show, NULL); | ||
3447 | } | ||
3448 | |||
3423 | static const struct file_operations tracing_max_lat_fops = { | 3449 | static const struct file_operations tracing_max_lat_fops = { |
3424 | .open = tracing_open_generic, | 3450 | .open = tracing_open_generic, |
3425 | .read = tracing_max_lat_read, | 3451 | .read = tracing_max_lat_read, |
@@ -3458,8 +3484,10 @@ static const struct file_operations tracing_mark_fops = { | |||
3458 | }; | 3484 | }; |
3459 | 3485 | ||
3460 | static const struct file_operations trace_clock_fops = { | 3486 | static const struct file_operations trace_clock_fops = { |
3461 | .open = tracing_open_generic, | 3487 | .open = tracing_clock_open, |
3462 | .read = tracing_clock_read, | 3488 | .read = seq_read, |
3489 | .llseek = seq_lseek, | ||
3490 | .release = single_release, | ||
3463 | .write = tracing_clock_write, | 3491 | .write = tracing_clock_write, |
3464 | }; | 3492 | }; |
3465 | 3493 | ||
@@ -3589,7 +3617,7 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, | |||
3589 | } | 3617 | } |
3590 | 3618 | ||
3591 | /* Pipe buffer operations for a buffer. */ | 3619 | /* Pipe buffer operations for a buffer. */ |
3592 | static struct pipe_buf_operations buffer_pipe_buf_ops = { | 3620 | static const struct pipe_buf_operations buffer_pipe_buf_ops = { |
3593 | .can_merge = 0, | 3621 | .can_merge = 0, |
3594 | .map = generic_pipe_buf_map, | 3622 | .map = generic_pipe_buf_map, |
3595 | .unmap = generic_pipe_buf_unmap, | 3623 | .unmap = generic_pipe_buf_unmap, |
@@ -3730,7 +3758,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf, | |||
3730 | 3758 | ||
3731 | s = kmalloc(sizeof(*s), GFP_KERNEL); | 3759 | s = kmalloc(sizeof(*s), GFP_KERNEL); |
3732 | if (!s) | 3760 | if (!s) |
3733 | return ENOMEM; | 3761 | return -ENOMEM; |
3734 | 3762 | ||
3735 | trace_seq_init(s); | 3763 | trace_seq_init(s); |
3736 | 3764 | ||
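The `tracing_stats_read()` hunk is a classic sign error: returning `ENOMEM` (positive 12) from a `->read()` handler tells userspace the read succeeded and transferred 12 bytes; error paths must return negative errno values, hence `-ENOMEM`.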
@@ -3920,39 +3948,16 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
3920 | if (ret < 0) | 3948 | if (ret < 0) |
3921 | return ret; | 3949 | return ret; |
3922 | 3950 | ||
3923 | ret = 0; | 3951 | if (val != 0 && val != 1) |
3924 | switch (val) { | 3952 | return -EINVAL; |
3925 | case 0: | ||
3926 | /* do nothing if already cleared */ | ||
3927 | if (!(topt->flags->val & topt->opt->bit)) | ||
3928 | break; | ||
3929 | |||
3930 | mutex_lock(&trace_types_lock); | ||
3931 | if (current_trace->set_flag) | ||
3932 | ret = current_trace->set_flag(topt->flags->val, | ||
3933 | topt->opt->bit, 0); | ||
3934 | mutex_unlock(&trace_types_lock); | ||
3935 | if (ret) | ||
3936 | return ret; | ||
3937 | topt->flags->val &= ~topt->opt->bit; | ||
3938 | break; | ||
3939 | case 1: | ||
3940 | /* do nothing if already set */ | ||
3941 | if (topt->flags->val & topt->opt->bit) | ||
3942 | break; | ||
3943 | 3953 | ||
3954 | if (!!(topt->flags->val & topt->opt->bit) != val) { | ||
3944 | mutex_lock(&trace_types_lock); | 3955 | mutex_lock(&trace_types_lock); |
3945 | if (current_trace->set_flag) | 3956 | ret = __set_tracer_option(current_trace, topt->flags, |
3946 | ret = current_trace->set_flag(topt->flags->val, | 3957 | topt->opt, !val); |
3947 | topt->opt->bit, 1); | ||
3948 | mutex_unlock(&trace_types_lock); | 3958 | mutex_unlock(&trace_types_lock); |
3949 | if (ret) | 3959 | if (ret) |
3950 | return ret; | 3960 | return ret; |
3951 | topt->flags->val |= topt->opt->bit; | ||
3952 | break; | ||
3953 | |||
3954 | default: | ||
3955 | return -EINVAL; | ||
3956 | } | 3961 | } |
3957 | 3962 | ||
3958 | *ppos += cnt; | 3963 | *ppos += cnt; |
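The two nearly identical switch arms in `trace_options_write()` collapse into the `__set_tracer_option()` helper factored out earlier in this diff. The remaining guard, `if (!!(topt->flags->val & topt->opt->bit) != val)`, double-negates the bit test to normalize it to 0 or 1 before comparing against the validated `val`, preserving the old short-circuit of doing nothing when the flag already has the requested state.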
@@ -4279,8 +4284,8 @@ trace_printk_seq(struct trace_seq *s) | |||
4279 | 4284 | ||
4280 | static void __ftrace_dump(bool disable_tracing) | 4285 | static void __ftrace_dump(bool disable_tracing) |
4281 | { | 4286 | { |
4282 | static raw_spinlock_t ftrace_dump_lock = | 4287 | static arch_spinlock_t ftrace_dump_lock = |
4283 | (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 4288 | (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
4284 | /* use static because iter can be a bit big for the stack */ | 4289 | /* use static because iter can be a bit big for the stack */ |
4285 | static struct trace_iterator iter; | 4290 | static struct trace_iterator iter; |
4286 | unsigned int old_userobj; | 4291 | unsigned int old_userobj; |
@@ -4290,7 +4295,7 @@ static void __ftrace_dump(bool disable_tracing) | |||
4290 | 4295 | ||
4291 | /* only one dump */ | 4296 | /* only one dump */ |
4292 | local_irq_save(flags); | 4297 | local_irq_save(flags); |
4293 | __raw_spin_lock(&ftrace_dump_lock); | 4298 | arch_spin_lock(&ftrace_dump_lock); |
4294 | if (dump_ran) | 4299 | if (dump_ran) |
4295 | goto out; | 4300 | goto out; |
4296 | 4301 | ||
@@ -4365,7 +4370,7 @@ static void __ftrace_dump(bool disable_tracing) | |||
4365 | } | 4370 | } |
4366 | 4371 | ||
4367 | out: | 4372 | out: |
4368 | __raw_spin_unlock(&ftrace_dump_lock); | 4373 | arch_spin_unlock(&ftrace_dump_lock); |
4369 | local_irq_restore(flags); | 4374 | local_irq_restore(flags); |
4370 | } | 4375 | } |
4371 | 4376 | ||
@@ -4426,7 +4431,7 @@ __init static int tracer_alloc_buffers(void) | |||
4426 | /* Allocate the first page for all buffers */ | 4431 | /* Allocate the first page for all buffers */ |
4427 | for_each_tracing_cpu(i) { | 4432 | for_each_tracing_cpu(i) { |
4428 | global_trace.data[i] = &per_cpu(global_trace_cpu, i); | 4433 | global_trace.data[i] = &per_cpu(global_trace_cpu, i); |
4429 | max_tr.data[i] = &per_cpu(max_data, i); | 4434 | max_tr.data[i] = &per_cpu(max_tr_data, i); |
4430 | } | 4435 | } |
4431 | 4436 | ||
4432 | trace_init_cmdlines(); | 4437 | trace_init_cmdlines(); |