Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	318
1 file changed, 159 insertions, 159 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b20d3ec75de9..0df1b0f2cb9e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -12,7 +12,7 @@
  * Copyright (C) 2004 William Lee Irwin III
  */
 #include <linux/ring_buffer.h>
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
 #include <linux/stacktrace.h>
 #include <linux/writeback.h>
 #include <linux/kallsyms.h>
@@ -86,17 +86,17 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  */
 static int tracing_disabled = 1;
 
-DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
+DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 
 static inline void ftrace_disable_cpu(void)
 {
 	preempt_disable();
-	local_inc(&__get_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
 }
 
 static inline void ftrace_enable_cpu(void)
 {
-	local_dec(&__get_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
 	preempt_enable();
 }
 
@@ -129,7 +129,7 @@ static int tracing_set_tracer(const char *buf);
 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
 static char *default_bootup_tracer;
 
-static int __init set_ftrace(char *str)
+static int __init set_cmdline_ftrace(char *str)
 {
 	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
 	default_bootup_tracer = bootup_tracer_buf;
@@ -137,7 +137,7 @@ static int __init set_ftrace(char *str)
 	ring_buffer_expanded = 1;
 	return 1;
 }
-__setup("ftrace=", set_ftrace);
+__setup("ftrace=", set_cmdline_ftrace);
 
 static int __init set_ftrace_dump_on_oops(char *str)
 {
@@ -203,7 +203,7 @@ cycle_t ftrace_now(int cpu)
  */
 static struct trace_array max_tr;
 
-static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
+static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
 
 /* tracer_enabled is used to toggle activation of a tracer */
 static int tracer_enabled = 1;
@@ -313,7 +313,6 @@ static const char *trace_options[] = {
 	"bin",
 	"block",
 	"stacktrace",
-	"sched-tree",
 	"trace_printk",
 	"ftrace_preempt",
 	"branch",
@@ -493,15 +492,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
  * protected by per_cpu spinlocks. But the action of the swap
  * needs its own lock.
  *
- * This is defined as a raw_spinlock_t in order to help
+ * This is defined as a arch_spinlock_t in order to help
  * with performance when lockdep debugging is enabled.
  *
  * It is also used in other places outside the update_max_tr
  * so it needs to be defined outside of the
  * CONFIG_TRACER_MAX_TRACE.
  */
-static raw_spinlock_t ftrace_max_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t ftrace_max_lock =
+	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly tracing_max_latency;
@@ -555,13 +554,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 
 	tr->buffer = max_tr.buffer;
 	max_tr.buffer = buf;
 
 	__update_max_tr(tr, tsk, cpu);
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 }
 
 /**
@@ -581,7 +580,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 
 	ftrace_disable_cpu();
 
@@ -603,7 +602,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 
 	__update_max_tr(tr, tsk, cpu);
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
@@ -802,7 +801,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
-static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
 /* temporary disable recording */
 static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -915,7 +914,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 	 * nor do we want to disable interrupts,
 	 * so if we miss here, then better luck next time.
 	 */
-	if (!__raw_spin_trylock(&trace_cmdline_lock))
+	if (!arch_spin_trylock(&trace_cmdline_lock))
 		return;
 
 	idx = map_pid_to_cmdline[tsk->pid];
@@ -940,7 +939,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 
 	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
 
-	__raw_spin_unlock(&trace_cmdline_lock);
+	arch_spin_unlock(&trace_cmdline_lock);
 }
 
 void trace_find_cmdline(int pid, char comm[])
@@ -958,14 +957,14 @@ void trace_find_cmdline(int pid, char comm[])
 	}
 
 	preempt_disable();
-	__raw_spin_lock(&trace_cmdline_lock);
+	arch_spin_lock(&trace_cmdline_lock);
 	map = map_pid_to_cmdline[pid];
 	if (map != NO_CMDLINE_MAP)
 		strcpy(comm, saved_cmdlines[map]);
 	else
 		strcpy(comm, "<...>");
 
-	__raw_spin_unlock(&trace_cmdline_lock);
+	arch_spin_unlock(&trace_cmdline_lock);
 	preempt_enable();
 }
 
@@ -1085,7 +1084,7 @@ trace_function(struct trace_array *tr,
 	struct ftrace_entry *entry;
 
 	/* If we are reading the ring buffer, don't trace */
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@ -1151,6 +1150,22 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
 	__ftrace_trace_stack(tr->buffer, flags, skip, pc);
 }
 
+/**
+ * trace_dump_stack - record a stack back trace in the trace buffer
+ */
+void trace_dump_stack(void)
+{
+	unsigned long flags;
+
+	if (tracing_disabled || tracing_selftest_running)
+		return;
+
+	local_save_flags(flags);
+
+	/* skipping 3 traces, seems to get us at the caller of this function */
+	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
+}
+
 void
 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
@@ -1251,8 +1266,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
  */
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
-	static raw_spinlock_t trace_buf_lock =
-		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t trace_buf_lock =
+		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	static u32 trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_bprint;
@@ -1283,7 +1298,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
 	/* Lockdep uses trace_printk for lock tracing */
 	local_irq_save(flags);
-	__raw_spin_lock(&trace_buf_lock);
+	arch_spin_lock(&trace_buf_lock);
 	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
 	if (len > TRACE_BUF_SIZE || len < 0)
@@ -1304,7 +1319,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 		ring_buffer_unlock_commit(buffer, event);
 
 out_unlock:
-	__raw_spin_unlock(&trace_buf_lock);
+	arch_spin_unlock(&trace_buf_lock);
 	local_irq_restore(flags);
 
 out:
@@ -1334,7 +1349,7 @@ int trace_array_printk(struct trace_array *tr,
 int trace_array_vprintk(struct trace_array *tr,
 			unsigned long ip, const char *fmt, va_list args)
 {
-	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 	static char trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_print;
@@ -1360,12 +1375,9 @@ int trace_array_vprintk(struct trace_array *tr,
 
 	pause_graph_tracing();
 	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&trace_buf_lock);
+	arch_spin_lock(&trace_buf_lock);
 	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
-	len = min(len, TRACE_BUF_SIZE-1);
-	trace_buf[len] = 0;
-
 	size = sizeof(*entry) + len + 1;
 	buffer = tr->buffer;
 	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
@@ -1373,15 +1385,15 @@ int trace_array_vprintk(struct trace_array *tr,
 	if (!event)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
 	entry->ip = ip;
 
 	memcpy(&entry->buf, trace_buf, len);
-	entry->buf[len] = 0;
+	entry->buf[len] = '\0';
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
 
 out_unlock:
-	__raw_spin_unlock(&trace_buf_lock);
+	arch_spin_unlock(&trace_buf_lock);
 	raw_local_irq_restore(irq_flags);
 	unpause_graph_tracing();
 out:
@@ -1515,6 +1527,8 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
 	int i = (int)*pos;
 	void *ent;
 
+	WARN_ON_ONCE(iter->leftover);
+
 	(*pos)++;
 
 	/* can't go backwards */
@@ -1613,8 +1627,16 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 			;
 
 	} else {
-		l = *pos - 1;
-		p = s_next(m, p, &l);
+		/*
+		 * If we overflowed the seq_file before, then we want
+		 * to just reuse the trace_seq buffer again.
+		 */
+		if (iter->leftover)
+			p = iter;
+		else {
+			l = *pos - 1;
+			p = s_next(m, p, &l);
+		}
 	}
 
 	trace_event_read_lock();
@@ -1922,6 +1944,7 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
 static int s_show(struct seq_file *m, void *v)
 {
 	struct trace_iterator *iter = v;
+	int ret;
 
 	if (iter->ent == NULL) {
 		if (iter->tr) {
@@ -1941,9 +1964,27 @@ static int s_show(struct seq_file *m, void *v)
 			if (!(trace_flags & TRACE_ITER_VERBOSE))
 				print_func_help_header(m);
 		}
+	} else if (iter->leftover) {
+		/*
+		 * If we filled the seq_file buffer earlier, we
+		 * want to just show it now.
+		 */
+		ret = trace_print_seq(m, &iter->seq);
+
+		/* ret should this time be zero, but you never know */
+		iter->leftover = ret;
+
 	} else {
 		print_trace_line(iter);
-		trace_print_seq(m, &iter->seq);
+		ret = trace_print_seq(m, &iter->seq);
+		/*
+		 * If we overflow the seq_file buffer, then it will
+		 * ask us for this data again at start up.
+		 * Use that instead.
+		 * ret is 0 if seq_file write succeeded.
+		 * -1 otherwise.
+		 */
+		iter->leftover = ret;
 	}
 
 	return 0;
@@ -2253,7 +2294,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	mutex_lock(&tracing_cpumask_update_lock);
 
 	local_irq_disable();
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 	for_each_tracing_cpu(cpu) {
 		/*
 		 * Increase/decrease the disabled counter if we are
@@ -2268,7 +2309,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 			atomic_dec(&global_trace.data[cpu]->disabled);
 		}
 	}
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 	local_irq_enable();
 
 	cpumask_copy(tracing_cpumask, tracing_cpumask_new);
@@ -2290,67 +2331,49 @@ static const struct file_operations tracing_cpumask_fops = {
 	.write = tracing_cpumask_write,
 };
 
-static ssize_t
-tracing_trace_options_read(struct file *filp, char __user *ubuf,
-			size_t cnt, loff_t *ppos)
+static int tracing_trace_options_show(struct seq_file *m, void *v)
 {
 	struct tracer_opt *trace_opts;
 	u32 tracer_flags;
-	int len = 0;
-	char *buf;
-	int r = 0;
 	int i;
 
-
-	/* calculate max size */
-	for (i = 0; trace_options[i]; i++) {
-		len += strlen(trace_options[i]);
-		len += 3; /* "no" and newline */
-	}
-
 	mutex_lock(&trace_types_lock);
 	tracer_flags = current_trace->flags->val;
 	trace_opts = current_trace->flags->opts;
 
-	/*
-	 * Increase the size with names of options specific
-	 * of the current tracer.
-	 */
-	for (i = 0; trace_opts[i].name; i++) {
-		len += strlen(trace_opts[i].name);
-		len += 3; /* "no" and newline */
-	}
-
-	/* +1 for \0 */
-	buf = kmalloc(len + 1, GFP_KERNEL);
-	if (!buf) {
-		mutex_unlock(&trace_types_lock);
-		return -ENOMEM;
-	}
-
 	for (i = 0; trace_options[i]; i++) {
 		if (trace_flags & (1 << i))
-			r += sprintf(buf + r, "%s\n", trace_options[i]);
+			seq_printf(m, "%s\n", trace_options[i]);
 		else
-			r += sprintf(buf + r, "no%s\n", trace_options[i]);
+			seq_printf(m, "no%s\n", trace_options[i]);
 	}
 
 	for (i = 0; trace_opts[i].name; i++) {
 		if (tracer_flags & trace_opts[i].bit)
-			r += sprintf(buf + r, "%s\n",
-				trace_opts[i].name);
+			seq_printf(m, "%s\n", trace_opts[i].name);
 		else
-			r += sprintf(buf + r, "no%s\n",
-				trace_opts[i].name);
+			seq_printf(m, "no%s\n", trace_opts[i].name);
 	}
 	mutex_unlock(&trace_types_lock);
 
-	WARN_ON(r >= len + 1);
+	return 0;
+}
 
-	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+static int __set_tracer_option(struct tracer *trace,
+			       struct tracer_flags *tracer_flags,
+			       struct tracer_opt *opts, int neg)
+{
+	int ret;
 
-	kfree(buf);
-	return r;
+	ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
+	if (ret)
+		return ret;
+
+	if (neg)
+		tracer_flags->val &= ~opts->bit;
+	else
+		tracer_flags->val |= opts->bit;
+	return 0;
 }
 
 /* Try to assign a tracer specific option */
@@ -2358,33 +2381,17 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
 {
 	struct tracer_flags *tracer_flags = trace->flags;
 	struct tracer_opt *opts = NULL;
-	int ret = 0, i = 0;
-	int len;
+	int i;
 
 	for (i = 0; tracer_flags->opts[i].name; i++) {
 		opts = &tracer_flags->opts[i];
-		len = strlen(opts->name);
 
-		if (strncmp(cmp, opts->name, len) == 0) {
-			ret = trace->set_flag(tracer_flags->val,
-					      opts->bit, !neg);
-			break;
-		}
+		if (strcmp(cmp, opts->name) == 0)
+			return __set_tracer_option(trace, trace->flags,
+						   opts, neg);
 	}
-	/* Not found */
-	if (!tracer_flags->opts[i].name)
-		return -EINVAL;
-
-	/* Refused to handle */
-	if (ret)
-		return ret;
-
-	if (neg)
-		tracer_flags->val &= ~opts->bit;
-	else
-		tracer_flags->val |= opts->bit;
 
-	return 0;
+	return -EINVAL;
 }
 
 static void set_tracer_flags(unsigned int mask, int enabled)
@@ -2404,7 +2411,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 			size_t cnt, loff_t *ppos)
 {
 	char buf[64];
-	char *cmp = buf;
+	char *cmp;
 	int neg = 0;
 	int ret;
 	int i;
@@ -2416,16 +2423,15 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 		return -EFAULT;
 
 	buf[cnt] = 0;
+	cmp = strstrip(buf);
 
-	if (strncmp(buf, "no", 2) == 0) {
+	if (strncmp(cmp, "no", 2) == 0) {
 		neg = 1;
 		cmp += 2;
 	}
 
 	for (i = 0; trace_options[i]; i++) {
-		int len = strlen(trace_options[i]);
-
-		if (strncmp(cmp, trace_options[i], len) == 0) {
+		if (strcmp(cmp, trace_options[i]) == 0) {
 			set_tracer_flags(1 << i, !neg);
 			break;
 		}
@@ -2445,9 +2451,18 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static int tracing_trace_options_open(struct inode *inode, struct file *file)
+{
+	if (tracing_disabled)
+		return -ENODEV;
+	return single_open(file, tracing_trace_options_show, NULL);
+}
+
 static const struct file_operations tracing_iter_fops = {
-	.open = tracing_open_generic,
-	.read = tracing_trace_options_read,
+	.open = tracing_trace_options_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
 	.write = tracing_trace_options_write,
 };
 
@@ -2897,6 +2912,10 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
 	else
 		cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask);
 
+
+	if (iter->trace->pipe_close)
+		iter->trace->pipe_close(iter);
+
 	mutex_unlock(&trace_types_lock);
 
 	free_cpumask_var(iter->started);
@@ -3103,7 +3122,7 @@ static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
 	__free_page(spd->pages[idx]);
 }
 
-static struct pipe_buf_operations tracing_pipe_buf_ops = {
+static const struct pipe_buf_operations tracing_pipe_buf_ops = {
 	.can_merge = 0,
 	.map = generic_pipe_buf_map,
 	.unmap = generic_pipe_buf_unmap,
@@ -3334,7 +3353,6 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 					size_t cnt, loff_t *fpos)
 {
 	char *buf;
-	char *end;
 
 	if (tracing_disabled)
 		return -EINVAL;
@@ -3342,7 +3360,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	if (cnt > TRACE_BUF_SIZE)
 		cnt = TRACE_BUF_SIZE;
 
-	buf = kmalloc(cnt + 1, GFP_KERNEL);
+	buf = kmalloc(cnt + 2, GFP_KERNEL);
 	if (buf == NULL)
 		return -ENOMEM;
 
@@ -3350,35 +3368,31 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 		kfree(buf);
 		return -EFAULT;
 	}
+	if (buf[cnt-1] != '\n') {
+		buf[cnt] = '\n';
+		buf[cnt+1] = '\0';
+	} else
+		buf[cnt] = '\0';
 
-	/* Cut from the first nil or newline. */
-	buf[cnt] = '\0';
-	end = strchr(buf, '\n');
-	if (end)
-		*end = '\0';
-
-	cnt = mark_printk("%s\n", buf);
+	cnt = mark_printk("%s", buf);
 	kfree(buf);
 	*fpos += cnt;
 
 	return cnt;
 }
 
-static ssize_t tracing_clock_read(struct file *filp, char __user *ubuf,
-		size_t cnt, loff_t *ppos)
+static int tracing_clock_show(struct seq_file *m, void *v)
 {
-	char buf[64];
-	int bufiter = 0;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
-		bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter,
+		seq_printf(m,
 			"%s%s%s%s", i ? " " : "",
 			i == trace_clock_id ? "[" : "", trace_clocks[i].name,
 			i == trace_clock_id ? "]" : "");
-	bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, "\n");
+	seq_putc(m, '\n');
 
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, bufiter);
+	return 0;
 }
 
 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
@@ -3420,6 +3434,13 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static int tracing_clock_open(struct inode *inode, struct file *file)
+{
+	if (tracing_disabled)
+		return -ENODEV;
+	return single_open(file, tracing_clock_show, NULL);
+}
+
 static const struct file_operations tracing_max_lat_fops = {
 	.open = tracing_open_generic,
 	.read = tracing_max_lat_read,
@@ -3458,8 +3479,10 @@ static const struct file_operations tracing_mark_fops = {
 };
 
 static const struct file_operations trace_clock_fops = {
-	.open = tracing_open_generic,
-	.read = tracing_clock_read,
+	.open = tracing_clock_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
 	.write = tracing_clock_write,
 };
 
@@ -3589,7 +3612,7 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
 }
 
 /* Pipe buffer operations for a buffer. */
-static struct pipe_buf_operations buffer_pipe_buf_ops = {
+static const struct pipe_buf_operations buffer_pipe_buf_ops = {
 	.can_merge = 0,
 	.map = generic_pipe_buf_map,
 	.unmap = generic_pipe_buf_unmap,
@@ -3730,7 +3753,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 
 	s = kmalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
-		return ENOMEM;
+		return -ENOMEM;
 
 	trace_seq_init(s);
 
@@ -3920,39 +3943,16 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	if (ret < 0)
 		return ret;
 
-	ret = 0;
-	switch (val) {
-	case 0:
-		/* do nothing if already cleared */
-		if (!(topt->flags->val & topt->opt->bit))
-			break;
-
-		mutex_lock(&trace_types_lock);
-		if (current_trace->set_flag)
-			ret = current_trace->set_flag(topt->flags->val,
-					topt->opt->bit, 0);
-		mutex_unlock(&trace_types_lock);
-		if (ret)
-			return ret;
-		topt->flags->val &= ~topt->opt->bit;
-		break;
-	case 1:
-		/* do nothing if already set */
-		if (topt->flags->val & topt->opt->bit)
-			break;
+	if (val != 0 && val != 1)
+		return -EINVAL;
 
+	if (!!(topt->flags->val & topt->opt->bit) != val) {
 		mutex_lock(&trace_types_lock);
-		if (current_trace->set_flag)
-			ret = current_trace->set_flag(topt->flags->val,
-					topt->opt->bit, 1);
+		ret = __set_tracer_option(current_trace, topt->flags,
+					  topt->opt, !val);
 		mutex_unlock(&trace_types_lock);
 		if (ret)
 			return ret;
-		topt->flags->val |= topt->opt->bit;
-		break;
-
-	default:
-		return -EINVAL;
 	}
 
 	*ppos += cnt;
@@ -4279,8 +4279,8 @@ trace_printk_seq(struct trace_seq *s)
 
 static void __ftrace_dump(bool disable_tracing)
 {
-	static raw_spinlock_t ftrace_dump_lock =
-		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t ftrace_dump_lock =
+		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
 	unsigned int old_userobj;
@@ -4290,7 +4290,7 @@ static void __ftrace_dump(bool disable_tracing)
 
 	/* only one dump */
 	local_irq_save(flags);
-	__raw_spin_lock(&ftrace_dump_lock);
+	arch_spin_lock(&ftrace_dump_lock);
 	if (dump_ran)
 		goto out;
 
@@ -4365,7 +4365,7 @@ static void __ftrace_dump(bool disable_tracing)
 	}
 
 out:
-	__raw_spin_unlock(&ftrace_dump_lock);
+	arch_spin_unlock(&ftrace_dump_lock);
 	local_irq_restore(flags);
 }
 
@@ -4426,7 +4426,7 @@ __init static int tracer_alloc_buffers(void)
 	/* Allocate the first page for all buffers */
 	for_each_tracing_cpu(i) {
 		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
-		max_tr.data[i] = &per_cpu(max_data, i);
+		max_tr.data[i] = &per_cpu(max_tr_data, i);
 	}
 
 	trace_init_cmdlines();
