Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	153
1 file changed, 89 insertions, 64 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 6c0f6a8a22eb..31118ae16f03 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -86,17 +86,17 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  */
 static int tracing_disabled = 1;
 
-DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
+DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 
 static inline void ftrace_disable_cpu(void)
 {
 	preempt_disable();
-	local_inc(&__get_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
 }
 
 static inline void ftrace_enable_cpu(void)
 {
-	local_dec(&__get_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
 	preempt_enable();
 }
 
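The hunk above drops the local_t counter in favour of a plain per-CPU int manipulated with the __this_cpu_*() accessors; since preemption is disabled around the update, no atomic operation is needed. A minimal sketch of the resulting pattern, with illustrative demo_* names (note that per_cpu_var() is still required in this kernel series, while later kernels take the variable name directly as shown here):

#include <linux/percpu.h>
#include <linux/preempt.h>

/* one disable counter per CPU; plain int, no local_t needed */
static DEFINE_PER_CPU(int, demo_cpu_disabled);

static inline void demo_disable_cpu(void)
{
	preempt_disable();			/* stay on this CPU */
	__this_cpu_inc(demo_cpu_disabled);	/* cheap non-atomic per-CPU add */
}

static inline void demo_enable_cpu(void)
{
	__this_cpu_dec(demo_cpu_disabled);
	preempt_enable();
}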
@@ -129,7 +129,7 @@ static int tracing_set_tracer(const char *buf);
 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
 static char *default_bootup_tracer;
 
-static int __init set_ftrace(char *str)
+static int __init set_cmdline_ftrace(char *str)
 {
 	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
 	default_bootup_tracer = bootup_tracer_buf;
@@ -137,7 +137,7 @@ static int __init set_ftrace(char *str)
 	ring_buffer_expanded = 1;
 	return 1;
 }
-__setup("ftrace=", set_ftrace);
+__setup("ftrace=", set_cmdline_ftrace);
 
 static int __init set_ftrace_dump_on_oops(char *str)
 {
@@ -203,7 +203,7 @@ cycle_t ftrace_now(int cpu)
  */
 static struct trace_array max_tr;
 
-static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
+static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
 
 /* tracer_enabled is used to toggle activation of a tracer */
 static int tracer_enabled = 1;
@@ -415,7 +415,7 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 
 	/* read the non-space input */
 	while (cnt && !isspace(ch)) {
-		if (parser->idx < parser->size)
+		if (parser->idx < parser->size - 1)
 			parser->buffer[parser->idx++] = ch;
 		else {
 			ret = -EINVAL;
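The parser->size - 1 bound is an off-by-one fix: the last byte of the parser buffer is reserved for the terminating NUL written after the loop. The same bounded-copy idea in isolation (hypothetical copy_word() helper, not the trace_parser API):

#include <stddef.h>

/* Copy one space-delimited word into buf; always NUL-terminate.
 * Returns the number of bytes stored, or -1 when the word would overflow. */
static int copy_word(char *buf, size_t size, const char *in)
{
	size_t idx = 0;

	if (!size)
		return -1;
	while (*in && *in != ' ') {
		if (idx < size - 1)	/* keep one byte for the '\0' */
			buf[idx++] = *in++;
		else
			return -1;	/* mirrors the -EINVAL path above */
	}
	buf[idx] = '\0';
	return (int)idx;
}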
@@ -493,15 +493,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
  * protected by per_cpu spinlocks. But the action of the swap
  * needs its own lock.
  *
- * This is defined as a raw_spinlock_t in order to help
+ * This is defined as a arch_spinlock_t in order to help
  * with performance when lockdep debugging is enabled.
  *
  * It is also used in other places outside the update_max_tr
  * so it needs to be defined outside of the
  * CONFIG_TRACER_MAX_TRACE.
  */
-static raw_spinlock_t ftrace_max_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t ftrace_max_lock =
+	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly tracing_max_latency;
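This series renames raw_spinlock_t/__raw_spin_lock() to arch_spinlock_t/arch_spin_lock(), freeing the raw_ names for the non-sleeping lock annotation used by preempt-rt. The semantics are unchanged: the arch lock is the lowest-level primitive and does no irq or preemption handling of its own, which is why every converted call site in this file keeps its explicit local_irq_save()/preempt_disable(). A minimal sketch of the calling convention, assuming the renamed API and a hypothetical demo_lock:

#include <linux/spinlock.h>
#include <linux/irqflags.h>

static arch_spinlock_t demo_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void demo_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* arch_spin_lock() will not do this for us */
	arch_spin_lock(&demo_lock);
	/* ... touch data protected by demo_lock ... */
	arch_spin_unlock(&demo_lock);
	local_irq_restore(flags);
}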
@@ -555,13 +555,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 
 	tr->buffer = max_tr.buffer;
 	max_tr.buffer = buf;
 
 	__update_max_tr(tr, tsk, cpu);
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 }
 
 /**
@@ -581,7 +581,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 
 	ftrace_disable_cpu();
 
@@ -603,7 +603,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 
 	__update_max_tr(tr, tsk, cpu);
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
@@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
-static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
 /* temporary disable recording */
 static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -915,7 +915,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 	 * nor do we want to disable interrupts,
 	 * so if we miss here, then better luck next time.
 	 */
-	if (!__raw_spin_trylock(&trace_cmdline_lock))
+	if (!arch_spin_trylock(&trace_cmdline_lock))
 		return;
 
 	idx = map_pid_to_cmdline[tsk->pid];
@@ -940,7 +940,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 
 	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
 
-	__raw_spin_unlock(&trace_cmdline_lock);
+	arch_spin_unlock(&trace_cmdline_lock);
 }
 
 void trace_find_cmdline(int pid, char comm[])
@@ -958,14 +958,14 @@ void trace_find_cmdline(int pid, char comm[])
 	}
 
 	preempt_disable();
-	__raw_spin_lock(&trace_cmdline_lock);
+	arch_spin_lock(&trace_cmdline_lock);
 	map = map_pid_to_cmdline[pid];
 	if (map != NO_CMDLINE_MAP)
 		strcpy(comm, saved_cmdlines[map]);
 	else
 		strcpy(comm, "<...>");
 
-	__raw_spin_unlock(&trace_cmdline_lock);
+	arch_spin_unlock(&trace_cmdline_lock);
 	preempt_enable();
 }
 
@@ -1085,7 +1085,7 @@ trace_function(struct trace_array *tr,
 	struct ftrace_entry *entry;
 
 	/* If we are reading the ring buffer, don't trace */
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@ -1251,8 +1251,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
  */
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
-	static raw_spinlock_t trace_buf_lock =
-		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t trace_buf_lock =
+		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	static u32 trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_bprint;
@@ -1283,7 +1283,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
 	/* Lockdep uses trace_printk for lock tracing */
 	local_irq_save(flags);
-	__raw_spin_lock(&trace_buf_lock);
+	arch_spin_lock(&trace_buf_lock);
 	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
 	if (len > TRACE_BUF_SIZE || len < 0)
@@ -1304,7 +1304,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	ring_buffer_unlock_commit(buffer, event);
 
 out_unlock:
-	__raw_spin_unlock(&trace_buf_lock);
+	arch_spin_unlock(&trace_buf_lock);
 	local_irq_restore(flags);
 
 out:
@@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_array *tr,
 int trace_array_vprintk(struct trace_array *tr,
 			unsigned long ip, const char *fmt, va_list args)
 {
-	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 	static char trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_print;
@@ -1360,12 +1360,9 @@ int trace_array_vprintk(struct trace_array *tr,
 
 	pause_graph_tracing();
 	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&trace_buf_lock);
+	arch_spin_lock(&trace_buf_lock);
 	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
-	len = min(len, TRACE_BUF_SIZE-1);
-	trace_buf[len] = 0;
-
 	size = sizeof(*entry) + len + 1;
 	buffer = tr->buffer;
 	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
@@ -1373,15 +1370,15 @@ int trace_array_vprintk(struct trace_array *tr,
 	if (!event)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
 	entry->ip = ip;
 
 	memcpy(&entry->buf, trace_buf, len);
-	entry->buf[len] = 0;
+	entry->buf[len] = '\0';
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
 
 out_unlock:
-	__raw_spin_unlock(&trace_buf_lock);
+	arch_spin_unlock(&trace_buf_lock);
 	raw_local_irq_restore(irq_flags);
 	unpause_graph_tracing();
 out:
@@ -1393,7 +1390,7 @@ int trace_array_vprintk(struct trace_array *tr,
 
 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 {
-	return trace_array_printk(&global_trace, ip, fmt, args);
+	return trace_array_vprintk(&global_trace, ip, fmt, args);
 }
 EXPORT_SYMBOL_GPL(trace_vprintk);
 
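The last hunk above is a real bug fix rather than a rename: trace_vprintk() already holds a va_list, but it was forwarding it to trace_array_printk(), a printf-style variadic function, so the va_list was consumed as if it were a single ordinary argument. Pairing the v-variant with the v-variant restores the usual C idiom, sketched here with hypothetical logging functions:

#include <stdarg.h>
#include <stdio.h>

/* the v-variant does the real work on a va_list */
static int demo_vlog(const char *fmt, va_list args)
{
	return vprintf(fmt, args);
}

/* the variadic wrapper only packages its arguments */
static int demo_log(const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = demo_vlog(fmt, args);	/* forward the va_list, never "..." */
	va_end(args);
	return ret;
}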
@@ -1515,6 +1512,8 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
 	int i = (int)*pos;
 	void *ent;
 
+	WARN_ON_ONCE(iter->leftover);
+
 	(*pos)++;
 
 	/* can't go backwards */
@@ -1613,8 +1612,16 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 			;
 
 	} else {
-		l = *pos - 1;
-		p = s_next(m, p, &l);
+		/*
+		 * If we overflowed the seq_file before, then we want
+		 * to just reuse the trace_seq buffer again.
+		 */
+		if (iter->leftover)
+			p = iter;
+		else {
+			l = *pos - 1;
+			p = s_next(m, p, &l);
+		}
 	}
 
 	trace_event_read_lock();
@@ -1922,6 +1929,7 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
 static int s_show(struct seq_file *m, void *v)
 {
 	struct trace_iterator *iter = v;
+	int ret;
 
 	if (iter->ent == NULL) {
 		if (iter->tr) {
@@ -1941,9 +1949,27 @@ static int s_show(struct seq_file *m, void *v)
 			if (!(trace_flags & TRACE_ITER_VERBOSE))
 				print_func_help_header(m);
 		}
+	} else if (iter->leftover) {
+		/*
+		 * If we filled the seq_file buffer earlier, we
+		 * want to just show it now.
+		 */
+		ret = trace_print_seq(m, &iter->seq);
+
+		/* ret should this time be zero, but you never know */
+		iter->leftover = ret;
+
 	} else {
 		print_trace_line(iter);
-		trace_print_seq(m, &iter->seq);
+		ret = trace_print_seq(m, &iter->seq);
+		/*
+		 * If we overflow the seq_file buffer, then it will
+		 * ask us for this data again at start up.
+		 * Use that instead.
+		 * ret is 0 if seq_file write succeeded.
+		 * -1 otherwise.
+		 */
+		iter->leftover = ret;
 	}
 
 	return 0;
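The three hunks above work together: trace_print_seq() now reports whether seq_file accepted the formatted line, an overflow is remembered in iter->leftover, s_start() replays the still-buffered trace_seq on the next pass instead of consuming a new entry, and s_next() warns if leftover output is ever skipped. A schematic of the replay-on-overflow idea, using hypothetical types and callbacks rather than the real seq_file interface:

/* Illustrative only: mirrors the iter->leftover control flow. */
struct demo_iter {
	const char *pending;	/* formatted text not yet accepted by the consumer */
	int leftover;		/* non-zero: pending must be replayed */
};

static int demo_show(struct demo_iter *iter,
		     int (*emit)(const char *),	/* returns 0 = accepted, -1 = buffer full */
		     const char *(*format_next)(void))
{
	if (!iter->leftover)
		iter->pending = format_next();	/* only consume a new record after success */

	/* remember an overflow so the next pass retries the same text */
	iter->leftover = emit(iter->pending);
	return 0;
}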
@@ -1984,11 +2010,9 @@ __tracing_open(struct inode *inode, struct file *file)
 	if (current_trace)
 		*iter->trace = *current_trace;
 
-	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
 		goto fail;
 
-	cpumask_clear(iter->started);
-
 	if (current_trace && current_trace->print_max)
 		iter->tr = &max_tr;
 	else
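zalloc_cpumask_var() hands back an already-zeroed mask, so the separate cpumask_clear() disappears; the same substitution is made in tracer_alloc_buffers() further down. A before/after sketch with hypothetical wrapper functions:

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static int demo_alloc_mask(cpumask_var_t *mask)
{
	/* before: allocate, then clear by hand */
	if (!alloc_cpumask_var(mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_clear(*mask);
	return 0;
}

static int demo_zalloc_mask(cpumask_var_t *mask)
{
	/* after: a single call returns a zeroed mask */
	if (!zalloc_cpumask_var(mask, GFP_KERNEL))
		return -ENOMEM;
	return 0;
}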
@@ -2255,7 +2279,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	mutex_lock(&tracing_cpumask_update_lock);
 
 	local_irq_disable();
-	__raw_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&ftrace_max_lock);
 	for_each_tracing_cpu(cpu) {
 		/*
 		 * Increase/decrease the disabled counter if we are
@@ -2270,7 +2294,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 			atomic_dec(&global_trace.data[cpu]->disabled);
 		}
 	}
-	__raw_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&ftrace_max_lock);
 	local_irq_enable();
 
 	cpumask_copy(tracing_cpumask, tracing_cpumask_new);
@@ -2442,7 +2466,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 		return ret;
 	}
 
-	filp->f_pos += cnt;
+	*ppos += cnt;
 
 	return cnt;
 }
@@ -2584,7 +2608,7 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
 	}
 	mutex_unlock(&trace_types_lock);
 
-	filp->f_pos += cnt;
+	*ppos += cnt;
 
 	return cnt;
 }
@@ -2766,7 +2790,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
 	if (err)
 		return err;
 
-	filp->f_pos += ret;
+	*ppos += ret;
 
 	return ret;
 }
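The filp->f_pos conversions above (and the matching one in tracing_entries_write() below) follow the VFS rule that a ->write() handler must advance the offset pointer it was given rather than the file's f_pos field, because pwrite() and similar paths pass a separate offset. A sketch of the corrected handler shape, using a hypothetical debugfs-style handler:

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t demo_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	char buf[64];

	if (cnt >= sizeof(buf))
		cnt = sizeof(buf) - 1;
	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';

	/* ... parse and apply buf ... */

	*ppos += cnt;		/* not filp->f_pos += cnt */
	return cnt;
}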
@@ -2899,6 +2923,10 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
 	else
 		cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask);
 
+
+	if (iter->trace->pipe_close)
+		iter->trace->pipe_close(iter);
+
 	mutex_unlock(&trace_types_lock);
 
 	free_cpumask_var(iter->started);
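The new pipe_close hook lets a tracer free any per-reader state it attached to the iterator when trace_pipe is closed; the call is guarded, so tracers that do not supply it are unaffected. A sketch of a tracer opting in (the pipe_close field is from this series; the demo callback body and its use of iter->private are assumptions):

/* assumes the tracer-internal definitions from kernel/trace/trace.h */
static void demo_pipe_close(struct trace_iterator *iter)
{
	/* release whatever demo-specific state was hung off iter->private */
}

static struct tracer demo_tracer __read_mostly = {
	.name		= "demo",
	.pipe_close	= demo_pipe_close,
};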
@@ -3105,7 +3133,7 @@ static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
 	__free_page(spd->pages[idx]);
 }
 
-static struct pipe_buf_operations tracing_pipe_buf_ops = {
+static const struct pipe_buf_operations tracing_pipe_buf_ops = {
 	.can_merge		= 0,
 	.map			= generic_pipe_buf_map,
 	.unmap			= generic_pipe_buf_unmap,
@@ -3301,7 +3329,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 		}
 	}
 
-	filp->f_pos += cnt;
+	*ppos += cnt;
 
 	/* If check pages failed, return ENOMEM */
 	if (tracing_disabled)
@@ -3336,7 +3364,6 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 					size_t cnt, loff_t *fpos)
 {
 	char *buf;
-	char *end;
 
 	if (tracing_disabled)
 		return -EINVAL;
@@ -3344,7 +3371,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	if (cnt > TRACE_BUF_SIZE)
 		cnt = TRACE_BUF_SIZE;
 
-	buf = kmalloc(cnt + 1, GFP_KERNEL);
+	buf = kmalloc(cnt + 2, GFP_KERNEL);
 	if (buf == NULL)
 		return -ENOMEM;
 
@@ -3352,14 +3379,13 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 		kfree(buf);
 		return -EFAULT;
 	}
+	if (buf[cnt-1] != '\n') {
+		buf[cnt] = '\n';
+		buf[cnt+1] = '\0';
+	} else
+		buf[cnt] = '\0';
 
-	/* Cut from the first nil or newline. */
-	buf[cnt] = '\0';
-	end = strchr(buf, '\n');
-	if (end)
-		*end = '\0';
-
-	cnt = mark_printk("%s\n", buf);
+	cnt = mark_printk("%s", buf);
 	kfree(buf);
 	*fpos += cnt;
 
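The rework above changes the trace_marker semantics from "truncate at the first newline" to "ensure exactly one trailing newline": the buffer is now allocated with cnt + 2 bytes so that both a '\n' and the '\0' can be appended when the user's write did not already end with one. The same string handling in isolation (hypothetical helper, not part of the patch):

#include <linux/slab.h>
#include <linux/string.h>

/* Return a kmalloc'ed, NUL-terminated copy of src[0..cnt) that, when
 * non-empty, ends in '\n'. Caller must kfree() the result. */
static char *demo_copy_with_newline(const char *src, size_t cnt)
{
	char *buf = kmalloc(cnt + 2, GFP_KERNEL);	/* room for '\n' and '\0' */

	if (!buf)
		return NULL;
	memcpy(buf, src, cnt);
	if (cnt && buf[cnt - 1] != '\n') {
		buf[cnt] = '\n';
		buf[cnt + 1] = '\0';
	} else {
		buf[cnt] = '\0';
	}
	return buf;
}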
@@ -3591,7 +3617,7 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
 }
 
 /* Pipe buffer operations for a buffer. */
-static struct pipe_buf_operations buffer_pipe_buf_ops = {
+static const struct pipe_buf_operations buffer_pipe_buf_ops = {
 	.can_merge		= 0,
 	.map			= generic_pipe_buf_map,
 	.unmap			= generic_pipe_buf_unmap,
@@ -3732,7 +3758,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 
 	s = kmalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
-		return ENOMEM;
+		return -ENOMEM;
 
 	trace_seq_init(s);
 
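The ENOMEM to -ENOMEM change in tracing_stats_read() is another genuine fix: file operation handlers report failure as a negative errno, and a bare ENOMEM would have reached userspace as a small positive byte count. The convention, in a hypothetical helper:

#include <linux/slab.h>
#include <linux/errno.h>

static ssize_t demo_alloc_or_fail(void **out)
{
	void *s = kmalloc(64, GFP_KERNEL);

	if (!s)
		return -ENOMEM;	/* negative errno, never a bare ENOMEM */
	*out = s;
	return 0;
}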
@@ -4281,8 +4307,8 @@ trace_printk_seq(struct trace_seq *s)
 
 static void __ftrace_dump(bool disable_tracing)
 {
-	static raw_spinlock_t ftrace_dump_lock =
-		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t ftrace_dump_lock =
+		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
 	unsigned int old_userobj;
@@ -4292,7 +4318,7 @@ static void __ftrace_dump(bool disable_tracing)
 
 	/* only one dump */
 	local_irq_save(flags);
-	__raw_spin_lock(&ftrace_dump_lock);
+	arch_spin_lock(&ftrace_dump_lock);
 	if (dump_ran)
 		goto out;
 
@@ -4367,7 +4393,7 @@ static void __ftrace_dump(bool disable_tracing)
 	}
 
 out:
-	__raw_spin_unlock(&ftrace_dump_lock);
+	arch_spin_unlock(&ftrace_dump_lock);
 	local_irq_restore(flags);
 }
 
@@ -4389,7 +4415,7 @@ __init static int tracer_alloc_buffers(void)
 	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
 		goto out_free_buffer_mask;
 
-	if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
 		goto out_free_tracing_cpumask;
 
 	/* To save memory, keep the ring buffer size to its minimum */
@@ -4400,7 +4426,6 @@ __init static int tracer_alloc_buffers(void)
 
 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
 	cpumask_copy(tracing_cpumask, cpu_all_mask);
-	cpumask_clear(tracing_reader_cpumask);
 
 	/* TODO: make the number of buffers hot pluggable with CPUS */
 	global_trace.buffer = ring_buffer_alloc(ring_buf_size,
@@ -4429,7 +4454,7 @@ __init static int tracer_alloc_buffers(void)
 	/* Allocate the first page for all buffers */
 	for_each_tracing_cpu(i) {
 		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
-		max_tr.data[i] = &per_cpu(max_data, i);
+		max_tr.data[i] = &per_cpu(max_tr_data, i);
 	}
 
 	trace_init_cmdlines();