Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c | 391
1 file changed, 217 insertions(+), 174 deletions(-)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 44f916a04065..9ec59f541156 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -101,10 +101,7 @@ static inline void ftrace_enable_cpu(void)
 	preempt_enable();
 }
 
-static cpumask_var_t __read_mostly	tracing_buffer_mask;
-
-#define for_each_tracing_cpu(cpu)	\
-	for_each_cpu(cpu, tracing_buffer_mask)
+cpumask_var_t __read_mostly	tracing_buffer_mask;
 
 /*
  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
@@ -117,9 +114,12 @@ static cpumask_var_t __read_mostly tracing_buffer_mask;
  *
  * It is default off, but you can enable it with either specifying
  * "ftrace_dump_on_oops" in the kernel command line, or setting
- * /proc/sys/kernel/ftrace_dump_on_oops to true.
+ * /proc/sys/kernel/ftrace_dump_on_oops
+ * Set 1 if you want to dump buffers of all CPUs
+ * Set 2 if you want to dump the buffer of the CPU that triggered oops
  */
-int ftrace_dump_on_oops;
+
+enum ftrace_dump_mode ftrace_dump_on_oops;
 
 static int tracing_set_tracer(const char *buf);
@@ -139,8 +139,17 @@ __setup("ftrace=", set_cmdline_ftrace);
 
 static int __init set_ftrace_dump_on_oops(char *str)
 {
-	ftrace_dump_on_oops = 1;
-	return 1;
+	if (*str++ != '=' || !*str) {
+		ftrace_dump_on_oops = DUMP_ALL;
+		return 1;
+	}
+
+	if (!strcmp("orig_cpu", str)) {
+		ftrace_dump_on_oops = DUMP_ORIG;
+		return 1;
+	}
+
+	return 0;
 }
 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 
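The parser above accepts either a bare "ftrace_dump_on_oops" on the kernel command line (dump every CPU's buffer) or "ftrace_dump_on_oops=orig_cpu" (dump only the CPU that oopsed). A minimal userspace model of that parsing logic, for illustration only (plain C, not kernel code):

#include <stdio.h>
#include <string.h>

enum ftrace_dump_mode { DUMP_NONE, DUMP_ALL, DUMP_ORIG };

static int parse_dump_on_oops(const char *str, enum ftrace_dump_mode *mode)
{
	if (*str++ != '=' || !*str) {
		*mode = DUMP_ALL;	/* bare "ftrace_dump_on_oops" */
		return 1;
	}
	if (!strcmp("orig_cpu", str)) {
		*mode = DUMP_ORIG;	/* "ftrace_dump_on_oops=orig_cpu" */
		return 1;
	}
	return 0;			/* unrecognized value: reject */
}

int main(void)
{
	enum ftrace_dump_mode mode;

	printf("%d\n", parse_dump_on_oops("", &mode));		/* 1 -> DUMP_ALL */
	printf("%d\n", parse_dump_on_oops("=orig_cpu", &mode));	/* 1 -> DUMP_ORIG */
	printf("%d\n", parse_dump_on_oops("=bogus", &mode));	/* 0 */
	return 0;
}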
@@ -332,7 +341,7 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 /* trace_flags holds trace_options default values */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
-	TRACE_ITER_GRAPH_TIME;
+	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD;
 
 static int trace_stop_count;
 static DEFINE_SPINLOCK(tracing_start_lock);
@@ -416,6 +425,7 @@ static const char *trace_options[] = {
 	"latency-format",
 	"sleep-time",
 	"graph-time",
+	"record-cmd",
 	NULL
 };
 
@@ -647,6 +657,10 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
+	if (!current_trace->use_max_tr) {
+		WARN_ON_ONCE(1);
+		return;
+	}
 	arch_spin_lock(&ftrace_max_lock);
 
 	tr->buffer = max_tr.buffer;
@@ -673,6 +687,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
+	if (!current_trace->use_max_tr) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
 	arch_spin_lock(&ftrace_max_lock);
 
 	ftrace_disable_cpu();
@@ -717,18 +736,11 @@ __acquires(kernel_lock)
 		return -1;
 	}
 
-	if (strlen(type->name) > MAX_TRACER_SIZE) {
+	if (strlen(type->name) >= MAX_TRACER_SIZE) {
 		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
 		return -1;
 	}
 
-	/*
-	 * When this gets called we hold the BKL which means that
-	 * preemption is disabled. Various trace selftests however
-	 * need to disable and enable preemption for successful tests.
-	 * So we drop the BKL here and grab it after the tests again.
-	 */
-	unlock_kernel();
 	mutex_lock(&trace_types_lock);
 
 	tracing_selftest_running = true;
@@ -810,7 +822,6 @@ __acquires(kernel_lock)
 #endif
 
  out_unlock:
-	lock_kernel();
 	return ret;
 }
 
@@ -1319,61 +1330,6 @@ static void __trace_userstack(struct trace_array *tr, unsigned long flags)
 
 #endif /* CONFIG_STACKTRACE */
 
-static void
-ftrace_trace_special(void *__tr,
-		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
-		     int pc)
-{
-	struct ftrace_event_call *call = &event_special;
-	struct ring_buffer_event *event;
-	struct trace_array *tr = __tr;
-	struct ring_buffer *buffer = tr->buffer;
-	struct special_entry *entry;
-
-	event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL,
-					  sizeof(*entry), 0, pc);
-	if (!event)
-		return;
-	entry = ring_buffer_event_data(event);
-	entry->arg1 = arg1;
-	entry->arg2 = arg2;
-	entry->arg3 = arg3;
-
-	if (!filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(buffer, event, 0, pc);
-}
-
-void
-__trace_special(void *__tr, void *__data,
-		unsigned long arg1, unsigned long arg2, unsigned long arg3)
-{
-	ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count());
-}
-
-void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
-{
-	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	int cpu;
-	int pc;
-
-	if (tracing_disabled)
-		return;
-
-	pc = preempt_count();
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-
-	if (likely(atomic_inc_return(&data->disabled) == 1))
-		ftrace_trace_special(tr, arg1, arg2, arg3, pc);
-
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
-}
-
 /**
  * trace_vbprintk - write binary msg to tracing buffer
  *
@@ -1392,7 +1348,6 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	struct bprint_entry *entry;
 	unsigned long flags;
 	int disable;
-	int resched;
 	int cpu, len = 0, size, pc;
 
 	if (unlikely(tracing_selftest_running || tracing_disabled))
@@ -1402,7 +1357,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	pause_graph_tracing();
 
 	pc = preempt_count();
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 
@@ -1440,7 +1395,7 @@ out_unlock:
 
 out:
 	atomic_dec_return(&data->disabled);
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 	unpause_graph_tracing();
 
 	return len;
@@ -1527,11 +1482,6 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 }
 EXPORT_SYMBOL_GPL(trace_vprintk);
 
-enum trace_file_type {
-	TRACE_FILE_LAT_FMT	= 1,
-	TRACE_FILE_ANNOTATE	= 2,
-};
-
 static void trace_iterator_increment(struct trace_iterator *iter)
 {
 	/* Don't allow ftrace to trace into the ring buffers */
@@ -1545,7 +1495,8 @@ static void trace_iterator_increment(struct trace_iterator *iter)
 }
 
 static struct trace_entry *
-peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
+peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
+		unsigned long *lost_events)
 {
 	struct ring_buffer_event *event;
 	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
@@ -1556,7 +1507,8 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
 	if (buf_iter)
 		event = ring_buffer_iter_peek(buf_iter, ts);
 	else
-		event = ring_buffer_peek(iter->tr->buffer, cpu, ts);
+		event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
+					 lost_events);
 
 	ftrace_enable_cpu();
 
@@ -1564,10 +1516,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
 }
 
 static struct trace_entry *
-__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
+__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
+		  unsigned long *missing_events, u64 *ent_ts)
 {
 	struct ring_buffer *buffer = iter->tr->buffer;
 	struct trace_entry *ent, *next = NULL;
+	unsigned long lost_events = 0, next_lost = 0;
 	int cpu_file = iter->cpu_file;
 	u64 next_ts = 0, ts;
 	int next_cpu = -1;
@@ -1580,7 +1534,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 	if (cpu_file > TRACE_PIPE_ALL_CPU) {
 		if (ring_buffer_empty_cpu(buffer, cpu_file))
 			return NULL;
-		ent = peek_next_entry(iter, cpu_file, ent_ts);
+		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
 		if (ent_cpu)
 			*ent_cpu = cpu_file;
 
@@ -1592,7 +1546,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 		if (ring_buffer_empty_cpu(buffer, cpu))
 			continue;
 
-		ent = peek_next_entry(iter, cpu, &ts);
+		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
 
 		/*
 		 * Pick the entry with the smallest timestamp:
@@ -1601,6 +1555,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 			next = ent;
 			next_cpu = cpu;
 			next_ts = ts;
+			next_lost = lost_events;
 		}
 	}
 
@@ -1610,6 +1565,9 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 	if (ent_ts)
 		*ent_ts = next_ts;
 
+	if (missing_events)
+		*missing_events = next_lost;
+
 	return next;
 }
 
@@ -1617,13 +1575,14 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 					  int *ent_cpu, u64 *ent_ts)
 {
-	return __find_next_entry(iter, ent_cpu, ent_ts);
+	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
 }
 
 /* Find the next real entry, and increment the iterator to the next entry */
-static void *find_next_entry_inc(struct trace_iterator *iter)
+void *trace_find_next_entry_inc(struct trace_iterator *iter)
 {
-	iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
+	iter->ent = __find_next_entry(iter, &iter->cpu,
+				      &iter->lost_events, &iter->ts);
 
 	if (iter->ent)
 		trace_iterator_increment(iter);
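The lost_events plumbing above threads a per-CPU overwrite count from ring_buffer_peek()/ring_buffer_consume() up into the iterator, and next_lost is captured alongside the winning entry so the count always describes the buffer that entry came from. A self-contained sketch of that selection logic (userspace C with invented data structures, not the kernel API):

#include <stdio.h>

#define NR_CPUS 4

struct entry { unsigned long long ts; int valid; };

static struct entry buf[NR_CPUS];	/* head entry per CPU */
static unsigned long lost[NR_CPUS];	/* events overwritten per CPU */

static struct entry *find_next(int *ent_cpu, unsigned long *missing)
{
	struct entry *next = NULL;
	unsigned long next_lost = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!buf[cpu].valid)
			continue;
		/* pick the entry with the smallest timestamp */
		if (!next || buf[cpu].ts < next->ts) {
			next = &buf[cpu];
			*ent_cpu = cpu;
			next_lost = lost[cpu];	/* travels with the entry */
		}
	}
	if (missing)
		*missing = next_lost;
	return next;
}

int main(void)
{
	int cpu = -1;
	unsigned long missing;

	buf[1] = (struct entry){ .ts = 10, .valid = 1 };
	buf[3] = (struct entry){ .ts = 7,  .valid = 1 };
	lost[3] = 42;

	if (find_next(&cpu, &missing))
		printf("CPU:%d [LOST %lu EVENTS]\n", cpu, missing); /* CPU:3 */
	return 0;
}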
@@ -1635,7 +1594,8 @@ static void trace_consume(struct trace_iterator *iter)
 {
 	/* Don't allow ftrace to trace into the ring buffers */
 	ftrace_disable_cpu();
-	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
+	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
+			    &iter->lost_events);
 	ftrace_enable_cpu();
 }
 
@@ -1654,19 +1614,19 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
 		return NULL;
 
 	if (iter->idx < 0)
-		ent = find_next_entry_inc(iter);
+		ent = trace_find_next_entry_inc(iter);
 	else
 		ent = iter;
 
 	while (ent && iter->idx < i)
-		ent = find_next_entry_inc(iter);
+		ent = trace_find_next_entry_inc(iter);
 
 	iter->pos = *pos;
 
 	return ent;
 }
 
-static void tracing_iter_reset(struct trace_iterator *iter, int cpu)
+void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 {
 	struct trace_array *tr = iter->tr;
 	struct ring_buffer_event *event;
@@ -1786,7 +1746,7 @@ static void print_func_help_header(struct seq_file *m)
 }
 
 
-static void
+void
 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 {
 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
@@ -1914,7 +1874,7 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 	}
 
 	if (event)
-		return event->trace(iter, sym_flags);
+		return event->funcs->trace(iter, sym_flags, event);
 
 	if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
 		goto partial;
@@ -1940,7 +1900,7 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
 
 	event = ftrace_find_event(entry->type);
 	if (event)
-		return event->raw(iter, 0);
+		return event->funcs->raw(iter, 0, event);
 
 	if (!trace_seq_printf(s, "%d ?\n", entry->type))
 		goto partial;
@@ -1967,7 +1927,7 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
 
 	event = ftrace_find_event(entry->type);
 	if (event) {
-		enum print_line_t ret = event->hex(iter, 0);
+		enum print_line_t ret = event->funcs->hex(iter, 0, event);
 		if (ret != TRACE_TYPE_HANDLED)
 			return ret;
 	}
@@ -1992,10 +1952,11 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 	}
 
 	event = ftrace_find_event(entry->type);
-	return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED;
+	return event ? event->funcs->binary(iter, 0, event) :
+		TRACE_TYPE_HANDLED;
 }
 
-static int trace_empty(struct trace_iterator *iter)
+int trace_empty(struct trace_iterator *iter)
 {
 	int cpu;
 
@@ -2026,10 +1987,14 @@ static int trace_empty(struct trace_iterator *iter)
 }
 
 /* Called with trace_event_read_lock() held. */
-static enum print_line_t print_trace_line(struct trace_iterator *iter)
+enum print_line_t print_trace_line(struct trace_iterator *iter)
 {
 	enum print_line_t ret;
 
+	if (iter->lost_events)
+		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
+				 iter->cpu, iter->lost_events);
+
 	if (iter->trace && iter->trace->print_line) {
 		ret = iter->trace->print_line(iter);
 		if (ret != TRACE_TYPE_UNHANDLED)
@@ -2058,6 +2023,23 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
 	return print_trace_fmt(iter);
 }
 
+void trace_default_header(struct seq_file *m)
+{
+	struct trace_iterator *iter = m->private;
+
+	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
+		/* print nothing if the buffers are empty */
+		if (trace_empty(iter))
+			return;
+		print_trace_header(m, iter);
+		if (!(trace_flags & TRACE_ITER_VERBOSE))
+			print_lat_help_header(m);
+	} else {
+		if (!(trace_flags & TRACE_ITER_VERBOSE))
+			print_func_help_header(m);
+	}
+}
+
 static int s_show(struct seq_file *m, void *v)
 {
 	struct trace_iterator *iter = v;
@@ -2070,17 +2052,9 @@ static int s_show(struct seq_file *m, void *v)
 		}
 		if (iter->trace && iter->trace->print_header)
 			iter->trace->print_header(m);
-		else if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
-			/* print nothing if the buffers are empty */
-			if (trace_empty(iter))
-				return 0;
-			print_trace_header(m, iter);
-			if (!(trace_flags & TRACE_ITER_VERBOSE))
-				print_lat_help_header(m);
-		} else {
-			if (!(trace_flags & TRACE_ITER_VERBOSE))
-				print_func_help_header(m);
-		}
+		else
+			trace_default_header(m);
+
 	} else if (iter->leftover) {
 		/*
 		 * If we filled the seq_file buffer earlier, we
@@ -2166,15 +2140,20 @@ __tracing_open(struct inode *inode, struct file *file)
 
 	if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
 		for_each_tracing_cpu(cpu) {
-
 			iter->buffer_iter[cpu] =
-				ring_buffer_read_start(iter->tr->buffer, cpu);
+				ring_buffer_read_prepare(iter->tr->buffer, cpu);
+		}
+		ring_buffer_read_prepare_sync();
+		for_each_tracing_cpu(cpu) {
+			ring_buffer_read_start(iter->buffer_iter[cpu]);
 			tracing_iter_reset(iter, cpu);
 		}
 	} else {
 		cpu = iter->cpu_file;
 		iter->buffer_iter[cpu] =
-			ring_buffer_read_start(iter->tr->buffer, cpu);
+			ring_buffer_read_prepare(iter->tr->buffer, cpu);
+		ring_buffer_read_prepare_sync();
+		ring_buffer_read_start(iter->buffer_iter[cpu]);
 		tracing_iter_reset(iter, cpu);
 	}
 
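Opening the trace file now splits iterator startup into ring_buffer_read_prepare() per CPU, a single ring_buffer_read_prepare_sync() covering all of them, and then ring_buffer_read_start() per iterator, so the expensive synchronization step is paid once instead of once per CPU. A toy model of the call pattern (userspace C; the functions are stand-ins, not the ring-buffer API):

#include <stdio.h>

#define NR_CPUS 4

static void read_prepare(int cpu)   { printf("prepare cpu %d\n", cpu); }
static void read_prepare_sync(void) { printf("one sync for all cpus\n"); }
static void read_start(int cpu)     { printf("start cpu %d\n", cpu); }

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		read_prepare(cpu);	/* cheap per-CPU bookkeeping */
	read_prepare_sync();		/* single grace-period-style wait */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		read_start(cpu);	/* begin iterating each buffer */
	return 0;
}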
@@ -2353,6 +2332,7 @@ static const struct file_operations show_traces_fops = {
 	.open		= show_traces_open,
 	.read		= seq_read,
 	.release	= seq_release,
+	.llseek		= seq_lseek,
 };
 
 /*
@@ -2446,6 +2426,7 @@ static const struct file_operations tracing_cpumask_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_cpumask_read,
 	.write		= tracing_cpumask_write,
+	.llseek		= generic_file_llseek,
 };
 
 static int tracing_trace_options_show(struct seq_file *m, void *v)
@@ -2521,6 +2502,9 @@ static void set_tracer_flags(unsigned int mask, int enabled)
 		trace_flags |= mask;
 	else
 		trace_flags &= ~mask;
+
+	if (mask == TRACE_ITER_RECORD_CMD)
+		trace_event_enable_cmd_record(enabled);
 }
 
 static ssize_t
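With the record-cmd hook above, set_tracer_flags() does more than flip a bit: toggling that one option immediately tells the event subsystem to start or stop recording the pid-to-comm mapping. A compilable model of the dispatch (userspace C, with a stub standing in for the real trace_event_enable_cmd_record()):

#include <stdio.h>

#define TRACE_ITER_RECORD_CMD	(1 << 0)

static unsigned long trace_flags;

/* stub: the kernel version walks all events and toggles comm recording */
static void trace_event_enable_cmd_record(int enabled)
{
	printf("cmdline recording %s\n", enabled ? "on" : "off");
}

static void set_tracer_flags(unsigned int mask, int enabled)
{
	if (enabled)
		trace_flags |= mask;
	else
		trace_flags &= ~mask;

	/* side effect only for the record-cmd bit */
	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);
}

int main(void)
{
	set_tracer_flags(TRACE_ITER_RECORD_CMD, 1);	/* "on" */
	set_tracer_flags(TRACE_ITER_RECORD_CMD, 0);	/* "off" */
	return 0;
}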
@@ -2612,6 +2596,7 @@ tracing_readme_read(struct file *filp, char __user *ubuf,
 static const struct file_operations tracing_readme_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_readme_read,
+	.llseek		= generic_file_llseek,
 };
 
 static ssize_t
@@ -2662,6 +2647,7 @@ tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
 static const struct file_operations tracing_saved_cmdlines_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_saved_cmdlines_read,
+	.llseek		= generic_file_llseek,
 };
 
 static ssize_t
@@ -2757,6 +2743,9 @@ static int tracing_resize_ring_buffer(unsigned long size)
 	if (ret < 0)
 		return ret;
 
+	if (!current_trace->use_max_tr)
+		goto out;
+
 	ret = ring_buffer_resize(max_tr.buffer, size);
 	if (ret < 0) {
 		int r;
@@ -2784,11 +2773,14 @@ static int tracing_resize_ring_buffer(unsigned long size)
 		return ret;
 	}
 
+	max_tr.entries = size;
+ out:
 	global_trace.entries = size;
 
 	return ret;
 }
 
+
 /**
  * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
@@ -2849,12 +2841,26 @@ static int tracing_set_tracer(const char *buf)
 	trace_branch_disable();
 	if (current_trace && current_trace->reset)
 		current_trace->reset(tr);
-
+	if (current_trace && current_trace->use_max_tr) {
+		/*
+		 * We don't free the ring buffer. instead, resize it because
+		 * The max_tr ring buffer has some state (e.g. ring->clock) and
+		 * we want preserve it.
+		 */
+		ring_buffer_resize(max_tr.buffer, 1);
+		max_tr.entries = 1;
+	}
 	destroy_trace_option_files(topts);
 
 	current_trace = t;
 
 	topts = create_trace_option_files(current_trace);
+	if (current_trace->use_max_tr) {
+		ret = ring_buffer_resize(max_tr.buffer, global_trace.entries);
+		if (ret < 0)
+			goto out;
+		max_tr.entries = global_trace.entries;
+	}
 
 	if (t->init) {
 		ret = tracer_init(t, tr);
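Together with the 1-entry allocation in tracer_alloc_buffers() near the end of this patch, the hunk above gives the max/snapshot buffer a lazy sizing policy: it idles at a single token entry, grows to mirror the main buffer only while a tracer with ->use_max_tr is active, and shrinks back (rather than being freed) so per-buffer state such as the clock is preserved. A sketch of the policy, with invented names:

#include <stdio.h>

static unsigned long global_entries = 65536;	/* main buffer size */
static unsigned long max_entries = 1;		/* allocated with 1 entry at boot */

static void select_tracer(int use_max_tr)
{
	if (use_max_tr)
		max_entries = global_entries;	/* grow for snapshotting */
	else
		max_entries = 1;		/* shrink, keep buffer state */
	printf("max_tr entries: %lu\n", max_entries);
}

int main(void)
{
	select_tracer(0);	/* e.g. "nop": stays at 1 */
	select_tracer(1);	/* e.g. "irqsoff": 65536 */
	return 0;
}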
@@ -2991,6 +2997,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 	if (iter->trace->pipe_open)
 		iter->trace->pipe_open(iter);
 
+	nonseekable_open(inode, filp);
 out:
 	mutex_unlock(&trace_types_lock);
 	return ret;
@@ -3170,7 +3177,7 @@ waitagain:
 
 	trace_event_read_lock();
 	trace_access_lock(iter->cpu_file);
-	while (find_next_entry_inc(iter) != NULL) {
+	while (trace_find_next_entry_inc(iter) != NULL) {
 		enum print_line_t ret;
 		int len = iter->seq.len;
 
@@ -3253,7 +3260,7 @@ tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
 		if (ret != TRACE_TYPE_NO_CONSUME)
 			trace_consume(iter);
 		rem -= count;
-		if (!find_next_entry_inc(iter)) {
+		if (!trace_find_next_entry_inc(iter)) {
 			rem = 0;
 			iter->ent = NULL;
 			break;
@@ -3269,12 +3276,12 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 					size_t len,
 					unsigned int flags)
 {
-	struct page *pages[PIPE_BUFFERS];
-	struct partial_page partial[PIPE_BUFFERS];
+	struct page *pages_def[PIPE_DEF_BUFFERS];
+	struct partial_page partial_def[PIPE_DEF_BUFFERS];
 	struct trace_iterator *iter = filp->private_data;
 	struct splice_pipe_desc spd = {
-		.pages		= pages,
-		.partial	= partial,
+		.pages		= pages_def,
+		.partial	= partial_def,
 		.nr_pages	= 0, /* This gets updated below. */
 		.flags		= flags,
 		.ops		= &tracing_pipe_buf_ops,
@@ -3285,6 +3292,9 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 	size_t rem;
 	unsigned int i;
 
+	if (splice_grow_spd(pipe, &spd))
+		return -ENOMEM;
+
 	/* copy the tracer to avoid using a global lock all around */
 	mutex_lock(&trace_types_lock);
 	if (unlikely(old_tracer != current_trace && current_trace)) {
@@ -3306,7 +3316,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 	if (ret <= 0)
 		goto out_err;
 
-	if (!iter->ent && !find_next_entry_inc(iter)) {
+	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
 		ret = -EFAULT;
 		goto out_err;
 	}
@@ -3315,23 +3325,23 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 	trace_access_lock(iter->cpu_file);
 
 	/* Fill as many pages as possible. */
-	for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) {
-		pages[i] = alloc_page(GFP_KERNEL);
-		if (!pages[i])
+	for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
+		spd.pages[i] = alloc_page(GFP_KERNEL);
+		if (!spd.pages[i])
 			break;
 
 		rem = tracing_fill_pipe_page(rem, iter);
 
 		/* Copy the data into the page, so we can start over. */
 		ret = trace_seq_to_buffer(&iter->seq,
-					  page_address(pages[i]),
+					  page_address(spd.pages[i]),
 					  iter->seq.len);
 		if (ret < 0) {
-			__free_page(pages[i]);
+			__free_page(spd.pages[i]);
 			break;
 		}
-		partial[i].offset = 0;
-		partial[i].len = iter->seq.len;
+		spd.partial[i].offset = 0;
+		spd.partial[i].len = iter->seq.len;
 
 		trace_seq_init(&iter->seq);
 	}
@@ -3342,12 +3352,14 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 
 	spd.nr_pages = i;
 
-	return splice_to_pipe(pipe, &spd);
+	ret = splice_to_pipe(pipe, &spd);
+out:
+	splice_shrink_spd(pipe, &spd);
+	return ret;
 
 out_err:
 	mutex_unlock(&iter->mutex);
-
-	return ret;
+	goto out;
 }
 
 static ssize_t
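Both splice paths in this patch replace fixed PIPE_BUFFERS arrays with PIPE_DEF_BUFFERS defaults plus splice_grow_spd()/splice_shrink_spd(), because pipes gained a resizable buffer count (pipe->buffers). A userspace sketch of the grow/shrink idea, with invented helper names:

#include <stdio.h>
#include <stdlib.h>

#define PIPE_DEF_BUFFERS 16

struct spd { void **pages; int grown; };

static int grow_spd(int pipe_buffers, struct spd *spd, void **def)
{
	if (pipe_buffers <= PIPE_DEF_BUFFERS) {
		spd->pages = def;	/* on-stack default is enough */
		return 0;
	}
	spd->pages = calloc(pipe_buffers, sizeof(void *));
	spd->grown = spd->pages != NULL;
	return spd->pages ? 0 : -1;	/* -ENOMEM in the kernel */
}

static void shrink_spd(struct spd *spd)
{
	if (spd->grown)
		free(spd->pages);	/* undo the heap fallback */
}

int main(void)
{
	void *def[PIPE_DEF_BUFFERS];
	struct spd spd = { 0 };

	if (grow_spd(64, &spd, def))	/* pipe larger than the default */
		return 1;
	printf("grown: %d\n", spd.grown);	/* 1 */
	shrink_spd(&spd);
	return 0;
}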
@@ -3431,7 +3443,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	}
 
 	tracing_start();
-	max_tr.entries = global_trace.entries;
 	mutex_unlock(&trace_types_lock);
 
 	return cnt;
@@ -3452,6 +3463,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 					size_t cnt, loff_t *fpos)
 {
 	char *buf;
+	size_t written;
 
 	if (tracing_disabled)
 		return -EINVAL;
@@ -3473,11 +3485,15 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	} else
 		buf[cnt] = '\0';
 
-	cnt = mark_printk("%s", buf);
+	written = mark_printk("%s", buf);
 	kfree(buf);
-	*fpos += cnt;
+	*fpos += written;
 
-	return cnt;
+	/* don't tell userspace we wrote more - it might confuse them */
+	if (written > cnt)
+		written = cnt;
+
+	return written;
 }
 
 static int tracing_clock_show(struct seq_file *m, void *v)
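The rewritten return path fixes a subtle write(2) contract violation: mark_printk() can emit more bytes than the user passed in, and returning that larger count confuses userspace. A tiny illustration of the clamp (userspace C):

#include <stdio.h>

static size_t mark_write(size_t cnt, size_t produced)
{
	size_t written = produced;

	/* don't tell userspace we wrote more - it might confuse them */
	if (written > cnt)
		written = cnt;
	return written;
}

int main(void)
{
	/* mark_printk("%s", buf) may add framing beyond the user's bytes */
	printf("%zu\n", mark_write(5, 7));	/* prints 5, not 7 */
	return 0;
}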
@@ -3544,18 +3560,21 @@ static const struct file_operations tracing_max_lat_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_max_lat_read,
 	.write		= tracing_max_lat_write,
+	.llseek		= generic_file_llseek,
 };
 
 static const struct file_operations tracing_ctrl_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_ctrl_read,
 	.write		= tracing_ctrl_write,
+	.llseek		= generic_file_llseek,
 };
 
 static const struct file_operations set_tracer_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_set_trace_read,
 	.write		= tracing_set_trace_write,
+	.llseek		= generic_file_llseek,
 };
 
 static const struct file_operations tracing_pipe_fops = {
@@ -3564,17 +3583,20 @@ static const struct file_operations tracing_pipe_fops = {
 	.read		= tracing_read_pipe,
 	.splice_read	= tracing_splice_read_pipe,
 	.release	= tracing_release_pipe,
+	.llseek		= no_llseek,
 };
 
 static const struct file_operations tracing_entries_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_entries_read,
 	.write		= tracing_entries_write,
+	.llseek		= generic_file_llseek,
 };
 
 static const struct file_operations tracing_mark_fops = {
 	.open		= tracing_open_generic,
 	.write		= tracing_mark_write,
+	.llseek		= generic_file_llseek,
 };
 
 static const struct file_operations trace_clock_fops = {
@@ -3620,7 +3642,6 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 		      size_t count, loff_t *ppos)
 {
 	struct ftrace_buffer_info *info = filp->private_data;
-	unsigned int pos;
 	ssize_t ret;
 	size_t size;
 
@@ -3647,11 +3668,6 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 	if (ret < 0)
 		return 0;
 
-	pos = ring_buffer_page_len(info->spare);
-
-	if (pos < PAGE_SIZE)
-		memset(info->spare + pos, 0, PAGE_SIZE - pos);
-
 read:
 	size = PAGE_SIZE - info->read;
 	if (size > count)
@@ -3746,11 +3762,11 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 			    unsigned int flags)
 {
 	struct ftrace_buffer_info *info = file->private_data;
-	struct partial_page partial[PIPE_BUFFERS];
-	struct page *pages[PIPE_BUFFERS];
+	struct partial_page partial_def[PIPE_DEF_BUFFERS];
+	struct page *pages_def[PIPE_DEF_BUFFERS];
 	struct splice_pipe_desc spd = {
-		.pages		= pages,
-		.partial	= partial,
+		.pages		= pages_def,
+		.partial	= partial_def,
 		.flags		= flags,
 		.ops		= &buffer_pipe_buf_ops,
 		.spd_release	= buffer_spd_release,
@@ -3759,22 +3775,28 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 	int entries, size, i;
 	size_t ret;
 
+	if (splice_grow_spd(pipe, &spd))
+		return -ENOMEM;
+
 	if (*ppos & (PAGE_SIZE - 1)) {
 		WARN_ONCE(1, "Ftrace: previous read must page-align\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	if (len & (PAGE_SIZE - 1)) {
 		WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
-		if (len < PAGE_SIZE)
-			return -EINVAL;
+		if (len < PAGE_SIZE) {
+			ret = -EINVAL;
+			goto out;
+		}
 		len &= PAGE_MASK;
 	}
 
 	trace_access_lock(info->cpu);
 	entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
 
-	for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) {
+	for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
 		struct page *page;
 		int r;
 
@@ -3829,11 +3851,12 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		else
 			ret = 0;
 		/* TODO: block */
-		return ret;
+		goto out;
 	}
 
 	ret = splice_to_pipe(pipe, &spd);
-
+	splice_shrink_spd(pipe, &spd);
+out:
 	return ret;
 }
 
@@ -3879,6 +3902,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 static const struct file_operations tracing_stats_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_stats_read,
+	.llseek		= generic_file_llseek,
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -3915,6 +3939,7 @@ tracing_read_dyn_info(struct file *filp, char __user *ubuf,
 static const struct file_operations tracing_dyn_info_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_read_dyn_info,
+	.llseek		= generic_file_llseek,
 };
 #endif
 
@@ -4068,6 +4093,7 @@ static const struct file_operations trace_options_fops = {
 	.open = tracing_open_generic,
 	.read = trace_options_read,
 	.write = trace_options_write,
+	.llseek	= generic_file_llseek,
 };
 
 static ssize_t
@@ -4119,6 +4145,7 @@ static const struct file_operations trace_options_core_fops = {
 	.open = tracing_open_generic,
 	.read = trace_options_core_read,
 	.write = trace_options_core_write,
+	.llseek = generic_file_llseek,
 };
 
 struct dentry *trace_create_file(const char *name,
@@ -4308,9 +4335,6 @@ static __init int tracer_init_debugfs(void)
 	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
 			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
 #endif
-#ifdef CONFIG_SYSPROF_TRACER
-	init_tracer_sysprof_debugfs(d_tracer);
-#endif
 
 	create_trace_options_dir();
 
@@ -4324,7 +4348,7 @@ static int trace_panic_handler(struct notifier_block *this,
 			       unsigned long event, void *unused)
 {
 	if (ftrace_dump_on_oops)
-		ftrace_dump();
+		ftrace_dump(ftrace_dump_on_oops);
 	return NOTIFY_OK;
 }
 
@@ -4341,7 +4365,7 @@ static int trace_die_handler(struct notifier_block *self,
 	switch (val) {
 	case DIE_OOPS:
 		if (ftrace_dump_on_oops)
-			ftrace_dump();
+			ftrace_dump(ftrace_dump_on_oops);
 		break;
 	default:
 		break;
@@ -4367,7 +4391,7 @@ static struct notifier_block trace_die_notifier = {
  */
 #define KERN_TRACE		KERN_EMERG
 
-static void
+void
 trace_printk_seq(struct trace_seq *s)
 {
 	/* Probably should print a warning here. */
@@ -4382,7 +4406,15 @@ trace_printk_seq(struct trace_seq *s)
 	trace_seq_init(s);
 }
 
-static void __ftrace_dump(bool disable_tracing)
+void trace_init_global_iter(struct trace_iterator *iter)
+{
+	iter->tr = &global_trace;
+	iter->trace = current_trace;
+	iter->cpu_file = TRACE_PIPE_ALL_CPU;
+}
+
+static void
+__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 {
 	static arch_spinlock_t ftrace_dump_lock =
 		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
@@ -4406,8 +4438,10 @@ static void __ftrace_dump(bool disable_tracing)
 	if (disable_tracing)
 		ftrace_kill();
 
+	trace_init_global_iter(&iter);
+
 	for_each_tracing_cpu(cpu) {
-		atomic_inc(&global_trace.data[cpu]->disabled);
+		atomic_inc(&iter.tr->data[cpu]->disabled);
 	}
 
 	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
@@ -4415,12 +4449,25 @@ static void __ftrace_dump(bool disable_tracing)
 	/* don't look at user memory in panic mode */
 	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
 
-	printk(KERN_TRACE "Dumping ftrace buffer:\n");
-
 	/* Simulate the iterator */
 	iter.tr = &global_trace;
 	iter.trace = current_trace;
-	iter.cpu_file = TRACE_PIPE_ALL_CPU;
+
+	switch (oops_dump_mode) {
+	case DUMP_ALL:
+		iter.cpu_file = TRACE_PIPE_ALL_CPU;
+		break;
+	case DUMP_ORIG:
+		iter.cpu_file = raw_smp_processor_id();
+		break;
+	case DUMP_NONE:
+		goto out_enable;
+	default:
+		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
+		iter.cpu_file = TRACE_PIPE_ALL_CPU;
+	}
+
+	printk(KERN_TRACE "Dumping ftrace buffer:\n");
 
 	/*
 	 * We need to stop all tracing on all CPUS to read the
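The new switch selects which per-CPU buffers the oops dump walks: DUMP_ORIG restricts it to the CPU that triggered the oops, DUMP_NONE skips the dump entirely, and an unknown mode falls back to dumping everything. A compilable model of the selection (userspace C; PIPE_ALL_CPUS and SKIP_DUMP are invented stand-ins for TRACE_PIPE_ALL_CPU and the goto out_enable path):

#include <stdio.h>

enum ftrace_dump_mode { DUMP_NONE, DUMP_ALL, DUMP_ORIG };

#define PIPE_ALL_CPUS	-1	/* stand-in for TRACE_PIPE_ALL_CPU */
#define SKIP_DUMP	-2	/* models the goto out_enable path */

static int pick_cpu_file(enum ftrace_dump_mode mode, int oops_cpu)
{
	switch (mode) {
	case DUMP_ALL:
		return PIPE_ALL_CPUS;
	case DUMP_ORIG:
		return oops_cpu;	/* only the CPU that triggered the oops */
	case DUMP_NONE:
		return SKIP_DUMP;	/* dump nothing, just re-enable tracing */
	default:
		/* "Bad dumping mode": fall back to dumping all CPUs */
		return PIPE_ALL_CPUS;
	}
}

int main(void)
{
	printf("%d\n", pick_cpu_file(DUMP_ALL, 2));	/* -1 */
	printf("%d\n", pick_cpu_file(DUMP_ORIG, 2));	/* 2 */
	return 0;
}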
@@ -4443,7 +4490,7 @@ static void __ftrace_dump(bool disable_tracing)
 	iter.iter_flags |= TRACE_FILE_LAT_FMT;
 	iter.pos = -1;
 
-	if (find_next_entry_inc(&iter) != NULL) {
+	if (trace_find_next_entry_inc(&iter) != NULL) {
 		int ret;
 
 		ret = print_trace_line(&iter);
@@ -4459,12 +4506,13 @@ static void __ftrace_dump(bool disable_tracing)
 	else
 		printk(KERN_TRACE "---------------------------------\n");
 
+ out_enable:
 	/* Re-enable tracing if requested */
 	if (!disable_tracing) {
 		trace_flags |= old_userobj;
 
 		for_each_tracing_cpu(cpu) {
-			atomic_dec(&global_trace.data[cpu]->disabled);
+			atomic_dec(&iter.tr->data[cpu]->disabled);
 		}
 		tracing_on();
 	}
@@ -4475,9 +4523,9 @@ static void __ftrace_dump(bool disable_tracing)
 }
 
 /* By default: disable tracing after the dump */
-void ftrace_dump(void)
+void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 {
-	__ftrace_dump(true);
+	__ftrace_dump(true, oops_dump_mode);
 }
 
 __init static int tracer_alloc_buffers(void)
@@ -4513,16 +4561,14 @@ __init static int tracer_alloc_buffers(void)
 
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-	max_tr.buffer = ring_buffer_alloc(ring_buf_size,
-					  TRACE_BUFFER_FLAGS);
+	max_tr.buffer = ring_buffer_alloc(1, TRACE_BUFFER_FLAGS);
 	if (!max_tr.buffer) {
 		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
 		WARN_ON(1);
 		ring_buffer_free(global_trace.buffer);
 		goto out_free_cpumask;
 	}
-	max_tr.entries = ring_buffer_size(max_tr.buffer);
-	WARN_ON(max_tr.entries != global_trace.entries);
+	max_tr.entries = 1;
 #endif
 
 	/* Allocate the first page for all buffers */
@@ -4535,9 +4581,6 @@ __init static int tracer_alloc_buffers(void)
 
 	register_tracer(&nop_trace);
 	current_trace = &nop_trace;
-#ifdef CONFIG_BOOT_TRACER
-	register_tracer(&boot_tracer);
-#endif
 	/* All seems OK, enable tracing */
 	tracing_disabled = 0;
 