author     Steven Rostedt <srostedt@redhat.com>    2010-03-31 19:49:26 -0400
committer  Steven Rostedt <rostedt@goodmis.org>    2010-03-31 22:57:06 -0400
commit     bc21b478425ac73f66a5ec0b375a5e0d12d609ce
tree       a5e8be66a9114f73d4c87112d515424f14e7bebd
parent     66a8cb95ed04025664d1db4e952155ee1dccd048
tracing: Show the lost events in the trace_pipe output
Now that the ring buffer can keep track of where events are lost, use this
information in the output of trace_pipe:
hackbench-3588 [001] 1326.701660: lock_acquire: ffffffff816591e0 read rcu_read_lock
hackbench-3588 [001] 1326.701661: lock_acquire: ffff88003f4091f0 &(&dentry->d_lock)->rlock
hackbench-3588 [001] 1326.701664: lock_release: ffff88003f4091f0 &(&dentry->d_lock)->rlock
CPU:1 [LOST 673 EVENTS]
hackbench-3588 [001] 1326.702711: kmem_cache_free: call_site=ffffffff81102b85 ptr=ffff880026d96738
hackbench-3588 [001] 1326.702712: lock_release: ffff88003e1480a8 &mm->mmap_sem
hackbench-3588 [001] 1326.702713: lock_acquire: ffff88003e1480a8 &mm->mmap_sem
Even works with the function graph tracer:
2) ! 170.098 us | }
2) 4.036 us | rcu_irq_exit();
2) 3.657 us | idle_cpu();
2) ! 190.301 us | }
CPU:2 [LOST 2196 EVENTS]
2) 0.853 us | } /* cancel_dirty_page */
2) | remove_from_page_cache() {
2) 1.578 us | _raw_spin_lock_irq();
2) | __remove_from_page_cache() {
Note, this does not work with the iterator "trace" file, since determining how
many events were lost requires consuming pages from the ring buffer, which the
iterator does not do.
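
For illustration only (not part of this patch), here is a minimal sketch of the
two read paths, using just the ring-buffer calls touched by this change; the
helper names read_consuming() and read_peeking() are hypothetical:

#include <linux/ring_buffer.h>
#include <linux/ftrace_event.h>

/* Consuming read, as trace_pipe does: the number of events dropped on this
 * CPU since the last read comes back through the last argument, so it can be
 * reported before the next entry is printed. */
static struct ring_buffer_event *
read_consuming(struct trace_iterator *iter, int cpu)
{
	return ring_buffer_consume(iter->tr->buffer, cpu, &iter->ts,
				   &iter->lost_events);
}

/* Non-consuming peek, as the iterator behind the "trace" file does: pages
 * stay in the ring buffer, so no lost-event count ever becomes available. */
static struct ring_buffer_event *
read_peeking(struct trace_iterator *iter, int cpu)
{
	return ring_buffer_iter_peek(iter->buffer_iter[cpu], &iter->ts);
}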
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
 include/linux/ftrace_event.h |  1
 kernel/trace/trace.c         | 30
 2 files changed, 23 insertions(+), 8 deletions(-)
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index c0f4b364c711..39e71b0a3bfd 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -58,6 +58,7 @@ struct trace_iterator {
 	/* The below is zeroed out in pipe_read */
 	struct trace_seq	seq;
 	struct trace_entry	*ent;
+	unsigned long		lost_events;
 	int			leftover;
 	int			cpu;
 	u64			ts;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index fabb0033a9be..0498bebcbfd1 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1545,7 +1545,8 @@ static void trace_iterator_increment(struct trace_iterator *iter)
 }
 
 static struct trace_entry *
-peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
+peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
+		unsigned long *lost_events)
 {
 	struct ring_buffer_event *event;
 	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
@@ -1556,7 +1557,8 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
 	if (buf_iter)
 		event = ring_buffer_iter_peek(buf_iter, ts);
 	else
-		event = ring_buffer_peek(iter->tr->buffer, cpu, ts, NULL);
+		event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
+					 lost_events);
 
 	ftrace_enable_cpu();
 
@@ -1564,10 +1566,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
 }
 
 static struct trace_entry *
-__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
+__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
+		  unsigned long *missing_events, u64 *ent_ts)
 {
 	struct ring_buffer *buffer = iter->tr->buffer;
 	struct trace_entry *ent, *next = NULL;
+	unsigned long lost_events, next_lost = 0;
 	int cpu_file = iter->cpu_file;
 	u64 next_ts = 0, ts;
 	int next_cpu = -1;
@@ -1580,7 +1584,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 	if (cpu_file > TRACE_PIPE_ALL_CPU) {
 		if (ring_buffer_empty_cpu(buffer, cpu_file))
 			return NULL;
-		ent = peek_next_entry(iter, cpu_file, ent_ts);
+		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
 		if (ent_cpu)
 			*ent_cpu = cpu_file;
 
@@ -1592,7 +1596,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 		if (ring_buffer_empty_cpu(buffer, cpu))
 			continue;
 
-		ent = peek_next_entry(iter, cpu, &ts);
+		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
 
 		/*
 		 * Pick the entry with the smallest timestamp:
@@ -1601,6 +1605,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 			next = ent;
 			next_cpu = cpu;
 			next_ts = ts;
+			next_lost = lost_events;
 		}
 	}
 
@@ -1610,6 +1615,9 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 	if (ent_ts)
 		*ent_ts = next_ts;
 
+	if (missing_events)
+		*missing_events = next_lost;
+
 	return next;
 }
 
@@ -1617,13 +1625,14 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 					  int *ent_cpu, u64 *ent_ts)
 {
-	return __find_next_entry(iter, ent_cpu, ent_ts);
+	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
 }
 
 /* Find the next real entry, and increment the iterator to the next entry */
 static void *find_next_entry_inc(struct trace_iterator *iter)
 {
-	iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
+	iter->ent = __find_next_entry(iter, &iter->cpu,
+				      &iter->lost_events, &iter->ts);
 
 	if (iter->ent)
 		trace_iterator_increment(iter);
@@ -1635,7 +1644,8 @@ static void trace_consume(struct trace_iterator *iter)
 {
 	/* Don't allow ftrace to trace into the ring buffers */
 	ftrace_disable_cpu();
-	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts, NULL);
+	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
+			    &iter->lost_events);
 	ftrace_enable_cpu();
 }
 
@@ -2030,6 +2040,10 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
 {
 	enum print_line_t ret;
 
+	if (iter->lost_events)
+		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
+				 iter->cpu, iter->lost_events);
+
 	if (iter->trace && iter->trace->print_line) {
 		ret = iter->trace->print_line(iter);
 		if (ret != TRACE_TYPE_UNHANDLED)