Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c | 64
1 file changed, 61 insertions(+), 3 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 94af1fe56bb4..2f76193c3489 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -130,7 +130,7 @@ struct buffer_page {
 static inline void free_buffer_page(struct buffer_page *bpage)
 {
 	if (bpage->page)
-		__free_page(bpage->page);
+		free_page((unsigned long)bpage->page);
 	kfree(bpage);
 }
 
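The one-line change above fixes a type mismatch in the free path: __free_page() takes a struct page *, while free_page() takes the page's virtual address as an unsigned long. The cast suggests bpage->page holds a page address obtained with __get_free_page(), so free_page() is the matching release call. A minimal sketch of the pairing, assuming that allocation style (the struct and both helpers are hypothetical stand-ins, not names from the file):

#include <linux/errno.h>
#include <linux/gfp.h>

struct bpage_example {			/* hypothetical stand-in for buffer_page */
	void *page;			/* a page address, not a struct page * */
};

static int example_alloc(struct bpage_example *bpage)
{
	unsigned long addr = __get_free_page(GFP_KERNEL);

	if (!addr)
		return -ENOMEM;
	bpage->page = (void *)addr;	/* store the address */
	return 0;
}

static void example_free(struct bpage_example *bpage)
{
	if (bpage->page)
		free_page((unsigned long)bpage->page);	/* pairs with __get_free_page() */
}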
@@ -966,7 +966,9 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
 	if (unlikely(*delta > (1ULL << 59) && !once++)) {
 		printk(KERN_WARNING "Delta way too big! %llu"
 		       " ts=%llu write stamp = %llu\n",
-		       *delta, *ts, cpu_buffer->write_stamp);
+		       (unsigned long long)*delta,
+		       (unsigned long long)*ts,
+		       (unsigned long long)cpu_buffer->write_stamp);
 		WARN_ON(1);
 	}
 
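The added casts are about format portability rather than behavior: u64 is unsigned long long on some architectures but plain unsigned long on others, so handing a bare u64 to a %llu conversion can trigger printk format warnings depending on the build. Casting each argument to unsigned long long makes the types match everywhere. A small illustration (the helper name is made up):

#include <linux/kernel.h>
#include <linux/types.h>

static void example_report_delta(u64 delta, u64 ts)	/* hypothetical */
{
	/* u64 may be unsigned long on 64-bit; cast to match %llu */
	printk(KERN_WARNING "delta=%llu ts=%llu\n",
	       (unsigned long long)delta,
	       (unsigned long long)ts);
}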
@@ -1020,8 +1022,23 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 	struct ring_buffer_event *event;
 	u64 ts, delta;
 	int commit = 0;
+	int nr_loops = 0;
 
  again:
+	/*
+	 * We allow for interrupts to reenter here and do a trace.
+	 * If one does, it will cause this original code to loop
+	 * back here. Even with heavy interrupts happening, this
+	 * should only happen a few times in a row. If this happens
+	 * 1000 times in a row, there must be either an interrupt
+	 * storm or we have something buggy.
+	 * Bail!
+	 */
+	if (unlikely(++nr_loops > 1000)) {
+		RB_WARN_ON(cpu_buffer, 1);
+		return NULL;
+	}
+
 	ts = ring_buffer_time_stamp(cpu_buffer->cpu);
 
 	/*
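The hunk above is the core of the patch: every retry loop that a nested interrupt can force back to its again: label gets a hard iteration cap, and exceeding the cap warns and aborts the operation instead of spinning forever. Stripped to its shape, the pattern looks like this (guarded_reserve, step_must_restart, and do_reserve are hypothetical names, not functions in the file):

/* Bounded-retry sketch: fail loudly rather than loop forever. */
static int step_must_restart(struct ring_buffer_per_cpu *cpu_buffer);
static void *do_reserve(struct ring_buffer_per_cpu *cpu_buffer);

static void *guarded_reserve(struct ring_buffer_per_cpu *cpu_buffer)
{
	int nr_loops = 0;

 again:
	/* A few reentries are normal; 1000 in a row means a storm or a bug. */
	if (unlikely(++nr_loops > 1000)) {
		RB_WARN_ON(cpu_buffer, 1);
		return NULL;
	}
	if (step_must_restart(cpu_buffer))	/* hypothetical restart check */
		goto again;
	return do_reserve(cpu_buffer);		/* hypothetical success path */
}

The cost in the common case is one increment and one predicted-not-taken branch, which is why the guard can sit on the hot reserve path.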
@@ -1043,7 +1060,7 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 
 	/* Did the write stamp get updated already? */
 	if (unlikely(ts < cpu_buffer->write_stamp))
-		goto again;
+		delta = 0;
 
 	if (test_time_stamp(delta)) {
 
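Replacing goto again with delta = 0 removes the one retry in this function that had no bound at all: if interrupts keep writing events, cpu_buffer->write_stamp can advance past every freshly read timestamp, and the old code could loop indefinitely. The new code trades timestamp precision for guaranteed progress by recording the event with a zero delta. Condensed, not the literal control flow, the post-patch logic reads roughly as:

	ts = ring_buffer_time_stamp(cpu_buffer->cpu);
	if (unlikely(ts < cpu_buffer->write_stamp))
		delta = 0;	/* stamp already moved on; don't retry */
	else
		delta = ts - cpu_buffer->write_stamp;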
@@ -1530,10 +1547,23 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	struct buffer_page *reader = NULL;
 	unsigned long flags;
+	int nr_loops = 0;
 
 	spin_lock_irqsave(&cpu_buffer->lock, flags);
 
  again:
+	/*
+	 * This should normally only loop twice. But because the
+	 * start of the reader inserts an empty page, it causes
+	 * a case where we will loop three times. There should be no
+	 * reason to loop four times (that I know of).
+	 */
+	if (unlikely(++nr_loops > 3)) {
+		RB_WARN_ON(cpu_buffer, 1);
+		reader = NULL;
+		goto out;
+	}
+
 	reader = cpu_buffer->reader_page;
 
 	/* If there's more to read, return this page */
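Note how this guard differs from the previous one: rb_get_reader_page() runs under cpu_buffer->lock, so the bail path must leave through the function's out: label rather than return directly, or the spinlock would never be released. Condensed shape (the swap logic in the middle is elided):

	spin_lock_irqsave(&cpu_buffer->lock, flags);

 again:
	if (unlikely(++nr_loops > 3)) {
		RB_WARN_ON(cpu_buffer, 1);
		reader = NULL;
		goto out;	/* still unlocks below */
	}
	/* ... reader-page swap logic that may goto again ... */

 out:
	spin_unlock_irqrestore(&cpu_buffer->lock, flags);
	return reader;

The tight bound of 3 also documents intent: two passes are expected, a third can happen because the reader inserts an empty page, and anything beyond that is treated as a bug.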
@@ -1663,6 +1693,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
 	struct buffer_page *reader;
+	int nr_loops = 0;
 
 	if (!cpu_isset(cpu, buffer->cpumask))
 		return NULL;
@@ -1670,6 +1701,19 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	cpu_buffer = buffer->buffers[cpu];
 
  again:
+	/*
+	 * We repeat when a timestamp is encountered. It is possible
+	 * to get multiple timestamps from an interrupt entering just
+	 * as one timestamp is about to be written. The max times
+	 * that this can happen is the number of nested interrupts we
+	 * can have. Nesting 10 deep of interrupts is clearly
+	 * an anomaly.
+	 */
+	if (unlikely(++nr_loops > 10)) {
+		RB_WARN_ON(cpu_buffer, 1);
+		return NULL;
+	}
+
 	reader = rb_get_reader_page(cpu_buffer);
 	if (!reader)
 		return NULL;
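Here the retry is driven by metadata events: when the peek finds a time-extend record it skips it and loops, and each level of interrupt nesting can inject at most one such record ahead of the data, so ten back-to-back timestamps already signals something abnormal. A condensed sketch of that consumer shape, not the literal function body (rb_reader_event() is assumed here as the accessor for the current event):

 again:
	if (unlikely(++nr_loops > 10)) {
		RB_WARN_ON(cpu_buffer, 1);
		return NULL;
	}
	event = rb_reader_event(cpu_buffer);	/* assumed accessor */
	if (event->type == RINGBUF_TYPE_TIME_EXTEND) {
		rb_advance_reader(cpu_buffer);	/* skip metadata, retry */
		goto again;
	}
	return event;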
@@ -1720,6 +1764,7 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	struct ring_buffer *buffer;
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
+	int nr_loops = 0;
 
 	if (ring_buffer_iter_empty(iter))
 		return NULL;
@@ -1728,6 +1773,19 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	buffer = cpu_buffer->buffer;
 
  again:
+	/*
+	 * We repeat when a timestamp is encountered. It is possible
+	 * to get multiple timestamps from an interrupt entering just
+	 * as one timestamp is about to be written. The max times
+	 * that this can happen is the number of nested interrupts we
+	 * can have. Nesting 10 deep of interrupts is clearly
+	 * an anomaly.
+	 */
+	if (unlikely(++nr_loops > 10)) {
+		RB_WARN_ON(cpu_buffer, 1);
+		return NULL;
+	}
+
 	if (rb_per_cpu_empty(cpu_buffer))
 		return NULL;
 
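All five guards report through RB_WARN_ON(cpu_buffer, 1). Its definition is outside this diff, but a guard of this kind typically both warns and disables further recording, so a wedged buffer cannot flood the log with repeated backtraces. A hypothetical reconstruction of that idea, not the file's literal macro:

#define EXAMPLE_RB_WARN_ON(cpu_buffer, cond)				\
	do {								\
		if (unlikely(cond)) {					\
			atomic_inc(&(cpu_buffer)->record_disabled);	\
			WARN_ON(1);					\
		}							\
	} while (0)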