path: root/kernel/trace/ring_buffer.c
Diffstat (limited to 'kernel/trace/ring_buffer.c')
 -rw-r--r--  kernel/trace/ring_buffer.c  56
 1 file changed, 56 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a2dea5008826..6781e9aab2c0 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1027,8 +1027,23 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
         struct ring_buffer_event *event;
         u64 ts, delta;
         int commit = 0;
+        int nr_loops = 0;
 
  again:
+        /*
+         * We allow for interrupts to reenter here and do a trace.
+         * If one does, it will cause this original code to loop
+         * back here. Even with heavy interrupts happening, this
+         * should only happen a few times in a row. If this happens
+         * 1000 times in a row, there must be either an interrupt
+         * storm or we have something buggy.
+         * Bail!
+         */
+        if (unlikely(++nr_loops > 1000)) {
+                RB_WARN_ON(cpu_buffer, 1);
+                return NULL;
+        }
+
         ts = ring_buffer_time_stamp(cpu_buffer->cpu);
 
         /*
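The guard added above is a plain bounded-retry pattern: count every pass through the again: label and bail out with a warning once the count exceeds a limit that no legitimate amount of interrupt nesting could reach. Below is a minimal userspace sketch of the same shape, assuming an invented try_reserve() step and MAX_RETRIES bound that merely stand in for the real ring-buffer internals:

#include <stdbool.h>
#include <stdio.h>

#define MAX_RETRIES 1000        /* mirrors the 1000-iteration bound above */

/* Hypothetical stand-in for a step that an "interrupt" can force to retry. */
static bool try_reserve(int attempt)
{
        return attempt >= 3;    /* pretend the first two attempts are preempted */
}

static int reserve_with_bound(void)
{
        int nr_loops = 0;

 again:
        /* Bail if we loop far more often than any sane nesting could explain. */
        if (++nr_loops > MAX_RETRIES) {
                fprintf(stderr, "bug: exceeded retry bound\n");
                return -1;
        }
        if (!try_reserve(nr_loops))
                goto again;
        return nr_loops;
}

int main(void)
{
        printf("reserved after %d attempt(s)\n", reserve_with_bound());
        return 0;
}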
@@ -1526,11 +1541,24 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 {
         struct buffer_page *reader = NULL;
         unsigned long flags;
+        int nr_loops = 0;
 
         local_irq_save(flags);
         __raw_spin_lock(&cpu_buffer->lock);
 
  again:
+        /*
+         * This should normally only loop twice. But because the
+         * start of the reader inserts an empty page, it causes
+         * a case where we will loop three times. There should be no
+         * reason to loop four times (that I know of).
+         */
+        if (unlikely(++nr_loops > 3)) {
+                RB_WARN_ON(cpu_buffer, 1);
+                reader = NULL;
+                goto out;
+        }
+
         reader = cpu_buffer->reader_page;
 
         /* If there's more to read, return this page */
@@ -1661,6 +1689,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
         struct ring_buffer_per_cpu *cpu_buffer;
         struct ring_buffer_event *event;
         struct buffer_page *reader;
+        int nr_loops = 0;
 
         if (!cpu_isset(cpu, buffer->cpumask))
                 return NULL;
@@ -1668,6 +1697,19 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
         cpu_buffer = buffer->buffers[cpu];
 
  again:
+        /*
+         * We repeat when a timestamp is encountered. It is possible
+         * to get multiple timestamps from an interrupt entering just
+         * as one timestamp is about to be written. The max times
+         * that this can happen is the number of nested interrupts we
+         * can have. Nesting 10 deep of interrupts is clearly
+         * an anomaly.
+         */
+        if (unlikely(++nr_loops > 10)) {
+                RB_WARN_ON(cpu_buffer, 1);
+                return NULL;
+        }
+
         reader = rb_get_reader_page(cpu_buffer);
         if (!reader)
                 return NULL;
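The ten-iteration bound used here (and repeated in ring_buffer_iter_peek below) covers the retry taken when the event peeked at turns out to be a timestamp record rather than data, which can only stack up as deep as interrupts nest. A hedged sketch of that retry shape follows, with an invented fake_event_type enum and a fixed event stream standing in for the real buffer contents:

#include <stdio.h>

/* Illustrative event kinds; the real buffer distinguishes data events
 * from time-extend records, which force another pass of the loop. */
enum fake_event_type { EV_DATA, EV_TIME_EXTEND };

/* Stand-in stream: two timestamp records slipped in by "interrupts"
 * ahead of the data event the caller actually wants. */
static const enum fake_event_type stream[] = {
        EV_TIME_EXTEND, EV_TIME_EXTEND, EV_DATA,
};

static int peek_data(void)
{
        int nr_loops = 0;
        int pos = 0;

 again:
        /* More than ten timestamp retries in a row is treated as a bug,
         * matching the bound chosen in the patch above. */
        if (++nr_loops > 10) {
                fprintf(stderr, "bug: too many timestamp retries\n");
                return -1;
        }
        if (stream[pos] == EV_TIME_EXTEND) {
                pos++;          /* skip the timestamp record ... */
                goto again;     /* ... and look at the next event */
        }
        return pos;             /* index of the first data event */
}

int main(void)
{
        printf("data event found at index %d\n", peek_data());
        return 0;
}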
@@ -1718,6 +1760,7 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
         struct ring_buffer *buffer;
         struct ring_buffer_per_cpu *cpu_buffer;
         struct ring_buffer_event *event;
+        int nr_loops = 0;
 
         if (ring_buffer_iter_empty(iter))
                 return NULL;
@@ -1726,6 +1769,19 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
         buffer = cpu_buffer->buffer;
 
  again:
+        /*
+         * We repeat when a timestamp is encountered. It is possible
+         * to get multiple timestamps from an interrupt entering just
+         * as one timestamp is about to be written. The max times
+         * that this can happen is the number of nested interrupts we
+         * can have. Nesting 10 deep of interrupts is clearly
+         * an anomaly.
+         */
+        if (unlikely(++nr_loops > 10)) {
+                RB_WARN_ON(cpu_buffer, 1);
+                return NULL;
+        }
+
         if (rb_per_cpu_empty(cpu_buffer))
                 return NULL;
 