Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--	kernel/trace/ring_buffer.c	109	++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------------------------
1 file changed, 78 insertions(+), 31 deletions(-)
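In short: this patch adds a per-CPU reader_lock (an ordinary spinlock) to serialize readers of the trace ring buffer. The public ring_buffer_peek() and ring_buffer_iter_peek() are demoted to locked wrappers around new static helpers, rb_buffer_peek() and rb_iter_peek(), so that ring_buffer_consume() and ring_buffer_read() can hold the lock across both the peek and the advance without deadlocking on themselves. Paths that previously used bare local_irq_save()/local_irq_restore() around the raw cpu_buffer->lock now take reader_lock with spin_lock_irqsave(), which still disables interrupts but additionally excludes other readers.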
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a6b8f9d7ac96..17c2ccebb567 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -154,6 +154,7 @@ static inline int test_time_stamp(u64 delta)
 struct ring_buffer_per_cpu {
 	int				cpu;
 	struct ring_buffer		*buffer;
+	spinlock_t			reader_lock; /* serialize readers */
 	raw_spinlock_t			lock;
 	struct lock_class_key		lock_key;
 	struct list_head		pages;
@@ -321,6 +322,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 
 	cpu_buffer->cpu = cpu;
 	cpu_buffer->buffer = buffer;
+	spin_lock_init(&cpu_buffer->reader_lock);
 	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 	INIT_LIST_HEAD(&cpu_buffer->pages);
 
@@ -1476,6 +1478,9 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
 {
 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
 	/* Iterator usage is expected to have record disabled */
 	if (list_empty(&cpu_buffer->reader_page->list)) {
@@ -1489,6 +1494,8 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
 		iter->read_stamp = cpu_buffer->read_stamp;
 	else
 		iter->read_stamp = iter->head_page->time_stamp;
+
+	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
 
 /**
@@ -1707,17 +1714,8 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
 	rb_advance_iter(iter);
 }
 
-/**
- * ring_buffer_peek - peek at the next event to be read
- * @buffer: The ring buffer to read
- * @cpu: The cpu to peak at
- * @ts: The timestamp counter of this event.
- *
- * This will return the event that will be read next, but does
- * not consume the data.
- */
-struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
+static struct ring_buffer_event *
+rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
@@ -1779,16 +1777,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	return NULL;
 }
 
-/**
- * ring_buffer_iter_peek - peek at the next event to be read
- * @iter: The ring buffer iterator
- * @ts: The timestamp counter of this event.
- *
- * This will return the event that will be read next, but does
- * not increment the iterator.
- */
-struct ring_buffer_event *
-ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
+static struct ring_buffer_event *
+rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 {
 	struct ring_buffer *buffer;
 	struct ring_buffer_per_cpu *cpu_buffer;
@@ -1850,6 +1840,51 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 }
 
 /**
+ * ring_buffer_peek - peek at the next event to be read
+ * @buffer: The ring buffer to read
+ * @cpu: The cpu to peak at
+ * @ts: The timestamp counter of this event.
+ *
+ * This will return the event that will be read next, but does
+ * not consume the data.
+ */
+struct ring_buffer_event *
+ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
+{
+	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+	struct ring_buffer_event *event;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	event = rb_buffer_peek(buffer, cpu, ts);
+	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+	return event;
+}
+
+/**
+ * ring_buffer_iter_peek - peek at the next event to be read
+ * @iter: The ring buffer iterator
+ * @ts: The timestamp counter of this event.
+ *
+ * This will return the event that will be read next, but does
+ * not increment the iterator.
+ */
+struct ring_buffer_event *
+ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
+{
+	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+	struct ring_buffer_event *event;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	event = rb_iter_peek(iter, ts);
+	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+	return event;
+}
+
+/**
  * ring_buffer_consume - return an event and consume it
  * @buffer: The ring buffer to get the next event from
  *
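The function split above is the heart of the patch: unlocked static helpers do the real work, and each public entry point owns reader_lock for the duration of the call, so ring_buffer_consume() can combine "peek" and "advance" under one acquisition. A minimal userspace sketch of the same idiom follows; it uses a pthread mutex in place of the kernel spinlock, and every name in it (rb_model, model_peek, and so on) is illustrative rather than kernel API.

	#include <pthread.h>
	#include <stddef.h>

	/* Toy stand-in for ring_buffer_per_cpu: a bounded queue of ints. */
	struct rb_model {
		pthread_mutex_t reader_lock;	/* plays the role of reader_lock */
		int head;			/* next slot a reader will see */
		int tail;			/* one past the last written slot */
		int events[64];
	};

	/* Unlocked helper: caller must hold reader_lock (cf. rb_buffer_peek). */
	static int *model_peek_locked(struct rb_model *rb)
	{
		return rb->head == rb->tail ? NULL : &rb->events[rb->head];
	}

	/* Public peek: take the lock, delegate, drop it (cf. ring_buffer_peek). */
	static int *model_peek(struct rb_model *rb)
	{
		int *ev;

		pthread_mutex_lock(&rb->reader_lock);
		ev = model_peek_locked(rb);
		pthread_mutex_unlock(&rb->reader_lock);
		return ev;
	}

	/*
	 * Public consume: peek *and* advance in one critical section
	 * (cf. ring_buffer_consume calling rb_buffer_peek and then
	 * rb_advance_reader). Calling the locked model_peek() from here
	 * would deadlock on the non-recursive mutex, which is exactly
	 * why the unlocked helper exists.
	 */
	static int *model_consume(struct rb_model *rb)
	{
		int *ev;

		pthread_mutex_lock(&rb->reader_lock);
		ev = model_peek_locked(rb);
		if (ev)
			rb->head++;	/* the "advance" step */
		pthread_mutex_unlock(&rb->reader_lock);
		return ev;
	}

	int main(void)
	{
		struct rb_model rb = { .reader_lock = PTHREAD_MUTEX_INITIALIZER,
				       .events = { 1, 2, 3 }, .tail = 3 };

		while (model_consume(&rb))
			;	/* drains 1, 2, 3; further peeks return NULL */
		return model_peek(&rb) != NULL;
	}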
@@ -1860,19 +1895,24 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 struct ring_buffer_event *
 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 {
-	struct ring_buffer_per_cpu *cpu_buffer;
+	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	struct ring_buffer_event *event;
+	unsigned long flags;
 
 	if (!cpu_isset(cpu, buffer->cpumask))
 		return NULL;
 
-	event = ring_buffer_peek(buffer, cpu, ts);
+	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+
+	event = rb_buffer_peek(buffer, cpu, ts);
 	if (!event)
-		return NULL;
+		goto out;
 
-	cpu_buffer = buffer->buffers[cpu];
 	rb_advance_reader(cpu_buffer);
 
+ out:
+	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
 	return event;
 }
 
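Worth noting in the hunk above: ring_buffer_consume() now calls the unlocked rb_buffer_peek() rather than the public ring_buffer_peek(), and holds reader_lock across both the peek and rb_advance_reader(). That closes the window in which two concurrent readers could peek the same event and each try to consume it, and it avoids acquiring the non-recursive reader_lock twice in one call chain. The cpu_buffer assignment moves up into the declaration because the lock has to be taken before the peek.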
@@ -1909,11 +1949,11 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	atomic_inc(&cpu_buffer->record_disabled);
 	synchronize_sched();
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	__raw_spin_lock(&cpu_buffer->lock);
 	ring_buffer_iter_reset(iter);
 	__raw_spin_unlock(&cpu_buffer->lock);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return iter;
 }
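One caveat in this hunk: ring_buffer_read_start() now takes reader_lock and then calls ring_buffer_iter_reset(), which (per the earlier hunk) also takes reader_lock. A non-recursive spinlock acquired twice on the same CPU self-deadlocks, so as posted this path appears unsafe; mainline addressed it shortly afterwards by moving the iterator-reset logic into an unlocked internal helper, mirroring the rb_buffer_peek()/rb_iter_peek() split.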
@@ -1945,12 +1985,17 @@ struct ring_buffer_event *
 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 {
 	struct ring_buffer_event *event;
+	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+	unsigned long flags;
 
-	event = ring_buffer_iter_peek(iter, ts);
+	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	event = rb_iter_peek(iter, ts);
 	if (!event)
-		return NULL;
+		goto out;
 
 	rb_advance_iter(iter);
+ out:
+	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return event;
 }
@@ -1999,13 +2044,15 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	if (!cpu_isset(cpu, buffer->cpumask))
 		return;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+
 	__raw_spin_lock(&cpu_buffer->lock);
 
 	rb_reset_cpu(cpu_buffer);
 
 	__raw_spin_unlock(&cpu_buffer->lock);
-	local_irq_restore(flags);
+
+	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
 
 /**
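The reset and read_start hunks also establish the lock nesting order the rest of the file now follows: reader_lock is the outer lock, taken with spin_lock_irqsave() (replacing the bare local_irq_save(), so interrupts stay disabled in the critical section), and the pre-existing raw cpu_buffer->lock nests inside it. Compressed to its skeleton, the ordering looks like this:

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);	/* outer: exclude other readers */
	__raw_spin_lock(&cpu_buffer->lock);			/* inner: existing raw lock */

	rb_reset_cpu(cpu_buffer);	/* or other work on the buffer pages */

	__raw_spin_unlock(&cpu_buffer->lock);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

Taking the two locks in the same order everywhere is what keeps the combination deadlock-free.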