Diffstat (limited to 'kernel/trace/ring_buffer.c')
 kernel/trace/ring_buffer.c | 103 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------------
 1 file changed, 79 insertions(+), 24 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 7f69cfeaadf7..a9d9760dc7b6 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -69,6 +69,7 @@ void tracing_on(void)
 {
 	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
 }
+EXPORT_SYMBOL_GPL(tracing_on);
 
 /**
  * tracing_off - turn off all tracing buffers
@@ -82,6 +83,7 @@ void tracing_off(void)
 {
 	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
 }
+EXPORT_SYMBOL_GPL(tracing_off);
 
 /**
  * tracing_off_permanent - permanently disable ring buffers
@@ -107,16 +109,18 @@ u64 ring_buffer_time_stamp(int cpu)
 	preempt_disable_notrace();
 	/* shift to debug/test normalization and TIME_EXTENTS */
 	time = sched_clock() << DEBUG_SHIFT;
-	preempt_enable_notrace();
+	preempt_enable_no_resched_notrace();
 
 	return time;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
 
 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
 {
 	/* Just stupid testing the normalize function and deltas */
 	*ts >>= DEBUG_SHIFT;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
 
 #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
 #define RB_ALIGNMENT_SHIFT	2
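Two details in this hunk beyond the exports: the timestamp is deliberately
shifted left by DEBUG_SHIFT to exercise the delta and TIME_EXTEND handling,
so readers must shift it back, and the switch to
preempt_enable_no_resched_notrace() presumably avoids calling into the
scheduler from inside the timestamp path. A minimal sketch of how the two
exported helpers are meant to be paired (editorial illustration, not part
of the patch; the function name is hypothetical):

    #include <linux/ring_buffer.h>

    static u64 example_normalized_stamp(int cpu)
    {
            /* value comes back shifted left by DEBUG_SHIFT */
            u64 ts = ring_buffer_time_stamp(cpu);

            /* readers undo the shift before reporting the value */
            ring_buffer_normalize_time_stamp(cpu, &ts);
            return ts;
    }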
@@ -166,6 +170,7 @@ unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 {
 	return rb_event_length(event);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 
 /* inline for ring buffer fast paths */
 static inline void *
@@ -187,9 +192,10 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
 {
 	return rb_event_data(event);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 
 #define for_each_buffer_cpu(buffer, cpu)		\
-	for_each_cpu_mask(cpu, buffer->cpumask)
+	for_each_cpu(cpu, buffer->cpumask)
 
 #define TS_SHIFT	27
 #define TS_MASK		((1ULL << TS_SHIFT) - 1)
@@ -258,11 +264,10 @@ struct ring_buffer_per_cpu {
 };
 
 struct ring_buffer {
-	unsigned long size;
 	unsigned pages;
 	unsigned flags;
 	int cpus;
-	cpumask_t cpumask;
+	cpumask_var_t cpumask;
 	atomic_t record_disabled;
 
 	struct mutex mutex;
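The struct change is part of the tree-wide cpumask conversion: a cpumask_t
embeds NR_CPUS bits directly in the structure, while cpumask_var_t becomes
a pointer when CONFIG_CPUMASK_OFFSTACK=y and then needs explicit
allocation. The general pattern the rest of the patch follows, as a
self-contained sketch with hypothetical names (not patch code):

    #include <linux/cpumask.h>
    #include <linux/slab.h>

    struct example {
            cpumask_var_t mask;
    };

    static struct example *example_create(void)
    {
            struct example *e = kzalloc(sizeof(*e), GFP_KERNEL);

            if (!e)
                    return NULL;
            /* a no-op stub when CPUMASK_OFFSTACK is off, a real
             * allocation (which can fail) when it is on */
            if (!alloc_cpumask_var(&e->mask, GFP_KERNEL)) {
                    kfree(e);
                    return NULL;
            }
            cpumask_copy(e->mask, cpu_possible_mask);
            return e;
    }

    static void example_destroy(struct example *e)
    {
            free_cpumask_var(e->mask);
            kfree(e);
    }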
@@ -428,7 +433,7 @@ extern int ring_buffer_page_too_big(void);
 
 /**
  * ring_buffer_alloc - allocate a new ring_buffer
- * @size: the size in bytes that is needed.
+ * @size: the size in bytes per cpu that is needed.
  * @flags: attributes to set for the ring buffer.
  *
  * Currently the only flag that is available is the RB_FL_OVERWRITE
@@ -453,6 +458,9 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	if (!buffer)
 		return NULL;
 
+	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
+		goto fail_free_buffer;
+
 	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
 	buffer->flags = flags;
 
@@ -460,14 +468,14 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	if (buffer->pages == 1)
 		buffer->pages++;
 
-	buffer->cpumask = cpu_possible_map;
+	cpumask_copy(buffer->cpumask, cpu_possible_mask);
 	buffer->cpus = nr_cpu_ids;
 
 	bsize = sizeof(void *) * nr_cpu_ids;
 	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
 				  GFP_KERNEL);
 	if (!buffer->buffers)
-		goto fail_free_buffer;
+		goto fail_free_cpumask;
 
 	for_each_buffer_cpu(buffer, cpu) {
 		buffer->buffers[cpu] =
@@ -487,10 +495,14 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	}
 	kfree(buffer->buffers);
 
+ fail_free_cpumask:
+	free_cpumask_var(buffer->cpumask);
+
  fail_free_buffer:
 	kfree(buffer);
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_alloc);
 
 /**
  * ring_buffer_free - free a ring buffer.
@@ -504,8 +516,11 @@ ring_buffer_free(struct ring_buffer *buffer)
 	for_each_buffer_cpu(buffer, cpu)
 		rb_free_cpu_buffer(buffer->buffers[cpu]);
 
+	free_cpumask_var(buffer->cpumask);
+
 	kfree(buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_free);
 
 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
 
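With ring_buffer_alloc()/ring_buffer_free() exported, a module can own a
buffer outright; note the error unwinding above mirrors the allocation
order (cpumask first, then the buffers array). A minimal sketch of the
module-side usage, assuming only the API visible in this file (names are
hypothetical):

    #include <linux/ring_buffer.h>

    static struct ring_buffer *example_buffer;

    static int example_setup(void)
    {
            /* 'size' is per cpu and rounds up to whole BUF_PAGE_SIZE pages */
            example_buffer = ring_buffer_alloc(64 * 1024, RB_FL_OVERWRITE);
            if (!example_buffer)
                    return -ENOMEM;
            return 0;
    }

    static void example_teardown(void)
    {
            ring_buffer_free(example_buffer);
    }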
@@ -681,6 +696,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	mutex_unlock(&buffer->mutex);
 	return -ENOMEM;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_resize);
 
 static inline int rb_null_event(struct ring_buffer_event *event)
 {
@@ -839,6 +855,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
 	 * back to us). This allows us to do a simple loop to
 	 * assign the commit to the tail.
 	 */
+ again:
 	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
 		cpu_buffer->commit_page->page->commit =
 			cpu_buffer->commit_page->write;
@@ -854,6 +871,17 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
 			cpu_buffer->commit_page->write;
 		barrier();
 	}
+
+	/* again, keep gcc from optimizing */
+	barrier();
+
+	/*
+	 * If an interrupt came in just after the first while loop
+	 * and pushed the tail page forward, we will be left with
+	 * a dangling commit that will never go forward.
+	 */
+	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
+		goto again;
 }
 
 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
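The new recheck closes a small window: if an interrupt fires after the
while loop's exit test has passed but before the function returns, the
interrupt's writer can push tail_page forward, leaving commit_page behind
forever. The same loop-barrier-recheck idiom in a self-contained userspace
sketch (hypothetical names; barrier() here is GCC's compiler barrier):

    #define barrier() __asm__ __volatile__("" ::: "memory")

    static volatile int commit, tail;  /* tail may move from a handler */

    static void catch_up(void)
    {
    again:
            while (commit != tail)
                    commit++;          /* publish one step */

            barrier();                 /* force a fresh read of tail */

            if (commit != tail)        /* moved since the loop exited? */
                    goto again;
    }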
@@ -951,12 +979,15 @@ static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		  unsigned type, unsigned long length, u64 *ts)
 {
-	struct buffer_page *tail_page, *head_page, *reader_page;
+	struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
 	unsigned long tail, write;
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct ring_buffer_event *event;
 	unsigned long flags;
 
+	commit_page = cpu_buffer->commit_page;
+	/* we just need to protect against interrupts */
+	barrier();
 	tail_page = cpu_buffer->tail_page;
 	write = local_add_return(length, &tail_page->write);
 	tail = write - length;
@@ -982,7 +1013,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	 * it all the way around the buffer, bail, and warn
 	 * about it.
 	 */
-	if (unlikely(next_page == cpu_buffer->commit_page)) {
+	if (unlikely(next_page == commit_page)) {
 		WARN_ON_ONCE(1);
 		goto out_unlock;
 	}
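Snapshotting commit_page before the tail reservation matters for the wrap
check above: the comparison must use the commit position as it was when
this writer started, not a value an interrupting writer may have advanced
in the meantime, and the barrier() stops the compiler from deferring the
read past local_add_return(). A compact illustration of the ordering
(hypothetical types and names, compiler barrier as before):

    #define barrier() __asm__ __volatile__("" ::: "memory")

    struct pg;
    struct cpu_buf { struct pg *commit_page, *tail_page; };

    static struct pg *reserve_start(struct cpu_buf *cb, struct pg **commit)
    {
            *commit = cb->commit_page; /* snapshot before reserving */
            barrier();                 /* keep the read ordered first */
            return cb->tail_page;      /* then work on the tail */
    }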
@@ -1260,7 +1291,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 
 	cpu = raw_smp_processor_id();
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1290,6 +1321,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 	ftrace_preempt_enable(resched);
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
 
 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 		      struct ring_buffer_event *event)
@@ -1336,6 +1368,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
 
 /**
  * ring_buffer_write - write data to the buffer without reserving
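Together, the lock_reserve/unlock_commit exports open the zero-copy write
path to modules. A minimal sketch, assuming the signatures at this point
in the tree (a length plus an irq-flags out-parameter; the helper name is
hypothetical):

    #include <linux/ring_buffer.h>
    #include <linux/string.h>

    static int example_log(struct ring_buffer *buffer, const void *msg,
                           unsigned long len)
    {
            struct ring_buffer_event *event;
            unsigned long flags;

            event = ring_buffer_lock_reserve(buffer, len, &flags);
            if (!event)
                    return -EBUSY;  /* recording disabled or no room */

            /* write straight into the reserved slot, then commit */
            memcpy(ring_buffer_event_data(event), msg, len);
            return ring_buffer_unlock_commit(buffer, event, flags);
    }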
@@ -1371,7 +1404,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
 	cpu = raw_smp_processor_id();
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1397,6 +1430,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_write);
 
 static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
 {
@@ -1423,6 +1457,7 @@ void ring_buffer_record_disable(struct ring_buffer *buffer)
 {
 	atomic_inc(&buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
 
 /**
  * ring_buffer_record_enable - enable writes to the buffer
@@ -1435,6 +1470,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer)
 {
 	atomic_dec(&buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
 
 /**
  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
@@ -1450,12 +1486,13 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return;
 
 	cpu_buffer = buffer->buffers[cpu];
 	atomic_inc(&cpu_buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
 
 /**
  * ring_buffer_record_enable_cpu - enable writes to the buffer
@@ -1469,12 +1506,13 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return;
 
 	cpu_buffer = buffer->buffers[cpu];
 	atomic_dec(&cpu_buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
 
 /**
  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
@@ -1485,12 +1523,13 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
 	return cpu_buffer->entries;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 
 /**
  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
@@ -1501,12 +1540,13 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
 	return cpu_buffer->overrun;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
 
 /**
  * ring_buffer_entries - get the number of entries in a buffer
@@ -1529,6 +1569,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
 
 	return entries;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_entries);
 
 /**
  * ring_buffer_overrun_cpu - get the number of overruns in buffer
@@ -1551,6 +1592,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
 
 	return overruns;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_overruns);
 
 static void rb_iter_reset(struct ring_buffer_iter *iter)
 {
@@ -1586,6 +1628,7 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
 	rb_iter_reset(iter);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
 
 /**
  * ring_buffer_iter_empty - check if an iterator has no more to read
@@ -1600,6 +1643,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
 	return iter->head_page == cpu_buffer->commit_page &&
 		iter->head == rb_commit_index(cpu_buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
 
 static void
 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
@@ -1814,7 +1858,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct buffer_page *reader;
 	int nr_loops = 0;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1866,6 +1910,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_peek);
 
 static struct ring_buffer_event *
 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
@@ -1926,6 +1971,7 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
 
 /**
  * ring_buffer_peek - peek at the next event to be read
@@ -1987,7 +2033,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_event *event;
 	unsigned long flags;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2003,6 +2049,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 
 	return event;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_consume);
 
 /**
  * ring_buffer_read_start - start a non consuming read of the buffer
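On the read side, ring_buffer_consume() pops the next event from one cpu
buffer and advances past it, returning NULL when that cpu is drained. A
consuming-reader sketch over the exported API (function name hypothetical):

    #include <linux/ring_buffer.h>

    static void example_drain_cpu(struct ring_buffer *buffer, int cpu)
    {
            struct ring_buffer_event *event;
            u64 ts;

            while ((event = ring_buffer_consume(buffer, cpu, &ts)) != NULL) {
                    void *data = ring_buffer_event_data(event);
                    unsigned len = ring_buffer_event_length(event);

                    /* hand data/len and the (normalized) ts to a backend */
                    (void)data;
                    (void)len;
            }
    }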
@@ -2023,7 +2070,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	struct ring_buffer_iter *iter;
 	unsigned long flags;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
@@ -2045,6 +2092,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 
 	return iter;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read_start);
 
 /**
  * ring_buffer_finish - finish reading the iterator of the buffer
@@ -2061,6 +2109,7 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)
 	atomic_dec(&cpu_buffer->record_disabled);
 	kfree(iter);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
 
 /**
  * ring_buffer_read - read the next item in the ring buffer by the iterator
@@ -2087,6 +2136,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 
 	return event;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read);
 
 /**
  * ring_buffer_size - return the size of the ring buffer (in bytes)
@@ -2096,6 +2146,7 @@ unsigned long ring_buffer_size(struct ring_buffer *buffer)
 {
 	return BUF_PAGE_SIZE * buffer->pages;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_size);
 
 static void
 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
@@ -2129,7 +2180,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	unsigned long flags;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return;
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2142,6 +2193,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
 
 /**
  * ring_buffer_reset - reset a ring buffer
@@ -2154,6 +2206,7 @@ void ring_buffer_reset(struct ring_buffer *buffer)
 	for_each_buffer_cpu(buffer, cpu)
 		ring_buffer_reset_cpu(buffer, cpu);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_reset);
 
 /**
  * rind_buffer_empty - is the ring buffer empty?
@@ -2172,6 +2225,7 @@ int ring_buffer_empty(struct ring_buffer *buffer)
 	}
 	return 1;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_empty);
 
 /**
  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
@@ -2182,12 +2236,13 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 1;
 
 	cpu_buffer = buffer->buffers[cpu];
 	return rb_per_cpu_empty(cpu_buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
 
 /**
  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
@@ -2205,13 +2260,12 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 	struct ring_buffer_per_cpu *cpu_buffer_a;
 	struct ring_buffer_per_cpu *cpu_buffer_b;
 
-	if (!cpu_isset(cpu, buffer_a->cpumask) ||
-	    !cpu_isset(cpu, buffer_b->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
+	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
 		return -EINVAL;
 
 	/* At least make sure the two buffers are somewhat the same */
-	if (buffer_a->size != buffer_b->size ||
-	    buffer_a->pages != buffer_b->pages)
+	if (buffer_a->pages != buffer_b->pages)
 		return -EINVAL;
 
 	cpu_buffer_a = buffer_a->buffers[cpu];
@@ -2237,6 +2291,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
 
 static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
 			      struct buffer_data_page *bpage)
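Dropping the size field from struct ring_buffer loses no information:
every buffer's byte size is exactly pages * BUF_PAGE_SIZE (see
ring_buffer_size() above), so comparing page counts in
ring_buffer_swap_cpu() is equivalent to the old two-field check. In
sketch form (hypothetical helper):

    #include <linux/ring_buffer.h>

    static int example_swappable(struct ring_buffer *a, struct ring_buffer *b)
    {
            /* equal page counts <=> equal byte sizes, per ring_buffer_size() */
            return ring_buffer_size(a) == ring_buffer_size(b);
    }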