Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c | 86
1 file changed, 67 insertions(+), 19 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 76f34c0ef29c..8b0daf0662ef 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -69,6 +69,7 @@ void tracing_on(void)
 {
 	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
 }
+EXPORT_SYMBOL_GPL(tracing_on);
 
 /**
  * tracing_off - turn off all tracing buffers
@@ -82,6 +83,7 @@ void tracing_off(void)
 {
 	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
 }
+EXPORT_SYMBOL_GPL(tracing_off);
 
 /**
  * tracing_off_permanent - permanently disable ring buffers
@@ -111,12 +113,14 @@ u64 ring_buffer_time_stamp(int cpu)
 
 	return time;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
 
 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
 {
 	/* Just stupid testing the normalize function and deltas */
 	*ts >>= DEBUG_SHIFT;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
 
 #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
 #define RB_ALIGNMENT_SHIFT	2
@@ -164,8 +168,15 @@ rb_event_length(struct ring_buffer_event *event)
  */
 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 {
-	return rb_event_length(event);
+	unsigned length = rb_event_length(event);
+	if (event->type != RINGBUF_TYPE_DATA)
+		return length;
+	length -= RB_EVNT_HDR_SIZE;
+	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
+		length -= sizeof(event->array[0]);
+	return length;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 
 /* inline for ring buffer fast paths */
 static inline void *
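This hunk is the one behavioral fix in the patch: ring_buffer_event_length() used to return the raw on-ring event length, header included. It now returns only the payload of a data event, and for payloads big enough that the length was spilled into event->array[0] it strips that word as well. A user-space sketch of the same arithmetic, assuming the then-current layout (4-byte event header, RB_MAX_SMALL_DATA of 28); the standalone harness is illustrative, not kernel code:

#include <stdio.h>

/* Assumed values: the header word is 4 bytes (type + len +
 * time_delta bitfields) and payloads up to 28 bytes encode their
 * length directly in the header's len field. */
#define RB_EVNT_HDR_SIZE	4
#define RB_MAX_SMALL_DATA	28

/* Total on-ring length -> payload length visible to the caller. */
static unsigned payload_length(unsigned total)
{
	unsigned length = total - RB_EVNT_HDR_SIZE;

	/* Payloads over RB_MAX_SMALL_DATA store their length in
	 * array[0], which itself occupies 4 bytes of the event. */
	if (length > RB_MAX_SMALL_DATA + 4)
		length -= 4;
	return length;
}

int main(void)
{
	printf("%u\n", payload_length(20));	/* small event: 16-byte payload */
	printf("%u\n", payload_length(108));	/* large event: 100-byte payload */
	return 0;
}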
@@ -187,9 +198,10 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
 {
 	return rb_event_data(event);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 
 #define for_each_buffer_cpu(buffer, cpu)		\
-	for_each_cpu_mask(cpu, buffer->cpumask)
+	for_each_cpu(cpu, buffer->cpumask)
 
 #define TS_SHIFT	27
 #define TS_MASK		((1ULL << TS_SHIFT) - 1)
@@ -261,7 +273,7 @@ struct ring_buffer {
 	unsigned			pages;
 	unsigned			flags;
 	int				cpus;
-	cpumask_t			cpumask;
+	cpumask_var_t			cpumask;
 	atomic_t			record_disabled;
 
 	struct mutex			mutex;
@@ -427,7 +439,7 @@ extern int ring_buffer_page_too_big(void);
 
 /**
  * ring_buffer_alloc - allocate a new ring_buffer
- * @size: the size in bytes that is needed.
+ * @size: the size in bytes per cpu that is needed.
  * @flags: attributes to set for the ring buffer.
  *
  * Currently the only flag that is available is the RB_FL_OVERWRITE
@@ -452,6 +464,9 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	if (!buffer)
 		return NULL;
 
+	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
+		goto fail_free_buffer;
+
 	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
 	buffer->flags = flags;
 
@@ -459,14 +474,14 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	if (buffer->pages == 1)
 		buffer->pages++;
 
-	buffer->cpumask = cpu_possible_map;
+	cpumask_copy(buffer->cpumask, cpu_possible_mask);
 	buffer->cpus = nr_cpu_ids;
 
 	bsize = sizeof(void *) * nr_cpu_ids;
 	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
 				  GFP_KERNEL);
 	if (!buffer->buffers)
-		goto fail_free_buffer;
+		goto fail_free_cpumask;
 
 	for_each_buffer_cpu(buffer, cpu) {
 		buffer->buffers[cpu] =
@@ -486,10 +501,14 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	}
 	kfree(buffer->buffers);
 
+ fail_free_cpumask:
+	free_cpumask_var(buffer->cpumask);
+
  fail_free_buffer:
 	kfree(buffer);
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_alloc);
 
 /**
  * ring_buffer_free - free a ring buffer.
@@ -503,8 +522,11 @@ ring_buffer_free(struct ring_buffer *buffer)
 	for_each_buffer_cpu(buffer, cpu)
 		rb_free_cpu_buffer(buffer->buffers[cpu]);
 
+	free_cpumask_var(buffer->cpumask);
+
 	kfree(buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_free);
 
 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
 
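The cpumask_t to cpumask_var_t conversion above moves the mask out of the structure and, with CONFIG_CPUMASK_OFFSTACK, onto the heap. That is why ring_buffer_alloc() grows a fail_free_cpumask unwind label and ring_buffer_free() gains a free_cpumask_var(). A condensed sketch of the lifecycle pattern the patch follows, using only cpumask calls that appear in this diff; the surrounding my_object type is illustrative:

#include <linux/cpumask.h>
#include <linux/slab.h>

struct my_object {
	cpumask_var_t	cpumask;	/* pointer or array, per config */
};

static struct my_object *my_object_alloc(void)
{
	struct my_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	/* May allocate; a no-op returning true when masks are on-stack. */
	if (!alloc_cpumask_var(&obj->cpumask, GFP_KERNEL))
		goto fail_free_obj;

	cpumask_copy(obj->cpumask, cpu_possible_mask);
	return obj;

 fail_free_obj:
	kfree(obj);
	return NULL;
}

static void my_object_free(struct my_object *obj)
{
	free_cpumask_var(obj->cpumask);	/* must precede kfree(obj) */
	kfree(obj);
}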
@@ -680,6 +702,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	mutex_unlock(&buffer->mutex);
 	return -ENOMEM;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_resize);
 
 static inline int rb_null_event(struct ring_buffer_event *event)
 {
@@ -1274,7 +1297,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 
 	cpu = raw_smp_processor_id();
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1304,6 +1327,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 	ftrace_preempt_enable(resched);
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
 
 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 		      struct ring_buffer_event *event)
@@ -1350,6 +1374,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
 
 /**
  * ring_buffer_write - write data to the buffer without reserving
@@ -1385,7 +1410,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
 	cpu = raw_smp_processor_id();
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1411,6 +1436,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_write);
 
 static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
 {
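With ring_buffer_alloc(), ring_buffer_write() and the reserve/commit pair now exported GPL-only, a module can drive a ring buffer of its own. A minimal producer sketch, assuming ring_buffer_write()'s (buffer, length, data) signature of this vintage and its nonzero return on failure; the record struct and buffer size are made up for illustration:

#include <linux/module.h>
#include <linux/ring_buffer.h>

struct my_record {			/* illustrative payload */
	u64	when;
	int	value;
};

static struct ring_buffer *rb;

static int __init producer_init(void)
{
	struct my_record rec = { .when = 0, .value = 42 };

	/* @size is per cpu, as the updated kerneldoc above states. */
	rb = ring_buffer_alloc(64 * 1024, RB_FL_OVERWRITE);
	if (!rb)
		return -ENOMEM;

	/* One-shot write: reserve, copy and commit in a single call. */
	if (ring_buffer_write(rb, sizeof(rec), &rec))
		printk(KERN_WARNING "ring_buffer_write failed\n");

	return 0;
}

static void __exit producer_exit(void)
{
	ring_buffer_free(rb);
}

module_init(producer_init);
module_exit(producer_exit);
MODULE_LICENSE("GPL");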
@@ -1437,6 +1463,7 @@ void ring_buffer_record_disable(struct ring_buffer *buffer)
 {
 	atomic_inc(&buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
 
 /**
  * ring_buffer_record_enable - enable writes to the buffer
@@ -1449,6 +1476,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer)
 {
 	atomic_dec(&buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
 
 /**
  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
@@ -1464,12 +1492,13 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return;
 
 	cpu_buffer = buffer->buffers[cpu];
 	atomic_inc(&cpu_buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
 
 /**
  * ring_buffer_record_enable_cpu - enable writes to the buffer
@@ -1483,12 +1512,13 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return;
 
 	cpu_buffer = buffer->buffers[cpu];
 	atomic_dec(&cpu_buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
 
 /**
  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
@@ -1499,12 +1529,13 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
 	return cpu_buffer->entries;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 
 /**
  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
@@ -1515,12 +1546,13 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
 	return cpu_buffer->overrun;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
 
 /**
  * ring_buffer_entries - get the number of entries in a buffer
@@ -1543,6 +1575,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
 
 	return entries;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_entries);
 
 /**
  * ring_buffer_overrun_cpu - get the number of overruns in buffer
@@ -1565,6 +1598,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
 
 	return overruns;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_overruns);
 
 static void rb_iter_reset(struct ring_buffer_iter *iter)
 {
@@ -1600,6 +1634,7 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
 	rb_iter_reset(iter);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
 
 /**
  * ring_buffer_iter_empty - check if an iterator has no more to read
@@ -1614,6 +1649,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
 	return iter->head_page == cpu_buffer->commit_page &&
 		iter->head == rb_commit_index(cpu_buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
 
 static void
 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
@@ -1828,7 +1864,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct buffer_page *reader;
 	int nr_loops = 0;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
 	cpu_buffer = buffer->buffers[cpu];
@@ -1880,6 +1916,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_peek);
 
 static struct ring_buffer_event *
 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
@@ -1940,6 +1977,7 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
 
 /**
  * ring_buffer_peek - peek at the next event to be read
@@ -2001,7 +2039,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_event *event;
 	unsigned long flags;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2017,6 +2055,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 
 	return event;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_consume);
 
 /**
  * ring_buffer_read_start - start a non consuming read of the buffer
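ring_buffer_consume() pops the oldest pending event from one CPU's buffer and returns NULL when that buffer is drained, so a module-side reader is just a per-cpu loop. A minimal sketch against the signatures visible in the hunks above; the drain_cpu() wrapper is illustrative:

#include <linux/kernel.h>
#include <linux/ring_buffer.h>

/* Drain and print every pending event on one CPU's buffer. */
static void drain_cpu(struct ring_buffer *rb, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(rb, cpu, &ts))) {
		void *data = ring_buffer_event_data(event);
		unsigned len = ring_buffer_event_length(event);

		/* len is the payload size, per the fix earlier in this patch */
		printk(KERN_INFO "cpu%d ts=%llu len=%u data=%p\n",
		       cpu, (unsigned long long)ts, len, data);
	}
}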
@@ -2037,7 +2076,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	struct ring_buffer_iter *iter;
 	unsigned long flags;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
@@ -2059,6 +2098,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 
 	return iter;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read_start);
 
 /**
  * ring_buffer_finish - finish reading the iterator of the buffer
@@ -2075,6 +2115,7 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)
 	atomic_dec(&cpu_buffer->record_disabled);
 	kfree(iter);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
 
 /**
  * ring_buffer_read - read the next item in the ring buffer by the iterator
@@ -2101,6 +2142,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 
 	return event;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read);
 
 /**
  * ring_buffer_size - return the size of the ring buffer (in bytes)
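The non-consuming path pairs ring_buffer_read_start() with ring_buffer_read_finish(); between the two, recording on that CPU's buffer stays disabled (note the record_disabled dance above) while ring_buffer_read() walks events without removing them. A sketch of that lifecycle; dump_cpu() is illustrative:

#include <linux/kernel.h>
#include <linux/ring_buffer.h>

/* Walk one CPU's events without consuming them. */
static void dump_cpu(struct ring_buffer *rb, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_start(rb, cpu);	/* disables recording */
	if (!iter)
		return;

	while ((event = ring_buffer_read(iter, &ts)))
		printk(KERN_INFO "cpu%d ts=%llu len=%u\n", cpu,
		       (unsigned long long)ts,
		       ring_buffer_event_length(event));

	ring_buffer_read_finish(iter);		/* re-enables, frees iter */
}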
@@ -2110,6 +2152,7 @@ unsigned long ring_buffer_size(struct ring_buffer *buffer)
 {
 	return BUF_PAGE_SIZE * buffer->pages;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_size);
 
 static void
 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
@@ -2143,7 +2186,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	unsigned long flags;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return;
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2156,6 +2199,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
 
 /**
  * ring_buffer_reset - reset a ring buffer
@@ -2168,6 +2212,7 @@ void ring_buffer_reset(struct ring_buffer *buffer)
 	for_each_buffer_cpu(buffer, cpu)
 		ring_buffer_reset_cpu(buffer, cpu);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_reset);
 
 /**
  * rind_buffer_empty - is the ring buffer empty?
@@ -2186,6 +2231,7 @@ int ring_buffer_empty(struct ring_buffer *buffer)
 	}
 	return 1;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_empty);
 
 /**
  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
@@ -2196,12 +2242,13 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 
-	if (!cpu_isset(cpu, buffer->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 1;
 
 	cpu_buffer = buffer->buffers[cpu];
 	return rb_per_cpu_empty(cpu_buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
 
 /**
  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
@@ -2219,8 +2266,8 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 	struct ring_buffer_per_cpu *cpu_buffer_a;
 	struct ring_buffer_per_cpu *cpu_buffer_b;
 
-	if (!cpu_isset(cpu, buffer_a->cpumask) ||
-	    !cpu_isset(cpu, buffer_b->cpumask))
+	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
+	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
 		return -EINVAL;
 
 	/* At least make sure the two buffers are somewhat the same */
@@ -2250,6 +2297,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
 
 static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
 			      struct buffer_data_page *bpage)
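ring_buffer_swap_cpu() underpins the snapshot idiom the latency tracers use: keep a spare buffer of matching geometry, swap one CPU's live buffer out when something interesting happens, then read the frozen copy at leisure. A hedged sketch of that idiom using only calls exported by this patch; snapshot_cpu() and the two-buffer setup are illustrative:

#include <linux/ring_buffer.h>

/* Freeze cpu's live data into 'spare' and put the freshly reset
 * spare into service; 'spare' can then be read without racing
 * against new writes. */
static int snapshot_cpu(struct ring_buffer *live,
			struct ring_buffer *spare, int cpu)
{
	int ret;

	ring_buffer_reset_cpu(spare, cpu);	/* discard stale snapshot */
	ret = ring_buffer_swap_cpu(spare, live, cpu);
	if (ret)
		return ret;	/* -EINVAL: buffers not comparable */

	/* 'spare' now holds what 'live' recorded on this cpu. */
	return 0;
}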