diff options
author | Robert Richter <robert.richter@amd.com> | 2008-12-11 10:49:22 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-12-12 00:54:55 -0500 |
commit | c4f50183f90fb1fd99aa5941f01b90cd1b882d2e (patch) | |
tree | 3ceafb906a0b940364ee45a8ac0bb1d0c3018f78 /kernel | |
parent | 211117ff09b7d81d91b7857651587128ed8b13d9 (diff) |
ring_buffer: adding EXPORT_SYMBOLs
I added EXPORT_SYMBOL_GPLs for all functions that are part of the API
(ring_buffer.h). This is required because oprofile uses the ring
buffer, and compiling it as a module would otherwise fail.
Signed-off-by: Robert Richter <robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/trace/ring_buffer.c | 34 |
1 file changed, 34 insertions, 0 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index c8996d239e4c..30d57dd01a85 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -31,6 +31,7 @@ void tracing_on(void) | |||
31 | { | 31 | { |
32 | ring_buffers_off = 0; | 32 | ring_buffers_off = 0; |
33 | } | 33 | } |
34 | EXPORT_SYMBOL_GPL(tracing_on); | ||
34 | 35 | ||
35 | /** | 36 | /** |
36 | * tracing_off - turn off all tracing buffers | 37 | * tracing_off - turn off all tracing buffers |
@@ -44,6 +45,7 @@ void tracing_off(void) | |||
44 | { | 45 | { |
45 | ring_buffers_off = 1; | 46 | ring_buffers_off = 1; |
46 | } | 47 | } |
48 | EXPORT_SYMBOL_GPL(tracing_off); | ||
47 | 49 | ||
48 | /* Up this if you want to test the TIME_EXTENTS and normalization */ | 50 | /* Up this if you want to test the TIME_EXTENTS and normalization */ |
49 | #define DEBUG_SHIFT 0 | 51 | #define DEBUG_SHIFT 0 |
@@ -60,12 +62,14 @@ u64 ring_buffer_time_stamp(int cpu) | |||
60 | 62 | ||
61 | return time; | 63 | return time; |
62 | } | 64 | } |
65 | EXPORT_SYMBOL_GPL(ring_buffer_time_stamp); | ||
63 | 66 | ||
64 | void ring_buffer_normalize_time_stamp(int cpu, u64 *ts) | 67 | void ring_buffer_normalize_time_stamp(int cpu, u64 *ts) |
65 | { | 68 | { |
66 | /* Just stupid testing the normalize function and deltas */ | 69 | /* Just stupid testing the normalize function and deltas */ |
67 | *ts >>= DEBUG_SHIFT; | 70 | *ts >>= DEBUG_SHIFT; |
68 | } | 71 | } |
72 | EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp); | ||
69 | 73 | ||
70 | #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event)) | 74 | #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event)) |
71 | #define RB_ALIGNMENT_SHIFT 2 | 75 | #define RB_ALIGNMENT_SHIFT 2 |
@@ -115,6 +119,7 @@ unsigned ring_buffer_event_length(struct ring_buffer_event *event) | |||
115 | { | 119 | { |
116 | return rb_event_length(event); | 120 | return rb_event_length(event); |
117 | } | 121 | } |
122 | EXPORT_SYMBOL_GPL(ring_buffer_event_length); | ||
118 | 123 | ||
119 | /* inline for ring buffer fast paths */ | 124 | /* inline for ring buffer fast paths */ |
120 | static inline void * | 125 | static inline void * |
@@ -136,6 +141,7 @@ void *ring_buffer_event_data(struct ring_buffer_event *event) | |||
136 | { | 141 | { |
137 | return rb_event_data(event); | 142 | return rb_event_data(event); |
138 | } | 143 | } |
144 | EXPORT_SYMBOL_GPL(ring_buffer_event_data); | ||
139 | 145 | ||
140 | #define for_each_buffer_cpu(buffer, cpu) \ | 146 | #define for_each_buffer_cpu(buffer, cpu) \ |
141 | for_each_cpu_mask(cpu, buffer->cpumask) | 147 | for_each_cpu_mask(cpu, buffer->cpumask) |
@@ -444,6 +450,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags) | |||
444 | kfree(buffer); | 450 | kfree(buffer); |
445 | return NULL; | 451 | return NULL; |
446 | } | 452 | } |
453 | EXPORT_SYMBOL_GPL(ring_buffer_alloc); | ||
447 | 454 | ||
448 | /** | 455 | /** |
449 | * ring_buffer_free - free a ring buffer. | 456 | * ring_buffer_free - free a ring buffer. |
@@ -459,6 +466,7 @@ ring_buffer_free(struct ring_buffer *buffer) | |||
459 | 466 | ||
460 | kfree(buffer); | 467 | kfree(buffer); |
461 | } | 468 | } |
469 | EXPORT_SYMBOL_GPL(ring_buffer_free); | ||
462 | 470 | ||
463 | static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); | 471 | static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); |
464 | 472 | ||
@@ -620,6 +628,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | |||
620 | mutex_unlock(&buffer->mutex); | 628 | mutex_unlock(&buffer->mutex); |
621 | return -ENOMEM; | 629 | return -ENOMEM; |
622 | } | 630 | } |
631 | EXPORT_SYMBOL_GPL(ring_buffer_resize); | ||
623 | 632 | ||
624 | static inline int rb_null_event(struct ring_buffer_event *event) | 633 | static inline int rb_null_event(struct ring_buffer_event *event) |
625 | { | 634 | { |
@@ -1220,6 +1229,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, | |||
1220 | preempt_enable_notrace(); | 1229 | preempt_enable_notrace(); |
1221 | return NULL; | 1230 | return NULL; |
1222 | } | 1231 | } |
1232 | EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); | ||
1223 | 1233 | ||
1224 | static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, | 1234 | static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, |
1225 | struct ring_buffer_event *event) | 1235 | struct ring_buffer_event *event) |
@@ -1269,6 +1279,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer, | |||
1269 | 1279 | ||
1270 | return 0; | 1280 | return 0; |
1271 | } | 1281 | } |
1282 | EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit); | ||
1272 | 1283 | ||
1273 | /** | 1284 | /** |
1274 | * ring_buffer_write - write data to the buffer without reserving | 1285 | * ring_buffer_write - write data to the buffer without reserving |
@@ -1334,6 +1345,7 @@ int ring_buffer_write(struct ring_buffer *buffer, | |||
1334 | 1345 | ||
1335 | return ret; | 1346 | return ret; |
1336 | } | 1347 | } |
1348 | EXPORT_SYMBOL_GPL(ring_buffer_write); | ||
1337 | 1349 | ||
1338 | static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) | 1350 | static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) |
1339 | { | 1351 | { |
@@ -1360,6 +1372,7 @@ void ring_buffer_record_disable(struct ring_buffer *buffer) | |||
1360 | { | 1372 | { |
1361 | atomic_inc(&buffer->record_disabled); | 1373 | atomic_inc(&buffer->record_disabled); |
1362 | } | 1374 | } |
1375 | EXPORT_SYMBOL_GPL(ring_buffer_record_disable); | ||
1363 | 1376 | ||
1364 | /** | 1377 | /** |
1365 | * ring_buffer_record_enable - enable writes to the buffer | 1378 | * ring_buffer_record_enable - enable writes to the buffer |
@@ -1372,6 +1385,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer) | |||
1372 | { | 1385 | { |
1373 | atomic_dec(&buffer->record_disabled); | 1386 | atomic_dec(&buffer->record_disabled); |
1374 | } | 1387 | } |
1388 | EXPORT_SYMBOL_GPL(ring_buffer_record_enable); | ||
1375 | 1389 | ||
1376 | /** | 1390 | /** |
1377 | * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer | 1391 | * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer |
@@ -1393,6 +1407,7 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu) | |||
1393 | cpu_buffer = buffer->buffers[cpu]; | 1407 | cpu_buffer = buffer->buffers[cpu]; |
1394 | atomic_inc(&cpu_buffer->record_disabled); | 1408 | atomic_inc(&cpu_buffer->record_disabled); |
1395 | } | 1409 | } |
1410 | EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu); | ||
1396 | 1411 | ||
1397 | /** | 1412 | /** |
1398 | * ring_buffer_record_enable_cpu - enable writes to the buffer | 1413 | * ring_buffer_record_enable_cpu - enable writes to the buffer |
@@ -1412,6 +1427,7 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu) | |||
1412 | cpu_buffer = buffer->buffers[cpu]; | 1427 | cpu_buffer = buffer->buffers[cpu]; |
1413 | atomic_dec(&cpu_buffer->record_disabled); | 1428 | atomic_dec(&cpu_buffer->record_disabled); |
1414 | } | 1429 | } |
1430 | EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); | ||
1415 | 1431 | ||
1416 | /** | 1432 | /** |
1417 | * ring_buffer_entries_cpu - get the number of entries in a cpu buffer | 1433 | * ring_buffer_entries_cpu - get the number of entries in a cpu buffer |
@@ -1428,6 +1444,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu) | |||
1428 | cpu_buffer = buffer->buffers[cpu]; | 1444 | cpu_buffer = buffer->buffers[cpu]; |
1429 | return cpu_buffer->entries; | 1445 | return cpu_buffer->entries; |
1430 | } | 1446 | } |
1447 | EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); | ||
1431 | 1448 | ||
1432 | /** | 1449 | /** |
1433 | * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer | 1450 | * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer |
@@ -1444,6 +1461,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu) | |||
1444 | cpu_buffer = buffer->buffers[cpu]; | 1461 | cpu_buffer = buffer->buffers[cpu]; |
1445 | return cpu_buffer->overrun; | 1462 | return cpu_buffer->overrun; |
1446 | } | 1463 | } |
1464 | EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu); | ||
1447 | 1465 | ||
1448 | /** | 1466 | /** |
1449 | * ring_buffer_entries - get the number of entries in a buffer | 1467 | * ring_buffer_entries - get the number of entries in a buffer |
@@ -1466,6 +1484,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer) | |||
1466 | 1484 | ||
1467 | return entries; | 1485 | return entries; |
1468 | } | 1486 | } |
1487 | EXPORT_SYMBOL_GPL(ring_buffer_entries); | ||
1469 | 1488 | ||
1470 | /** | 1489 | /** |
1471 | * ring_buffer_overrun_cpu - get the number of overruns in buffer | 1490 | * ring_buffer_overrun_cpu - get the number of overruns in buffer |
@@ -1488,6 +1507,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer) | |||
1488 | 1507 | ||
1489 | return overruns; | 1508 | return overruns; |
1490 | } | 1509 | } |
1510 | EXPORT_SYMBOL_GPL(ring_buffer_overruns); | ||
1491 | 1511 | ||
1492 | /** | 1512 | /** |
1493 | * ring_buffer_iter_reset - reset an iterator | 1513 | * ring_buffer_iter_reset - reset an iterator |
@@ -1513,6 +1533,7 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter) | |||
1513 | else | 1533 | else |
1514 | iter->read_stamp = iter->head_page->time_stamp; | 1534 | iter->read_stamp = iter->head_page->time_stamp; |
1515 | } | 1535 | } |
1536 | EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); | ||
1516 | 1537 | ||
1517 | /** | 1538 | /** |
1518 | * ring_buffer_iter_empty - check if an iterator has no more to read | 1539 | * ring_buffer_iter_empty - check if an iterator has no more to read |
@@ -1527,6 +1548,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter) | |||
1527 | return iter->head_page == cpu_buffer->commit_page && | 1548 | return iter->head_page == cpu_buffer->commit_page && |
1528 | iter->head == rb_commit_index(cpu_buffer); | 1549 | iter->head == rb_commit_index(cpu_buffer); |
1529 | } | 1550 | } |
1551 | EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); | ||
1530 | 1552 | ||
1531 | static void | 1553 | static void |
1532 | rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, | 1554 | rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, |
@@ -1797,6 +1819,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
1797 | 1819 | ||
1798 | return NULL; | 1820 | return NULL; |
1799 | } | 1821 | } |
1822 | EXPORT_SYMBOL_GPL(ring_buffer_peek); | ||
1800 | 1823 | ||
1801 | /** | 1824 | /** |
1802 | * ring_buffer_iter_peek - peek at the next event to be read | 1825 | * ring_buffer_iter_peek - peek at the next event to be read |
@@ -1867,6 +1890,7 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | |||
1867 | 1890 | ||
1868 | return NULL; | 1891 | return NULL; |
1869 | } | 1892 | } |
1893 | EXPORT_SYMBOL_GPL(ring_buffer_iter_peek); | ||
1870 | 1894 | ||
1871 | /** | 1895 | /** |
1872 | * ring_buffer_consume - return an event and consume it | 1896 | * ring_buffer_consume - return an event and consume it |
@@ -1894,6 +1918,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
1894 | 1918 | ||
1895 | return event; | 1919 | return event; |
1896 | } | 1920 | } |
1921 | EXPORT_SYMBOL_GPL(ring_buffer_consume); | ||
1897 | 1922 | ||
1898 | /** | 1923 | /** |
1899 | * ring_buffer_read_start - start a non consuming read of the buffer | 1924 | * ring_buffer_read_start - start a non consuming read of the buffer |
@@ -1934,6 +1959,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu) | |||
1934 | 1959 | ||
1935 | return iter; | 1960 | return iter; |
1936 | } | 1961 | } |
1962 | EXPORT_SYMBOL_GPL(ring_buffer_read_start); | ||
1937 | 1963 | ||
1938 | /** | 1964 | /** |
1939 | * ring_buffer_finish - finish reading the iterator of the buffer | 1965 | * ring_buffer_finish - finish reading the iterator of the buffer |
@@ -1950,6 +1976,7 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter) | |||
1950 | atomic_dec(&cpu_buffer->record_disabled); | 1976 | atomic_dec(&cpu_buffer->record_disabled); |
1951 | kfree(iter); | 1977 | kfree(iter); |
1952 | } | 1978 | } |
1979 | EXPORT_SYMBOL_GPL(ring_buffer_read_finish); | ||
1953 | 1980 | ||
1954 | /** | 1981 | /** |
1955 | * ring_buffer_read - read the next item in the ring buffer by the iterator | 1982 | * ring_buffer_read - read the next item in the ring buffer by the iterator |
@@ -1971,6 +1998,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) | |||
1971 | 1998 | ||
1972 | return event; | 1999 | return event; |
1973 | } | 2000 | } |
2001 | EXPORT_SYMBOL_GPL(ring_buffer_read); | ||
1974 | 2002 | ||
1975 | /** | 2003 | /** |
1976 | * ring_buffer_size - return the size of the ring buffer (in bytes) | 2004 | * ring_buffer_size - return the size of the ring buffer (in bytes) |
@@ -1980,6 +2008,7 @@ unsigned long ring_buffer_size(struct ring_buffer *buffer) | |||
1980 | { | 2008 | { |
1981 | return BUF_PAGE_SIZE * buffer->pages; | 2009 | return BUF_PAGE_SIZE * buffer->pages; |
1982 | } | 2010 | } |
2011 | EXPORT_SYMBOL_GPL(ring_buffer_size); | ||
1983 | 2012 | ||
1984 | static void | 2013 | static void |
1985 | rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) | 2014 | rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) |
@@ -2022,6 +2051,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) | |||
2022 | 2051 | ||
2023 | spin_unlock_irqrestore(&cpu_buffer->lock, flags); | 2052 | spin_unlock_irqrestore(&cpu_buffer->lock, flags); |
2024 | } | 2053 | } |
2054 | EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); | ||
2025 | 2055 | ||
2026 | /** | 2056 | /** |
2027 | * ring_buffer_reset - reset a ring buffer | 2057 | * ring_buffer_reset - reset a ring buffer |
@@ -2034,6 +2064,7 @@ void ring_buffer_reset(struct ring_buffer *buffer) | |||
2034 | for_each_buffer_cpu(buffer, cpu) | 2064 | for_each_buffer_cpu(buffer, cpu) |
2035 | ring_buffer_reset_cpu(buffer, cpu); | 2065 | ring_buffer_reset_cpu(buffer, cpu); |
2036 | } | 2066 | } |
2067 | EXPORT_SYMBOL_GPL(ring_buffer_reset); | ||
2037 | 2068 | ||
2038 | /** | 2069 | /** |
2039 | * ring_buffer_empty - is the ring buffer empty? | 2070 | * ring_buffer_empty - is the ring buffer empty? |
@@ -2052,6 +2083,7 @@ int ring_buffer_empty(struct ring_buffer *buffer) | |||
2052 | } | 2083 | } |
2053 | return 1; | 2084 | return 1; |
2054 | } | 2085 | } |
2086 | EXPORT_SYMBOL_GPL(ring_buffer_empty); | ||
2055 | 2087 | ||
2056 | /** | 2088 | /** |
2057 | * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty? | 2089 | * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty? |
@@ -2068,6 +2100,7 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu) | |||
2068 | cpu_buffer = buffer->buffers[cpu]; | 2100 | cpu_buffer = buffer->buffers[cpu]; |
2069 | return rb_per_cpu_empty(cpu_buffer); | 2101 | return rb_per_cpu_empty(cpu_buffer); |
2070 | } | 2102 | } |
2103 | EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu); | ||
2071 | 2104 | ||
2072 | /** | 2105 | /** |
2073 | * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers | 2106 | * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers |
@@ -2117,6 +2150,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, | |||
2117 | 2150 | ||
2118 | return 0; | 2151 | return 0; |
2119 | } | 2152 | } |
2153 | EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); | ||
2120 | 2154 | ||
2121 | static ssize_t | 2155 | static ssize_t |
2122 | rb_simple_read(struct file *filp, char __user *ubuf, | 2156 | rb_simple_read(struct file *filp, char __user *ubuf, |