author		Robert Richter <robert.richter@amd.com>	2009-01-08 08:27:34 -0500
committer	Robert Richter <robert.richter@amd.com>	2009-01-08 08:27:34 -0500
commit		d2852b932f0bb5e89177aa27c7bcf07f4167e129 (patch)
tree		96d975ae155ba307acd07968939fff22afedddf7 /kernel
parent		4a6908a3a050aacc9c3a2f36b276b46c0629ad91 (diff)
parent		14f0ca8eaea42a5b5a69cfcb699665dd2618db5f (diff)
Merge branch 'oprofile/ring_buffer' into oprofile/oprofile-for-tip
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/trace/ring_buffer.c	44
-rw-r--r--	kernel/trace/trace.c	 4
2 files changed, 44 insertions, 4 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 668bbb5ef2bd..d42b882dfe4b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -31,6 +31,7 @@ void tracing_on(void)
 {
 	ring_buffers_off = 0;
 }
+EXPORT_SYMBOL_GPL(tracing_on);
 
 /**
  * tracing_off - turn off all tracing buffers
@@ -44,6 +45,7 @@ void tracing_off(void)
 {
 	ring_buffers_off = 1;
 }
+EXPORT_SYMBOL_GPL(tracing_off);
 
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
@@ -60,12 +62,14 @@ u64 ring_buffer_time_stamp(int cpu)
 
 	return time;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
 
 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
 {
 	/* Just stupid testing the normalize function and deltas */
 	*ts >>= DEBUG_SHIFT;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
 
 #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
 #define RB_ALIGNMENT_SHIFT	2
@@ -113,8 +117,15 @@ rb_event_length(struct ring_buffer_event *event)
  */
 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 {
-	return rb_event_length(event);
+	unsigned length = rb_event_length(event);
+	if (event->type != RINGBUF_TYPE_DATA)
+		return length;
+	length -= RB_EVNT_HDR_SIZE;
+	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
+		length -= sizeof(event->array[0]);
+	return length;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 
 /* inline for ring buffer fast paths */
 static inline void *
@@ -136,6 +147,7 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
 {
 	return rb_event_data(event);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 
 #define for_each_buffer_cpu(buffer, cpu)		\
 	for_each_cpu_mask(cpu, buffer->cpumask)
@@ -381,7 +393,7 @@ extern int ring_buffer_page_too_big(void)
 
 /**
  * ring_buffer_alloc - allocate a new ring_buffer
- * @size: the size in bytes that is needed.
+ * @size: the size in bytes per cpu that is needed.
  * @flags: attributes to set for the ring buffer.
  *
  * Currently the only flag that is available is the RB_FL_OVERWRITE
@@ -444,6 +456,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	kfree(buffer);
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_alloc);
 
 /**
  * ring_buffer_free - free a ring buffer.
@@ -459,6 +472,7 @@ ring_buffer_free(struct ring_buffer *buffer)
 
 	kfree(buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_free);
 
 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
 
@@ -620,6 +634,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	mutex_unlock(&buffer->mutex);
 	return -ENOMEM;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_resize);
 
 static inline int rb_null_event(struct ring_buffer_event *event)
 {
@@ -1220,6 +1235,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 	preempt_enable_notrace();
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
 
 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 		      struct ring_buffer_event *event)
@@ -1269,6 +1285,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
 
 /**
  * ring_buffer_write - write data to the buffer without reserving
@@ -1334,6 +1351,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_write);
 
 static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
 {
@@ -1360,6 +1378,7 @@ void ring_buffer_record_disable(struct ring_buffer *buffer)
 {
 	atomic_inc(&buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
 
 /**
  * ring_buffer_record_enable - enable writes to the buffer
@@ -1372,6 +1391,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer)
 {
 	atomic_dec(&buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
 
 /**
  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
@@ -1393,6 +1413,7 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	atomic_inc(&cpu_buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
 
 /**
  * ring_buffer_record_enable_cpu - enable writes to the buffer
@@ -1412,6 +1433,7 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	atomic_dec(&cpu_buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
 
 /**
  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
@@ -1428,6 +1450,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	return cpu_buffer->entries;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 
 /**
  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
@@ -1444,6 +1467,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	return cpu_buffer->overrun;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
 
 /**
  * ring_buffer_entries - get the number of entries in a buffer
@@ -1466,6 +1490,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
 
 	return entries;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_entries);
 
 /**
  * ring_buffer_overrun_cpu - get the number of overruns in buffer
@@ -1488,6 +1513,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
 
 	return overruns;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_overruns);
 
 /**
  * ring_buffer_iter_reset - reset an iterator
@@ -1513,6 +1539,7 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
 	else
 		iter->read_stamp = iter->head_page->time_stamp;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
 
 /**
  * ring_buffer_iter_empty - check if an iterator has no more to read
@@ -1527,6 +1554,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
 	return iter->head_page == cpu_buffer->commit_page &&
 		iter->head == rb_commit_index(cpu_buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
 
 static void
 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
@@ -1797,6 +1825,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_peek);
 
 /**
  * ring_buffer_iter_peek - peek at the next event to be read
@@ -1867,6 +1896,7 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
 
 /**
  * ring_buffer_consume - return an event and consume it
@@ -1894,6 +1924,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 
 	return event;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_consume);
 
 /**
  * ring_buffer_read_start - start a non consuming read of the buffer
@@ -1934,6 +1965,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 
 	return iter;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read_start);
 
 /**
  * ring_buffer_finish - finish reading the iterator of the buffer
@@ -1950,6 +1982,7 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)
 	atomic_dec(&cpu_buffer->record_disabled);
 	kfree(iter);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
 
 /**
  * ring_buffer_read - read the next item in the ring buffer by the iterator
@@ -1971,6 +2004,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 
 	return event;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read);
 
 /**
  * ring_buffer_size - return the size of the ring buffer (in bytes)
@@ -1980,6 +2014,7 @@ unsigned long ring_buffer_size(struct ring_buffer *buffer)
 {
 	return BUF_PAGE_SIZE * buffer->pages;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_size);
 
 static void
 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
@@ -2022,6 +2057,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 
 	spin_unlock_irqrestore(&cpu_buffer->lock, flags);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
 
 /**
  * ring_buffer_reset - reset a ring buffer
@@ -2034,6 +2070,7 @@ void ring_buffer_reset(struct ring_buffer *buffer)
 	for_each_buffer_cpu(buffer, cpu)
 		ring_buffer_reset_cpu(buffer, cpu);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_reset);
 
 /**
  * rind_buffer_empty - is the ring buffer empty?
@@ -2052,6 +2089,7 @@ int ring_buffer_empty(struct ring_buffer *buffer)
 	}
 	return 1;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_empty);
 
 /**
  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
@@ -2068,6 +2106,7 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	return rb_per_cpu_empty(cpu_buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
 
 /**
  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
@@ -2117,6 +2156,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
 
 static ssize_t
 rb_simple_read(struct file *filp, char __user *ubuf,
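
The point of sprinkling EXPORT_SYMBOL_GPL() over ring_buffer.c is to make the ring buffer reachable from modular code such as the oprofile driver. Below is a minimal sketch of how a GPL module could drive the exported write/consume path; the module name, buffer size, payload, and the exact ring_buffer_write() argument order are illustrative assumptions layered on the signatures visible in this diff, not something this commit adds.

/*
 * Illustrative sketch only (not part of this commit): a GPL module
 * exercising the ring buffer symbols exported above. Module name,
 * buffer size and payload layout are assumptions for demonstration.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/ring_buffer.h>

static struct ring_buffer *rb;

static int __init rb_demo_init(void)
{
	struct ring_buffer_event *event;
	char payload[] = "hello";
	u64 ts;
	int cpu;

	/* 64 KiB per cpu; RB_FL_OVERWRITE lets new events overwrite old ones */
	rb = ring_buffer_alloc(65536, RB_FL_OVERWRITE);
	if (!rb)
		return -ENOMEM;

	/* write without an explicit reserve/commit pair (argument order assumed) */
	ring_buffer_write(rb, sizeof(payload), payload);

	/* consume events back, per cpu */
	for_each_online_cpu(cpu) {
		while ((event = ring_buffer_consume(rb, cpu, &ts)))
			/* payload size only; the header is already subtracted
			 * by the ring_buffer_event_length() change above */
			pr_info("event: %u bytes at %llu\n",
				ring_buffer_event_length(event),
				(unsigned long long)ts);
	}
	return 0;
}

static void __exit rb_demo_exit(void)
{
	ring_buffer_free(rb);
}

module_init(rb_demo_init);
module_exit(rb_demo_exit);
MODULE_LICENSE("GPL");	/* required: the symbols are GPL-only exports */
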
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d86e3252f300..a96b335fe75c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -914,7 +914,7 @@ enum trace_file_type {
 	TRACE_FILE_LAT_FMT	= 1,
 };
 
-static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
+static void trace_iterator_increment(struct trace_iterator *iter)
 {
 	/* Don't allow ftrace to trace into the ring buffers */
 	ftrace_disable_cpu();
@@ -993,7 +993,7 @@ static void *find_next_entry_inc(struct trace_iterator *iter)
 	iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
 
 	if (iter->ent)
-		trace_iterator_increment(iter, iter->cpu);
+		trace_iterator_increment(iter);
 
 	return iter->ent ? iter : NULL;
 }
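
Besides the write/consume path, the iterator entry points exported in the ring_buffer.c hunks (ring_buffer_read_start(), ring_buffer_iter_empty(), ring_buffer_read(), ring_buffer_read_finish()) also become usable from modules. A hedged sketch of a non-consuming per-cpu scan that could sit alongside the demo module above; the helper name and the pr_info() output are assumptions:

/*
 * Hedged sketch (not from this commit): a non-consuming scan of one
 * cpu's buffer using the exported iterator API. ring_buffer_read_start()
 * disables recording on that cpu for the duration of the read and
 * ring_buffer_read_finish() re-enables it, as seen in the hunks above.
 */
static void rb_demo_dump_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_start(buffer, cpu);
	if (!iter)
		return;

	while (!ring_buffer_iter_empty(iter)) {
		event = ring_buffer_read(iter, &ts);
		if (!event)
			break;
		pr_info("cpu%d: %u byte event\n",
			cpu, ring_buffer_event_length(event));
	}

	ring_buffer_read_finish(iter);
}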