path: root/kernel/trace
author    Linus Torvalds <torvalds@linux-foundation.org>    2008-12-30 20:31:25 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2008-12-30 20:31:25 -0500
commit    526ea064f953fc5ad2fb905b537f490b9374a0f0 (patch)
tree      c4ff0cb65ce6442863c7c342f641a41f0995329a /kernel/trace
parent    db5e53fbf0abf5cadc83be57032242e5e7c6c394 (diff)
parent    d69d59f49763e6bd047c591c6c1f84c8e13da931 (diff)
Merge branch 'oprofile-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'oprofile-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  oprofile: select RING_BUFFER
  ring_buffer: adding EXPORT_SYMBOLs
  oprofile: fix lost sample counter
  oprofile: remove nr_available_slots()
  oprofile: port to the new ring_buffer
  ring_buffer: add remaining cpu functions to ring_buffer.h
  oprofile: moving cpu_buffer_reset() to cpu_buffer.h
  oprofile: adding cpu_buffer_entries()
  oprofile: adding cpu_buffer_write_commit()
  oprofile: adding cpu buffer r/w access functions
  ftrace: remove unused function arg in trace_iterator_increment()
  ring_buffer: update description for ring_buffer_alloc()
  oprofile: set values to default when creating oprofilefs
  oprofile: implement switch/case in buffer_sync.c
  x86/oprofile: cleanup IBS init/exit functions in op_model_amd.c
  x86/oprofile: reordering IBS code in op_model_amd.c
  oprofile: fix typo
  oprofile: whitspace changes only
  oprofile: update comment for oprofile_add_sample()
  oprofile: comment cleanup
Diffstat (limited to 'kernel/trace')
-rw-r--r--   kernel/trace/ring_buffer.c   36
-rw-r--r--   kernel/trace/trace.c          4
2 files changed, 37 insertions, 3 deletions
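
Most of the kernel/trace portion of this merge simply adds EXPORT_SYMBOL_GPL() markers so that modules (oprofile in this series) can drive the ring buffer directly. As a rough illustration of what the exports enable, here is a minimal, hypothetical module sketch; the rb_demo_* names and the struct sample payload are made up, and it assumes the ring_buffer_lock_reserve()/ring_buffer_unlock_commit() signatures of this tree, which still carry an irq-flags argument:

/* Hypothetical sketch only: write one record into a private ring buffer
 * and drain it on unload, using symbols exported by this merge. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/ring_buffer.h>

struct sample {                         /* made-up payload */
        unsigned long value;
};

static struct ring_buffer *rb;

static int __init rb_demo_init(void)
{
        struct ring_buffer_event *event;
        struct sample *s;
        unsigned long irq_flags;

        /* @size is per cpu, see the ring_buffer_alloc() comment fix below */
        rb = ring_buffer_alloc(4096, RB_FL_OVERWRITE);
        if (!rb)
                return -ENOMEM;

        event = ring_buffer_lock_reserve(rb, sizeof(*s), &irq_flags);
        if (event) {
                s = ring_buffer_event_data(event);
                s->value = 42;
                ring_buffer_unlock_commit(rb, event, irq_flags);
        }
        return 0;
}

static void __exit rb_demo_exit(void)
{
        struct ring_buffer_event *event;
        u64 ts;
        int cpu;

        /* consuming read: events are removed from the buffer as they are read */
        for_each_online_cpu(cpu) {
                while ((event = ring_buffer_consume(rb, cpu, &ts))) {
                        struct sample *s = ring_buffer_event_data(event);

                        pr_info("rb_demo: cpu%d value=%lu ts=%llu\n",
                                cpu, s->value, (unsigned long long)ts);
                }
        }
        ring_buffer_free(rb);
}

module_init(rb_demo_init);
module_exit(rb_demo_exit);
MODULE_LICENSE("GPL");                  /* required: the exports are _GPL */

The oprofile port itself does not call the ring buffer from arbitrary contexts; per the shortlog above it wraps these calls behind its own cpu-buffer read/write helpers ("oprofile: adding cpu buffer r/w access functions").
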
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 76f34c0ef29c..1d601a7c4587 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -69,6 +69,7 @@ void tracing_on(void)
 {
         set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
 }
+EXPORT_SYMBOL_GPL(tracing_on);
 
 /**
  * tracing_off - turn off all tracing buffers
@@ -82,6 +83,7 @@ void tracing_off(void)
 {
         clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
 }
+EXPORT_SYMBOL_GPL(tracing_off);
 
 /**
  * tracing_off_permanent - permanently disable ring buffers
@@ -111,12 +113,14 @@ u64 ring_buffer_time_stamp(int cpu)
 
         return time;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
 
 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
 {
         /* Just stupid testing the normalize function and deltas */
         *ts >>= DEBUG_SHIFT;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
 
 #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
 #define RB_ALIGNMENT_SHIFT      2
@@ -166,6 +170,7 @@ unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 {
         return rb_event_length(event);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 
 /* inline for ring buffer fast paths */
 static inline void *
@@ -187,6 +192,7 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
 {
         return rb_event_data(event);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 
 #define for_each_buffer_cpu(buffer, cpu)                \
         for_each_cpu_mask(cpu, buffer->cpumask)
@@ -427,7 +433,7 @@ extern int ring_buffer_page_too_big(void);
 
 /**
  * ring_buffer_alloc - allocate a new ring_buffer
- * @size: the size in bytes that is needed.
+ * @size: the size in bytes per cpu that is needed.
  * @flags: attributes to set for the ring buffer.
  *
  * Currently the only flag that is available is the RB_FL_OVERWRITE
@@ -490,6 +496,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
         kfree(buffer);
         return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_alloc);
 
 /**
  * ring_buffer_free - free a ring buffer.
@@ -505,6 +512,7 @@ ring_buffer_free(struct ring_buffer *buffer)
 
         kfree(buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_free);
 
 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
 
@@ -680,6 +688,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
         mutex_unlock(&buffer->mutex);
         return -ENOMEM;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_resize);
 
 static inline int rb_null_event(struct ring_buffer_event *event)
 {
@@ -1304,6 +1313,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
         ftrace_preempt_enable(resched);
         return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
 
 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
                       struct ring_buffer_event *event)
@@ -1350,6 +1360,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 
         return 0;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
 
 /**
  * ring_buffer_write - write data to the buffer without reserving
@@ -1411,6 +1422,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
         return ret;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_write);
 
 static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
 {
@@ -1437,6 +1449,7 @@ void ring_buffer_record_disable(struct ring_buffer *buffer)
 {
         atomic_inc(&buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
 
 /**
  * ring_buffer_record_enable - enable writes to the buffer
@@ -1449,6 +1462,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer)
 {
         atomic_dec(&buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
 
 /**
  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
@@ -1470,6 +1484,7 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
         cpu_buffer = buffer->buffers[cpu];
         atomic_inc(&cpu_buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
 
 /**
  * ring_buffer_record_enable_cpu - enable writes to the buffer
@@ -1489,6 +1504,7 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
         cpu_buffer = buffer->buffers[cpu];
         atomic_dec(&cpu_buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
 
 /**
  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
@@ -1505,6 +1521,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
         cpu_buffer = buffer->buffers[cpu];
         return cpu_buffer->entries;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 
 /**
  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
@@ -1521,6 +1538,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
         cpu_buffer = buffer->buffers[cpu];
         return cpu_buffer->overrun;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
 
 /**
  * ring_buffer_entries - get the number of entries in a buffer
@@ -1543,6 +1561,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
 
         return entries;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_entries);
 
 /**
  * ring_buffer_overrun_cpu - get the number of overruns in buffer
@@ -1565,6 +1584,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
 
         return overruns;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_overruns);
 
 static void rb_iter_reset(struct ring_buffer_iter *iter)
 {
@@ -1600,6 +1620,7 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
         rb_iter_reset(iter);
         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
 
 /**
  * ring_buffer_iter_empty - check if an iterator has no more to read
@@ -1614,6 +1635,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
         return iter->head_page == cpu_buffer->commit_page &&
                 iter->head == rb_commit_index(cpu_buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
 
 static void
 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
@@ -1880,6 +1902,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 
         return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_peek);
 
 static struct ring_buffer_event *
 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
@@ -1940,6 +1963,7 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 
         return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
 
 /**
  * ring_buffer_peek - peek at the next event to be read
@@ -2017,6 +2041,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 
         return event;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_consume);
 
 /**
  * ring_buffer_read_start - start a non consuming read of the buffer
@@ -2059,6 +2084,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 
         return iter;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read_start);
 
 /**
  * ring_buffer_finish - finish reading the iterator of the buffer
@@ -2075,6 +2101,7 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)
         atomic_dec(&cpu_buffer->record_disabled);
         kfree(iter);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
 
 /**
  * ring_buffer_read - read the next item in the ring buffer by the iterator
@@ -2101,6 +2128,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 
         return event;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read);
 
 /**
  * ring_buffer_size - return the size of the ring buffer (in bytes)
@@ -2110,6 +2138,7 @@ unsigned long ring_buffer_size(struct ring_buffer *buffer)
 {
         return BUF_PAGE_SIZE * buffer->pages;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_size);
 
 static void
 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
@@ -2156,6 +2185,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 
         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
 
 /**
  * ring_buffer_reset - reset a ring buffer
@@ -2168,6 +2198,7 @@ void ring_buffer_reset(struct ring_buffer *buffer)
         for_each_buffer_cpu(buffer, cpu)
                 ring_buffer_reset_cpu(buffer, cpu);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_reset);
 
 /**
  * rind_buffer_empty - is the ring buffer empty?
@@ -2186,6 +2217,7 @@ int ring_buffer_empty(struct ring_buffer *buffer)
         }
         return 1;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_empty);
 
 /**
  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
@@ -2202,6 +2234,7 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
         cpu_buffer = buffer->buffers[cpu];
         return rb_per_cpu_empty(cpu_buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
 
 /**
  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
@@ -2250,6 +2283,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 
         return 0;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
 
 static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
                               struct buffer_data_page *bpage)
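
The exports above also cover the non-consuming iterator path (ring_buffer_read_start() / ring_buffer_read() / ring_buffer_read_finish()), whose signatures appear in the hunk headers. A small sketch of that usage, with a hypothetical helper name and minimal error handling:

#include <linux/kernel.h>
#include <linux/ring_buffer.h>

/* Hypothetical helper: walk one cpu's buffer without consuming it.
 * ring_buffer_read_start() disables recording on that cpu buffer and
 * ring_buffer_read_finish() re-enables it (see the atomic_dec above). */
static void rb_demo_dump_cpu(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_iter *iter;
        struct ring_buffer_event *event;
        u64 ts;

        iter = ring_buffer_read_start(buffer, cpu);
        if (!iter)
                return;

        while ((event = ring_buffer_read(iter, &ts)) != NULL)
                pr_info("cpu%d: %u byte event at %llu\n", cpu,
                        ring_buffer_event_length(event),
                        (unsigned long long)ts);

        ring_buffer_read_finish(iter);
}
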
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f4bb3800318b..3608f6cb2f7a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1310,7 +1310,7 @@ enum trace_file_type {
         TRACE_FILE_ANNOTATE     = 2,
 };
 
-static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
+static void trace_iterator_increment(struct trace_iterator *iter)
 {
         /* Don't allow ftrace to trace into the ring buffers */
         ftrace_disable_cpu();
@@ -1389,7 +1389,7 @@ static void *find_next_entry_inc(struct trace_iterator *iter)
         iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
 
         if (iter->ent)
-                trace_iterator_increment(iter, iter->cpu);
+                trace_iterator_increment(iter);
 
         return iter->ent ? iter : NULL;
 }