-rw-r--r--  kernel/trace/ring_buffer.c  71
1 file changed, 54 insertions(+), 17 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 092fe0c8fdae..c8d2a66e1d1f 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -218,17 +218,12 @@ enum {
 
 static inline int rb_null_event(struct ring_buffer_event *event)
 {
-	return event->type_len == RINGBUF_TYPE_PADDING
-			&& event->time_delta == 0;
-}
-
-static inline int rb_discarded_event(struct ring_buffer_event *event)
-{
-	return event->type_len == RINGBUF_TYPE_PADDING && event->time_delta;
+	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
 }
 
 static void rb_event_set_padding(struct ring_buffer_event *event)
 {
+	/* padding has a NULL time_delta */
 	event->type_len = RINGBUF_TYPE_PADDING;
 	event->time_delta = 0;
 }
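The hunk above folds the two padding checks into one test: a PADDING event with a zero time_delta is the "null" padding at the end of a page, while a non-zero time_delta simply marks space left by a discarded event, so the separate rb_discarded_event() helper can go away. A minimal userspace sketch of that convention, using a simplified struct layout and an illustrative enum value that are assumptions, not taken from the patch:

#include <stdio.h>

enum { RINGBUF_TYPE_PADDING = 29 };		/* illustrative value only */

struct ring_buffer_event {
	unsigned type_len:5, time_delta:27;	/* simplified layout */
};

static int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

int main(void)
{
	struct ring_buffer_event pad  = { RINGBUF_TYPE_PADDING, 0 };	/* end-of-page padding */
	struct ring_buffer_event gone = { RINGBUF_TYPE_PADDING, 1 };	/* discarded event */

	printf("pad: null=%d  gone: null=%d\n",
	       rb_null_event(&pad), rb_null_event(&gone));
	return 0;
}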
@@ -1778,9 +1773,6 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	event->type_len = RINGBUF_TYPE_PADDING;
 	/* time delta must be non zero */
 	event->time_delta = 1;
-	/* Account for this as an entry */
-	local_inc(&tail_page->entries);
-	local_inc(&cpu_buffer->entries);
 
 	/* Set write to end of buffer */
 	length = (tail + length) - BUF_PAGE_SIZE;
@@ -2269,18 +2261,23 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
 
-static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
+static void
+rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
 		      struct ring_buffer_event *event)
 {
-	local_inc(&cpu_buffer->entries);
-
 	/*
 	 * The event first in the commit queue updates the
 	 * time stamp.
 	 */
 	if (rb_event_is_commit(cpu_buffer, event))
 		cpu_buffer->write_stamp += event->time_delta;
+}
 
+static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
+		      struct ring_buffer_event *event)
+{
+	local_inc(&cpu_buffer->entries);
+	rb_update_write_stamp(cpu_buffer, event);
 	rb_end_commit(cpu_buffer);
 }
 
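Splitting rb_update_write_stamp() out of rb_commit() lets the discard path further down share the "commit event advances the write stamp" step without also counting an entry. A standalone sketch of that factoring; the stand-in types and the is_commit flag (in place of rb_event_is_commit()) are assumptions for illustration, not kernel API:

#include <stdio.h>

struct cpu_buf  { unsigned long long write_stamp; long entries; };
struct rb_event { unsigned time_delta; int is_commit; };

/* shared step: only the event at the head of the commit queue moves the stamp */
static void update_write_stamp(struct cpu_buf *cb, struct rb_event *ev)
{
	if (ev->is_commit)
		cb->write_stamp += ev->time_delta;
}

/* normal commit: count the entry, then update the stamp */
static void commit(struct cpu_buf *cb, struct rb_event *ev)
{
	cb->entries++;
	update_write_stamp(cb, ev);
}

/* fallback when a discard fails: the event stays visible to the reader,
 * so keep the stamp correct, but do not count it as an entry */
static void discard_fallback(struct cpu_buf *cb, struct rb_event *ev)
{
	update_write_stamp(cb, ev);
}

int main(void)
{
	struct cpu_buf cb = { 0, 0 };
	struct rb_event ev = { 5, 1 };

	commit(&cb, &ev);		/* entries = 1, write_stamp = 5 */
	discard_fallback(&cb, &ev);	/* entries unchanged, write_stamp = 10 */
	printf("entries=%ld write_stamp=%llu\n", cb.entries, cb.write_stamp);
	return 0;
}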
@@ -2327,6 +2324,46 @@ static inline void rb_event_discard(struct ring_buffer_event *event)
 	event->time_delta = 1;
 }
 
+/*
+ * Decrement the entries to the page that an event is on.
+ * The event does not even need to exist, only the pointer
+ * to the page it is on. This may only be called before the commit
+ * takes place.
+ */
+static inline void
+rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
+		   struct ring_buffer_event *event)
+{
+	unsigned long addr = (unsigned long)event;
+	struct buffer_page *bpage = cpu_buffer->commit_page;
+	struct buffer_page *start;
+
+	addr &= PAGE_MASK;
+
+	/* Do the likely case first */
+	if (likely(bpage->page == (void *)addr)) {
+		local_dec(&bpage->entries);
+		return;
+	}
+
+	/*
+	 * Because the commit page may be on the reader page we
+	 * start with the next page and check the end loop there.
+	 */
+	rb_inc_page(cpu_buffer, &bpage);
+	start = bpage;
+	do {
+		if (bpage->page == (void *)addr) {
+			local_dec(&bpage->entries);
+			return;
+		}
+		rb_inc_page(cpu_buffer, &bpage);
+	} while (bpage != start);
+
+	/* commit not part of this buffer?? */
+	RB_WARN_ON(cpu_buffer, 1);
+}
+
 /**
  * ring_buffer_commit_discard - discard an event that has not been committed
  * @buffer: the ring buffer
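rb_decrement_entry() above walks the per-cpu ring of buffer pages: it tries the commit page first (the likely case), then steps to the next page and loops until the walk returns to its starting point, warning if the event's page is never found. A userspace sketch of the same circular-walk pattern over a simplified page ring; the node type and names here are assumptions, not the kernel's structures:

#include <stdio.h>

struct page_node {
	struct page_node *next;	/* circular, singly linked ring of pages */
	void *data;		/* start address of this page's data area */
	long entries;		/* events that live on this page */
};

static void decrement_entry(struct page_node *commit_page, void *page_addr)
{
	struct page_node *p = commit_page, *start;

	/* likely case: the event sits on the commit page itself */
	if (p->data == page_addr) {
		p->entries--;
		return;
	}

	/* otherwise start one past the commit page and walk the whole ring */
	p = p->next;
	start = p;
	do {
		if (p->data == page_addr) {
			p->entries--;
			return;
		}
		p = p->next;
	} while (p != start);

	/* page not part of this ring (mirrors the RB_WARN_ON above) */
	fprintf(stderr, "decrement_entry: page %p not found\n", page_addr);
}

int main(void)
{
	char a[16], b[16], c[16];
	struct page_node p3 = { NULL, c, 1 };
	struct page_node p2 = { &p3, b, 2 };
	struct page_node p1 = { &p2, a, 3 };

	p3.next = &p1;			/* close the ring */
	decrement_entry(&p1, b);	/* commit page is p1, event lives on p2 */
	printf("entries on p2: %ld\n", p2.entries);	/* prints 1 */
	return 0;
}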
@@ -2365,14 +2402,15 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
 	 */
 	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
 
+	rb_decrement_entry(cpu_buffer, event);
 	if (rb_try_to_discard(cpu_buffer, event))
 		goto out;
 
 	/*
 	 * The commit is still visible by the reader, so we
-	 * must increment entries.
+	 * must still update the timestamp.
 	 */
-	local_inc(&cpu_buffer->entries);
+	rb_update_write_stamp(cpu_buffer, event);
  out:
 	rb_end_commit(cpu_buffer);
 
@@ -2884,8 +2922,7 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
 
 	event = rb_reader_event(cpu_buffer);
 
-	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX
-	    || rb_discarded_event(event))
+	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 		cpu_buffer->read++;
 
 	rb_update_read_stamp(cpu_buffer, event);