path: root/kernel/trace/ring_buffer.c
author		Linus Torvalds <torvalds@linux-foundation.org>	2011-10-26 11:03:38 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-10-26 11:03:38 -0400
commit		7115e3fcf45514db7525a05365b10454ff7f345e (patch)
tree		17450e6337d559cc35dae6a7a73abab01ac63f00 /kernel/trace/ring_buffer.c
parent		1f6e05171bb5cc32a4d6437ab2269fc21d169ca7 (diff)
parent		c752d04066a36ae30b29795f3fa3f536292c1f8c (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (121 commits)
  perf symbols: Increase symbol KSYM_NAME_LEN size
  perf hists browser: Refuse 'a' hotkey on non symbolic views
  perf ui browser: Use libslang to read keys
  perf tools: Fix tracing info recording
  perf hists browser: Elide DSO column when it is set to just one DSO, ditto for threads
  perf hists: Don't consider filtered entries when calculating column widths
  perf hists: Don't decay total_period for filtered entries
  perf hists browser: Honour symbol_conf.show_{nr_samples,total_period}
  perf hists browser: Do not exit on tab key with single event
  perf annotate browser: Don't change selection line when returning from callq
  perf tools: handle endianness of feature bitmap
  perf tools: Add prelink suggestion to dso update message
  perf script: Fix unknown feature comment
  perf hists browser: Apply the dso and thread filters when merging new batches
  perf hists: Move the dso and thread filters from hist_browser
  perf ui browser: Honour the xterm colors
  perf top tui: Give color hints just on the percentage, like on --stdio
  perf ui browser: Make the colors configurable and change the defaults
  perf tui: Remove unneeded call to newtCls on startup
  perf hists: Don't format the percentage on hist_entry__snprintf
  ...

Fix up conflicts in arch/x86/kernel/kprobes.c manually.

Ingo's tree did the insane "add volatile to const array", which just doesn't
make sense ("volatile const"?). But we could remove the const *and* make the
array volatile to make doubly sure that gcc doesn't optimize it away..

Also fix up kernel/trace/ring_buffer.c non-data-conflicts manually: the
reader_lock has been turned into a raw lock by the core locking merge, and
there was a new user of it introduced in this perf core merge. Make sure that
new use also uses the raw accessor functions.
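As a reference for that last point, the pattern looks roughly like the sketch
below: reader_lock is a raw_spinlock_t after the locking merge, so any new
user of it (such as the code added later in this diff) has to take it with
the raw_spin_* accessors. The helper name example_peek_reader_page() is
purely illustrative and not part of the merge.

#include <linux/spinlock.h>
#include <linux/types.h>

/* Illustrative sketch only: reader_lock is now a raw_spinlock_t, so it
 * must be taken with the raw_* accessor functions. */
static u64 example_peek_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long flags;
	u64 ts;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	/* state guarded by reader_lock */
	ts = cpu_buffer->reader_page->page->time_stamp;
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return ts;
}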
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--	kernel/trace/ring_buffer.c	70
1 file changed, 69 insertions(+), 1 deletion(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f2f821acc59..f5b7b5c1195 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -488,12 +488,14 @@ struct ring_buffer_per_cpu {
 	struct buffer_page *reader_page;
 	unsigned long lost_events;
 	unsigned long last_overrun;
+	local_t entries_bytes;
 	local_t commit_overrun;
 	local_t overrun;
 	local_t entries;
 	local_t committing;
 	local_t commits;
 	unsigned long read;
+	unsigned long read_bytes;
 	u64 write_stamp;
 	u64 read_stamp;
 };
@@ -1708,6 +1710,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
 	 * the counters.
 	 */
 	local_add(entries, &cpu_buffer->overrun);
+	local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
 
 	/*
 	 * The entries will be zeroed out when we move the
@@ -1863,6 +1866,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	event = __rb_page_index(tail_page, tail);
 	kmemcheck_annotate_bitfield(event, bitfield);
 
+	/* account for padding bytes */
+	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
+
 	/*
 	 * Save the original length to the meta data.
 	 * This will be used by the reader to add lost event
@@ -2054,6 +2060,9 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	if (!tail)
 		tail_page->page->time_stamp = ts;
 
+	/* account for these added bytes */
+	local_add(length, &cpu_buffer->entries_bytes);
+
 	return event;
 }
 
@@ -2076,6 +2085,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
 	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
 		unsigned long write_mask =
 			local_read(&bpage->write) & ~RB_WRITE_MASK;
+		unsigned long event_length = rb_event_length(event);
 		/*
 		 * This is on the tail page. It is possible that
 		 * a write could come in and move the tail page
@@ -2085,8 +2095,11 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
 		old_index += write_mask;
 		new_index += write_mask;
 		index = local_cmpxchg(&bpage->write, old_index, new_index);
-		if (index == old_index)
+		if (index == old_index) {
+			/* update counters */
+			local_sub(event_length, &cpu_buffer->entries_bytes);
 			return 1;
+		}
 	}
 
 	/* could not discard */
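The placement of that local_sub() is the point of the hunk: the byte counter
is only corrected when this CPU actually wins the cmpxchg and pulls the write
pointer back; if another writer has moved the tail in the meantime, the
event's bytes stay in the page and the counter is left alone. A trimmed
sketch of the same pattern (names copied from the hunk, illustration only,
not additional patch content):

	/* capture the length before attempting the discard */
	unsigned long event_length = rb_event_length(event);

	if (local_cmpxchg(&bpage->write, old_index, new_index) == old_index) {
		/* discard succeeded: these bytes left the buffer */
		local_sub(event_length, &cpu_buffer->entries_bytes);
		return 1;
	}
	/* lost the race: the bytes remain in the page, keep them accounted */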
@@ -2661,6 +2674,58 @@ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
 }
 
 /**
+ * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to read from.
+ */
+unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
+{
+	unsigned long flags;
+	struct ring_buffer_per_cpu *cpu_buffer;
+	struct buffer_page *bpage;
+	unsigned long ret;
+
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+		return 0;
+
+	cpu_buffer = buffer->buffers[cpu];
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	/*
+	 * if the tail is on reader_page, oldest time stamp is on the reader
+	 * page
+	 */
+	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
+		bpage = cpu_buffer->reader_page;
+	else
+		bpage = rb_set_head_page(cpu_buffer);
+	ret = bpage->page->time_stamp;
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
+
+/**
+ * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to read from.
+ */
+unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long ret;
+
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+		return 0;
+
+	cpu_buffer = buffer->buffers[cpu];
+	ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
+
+/**
  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
  * @buffer: The ring buffer
  * @cpu: The per CPU buffer to get the entries from.
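Not part of the diff: a minimal sketch of how a caller could use the two new
exports. ring_buffer_bytes_cpu() returns the bytes written minus the bytes
already consumed (entries_bytes - read_bytes), and
ring_buffer_oldest_event_ts() returns the time stamp of the oldest page still
unread. The helper report_rb_cpu_usage() and its message are hypothetical,
for illustration only.

#include <linux/ring_buffer.h>
#include <linux/printk.h>

/* Hypothetical helper: report how much data is pending in one per-CPU
 * buffer and how old its oldest event is. */
static void report_rb_cpu_usage(struct ring_buffer *buffer, int cpu)
{
	unsigned long bytes = ring_buffer_bytes_cpu(buffer, cpu);
	unsigned long oldest_ts = ring_buffer_oldest_event_ts(buffer, cpu);

	pr_info("cpu%d: %lu bytes unread, oldest event time stamp %lu\n",
		cpu, bytes, oldest_ts);
}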
@@ -3527,11 +3592,13 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	cpu_buffer->reader_page->read = 0;
 
 	local_set(&cpu_buffer->commit_overrun, 0);
+	local_set(&cpu_buffer->entries_bytes, 0);
 	local_set(&cpu_buffer->overrun, 0);
 	local_set(&cpu_buffer->entries, 0);
 	local_set(&cpu_buffer->committing, 0);
 	local_set(&cpu_buffer->commits, 0);
 	cpu_buffer->read = 0;
+	cpu_buffer->read_bytes = 0;
 
 	cpu_buffer->write_stamp = 0;
 	cpu_buffer->read_stamp = 0;
@@ -3918,6 +3985,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	} else {
 		/* update the entry counter */
 		cpu_buffer->read += rb_page_entries(reader);
+		cpu_buffer->read_bytes += BUF_PAGE_SIZE;
 
 		/* swap the pages */
 		rb_init_page(bpage);