diff options
author | Frederic Weisbecker <fweisbec@gmail.com> | 2011-05-19 13:55:04 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2011-06-09 06:57:54 -0400 |
commit | 76369139ceb955deefc509e6e12ce9d6ce50ccab (patch) | |
tree | 5ba2732188d2fecd4fd49b93f73e51e17fb69082 /kernel/events/internal.h | |
parent | b58f6b0dd3d677338b9065388cc2cc942b86338e (diff) |
perf: Split up buffer handling from core code
And create the internal perf events header.
v2: Keep an internal inlined perf_output_copy()
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Stephane Eranian <eranian@google.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/1305827704-5607-1-git-send-email-fweisbec@gmail.com
[ v3: use clearer 'ring_buffer' and 'rb' naming ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/events/internal.h')
-rw-r--r-- | kernel/events/internal.h | 97 |
1 file changed, 97 insertions(+), 0 deletions(-)
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
new file mode 100644
index 000000000000..114f27f3a624
--- /dev/null
+++ b/kernel/events/internal.h
@@ -0,0 +1,97 @@ | |||
/*
 * Private header for the perf events ring-buffer code, shared between
 * the perf core and the buffer implementation split out of it.
 */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

/*
 * Buffer flag: the buffer may be written to (see 'writable' below).
 * NOTE(review): presumably consumed via rb_alloc()'s 'flags' argument —
 * confirm against the .c side of this split.
 */
#define RING_BUFFER_WRITABLE		0x01
/*
 * In-kernel state of the mmap'ed ring buffer that carries perf event
 * output to user space.  Reference counted ('refcount'); teardown is
 * RCU-deferred through 'rcu_head'.  Created by rb_alloc(), released by
 * rb_free() (declared below).
 */
struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	/* vmalloc-backed buffers need deferred-work teardown */
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				writable;	/* are we writable   */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */

	struct perf_event_mmap_page	*user_page;
	/* trailing variable-length array: one pointer per data page */
	void				*data_pages[0];
};
29 | |||

/* Buffer lifetime: allocation and teardown of struct ring_buffer. */
extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);

/* Sample-id header helpers shared between core and output code. */
extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

/* Translate an mmap page offset into the backing struct page. */
extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
47 | |||
#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

/* Allocation order of a data "page": each slot covers 2^order pages. */
static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

/* Page-backed buffers always use order-0 (single-page) data pages. */
static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif
67 | |||
68 | static unsigned long perf_data_size(struct ring_buffer *rb) | ||
69 | { | ||
70 | return rb->nr_pages << (PAGE_SHIFT + page_order(rb)); | ||
71 | } | ||
72 | |||
/*
 * Copy 'len' bytes from 'buf' into the ring buffer at the position
 * tracked by 'handle', advancing handle->addr/size/page as it goes.
 *
 * Each iteration copies at most what is left in the current data page
 * (handle->size); on exhausting a page it steps to the next one.  The
 * 'page &= nr_pages - 1' wrap-around relies on nr_pages being a power
 * of two.
 *
 * NOTE(review): no overwrite check against the reader here — space is
 * presumably reserved by the caller before copying; confirm on the .c
 * side of this split.
 */
static inline void
__output_copy(struct perf_output_handle *handle,
	      const void *buf, unsigned int len)
{
	do {
		unsigned long size = min_t(unsigned long, handle->size, len);

		memcpy(handle->addr, buf, size);

		len -= size;
		handle->addr += size;
		buf += size;
		handle->size -= size;
		if (!handle->size) {
			struct ring_buffer *rb = handle->rb;

			/* wrap to the next data page (nr_pages is a power of two) */
			handle->page++;
			handle->page &= rb->nr_pages - 1;
			handle->addr = rb->data_pages[handle->page];
			handle->size = PAGE_SIZE << page_order(rb);
		}
	} while (len);
}
96 | |||
97 | #endif /* _KERNEL_EVENTS_INTERNAL_H */ | ||