author     Frederic Weisbecker <fweisbec@gmail.com>   2011-05-19 13:55:04 -0400
committer  Ingo Molnar <mingo@elte.hu>                2011-06-09 06:57:54 -0400
commit     76369139ceb955deefc509e6e12ce9d6ce50ccab (patch)
tree       5ba2732188d2fecd4fd49b93f73e51e17fb69082 /kernel/events/ring_buffer.c
parent     b58f6b0dd3d677338b9065388cc2cc942b86338e (diff)
perf: Split up buffer handling from core code

And create the internal perf events header.

v2: Keep an internal inlined perf_output_copy()

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Stephane Eranian <eranian@google.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/1305827704-5607-1-git-send-email-fweisbec@gmail.com
[ v3: use clearer 'ring_buffer' and 'rb' naming ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
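
For context, the functions collected in the new file form the kernel-internal output path: a writer reserves space with perf_output_begin(), copies bytes with perf_output_put()/perf_output_copy(), and publishes the new head with perf_output_end(). A minimal sketch of such a caller follows; it is illustrative only and not part of this patch, and struct my_record, emit_record() and the use of PERF_RECORD_SAMPLE are hypothetical.

/*
 * Illustrative only: a kernel-side writer driving the output-handle API
 * that now lives in kernel/events/ring_buffer.c.
 */
#include <linux/perf_event.h>

struct my_record {
	struct perf_event_header	header;
	u64				value;
};

static void emit_record(struct perf_event *event, u64 value, int nmi)
{
	struct perf_output_handle handle;
	struct my_record rec = {
		.header = {
			.type = PERF_RECORD_SAMPLE,	/* hypothetical record type */
			.misc = 0,
			.size = sizeof(rec),
		},
		.value	= value,
	};

	/* Reserve space; returns -ENOSPC and accounts a lost record when full. */
	if (perf_output_begin(&handle, event, rec.header.size, nmi, 1))
		return;

	/* Copy the record into the ring and publish the new data_head. */
	perf_output_put(&handle, rec);
	perf_output_end(&handle);
}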
Diffstat (limited to 'kernel/events/ring_buffer.c')
-rw-r--r--  kernel/events/ring_buffer.c  399
1 files changed, 399 insertions, 0 deletions
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
new file mode 100644
index 000000000000..fde52595d8f7
--- /dev/null
+++ b/kernel/events/ring_buffer.c
@@ -0,0 +1,399 @@
/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "internal.h"

static bool perf_output_space(struct ring_buffer *rb, unsigned long tail,
			      unsigned long offset, unsigned long head)
{
	unsigned long mask;

	if (!rb->writable)
		return true;

	mask = perf_data_size(rb) - 1;

	offset = (offset - tail) & mask;
	head   = (head - tail) & mask;

	if ((int)(head - offset) < 0)
		return false;

	return true;
}

static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, POLL_IN);

	if (handle->nmi) {
		handle->event->pending_wakeup = 1;
		irq_work_queue(&handle->event->pending);
	} else
		perf_event_wakeup(handle->event);
}

/*
 * We need to ensure a later event doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;

	preempt_disable();
	local_inc(&rb->nest);
	handle->wakeup = local_read(&rb->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;

again:
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here, which means we can miss a head update.
	 */

	if (!local_dec_and_test(&rb->nest))
		goto out;

	/*
	 * Publish the known good head. Rely on the full barrier implied
	 * by local_dec_and_test() to order the rb->head read and this
	 * write.
	 */
	rb->user_page->data_head = head;

	/*
	 * Now check if we missed an update, rely on the (compiler)
	 * barrier in local_dec_and_test() to re-read rb->head.
	 */
	if (unlikely(head != local_read(&rb->head))) {
		local_inc(&rb->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}

int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size,
		      int nmi, int sample)
{
	struct ring_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost;
	struct perf_sample_data sample_data;
	struct {
		struct perf_event_header header;
		u64			 id;
		u64			 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (!rb)
		goto out;

	handle->rb	= rb;
	handle->event	= event;
	handle->nmi	= nmi;
	handle->sample	= sample;

	if (!rb->nr_pages)
		goto out;

	have_lost = local_read(&rb->lost);
	if (have_lost) {
		lost_event.header.size = sizeof(lost_event);
		perf_event_header__init_id(&lost_event.header, &sample_data,
					   event);
		size += lost_event.header.size;
	}

	perf_output_get_handle(handle);

	do {
		/*
		 * Userspace could choose to issue a mb() before updating the
		 * tail pointer, so that all reads are completed before the
		 * write is issued.
		 */
		tail = ACCESS_ONCE(rb->user_page->data_tail);
		smp_rmb();
		offset = head = local_read(&rb->head);
		head += size;
		if (unlikely(!perf_output_space(rb, tail, offset, head)))
			goto fail;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

	if (head - local_read(&rb->wakeup) > rb->watermark)
		local_add(rb->watermark, &rb->wakeup);

	handle->page = offset >> (PAGE_SHIFT + page_order(rb));
	handle->page &= rb->nr_pages - 1;
	handle->size = offset & ((PAGE_SIZE << page_order(rb)) - 1);
	handle->addr = rb->data_pages[handle->page];
	handle->addr += handle->size;
	handle->size = (PAGE_SIZE << page_order(rb)) - handle->size;

	if (have_lost) {
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id          = event->id;
		lost_event.lost        = local_xchg(&rb->lost, 0);

		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}

void perf_output_copy(struct perf_output_handle *handle,
		      const void *buf, unsigned int len)
{
	__output_copy(handle, buf, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	struct perf_event *event = handle->event;
	struct ring_buffer *rb = handle->rb;

	int wakeup_events = event->attr.wakeup_events;

	if (handle->sample && wakeup_events) {
		int events = local_inc_return(&rb->events);
		if (events >= wakeup_events) {
			local_sub(wakeup_events, &rb->events);
			local_inc(&rb->wakeup);
		}
	}

	perf_output_put_handle(handle);
	rcu_read_unlock();
}

static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->writable = 1;

	atomic_set(&rb->refcount, 1);
}

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	int i;

	size = sizeof(struct ring_buffer);
	size += nr_pages * sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)rb->data_pages[i]);

	free_page((unsigned long)rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
	int i;

	perf_mmap_free_page((unsigned long)rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
	kfree(rb);
}

#else

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > (1UL << page_order(rb)))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}

static void rb_free_work(struct work_struct *work)
{
	struct ring_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct ring_buffer, work);
	nr = 1 << page_order(rb);

	base = rb->user_page;
	for (i = 0; i < nr + 1; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}

void rb_free(struct ring_buffer *rb)
{
	schedule_work(&rb->work);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct ring_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	rb->page_order = ilog2(nr_pages);
	rb->nr_pages = 1;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}

#endif
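
The tail/head handshake in perf_output_begin() (ACCESS_ONCE(rb->user_page->data_tail) plus smp_rmb()) and the data_head publication in perf_output_put_handle() have a user-space counterpart: a consumer mmap()s the buffer, reads data_head, consumes records, and only then stores data_tail behind a barrier, as the in-code comment notes. Below is a minimal reader sketch under those assumptions; drain_ring() and handle_record() are hypothetical names, the data area size is assumed to be a power of two, and records that wrap at the end of the data area are not handled.

/*
 * Illustrative user-space reader for the mmap'ed ring buffer managed above.
 * 'base' is the mmap'ed area; its first page is struct perf_event_mmap_page.
 */
#include <linux/perf_event.h>
#include <stdint.h>
#include <unistd.h>

static void drain_ring(void *base, uint64_t data_size,
		       void (*handle_record)(struct perf_event_header *))
{
	struct perf_event_mmap_page *meta = base;
	char *data = (char *)base + sysconf(_SC_PAGESIZE);
	uint64_t head, tail;

	head = meta->data_head;
	__sync_synchronize();		/* read data_head before the records */
	tail = meta->data_tail;

	while (tail < head) {
		struct perf_event_header *hdr =
			(void *)(data + (tail & (data_size - 1)));

		/* Wrapped records are ignored here; a real reader copies
		 * them out piecewise across the buffer boundary. */
		handle_record(hdr);
		tail += hdr->size;
	}

	__sync_synchronize();		/* finish all reads ... */
	meta->data_tail = tail;		/* ... before handing the space back */
}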