about summary refs log tree commit diff stats
path: root/kernel/events
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/events')
-rw-r--r--kernel/events/core.c5
-rw-r--r--kernel/events/internal.h4
-rw-r--r--kernel/events/ring_buffer.c139
3 files changed, 145 insertions, 3 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index dbc2eff32230..81e8d14ac59a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3423,7 +3423,6 @@ static void free_event_rcu(struct rcu_head *head)
3423 kfree(event); 3423 kfree(event);
3424} 3424}
3425 3425
3426static void ring_buffer_put(struct ring_buffer *rb);
3427static void ring_buffer_attach(struct perf_event *event, 3426static void ring_buffer_attach(struct perf_event *event,
3428 struct ring_buffer *rb); 3427 struct ring_buffer *rb);
3429 3428
@@ -4361,7 +4360,7 @@ static void rb_free_rcu(struct rcu_head *rcu_head)
4361 rb_free(rb); 4360 rb_free(rb);
4362} 4361}
4363 4362
4364static struct ring_buffer *ring_buffer_get(struct perf_event *event) 4363struct ring_buffer *ring_buffer_get(struct perf_event *event)
4365{ 4364{
4366 struct ring_buffer *rb; 4365 struct ring_buffer *rb;
4367 4366
@@ -4376,7 +4375,7 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
4376 return rb; 4375 return rb;
4377} 4376}
4378 4377
4379static void ring_buffer_put(struct ring_buffer *rb) 4378void ring_buffer_put(struct ring_buffer *rb)
4380{ 4379{
4381 if (!atomic_dec_and_test(&rb->refcount)) 4380 if (!atomic_dec_and_test(&rb->refcount))
4382 return; 4381 return;
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 4d117a981431..b701ebc32570 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -36,6 +36,8 @@ struct ring_buffer {
36 struct user_struct *mmap_user; 36 struct user_struct *mmap_user;
37 37
38 /* AUX area */ 38 /* AUX area */
39 local_t aux_head;
40 local_t aux_nest;
39 unsigned long aux_pgoff; 41 unsigned long aux_pgoff;
40 int aux_nr_pages; 42 int aux_nr_pages;
41 atomic_t aux_mmap_count; 43 atomic_t aux_mmap_count;
@@ -56,6 +58,8 @@ extern void perf_event_wakeup(struct perf_event *event);
56extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event, 58extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
57 pgoff_t pgoff, int nr_pages, int flags); 59 pgoff_t pgoff, int nr_pages, int flags);
58extern void rb_free_aux(struct ring_buffer *rb); 60extern void rb_free_aux(struct ring_buffer *rb);
61extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
62extern void ring_buffer_put(struct ring_buffer *rb);
59 63
60static inline bool rb_has_aux(struct ring_buffer *rb) 64static inline bool rb_has_aux(struct ring_buffer *rb)
61{ 65{
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 6e3be7a10c50..0cc7b0f39058 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -243,6 +243,145 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
243 spin_lock_init(&rb->event_lock); 243 spin_lock_init(&rb->event_lock);
244} 244}
245 245
246/*
247 * This is called before hardware starts writing to the AUX area to
248 * obtain an output handle and make sure there's room in the buffer.
249 * When the capture completes, call perf_aux_output_end() to commit
250 * the recorded data to the buffer.
251 *
252 * The ordering is similar to that of perf_output_{begin,end}, with
253 * the exception of (B), which should be taken care of by the pmu
254 * driver, since ordering rules will differ depending on hardware.
255 */
256void *perf_aux_output_begin(struct perf_output_handle *handle,
257 struct perf_event *event)
258{
259 struct perf_event *output_event = event;
260 unsigned long aux_head, aux_tail;
261 struct ring_buffer *rb;
262
263 if (output_event->parent)
264 output_event = output_event->parent;
265
266 /*
267 * Since this will typically be open across pmu::add/pmu::del, we
268 * grab ring_buffer's refcount instead of holding rcu read lock
269 * to make sure it doesn't disappear under us.
270 */
271 rb = ring_buffer_get(output_event);
272 if (!rb)
273 return NULL;
274
275 if (!rb_has_aux(rb) || !atomic_inc_not_zero(&rb->aux_refcount))
276 goto err;
277
278 /*
279 * Nesting is not supported for AUX area, make sure nested
280 * writers are caught early
281 */
282 if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1)))
283 goto err_put;
284
285 aux_head = local_read(&rb->aux_head);
286 aux_tail = ACCESS_ONCE(rb->user_page->aux_tail);
287
288 handle->rb = rb;
289 handle->event = event;
290 handle->head = aux_head;
291 if (aux_head - aux_tail < perf_aux_size(rb))
292 handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));
293 else
294 handle->size = 0;
295
296 /*
297 * handle->size computation depends on aux_tail load; this forms a
298 * control dependency barrier separating aux_tail load from aux data
299 * store that will be enabled on successful return
300 */
301 if (!handle->size) { /* A, matches D */
302 event->pending_disable = 1;
303 perf_output_wakeup(handle);
304 local_set(&rb->aux_nest, 0);
305 goto err_put;
306 }
307
308 return handle->rb->aux_priv;
309
310err_put:
311 rb_free_aux(rb);
312
313err:
314 ring_buffer_put(rb);
315 handle->event = NULL;
316
317 return NULL;
318}
319
320/*
321 * Commit the data written by hardware into the ring buffer by adjusting
322 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
323 * pmu driver's responsibility to observe ordering rules of the hardware,
324 * so that all the data is externally visible before this is called.
325 */
326void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
327 bool truncated)
328{
329 struct ring_buffer *rb = handle->rb;
330 unsigned long aux_head = local_read(&rb->aux_head);
331 u64 flags = 0;
332
333 if (truncated)
334 flags |= PERF_AUX_FLAG_TRUNCATED;
335
336 local_add(size, &rb->aux_head);
337
338 if (size || flags) {
339 /*
340 * Only send RECORD_AUX if we have something useful to communicate
341 */
342
343 perf_event_aux_event(handle->event, aux_head, size, flags);
344 }
345
346 rb->user_page->aux_head = local_read(&rb->aux_head);
347
348 perf_output_wakeup(handle);
349 handle->event = NULL;
350
351 local_set(&rb->aux_nest, 0);
352 rb_free_aux(rb);
353 ring_buffer_put(rb);
354}
355
356/*
357 * Skip over a given number of bytes in the AUX buffer, due to, for example,
358 * hardware's alignment constraints.
359 */
360int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
361{
362 struct ring_buffer *rb = handle->rb;
363 unsigned long aux_head;
364
365 if (size > handle->size)
366 return -ENOSPC;
367
368 local_add(size, &rb->aux_head);
369
370 handle->head = aux_head;
371 handle->size -= size;
372
373 return 0;
374}
375
376void *perf_get_aux(struct perf_output_handle *handle)
377{
378 /* this is only valid between perf_aux_output_begin and *_end */
379 if (!handle->event)
380 return NULL;
381
382 return handle->rb->aux_priv;
383}
384
246#define PERF_AUX_GFP (GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY) 385#define PERF_AUX_GFP (GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)
247 386
248static struct page *rb_alloc_aux_page(int node, int order) 387static struct page *rb_alloc_aux_page(int node, int order)