author     Adrian Hunter <adrian.hunter@intel.com>      2018-03-06 04:13:17 -0500
committer  Arnaldo Carvalho de Melo <acme@redhat.com>   2018-04-05 10:03:33 -0400
commit     0d75f123a6dcdacb3550b0c3c44a283f7259289e
tree       74b59df541cf8fa7d1d84fd18f23363df62722e2
parent     d1e7e602cd64cf61f87dbf30df07c24df9eb1d99
perf auxtrace: Make auxtrace_queues__add_buffer() allocate struct buffer
In preparation for supporting AUX area sampling buffers,
auxtrace_queues__add_buffer() needs to be more generic. To that end,
move memory allocation for struct buffer into it.
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Link: http://lkml.kernel.org/r/1520327598-1317-7-git-send-email-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-rw-r--r--  tools/perf/util/auxtrace.c | 54
1 file changed, 24 insertions(+), 30 deletions(-)
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index fb357a00dd86..e1aff91c54a8 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -308,7 +308,11 @@ static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
 				       struct auxtrace_buffer *buffer,
 				       struct auxtrace_buffer **buffer_ptr)
 {
-	int err;
+	int err = -ENOMEM;
+
+	buffer = memdup(buffer, sizeof(*buffer));
+	if (!buffer)
+		return -ENOMEM;
 
 	if (session->one_mmap) {
 		buffer->data = buffer->data_offset - session->one_mmap_offset +
@@ -316,24 +320,28 @@ static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
 	} else if (perf_data__is_pipe(session->data)) {
 		buffer->data = auxtrace_copy_data(buffer->size, session);
 		if (!buffer->data)
-			return -ENOMEM;
+			goto out_free;
 		buffer->data_needs_freeing = true;
 	} else if (BITS_PER_LONG == 32 &&
 		   buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
 		err = auxtrace_queues__split_buffer(queues, idx, buffer);
 		if (err)
-			return err;
+			goto out_free;
 	}
 
 	err = auxtrace_queues__queue_buffer(queues, idx, buffer);
 	if (err)
-		return err;
+		goto out_free;
 
 	/* FIXME: Doesn't work for split buffer */
 	if (buffer_ptr)
 		*buffer_ptr = buffer;
 
 	return 0;
+
+out_free:
+	auxtrace_buffer__free(buffer);
+	return err;
 }
 
 static bool filter_cpu(struct perf_session *session, int cpu)
@@ -348,36 +356,22 @@ int auxtrace_queues__add_event(struct auxtrace_queues *queues,
 			       union perf_event *event, off_t data_offset,
 			       struct auxtrace_buffer **buffer_ptr)
 {
-	struct auxtrace_buffer *buffer;
-	unsigned int idx;
-	int err;
+	struct auxtrace_buffer buffer = {
+		.pid = -1,
+		.tid = event->auxtrace.tid,
+		.cpu = event->auxtrace.cpu,
+		.data_offset = data_offset,
+		.offset = event->auxtrace.offset,
+		.reference = event->auxtrace.reference,
+		.size = event->auxtrace.size,
+	};
+	unsigned int idx = event->auxtrace.idx;
 
 	if (filter_cpu(session, event->auxtrace.cpu))
 		return 0;
 
-	buffer = zalloc(sizeof(struct auxtrace_buffer));
-	if (!buffer)
-		return -ENOMEM;
-
-	buffer->pid = -1;
-	buffer->tid = event->auxtrace.tid;
-	buffer->cpu = event->auxtrace.cpu;
-	buffer->data_offset = data_offset;
-	buffer->offset = event->auxtrace.offset;
-	buffer->reference = event->auxtrace.reference;
-	buffer->size = event->auxtrace.size;
-	idx = event->auxtrace.idx;
-
-	err = auxtrace_queues__add_buffer(queues, session, idx, buffer,
-					  buffer_ptr);
-	if (err)
-		goto out_err;
-
-	return 0;
-
-out_err:
-	auxtrace_buffer__free(buffer);
-	return err;
+	return auxtrace_queues__add_buffer(queues, session, idx, &buffer,
+					   buffer_ptr);
 }
 
 static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
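
For readers unfamiliar with the convention, the change amounts to a copy-on-add ownership pattern: the caller now fills a struct auxtrace_buffer on its own stack, and auxtrace_queues__add_buffer() duplicates it with memdup() and releases the copy on every error path. A minimal, standalone sketch of that pattern follows; the names (sample_buffer, sample_queue, dup_mem, queue_add_buffer) are illustrative stand-ins, not the perf code.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-ins for the perf types; not the real definitions. */
struct sample_buffer {
	int cpu;
	int tid;
	size_t size;
};

struct sample_queue {
	struct sample_buffer *slots[8];
	unsigned int nr;
};

/* memdup()-style helper: heap-allocate a copy of a caller-owned object. */
static void *dup_mem(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

/*
 * Like the reworked auxtrace_queues__add_buffer(): take a template by
 * pointer, duplicate it internally, and free the copy on every error
 * path so the caller never owns the heap object.
 */
static int queue_add_buffer(struct sample_queue *q, struct sample_buffer *tmpl)
{
	struct sample_buffer *buf;
	int err = -ENOMEM;

	buf = dup_mem(tmpl, sizeof(*tmpl));
	if (!buf)
		return -ENOMEM;

	if (q->nr >= 8) {		/* queue full: error path frees the copy */
		err = -ENOSPC;
		goto out_free;
	}

	q->slots[q->nr++] = buf;
	return 0;

out_free:
	free(buf);
	return err;
}

int main(void)
{
	struct sample_queue q = { .nr = 0 };
	/* The caller builds the descriptor on the stack, as the patched
	 * auxtrace_queues__add_event() now does. */
	struct sample_buffer tmpl = { .cpu = 0, .tid = 1234, .size = 4096 };
	int err = queue_add_buffer(&q, &tmpl);

	printf("add returned %d, queue holds %u buffer(s)\n", err, q.nr);
	while (q.nr)
		free(q.slots[--q.nr]);
	return 0;
}

Keeping the heap allocation inside the add function means additional callers, such as the AUX area sampling path this series prepares for, only need to build a temporary descriptor and never have to track its lifetime.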