Diffstat (limited to 'tools')
-rw-r--r--	tools/perf/builtin-lock.c	148
1 file changed, 146 insertions, 2 deletions
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index fb9ab2ad3f9..e12c844df1e 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -460,6 +460,150 @@ process_raw_event(void *data, int cpu,
 	process_lock_release_event(data, event, cpu, timestamp, thread);
 }
 
+struct raw_event_queue {
+	u64			timestamp;
+	int			cpu;
+	void			*data;
+	struct thread		*thread;
+	struct list_head	list;
+};
+
+static LIST_HEAD(raw_event_head);
+
+#define FLUSH_PERIOD	(5 * NSEC_PER_SEC)
+
+static u64 flush_limit = ULLONG_MAX;
+static u64 last_flush = 0;
+static struct raw_event_queue *last_inserted;
+
+static void flush_raw_event_queue(u64 limit)
+{
+	struct raw_event_queue *tmp, *iter;
+
+	list_for_each_entry_safe(iter, tmp, &raw_event_head, list) {
+		if (iter->timestamp > limit)
+			return;
+
+		if (iter == last_inserted)
+			last_inserted = NULL;
+
+		process_raw_event(iter->data, iter->cpu, iter->timestamp,
+				  iter->thread);
+
+		last_flush = iter->timestamp;
+		list_del(&iter->list);
+		free(iter->data);
+		free(iter);
+	}
+}
+
+static void __queue_raw_event_end(struct raw_event_queue *new)
+{
+	struct raw_event_queue *iter;
+
+	list_for_each_entry_reverse(iter, &raw_event_head, list) {
+		if (iter->timestamp < new->timestamp) {
+			list_add(&new->list, &iter->list);
+			return;
+		}
+	}
+
+	list_add(&new->list, &raw_event_head);
+}
+
+static void __queue_raw_event_before(struct raw_event_queue *new,
+				     struct raw_event_queue *iter)
+{
+	list_for_each_entry_continue_reverse(iter, &raw_event_head, list) {
+		if (iter->timestamp < new->timestamp) {
+			list_add(&new->list, &iter->list);
+			return;
+		}
+	}
+
+	list_add(&new->list, &raw_event_head);
+}
+
+static void __queue_raw_event_after(struct raw_event_queue *new,
+				    struct raw_event_queue *iter)
+{
+	list_for_each_entry_continue(iter, &raw_event_head, list) {
+		if (iter->timestamp > new->timestamp) {
+			list_add_tail(&new->list, &iter->list);
+			return;
+		}
+	}
+
+	list_add_tail(&new->list, &raw_event_head);
+}
+
+/* The queue is ordered by time */
+static void __queue_raw_event(struct raw_event_queue *new)
+{
+	if (!last_inserted) {
+		__queue_raw_event_end(new);
+		return;
+	}
+
+	/*
+	 * Most of the time the current event has a timestamp very close
+	 * to that of the last event inserted, unless we just switched to
+	 * another event buffer. Sorting with a list, starting from the
+	 * last inserted event (which is usually close to the current
+	 * one), is probably more efficient than rbtree-based sorting.
+	 */
+	if (last_inserted->timestamp >= new->timestamp)
+		__queue_raw_event_before(new, last_inserted);
+	else
+		__queue_raw_event_after(new, last_inserted);
+}
+
+static void queue_raw_event(void *data, int raw_size, int cpu,
+			    u64 timestamp, struct thread *thread)
+{
+	struct raw_event_queue *new;
+
+	if (flush_limit == ULLONG_MAX)
+		flush_limit = timestamp + FLUSH_PERIOD;
+
+	if (timestamp < last_flush) {
+		printf("Warning: Timestamp below last timeslice flush\n");
+		return;
+	}
+
+	new = malloc(sizeof(*new));
+	if (!new)
+		die("Not enough memory\n");
+
+	new->timestamp = timestamp;
+	new->cpu = cpu;
+	new->thread = thread;
+
+	new->data = malloc(raw_size);
+	if (!new->data)
+		die("Not enough memory\n");
+
+	memcpy(new->data, data, raw_size);
+
+	__queue_raw_event(new);
+	last_inserted = new;
+
+	/*
+	 * We want to keep a slice of events covering 2 * FLUSH_PERIOD.
+	 * If FLUSH_PERIOD is big enough, it ensures that all events that
+	 * occurred in the first half of the timeslice have been buffered
+	 * and that none are still pending (we need that because of the
+	 * weakly ordered event recording we have). Once we reach the
+	 * 2 * FLUSH_PERIOD timeslice, we flush the first half to be
+	 * gentle with memory (the second half can still receive new
+	 * events in the middle, so wait another period before flushing
+	 * it).
+	 */
+	if (new->timestamp > flush_limit &&
+	    new->timestamp - flush_limit > FLUSH_PERIOD) {
+		flush_limit += FLUSH_PERIOD;
+		flush_raw_event_queue(flush_limit);
+	}
+}
+
 static int process_sample_event(event_t *event, struct perf_session *session)
 {
 	struct thread *thread;
@@ -480,7 +624,7 @@ static int process_sample_event(event_t *event, struct perf_session *session)
 	if (profile_cpu != -1 && profile_cpu != (int) data.cpu)
 		return 0;
 
-	process_raw_event(data.raw_data, data.cpu, data.time, thread);
+	queue_raw_event(data.raw_data, data.raw_size, data.cpu, data.time, thread);
 
 	return 0;
 }
@@ -576,6 +720,7 @@ static void __cmd_report(void)
 	setup_pager();
 	select_key();
 	read_events();
+	flush_raw_event_queue(ULLONG_MAX);
 	sort_result();
 	print_result();
 }
@@ -608,7 +753,6 @@ static const char *record_args[] = {
 	"record",
 	"-a",
 	"-R",
-	"-M",
 	"-f",
 	"-m", "1024",
 	"-c", "1",