path: root/tools/perf/util/session.c
author	Thomas Gleixner <tglx@linutronix.de>	2010-11-30 12:49:33 -0500
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2010-11-30 16:52:36 -0500
commit	a1225decc43849a73f7e4c333c3fdbbb8a9c1e65 (patch)
tree	d08e7f7aaec99038f592176923cc1990767c69f2 /tools/perf/util/session.c
parent	c320c7b7d380e630f595de1236d9d085b035d5b4 (diff)
perf session: Fix list sort algorithm
The homebrew sort algorithm fails to sort in time order. One of the problem
spots is that it fails to deal with equal timestamps correctly. My first gut
reaction was to replace the fancy list with an rbtree, but the performance is
3 times worse. Rewrite it so it works.

Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <20101130163819.908482530@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
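For illustration only, here is a standalone sketch of the insertion scheme the rewritten __queue_sample_event() below uses: samples sit on a doubly linked list kept sorted by timestamp, and each insertion starts its search from the most recently queued sample rather than from the list head, walking forward or backward as needed. This is not the kernel code; the struct and function names (struct sample, struct queue, queue_sample, insert_between) are invented for the sketch, whereas the real code works with struct list_head and struct ordered_samples.

/*
 * Simplified model of timestamp-ordered insertion starting from the
 * last queued sample (illustrative names, not the kernel code).
 */
#include <stdio.h>
#include <stdlib.h>

struct sample {
	unsigned long long timestamp;
	struct sample *prev, *next;
};

struct queue {
	struct sample head;		/* sentinel; head.next is the oldest sample */
	struct sample *last_sample;	/* sample queued by the previous call */
};

static void queue_init(struct queue *q)
{
	q->head.prev = q->head.next = &q->head;
	q->last_sample = NULL;
}

/* Link @new between @prev and @next. */
static void insert_between(struct sample *new, struct sample *prev,
			   struct sample *next)
{
	new->prev = prev;
	new->next = next;
	prev->next = new;
	next->prev = new;
}

static void queue_sample(struct queue *q, struct sample *new)
{
	struct sample *pos = q->last_sample;

	q->last_sample = new;

	if (!pos) {
		/* First sample: the list is empty. */
		insert_between(new, &q->head, q->head.next);
		return;
	}

	if (pos->timestamp <= new->timestamp) {
		/* Walk forward to the first strictly newer sample. */
		while (pos != &q->head && pos->timestamp <= new->timestamp)
			pos = pos->next;
		insert_between(new, pos->prev, pos);
	} else {
		/* Walk backward to the first older-or-equal sample. */
		while (pos != &q->head && pos->timestamp > new->timestamp)
			pos = pos->prev;
		insert_between(new, pos, pos->next);
	}
}

int main(void)
{
	unsigned long long ts[] = { 30, 10, 20, 20, 40, 15 };
	struct queue q;
	struct sample *s;
	size_t i;

	queue_init(&q);
	for (i = 0; i < sizeof(ts) / sizeof(ts[0]); i++) {
		s = calloc(1, sizeof(*s));
		if (!s)
			return 1;
		s->timestamp = ts[i];
		queue_sample(&q, s);
	}

	/* Prints "10 15 20 20 30 40": sorted, equal timestamps in FIFO order. */
	for (s = q.head.next; s != &q.head; ) {
		struct sample *next = s->next;
		printf("%llu ", s->timestamp);
		free(s);
		s = next;
	}
	printf("\n");
	return 0;
}

Note how samples with equal timestamps stay in queueing order because the comparisons are inclusive on the forward walk; the equal-timestamp case is exactly what the commit message says the old code handled incorrectly.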
Diffstat (limited to 'tools/perf/util/session.c')
-rw-r--r--	tools/perf/util/session.c	113
1 file changed, 47 insertions(+), 66 deletions(-)
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 3ae69550fa0b..daca557b9d28 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -104,7 +104,7 @@ struct perf_session *perf_session__new(const char *filename, int mode, bool forc
 	self->mmap_window = 32;
 	self->machines = RB_ROOT;
 	self->repipe = repipe;
-	INIT_LIST_HEAD(&self->ordered_samples.samples_head);
+	INIT_LIST_HEAD(&self->ordered_samples.samples);
 	machine__init(&self->host_machine, "", HOST_KERNEL_ID);
 
 	if (mode == O_RDONLY) {
@@ -393,27 +393,33 @@ struct sample_queue {
 static void flush_sample_queue(struct perf_session *s,
 			       struct perf_event_ops *ops)
 {
-	struct list_head *head = &s->ordered_samples.samples_head;
-	u64 limit = s->ordered_samples.next_flush;
+	struct ordered_samples *os = &s->ordered_samples;
+	struct list_head *head = &os->samples;
 	struct sample_queue *tmp, *iter;
+	u64 limit = os->next_flush;
+	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
 
 	if (!ops->ordered_samples || !limit)
 		return;
 
 	list_for_each_entry_safe(iter, tmp, head, list) {
 		if (iter->timestamp > limit)
-			return;
-
-		if (iter == s->ordered_samples.last_inserted)
-			s->ordered_samples.last_inserted = NULL;
+			break;
 
 		ops->sample((event_t *)iter->event, s);
 
-		s->ordered_samples.last_flush = iter->timestamp;
+		os->last_flush = iter->timestamp;
		list_del(&iter->list);
 		free(iter->event);
 		free(iter);
 	}
+
+	if (list_empty(head)) {
+		os->last_sample = NULL;
+	} else if (last_ts <= limit) {
+		os->last_sample =
+			list_entry(head->prev, struct sample_queue, list);
+	}
 }
 
 /*
@@ -465,71 +471,50 @@ static int process_finished_round(event_t *event __used,
 	return 0;
 }
 
-static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
-{
-	struct sample_queue *iter;
-
-	list_for_each_entry_reverse(iter, head, list) {
-		if (iter->timestamp < new->timestamp) {
-			list_add(&new->list, &iter->list);
-			return;
-		}
-	}
-
-	list_add(&new->list, head);
-}
-
-static void __queue_sample_before(struct sample_queue *new,
-				  struct sample_queue *iter,
-				  struct list_head *head)
-{
-	list_for_each_entry_continue_reverse(iter, head, list) {
-		if (iter->timestamp < new->timestamp) {
-			list_add(&new->list, &iter->list);
-			return;
-		}
-	}
-
-	list_add(&new->list, head);
-}
-
-static void __queue_sample_after(struct sample_queue *new,
-				 struct sample_queue *iter,
-				 struct list_head *head)
-{
-	list_for_each_entry_continue(iter, head, list) {
-		if (iter->timestamp > new->timestamp) {
-			list_add_tail(&new->list, &iter->list);
-			return;
-		}
-	}
-	list_add_tail(&new->list, head);
-}
-
 /* The queue is ordered by time */
 static void __queue_sample_event(struct sample_queue *new,
 				 struct perf_session *s)
 {
-	struct sample_queue *last_inserted = s->ordered_samples.last_inserted;
-	struct list_head *head = &s->ordered_samples.samples_head;
+	struct ordered_samples *os = &s->ordered_samples;
+	struct sample_queue *sample = os->last_sample;
+	u64 timestamp = new->timestamp;
+	struct list_head *p;
 
+	os->last_sample = new;
 
-	if (!last_inserted) {
-		__queue_sample_end(new, head);
+	if (!sample) {
+		list_add(&new->list, &os->samples);
+		os->max_timestamp = timestamp;
 		return;
 	}
 
 	/*
-	 * Most of the time the current event has a timestamp
-	 * very close to the last event inserted, unless we just switched
-	 * to another event buffer. Having a sorting based on a list and
-	 * on the last inserted event that is close to the current one is
-	 * probably more efficient than an rbtree based sorting.
+	 * last_sample might point to some random place in the list as it's
+	 * the last queued event. We expect that the new event is close to
+	 * this.
 	 */
-	if (last_inserted->timestamp >= new->timestamp)
-		__queue_sample_before(new, last_inserted, head);
-	else
-		__queue_sample_after(new, last_inserted, head);
+	if (sample->timestamp <= timestamp) {
+		while (sample->timestamp <= timestamp) {
+			p = sample->list.next;
+			if (p == &os->samples) {
+				list_add_tail(&new->list, &os->samples);
+				os->max_timestamp = timestamp;
+				return;
+			}
+			sample = list_entry(p, struct sample_queue, list);
+		}
+		list_add_tail(&new->list, &sample->list);
+	} else {
+		while (sample->timestamp > timestamp) {
+			p = sample->list.prev;
+			if (p == &os->samples) {
+				list_add(&new->list, &os->samples);
+				return;
+			}
+			sample = list_entry(p, struct sample_queue, list);
+		}
+		list_add(&new->list, &sample->list);
+	}
 }
 
 static int queue_sample_event(event_t *event, struct sample_data *data,
@@ -559,10 +544,6 @@ static int queue_sample_event(event_t *event, struct sample_data *data,
 	memcpy(new->event, event, event->header.size);
 
 	__queue_sample_event(new, s);
-	s->ordered_samples.last_inserted = new;
-
-	if (new->timestamp > s->ordered_samples.max_timestamp)
-		s->ordered_samples.max_timestamp = new->timestamp;
 
 	return 0;
 }
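As a companion to the insertion sketch above, here is a simplified, standalone model of what the reworked flush path does with the cached last_sample pointer: deliver everything up to the flush limit in order, then drop the pointer if the list emptied, or repoint it at the newest remaining entry if the cached sample itself was flushed (last_ts <= limit). Again the names (struct sample, struct queue, flush_up_to, push_tail) are invented for illustration, and printf() stands in for ops->sample(); the kernel code works on struct ordered_samples.

/*
 * Simplified model of the reworked flush step (illustrative names,
 * not the kernel code).
 */
#include <stdio.h>
#include <stdlib.h>

struct sample {
	unsigned long long timestamp;
	struct sample *prev, *next;
};

struct queue {
	struct sample head;		/* sentinel; head.next is the oldest sample */
	struct sample *last_sample;	/* sample queued by the previous call */
};

static void flush_up_to(struct queue *q, unsigned long long limit)
{
	unsigned long long last_ts =
		q->last_sample ? q->last_sample->timestamp : 0ULL;
	struct sample *s = q->head.next;

	/* Deliver and free every sample that is not newer than the limit. */
	while (s != &q->head && s->timestamp <= limit) {
		struct sample *next = s->next;

		printf("deliver %llu\n", s->timestamp);
		s->prev->next = next;
		next->prev = s->prev;
		free(s);
		s = next;
	}

	if (q->head.next == &q->head)
		q->last_sample = NULL;		/* everything was flushed */
	else if (last_ts <= limit)
		q->last_sample = q->head.prev;	/* cached sample was flushed */
}

/* Append a sample at the tail (the input is already sorted in this demo). */
static void push_tail(struct queue *q, unsigned long long ts)
{
	struct sample *s = calloc(1, sizeof(*s));

	if (!s)
		exit(1);
	s->timestamp = ts;
	s->prev = q->head.prev;
	s->next = &q->head;
	q->head.prev->next = s;
	q->head.prev = s;
	q->last_sample = s;
}

int main(void)
{
	struct queue q = { .head = { .prev = &q.head, .next = &q.head } };

	push_tail(&q, 10);
	push_tail(&q, 20);
	push_tail(&q, 30);

	flush_up_to(&q, 25);	/* delivers 10 and 20; 30 stays queued */
	flush_up_to(&q, 99);	/* delivers 30; last_sample becomes NULL */
	return 0;
}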