path: root/tools/perf/builtin-lock.c
author	David Ahern <dsahern@gmail.com>	2012-08-26 14:24:43 -0400
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2012-09-05 16:19:38 -0400
commit	33d6aef5136075930f7e9a05175bf4f772d8428e (patch)
tree	6a8181c91f37f66ad198a2e7ed411bb8c741e357 /tools/perf/builtin-lock.c
parent	1e6d53223884225f0c3f9f1a3ac54a224d97ab24 (diff)
perf lock: Remove use of die and handle errors
Allows perf to clean up properly on exit.

Signed-off-by: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1346005487-62961-4-git-send-email-dsahern@gmail.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
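For context, the pattern the patch applies throughout builtin-lock.c can be summarized in a small standalone sketch: report the failure with pr_err() and return an error code instead of calling die(), then have each caller propagate that code until the top level can unwind and exit with a status. The sketch below is illustrative only; the helper names and the local pr_err() macro are stand-ins, not the definitions used in the perf tree.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for perf's pr_err(); defined locally so the sketch is self-contained. */
#define pr_err(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)

struct thread_stat {
	unsigned int tid;
};

/*
 * Before the patch, an allocation failure here would call die() and exit
 * immediately, skipping any cleanup; afterwards the error is reported and
 * returned instead.
 */
static struct thread_stat *thread_stat_new(unsigned int tid)
{
	struct thread_stat *st = calloc(1, sizeof(*st));

	if (!st) {
		pr_err("memory allocation failed\n");
		return NULL;	/* let the caller decide how to unwind */
	}
	st->tid = tid;
	return st;
}

/* Callers translate the NULL into their own error return... */
static int process_sample(unsigned int tid)
{
	struct thread_stat *st = thread_stat_new(tid);

	if (!st)
		return -1;
	/* ... normal processing would go here ... */
	free(st);
	return 0;
}

/* ...so the top level can clean up and report a status instead of aborting. */
int main(void)
{
	return process_sample(42) ? EXIT_FAILURE : EXIT_SUCCESS;
}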
Diffstat (limited to 'tools/perf/builtin-lock.c')
-rw-r--r--	tools/perf/builtin-lock.c	181
1 file changed, 124 insertions, 57 deletions
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 585aae2858b8..75153c87e650 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -161,8 +161,10 @@ static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
 		return st;
 
 	st = zalloc(sizeof(struct thread_stat));
-	if (!st)
-		die("memory allocation failed\n");
+	if (!st) {
+		pr_err("memory allocation failed\n");
+		return NULL;
+	}
 
 	st->tid = tid;
 	INIT_LIST_HEAD(&st->seq_list);
@@ -181,8 +183,10 @@ static struct thread_stat *thread_stat_findnew_first(u32 tid)
 	struct thread_stat *st;
 
 	st = zalloc(sizeof(struct thread_stat));
-	if (!st)
-		die("memory allocation failed\n");
+	if (!st) {
+		pr_err("memory allocation failed\n");
+		return NULL;
+	}
 	st->tid = tid;
 	INIT_LIST_HEAD(&st->seq_list);
 
@@ -248,18 +252,20 @@ struct lock_key keys[] = {
 	{ NULL, NULL }
 };
 
-static void select_key(void)
+static int select_key(void)
 {
 	int i;
 
 	for (i = 0; keys[i].name; i++) {
 		if (!strcmp(keys[i].name, sort_key)) {
 			compare = keys[i].key;
-			return;
+			return 0;
 		}
 	}
 
-	die("Unknown compare key:%s\n", sort_key);
+	pr_err("Unknown compare key: %s\n", sort_key);
+
+	return -1;
 }
 
 static void insert_to_result(struct lock_stat *st,
@@ -324,7 +330,8 @@ static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
 	return new;
 
 alloc_failed:
-	die("memory allocation failed\n");
+	pr_err("memory allocation failed\n");
+	return NULL;
 }
 
 static const char *input_name;
@@ -356,16 +363,16 @@ struct trace_release_event {
 };
 
 struct trace_lock_handler {
-	void (*acquire_event)(struct trace_acquire_event *,
+	int (*acquire_event)(struct trace_acquire_event *,
			      const struct perf_sample *sample);
 
-	void (*acquired_event)(struct trace_acquired_event *,
+	int (*acquired_event)(struct trace_acquired_event *,
			       const struct perf_sample *sample);
 
-	void (*contended_event)(struct trace_contended_event *,
+	int (*contended_event)(struct trace_contended_event *,
				const struct perf_sample *sample);
 
-	void (*release_event)(struct trace_release_event *,
+	int (*release_event)(struct trace_release_event *,
			      const struct perf_sample *sample);
 };
 
@@ -379,8 +386,10 @@ static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr)
 	}
 
 	seq = zalloc(sizeof(struct lock_seq_stat));
-	if (!seq)
-		die("Not enough memory\n");
+	if (!seq) {
+		pr_err("memory allocation failed\n");
+		return NULL;
+	}
 	seq->state = SEQ_STATE_UNINITIALIZED;
 	seq->addr = addr;
 
@@ -403,7 +412,7 @@ enum acquire_flags {
 	READ_LOCK = 2,
 };
 
-static void
+static int
 report_lock_acquire_event(struct trace_acquire_event *acquire_event,
			  const struct perf_sample *sample)
 {
@@ -412,11 +421,18 @@ report_lock_acquire_event(struct trace_acquire_event *acquire_event,
 	struct lock_seq_stat *seq;
 
 	ls = lock_stat_findnew(acquire_event->addr, acquire_event->name);
+	if (!ls)
+		return -1;
 	if (ls->discard)
-		return;
+		return 0;
 
 	ts = thread_stat_findnew(sample->tid);
+	if (!ts)
+		return -1;
+
 	seq = get_seq(ts, acquire_event->addr);
+	if (!seq)
+		return -1;
 
 	switch (seq->state) {
 	case SEQ_STATE_UNINITIALIZED:
@@ -461,10 +477,10 @@ broken:
 	ls->nr_acquire++;
 	seq->prev_event_time = sample->time;
 end:
-	return;
+	return 0;
 }
 
-static void
+static int
 report_lock_acquired_event(struct trace_acquired_event *acquired_event,
			   const struct perf_sample *sample)
 {
@@ -475,16 +491,23 @@ report_lock_acquired_event(struct trace_acquired_event *acquired_event,
 	u64 contended_term;
 
 	ls = lock_stat_findnew(acquired_event->addr, acquired_event->name);
+	if (!ls)
+		return -1;
 	if (ls->discard)
-		return;
+		return 0;
 
 	ts = thread_stat_findnew(sample->tid);
+	if (!ts)
+		return -1;
+
 	seq = get_seq(ts, acquired_event->addr);
+	if (!seq)
+		return -1;
 
 	switch (seq->state) {
 	case SEQ_STATE_UNINITIALIZED:
 		/* orphan event, do nothing */
-		return;
+		return 0;
 	case SEQ_STATE_ACQUIRING:
 		break;
 	case SEQ_STATE_CONTENDED:
@@ -515,10 +538,10 @@ report_lock_acquired_event(struct trace_acquired_event *acquired_event,
 	ls->nr_acquired++;
 	seq->prev_event_time = timestamp;
 end:
-	return;
+	return 0;
 }
 
-static void
+static int
 report_lock_contended_event(struct trace_contended_event *contended_event,
			    const struct perf_sample *sample)
 {
@@ -527,16 +550,23 @@ report_lock_contended_event(struct trace_contended_event *contended_event,
 	struct lock_seq_stat *seq;
 
 	ls = lock_stat_findnew(contended_event->addr, contended_event->name);
+	if (!ls)
+		return -1;
 	if (ls->discard)
-		return;
+		return 0;
 
 	ts = thread_stat_findnew(sample->tid);
+	if (!ts)
+		return -1;
+
 	seq = get_seq(ts, contended_event->addr);
+	if (!seq)
+		return -1;
 
 	switch (seq->state) {
 	case SEQ_STATE_UNINITIALIZED:
 		/* orphan event, do nothing */
-		return;
+		return 0;
 	case SEQ_STATE_ACQUIRING:
 		break;
 	case SEQ_STATE_RELEASED:
@@ -559,10 +589,10 @@ report_lock_contended_event(struct trace_contended_event *contended_event,
 	ls->nr_contended++;
 	seq->prev_event_time = sample->time;
 end:
-	return;
+	return 0;
 }
 
-static void
+static int
 report_lock_release_event(struct trace_release_event *release_event,
			  const struct perf_sample *sample)
 {
@@ -571,11 +601,18 @@ report_lock_release_event(struct trace_release_event *release_event,
 	struct lock_seq_stat *seq;
 
 	ls = lock_stat_findnew(release_event->addr, release_event->name);
+	if (!ls)
+		return -1;
 	if (ls->discard)
-		return;
+		return 0;
 
 	ts = thread_stat_findnew(sample->tid);
+	if (!ts)
+		return -1;
+
 	seq = get_seq(ts, release_event->addr);
+	if (!seq)
+		return -1;
 
 	switch (seq->state) {
 	case SEQ_STATE_UNINITIALIZED:
@@ -609,7 +646,7 @@ free_seq:
 	list_del(&seq->list);
 	free(seq);
 end:
-	return;
+	return 0;
 }
 
 /* lock oriented handlers */
@@ -623,13 +660,14 @@ static struct trace_lock_handler report_lock_ops = {
 
 static struct trace_lock_handler *trace_handler;
 
-static void perf_evsel__process_lock_acquire(struct perf_evsel *evsel,
+static int perf_evsel__process_lock_acquire(struct perf_evsel *evsel,
					     struct perf_sample *sample)
 {
 	struct trace_acquire_event acquire_event;
 	struct event_format *event = evsel->tp_format;
 	void *data = sample->raw_data;
 	u64 tmp; /* this is required for casting... */
+	int rc = 0;
 
 	tmp = raw_field_value(event, "lockdep_addr", data);
 	memcpy(&acquire_event.addr, &tmp, sizeof(void *));
@@ -637,70 +675,84 @@ static void perf_evsel__process_lock_acquire(struct perf_evsel *evsel,
 	acquire_event.flag = (int)raw_field_value(event, "flag", data);
 
 	if (trace_handler->acquire_event)
-		trace_handler->acquire_event(&acquire_event, sample);
+		rc = trace_handler->acquire_event(&acquire_event, sample);
+
+	return rc;
 }
 
-static void perf_evsel__process_lock_acquired(struct perf_evsel *evsel,
+static int perf_evsel__process_lock_acquired(struct perf_evsel *evsel,
					      struct perf_sample *sample)
 {
 	struct trace_acquired_event acquired_event;
 	struct event_format *event = evsel->tp_format;
 	void *data = sample->raw_data;
 	u64 tmp; /* this is required for casting... */
+	int rc = 0;
 
 	tmp = raw_field_value(event, "lockdep_addr", data);
 	memcpy(&acquired_event.addr, &tmp, sizeof(void *));
 	acquired_event.name = (char *)raw_field_ptr(event, "name", data);
 
-	if (trace_handler->acquire_event)
-		trace_handler->acquired_event(&acquired_event, sample);
+	if (trace_handler->acquired_event)
+		rc = trace_handler->acquired_event(&acquired_event, sample);
+
+	return rc;
 }
 
-static void perf_evsel__process_lock_contended(struct perf_evsel *evsel,
+static int perf_evsel__process_lock_contended(struct perf_evsel *evsel,
					       struct perf_sample *sample)
 {
 	struct trace_contended_event contended_event;
 	struct event_format *event = evsel->tp_format;
 	void *data = sample->raw_data;
 	u64 tmp; /* this is required for casting... */
+	int rc = 0;
 
 	tmp = raw_field_value(event, "lockdep_addr", data);
 	memcpy(&contended_event.addr, &tmp, sizeof(void *));
 	contended_event.name = (char *)raw_field_ptr(event, "name", data);
 
-	if (trace_handler->acquire_event)
-		trace_handler->contended_event(&contended_event, sample);
+	if (trace_handler->contended_event)
+		rc = trace_handler->contended_event(&contended_event, sample);
+
+	return rc;
 }
 
-static void perf_evsel__process_lock_release(struct perf_evsel *evsel,
+static int perf_evsel__process_lock_release(struct perf_evsel *evsel,
					    struct perf_sample *sample)
 {
 	struct trace_release_event release_event;
 	struct event_format *event = evsel->tp_format;
 	void *data = sample->raw_data;
 	u64 tmp; /* this is required for casting... */
+	int rc = 0;
 
 	tmp = raw_field_value(event, "lockdep_addr", data);
 	memcpy(&release_event.addr, &tmp, sizeof(void *));
 	release_event.name = (char *)raw_field_ptr(event, "name", data);
 
-	if (trace_handler->acquire_event)
-		trace_handler->release_event(&release_event, sample);
+	if (trace_handler->release_event)
+		rc = trace_handler->release_event(&release_event, sample);
+
+	return rc;
 }
 
-static void perf_evsel__process_lock_event(struct perf_evsel *evsel,
+static int perf_evsel__process_lock_event(struct perf_evsel *evsel,
					  struct perf_sample *sample)
 {
 	struct event_format *event = evsel->tp_format;
+	int rc = 0;
 
 	if (!strcmp(event->name, "lock_acquire"))
-		perf_evsel__process_lock_acquire(evsel, sample);
+		rc = perf_evsel__process_lock_acquire(evsel, sample);
 	if (!strcmp(event->name, "lock_acquired"))
-		perf_evsel__process_lock_acquired(evsel, sample);
+		rc = perf_evsel__process_lock_acquired(evsel, sample);
 	if (!strcmp(event->name, "lock_contended"))
-		perf_evsel__process_lock_contended(evsel, sample);
+		rc = perf_evsel__process_lock_contended(evsel, sample);
 	if (!strcmp(event->name, "lock_release"))
-		perf_evsel__process_lock_release(evsel, sample);
+		rc = perf_evsel__process_lock_release(evsel, sample);
+
+	return rc;
 }
 
 static void print_bad_events(int bad, int total)
@@ -802,14 +854,20 @@ static void dump_map(void)
 	}
 }
 
-static void dump_info(void)
+static int dump_info(void)
 {
+	int rc = 0;
+
 	if (info_threads)
 		dump_threads();
 	else if (info_map)
 		dump_map();
-	else
-		die("Unknown type of information\n");
+	else {
+		rc = -1;
+		pr_err("Unknown type of information\n");
+	}
+
+	return rc;
 }
 
 static int process_sample_event(struct perf_tool *tool __used,
@@ -826,8 +884,7 @@ static int process_sample_event(struct perf_tool *tool __used,
 		return -1;
 	}
 
-	perf_evsel__process_lock_event(evsel, sample);
-	return 0;
+	return perf_evsel__process_lock_event(evsel, sample);
 }
 
 static struct perf_tool eops = {
@@ -839,8 +896,10 @@ static struct perf_tool eops = {
 static int read_events(void)
 {
 	session = perf_session__new(input_name, O_RDONLY, 0, false, &eops);
-	if (!session)
-		die("Initializing perf session failed\n");
+	if (!session) {
+		pr_err("Initializing perf session failed\n");
+		return -1;
+	}
 
 	return perf_session__process_events(session, &eops);
 }
@@ -857,13 +916,18 @@ static void sort_result(void)
 	}
 }
 
-static void __cmd_report(void)
+static int __cmd_report(void)
 {
 	setup_pager();
-	select_key();
-	read_events();
+
+	if ((select_key() != 0) ||
+	    (read_events() != 0))
+		return -1;
+
 	sort_result();
 	print_result();
+
+	return 0;
 }
 
 static const char * const report_usage[] = {
@@ -959,6 +1023,7 @@ static int __cmd_record(int argc, const char **argv)
 int cmd_lock(int argc, const char **argv, const char *prefix __used)
 {
 	unsigned int i;
+	int rc = 0;
 
 	symbol__init();
 	for (i = 0; i < LOCKHASH_SIZE; i++)
@@ -993,11 +1058,13 @@ int cmd_lock(int argc, const char **argv, const char *prefix __used)
 		/* recycling report_lock_ops */
 		trace_handler = &report_lock_ops;
 		setup_pager();
-		read_events();
-		dump_info();
+		if (read_events() != 0)
+			rc = -1;
+		else
+			rc = dump_info();
 	} else {
 		usage_with_options(lock_usage, lock_options);
 	}
 
-	return 0;
+	return rc;
 }