author		Arnaldo Carvalho de Melo <acme@redhat.com>	2012-09-24 09:52:12 -0400
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2012-09-24 09:52:12 -0400
commit		746f16ec6ae370d58ecf4370c9955bd6f35d44a3 (patch)
tree		d733d4de2283f5f5dc621a784f8b107e8845f5f4 /tools/perf/builtin-lock.c
parent		0f7d2f1b65e3415854bbe842705f698a3350d7da (diff)
perf lock: Use perf_evsel__intval and perf_session__set_tracepoints_handlers
Following the model of 'perf sched':

. raw_field_value searches the common fields first, and those are unused
  in this tool.

. Leave the perf_evsel__intval() calls to the actual handlers; some of
  them may not need every field value and so can skip part of that cost.

. Using perf_session__set_tracepoints_handlers() saves all those strcmp
  calls needed to find the right handler at sample processing time: the
  lookup is done just once and the handler is then taken straight from
  evsel->handler.func, as illustrated in the sketch below.
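To make the last bullet concrete, here is a minimal, self-contained C
sketch of the same dispatch pattern. It is written for this page rather
than taken from perf: the names (set_handlers, process_sample, the toy
evsel/sample structs) are illustrative stand-ins, not perf APIs. The idea
is to resolve each event name to a handler once at setup, cache the
function pointer in the evsel-like object, and make the per-sample path a
single indirect call with no strcmp chain.

/* pattern_sketch.c - illustration only, not perf code. */
#include <stdio.h>
#include <string.h>

struct sample { unsigned long long time; };
struct evsel;

/* Handler type, mirroring the tracepoint_handler typedef in the patch. */
typedef int (*tracepoint_handler)(struct evsel *evsel, struct sample *sample);

/* An "evsel" caches the resolved handler, like evsel->handler.func. */
struct evsel {
	const char *name;
	tracepoint_handler handler;
};

static int handle_acquire(struct evsel *evsel, struct sample *sample)
{
	printf("%s at %llu\n", evsel->name, sample->time);
	return 0;
}

static int handle_release(struct evsel *evsel, struct sample *sample)
{
	printf("%s at %llu\n", evsel->name, sample->time);
	return 0;
}

/* Name -> handler table, resolved once (cf. lock_tracepoints[]). */
static const struct { const char *name; tracepoint_handler handler; } handlers[] = {
	{ "lock:lock_acquire", handle_acquire },
	{ "lock:lock_release", handle_release },
};

/* Done once at setup (cf. perf_session__set_tracepoints_handlers()). */
static void set_handlers(struct evsel *evsels, size_t n)
{
	for (size_t i = 0; i < n; i++)
		for (size_t j = 0; j < sizeof(handlers) / sizeof(handlers[0]); j++)
			if (!strcmp(evsels[i].name, handlers[j].name))
				evsels[i].handler = handlers[j].handler;
}

/* Per-sample path: one indirect call, no string comparisons. */
static int process_sample(struct evsel *evsel, struct sample *sample)
{
	if (evsel->handler)
		return evsel->handler(evsel, sample);
	return 0;
}

int main(void)
{
	struct evsel evsels[] = {
		{ "lock:lock_acquire", NULL },
		{ "lock:lock_release", NULL },
	};
	struct sample s = { .time = 42 };

	set_handlers(evsels, sizeof(evsels) / sizeof(evsels[0]));
	process_sample(&evsels[0], &s);
	process_sample(&evsels[1], &s);
	return 0;
}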
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Namhyung Kim <namhyung@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-v9x3q9rv4caxtox7wtjpchq5@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf/builtin-lock.c')
-rw-r--r--	tools/perf/builtin-lock.c	233
1 file changed, 87 insertions(+), 146 deletions(-)
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index a8035207a3dd..7d6e09949880 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -1,6 +1,7 @@
 #include "builtin.h"
 #include "perf.h"
 
+#include "util/evlist.h"
 #include "util/evsel.h"
 #include "util/util.h"
 #include "util/cache.h"
@@ -41,7 +42,7 @@ struct lock_stat {
 	struct rb_node		rb;		/* used for sorting */
 
 	/*
-	 * FIXME: raw_field_value() returns unsigned long long,
+	 * FIXME: perf_evsel__intval() returns u64,
 	 * so address of lockdep_map should be dealed as 64bit.
 	 * Is there more better solution?
 	 */
@@ -336,44 +337,18 @@ alloc_failed:
 
 static const char *input_name;
 
-struct raw_event_sample {
-	u32 size;
-	char data[0];
-};
-
-struct trace_acquire_event {
-	void *addr;
-	const char *name;
-	int flag;
-};
-
-struct trace_acquired_event {
-	void *addr;
-	const char *name;
-};
-
-struct trace_contended_event {
-	void *addr;
-	const char *name;
-};
-
-struct trace_release_event {
-	void *addr;
-	const char *name;
-};
-
 struct trace_lock_handler {
-	int (*acquire_event)(struct trace_acquire_event *,
-			     const struct perf_sample *sample);
+	int (*acquire_event)(struct perf_evsel *evsel,
+			     struct perf_sample *sample);
 
-	int (*acquired_event)(struct trace_acquired_event *,
-			      const struct perf_sample *sample);
+	int (*acquired_event)(struct perf_evsel *evsel,
+			      struct perf_sample *sample);
 
-	int (*contended_event)(struct trace_contended_event *,
-			       const struct perf_sample *sample);
+	int (*contended_event)(struct perf_evsel *evsel,
+			       struct perf_sample *sample);
 
-	int (*release_event)(struct trace_release_event *,
-			     const struct perf_sample *sample);
+	int (*release_event)(struct perf_evsel *evsel,
+			     struct perf_sample *sample);
 };
 
 static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr)
@@ -412,15 +387,20 @@ enum acquire_flags {
 	READ_LOCK = 2,
 };
 
-static int
-report_lock_acquire_event(struct trace_acquire_event *acquire_event,
-			  const struct perf_sample *sample)
+static int report_lock_acquire_event(struct perf_evsel *evsel,
+				     struct perf_sample *sample)
 {
+	void *addr;
 	struct lock_stat *ls;
 	struct thread_stat *ts;
 	struct lock_seq_stat *seq;
+	const char *name = perf_evsel__strval(evsel, sample, "name");
+	u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
+	int flag = perf_evsel__intval(evsel, sample, "flag");
 
-	ls = lock_stat_findnew(acquire_event->addr, acquire_event->name);
+	memcpy(&addr, &tmp, sizeof(void *));
+
+	ls = lock_stat_findnew(addr, name);
 	if (!ls)
 		return -1;
 	if (ls->discard)
@@ -430,19 +410,19 @@ report_lock_acquire_event(struct trace_acquire_event *acquire_event,
 	if (!ts)
 		return -1;
 
-	seq = get_seq(ts, acquire_event->addr);
+	seq = get_seq(ts, addr);
 	if (!seq)
 		return -1;
 
 	switch (seq->state) {
 	case SEQ_STATE_UNINITIALIZED:
 	case SEQ_STATE_RELEASED:
-		if (!acquire_event->flag) {
+		if (!flag) {
 			seq->state = SEQ_STATE_ACQUIRING;
 		} else {
-			if (acquire_event->flag & TRY_LOCK)
+			if (flag & TRY_LOCK)
 				ls->nr_trylock++;
-			if (acquire_event->flag & READ_LOCK)
+			if (flag & READ_LOCK)
 				ls->nr_readlock++;
 			seq->state = SEQ_STATE_READ_ACQUIRED;
 			seq->read_count = 1;
@@ -450,7 +430,7 @@ report_lock_acquire_event(struct trace_acquire_event *acquire_event,
 		}
 		break;
 	case SEQ_STATE_READ_ACQUIRED:
-		if (acquire_event->flag & READ_LOCK) {
+		if (flag & READ_LOCK) {
 			seq->read_count++;
 			ls->nr_acquired++;
 			goto end;
@@ -480,17 +460,20 @@ end:
 	return 0;
 }
 
-static int
-report_lock_acquired_event(struct trace_acquired_event *acquired_event,
-			   const struct perf_sample *sample)
+static int report_lock_acquired_event(struct perf_evsel *evsel,
+				      struct perf_sample *sample)
 {
-	u64 timestamp = sample->time;
+	void *addr;
 	struct lock_stat *ls;
 	struct thread_stat *ts;
 	struct lock_seq_stat *seq;
 	u64 contended_term;
+	const char *name = perf_evsel__strval(evsel, sample, "name");
+	u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
+
+	memcpy(&addr, &tmp, sizeof(void *));
 
-	ls = lock_stat_findnew(acquired_event->addr, acquired_event->name);
+	ls = lock_stat_findnew(addr, name);
 	if (!ls)
 		return -1;
 	if (ls->discard)
@@ -500,7 +483,7 @@ report_lock_acquired_event(struct trace_acquired_event *acquired_event,
 	if (!ts)
 		return -1;
 
-	seq = get_seq(ts, acquired_event->addr);
+	seq = get_seq(ts, addr);
 	if (!seq)
 		return -1;
 
@@ -511,7 +494,7 @@ report_lock_acquired_event(struct trace_acquired_event *acquired_event,
 	case SEQ_STATE_ACQUIRING:
 		break;
 	case SEQ_STATE_CONTENDED:
-		contended_term = timestamp - seq->prev_event_time;
+		contended_term = sample->time - seq->prev_event_time;
 		ls->wait_time_total += contended_term;
 		if (contended_term < ls->wait_time_min)
 			ls->wait_time_min = contended_term;
@@ -536,20 +519,24 @@ report_lock_acquired_event(struct trace_acquired_event *acquired_event,
 
 	seq->state = SEQ_STATE_ACQUIRED;
 	ls->nr_acquired++;
-	seq->prev_event_time = timestamp;
+	seq->prev_event_time = sample->time;
 end:
 	return 0;
 }
 
-static int
-report_lock_contended_event(struct trace_contended_event *contended_event,
-			    const struct perf_sample *sample)
+static int report_lock_contended_event(struct perf_evsel *evsel,
+				       struct perf_sample *sample)
 {
+	void *addr;
 	struct lock_stat *ls;
 	struct thread_stat *ts;
 	struct lock_seq_stat *seq;
+	const char *name = perf_evsel__strval(evsel, sample, "name");
+	u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
+
+	memcpy(&addr, &tmp, sizeof(void *));
 
-	ls = lock_stat_findnew(contended_event->addr, contended_event->name);
+	ls = lock_stat_findnew(addr, name);
 	if (!ls)
 		return -1;
 	if (ls->discard)
@@ -559,7 +546,7 @@ report_lock_contended_event(struct trace_contended_event *contended_event,
 	if (!ts)
 		return -1;
 
-	seq = get_seq(ts, contended_event->addr);
+	seq = get_seq(ts, addr);
 	if (!seq)
 		return -1;
 
@@ -592,15 +579,19 @@ end:
 	return 0;
 }
 
-static int
-report_lock_release_event(struct trace_release_event *release_event,
-			  const struct perf_sample *sample)
+static int report_lock_release_event(struct perf_evsel *evsel,
+				     struct perf_sample *sample)
 {
+	void *addr;
 	struct lock_stat *ls;
 	struct thread_stat *ts;
 	struct lock_seq_stat *seq;
+	const char *name = perf_evsel__strval(evsel, sample, "name");
+	u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
 
-	ls = lock_stat_findnew(release_event->addr, release_event->name);
+	memcpy(&addr, &tmp, sizeof(void *));
+
+	ls = lock_stat_findnew(addr, name);
 	if (!ls)
 		return -1;
 	if (ls->discard)
@@ -610,7 +601,7 @@ report_lock_release_event(struct trace_release_event *release_event,
 	if (!ts)
 		return -1;
 
-	seq = get_seq(ts, release_event->addr);
+	seq = get_seq(ts, addr);
 	if (!seq)
 		return -1;
 
@@ -663,96 +654,33 @@ static struct trace_lock_handler *trace_handler;
 static int perf_evsel__process_lock_acquire(struct perf_evsel *evsel,
 					    struct perf_sample *sample)
 {
-	struct trace_acquire_event acquire_event;
-	struct event_format *event = evsel->tp_format;
-	void *data = sample->raw_data;
-	u64 tmp;		/* this is required for casting... */
-	int rc = 0;
-
-	tmp = raw_field_value(event, "lockdep_addr", data);
-	memcpy(&acquire_event.addr, &tmp, sizeof(void *));
-	acquire_event.name = (char *)raw_field_ptr(event, "name", data);
-	acquire_event.flag = (int)raw_field_value(event, "flag", data);
-
 	if (trace_handler->acquire_event)
-		rc = trace_handler->acquire_event(&acquire_event, sample);
-
-	return rc;
+		return trace_handler->acquire_event(evsel, sample);
+	return 0;
 }
 
 static int perf_evsel__process_lock_acquired(struct perf_evsel *evsel,
 					     struct perf_sample *sample)
 {
-	struct trace_acquired_event acquired_event;
-	struct event_format *event = evsel->tp_format;
-	void *data = sample->raw_data;
-	u64 tmp;		/* this is required for casting... */
-	int rc = 0;
-
-	tmp = raw_field_value(event, "lockdep_addr", data);
-	memcpy(&acquired_event.addr, &tmp, sizeof(void *));
-	acquired_event.name = (char *)raw_field_ptr(event, "name", data);
-
 	if (trace_handler->acquired_event)
-		rc = trace_handler->acquired_event(&acquired_event, sample);
-
-	return rc;
+		return trace_handler->acquired_event(evsel, sample);
+	return 0;
 }
 
 static int perf_evsel__process_lock_contended(struct perf_evsel *evsel,
 					      struct perf_sample *sample)
 {
-	struct trace_contended_event contended_event;
-	struct event_format *event = evsel->tp_format;
-	void *data = sample->raw_data;
-	u64 tmp;		/* this is required for casting... */
-	int rc = 0;
-
-	tmp = raw_field_value(event, "lockdep_addr", data);
-	memcpy(&contended_event.addr, &tmp, sizeof(void *));
-	contended_event.name = (char *)raw_field_ptr(event, "name", data);
-
 	if (trace_handler->contended_event)
-		rc = trace_handler->contended_event(&contended_event, sample);
-
-	return rc;
+		return trace_handler->contended_event(evsel, sample);
+	return 0;
 }
 
 static int perf_evsel__process_lock_release(struct perf_evsel *evsel,
 					    struct perf_sample *sample)
 {
-	struct trace_release_event release_event;
-	struct event_format *event = evsel->tp_format;
-	void *data = sample->raw_data;
-	u64 tmp;		/* this is required for casting... */
-	int rc = 0;
-
-	tmp = raw_field_value(event, "lockdep_addr", data);
-	memcpy(&release_event.addr, &tmp, sizeof(void *));
-	release_event.name = (char *)raw_field_ptr(event, "name", data);
-
 	if (trace_handler->release_event)
-		rc = trace_handler->release_event(&release_event, sample);
-
-	return rc;
-}
-
-static int perf_evsel__process_lock_event(struct perf_evsel *evsel,
-					  struct perf_sample *sample)
-{
-	struct event_format *event = evsel->tp_format;
-	int rc = 0;
-
-	if (!strcmp(event->name, "lock_acquire"))
-		rc = perf_evsel__process_lock_acquire(evsel, sample);
-	if (!strcmp(event->name, "lock_acquired"))
-		rc = perf_evsel__process_lock_acquired(evsel, sample);
-	if (!strcmp(event->name, "lock_contended"))
-		rc = perf_evsel__process_lock_contended(evsel, sample);
-	if (!strcmp(event->name, "lock_release"))
-		rc = perf_evsel__process_lock_release(evsel, sample);
-
-	return rc;
+		return trace_handler->release_event(evsel, sample);
+	return 0;
 }
 
 static void print_bad_events(int bad, int total)
@@ -870,6 +798,9 @@ static int dump_info(void)
 	return rc;
 }
 
+typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
+				  struct perf_sample *sample);
+
 static int process_sample_event(struct perf_tool *tool __maybe_unused,
 				union perf_event *event,
 				struct perf_sample *sample,
@@ -884,7 +815,12 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
 		return -1;
 	}
 
-	return perf_evsel__process_lock_event(evsel, sample);
+	if (evsel->handler.func != NULL) {
+		tracepoint_handler f = evsel->handler.func;
+		return f(evsel, sample);
+	}
+
+	return 0;
 }
 
 static struct perf_tool eops = {
@@ -893,6 +829,13 @@ static struct perf_tool eops = {
 	.ordered_samples	= true,
 };
 
+static const struct perf_evsel_str_handler lock_tracepoints[] = {
+	{ "lock:lock_acquire",	 perf_evsel__process_lock_acquire,   }, /* CONFIG_LOCKDEP */
+	{ "lock:lock_acquired",	 perf_evsel__process_lock_acquired,  }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
+	{ "lock:lock_contended", perf_evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
+	{ "lock:lock_release",	 perf_evsel__process_lock_release,   }, /* CONFIG_LOCKDEP */
+};
+
 static int read_events(void)
 {
 	session = perf_session__new(input_name, O_RDONLY, 0, false, &eops);
@@ -901,6 +844,11 @@ static int read_events(void)
 		return -1;
 	}
 
+	if (perf_session__set_tracepoints_handlers(session, lock_tracepoints)) {
+		pr_err("Initializing perf session tracepoint handlers failed\n");
+		return -1;
+	}
+
 	return perf_session__process_events(session, &eops);
 }
 
@@ -967,13 +915,6 @@ static const struct option lock_options[] = {
 	OPT_END()
 };
 
-static const char * const lock_tracepoints[] = {
-	"lock:lock_acquire",	/* CONFIG_LOCKDEP */
-	"lock:lock_acquired",	/* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
-	"lock:lock_contended",	/* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
-	"lock:lock_release",	/* CONFIG_LOCKDEP */
-};
-
 static const char *record_args[] = {
 	"record",
 	"-R",
@@ -988,10 +929,10 @@ static int __cmd_record(int argc, const char **argv)
 	const char **rec_argv;
 
 	for (i = 0; i < ARRAY_SIZE(lock_tracepoints); i++) {
-		if (!is_valid_tracepoint(lock_tracepoints[i])) {
+		if (!is_valid_tracepoint(lock_tracepoints[i].name)) {
 			pr_err("tracepoint %s is not enabled. "
 			       "Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n",
-			       lock_tracepoints[i]);
+			       lock_tracepoints[i].name);
 			return 1;
 		}
 	}
@@ -1009,7 +950,7 @@ static int __cmd_record(int argc, const char **argv)
 
 	for (j = 0; j < ARRAY_SIZE(lock_tracepoints); j++) {
 		rec_argv[i++] = "-e";
-		rec_argv[i++] = strdup(lock_tracepoints[j]);
+		rec_argv[i++] = strdup(lock_tracepoints[j].name);
 	}
 
 	for (j = 1; j < (unsigned int)argc; j++, i++)