author     Arnaldo Carvalho de Melo <acme@redhat.com>   2012-12-10 12:53:43 -0500
committer  Arnaldo Carvalho de Melo <acme@redhat.com>   2012-12-10 12:53:43 -0500
commit     7be5ebe8767eaa482e18f566de5f56c1519abf59 (patch)
tree       353f1845712fbe56ac6990dbc8fb83e07dc6e06a
parent     3f067dcab711c2df7eefcfc5b3aa9a0e2b5f7d42 (diff)
perf evsel: Update sample_size when setting sample_type bits
We use evsel->sample_size to detect underflows in perf_evsel__parse_sample,
but we were failing to update it after perf_evsel__init(), i.e. when we
decide, after creating an evsel, that we want some extra sample_type bit set.

Fix it by introducing methods to set a bit that will take care of correctly
adjusting evsel->sample_size.

Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Namhyung Kim <namhyung@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-2ny5pzsing0dcth7hws48x9c@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
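To illustrate the idea behind the new helpers, here is a minimal, self-contained
sketch. It is not the actual perf code: the struct names, the bit values and the
set_sample_bit/reset_sample_bit names below are stand-ins. It only shows how
toggling a sample_type bit can keep a per-event sample_size in sync, which is
what lets a parser reject undersized samples.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Stand-ins for the real perf structures (illustrative only). */
struct sketch_attr {
	u64 sample_type;	/* bit mask of enabled sample fields */
};

struct sketch_evsel {
	struct sketch_attr attr;
	u64 sample_size;	/* bytes the enabled bits contribute per sample */
};

/* Set a bit and grow sample_size only if the bit was not already set. */
static void set_sample_bit(struct sketch_evsel *evsel, u64 bit)
{
	if (!(evsel->attr.sample_type & bit)) {
		evsel->attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
	}
}

/* Clear a bit and shrink sample_size only if the bit was set. */
static void reset_sample_bit(struct sketch_evsel *evsel, u64 bit)
{
	if (evsel->attr.sample_type & bit) {
		evsel->attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
	}
}

int main(void)
{
	/* Illustrative bit values; the real ones live in perf_event.h. */
	const u64 SAMPLE_TID  = 1ULL << 1;
	const u64 SAMPLE_TIME = 1ULL << 2;
	struct sketch_evsel evsel = { { 0 }, 0 };

	set_sample_bit(&evsel, SAMPLE_TID);
	set_sample_bit(&evsel, SAMPLE_TIME);
	set_sample_bit(&evsel, SAMPLE_TIME);	/* idempotent: size unchanged */
	reset_sample_bit(&evsel, SAMPLE_TID);

	/* Only TIME is left, so sample_size should be 8 bytes. */
	printf("sample_size = %llu\n", (unsigned long long)evsel.sample_size);
	return 0;
}

With the size tracked this way, a sample parser can compare an incoming event's
size against sample_size and bail out before reading past the end of the record,
which is the underflow detection the commit message refers to.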
-rw-r--r--  tools/perf/builtin-record.c      2
-rw-r--r--  tools/perf/builtin-top.c        11
-rw-r--r--  tools/perf/tests/perf-record.c   6
-rw-r--r--  tools/perf/util/evlist.c         2
-rw-r--r--  tools/perf/util/evsel.c         45
-rw-r--r--  tools/perf/util/evsel.h         11
6 files changed, 54 insertions(+), 23 deletions(-)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index f3151d3c70ce..0be6605db9ea 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -286,7 +286,7 @@ try_again:
 				 */
 				opts->sample_id_all_missing = true;
 				if (!opts->sample_time && !opts->raw_samples && !time_needed)
-					attr->sample_type &= ~PERF_SAMPLE_TIME;
+					perf_evsel__reset_sample_bit(pos, TIME);
 
 				goto retry_sample_id;
 			}
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 987e1b8a9c2e..31a7c51aac76 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -901,24 +901,25 @@ static void perf_top__start_counters(struct perf_top *top)
 	list_for_each_entry(counter, &evlist->entries, node) {
 		struct perf_event_attr *attr = &counter->attr;
 
-		attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
+		perf_evsel__set_sample_bit(counter, IP);
+		perf_evsel__set_sample_bit(counter, TID);
 
 		if (top->freq) {
-			attr->sample_type |= PERF_SAMPLE_PERIOD;
+			perf_evsel__set_sample_bit(counter, PERIOD);
 			attr->freq = 1;
 			attr->sample_freq = top->freq;
 		}
 
 		if (evlist->nr_entries > 1) {
-			attr->sample_type |= PERF_SAMPLE_ID;
+			perf_evsel__set_sample_bit(counter, ID);
 			attr->read_format |= PERF_FORMAT_ID;
 		}
 
 		if (perf_target__has_cpu(&top->target))
-			attr->sample_type |= PERF_SAMPLE_CPU;
+			perf_evsel__set_sample_bit(counter, CPU);
 
 		if (symbol_conf.use_callchain)
-			attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
+			perf_evsel__set_sample_bit(counter, CALLCHAIN);
 
 		attr->mmap = 1;
 		attr->comm = 1;
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
index 70e0d4421df8..5902772492b6 100644
--- a/tools/perf/tests/perf-record.c
+++ b/tools/perf/tests/perf-record.c
@@ -103,9 +103,9 @@ int test__PERF_RECORD(void)
 	 * Config the evsels, setting attr->comm on the first one, etc.
 	 */
 	evsel = perf_evlist__first(evlist);
-	evsel->attr.sample_type |= PERF_SAMPLE_CPU;
-	evsel->attr.sample_type |= PERF_SAMPLE_TID;
-	evsel->attr.sample_type |= PERF_SAMPLE_TIME;
+	perf_evsel__set_sample_bit(evsel, CPU);
+	perf_evsel__set_sample_bit(evsel, TID);
+	perf_evsel__set_sample_bit(evsel, TIME);
 	perf_evlist__config_attrs(evlist, &opts);
 
 	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index d0e1e821fe85..265565942ae5 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -61,7 +61,7 @@ void perf_evlist__config_attrs(struct perf_evlist *evlist,
 		perf_evsel__config(evsel, opts);
 
 		if (evlist->nr_entries > 1)
-			evsel->attr.sample_type |= PERF_SAMPLE_ID;
+			perf_evsel__set_sample_bit(evsel, ID);
 	}
 }
 
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index bb58b05f905f..fc80f5a32fa6 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -50,6 +50,24 @@ void hists__init(struct hists *hists)
 	pthread_mutex_init(&hists->lock, NULL);
 }
 
+void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
+				  enum perf_event_sample_format bit)
+{
+	if (!(evsel->attr.sample_type & bit)) {
+		evsel->attr.sample_type |= bit;
+		evsel->sample_size += sizeof(u64);
+	}
+}
+
+void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
+				    enum perf_event_sample_format bit)
+{
+	if (evsel->attr.sample_type & bit) {
+		evsel->attr.sample_type &= ~bit;
+		evsel->sample_size -= sizeof(u64);
+	}
+}
+
 void perf_evsel__init(struct perf_evsel *evsel,
 		      struct perf_event_attr *attr, int idx)
 {
@@ -445,7 +463,8 @@ void perf_evsel__config(struct perf_evsel *evsel,
 				     PERF_FORMAT_TOTAL_TIME_RUNNING |
 				     PERF_FORMAT_ID;
 
-	attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;
+	perf_evsel__set_sample_bit(evsel, IP);
+	perf_evsel__set_sample_bit(evsel, TID);
 
 	/*
 	 * We default some events to a 1 default interval. But keep
@@ -454,7 +473,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
 	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
 				     opts->user_interval != ULLONG_MAX)) {
 		if (opts->freq) {
-			attr->sample_type |= PERF_SAMPLE_PERIOD;
+			perf_evsel__set_sample_bit(evsel, PERIOD);
 			attr->freq = 1;
 			attr->sample_freq = opts->freq;
 		} else {
@@ -469,16 +488,16 @@ void perf_evsel__config(struct perf_evsel *evsel,
 		attr->inherit_stat = 1;
 
 	if (opts->sample_address) {
-		attr->sample_type |= PERF_SAMPLE_ADDR;
+		perf_evsel__set_sample_bit(evsel, ADDR);
 		attr->mmap_data = track;
 	}
 
 	if (opts->call_graph) {
-		attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
+		perf_evsel__set_sample_bit(evsel, CALLCHAIN);
 
 		if (opts->call_graph == CALLCHAIN_DWARF) {
-			attr->sample_type |= PERF_SAMPLE_REGS_USER |
-					     PERF_SAMPLE_STACK_USER;
+			perf_evsel__set_sample_bit(evsel, REGS_USER);
+			perf_evsel__set_sample_bit(evsel, STACK_USER);
 			attr->sample_regs_user = PERF_REGS_MASK;
 			attr->sample_stack_user = opts->stack_dump_size;
 			attr->exclude_callchain_user = 1;
@@ -486,20 +505,20 @@ void perf_evsel__config(struct perf_evsel *evsel,
 	}
 
 	if (perf_target__has_cpu(&opts->target))
-		attr->sample_type |= PERF_SAMPLE_CPU;
+		perf_evsel__set_sample_bit(evsel, CPU);
 
 	if (opts->period)
-		attr->sample_type |= PERF_SAMPLE_PERIOD;
+		perf_evsel__set_sample_bit(evsel, PERIOD);
 
 	if (!opts->sample_id_all_missing &&
 	    (opts->sample_time || !opts->no_inherit ||
 	     perf_target__has_cpu(&opts->target)))
-		attr->sample_type |= PERF_SAMPLE_TIME;
+		perf_evsel__set_sample_bit(evsel, TIME);
 
 	if (opts->raw_samples) {
-		attr->sample_type |= PERF_SAMPLE_TIME;
-		attr->sample_type |= PERF_SAMPLE_RAW;
-		attr->sample_type |= PERF_SAMPLE_CPU;
+		perf_evsel__set_sample_bit(evsel, TIME);
+		perf_evsel__set_sample_bit(evsel, RAW);
+		perf_evsel__set_sample_bit(evsel, CPU);
 	}
 
 	if (opts->no_delay) {
@@ -507,7 +526,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
 		attr->wakeup_events = 1;
 	}
 	if (opts->branch_stack) {
-		attr->sample_type |= PERF_SAMPLE_BRANCH_STACK;
+		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
 		attr->branch_sample_type = opts->branch_stack;
 	}
 
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 3f7ff471de2b..739853969243 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -118,6 +118,17 @@ void perf_evsel__free_fd(struct perf_evsel *evsel);
 void perf_evsel__free_id(struct perf_evsel *evsel);
 void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
 
+void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
+				  enum perf_event_sample_format bit);
+void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
+				    enum perf_event_sample_format bit);
+
+#define perf_evsel__set_sample_bit(evsel, bit) \
+	__perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)
+
+#define perf_evsel__reset_sample_bit(evsel, bit) \
+	__perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)
+
 int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
 			   const char *filter);
 