path: root/tools/perf
author		Jiri Olsa <jolsa@redhat.com>	2012-08-08 06:22:36 -0400
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2012-08-14 16:03:49 -0400
commit		6a4bb04caacc8c2d06f345130e9086e3fea38ca7 (patch)
tree		96a4c4e049c303a92ababf2807b5b8044f55bf74 /tools/perf
parent		f5b1135bf79557563a814e53ecd610cce663c1e3 (diff)
perf tools: Enable grouping logic for parsed events
This patch adds functionality that allows creating event groups based
on the way they are specified on the command line, building on the
'{}' group syntax introduced in an earlier patch.

The current '--group/-g' option behaviour remains intact. If you
specify it for the record/stat/top command, all the specified events
become members of a single group with the first event as the group
leader.

With the new '{}' group syntax you can create a group like:

  # perf record -e '{cycles,faults}' ls

resulting in a single event group containing the 'cycles' and 'faults'
events, with the cycles event as the group leader.

All groups are created with regard to threads and cpus. Thus recording
an event group for 2 threads on a server with 4 CPUs will create 8
separate groups.

Examples (first event in brackets is the group leader):

  # 1 group (cpu-clock,task-clock)
  perf record --group -e cpu-clock,task-clock ls
  perf record -e '{cpu-clock,task-clock}' ls

  # 2 groups (cpu-clock,task-clock) (minor-faults,major-faults)
  perf record -e '{cpu-clock,task-clock},{minor-faults,major-faults}' ls

  # 1 group (cpu-clock,task-clock,minor-faults,major-faults)
  perf record --group -e cpu-clock,task-clock -e minor-faults,major-faults ls
  perf record -e '{cpu-clock,task-clock,minor-faults,major-faults}' ls

  # 2 groups (cpu-clock,task-clock) (minor-faults,major-faults)
  perf record -e '{cpu-clock,task-clock}' -e '{minor-faults,major-faults}' \
   -e instructions ls

  # 1 group
  # (cpu-clock,task-clock,minor-faults,major-faults,instructions)
  perf record --group -e cpu-clock,task-clock \
   -e minor-faults,major-faults -e instructions ls
  perf record -e '{cpu-clock,task-clock,minor-faults,major-faults,instructions}' ls

It's possible to use a standard event modifier for a group; it spans
all events in the group and updates each event's modifier settings,
for example:

  # perf record -e '{faults:k,cache-references}:p'

resulting in the ':kp' modifier being used for the 'faults' event and
the ':p' modifier being used for the 'cache-references' event.

Reviewed-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ulrich Drepper <drepper@gmail.com>
Link: http://lkml.kernel.org/n/tip-ho42u0wcr8mn1otkalqi13qp@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
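At the perf_event_open() level, the grouping that this patch wires through the tools comes down to one convention: the group leader is opened with group_fd = -1 and every other member is opened with the leader's fd, which is what the new get_group_fd() helper in the diff below computes per cpu/thread. The following standalone sketch illustrates only that kernel-interface convention; it is not code from this patch, and the two software events it opens are arbitrary examples.

/*
 * Standalone illustration of the perf_event_open() grouping convention
 * that this patch automates inside the perf tools: open the group leader
 * with group_fd == -1, then pass the leader's fd when opening members.
 * The events used (cpu-clock, task-clock) are arbitrary examples.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int leader, member;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;

	/* leader: no group exists yet, so group_fd is -1 */
	leader = perf_event_open(&attr, 0, -1, -1, 0);
	if (leader < 0) {
		perror("perf_event_open (leader)");
		return 1;
	}

	attr.config = PERF_COUNT_SW_TASK_CLOCK;

	/* member: pass the leader's fd so both are scheduled together */
	member = perf_event_open(&attr, 0, -1, leader, 0);
	if (member < 0) {
		perror("perf_event_open (member)");
		return 1;
	}

	printf("group created: leader fd %d, member fd %d\n", leader, member);
	close(member);
	close(leader);
	return 0;
}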
Diffstat (limited to 'tools/perf')
-rw-r--r--	tools/perf/builtin-record.c	13
-rw-r--r--	tools/perf/builtin-stat.c	13
-rw-r--r--	tools/perf/builtin-test.c	8
-rw-r--r--	tools/perf/builtin-top.c	12
-rw-r--r--	tools/perf/util/evlist.c	20
-rw-r--r--	tools/perf/util/evlist.h	3
-rw-r--r--	tools/perf/util/evsel.c	51
-rw-r--r--	tools/perf/util/evsel.h	11
-rw-r--r--	tools/perf/util/parse-events.c	26
-rw-r--r--	tools/perf/util/parse-events.h	1
-rw-r--r--	tools/perf/util/python.c	7
11 files changed, 96 insertions, 69 deletions
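The commit message's point that groups are instantiated per thread and per CPU (2 threads on a 4-CPU box yields 8 kernel-side groups) is visible directly at the syscall level: each (cpu, thread) pair gets its own leader fd, which is why get_group_fd() in the hunks below looks the leader's fd up per cpu/thread. The sketch below is illustrative only and not part of the patch; it opens one small group for the current process on every online CPU, so each CPU ends up with an independent group.

/*
 * Illustration of per-CPU group instantiation: one independent group
 * (leader + one member) is opened for the current process on each online
 * CPU, mirroring how the tools open one group per (cpu, thread).
 * Not code from this patch; events and error handling are kept minimal.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
	struct perf_event_attr attr;
	int cpu, groups = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		int leader, member;

		memset(&attr, 0, sizeof(attr));
		attr.size   = sizeof(attr);
		attr.type   = PERF_TYPE_SOFTWARE;
		attr.config = PERF_COUNT_SW_CPU_CLOCK;

		/* a separate leader for this CPU */
		leader = perf_event_open(&attr, 0, cpu, -1, 0);
		if (leader < 0)
			continue;

		attr.config = PERF_COUNT_SW_TASK_CLOCK;
		/* the member joins this CPU's leader only */
		member = perf_event_open(&attr, 0, cpu, leader, 0);
		if (member >= 0)
			groups++;

		/* fds intentionally left open until exit in this toy example */
	}

	printf("created %d per-cpu groups for this process\n", groups);
	return 0;
}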
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 22dd05d3680c..f5b6137c0f7e 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -185,18 +185,18 @@ static bool perf_evlist__equal(struct perf_evlist *evlist,
 
 static void perf_record__open(struct perf_record *rec)
 {
-	struct perf_evsel *pos, *first;
+	struct perf_evsel *pos;
 	struct perf_evlist *evlist = rec->evlist;
 	struct perf_session *session = rec->session;
 	struct perf_record_opts *opts = &rec->opts;
 
-	first = list_entry(evlist->entries.next, struct perf_evsel, node);
-
 	perf_evlist__config_attrs(evlist, opts);
 
+	if (opts->group)
+		perf_evlist__group(evlist);
+
 	list_for_each_entry(pos, &evlist->entries, node) {
 		struct perf_event_attr *attr = &pos->attr;
-		struct xyarray *group_fd = NULL;
 		/*
 		 * Check if parse_single_tracepoint_event has already asked for
 		 * PERF_SAMPLE_TIME.
@@ -211,16 +211,13 @@ static void perf_record__open(struct perf_record *rec)
 		 */
 		bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;
 
-		if (opts->group && pos != first)
-			group_fd = first->fd;
 fallback_missing_features:
 		if (opts->exclude_guest_missing)
 			attr->exclude_guest = attr->exclude_host = 0;
 retry_sample_id:
 		attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
 try_again:
-		if (perf_evsel__open(pos, evlist->cpus, evlist->threads,
-				     opts->group, group_fd) < 0) {
+		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
 			int err = errno;
 
 			if (err == EPERM || err == EACCES) {
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 861f0aec77ae..23908a85bba9 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -281,13 +281,9 @@ static int create_perf_stat_counter(struct perf_evsel *evsel,
 				    struct perf_evsel *first)
 {
 	struct perf_event_attr *attr = &evsel->attr;
-	struct xyarray *group_fd = NULL;
 	bool exclude_guest_missing = false;
 	int ret;
 
-	if (group && evsel != first)
-		group_fd = first->fd;
-
 	if (scale)
 		attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 				    PERF_FORMAT_TOTAL_TIME_RUNNING;
@@ -299,8 +295,7 @@ retry:
 		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
 
 	if (perf_target__has_cpu(&target)) {
-		ret = perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
-					       group, group_fd);
+		ret = perf_evsel__open_per_cpu(evsel, evsel_list->cpus);
 		if (ret)
 			goto check_ret;
 		return 0;
@@ -311,8 +306,7 @@ retry:
 		attr->enable_on_exec = 1;
 	}
 
-	ret = perf_evsel__open_per_thread(evsel, evsel_list->threads,
-					  group, group_fd);
+	ret = perf_evsel__open_per_thread(evsel, evsel_list->threads);
 	if (!ret)
 		return 0;
 	/* fall through */
@@ -483,6 +477,9 @@ static int run_perf_stat(int argc __used, const char **argv)
 		close(child_ready_pipe[0]);
 	}
 
+	if (group)
+		perf_evlist__group(evsel_list);
+
 	first = list_entry(evsel_list->entries.next, struct perf_evsel, node);
 
 	list_for_each_entry(counter, &evsel_list->entries, node) {
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 1d592f5cbea9..9a479b68fc9b 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -294,7 +294,7 @@ static int test__open_syscall_event(void)
 		goto out_thread_map_delete;
 	}
 
-	if (perf_evsel__open_per_thread(evsel, threads, false, NULL) < 0) {
+	if (perf_evsel__open_per_thread(evsel, threads) < 0) {
 		pr_debug("failed to open counter: %s, "
 			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
 			 strerror(errno));
@@ -369,7 +369,7 @@ static int test__open_syscall_event_on_all_cpus(void)
 		goto out_thread_map_delete;
 	}
 
-	if (perf_evsel__open(evsel, cpus, threads, false, NULL) < 0) {
+	if (perf_evsel__open(evsel, cpus, threads) < 0) {
 		pr_debug("failed to open counter: %s, "
 			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
 			 strerror(errno));
@@ -533,7 +533,7 @@ static int test__basic_mmap(void)
 
 		perf_evlist__add(evlist, evsels[i]);
 
-		if (perf_evsel__open(evsels[i], cpus, threads, false, NULL) < 0) {
+		if (perf_evsel__open(evsels[i], cpus, threads) < 0) {
 			pr_debug("failed to open counter: %s, "
 				 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
 				 strerror(errno));
@@ -737,7 +737,7 @@ static int test__PERF_RECORD(void)
 	 * Call sys_perf_event_open on all the fds on all the evsels,
 	 * grouping them if asked to.
 	 */
-	err = perf_evlist__open(evlist, opts.group);
+	err = perf_evlist__open(evlist);
 	if (err < 0) {
 		pr_debug("perf_evlist__open: %s\n", strerror(errno));
 		goto out_delete_evlist;
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index e45a1ba61722..392d2192b75e 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -886,17 +886,14 @@ static void perf_top__mmap_read(struct perf_top *top)
 
 static void perf_top__start_counters(struct perf_top *top)
 {
-	struct perf_evsel *counter, *first;
+	struct perf_evsel *counter;
 	struct perf_evlist *evlist = top->evlist;
 
-	first = list_entry(evlist->entries.next, struct perf_evsel, node);
+	if (top->group)
+		perf_evlist__group(evlist);
 
 	list_for_each_entry(counter, &evlist->entries, node) {
 		struct perf_event_attr *attr = &counter->attr;
-		struct xyarray *group_fd = NULL;
-
-		if (top->group && counter != first)
-			group_fd = first->fd;
 
 		attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
 
@@ -927,8 +924,7 @@ retry_sample_id:
 	attr->sample_id_all = top->sample_id_all_missing ? 0 : 1;
 try_again:
 	if (perf_evsel__open(counter, top->evlist->cpus,
-			     top->evlist->threads, top->group,
-			     group_fd) < 0) {
+			     top->evlist->threads) < 0) {
 		int err = errno;
 
 		if (err == EPERM || err == EACCES) {
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 9b38681add9e..feffee3f2bd8 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -108,6 +108,12 @@ void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
 	evlist->nr_entries += nr_entries;
 }
 
+void perf_evlist__group(struct perf_evlist *evlist)
+{
+	if (evlist->nr_entries)
+		parse_events__set_leader(&evlist->entries);
+}
+
 int perf_evlist__add_default(struct perf_evlist *evlist)
 {
 	struct perf_event_attr attr = {
@@ -757,21 +763,13 @@ void perf_evlist__set_selected(struct perf_evlist *evlist,
 	evlist->selected = evsel;
 }
 
-int perf_evlist__open(struct perf_evlist *evlist, bool group)
+int perf_evlist__open(struct perf_evlist *evlist)
 {
-	struct perf_evsel *evsel, *first;
+	struct perf_evsel *evsel;
 	int err, ncpus, nthreads;
 
-	first = list_entry(evlist->entries.next, struct perf_evsel, node);
-
 	list_for_each_entry(evsel, &evlist->entries, node) {
-		struct xyarray *group_fd = NULL;
-
-		if (group && evsel != first)
-			group_fd = first->fd;
-
-		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads,
-				       group, group_fd);
+		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
 		if (err < 0)
 			goto out_err;
 	}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 528c1acd9298..a19ccd7b51fa 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -85,7 +85,7 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
 
 union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
 
-int perf_evlist__open(struct perf_evlist *evlist, bool group);
+int perf_evlist__open(struct perf_evlist *evlist);
 
 void perf_evlist__config_attrs(struct perf_evlist *evlist,
 			       struct perf_record_opts *opts);
@@ -132,4 +132,5 @@ void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
 				       struct list_head *list,
 				       int nr_entries);
 
+void perf_evlist__group(struct perf_evlist *evlist);
 #endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 9c54e8fc2dfc..f5b68e73d751 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -21,7 +21,6 @@
 #include "perf_regs.h"
 
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
-#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
 
 static int __perf_evsel__sample_size(u64 sample_type)
 {
@@ -493,6 +492,7 @@ void perf_evsel__delete(struct perf_evsel *evsel)
 {
 	perf_evsel__exit(evsel);
 	close_cgroup(evsel->cgrp);
+	free(evsel->group_name);
 	free(evsel->name);
 	free(evsel);
 }
@@ -568,9 +568,28 @@ int __perf_evsel__read(struct perf_evsel *evsel,
 	return 0;
 }
 
+static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
+{
+	struct perf_evsel *leader = evsel->leader;
+	int fd;
+
+	if (!leader)
+		return -1;
+
+	/*
+	 * Leader must be already processed/open,
+	 * if not it's a bug.
+	 */
+	BUG_ON(!leader->fd);
+
+	fd = FD(leader, cpu, thread);
+	BUG_ON(fd == -1);
+
+	return fd;
+}
+
 static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
-			      struct thread_map *threads, bool group,
-			      struct xyarray *group_fds)
+			      struct thread_map *threads)
 {
 	int cpu, thread;
 	unsigned long flags = 0;
@@ -586,13 +605,15 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
 	}
 
 	for (cpu = 0; cpu < cpus->nr; cpu++) {
-		int group_fd = group_fds ? GROUP_FD(group_fds, cpu) : -1;
 
 		for (thread = 0; thread < threads->nr; thread++) {
+			int group_fd;
 
 			if (!evsel->cgrp)
 				pid = threads->map[thread];
 
+			group_fd = get_group_fd(evsel, cpu, thread);
+
 			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
 								     pid,
 								     cpus->map[cpu],
@@ -602,8 +623,9 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
 				goto out_close;
 			}
 
-			if (group && group_fd == -1)
-				group_fd = FD(evsel, cpu, thread);
+			pr_debug("event cpu %d, thread %d, fd %d, group %d\n",
+				 cpu, pid, FD(evsel, cpu, thread),
+				 group_fd);
 		}
 	}
 
@@ -647,8 +669,7 @@ static struct {
 };
 
 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
-		     struct thread_map *threads, bool group,
-		     struct xyarray *group_fd)
+		     struct thread_map *threads)
 {
 	if (cpus == NULL) {
 		/* Work around old compiler warnings about strict aliasing */
@@ -658,23 +679,19 @@ int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
 	if (threads == NULL)
 		threads = &empty_thread_map.map;
 
-	return __perf_evsel__open(evsel, cpus, threads, group, group_fd);
+	return __perf_evsel__open(evsel, cpus, threads);
 }
 
 int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
-			     struct cpu_map *cpus, bool group,
-			     struct xyarray *group_fd)
+			     struct cpu_map *cpus)
 {
-	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group,
-				  group_fd);
+	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
 }
 
 int perf_evsel__open_per_thread(struct perf_evsel *evsel,
-				struct thread_map *threads, bool group,
-				struct xyarray *group_fd)
+				struct thread_map *threads)
 {
-	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group,
-				  group_fd);
+	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
 }
 
 static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 6a258c90e7ce..c411b421c888 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -70,6 +70,8 @@ struct perf_evsel {
 	bool			supported;
 	/* parse modifier helper */
 	int			exclude_GH;
+	struct perf_evsel	*leader;
+	char			*group_name;
 };
 
 struct cpu_map;
@@ -109,14 +111,11 @@ void perf_evsel__free_id(struct perf_evsel *evsel);
 void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
 
 int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
-			     struct cpu_map *cpus, bool group,
-			     struct xyarray *group_fds);
+			     struct cpu_map *cpus);
 int perf_evsel__open_per_thread(struct perf_evsel *evsel,
-				struct thread_map *threads, bool group,
-				struct xyarray *group_fds);
+				struct thread_map *threads);
 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
-		     struct thread_map *threads, bool group,
-		     struct xyarray *group_fds);
+		     struct thread_map *threads);
 void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads);
 
 #define perf_evsel__match(evsel, t, c) \
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 4364575ce6ac..f6453cd414ae 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -611,14 +611,32 @@ int parse_events_add_pmu(struct list_head **list, int *idx,
 			  pmu_event_name(head_config));
 }
 
-int parse_events__modifier_group(struct list_head *list __used,
-				 char *event_mod __used)
+struct perf_evsel *parse_events__set_leader(struct list_head *list)
 {
-	return 0;
+	struct perf_evsel *evsel, *leader;
+
+	leader = list_entry(list->next, struct perf_evsel, node);
+	leader->leader = NULL;
+
+	list_for_each_entry(evsel, list, node)
+		if (evsel != leader)
+			evsel->leader = leader;
+
+	return leader;
 }
 
-void parse_events__group(char *name __used, struct list_head *list __used)
+int parse_events__modifier_group(struct list_head *list,
+				 char *event_mod)
 {
+	return parse_events__modifier_event(list, event_mod, true);
+}
+
+void parse_events__group(char *name, struct list_head *list)
+{
+	struct perf_evsel *leader;
+
+	leader = parse_events__set_leader(list);
+	leader->group_name = name ? strdup(name) : NULL;
 }
 
 void parse_events_update_lists(struct list_head *list_event,
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 75a6800082a1..e1a184c9e358 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -92,6 +92,7 @@ int parse_events_add_breakpoint(struct list_head **list, int *idx,
 				 void *ptr, char *type);
 int parse_events_add_pmu(struct list_head **list, int *idx,
 			 char *pmu , struct list_head *head_config);
+struct perf_evsel *parse_events__set_leader(struct list_head *list);
 void parse_events__group(char *name, struct list_head *list);
 void parse_events_update_lists(struct list_head *list_event,
 			       struct list_head *list_all);
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 0688bfb6d280..f5bba4b8eb9e 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -627,7 +627,7 @@ static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
 	 * This will group just the fds for this single evsel, to group
 	 * multiple events, use evlist.open().
 	 */
-	if (perf_evsel__open(evsel, cpus, threads, group, NULL) < 0) {
+	if (perf_evsel__open(evsel, cpus, threads) < 0) {
 		PyErr_SetFromErrno(PyExc_OSError);
 		return NULL;
 	}
@@ -824,7 +824,10 @@ static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist, &group))
 		return NULL;
 
-	if (perf_evlist__open(evlist, group) < 0) {
+	if (group)
+		perf_evlist__group(evlist);
+
+	if (perf_evlist__open(evlist) < 0) {
 		PyErr_SetFromErrno(PyExc_OSError);
 		return NULL;
 	}