author    Matt Fleming <matt.fleming@intel.com>    2015-01-23 13:45:42 -0500
committer Ingo Molnar <mingo@kernel.org>           2015-02-25 07:53:30 -0500
commit    79dff51e900fd26a073be8b23acfbd8c15edb181
tree      9e10aa04dbe2513fcfba2b9f18da154b70025f0d
parent    eacd3ecc34472ce3751eedfc94e44c7cc6eb6305
perf: Move cgroup init before PMU ->event_init()
The Intel QoS PMU needs to know whether an event is part of a cgroup
during ->event_init(), because tasks in the same cgroup share a
monitoring ID.

Move the cgroup initialisation before calling into the PMU driver.

Signed-off-by: Matt Fleming <matt.fleming@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kanaka Juvva <kanaka.d.juvva@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Vikas Shivappa <vikas.shivappa@linux.intel.com>
Link: http://lkml.kernel.org/r/1422038748-21397-4-git-send-email-matt@codeblueprint.co.uk
Signed-off-by: Ingo Molnar <mingo@kernel.org>
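Illustration (not from this commit): because perf_cgroup_connect() now runs before perf_init_event(), an event's cgroup state is already populated when a driver's ->event_init() callback is invoked. A minimal sketch of the kind of decision this enables follows; the driver and the helpers example_shared_id()/example_alloc_id() are hypothetical, and event->cgrp only exists with CONFIG_CGROUP_PERF.

/* Hypothetical PMU driver sketch -- not from this patch; helpers are made up. */
#include <linux/perf_event.h>

struct perf_cgroup;

static struct pmu example_pmu;
static int example_shared_id(struct perf_cgroup *cgrp);  /* one ID per cgroup */
static int example_alloc_id(void);                        /* fresh ID otherwise */

static int example_event_init(struct perf_event *event)
{
        if (event->attr.type != example_pmu.type)
                return -ENOENT;

        /*
         * The core now connects the cgroup before calling ->event_init(),
         * so event->cgrp is already valid here and events in the same
         * cgroup can be given one shared monitoring ID.
         */
        if (event->cgrp)
                event->hw.idx = example_shared_id(event->cgrp);
        else
                event->hw.idx = example_alloc_id();

        return 0;
}

The point is only the ordering dependency: before this patch, event->cgrp would still be unset at this stage.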
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/core.c  |  28
1 file changed, 16 insertions(+), 12 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4e8dc596f101..1fc3bae5904a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7116,7 +7116,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
                 struct perf_event *group_leader,
                 struct perf_event *parent_event,
                 perf_overflow_handler_t overflow_handler,
-                void *context)
+                void *context, int cgroup_fd)
 {
        struct pmu *pmu;
        struct perf_event *event;
@@ -7212,6 +7212,12 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
        if (!has_branch_stack(event))
                event->attr.branch_sample_type = 0;

+       if (cgroup_fd != -1) {
+               err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
+               if (err)
+                       goto err_ns;
+       }
+
        pmu = perf_init_event(event);
        if (!pmu)
                goto err_ns;
@@ -7235,6 +7241,8 @@ err_pmu:
                event->destroy(event);
        module_put(pmu->module);
 err_ns:
+       if (is_cgroup_event(event))
+               perf_detach_cgroup(event);
        if (event->ns)
                put_pid_ns(event->ns);
        kfree(event);
@@ -7453,6 +7461,7 @@ SYSCALL_DEFINE5(perf_event_open,
        int move_group = 0;
        int err;
        int f_flags = O_RDWR;
+       int cgroup_fd = -1;

        /* for future expandability... */
        if (flags & ~PERF_FLAG_ALL)
@@ -7518,21 +7527,16 @@ SYSCALL_DEFINE5(perf_event_open,

        get_online_cpus();

+       if (flags & PERF_FLAG_PID_CGROUP)
+               cgroup_fd = pid;
+
        event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
-                                NULL, NULL);
+                                NULL, NULL, cgroup_fd);
        if (IS_ERR(event)) {
                err = PTR_ERR(event);
                goto err_cpus;
        }

-       if (flags & PERF_FLAG_PID_CGROUP) {
-               err = perf_cgroup_connect(pid, event, &attr, group_leader);
-               if (err) {
-                       __free_event(event);
-                       goto err_cpus;
-               }
-       }
-
        if (is_sampling_event(event)) {
                if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
                        err = -ENOTSUPP;
@@ -7769,7 +7773,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
         */

        event = perf_event_alloc(attr, cpu, task, NULL, NULL,
-                                overflow_handler, context);
+                                overflow_handler, context, -1);
        if (IS_ERR(event)) {
                err = PTR_ERR(event);
                goto err;
@@ -8130,7 +8134,7 @@ inherit_event(struct perf_event *parent_event,
                                           parent_event->cpu,
                                           child,
                                           group_leader, parent_event,
-                                          NULL, NULL);
+                                          NULL, NULL, -1);
        if (IS_ERR(child_event))
                return child_event;

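For reference (also not part of the commit), the PERF_FLAG_PID_CGROUP path rewired in the perf_event_open() hunk above is reached from userspace roughly as sketched below. The cgroup mount path is an assumption (a v1 perf_event hierarchy); the rest follows the perf_event_open(2) interface, where the pid argument carries a cgroup directory fd and cpu must be >= 0.

/* Userspace sketch: cgroup-mode perf_event_open(); cgroup path is assumed. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
        struct perf_event_attr attr;
        int cgrp_fd, ev_fd;

        /* With PERF_FLAG_PID_CGROUP, "pid" is an fd on the cgroup directory. */
        cgrp_fd = open("/sys/fs/cgroup/perf_event/mygroup", O_RDONLY);
        if (cgrp_fd < 0) {
                perror("open cgroup");
                return 1;
        }

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;

        /* cgroup events are per-CPU, so a concrete cpu is selected here. */
        ev_fd = syscall(__NR_perf_event_open, &attr, cgrp_fd, 0 /* cpu */,
                        -1 /* group_fd */, PERF_FLAG_PID_CGROUP);
        if (ev_fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        /* ... read() the counter as usual ... */
        close(ev_fd);
        close(cgrp_fd);
        return 0;
}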