author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-09-17 05:28:48 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-09-17 06:48:48 -0400
commit	b04243ef7006cda301819f54ee7ce0a3632489e3 (patch)
tree	e8e83c53d40dce08ad5bfc37ec1e58b3a5aa7adc /kernel/perf_event.c
parent	d14b12d7adbf214f33eb59f800b5c3d5ed9268e8 (diff)
perf: Complete software pmu grouping
Aside from allowing software events into a !software group, allow
adding !software events to pure software groups.

Once we've moved the software group and attached the first !software
event, the group will no longer be a pure software group and hence no
longer be eligible for movement, at which point the straight ctx
comparison is correct again.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <20100917093009.410784731@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
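To illustrate the case this change enables, here is a minimal userspace sketch
(not part of the patch): a pure software group led by a task-clock event, to
which a hardware cycles event is then added via group_fd. With this patch the
kernel moves the whole software group into the hardware context instead of
failing the open on the ctx comparison. The sys_perf_event_open() wrapper and
the open_mixed_group() helper below are assumptions for illustration only;
glibc provides no wrapper for this syscall.

/* Hypothetical userspace illustration, not part of this patch. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
			       int cpu, int group_fd, unsigned long flags)
{
	/* Assumed raw syscall wrapper; glibc does not provide one. */
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int open_mixed_group(void)
{
	struct perf_event_attr attr;
	int leader, cycles;

	/* Software leader: initially lives in the software pmu context. */
	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	leader = sys_perf_event_open(&attr, 0, -1, -1, 0);
	if (leader < 0)
		return -1;

	/*
	 * Hardware event grouped under the software leader.  The group is
	 * still a pure software group (PERF_GROUP_SOFTWARE), so the kernel
	 * sets move_group and migrates the leader and its siblings into the
	 * hardware context before installing this event.
	 */
	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	cycles = sys_perf_event_open(&attr, 0, -1, leader, 0);

	return cycles < 0 ? -1 : leader;
}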
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c	65
1 files changed, 60 insertions, 5 deletions
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index ce95617f5d2c..6d7eef5f3c41 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -5184,6 +5184,7 @@ int perf_pmu_register(struct pmu *pmu)
 
 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
 		__perf_event_init_context(&cpuctx->ctx);
+		cpuctx->ctx.type = cpu_context;
 		cpuctx->ctx.pmu = pmu;
 		cpuctx->timer_interval = TICK_NSEC;
 		hrtimer_init(&cpuctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -5517,7 +5518,8 @@ SYSCALL_DEFINE5(perf_event_open,
 		struct perf_event_attr __user *, attr_uptr,
 		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
 {
-	struct perf_event *event, *group_leader = NULL, *output_event = NULL;
+	struct perf_event *group_leader = NULL, *output_event = NULL;
+	struct perf_event *event, *sibling;
 	struct perf_event_attr attr;
 	struct perf_event_context *ctx;
 	struct file *event_file = NULL;
@@ -5525,6 +5527,7 @@ SYSCALL_DEFINE5(perf_event_open,
 	struct task_struct *task = NULL;
 	struct pmu *pmu;
 	int event_fd;
+	int move_group = 0;
 	int fput_needed = 0;
 	int err;
 
@@ -5574,8 +5577,29 @@ SYSCALL_DEFINE5(perf_event_open,
 	 * any hardware group.
 	 */
 	pmu = event->pmu;
-	if ((pmu->task_ctx_nr == perf_sw_context) && group_leader)
-		pmu = group_leader->pmu;
+
+	if (group_leader &&
+	    (is_software_event(event) != is_software_event(group_leader))) {
+		if (is_software_event(event)) {
+			/*
+			 * If event and group_leader are not both a software
+			 * event, and event is, then group leader is not.
+			 *
+			 * Allow the addition of software events to !software
+			 * groups, this is safe because software events never
+			 * fail to schedule.
+			 */
+			pmu = group_leader->pmu;
+		} else if (is_software_event(group_leader) &&
+			   (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
+			/*
+			 * In case the group is a pure software group, and we
+			 * try to add a hardware event, move the whole group to
+			 * the hardware context.
+			 */
+			move_group = 1;
+		}
+	}
 
 	if (pid != -1)
 		task = find_lively_task_by_vpid(pid);
@@ -5605,8 +5629,14 @@ SYSCALL_DEFINE5(perf_event_open,
 	 * Do not allow to attach to a group in a different
 	 * task or CPU context:
 	 */
-	if (group_leader->ctx != ctx)
-		goto err_context;
+	if (move_group) {
+		if (group_leader->ctx->type != ctx->type)
+			goto err_context;
+	} else {
+		if (group_leader->ctx != ctx)
+			goto err_context;
+	}
+
 	/*
 	 * Only a group leader can be exclusive or pinned
 	 */
@@ -5626,9 +5656,34 @@ SYSCALL_DEFINE5(perf_event_open,
 			goto err_context;
 	}
 
+	if (move_group) {
+		struct perf_event_context *gctx = group_leader->ctx;
+
+		mutex_lock(&gctx->mutex);
+		perf_event_remove_from_context(group_leader);
+		list_for_each_entry(sibling, &group_leader->sibling_list,
+				    group_entry) {
+			perf_event_remove_from_context(sibling);
+			put_ctx(gctx);
+		}
+		mutex_unlock(&gctx->mutex);
+		put_ctx(gctx);
+	}
+
 	event->filp = event_file;
 	WARN_ON_ONCE(ctx->parent_ctx);
 	mutex_lock(&ctx->mutex);
+
+	if (move_group) {
+		perf_install_in_context(ctx, group_leader, cpu);
+		get_ctx(ctx);
+		list_for_each_entry(sibling, &group_leader->sibling_list,
+				    group_entry) {
+			perf_install_in_context(ctx, sibling, cpu);
+			get_ctx(ctx);
+		}
+	}
+
 	perf_install_in_context(ctx, event, cpu);
 	++ctx->generation;
 	mutex_unlock(&ctx->mutex);