about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--include/linux/perf_event.h6
-rw-r--r--kernel/perf_event.c65
2 files changed, 66 insertions(+), 5 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 39d8860b2684..165287fd2cc4 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -804,12 +804,18 @@ struct perf_event {
804#endif /* CONFIG_PERF_EVENTS */ 804#endif /* CONFIG_PERF_EVENTS */
805}; 805};
806 806
807enum perf_event_context_type {
808 task_context,
809 cpu_context,
810};
811
807/** 812/**
808 * struct perf_event_context - event context structure 813 * struct perf_event_context - event context structure
809 * 814 *
810 * Used as a container for task events and CPU events as well: 815 * Used as a container for task events and CPU events as well:
811 */ 816 */
812struct perf_event_context { 817struct perf_event_context {
818 enum perf_event_context_type type;
813 struct pmu *pmu; 819 struct pmu *pmu;
814 /* 820 /*
815 * Protect the states of the events in the list, 821 * Protect the states of the events in the list,
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index ce95617f5d2c..6d7eef5f3c41 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -5184,6 +5184,7 @@ int perf_pmu_register(struct pmu *pmu)
5184 5184
5185 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 5185 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
5186 __perf_event_init_context(&cpuctx->ctx); 5186 __perf_event_init_context(&cpuctx->ctx);
5187 cpuctx->ctx.type = cpu_context;
5187 cpuctx->ctx.pmu = pmu; 5188 cpuctx->ctx.pmu = pmu;
5188 cpuctx->timer_interval = TICK_NSEC; 5189 cpuctx->timer_interval = TICK_NSEC;
5189 hrtimer_init(&cpuctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 5190 hrtimer_init(&cpuctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -5517,7 +5518,8 @@ SYSCALL_DEFINE5(perf_event_open,
5517 struct perf_event_attr __user *, attr_uptr, 5518 struct perf_event_attr __user *, attr_uptr,
5518 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) 5519 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
5519{ 5520{
5520 struct perf_event *event, *group_leader = NULL, *output_event = NULL; 5521 struct perf_event *group_leader = NULL, *output_event = NULL;
5522 struct perf_event *event, *sibling;
5521 struct perf_event_attr attr; 5523 struct perf_event_attr attr;
5522 struct perf_event_context *ctx; 5524 struct perf_event_context *ctx;
5523 struct file *event_file = NULL; 5525 struct file *event_file = NULL;
@@ -5525,6 +5527,7 @@ SYSCALL_DEFINE5(perf_event_open,
5525 struct task_struct *task = NULL; 5527 struct task_struct *task = NULL;
5526 struct pmu *pmu; 5528 struct pmu *pmu;
5527 int event_fd; 5529 int event_fd;
5530 int move_group = 0;
5528 int fput_needed = 0; 5531 int fput_needed = 0;
5529 int err; 5532 int err;
5530 5533
@@ -5574,8 +5577,29 @@ SYSCALL_DEFINE5(perf_event_open,
5574 * any hardware group. 5577 * any hardware group.
5575 */ 5578 */
5576 pmu = event->pmu; 5579 pmu = event->pmu;
5577 if ((pmu->task_ctx_nr == perf_sw_context) && group_leader) 5580
5578 pmu = group_leader->pmu; 5581 if (group_leader &&
5582 (is_software_event(event) != is_software_event(group_leader))) {
5583 if (is_software_event(event)) {
5584 /*
5585 * If event and group_leader are not both a software
5586 * event, and event is, then group leader is not.
5587 *
5588 * Allow the addition of software events to !software
5589 * groups, this is safe because software events never
5590 * fail to schedule.
5591 */
5592 pmu = group_leader->pmu;
5593 } else if (is_software_event(group_leader) &&
5594 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
5595 /*
5596 * In case the group is a pure software group, and we
5597 * try to add a hardware event, move the whole group to
5598 * the hardware context.
5599 */
5600 move_group = 1;
5601 }
5602 }
5579 5603
5580 if (pid != -1) 5604 if (pid != -1)
5581 task = find_lively_task_by_vpid(pid); 5605 task = find_lively_task_by_vpid(pid);
@@ -5605,8 +5629,14 @@ SYSCALL_DEFINE5(perf_event_open,
5605 * Do not allow to attach to a group in a different 5629 * Do not allow to attach to a group in a different
5606 * task or CPU context: 5630 * task or CPU context:
5607 */ 5631 */
5608 if (group_leader->ctx != ctx) 5632 if (move_group) {
5609 goto err_context; 5633 if (group_leader->ctx->type != ctx->type)
5634 goto err_context;
5635 } else {
5636 if (group_leader->ctx != ctx)
5637 goto err_context;
5638 }
5639
5610 /* 5640 /*
5611 * Only a group leader can be exclusive or pinned 5641 * Only a group leader can be exclusive or pinned
5612 */ 5642 */
@@ -5626,9 +5656,34 @@ SYSCALL_DEFINE5(perf_event_open,
5626 goto err_context; 5656 goto err_context;
5627 } 5657 }
5628 5658
5659 if (move_group) {
5660 struct perf_event_context *gctx = group_leader->ctx;
5661
5662 mutex_lock(&gctx->mutex);
5663 perf_event_remove_from_context(group_leader);
5664 list_for_each_entry(sibling, &group_leader->sibling_list,
5665 group_entry) {
5666 perf_event_remove_from_context(sibling);
5667 put_ctx(gctx);
5668 }
5669 mutex_unlock(&gctx->mutex);
5670 put_ctx(gctx);
5671 }
5672
5629 event->filp = event_file; 5673 event->filp = event_file;
5630 WARN_ON_ONCE(ctx->parent_ctx); 5674 WARN_ON_ONCE(ctx->parent_ctx);
5631 mutex_lock(&ctx->mutex); 5675 mutex_lock(&ctx->mutex);
5676
5677 if (move_group) {
5678 perf_install_in_context(ctx, group_leader, cpu);
5679 get_ctx(ctx);
5680 list_for_each_entry(sibling, &group_leader->sibling_list,
5681 group_entry) {
5682 perf_install_in_context(ctx, sibling, cpu);
5683 get_ctx(ctx);
5684 }
5685 }
5686
5632 perf_install_in_context(ctx, event, cpu); 5687 perf_install_in_context(ctx, event, cpu);
5633 ++ctx->generation; 5688 ++ctx->generation;
5634 mutex_unlock(&ctx->mutex); 5689 mutex_unlock(&ctx->mutex);