path: root/kernel/perf_event.c
author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2010-09-07 09:35:33 -0400
committer Ingo Molnar <mingo@elte.hu>              2010-09-09 14:46:33 -0400
commit    97dee4f3206622f31396dede2b5ddb8670458f56 (patch)
tree      0698df23dc32aa44cfc75968163825841c534160 /kernel/perf_event.c
parent    108b02cfce04ee90b0a07ee0b104baffd39f5934 (diff)
perf: Move some code around
Move all inherit code near each other.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c	200
1 file changed, 100 insertions(+), 100 deletions(-)
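The inherit_event()/inherit_group() functions moved by this patch report failure through the kernel's ERR_PTR convention: a negative errno is encoded directly into the returned pointer and recovered with IS_ERR()/PTR_ERR(). Below is a minimal userspace sketch of that idiom; the helpers are simplified local reimplementations for illustration only (the real ones live in include/linux/err.h), and alloc_or_fail() is a hypothetical stand-in for perf_event_alloc(), not a kernel function.

/*
 * Minimal userspace sketch of the ERR_PTR/IS_ERR/PTR_ERR idiom used by the
 * moved inherit code. Simplified reimplementations for illustration only.
 */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;		/* encode -errno in the pointer value */
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;		/* decode the errno again */
}

static inline int IS_ERR(const void *ptr)
{
	/* error pointers occupy the top MAX_ERRNO values of the address space */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical stand-in for perf_event_alloc(): pointer on success,
 * ERR_PTR(-errno) on failure. */
static void *alloc_or_fail(int fail)
{
	static int object = 42;

	if (fail)
		return ERR_PTR(-ENOMEM);
	return &object;
}

int main(void)
{
	void *obj = alloc_or_fail(1);

	if (IS_ERR(obj))	/* same check inherit_group() makes on the leader */
		printf("allocation failed: %ld\n", PTR_ERR(obj));

	return 0;
}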
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 8ca6e690ffe3..dae0e2f30293 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -5556,106 +5556,6 @@ err:
 }
 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
 
-/*
- * inherit a event from parent task to child task:
- */
-static struct perf_event *
-inherit_event(struct perf_event *parent_event,
-	      struct task_struct *parent,
-	      struct perf_event_context *parent_ctx,
-	      struct task_struct *child,
-	      struct perf_event *group_leader,
-	      struct perf_event_context *child_ctx)
-{
-	struct perf_event *child_event;
-
-	/*
-	 * Instead of creating recursive hierarchies of events,
-	 * we link inherited events back to the original parent,
-	 * which has a filp for sure, which we use as the reference
-	 * count:
-	 */
-	if (parent_event->parent)
-		parent_event = parent_event->parent;
-
-	child_event = perf_event_alloc(&parent_event->attr,
-					   parent_event->cpu,
-					   group_leader, parent_event,
-					   NULL);
-	if (IS_ERR(child_event))
-		return child_event;
-	get_ctx(child_ctx);
-
-	/*
-	 * Make the child state follow the state of the parent event,
-	 * not its attr.disabled bit.  We hold the parent's mutex,
-	 * so we won't race with perf_event_{en, dis}able_family.
-	 */
-	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
-		child_event->state = PERF_EVENT_STATE_INACTIVE;
-	else
-		child_event->state = PERF_EVENT_STATE_OFF;
-
-	if (parent_event->attr.freq) {
-		u64 sample_period = parent_event->hw.sample_period;
-		struct hw_perf_event *hwc = &child_event->hw;
-
-		hwc->sample_period = sample_period;
-		hwc->last_period   = sample_period;
-
-		local64_set(&hwc->period_left, sample_period);
-	}
-
-	child_event->ctx = child_ctx;
-	child_event->overflow_handler = parent_event->overflow_handler;
-
-	/*
-	 * Link it up in the child's context:
-	 */
-	add_event_to_ctx(child_event, child_ctx);
-
-	/*
-	 * Get a reference to the parent filp - we will fput it
-	 * when the child event exits. This is safe to do because
-	 * we are in the parent and we know that the filp still
-	 * exists and has a nonzero count:
-	 */
-	atomic_long_inc(&parent_event->filp->f_count);
-
-	/*
-	 * Link this into the parent event's child list
-	 */
-	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
-	mutex_lock(&parent_event->child_mutex);
-	list_add_tail(&child_event->child_list, &parent_event->child_list);
-	mutex_unlock(&parent_event->child_mutex);
-
-	return child_event;
-}
-
-static int inherit_group(struct perf_event *parent_event,
-	      struct task_struct *parent,
-	      struct perf_event_context *parent_ctx,
-	      struct task_struct *child,
-	      struct perf_event_context *child_ctx)
-{
-	struct perf_event *leader;
-	struct perf_event *sub;
-	struct perf_event *child_ctr;
-
-	leader = inherit_event(parent_event, parent, parent_ctx,
-			 child, NULL, child_ctx);
-	if (IS_ERR(leader))
-		return PTR_ERR(leader);
-	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
-		child_ctr = inherit_event(sub, parent, parent_ctx,
-					    child, leader, child_ctx);
-		if (IS_ERR(child_ctr))
-			return PTR_ERR(child_ctr);
-	}
-	return 0;
-}
-
 static void sync_child_event(struct perf_event *child_event,
 			       struct task_struct *child)
 {
@@ -5844,6 +5744,106 @@ again:
 	put_ctx(ctx);
 }
 
+/*
+ * inherit a event from parent task to child task:
+ */
+static struct perf_event *
+inherit_event(struct perf_event *parent_event,
+	      struct task_struct *parent,
+	      struct perf_event_context *parent_ctx,
+	      struct task_struct *child,
+	      struct perf_event *group_leader,
+	      struct perf_event_context *child_ctx)
+{
+	struct perf_event *child_event;
+
+	/*
+	 * Instead of creating recursive hierarchies of events,
+	 * we link inherited events back to the original parent,
+	 * which has a filp for sure, which we use as the reference
+	 * count:
+	 */
+	if (parent_event->parent)
+		parent_event = parent_event->parent;
+
+	child_event = perf_event_alloc(&parent_event->attr,
+					   parent_event->cpu,
+					   group_leader, parent_event,
+					   NULL);
+	if (IS_ERR(child_event))
+		return child_event;
+	get_ctx(child_ctx);
+
+	/*
+	 * Make the child state follow the state of the parent event,
+	 * not its attr.disabled bit.  We hold the parent's mutex,
+	 * so we won't race with perf_event_{en, dis}able_family.
+	 */
+	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
+		child_event->state = PERF_EVENT_STATE_INACTIVE;
+	else
+		child_event->state = PERF_EVENT_STATE_OFF;
+
+	if (parent_event->attr.freq) {
+		u64 sample_period = parent_event->hw.sample_period;
+		struct hw_perf_event *hwc = &child_event->hw;
+
+		hwc->sample_period = sample_period;
+		hwc->last_period   = sample_period;
+
+		local64_set(&hwc->period_left, sample_period);
+	}
+
+	child_event->ctx = child_ctx;
+	child_event->overflow_handler = parent_event->overflow_handler;
+
+	/*
+	 * Link it up in the child's context:
+	 */
+	add_event_to_ctx(child_event, child_ctx);
+
+	/*
+	 * Get a reference to the parent filp - we will fput it
+	 * when the child event exits. This is safe to do because
+	 * we are in the parent and we know that the filp still
+	 * exists and has a nonzero count:
+	 */
+	atomic_long_inc(&parent_event->filp->f_count);
+
+	/*
+	 * Link this into the parent event's child list
+	 */
+	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
+	mutex_lock(&parent_event->child_mutex);
+	list_add_tail(&child_event->child_list, &parent_event->child_list);
+	mutex_unlock(&parent_event->child_mutex);
+
+	return child_event;
+}
+
+static int inherit_group(struct perf_event *parent_event,
+	      struct task_struct *parent,
+	      struct perf_event_context *parent_ctx,
+	      struct task_struct *child,
+	      struct perf_event_context *child_ctx)
+{
+	struct perf_event *leader;
+	struct perf_event *sub;
+	struct perf_event *child_ctr;
+
+	leader = inherit_event(parent_event, parent, parent_ctx,
+			 child, NULL, child_ctx);
+	if (IS_ERR(leader))
+		return PTR_ERR(leader);
+	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
+		child_ctr = inherit_event(sub, parent, parent_ctx,
+					    child, leader, child_ctx);
+		if (IS_ERR(child_ctr))
+			return PTR_ERR(child_ctr);
+	}
+	return 0;
+}
+
 static int
 inherit_task_group(struct perf_event *event, struct task_struct *parent,
 		   struct perf_event_context *parent_ctx,
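The loop in inherit_group() walks the leader's sibling events with list_for_each_entry(), the kernel's intrusive-list iterator built on container_of(). Below is a self-contained userspace sketch of that pattern; the list helpers are simplified stand-ins for the ones in include/linux/list.h, and struct fake_event is a hypothetical placeholder, not the real struct perf_event.

/*
 * Userspace sketch of the sibling_list iteration performed by inherit_group().
 * All helpers are simplified local versions for illustration only.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

/* Recover the containing structure from a pointer to its embedded member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Walk every entry whose 'member' field is linked on 'head'. */
#define list_for_each_entry(pos, head, member)					\
	for (pos = container_of((head)->next, __typeof__(*pos), member);	\
	     &pos->member != (head);						\
	     pos = container_of(pos->member.next, __typeof__(*pos), member))

struct fake_event {			/* hypothetical stand-in for struct perf_event */
	int id;
	struct list_head group_entry;	/* links the event into its group */
};

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

int main(void)
{
	struct list_head sibling_list = { &sibling_list, &sibling_list };
	struct fake_event a = { .id = 1 }, b = { .id = 2 };
	struct fake_event *sub;

	list_add_tail(&a.group_entry, &sibling_list);
	list_add_tail(&b.group_entry, &sibling_list);

	/* Mirrors the loop in inherit_group(): visit each sibling event. */
	list_for_each_entry(sub, &sibling_list, group_entry)
		printf("inherit sibling event %d\n", sub->id);

	return 0;
}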