Diffstat (limited to 'kernel')
-rw-r--r--  kernel/perf_event.c  |  22
1 file changed, 20 insertions, 2 deletions
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 39afdb07d758..517d827f4982 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -691,6 +691,8 @@ group_sched_in(struct perf_event *group_event,
 {
         struct perf_event *event, *partial_group = NULL;
         struct pmu *pmu = group_event->pmu;
+        u64 now = ctx->time;
+        bool simulate = false;
 
         if (group_event->state == PERF_EVENT_STATE_OFF)
                 return 0;
@@ -719,11 +721,27 @@ group_error:
         /*
          * Groups can be scheduled in as one unit only, so undo any
          * partial group before returning:
+         * The events up to the failed event are scheduled out normally,
+         * tstamp_stopped will be updated.
+         *
+         * The failed events and the remaining siblings need to have
+         * their timings updated as if they had gone thru event_sched_in()
+         * and event_sched_out(). This is required to get consistent timings
+         * across the group. This also takes care of the case where the group
+         * could never be scheduled by ensuring tstamp_stopped is set to mark
+         * the time the event was actually stopped, such that time delta
+         * calculation in update_event_times() is correct.
          */
         list_for_each_entry(event, &group_event->sibling_list, group_entry) {
                 if (event == partial_group)
-                        break;
-                event_sched_out(event, cpuctx, ctx);
+                        simulate = true;
+
+                if (simulate) {
+                        event->tstamp_running += now - event->tstamp_stopped;
+                        event->tstamp_stopped = now;
+                } else {
+                        event_sched_out(event, cpuctx, ctx);
+                }
         }
         event_sched_out(group_event, cpuctx, ctx);
 
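The timestamp bookkeeping in the hunk above is easier to follow with concrete numbers. Below is a minimal user-space sketch of the patched undo loop; struct fake_event, fake_sched_out(), and the sample timestamps are made-up stand-ins for the kernel's perf_event machinery, and only the simulate flag and the two tstamp updates come from the patch. It assumes the relation used by update_event_times() in this era of the code, where an inactive event's total running time is roughly tstamp_stopped - tstamp_running.

/*
 * Minimal user-space sketch of the undo loop above (not kernel code).
 * fake_event and fake_sched_out() are hypothetical stand-ins for the
 * kernel's perf_event and event_sched_out().
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long u64;

struct fake_event {
        const char *name;
        u64 tstamp_running;     /* baseline: running time ~= tstamp_stopped - tstamp_running */
        u64 tstamp_stopped;     /* time the event last stopped */
};

/* Stand-in for event_sched_out(): just record the stop time. */
static void fake_sched_out(struct fake_event *event, u64 now)
{
        event->tstamp_stopped = now;
}

int main(void)
{
        /* Three siblings, all last stopped at t=100. */
        struct fake_event group[] = {
                { "ev0", 0, 100 },      /* scheduled in before the failure */
                { "ev1", 0, 100 },      /* the sibling that failed event_sched_in() */
                { "ev2", 0, 100 },      /* never reached by the sched-in loop */
        };
        struct fake_event *partial_group = &group[1];
        u64 now = 150;          /* stands in for ctx->time */
        bool simulate = false;
        int i;

        /* Same shape as the patched undo loop in group_sched_in(). */
        for (i = 0; i < 3; i++) {
                struct fake_event *event = &group[i];

                if (event == partial_group)
                        simulate = true;

                if (simulate) {
                        /*
                         * Advance both timestamps by the same delta: the
                         * derived running time is unchanged, but
                         * tstamp_stopped now marks when the event really
                         * stopped, as if event_sched_in() and
                         * event_sched_out() had both run at "now".
                         */
                        event->tstamp_running += now - event->tstamp_stopped;
                        event->tstamp_stopped = now;
                } else {
                        fake_sched_out(event, now);
                }
        }

        for (i = 0; i < 3; i++)
                printf("%s: tstamp_running=%llu tstamp_stopped=%llu\n",
                       group[i].name, group[i].tstamp_running,
                       group[i].tstamp_stopped);
        return 0;
}

Compiled and run, ev0 (scheduled out normally) ends with tstamp_stopped=150, while ev1 and ev2 end with tstamp_running=50 and tstamp_stopped=150: both fields advance by the same 50-tick delta, so their derived running time stays at 100, yet tstamp_stopped now records when the group attempt actually ended. Flipping simulate once, at the failed sibling, lets a single pass over the sibling list handle both the genuinely-scheduled events and the ones that never ran.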