author		Stephane Eranian <eranian@google.com>	2010-10-20 09:25:01 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-10-22 08:18:27 -0400
commit		d7842da470f244d258f21c5f72cd8388b3541d04
tree		02196f905f3c27ab9dfc85b26dbb32348d943d60 /kernel/perf_event.c
parent		9ffcfa6f1f63eeac15555b745c292eb9f59130f6
perf_events: Fix for transaction recovery in group_sched_in()
This new version (see commit 8e5fc1a) is much simpler and ensures that,
when event_sched_in() fails partway through group_sched_in(), the
events up to the failed event go through regular event_sched_out().
The failed event and the remaining events in the group, however, have
their timings adjusted as if they had also gone through
event_sched_in() and event_sched_out(). This ensures timing uniformity
across all events in a group. It also takes care of the tstamp_stopped
problem when the group could never be scheduled: tstamp_stopped is
updated as if the event had actually run.
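
To make the timing argument concrete, below is a minimal user-space C
model of the timestamp bookkeeping (a sketch: struct evt, sched_in()
and sched_out() are hypothetical stand-ins for the tstamp updates that
event_sched_in()/event_sched_out() perform; see the patch below):

    #include <stdio.h>

    /* Illustrative model of the perf_event timestamps; not kernel code. */
    struct evt {
            unsigned long long tstamp_enabled;  /* time the event was enabled */
            unsigned long long tstamp_running;  /* base for the run-time delta */
            unsigned long long tstamp_stopped;  /* last time the event stopped */
    };

    /* Timestamp effect of event_sched_in(): credit the stopped gap. */
    static void sched_in(struct evt *e, unsigned long long now)
    {
            e->tstamp_running += now - e->tstamp_stopped;
    }

    /* Timestamp effect of event_sched_out(): mark the stop time. */
    static void sched_out(struct evt *e, unsigned long long now)
    {
            e->tstamp_stopped = now;
    }

    int main(void)
    {
            struct evt e = { 0, 0, 0 };     /* enabled at t=0, never scheduled */
            unsigned long long now = 1000;  /* ctx->time at the failure */

            /* The group_error path simulates a sched_in/sched_out pair: */
            sched_in(&e, now);
            sched_out(&e, now);

            /* For a stopped event, update_event_times() derives
             * time_enabled = tstamp_stopped - tstamp_enabled and
             * time_running = tstamp_stopped - tstamp_running. */
            printf("ena=%llu run=%llu\n",
                   e.tstamp_stopped - e.tstamp_enabled,
                   e.tstamp_stopped - e.tstamp_running);
            /* prints "ena=1000 run=0": enabled time advances, run time
             * stays zero, the ena=..., run=0 pattern shown below. */
            return 0;
    }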
With this patch, the following now reports correct time_enabled
when the NMI watchdog is active:
$ task -e unhalted_core_cycles,instructions_retired,baclears,baclears noploop 1
noploop for 1 seconds
0 unhalted_core_cycles (100.00% scaling, ena=997,552,872, run=0)
0 instructions_retired (100.00% scaling, ena=997,552,872, run=0)
0 baclears (100.00% scaling, ena=997,552,872, run=0)
0 baclears (100.00% scaling, ena=997,552,872, run=0)
And the older test case also works:
$ task -einstructions_retired,baclears,baclears -e unhalted_core_cycles,baclears,baclears sleep 5
1680885 instructions_retired (69.39% scaling, ena=950756, run=291006)
10735 baclears (69.39% scaling, ena=950756, run=291006)
10735 baclears (69.39% scaling, ena=950756, run=291006)
0 unhalted_core_cycles (100.00% scaling, ena=817932, run=0)
0 baclears (100.00% scaling, ena=817932, run=0)
0 baclears (100.00% scaling, ena=817932, run=0)
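
As a sanity check on these numbers: in both runs the reported scaling
percentage is consistent with 100 * (1 - run/ena) (an inference from
the output above, not a documented formula). A quick check in C, using
the instructions_retired line of the second run:

    #include <stdio.h>

    int main(void)
    {
            /* ena/run pair from the instructions_retired line above */
            double ena = 950756.0, run = 291006.0;
            printf("%.2f%% scaling\n", 100.0 * (1.0 - run / ena));
            /* prints "69.39% scaling", matching the reported value */
            return 0;
    }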
Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <4cbeeebc.8ee7d80a.5a28.0d5f@mx.google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_event.c')
 kernel/perf_event.c | 22 ++++++++++++++++++++--
 1 file changed, 20 insertions(+), 2 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 39afdb07d758..517d827f4982 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -691,6 +691,8 @@ group_sched_in(struct perf_event *group_event,
 {
 	struct perf_event *event, *partial_group = NULL;
 	struct pmu *pmu = group_event->pmu;
+	u64 now = ctx->time;
+	bool simulate = false;
 
 	if (group_event->state == PERF_EVENT_STATE_OFF)
 		return 0;
@@ -719,11 +721,27 @@ group_error:
 	/*
 	 * Groups can be scheduled in as one unit only, so undo any
 	 * partial group before returning:
+	 * The events up to the failed event are scheduled out normally,
+	 * tstamp_stopped will be updated.
+	 *
+	 * The failed events and the remaining siblings need to have
+	 * their timings updated as if they had gone thru event_sched_in()
+	 * and event_sched_out(). This is required to get consistent timings
+	 * across the group. This also takes care of the case where the group
+	 * could never be scheduled by ensuring tstamp_stopped is set to mark
+	 * the time the event was actually stopped, such that time delta
+	 * calculation in update_event_times() is correct.
 	 */
 	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
 		if (event == partial_group)
-			break;
-		event_sched_out(event, cpuctx, ctx);
+			simulate = true;
+
+		if (simulate) {
+			event->tstamp_running += now - event->tstamp_stopped;
+			event->tstamp_stopped = now;
+		} else {
+			event_sched_out(event, cpuctx, ctx);
+		}
 	}
 	event_sched_out(group_event, cpuctx, ctx);
 
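
Note that the simulate branch applies exactly the timestamp effect of
an event_sched_in() immediately followed by an event_sched_out() at
the same instant: tstamp_running is advanced by now - tstamp_stopped
and tstamp_stopped is set to now, so the event gains enabled time but
zero additional run time. This keeps the accounting consistent without
issuing PMU start/stop calls for events that were never actually
started.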