author		Soeren Sandmann <sandmann@daimi.au.dk>	2009-09-15 08:33:08 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-10-23 03:35:02 -0400
commit		721a669b7225edeeb0ca8e2bf71b83882326a71b (patch)
tree		51bc7438941db50224489091eed677ba67decf56
parent		2e600d01c131ee189f55ca1879cd364b9e056df8 (diff)
perf events: Fix swevent hrtimer sampling by keeping track of remaining time when enabling/disabling swevent hrtimers
Make the hrtimer-based events work for sysprof.
Whenever a swevent is scheduled out, its hrtimer is canceled;
when it is scheduled back in, the timer is restarted. Since this
happens on every scheduler tick, the timer never expires: it is
repeatedly re-armed with the full sampling period before it has a
chance to fire.
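For illustration, here is a minimal userspace simulation of the
buggy re-arming pattern (a sketch, not part of the patch; the 4 ms
tick and 100 ms sampling period are assumed values). Re-arming with
the full period on every tick keeps the deadline permanently ahead
of the clock, so the timer never fires:

#include <stdio.h>

int main(void)
{
	long long now = 0;
	const long long tick = 4000000;	    /* assumed 4 ms scheduler tick */
	const long long period = 100000000; /* assumed 100 ms sampling period */
	long long deadline = period;	    /* timer armed at t = 0 */
	int fired = 0;

	for (int i = 0; i < 1000; i++) {
		now += tick;
		if (now >= deadline)
			fired++;
		/* buggy pattern: re-arm with the full period on every tick */
		deadline = now + period;
	}
	/* prints: timer fired 0 times in 4000 ms */
	printf("timer fired %d times in %lld ms\n", fired, now / 1000000);
	return 0;
}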
To fix that, save the remaining time when disabling; when
re-enabling, use that saved time as the period instead of the
user-specified sampling period.
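The same simulation with the save-and-resume pattern applied
(again a hypothetical sketch, not the kernel code) fires at the
expected rate, because the saved remainder keeps the original
deadline intact across cancel/restart cycles:

#include <stdio.h>

static long long remaining;	/* analogous to hwc->remaining */

/* "schedule out": cancel the timer, saving the time left until expiry */
static void cancel_timer(long long now, long long deadline)
{
	remaining = deadline - now;
}

/* "schedule in": re-arm with the saved remainder, else the full period */
static long long start_timer(long long now, long long period)
{
	long long p = period;

	if (remaining) {
		p = remaining > 0 ? remaining : 1; /* overdue: fire ASAP */
		remaining = 0;
	}
	return now + p;
}

int main(void)
{
	const long long tick = 4000000;	    /* assumed 4 ms scheduler tick */
	const long long period = 100000000; /* assumed 100 ms sampling period */
	long long now = 0;
	long long deadline = start_timer(now, period);
	int fired = 0;

	for (int i = 0; i < 1000; i++) {	/* one iteration per tick */
		now += tick;
		if (now >= deadline) {
			fired++;
			deadline = now + period;
		}
		cancel_timer(now, deadline);	     /* scheduled out ... */
		deadline = start_timer(now, period); /* ... and back in */
	}
	/* prints: timer fired 40 times in 4000 ms */
	printf("timer fired %d times in %lld ms\n", fired, now / 1000000);
	return 0;
}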
Also, move the starting and stopping of the hrtimers to helper
functions instead of duplicating the code.
Signed-off-by: Søren Sandmann Pedersen <sandmann@redhat.com>
LKML-Reference: <ye8vdi7mluz.fsf@camel16.daimi.au.dk>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	include/linux/perf_event.h	 4
-rw-r--r--	kernel/perf_event.c		61
2 files changed, 43 insertions, 22 deletions
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2e6d95f97419..9e7012689a84 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -471,8 +471,8 @@ struct hw_perf_event {
 			unsigned long	event_base;
 			int		idx;
 		};
-		union { /* software */
-			atomic64_t	count;
+		struct { /* software */
+			s64		remaining;
 			struct hrtimer	hrtimer;
 		};
 	};
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index afb7ef3dbc44..33ff019f9aa6 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -3969,6 +3969,42 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 	return ret;
 }
 
+static void perf_swevent_start_hrtimer(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	hwc->hrtimer.function = perf_swevent_hrtimer;
+	if (hwc->sample_period) {
+		u64 period;
+
+		if (hwc->remaining) {
+			if (hwc->remaining < 0)
+				period = 10000;
+			else
+				period = hwc->remaining;
+			hwc->remaining = 0;
+		} else {
+			period = max_t(u64, 10000, hwc->sample_period);
+		}
+		__hrtimer_start_range_ns(&hwc->hrtimer,
+				ns_to_ktime(period), 0,
+				HRTIMER_MODE_REL, 0);
+	}
+}
+
+static void perf_swevent_cancel_hrtimer(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (hwc->sample_period) {
+		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
+		hwc->remaining = ktime_to_ns(remaining);
+
+		hrtimer_cancel(&hwc->hrtimer);
+	}
+}
+
 /*
  * Software event: cpu wall time clock
  */
@@ -3991,22 +4027,14 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
 	int cpu = raw_smp_processor_id();
 
 	atomic64_set(&hwc->prev_count, cpu_clock(cpu));
-	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	hwc->hrtimer.function = perf_swevent_hrtimer;
-	if (hwc->sample_period) {
-		u64 period = max_t(u64, 10000, hwc->sample_period);
-		__hrtimer_start_range_ns(&hwc->hrtimer,
-				ns_to_ktime(period), 0,
-				HRTIMER_MODE_REL, 0);
-	}
+	perf_swevent_start_hrtimer(event);
 
 	return 0;
 }
 
 static void cpu_clock_perf_event_disable(struct perf_event *event)
 {
-	if (event->hw.sample_period)
-		hrtimer_cancel(&event->hw.hrtimer);
+	perf_swevent_cancel_hrtimer(event);
 	cpu_clock_perf_event_update(event);
 }
 
@@ -4043,22 +4071,15 @@ static int task_clock_perf_event_enable(struct perf_event *event)
 	now = event->ctx->time;
 
 	atomic64_set(&hwc->prev_count, now);
-	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	hwc->hrtimer.function = perf_swevent_hrtimer;
-	if (hwc->sample_period) {
-		u64 period = max_t(u64, 10000, hwc->sample_period);
-		__hrtimer_start_range_ns(&hwc->hrtimer,
-				ns_to_ktime(period), 0,
-				HRTIMER_MODE_REL, 0);
-	}
+
+	perf_swevent_start_hrtimer(event);
 
 	return 0;
 }
 
 static void task_clock_perf_event_disable(struct perf_event *event)
 {
-	if (event->hw.sample_period)
-		hrtimer_cancel(&event->hw.hrtimer);
+	perf_swevent_cancel_hrtimer(event);
 	task_clock_perf_event_update(event, event->ctx->time);
 
 }